{"repo_name":"ratnania\/pigasus","path":"doc\/manual\/include\/demo\/test_neumann_quartcircle.py","copies":"1","size":"2730","content":"#! \/usr\/bin\/python\n\n# ...\ntry:\n from matplotlib import pyplot as plt\n PLOT=True\nexcept ImportError:\n PLOT=False\n# ...\nimport numpy as np\nfrom pigasus.gallery.poisson import *\nimport sys\nimport inspect\nfilename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)\n\n# ...\nsin = np.sin ; cos = np.cos ; pi = np.pi ; exp = np.exp\n# ...\n\n#-----------------------------------\ntry:\n nx = int(sys.argv[1])\nexcept:\n nx = 31\n\ntry:\n ny = int(sys.argv[2])\nexcept:\n ny = 31\n\ntry:\n px = int(sys.argv[3])\nexcept:\n px = 2\n\ntry:\n py = int(sys.argv[4])\nexcept:\n py = 2\n\nfrom igakit.cad_geometry import quart_circle as domain\ngeo = domain(n=[nx,ny],p=[px,py])\n#-----------------------------------\n\n# ...\n# exact solution\n# ...\nR = 1.\nr = 0.5\nc = 1. # for neumann\n#c = pi \/ (R**2-r**2) # for all dirichlet bc\nu = lambda x,y : [ x * y * sin ( c * (R**2 - x**2 - y**2 )) ]\n# ...\n\n# ...\n# rhs\n# ...\nf = lambda x,y : [4*c**2*x**3*y*sin(c*(R**2 - x**2 - y**2)) \\\n + 4*c**2*x*y**3*sin(c*(R**2 - x**2 - y**2)) \\\n + 12*c*x*y*cos(c*(R**2 - x**2 - y**2)) ]\n# ...\n\n# ...\n# values of gradu.n at the boundary\n# ...\ngradu = lambda x,y : [-2*c*x**2*y*cos(c*(R**2 - x**2 - y**2)) + y*sin(c*(R**2\n -\n x**2\n -\n y**2)) \\\n ,-2*c*x*y**2*cos(c*(R**2 - x**2 - y**2)) + x*sin(c*(R**2 - x**2 - y**2)) ]\n\ndef func_g (x,y) :\n du = gradu (x, y)\n return [ du[0] , du[1] ]\n# ...\n\n# ...\n# values of u at the boundary\n# ...\n\nbc_neumann={}\n\nbc_neumann [0,0] = func_g\nDirichlet = [[1,2,3]]\n\n#AllDirichlet = True\n# ...\n\n# ...\ntry:\n bc_dirichlet\nexcept NameError:\n bc_dirichlet = None\nelse:\n pass\n\ntry:\n bc_neumann\nexcept NameError:\n bc_neumann = None\nelse:\n pass\n\ntry:\n AllDirichlet\nexcept NameError:\n AllDirichlet = None\nelse:\n pass\n\ntry:\n Dirichlet\nexcept NameError:\n Dirichlet = None\nelse:\n pass\n\ntry:\n Metric\nexcept NameError:\n Metric = None\nelse:\n pass\n# ...\n\n# ...\nPDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,\n AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)\n# ...\n\n# ...\nPDE.assembly(f=f)\nPDE.solve()\n# ...\n\n# ...\nnormU = PDE.norm(exact=u)\nprint \"norm U = \", normU\n# ...\n\n# ...\nif PLOT:\n PDE.plot() ; plt.colorbar(); plt.title('$u_h$')\n plt.savefig(filename.split('.py')[0]+'.png', format='png')\n plt.clf()\n# ...\n\nPDE.free()\n","license":"mit"} {"repo_name":"devanshdalal\/scikit-learn","path":"examples\/gaussian_process\/plot_gpr_noisy_targets.py","copies":"64","size":"3706","content":"\"\"\"\n=========================================================\nGaussian Processes regression: basic introductory example\n=========================================================\n\nA simple one-dimensional regression example computed in two different ways:\n\n1. A noise-free case\n2. 
A noisy case with known noise-level per datapoint\n\nIn both cases, the kernel's parameters are estimated using the maximum\nlikelihood principle.\n\nThe figures illustrate the interpolating property of the Gaussian Process\nmodel as well as its probabilistic nature in the form of a pointwise 95%\nconfidence interval.\n\nNote that the parameter ``alpha`` is applied as a Tikhonov\nregularization of the assumed covariance between the training points.\n\"\"\"\nprint(__doc__)\n\n# Author: Vincent Dubourg \n# Jake Vanderplas \n# Jan Hendrik Metzen s\n# License: BSD 3 clause\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel as C\n\nnp.random.seed(1)\n\n\ndef f(x):\n \"\"\"The function to predict.\"\"\"\n return x * np.sin(x)\n\n# ----------------------------------------------------------------------\n# First the noiseless case\nX = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T\n\n# Observations\ny = f(X).ravel()\n\n# Mesh the input space for evaluations of the real function, the prediction and\n# its MSE\nx = np.atleast_2d(np.linspace(0, 10, 1000)).T\n\n# Instanciate a Gaussian Process model\nkernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))\ngp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)\n\n# Fit to data using Maximum Likelihood Estimation of the parameters\ngp.fit(X, y)\n\n# Make the prediction on the meshed x-axis (ask for MSE as well)\ny_pred, sigma = gp.predict(x, return_std=True)\n\n# Plot the function, the prediction and the 95% confidence interval based on\n# the MSE\nfig = plt.figure()\nplt.plot(x, f(x), 'r:', label=u'$f(x) = x\\,\\sin(x)$')\nplt.plot(X, y, 'r.', markersize=10, label=u'Observations')\nplt.plot(x, y_pred, 'b-', label=u'Prediction')\nplt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([y_pred - 1.9600 * sigma,\n (y_pred + 1.9600 * sigma)[::-1]]),\n alpha=.5, fc='b', ec='None', label='95% confidence interval')\nplt.xlabel('$x$')\nplt.ylabel('$f(x)$')\nplt.ylim(-10, 20)\nplt.legend(loc='upper left')\n\n# ----------------------------------------------------------------------\n# now the noisy case\nX = np.linspace(0.1, 9.9, 20)\nX = np.atleast_2d(X).T\n\n# Observations and noise\ny = f(X).ravel()\ndy = 0.5 + 1.0 * np.random.random(y.shape)\nnoise = np.random.normal(0, dy)\ny += noise\n\n# Instanciate a Gaussian Process model\ngp = GaussianProcessRegressor(kernel=kernel, alpha=(dy \/ y) ** 2,\n n_restarts_optimizer=10)\n\n# Fit to data using Maximum Likelihood Estimation of the parameters\ngp.fit(X, y)\n\n# Make the prediction on the meshed x-axis (ask for MSE as well)\ny_pred, sigma = gp.predict(x, return_std=True)\n\n# Plot the function, the prediction and the 95% confidence interval based on\n# the MSE\nfig = plt.figure()\nplt.plot(x, f(x), 'r:', label=u'$f(x) = x\\,\\sin(x)$')\nplt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')\nplt.plot(x, y_pred, 'b-', label=u'Prediction')\nplt.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([y_pred - 1.9600 * sigma,\n (y_pred + 1.9600 * sigma)[::-1]]),\n alpha=.5, fc='b', ec='None', label='95% confidence interval')\nplt.xlabel('$x$')\nplt.ylabel('$f(x)$')\nplt.ylim(-10, 20)\nplt.legend(loc='upper left')\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"lordkman\/burnman","path":"examples\/example_geotherms.py","copies":"4","size":"4049","content":"# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth 
and Planetary Sciences\n# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU\n# GPL v2 or later.\n\n\n\"\"\"\nexample_geotherms\n-----------------\n\nThis example shows each of the geotherms currently possible with BurnMan.\nThese are:\n\n1. Brown and Shankland, 1981 :cite:`Brown1981`\n2. Anderson, 1982 :cite:`anderson1982earth`\n3. Watson and Baxter, 2007 :cite:`Watson2007`\n4. linear extrapolation\n5. Read in from file from user\n6. Adiabatic from potential temperature and choice of mineral\n\n*Uses:*\n\n* :func:`burnman.geotherm.brown_shankland`\n* :func:`burnman.geotherm.anderson`\n* input geotherm file *input_geotherm\/example_geotherm.txt* (optional)\n* :class:`burnman.composite.Composite` for adiabat\n\n*Demonstrates:*\n\n* the available geotherms\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n# hack to allow scripts to be placed in subdirectories next to burnman:\nif not os.path.exists('burnman') and os.path.exists('..\/burnman'):\n sys.path.insert(1, os.path.abspath('..'))\n\nimport burnman\nfrom burnman import minerals\n\nif __name__ == \"__main__\":\n # we want to evaluate several geotherms at these values\n pressures = np.arange(9.0e9, 128e9, 3e9)\n seismic_model = burnman.seismic.PREM()\n depths = seismic_model.depth(pressures)\n # load two builtin geotherms and evaluate the temperatures at all pressures\n temperature1 = burnman.geotherm.brown_shankland(depths)\n temperature2 = burnman.geotherm.anderson(depths)\n\n # a geotherm is actually just a function that returns a list of temperatures given pressures in Pa\n # so we can just write our own function\n my_geotherm_function = lambda p: [1500 + (2500 - 1500) * x \/ 128e9 for x in p]\n temperature3 = my_geotherm_function(pressures)\n\n # what about a geotherm defined from datapoints given in a file (our\n # inline)?\n table = [[1e9, 1600], [30e9, 1700], [130e9, 2700]]\n # this could also be loaded from a file, just uncomment this\n # table = burnman.tools.read_table(\"input_geotherm\/example_geotherm.txt\")\n\n table_pressure = np.array(table)[:, 0]\n table_temperature = np.array(table)[:, 1]\n\n my_geotherm_interpolate = lambda p: [np.interp(x, table_pressure,\n table_temperature) for x in p]\n\n temperature4 = my_geotherm_interpolate(pressures)\n\n # finally, we can also calculate a self consistent\n # geotherm for an assemblage of minerals\n # based on self compression of the composite rock.\n # First we need to define an assemblage\n amount_perovskite = 0.8\n fe_pv = 0.05\n fe_pc = 0.2\n pv = minerals.SLB_2011.mg_fe_perovskite()\n pc = minerals.SLB_2011.ferropericlase()\n pv.set_composition([1. - fe_pv, fe_pv, 0.])\n pc.set_composition([1. 
- fe_pc, fe_pc])\n example_rock = burnman.Composite(\n [pv, pc], [amount_perovskite, 1.0 - amount_perovskite])\n\n # next, define an anchor temperature at which we are starting.\n # Perhaps 1500 K for the upper mantle\n T0 = 1500.\n # then generate temperature values using the self consistent function.\n # This takes more time than the above methods\n temperature5 = burnman.geotherm.adiabatic(pressures, T0, example_rock)\n\n # you can also look at burnman\/geotherm.py to see how the geotherms are\n # implemented\n\n plt.plot(pressures \/ 1e9, temperature1, '-r', label=\"Brown, Shankland\")\n plt.plot(pressures \/ 1e9, temperature2, '-c', label=\"Anderson\")\n plt.plot(pressures \/ 1e9, temperature3, '-b', label=\"handwritten linear\")\n plt.plot(pressures \/ 1e9, temperature4,\n '-k', label=\"handwritten from table\")\n plt.plot(pressures \/ 1e9, temperature5, '-m',\n label=\"Adiabat with pv (70%) and fp(30%)\")\n\n plt.legend(loc='lower right')\n plt.xlim([8.5, 130])\n plt.xlabel('Pressure\/GPa')\n plt.ylabel('Temperature')\n plt.savefig(\"output_figures\/example_geotherm.png\")\n plt.show()\n","license":"gpl-2.0"} {"repo_name":"francesco-mannella\/dmp-esn","path":"parametric\/parametric_dmp\/bin\/tr_datasets\/e_cursive_curves_angles_start_none\/results\/plot.py","copies":"18","size":"1043","content":"#!\/usr\/bin\/env python\n\nimport glob \nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\nimport sys\n\n\npathname = os.path.dirname(sys.argv[0])\nif pathname:\n os.chdir(pathname)\n\nn_dim = None\ntrains = []\nfor fname in glob.glob(\"tl*\"):\n t = np.loadtxt(fname)\n trains.append(t)\n\ntests = []\nfor fname in glob.glob(\"tt*\"):\n t = np.loadtxt(fname)\n tests.append(t)\n\ntrial_results= []\nfor fname in glob.glob(\"rtl*\"):\n t = np.loadtxt(fname)\n trial_results.append(t)\n\ntest_results= []\nfor fname in glob.glob(\"rtt*\"):\n t = np.loadtxt(fname)\n test_results.append(t)\n\nfig = plt.figure()\nax = fig.add_subplot(111, aspect=\"equal\")\nfor d in trains:\n ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=\"blue\", lw=3, alpha=0.5)\nfor d in tests: \n ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=\"red\", lw=3, alpha=0.5)\nfor d in trial_results:\n ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)\nfor d in test_results:\n ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)\nplt.show()\n\n","license":"gpl-2.0"} {"repo_name":"flowersteam\/SESM","path":"SESM\/pykinect.py","copies":"2","size":"3387","content":"import zmq\nimport numpy\nimport threading\n\nfrom collections import namedtuple\n\nPoint2D = namedtuple('Point2D', ('x', 'y'))\nPoint3D = namedtuple('Point3D', ('x', 'y', 'z'))\nQuaternion = namedtuple('Quaternion', ('x', 'y', 'z', 'w'))\n\ntorso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')\nleft_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')\nright_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')\nleft_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')\nright_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')\nskeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints\n\n\nclass Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):\n joints = skeleton_joints\n\n @property\n def to_np(self):\n l = []\n \n for j in self.joints:\n p = getattr(self, j).position\n l.append((p.x, p.y, p.z))\n \n return numpy.array(l)\n \nJoint = namedtuple('Joint', ('position', 
'orientation', 'pixel_coordinate'))\n\n\nclass KinectSensor(object):\n def __init__(self, addr, port):\n self._lock = threading.Lock()\n self._skeleton = None\n\n context = zmq.Context()\n self.socket = context.socket(zmq.REQ)\n self.socket.connect('tcp:\/\/{}:{}'.format(addr, port))\n\n t = threading.Thread(target=self.get_skeleton)\n t.daemon = True\n t.start()\n\n @property\n def tracked_skeleton(self):\n with self._lock:\n return self._skeleton\n\n @tracked_skeleton.setter\n def tracked_skeleton(self, skeleton):\n with self._lock:\n self._skeleton = skeleton\n\n def get_skeleton(self):\n while True:\n self.socket.send('Hello')\n\n md = self.socket.recv_json()\n msg = self.socket.recv()\n\n skeleton_array = numpy.frombuffer(buffer(msg), dtype=md['dtype'])\n skeleton_array = skeleton_array.reshape(md['shape'])\n\n joints = []\n for i in range(len(skeleton_joints)):\n x, y, z, w = skeleton_array[i][0:4]\n position = Point3D(x \/ w, y \/ w, z \/ w)\n pixel_coord = Point2D(*skeleton_array[i][4:6])\n orientation = Quaternion(*skeleton_array[i][6:10])\n joints.append(Joint(position, orientation, pixel_coord))\n\n self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)\n\n\ndef draw_position(skel, ax):\n xy, zy = [], []\n\n if not skel:\n return\n\n for j in skeleton_joints:\n p = getattr(skel, j).position\n xy.append((p.x, p.y))\n zy.append((p.z, p.y))\n\n ax.set_xlim(-2, 5)\n ax.set_ylim(-1.5, 1.5)\n ax.scatter(zip(*xy)[0], zip(*xy)[1], 30, 'b')\n ax.scatter(zip(*zy)[0], zip(*zy)[1], 30, 'r')\n\n\nif __name__ == '__main__':\n import time\n\n import matplotlib.pyplot as plt\n plt.ion()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n kinect_sensor = KinectSensor('193.50.110.210', 9999)\n\n import skelangle\n\n kinect_angle = skelangle.AngleFromSkel()\n\n try:\n while True:\n ax.clear()\n draw_position(kinect_sensor.tracked_skeleton, ax)\n plt.draw()\n time.sleep(0.1)\n except KeyboardInterrupt:\n plt.close('all')\n","license":"gpl-3.0"} {"repo_name":"gwparikh\/cvgui","path":"grouping_calibration.py","copies":"2","size":"9402","content":"#!\/usr\/bin\/env python\n\nimport os, sys, subprocess\nimport argparse\nimport subprocess\nimport threading\nimport timeit\nfrom multiprocessing import Queue, Lock\nfrom configobj import ConfigObj\nfrom numpy import loadtxt\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\nimport moving\nfrom cvguipy import trajstorage, cvgenetic, cvconfig\n\n\"\"\" \nGrouping Calibration By Genetic Algorithm.\nThis script uses genetic algorithm to search for the best configuration.\n\nIt does not monitor RAM usage, therefore, CPU thrashing might be happened when number of parents (selection size) is too large. 
\n\"\"\"\n# class for genetic algorithm\nclass GeneticCompare(object):\n def __init__(self, motalist, motplist, IDlist, cfg_list, lock):\n self.motalist = motalist\n self.motplist = motplist\n self.IDlist = IDlist\n self.cfg_list = cfg_list\n self.lock = lock\n \n # This is used for calculte fitness of individual in genetic algorithn.\n # It is modified to create sqlite and cfg file before tuning computeClearMOT.\n # NOTE errors show up when loading two same ID\n def computeMOT(self, i):\n \n # create sqlite and cfg file with id i\n cfg_name = config_files +str(i)+'.cfg'\n sql_name = sqlite_files +str(i)+'.sqlite'\n open(cfg_name,'w').close()\n config = ConfigObj(cfg_name)\n cfg_list.write_config(i ,config)\n command = ['cp', 'tracking_only.sqlite', sql_name]\n process = subprocess.Popen(command)\n process.wait()\n command = ['trajextract.py', args.inputVideo, '-o', args.homography, '-t', cfg_name, '-d', sql_name, '--gf']\n # suppress output of grouping extraction\n devnull = open(os.devnull, 'wb')\n process = subprocess.Popen(command, stdout = devnull)\n process.wait()\n \n obj = trajstorage.CVsqlite(sql_name)\n print \"loading\", i\n obj.loadObjects()\n motp, mota, mt, mme, fpt, gt = moving.computeClearMOT(cdb.annotations, obj.objects, args.matchDistance, firstFrame, lastFrame)\n if motp is None:\n motp = 0\n self.lock.acquire()\n self.IDlist.put(i)\n self.motplist.put(motp)\n self.motalist.put(mota)\n obj.close()\n if args.PrintMOTA:\n print(\"ID: mota:{} motp:{}\".format(mota, motp))\n self.lock.release()\n \n return mota\n \nif __name__ == '__main__' :\n parser = argparse.ArgumentParser(description=\"compare all sqlites that are created by cfg_combination.py to the Annotated version to find the ID of the best configuration\")\n parser.add_argument('inputVideo', help= \"input video filename\")\n parser.add_argument('-r', '--configuration-file', dest='range_cfg', help= \"the configuration-file contain the range of configuration\")\n parser.add_argument('-t', '--traffintel-config', dest='traffintelConfig', help= \"the TrafficIntelligence file to use for running the first extraction.\")\n parser.add_argument('-m', '--mask-File', dest='maskFilename', help=\"Name of the mask-File for trajextract\")\n parser.add_argument('-d', '--database-file', dest ='databaseFile', help =\"Name of the databaseFile.\")\n parser.add_argument('-o', '--homography-file', dest ='homography', help = \"Name of the homography file.\", required = True)\n parser.add_argument('-md', '--matching-distance', dest='matchDistance', help = \"matchDistance\", default = 10, type = float)\n parser.add_argument('-a', '--accuracy', dest = 'accuracy', help = \"accuracy parameter for genetic algorithm\", type = int)\n parser.add_argument('-p', '--population', dest = 'population', help = \"population parameter for genetic algorithm\", required = True, type = int)\n parser.add_argument('-np', '--num-of-parents', dest = 'num_of_parents', help = \"Number of parents that are selected each generation\", type = int)\n parser.add_argument('-mota', '--print-MOTA', dest='PrintMOTA', action = 'store_true', help = \"Print MOTA for each ID.\")\n args = parser.parse_args()\n \n os.mkdir('cfg_files')\n os.mkdir('sql_files')\n sqlite_files = \"sql_files\/Sqlite_ID_\"\n config_files = \"cfg_files\/Cfg_ID_\"\n \n # ------------------initialize annotated version if not existed ---------- #\n # inputVideo check\n if not os.path.exists(args.inputVideo):\n print(\"Input video {} does not exist! 
Exiting...\".format(args.inputVideo))\n sys.exit(1)\n\n # configuration file check\n if args.range_cfg is None:\n config = ConfigObj('range.cfg')\n else:\n config = ConfigObj(args.range_cfg)\n\n # get configuration and put them to a List\n cfg_list = cvconfig.CVConfigList()\n thread_cfgtolist = threading.Thread(target = cvconfig.config_to_list, args = (cfg_list, config))\n thread_cfgtolist.start();\n # check if dbfile name is entered\n if args.databaseFile is None:\n print(\"Database-file is not entered, running trajextract and cvplayer.\")\n if not os.path.exists(args.homography):\n print(\"Homography file does not exist! Exiting...\")\n sys.exit(1)\n else:\n videofile=args.inputVideo\n if 'avi' in videofile:\n if args.maskFilename is not None:\n command = ['trajextract.py',args.inputVideo,'-m', args.maskFilename,'-o', args.homography]\n else:\n command = ['trajextract.py',args.inputVideo,'-o', args.homography]\n process = subprocess.Popen(command)\n process.wait()\n databaseFile = videofile.replace('avi','sqlite')\n command = ['cvplayer.py',args.inputVideo,'-d',databaseFile,'-o',args.homography]\n process = subprocess.Popen(command)\n process.wait()\n else:\n print(\"Input video {} is not 'avi' type. Exiting...\".format(args.inputVideo))\n sys.exit(1)\n else:\n databaseFile = args.databaseFile\n thread_cfgtolist.join()\n # ------------------Done initialization for annotation-------------------- #\n \n # create first tracking only database template.\n print(\"creating the first tracking only database template.\")\n if args.maskFilename is not None:\n command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '-m', args.maskFilename, '--tf'])\n else:\n command = map(str, ['trajextract.py',args.inputVideo, '-d', sql_name, '-t', args.traffintelConfig, '-o', args.homography, '--tf'])\n process = subprocess.Popen(command)\n process.wait()\n # ----start using genetic algorithm to search for best configuration-------#\n start = timeit.default_timer()\n \n dbfile = databaseFile;\n homography = loadtxt(args.homography)\n \n cdb = trajstorage.CVsqlite(dbfile)\n cdb.open()\n cdb.getLatestAnnotation()\n cdb.createBoundingBoxTable(cdb.latestannotations, inv(homography))\n cdb.loadAnnotaion()\n for a in cdb.annotations:\n a.computeCentroidTrajectory(homography)\n print \"Latest Annotaions in \"+dbfile+\": \", cdb.latestannotations\n \n cdb.frameNumbers = cdb.getFrameList()\n firstFrame = cdb.frameNumbers[0]\n lastFrame = cdb.frameNumbers[-1]\n \n foundmota = Queue()\n foundmotp = Queue()\n IDs = Queue()\n lock = Lock()\n \n Comp = GeneticCompare(foundmota, foundmotp, IDs, cfg_list, lock)\n if args.accuracy != None:\n GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT, args.accuracy)\n else:\n GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT)\n if args.num_of_parents != None:\n GeneticCal.run_thread(args.num_of_parents)\n else:\n GeneticCal.run_thread()\n \n # tranform queues to lists\n foundmota = cvgenetic.Queue_to_list(foundmota)\n foundmotp = cvgenetic.Queue_to_list(foundmotp)\n IDs = cvgenetic.Queue_to_list(IDs)\n\n for i in range(len(foundmotp)):\n foundmotp[i] \/= args.matchDistance\n Best_mota = max(foundmota)\n Best_ID = IDs[foundmota.index(Best_mota)]\n print \"Best multiple object tracking accuracy (MOTA)\", Best_mota\n print \"ID:\", Best_ID\n stop = timeit.default_timer()\n print str(stop-start) + \"s\"\n \n total = []\n for i in range(len(foundmota)):\n 
total.append(foundmota[i]- 0.1 * foundmotp[i])\n Best_total = max(total)\n Best_total_ID = IDs[total.index(Best_total)]\n # ------------------------------Done searching----------------------------#\n # use matplot to plot a graph of all calculated IDs along with thier mota\n plt.figure(1)\n plt.plot(foundmota ,IDs ,'bo')\n plt.plot(foundmotp ,IDs ,'yo')\n plt.plot(Best_mota, Best_ID, 'ro')\n plt.axis([-1, 1, -1, cfg_list.get_total_combination()])\n plt.xlabel('mota')\n plt.ylabel('ID')\n plt.title(b'Best MOTA: '+str(Best_mota) +'\\nwith ID: '+str(Best_ID))\n plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_mota.png'\n plt.savefig(plotFile)\n \n plt.figure(2)\n plt.plot(total, IDs, 'bo')\n plt.plot(Best_total, Best_total_ID, 'ro')\n plt.xlabel('mota + motp')\n plt.ylabel('ID')\n plt.title(b'Best total: '+str(Best_total) +'\\nwith ID: '+str(Best_total_ID))\n \n # save the plot\n plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_motp.png'\n plt.savefig(plotFile)\n \n plt.show()\n \n cdb.close()\n","license":"mit"} {"repo_name":"keflavich\/pyspeckit-obsolete","path":"pyspeckit\/spectrum\/models\/ammonia.py","copies":"1","size":"28836","content":"\"\"\"\n========================================\nAmmonia inversion transition TKIN fitter\n========================================\n\nAmmonia inversion transition TKIN fitter translated from Erik Rosolowsky's\nhttp:\/\/svn.ok.ubc.ca\/svn\/signals\/nh3fit\/\n\n.. moduleauthor:: Adam Ginsburg \n\nModule API\n^^^^^^^^^^\n\n\"\"\"\nimport numpy as np\nfrom pyspeckit.mpfit import mpfit\nfrom pyspeckit.spectrum.parinfo import ParinfoList,Parinfo\nimport fitter\nimport matplotlib.cbook as mpcb\nimport copy\nimport model\n\n\nline_names = ['oneone','twotwo','threethree','fourfour']\n\nfreq_dict = { \n 'oneone': 23.694506e9,\n 'twotwo': 23.722633335e9,\n 'threethree': 23.8701296e9,\n 'fourfour': 24.1394169e9,\n }\naval_dict = {\n 'oneone': 1.712e-7, #64*!pi**4\/(3*h*c**3)*nu11**3*mu0**2*(1\/2.)\n 'twotwo': 2.291e-7, #64*!pi**4\/(3*h*c**3)*nu22**3*mu0**2*(2\/3.)\n 'threethree': 2.625e-7, #64*!pi**4\/(3*h*c**3)*nu33**3*mu0**2*(3\/4.)\n 'fourfour': 3.167e-7, #64*!pi**4\/(3*h*c**3)*nu44**3*mu0**2*(4\/5.)\n }\northo_dict = {\n 'oneone': False,\n 'twotwo': False,\n 'threethree': True,\n 'fourfour': False,\n }\nn_ortho = np.arange(0,28,3) # 0..3..27\nn_para = np.array([x for x in range(28) if x % 3 != 0])\n\nvoff_lines_dict = {\n 'oneone': [19.8513, 19.3159, 7.88669, 7.46967, 7.35132, 0.460409, 0.322042,\n -0.0751680, -0.213003, 0.311034, 0.192266, -0.132382, -0.250923, -7.23349,\n -7.37280, -7.81526, -19.4117, -19.5500],\n 'twotwo':[26.5263, 26.0111, 25.9505, 16.3917, 16.3793, 15.8642, 0.562503,\n 0.528408, 0.523745, 0.0132820, -0.00379100, -0.0132820, -0.501831,\n -0.531340, -0.589080, -15.8547, -16.3698, -16.3822, -25.9505, -26.0111,\n -26.5263],\n 'threethree':[29.195098, 29.044147, 28.941877, 28.911408, 21.234827,\n 21.214619, 21.136387, 21.087456, 1.005122, 0.806082, 0.778062,\n 0.628569, 0.016754, -0.005589, -0.013401, -0.639734, -0.744554,\n -1.031924, -21.125222, -21.203441, -21.223649, -21.076291, -28.908067,\n -28.938523, -29.040794, -29.191744],\n 'fourfour':[ 0. , -30.49783692, 30.49783692, 0., 24.25907811,\n -24.25907811, 0. 
]\n }\n\ntau_wts_dict = {\n 'oneone': [0.0740740, 0.148148, 0.0925930, 0.166667, 0.0185190, 0.0370370,\n 0.0185190, 0.0185190, 0.0925930, 0.0333330, 0.300000, 0.466667,\n 0.0333330, 0.0925930, 0.0185190, 0.166667, 0.0740740, 0.148148],\n 'twotwo': [0.00418600, 0.0376740, 0.0209300, 0.0372090, 0.0260470,\n 0.00186000, 0.0209300, 0.0116280, 0.0106310, 0.267442, 0.499668,\n 0.146512, 0.0116280, 0.0106310, 0.0209300, 0.00186000, 0.0260470,\n 0.0372090, 0.0209300, 0.0376740, 0.00418600],\n 'threethree': [0.012263, 0.008409, 0.003434, 0.005494, 0.006652, 0.008852,\n 0.004967, 0.011589, 0.019228, 0.010387, 0.010820, 0.009482, 0.293302,\n 0.459109, 0.177372, 0.009482, 0.010820, 0.019228, 0.004967, 0.008852,\n 0.006652, 0.011589, 0.005494, 0.003434, 0.008409, 0.012263],\n 'fourfour': [0.2431, 0.0162, 0.0162, 0.3008, 0.0163, 0.0163, 0.3911]}\n\ndef ammonia(xarr, tkin=20, tex=None, ntot=1e14, width=1,\n xoff_v=0.0, fortho=0.0, tau=None, fillingfraction=None, return_tau=False,\n thin=False, verbose=False, return_components=False, debug=False ):\n \"\"\"\n Generate a model Ammonia spectrum based on input temperatures, column, and\n gaussian parameters\n\n ntot can be specified as a column density (e.g., 10^15) or a log-column-density (e.g., 15)\n\n tex can be specified or can be assumed LTE if unspecified, if tex>tkin, or if \"thin\"\n is specified\n\n \"thin\" uses a different parametetrization and requires only the optical depth, width, offset,\n and tkin to be specified. In the 'thin' approximation, tex is not used in computation of\n the partition function - LTE is implicitly assumed\n\n If tau is specified, ntot is NOT fit but is set to a fixed value\n fillingfraction is an arbitrary scaling factor to apply to the model\n fortho is the ortho\/(ortho+para) fraction. The default is to assume all ortho.\n xoff_v is the velocity offset in km\/s \n\n tau refers to the optical depth of the 1-1 line. The optical depths of the\n other lines are fixed relative to tau_oneone\n\n (not implemented) if tau is specified, ntot is ignored\n \"\"\"\n\n # Convert X-units to frequency in GHz\n xarr = xarr.as_unit('GHz')\n\n if tex is not None:\n if tex > tkin: # cannot have Tex > Tkin\n tex = tkin \n elif thin: # tex is not used in this case\n tex = tkin\n else:\n tex = tkin\n\n if thin:\n ntot = 1e15\n elif 5 < ntot < 25: \n # allow ntot to be specified as a logarithm. 
This is\n # safe because ntot < 1e10 gives a spectrum of all zeros, and the\n # plausible range of columns is not outside the specified range\n ntot = 10**ntot\n elif (25 < ntot < 1e5) or (ntot < 5):\n # these are totally invalid for log\/non-log\n return 0\n\n # fillingfraction is an arbitrary scaling for the data\n # The model will be (normal model) * fillingfraction\n if fillingfraction is None:\n fillingfraction = 1.0\n\n ckms = 2.99792458e5\n ccms = ckms*1e5\n g1 = 1 \n g2 = 1 \n h = 6.6260693e-27 \n kb = 1.3806505e-16 \n mu0 = 1.476e-18 # Dipole Moment in cgs (1.476 Debeye)\n \n # Generate Partition Functions \n nlevs = 51\n jv=np.arange(nlevs)\n ortho = jv % 3 == 0\n para = True-ortho\n Jpara = jv[para]\n Jortho = jv[ortho]\n Brot = 298117.06e6\n Crot = 186726.36e6\n\n runspec = np.zeros(len(xarr))\n \n tau_dict = {}\n para_count = 0\n ortho_count = 1 # ignore 0-0\n\n if tau is not None and thin:\n \"\"\"\n Use optical depth in the 1-1 line as a free parameter\n The optical depths of the other lines are then set by the kinetic temperature\n Tex is still a free parameter in the final spectrum calculation at the bottom\n (technically, I think this process assumes LTE; Tex should come into play in\n these equations, not just the final one)\n \"\"\"\n dT0 = 41.5 # Energy diff between (2,2) and (1,1) in K\n trot = tkin\/(1+tkin\/dT0*np.log(1+0.6*np.exp(-15.7\/tkin)))\n tau_dict['oneone'] = tau\n tau_dict['twotwo'] = tau*(23.722\/23.694)**2*4\/3.*5\/3.*np.exp(-41.5\/trot)\n tau_dict['threethree'] = tau*(23.8701279\/23.694)**2*3\/2.*14.\/3.*np.exp(-101.1\/trot)\n tau_dict['fourfour'] = tau*(24.1394169\/23.694)**2*8\/5.*9\/3.*np.exp(-177.34\/trot)\n else:\n \"\"\"\n Column density is the free parameter. It is used in conjunction with\n the full partition function to compute the optical depth in each band\n Given the complexity of these equations, it would be worth my while to\n comment each step carefully. \n \"\"\"\n Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+\n (Crot-Brot)*Jpara**2)\/(kb*tkin))\n Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+\n (Crot-Brot)*Jortho**2)\/(kb*tkin))\n for linename in line_names:\n if ortho_dict[linename]:\n orthoparafrac = fortho\n Z = Zortho \n count = ortho_count\n ortho_count += 1\n else:\n orthoparafrac = 1.0-fortho\n Z = Zpara\n count = para_count # need to treat partition function separately\n para_count += 1\n tau_dict[linename] = (ntot * orthoparafrac * Z[count]\/(Z.sum()) \/ ( 1\n + np.exp(-h*freq_dict[linename]\/(kb*tkin) )) * ccms**2 \/\n (8*np.pi*freq_dict[linename]**2) * aval_dict[linename]*\n (1-np.exp(-h*freq_dict[linename]\/(kb*tex))) \/\n (width\/ckms*freq_dict[linename]*np.sqrt(2*np.pi)) )\n\n # allow tau(11) to be specified instead of ntot\n # in the thin case, this is not needed: ntot plays no role\n # this process allows you to specify tau without using the approximate equations specified\n # above. 
It should remove ntot from the calculations anyway...\n if tau is not None and not thin:\n tau11_temp = tau_dict['oneone']\n # re-scale all optical depths so that tau is as specified, but the relative taus\n # are sest by the kinetic temperature and partition functions\n for linename,t in tau_dict.iteritems():\n tau_dict[linename] = t * tau\/tau11_temp\n\n components =[]\n for linename in line_names:\n voff_lines = np.array(voff_lines_dict[linename])\n tau_wts = np.array(tau_wts_dict[linename])\n \n lines = (1-voff_lines\/ckms)*freq_dict[linename]\/1e9\n tau_wts = tau_wts \/ (tau_wts).sum()\n nuwidth = np.abs(width\/ckms*lines)\n nuoff = xoff_v\/ckms*lines\n \n # tau array\n tauprof = np.zeros(len(xarr))\n for kk,no in enumerate(nuoff):\n tauprof += (tau_dict[linename] * tau_wts[kk] *\n np.exp(-(xarr+no-lines[kk])**2 \/ (2.0*nuwidth[kk]**2)) *\n fillingfraction)\n components.append( tauprof )\n \n T0 = (h*xarr*1e9\/kb) # \"temperature\" of wavelength\n if tau is not None and thin:\n #runspec = tauprof+runspec\n # is there ever a case where you want to ignore the optical depth function? I think no\n runspec = (T0\/(np.exp(T0\/tex)-1)-T0\/(np.exp(T0\/2.73)-1))*(1-np.exp(-tauprof))+runspec\n else:\n runspec = (T0\/(np.exp(T0\/tex)-1)-T0\/(np.exp(T0\/2.73)-1))*(1-np.exp(-tauprof))+runspec\n if runspec.min() < 0:\n raise ValueError(\"Model dropped below zero. That is not possible normally. Here are the input values: \"+\n (\"tex: %f \" % tex) + \n (\"tkin: %f \" % tkin) + \n (\"ntot: %f \" % ntot) + \n (\"width: %f \" % width) + \n (\"xoff_v: %f \" % xoff_v) + \n (\"fortho: %f \" % fortho)\n )\n\n if verbose or debug:\n print \"tkin: %g tex: %g ntot: %g width: %g xoff_v: %g fortho: %g fillingfraction: %g\" % (tkin,tex,ntot,width,xoff_v,fortho,fillingfraction)\n\n if return_components:\n return (T0\/(np.exp(T0\/tex)-1)-T0\/(np.exp(T0\/2.73)-1))*(1-np.exp(-1*np.array(components)))\n\n if return_tau:\n return tau_dict\n \n return runspec\n\nclass ammonia_model(model.SpectralModel):\n\n def __init__(self,npeaks=1,npars=6,multisingle='multi',**kwargs):\n self.npeaks = npeaks\n self.npars = npars\n self._default_parnames = ['tkin','tex','ntot','width','xoff_v','fortho']\n self.parnames = copy.copy(self._default_parnames)\n\n # all fitters must have declared modelfuncs, which should take the fitted pars...\n self.modelfunc = ammonia\n self.n_modelfunc = self.n_ammonia\n\n # for fitting ammonia simultaneously with a flat background\n self.onepeakammonia = fitter.vheightmodel(ammonia)\n #self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)\n\n if multisingle in ('multi','single'):\n self.multisingle = multisingle\n else:\n raise Exception(\"multisingle must be multi or single\")\n\n self.default_parinfo = None\n self.default_parinfo, kwargs = self._make_parinfo(**kwargs)\n\n # enforce ammonia-specific parameter limits\n for par in self.default_parinfo:\n if 'tex' in par.parname.lower():\n par.limited = (True,par.limited[1])\n par.limits = (max(par.limits[0],2.73), par.limits[1])\n if 'tkin' in par.parname.lower():\n par.limited = (True,par.limited[1])\n par.limits = (max(par.limits[0],2.73), par.limits[1])\n if 'width' in par.parname.lower():\n par.limited = (True,par.limited[1])\n par.limits = (max(par.limits[0],0), par.limits[1])\n if 'fortho' in par.parname.lower():\n par.limited = (True,True)\n if par.limits[1] != 0:\n par.limits = (max(par.limits[0],0), min(par.limits[1],1))\n else:\n par.limits = (max(par.limits[0],0), 1)\n if 'ntot' in par.parname.lower():\n par.limited = 
(True,par.limited[1])\n par.limits = (max(par.limits[0],0), par.limits[1])\n\n self.parinfo = copy.copy(self.default_parinfo)\n\n\n self.modelfunc_kwargs = kwargs\n # lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})\n\n def __call__(self,*args,**kwargs):\n #if 'use_lmfit' in kwargs: kwargs.pop('use_lmfit')\n use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit\n if use_lmfit:\n return self.lmfitter(*args,**kwargs)\n if self.multisingle == 'single':\n return self.onepeakammoniafit(*args,**kwargs)\n elif self.multisingle == 'multi':\n return self.multinh3fit(*args,**kwargs)\n\n def n_ammonia(self, pars=None, parnames=None, **kwargs):\n \"\"\"\n Returns a function that sums over N ammonia line profiles, where N is the length of\n tkin,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) \/ 6\n\n The background \"height\" is assumed to be zero (you must \"baseline\" your\n spectrum before fitting)\n\n *pars* [ list ]\n a list with len(pars) = (6-nfixed)n, assuming\n tkin,tex,ntot,width,xoff_v,fortho repeated\n\n *parnames* [ list ] \n len(parnames) must = len(pars). parnames determine how the ammonia\n function parses the arguments\n \"\"\"\n if hasattr(pars,'values'):\n # important to treat as Dictionary, since lmfit params & parinfo both have .items\n parnames,parvals = zip(*pars.items())\n parnames = [p.lower() for p in parnames]\n parvals = [p.value for p in parvals]\n elif parnames is None:\n parvals = pars\n parnames = self.parnames\n else:\n parvals = pars\n if len(pars) != len(parnames):\n # this should only be needed when other codes are changing the number of peaks\n # during a copy, as opposed to letting them be set by a __call__\n # (n_modelfuncs = n_ammonia can be called directly)\n # n_modelfuncs doesn't care how many peaks there are\n if len(pars) % len(parnames) == 0:\n parnames = [p for ii in range(len(pars)\/len(parnames)) for p in parnames]\n npars = len(parvals) \/ self.npeaks\n else:\n raise ValueError(\"Wrong array lengths passed to n_ammonia!\")\n else:\n npars = len(parvals) \/ self.npeaks\n\n\n self._components = []\n def L(x):\n v = np.zeros(len(x))\n for jj in xrange(self.npeaks):\n modelkwargs = kwargs.copy()\n for ii in xrange(npars):\n name = parnames[ii+jj*npars].strip('0123456789').lower()\n modelkwargs.update({name:parvals[ii+jj*npars]})\n v += ammonia(x,**modelkwargs)\n return v\n return L\n\n def components(self, xarr, pars, hyperfine=False):\n \"\"\"\n Ammonia components don't follow the default, since in Galactic astronomy the hyperfine components should be well-separated.\n If you want to see the individual components overlaid, you'll need to pass hyperfine to the plot_fit call\n \"\"\"\n\n comps=[]\n for ii in xrange(self.npeaks):\n if hyperfine:\n modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))\n comps.append( ammonia(xarr,return_components=True,**modelkwargs) )\n else:\n modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))\n comps.append( [ammonia(xarr,return_components=False,**modelkwargs)] )\n\n modelcomponents = np.concatenate(comps)\n\n return modelcomponents\n\n\n def multinh3fit(self, xax, data, npeaks=1, err=None, \n params=(20,20,14,1.0,0.0,0.5),\n parnames=None,\n fixed=(False,False,False,False,False,False),\n limitedmin=(True,True,True,True,False,True),\n limitedmax=(False,False,False,False,False,True), minpars=(2.73,2.73,0,0,0,0),\n parinfo=None,\n maxpars=(0,0,0,0,0,1), 
quiet=True, shh=True, veryverbose=False, **kwargs):\n \"\"\"\n Fit multiple nh3 profiles (multiple can be 1)\n\n Inputs:\n xax - x axis\n data - y axis\n npeaks - How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)\n err - error corresponding to data\n\n These parameters need to have length = 6*npeaks. If npeaks > 1 and length = 6, they will\n be replicated npeaks times, otherwise they will be reset to defaults:\n params - Fit parameters: [tkin, tex, ntot (or tau), width, offset, ortho fraction] * npeaks\n If len(params) % 6 == 0, npeaks will be set to len(params) \/ 6\n fixed - Is parameter fixed?\n limitedmin\/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)\n limitedmax\/maxpars - set upper limits on each parameter\n parnames - default parameter names, important for setting kwargs in model ['tkin','tex','ntot','width','xoff_v','fortho']\n\n quiet - should MPFIT output each iteration?\n shh - output final parameters?\n\n Returns:\n Fit parameters\n Model\n Fit errors\n chi2\n \"\"\"\n\n if parinfo is None:\n self.npars = len(params) \/ npeaks\n\n if len(params) != npeaks and (len(params) \/ self.npars) > npeaks:\n npeaks = len(params) \/ self.npars \n self.npeaks = npeaks\n\n if isinstance(params,np.ndarray): params=params.tolist()\n # this is actually a hack, even though it's decently elegant\n # somehow, parnames was being changed WITHOUT being passed as a variable\n # this doesn't make sense - at all - but it happened.\n # (it is possible for self.parnames to have npars*npeaks elements where\n # npeaks > 1 coming into this function even though only 6 pars are specified;\n # _default_parnames is the workaround)\n if parnames is None: parnames = copy.copy(self._default_parnames)\n\n partype_dict = dict(zip(['params','parnames','fixed','limitedmin','limitedmax','minpars','maxpars'],\n [params,parnames,fixed,limitedmin,limitedmax,minpars,maxpars]))\n\n # make sure all various things are the right length; if they're not, fix them using the defaults\n for partype,parlist in partype_dict.iteritems():\n if len(parlist) != self.npars*self.npeaks:\n # if you leave the defaults, or enter something that can be multiplied by npars to get to the\n # right number of gaussians, it will just replicate\n if len(parlist) == self.npars: \n partype_dict[partype] *= npeaks \n elif len(parlist) > self.npars:\n # DANGER: THIS SHOULD NOT HAPPEN!\n print \"WARNING! Input parameters were longer than allowed for variable \",parlist\n partype_dict[partype] = partype_dict[partype][:self.npars]\n elif parlist==params: # this instance shouldn't really be possible\n partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks\n elif parlist==fixed:\n partype_dict[partype] = [False] * len(params)\n elif parlist==limitedmax: # only fortho, fillingfraction have upper limits\n partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')\n elif parlist==limitedmin: # no physical values can be negative except velocity\n partype_dict[partype] = (np.array(parnames) != 'xoff_v')\n elif parlist==minpars: # all have minima of zero except kinetic temperature, which can't be below CMB. 
Excitation temperature technically can be, but not in this model\n partype_dict[partype] = ((np.array(parnames) == 'tkin') + (np.array(parnames) == 'tex')) * 2.73\n elif parlist==maxpars: # fractions have upper limits of 1.0\n partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')\n elif parlist==parnames: # assumes the right number of parnames (essential)\n partype_dict[partype] = list(parnames) * self.npeaks \n\n if len(parnames) != len(partype_dict['params']):\n raise ValueError(\"Wrong array lengths AFTER fixing them\")\n\n # used in components. Is this just a hack?\n self.parnames = partype_dict['parnames']\n\n parinfo = [ {'n':ii, 'value':partype_dict['params'][ii],\n 'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],\n 'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],\n 'parname':partype_dict['parnames'][ii]+str(ii\/self.npars),\n 'mpmaxstep':float(partype_dict['parnames'][ii] in ('tex','tkin')), # must force small steps in temperature (True = 1.0)\n 'error': 0} \n for ii in xrange(len(partype_dict['params'])) ]\n\n # hack: remove 'fixed' pars\n parinfo_with_fixed = parinfo\n parinfo = [p for p in parinfo_with_fixed if not p['fixed']]\n fixed_kwargs = dict((p['parname'].strip(\"0123456789\").lower(),p['value']) for p in parinfo_with_fixed if p['fixed'])\n # don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]\n # this is OK - not a permanent change\n parnames = [p['parname'] for p in parinfo]\n # not OK self.npars = len(parinfo)\/self.npeaks\n parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)\n #import pdb; pdb.set_trace()\n else: \n self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True) \n parinfo_with_fixed = None\n fixed_kwargs = {}\n\n fitfun_kwargs = dict(kwargs.items()+fixed_kwargs.items())\n\n npars = len(parinfo)\/self.npeaks\n\n # (fortho0 is not fortho)\n # this doesn't work if parinfo_with_fixed is not None:\n # this doesn't work for p in parinfo_with_fixed:\n # this doesn't work # users can change the defaults while holding them fixed \n # this doesn't work if p['fixed']:\n # this doesn't work kwargs.update({p['parname']:p['value']})\n\n def mpfitfun(x,y,err):\n if err is None:\n def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))]\n else:\n def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))\/err]\n return f\n\n if veryverbose:\n print \"GUESSES: \"\n print \"\\n\".join([\"%s: %s\" % (p['parname'],p['value']) for p in parinfo])\n\n mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)\n mpp = mp.params\n if mp.perror is not None: mpperr = mp.perror\n else: mpperr = mpp*0\n chi2 = mp.fnorm\n\n if mp.status == 0:\n raise Exception(mp.errmsg)\n\n for i,p in enumerate(mpp):\n parinfo[i]['value'] = p\n parinfo[i]['error'] = mpperr[i]\n\n if not shh:\n print \"Fit status: \",mp.status\n print \"Fit message: \",mp.errmsg\n print \"Final fit values: \"\n for i,p in enumerate(mpp):\n print parinfo[i]['parname'],p,\" +\/- \",mpperr[i]\n print \"Chi2: \",mp.fnorm,\" Reduced Chi2: \",mp.fnorm\/len(data),\" DOF:\",len(data)-len(mpp)\n\n if any(['tex' in s for s in parnames]) and any(['tkin' in s for s in parnames]):\n texnum = (i for i,s in enumerate(parnames) if 'tex' in s)\n tkinnum = (i for i,s in enumerate(parnames) 
if 'tkin' in s)\n for txn,tkn in zip(texnum,tkinnum):\n if mpp[txn] > mpp[tkn]: mpp[txn] = mpp[tkn] # force Tex>Tkin to Tex=Tkin (already done in n_ammonia)\n self.mp = mp\n\n if parinfo_with_fixed is not None:\n # self self.parinfo preserving the 'fixed' parameters \n # ORDER MATTERS!\n for p in parinfo:\n parinfo_with_fixed[p['n']] = p\n self.parinfo = ParinfoList([Parinfo(p) for p in parinfo_with_fixed], preserve_order=True)\n else:\n self.parinfo = parinfo\n self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)\n\n # I don't THINK these are necessary?\n #self.parinfo = parinfo\n #self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])\n\n # need to restore the fixed parameters....\n # though the above commented out section indicates that I've done and undone this dozens of times now\n # (a test has been added to test_nh3.py)\n # this was NEVER included or tested because it breaks the order\n #for par in parinfo_with_fixed:\n # if par.parname not in self.parinfo.keys():\n # self.parinfo.append(par)\n\n self.mpp = self.parinfo.values\n self.mpperr = self.parinfo.errors\n self.mppnames = self.parinfo.names\n self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames, **kwargs)(xax)\n #if self.model.sum() == 0:\n # print \"DON'T FORGET TO REMOVE THIS ERROR!\"\n # raise ValueError(\"Model is zeros.\")\n\n indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars] for jj in xrange(len(self.parinfo)\/self.npars)]\n modelkwargs = [\n dict([(p['parname'].strip(\"0123456789\").lower(),p['value']) for p in pi])\n for pi in indiv_parinfo]\n self.tau_list = [ammonia(xax,return_tau=True,**mk) for mk in modelkwargs]\n\n return self.mpp,self.model,self.mpperr,chi2\n\n def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):\n \"\"\"\n Returns a very simple and likely incorrect guess\n \"\"\"\n\n # TKIN, TEX, ntot, width, center, ortho fraction\n return [20,10, 1e15, 1.0, 0.0, 1.0]\n\n def annotations(self):\n from decimal import Decimal # for formatting\n tex_key = {'tkin':'T_K','tex':'T_{ex}','ntot':'N','fortho':'F_o','width':'\\\\sigma','xoff_v':'v','fillingfraction':'FF','tau':'\\\\tau_{1-1}'}\n # small hack below: don't quantize if error > value. 
We want to see the values.\n label_list = []\n for pinfo in self.parinfo:\n parname = tex_key[pinfo['parname'].strip(\"0123456789\").lower()]\n parnum = int(pinfo['parname'][-1])\n if pinfo['fixed']:\n formatted_value = \"%s\" % pinfo['value']\n pm = \"\"\n formatted_error=\"\"\n else:\n formatted_value = Decimal(\"%g\" % pinfo['value']).quantize(Decimal(\"%0.2g\" % (min(pinfo['error'],pinfo['value']))))\n pm = \"$\\\\pm$\"\n formatted_error = Decimal(\"%g\" % pinfo['error']).quantize(Decimal(\"%0.2g\" % pinfo['error']))\n label = \"$%s(%i)$=%8s %s %8s\" % (parname, parnum, formatted_value, pm, formatted_error)\n label_list.append(label)\n labels = tuple(mpcb.flatten(label_list))\n return labels\n\nclass ammonia_model_vtau(ammonia_model):\n def __init__(self,**kwargs):\n super(ammonia_model_vtau,self).__init__()\n self.parnames = ['tkin','tex','tau','width','xoff_v','fortho']\n\n def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):\n \"\"\"\n Returns a very simple and likely incorrect guess\n \"\"\"\n\n # TKIN, TEX, ntot, width, center, ortho fraction\n return [20,10, 1, 1.0, 0.0, 1.0]\n\n def __call__(self,*args,**kwargs):\n if self.multisingle == 'single':\n return self.onepeakammoniafit(*args,**kwargs)\n elif self.multisingle == 'multi':\n return self.multinh3fit(*args,**kwargs)\n\n","license":"mit"} {"repo_name":"jakevdp\/seaborn","path":"doc\/sphinxext\/ipython_directive.py","copies":"37","size":"37557","content":"# -*- coding: utf-8 -*-\n\"\"\"\nSphinx directive to support embedded IPython code.\n\nThis directive allows pasting of entire interactive IPython sessions, prompts\nand all, and their code will actually get re-executed at doc build time, with\nall prompts renumbered sequentially. It also allows you to input code as a pure\npython input by giving the argument python to the directive. The output looks\nlike an interactive ipython section.\n\nTo enable this directive, simply list it in your Sphinx ``conf.py`` file\n(making sure the directory where you placed it is visible to sphinx, as is\nneeded for all Sphinx directives). For example, to enable syntax highlighting\nand the IPython directive::\n\n extensions = ['IPython.sphinxext.ipython_console_highlighting',\n 'IPython.sphinxext.ipython_directive']\n\nThe IPython directive outputs code-blocks with the language 'ipython'. So\nif you do not have the syntax highlighting extension enabled as well, then\nall rendered code-blocks will be uncolored. By default this directive assumes\nthat your prompts are unchanged IPython ones, but this can be customized.\nThe configurable options that can be placed in conf.py are:\n\nipython_savefig_dir:\n The directory in which to save the figures. This is relative to the\n Sphinx source directory. The default is `html_static_path`.\nipython_rgxin:\n The compiled regular expression to denote the start of IPython input\n lines. The default is re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*'). You\n shouldn't need to change this.\nipython_rgxout:\n The compiled regular expression to denote the start of IPython output\n lines. The default is re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*'). You\n shouldn't need to change this.\nipython_promptin:\n The string to represent the IPython input prompt in the generated ReST.\n The default is 'In [%d]:'. This expects that the line numbers are used\n in the prompt.\nipython_promptout:\n The string to represent the IPython prompt in the generated ReST. The\n default is 'Out [%d]:'. 
This expects that the line numbers are used\n in the prompt.\nipython_mplbackend:\n The string which specifies if the embedded Sphinx shell should import\n Matplotlib and set the backend. The value specifies a backend that is\n passed to `matplotlib.use()` before any lines in `ipython_execlines` are\n executed. If not specified in conf.py, then the default value of 'agg' is\n used. To use the IPython directive without matplotlib as a dependency, set\n the value to `None`. It may end up that matplotlib is still imported\n if the user specifies so in `ipython_execlines` or makes use of the\n @savefig pseudo decorator.\nipython_execlines:\n A list of strings to be exec'd in the embedded Sphinx shell. Typical\n usage is to make certain packages always available. Set this to an empty\n list if you wish to have no imports always available. If specified in\n conf.py as `None`, then it has the effect of making no imports available.\n If omitted from conf.py altogether, then the default value of\n ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.\nipython_holdcount\n When the @suppress pseudo-decorator is used, the execution count can be\n incremented or not. The default behavior is to hold the execution count,\n corresponding to a value of `True`. Set this to `False` to increment\n the execution count after each suppressed command.\n\nAs an example, to use the IPython directive when `matplotlib` is not available,\none sets the backend to `None`::\n\n ipython_mplbackend = None\n\nAn example usage of the directive is:\n\n.. code-block:: rst\n\n .. ipython::\n\n In [1]: x = 1\n\n In [2]: y = x**2\n\n In [3]: print(y)\n\nSee http:\/\/matplotlib.org\/sampledoc\/ipython_directive.html for additional\ndocumentation.\n\nToDo\n----\n\n- Turn the ad-hoc test() function into a real test suite.\n- Break up ipython-specific functionality from matplotlib stuff into better\n separated code.\n\nAuthors\n-------\n\n- John D Hunter: orignal author.\n- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.\n- V\u00e1clav\u0160milauer : Prompt generalizations.\n- Skipper Seabold, refactoring, cleanups, pure python addition\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib\nimport os\nimport re\nimport sys\nimport tempfile\nimport ast\nfrom pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO\nimport warnings\n\n# To keep compatibility with various python versions\ntry:\n from hashlib import md5\nexcept ImportError:\n from md5 import md5\n\n# Third-party\nimport sphinx\nfrom docutils.parsers.rst import directives\nfrom docutils import nodes\nfrom sphinx.util.compat import Directive\n\n# Our own\nfrom IPython import Config, InteractiveShell\nfrom IPython.core.profiledir import ProfileDir\nfrom IPython.utils import io\nfrom IPython.utils.py3compat import PY3\n\nif PY3:\n from io import StringIO\n text_type = str\nelse:\n from StringIO import StringIO\n text_type = unicode\n\n#-----------------------------------------------------------------------------\n# Globals\n#-----------------------------------------------------------------------------\n# for tokenizing blocks\nCOMMENT, INPUT, OUTPUT = range(3)\n\n#-----------------------------------------------------------------------------\n# Functions and class 
declarations\n#-----------------------------------------------------------------------------\n\ndef block_parser(part, rgxin, rgxout, fmtin, fmtout):\n \"\"\"\n part is a string of ipython text, comprised of at most one\n input, one ouput, comments, and blank lines. The block parser\n parses the text into a list of::\n\n blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]\n\n where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and\n data is, depending on the type of token::\n\n COMMENT : the comment string\n\n INPUT: the (DECORATOR, INPUT_LINE, REST) where\n DECORATOR: the input decorator (or None)\n INPUT_LINE: the input as string (possibly multi-line)\n REST : any stdout generated by the input line (not OUTPUT)\n\n OUTPUT: the output string, possibly multi-line\n\n \"\"\"\n block = []\n lines = part.split('\\n')\n N = len(lines)\n i = 0\n decorator = None\n while 1:\n\n if i==N:\n # nothing left to parse -- the last line\n break\n\n line = lines[i]\n i += 1\n line_stripped = line.strip()\n if line_stripped.startswith('#'):\n block.append((COMMENT, line))\n continue\n\n if line_stripped.startswith('@'):\n # we're assuming at most one decorator -- may need to\n # rethink\n decorator = line_stripped\n continue\n\n # does this look like an input line?\n matchin = rgxin.match(line)\n if matchin:\n lineno, inputline = int(matchin.group(1)), matchin.group(2)\n\n # the ....: continuation string\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n Nc = len(continuation)\n # input lines can continue on for more than one line, if\n # we have a '\\' line continuation char or a function call\n # echo line 'print'. The input line can only be\n # terminated by the end of the block or an output line, so\n # we parse out the rest of the input line if it is\n # multiline as well as any echo text\n\n rest = []\n while i 1:\n if input_lines[-1] != \"\":\n input_lines.append('') # make sure there's a blank line\n # so splitter buffer gets reset\n\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n\n if is_savefig:\n image_file, image_directive = self.process_image(decorator)\n\n ret = []\n is_semicolon = False\n\n # Hold the execution count, if requested to do so.\n if is_suppress and self.hold_count:\n store_history = False\n else:\n store_history = True\n\n # Note: catch_warnings is not thread safe\n with warnings.catch_warnings(record=True) as ws:\n for i, line in enumerate(input_lines):\n if line.endswith(';'):\n is_semicolon = True\n\n if i == 0:\n # process the first input line\n if is_verbatim:\n self.process_input_line('')\n self.IP.execution_count += 1 # increment it anyway\n else:\n # only submit the line in non-verbatim mode\n self.process_input_line(line, store_history=store_history)\n formatted_line = '%s %s'%(input_prompt, line)\n else:\n # process a continuation line\n if not is_verbatim:\n self.process_input_line(line, store_history=store_history)\n\n formatted_line = '%s %s'%(continuation, line)\n\n if not is_suppress:\n ret.append(formatted_line)\n\n if not is_suppress and len(rest.strip()) and is_verbatim:\n # the \"rest\" is the standard output of the\n # input, which needs to be added in\n # verbatim mode\n ret.append(rest)\n\n self.cout.seek(0)\n output = self.cout.read()\n if not is_suppress and not is_semicolon:\n ret.append(output)\n elif is_semicolon: # get spacing right\n ret.append('')\n\n # context information\n filename = self.state.document.current_source\n lineno = self.state.document.current_line\n\n # output any exceptions raised during execution to stdout\n # 
unless :okexcept: has been specified.\n if not is_okexcept and \"Traceback\" in output:\n s = \"\\nException in %s at block ending on line %s\\n\" % (filename, lineno)\n s += \"Specify :okexcept: as an option in the ipython:: block to suppress this message\\n\"\n sys.stdout.write('\\n\\n>>>' + ('-' * 73))\n sys.stdout.write(s)\n sys.stdout.write(output)\n sys.stdout.write('<<<' + ('-' * 73) + '\\n\\n')\n\n # output any warning raised during execution to stdout\n # unless :okwarning: has been specified.\n if not is_okwarning:\n for w in ws:\n s = \"\\nWarning in %s at block ending on line %s\\n\" % (filename, lineno)\n s += \"Specify :okwarning: as an option in the ipython:: block to suppress this message\\n\"\n sys.stdout.write('\\n\\n>>>' + ('-' * 73))\n sys.stdout.write(s)\n sys.stdout.write('-' * 76 + '\\n')\n s=warnings.formatwarning(w.message, w.category,\n w.filename, w.lineno, w.line)\n sys.stdout.write(s)\n sys.stdout.write('<<<' + ('-' * 73) + '\\n')\n\n self.cout.truncate(0)\n return (ret, input_lines, output, is_doctest, decorator, image_file,\n image_directive)\n\n\n def process_output(self, data, output_prompt,\n input_lines, output, is_doctest, decorator, image_file):\n \"\"\"\n Process data block for OUTPUT token.\n\n \"\"\"\n TAB = ' ' * 4\n\n if is_doctest and output is not None:\n\n found = output\n found = found.strip()\n submitted = data.strip()\n\n if self.directive is None:\n source = 'Unavailable'\n content = 'Unavailable'\n else:\n source = self.directive.state.document.current_source\n content = self.directive.content\n # Add tabs and join into a single string.\n content = '\\n'.join([TAB + line for line in content])\n\n # Make sure the output contains the output prompt.\n ind = found.find(output_prompt)\n if ind < 0:\n e = ('output does not contain output prompt\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Raw content: \\n{1}\\n\\n'\n 'Input line(s):\\n{TAB}{2}\\n\\n'\n 'Output line(s):\\n{TAB}{3}\\n\\n')\n e = e.format(source, content, '\\n'.join(input_lines),\n repr(found), TAB=TAB)\n raise RuntimeError(e)\n found = found[len(output_prompt):].strip()\n\n # Handle the actual doctest comparison.\n if decorator.strip() == '@doctest':\n # Standard doctest\n if found != submitted:\n e = ('doctest failure\\n\\n'\n 'Document source: {0}\\n\\n'\n 'Raw content: \\n{1}\\n\\n'\n 'On input line(s):\\n{TAB}{2}\\n\\n'\n 'we found output:\\n{TAB}{3}\\n\\n'\n 'instead of the expected:\\n{TAB}{4}\\n\\n')\n e = e.format(source, content, '\\n'.join(input_lines),\n repr(found), repr(submitted), TAB=TAB)\n raise RuntimeError(e)\n else:\n self.custom_doctest(decorator, input_lines, found, submitted)\n\n def process_comment(self, data):\n \"\"\"Process data fPblock for COMMENT token.\"\"\"\n if not self.is_suppress:\n return [data]\n\n def save_image(self, image_file):\n \"\"\"\n Saves the image file to disk.\n \"\"\"\n self.ensure_pyplot()\n command = ('plt.gcf().savefig(\"%s\", bbox_inches=\"tight\", '\n 'dpi=100)' % image_file)\n\n #print 'SAVEFIG', command # dbg\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\n self.process_input_line('cd -b ipy_savedir', store_history=False)\n self.process_input_line(command, store_history=False)\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\n self.clear_cout()\n\n def process_block(self, block):\n \"\"\"\n process block from the block_parser and return a list of processed lines\n \"\"\"\n ret = []\n output = None\n input_lines = 
None\n lineno = self.IP.execution_count\n\n input_prompt = self.promptin % lineno\n output_prompt = self.promptout % lineno\n image_file = None\n image_directive = None\n\n for token, data in block:\n if token == COMMENT:\n out_data = self.process_comment(data)\n elif token == INPUT:\n (out_data, input_lines, output, is_doctest, decorator,\n image_file, image_directive) = \\\n self.process_input(data, input_prompt, lineno)\n elif token == OUTPUT:\n out_data = \\\n self.process_output(data, output_prompt,\n input_lines, output, is_doctest,\n decorator, image_file)\n if out_data:\n ret.extend(out_data)\n\n # save the image files\n if image_file is not None:\n self.save_image(image_file)\n\n return ret, image_directive\n\n def ensure_pyplot(self):\n \"\"\"\n Ensures that pyplot has been imported into the embedded IPython shell.\n\n Also, makes sure to set the backend appropriately if not set already.\n\n \"\"\"\n # We are here if the @figure pseudo decorator was used. Thus, it's\n # possible that we could be here even if python_mplbackend were set to\n # `None`. That's also strange and perhaps worthy of raising an\n # exception, but for now, we just set the backend to 'agg'.\n\n if not self._pyplot_imported:\n if 'matplotlib.backends' not in sys.modules:\n # Then ipython_matplotlib was set to None but there was a\n # call to the @figure decorator (and ipython_execlines did\n # not set a backend).\n #raise Exception(\"No backend was set, but @figure was used!\")\n import matplotlib\n matplotlib.use('agg')\n\n # Always import pyplot into embedded shell.\n self.process_input_line('import matplotlib.pyplot as plt',\n store_history=False)\n self._pyplot_imported = True\n\n def process_pure_python(self, content):\n \"\"\"\n content is a list of strings. it is unedited directive content\n\n This runs it line by line in the InteractiveShell, prepends\n prompts as needed capturing stderr and stdout, then returns\n the content as a list as if it were ipython code\n \"\"\"\n output = []\n savefig = False # keep up with this to clear figure\n multiline = False # to handle line continuation\n multiline_start = None\n fmtin = self.promptin\n\n ct = 0\n\n for lineno, line in enumerate(content):\n\n line_stripped = line.strip()\n if not len(line):\n output.append(line)\n continue\n\n # handle decorators\n if line_stripped.startswith('@'):\n output.extend([line])\n if 'savefig' in line:\n savefig = True # and need to clear figure\n continue\n\n # handle comments\n if line_stripped.startswith('#'):\n output.extend([line])\n continue\n\n # deal with lines checking for multiline\n continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))\n if not multiline:\n modified = u\"%s %s\" % (fmtin % ct, line_stripped)\n output.append(modified)\n ct += 1\n try:\n ast.parse(line_stripped)\n output.append(u'')\n except Exception: # on a multiline\n multiline = True\n multiline_start = lineno\n else: # still on a multiline\n modified = u'%s %s' % (continuation, line)\n output.append(modified)\n\n # if the next line is indented, it should be part of multiline\n if len(content) > lineno + 1:\n nextline = content[lineno + 1]\n if len(nextline) - len(nextline.lstrip()) > 3:\n continue\n try:\n mod = ast.parse(\n '\\n'.join(content[multiline_start:lineno+1]))\n if isinstance(mod.body[0], ast.FunctionDef):\n # check to see if we have the whole function\n for element in mod.body[0].body:\n if isinstance(element, ast.Return):\n multiline = False\n else:\n output.append(u'')\n multiline = False\n except Exception:\n pass\n\n if 
savefig: # clear figure if plotted\n self.ensure_pyplot()\n self.process_input_line('plt.clf()', store_history=False)\n self.clear_cout()\n savefig = False\n\n return output\n\n def custom_doctest(self, decorator, input_lines, found, submitted):\n \"\"\"\n Perform a specialized doctest.\n\n \"\"\"\n from .custom_doctests import doctests\n\n args = decorator.split()\n doctest_type = args[1]\n if doctest_type in doctests:\n doctests[doctest_type](self, args, input_lines, found, submitted)\n else:\n e = \"Invalid option to @doctest: {0}\".format(doctest_type)\n raise Exception(e)\n\n\nclass IPythonDirective(Directive):\n\n has_content = True\n required_arguments = 0\n optional_arguments = 4 # python, suppress, verbatim, doctest\n final_argumuent_whitespace = True\n option_spec = { 'python': directives.unchanged,\n 'suppress' : directives.flag,\n 'verbatim' : directives.flag,\n 'doctest' : directives.flag,\n 'okexcept': directives.flag,\n 'okwarning': directives.flag,\n 'output_encoding': directives.unchanged_required\n }\n\n shell = None\n\n seen_docs = set()\n\n def get_config_options(self):\n # contains sphinx configuration variables\n config = self.state.document.settings.env.config\n\n # get config variables to set figure output directory\n confdir = self.state.document.settings.env.app.confdir\n savefig_dir = config.ipython_savefig_dir\n source_dir = os.path.dirname(self.state.document.current_source)\n if savefig_dir is None:\n savefig_dir = config.html_static_path\n if isinstance(savefig_dir, list):\n savefig_dir = savefig_dir[0] # safe to assume only one path?\n savefig_dir = os.path.join(confdir, savefig_dir)\n\n # get regex and prompt stuff\n rgxin = config.ipython_rgxin\n rgxout = config.ipython_rgxout\n promptin = config.ipython_promptin\n promptout = config.ipython_promptout\n mplbackend = config.ipython_mplbackend\n exec_lines = config.ipython_execlines\n hold_count = config.ipython_holdcount\n\n return (savefig_dir, source_dir, rgxin, rgxout,\n promptin, promptout, mplbackend, exec_lines, hold_count)\n\n def setup(self):\n # Get configuration values.\n (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,\n mplbackend, exec_lines, hold_count) = self.get_config_options()\n\n if self.shell is None:\n # We will be here many times. 
However, when the\n # EmbeddedSphinxShell is created, its interactive shell member\n # is the same for each instance.\n\n if mplbackend:\n import matplotlib\n # Repeated calls to use() will not hurt us since `mplbackend`\n # is the same each time.\n matplotlib.use(mplbackend)\n\n # Must be called after (potentially) importing matplotlib and\n # setting its backend since exec_lines might import pylab.\n self.shell = EmbeddedSphinxShell(exec_lines, self.state)\n\n # Store IPython directive to enable better error messages\n self.shell.directive = self\n\n # reset the execution count if we haven't processed this doc\n #NOTE: this may be borked if there are multiple seen_doc tmp files\n #check time stamp?\n if not self.state.document.current_source in self.seen_docs:\n self.shell.IP.history_manager.reset()\n self.shell.IP.execution_count = 1\n self.shell.IP.prompt_manager.width = 0\n self.seen_docs.add(self.state.document.current_source)\n\n # and attach to shell so we don't have to pass them around\n self.shell.rgxin = rgxin\n self.shell.rgxout = rgxout\n self.shell.promptin = promptin\n self.shell.promptout = promptout\n self.shell.savefig_dir = savefig_dir\n self.shell.source_dir = source_dir\n self.shell.hold_count = hold_count\n\n # setup bookmark for saving figures directory\n self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,\n store_history=False)\n self.shell.clear_cout()\n\n return rgxin, rgxout, promptin, promptout\n\n def teardown(self):\n # delete last bookmark\n self.shell.process_input_line('bookmark -d ipy_savedir',\n store_history=False)\n self.shell.clear_cout()\n\n def run(self):\n debug = False\n\n #TODO, any reason block_parser can't be a method of embeddable shell\n # then we wouldn't have to carry these around\n rgxin, rgxout, promptin, promptout = self.setup()\n\n options = self.options\n self.shell.is_suppress = 'suppress' in options\n self.shell.is_doctest = 'doctest' in options\n self.shell.is_verbatim = 'verbatim' in options\n self.shell.is_okexcept = 'okexcept' in options\n self.shell.is_okwarning = 'okwarning' in options\n\n self.shell.output_encoding = [options.get('output_encoding', 'utf8')]\n\n # handle pure python code\n if 'python' in self.arguments:\n content = self.content\n self.content = self.shell.process_pure_python(content)\n\n parts = '\\n'.join(self.content).split('\\n\\n')\n\n lines = ['.. code-block:: ipython', '']\n figures = []\n\n for part in parts:\n block = block_parser(part, rgxin, rgxout, promptin, promptout)\n if len(block):\n rows, figure = self.shell.process_block(block)\n for row in rows:\n lines.extend([' %s'%line for line in row.split('\\n')])\n\n if figure is not None:\n figures.append(figure)\n\n for figure in figures:\n lines.append('')\n lines.extend(figure.split('\\n'))\n lines.append('')\n\n if len(lines)>2:\n if debug:\n print('\\n'.join(lines))\n else:\n # This has to do with input, not output. 
But if we comment\n # these lines out, then no IPython code will appear in the\n # final output.\n self.state_machine.insert_input(\n lines, self.state_machine.input_lines.source(0))\n\n # cleanup\n self.teardown()\n\n return []\n\n# Enable as a proper Sphinx directive\ndef setup(app):\n setup.app = app\n\n app.add_directive('ipython', IPythonDirective)\n app.add_config_value('ipython_savefig_dir', None, 'env')\n app.add_config_value('ipython_rgxin',\n re.compile('In \\[(\\d+)\\]:\\s?(.*)\\s*'), 'env')\n app.add_config_value('ipython_rgxout',\n re.compile('Out\\[(\\d+)\\]:\\s?(.*)\\s*'), 'env')\n app.add_config_value('ipython_promptin', 'In [%d]:', 'env')\n app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')\n\n # We could just let matplotlib pick whatever is specified as the default\n # backend in the matplotlibrc file, but this would cause issues if the\n # backend didn't work in headless environments. For this reason, 'agg'\n # is a good default backend choice.\n app.add_config_value('ipython_mplbackend', 'agg', 'env')\n\n # If the user sets this config value to `None`, then EmbeddedSphinxShell's\n # __init__ method will treat it as [].\n execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']\n app.add_config_value('ipython_execlines', execlines, 'env')\n\n app.add_config_value('ipython_holdcount', True, 'env')\n\n# Simple smoke test, needs to be converted to a proper automatic test.\ndef test():\n\n examples = [\n r\"\"\"\nIn [9]: pwd\nOut[9]: '\/home\/jdhunter\/py4science\/book'\n\nIn [10]: cd bookdata\/\n\/home\/jdhunter\/py4science\/book\/bookdata\n\nIn [2]: from pylab import *\n\nIn [2]: ion()\n\nIn [3]: im = imread('stinkbug.png')\n\n@savefig mystinkbug.png width=4in\nIn [4]: imshow(im)\nOut[4]: \n\n\"\"\",\n r\"\"\"\n\nIn [1]: x = 'hello world'\n\n# string methods can be\n# used to alter the string\n@doctest\nIn [2]: x.upper()\nOut[2]: 'HELLO WORLD'\n\n@verbatim\nIn [3]: x.st\nx.startswith x.strip\n\"\"\",\n r\"\"\"\n\nIn [130]: url = 'http:\/\/ichart.finance.yahoo.com\/table.csv?s=CROX\\\n .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'\n\nIn [131]: print url.split('&')\n['http:\/\/ichart.finance.yahoo.com\/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']\n\nIn [60]: import urllib\n\n\"\"\",\n r\"\"\"\\\n\nIn [133]: import numpy.random\n\n@suppress\nIn [134]: numpy.random.seed(2358)\n\n@doctest\nIn [135]: numpy.random.rand(10,2)\nOut[135]:\narray([[ 0.64524308, 0.59943846],\n [ 0.47102322, 0.8715456 ],\n [ 0.29370834, 0.74776844],\n [ 0.99539577, 0.1313423 ],\n [ 0.16250302, 0.21103583],\n [ 0.81626524, 0.1312433 ],\n [ 0.67338089, 0.72302393],\n [ 0.7566368 , 0.07033696],\n [ 0.22591016, 0.77731835],\n [ 0.0072729 , 0.34273127]])\n\n\"\"\",\n\n r\"\"\"\nIn [106]: print x\njdh\n\nIn [109]: for i in range(10):\n .....: print i\n .....:\n .....:\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n\"\"\",\n\n r\"\"\"\n\nIn [144]: from pylab import *\n\nIn [145]: ion()\n\n# use a semicolon to suppress the output\n@savefig test_hist.png width=4in\nIn [151]: hist(np.random.randn(10000), 100);\n\n\n@savefig test_plot.png width=4in\nIn [151]: plot(np.random.randn(10000), 'o');\n \"\"\",\n\n r\"\"\"\n# use a semicolon to suppress the output\nIn [151]: plt.clf()\n\n@savefig plot_simple.png width=4in\nIn [151]: plot([1,2,3])\n\n@savefig hist_simple.png width=4in\nIn [151]: hist(np.random.randn(10000), 100);\n\n\"\"\",\n r\"\"\"\n# update the current fig\nIn [151]: ylabel('number')\n\nIn [152]: title('normal distribution')\n\n\n@savefig 
hist_with_text.png\nIn [153]: grid(True)\n\n@doctest float\nIn [154]: 0.1 + 0.2\nOut[154]: 0.3\n\n@doctest float\nIn [155]: np.arange(16).reshape(4,4)\nOut[155]:\narray([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\nIn [1]: x = np.arange(16, dtype=float).reshape(4,4)\n\nIn [2]: x[0,0] = np.inf\n\nIn [3]: x[0,1] = np.nan\n\n@doctest float\nIn [4]: x\nOut[4]:\narray([[ inf, nan, 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n\n\n \"\"\",\n ]\n # skip local-file depending first example:\n examples = examples[1:]\n\n #ipython_directive.DEBUG = True # dbg\n #options = dict(suppress=True) # dbg\n options = dict()\n for example in examples:\n content = example.split('\\n')\n IPythonDirective('debug', arguments=None, options=options,\n content=content, lineno=0,\n content_offset=None, block_text=None,\n state=None, state_machine=None,\n )\n\n# Run test suite as a script\nif __name__=='__main__':\n if not os.path.isdir('_static'):\n os.mkdir('_static')\n test()\n print('All OK? Check figures in _static\/')\n","license":"bsd-3-clause"} {"repo_name":"INCF\/BIDS2ISATab","path":"setup.py","copies":"1","size":"2176","content":"from setuptools import setup\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nsetup(\n name=\"BIDS2ISATab\",\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # http:\/\/packaging.python.org\/en\/latest\/tutorial.html#version\n version='0.1.0',\n\n description=\"Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure \"\n \"compatible dataset.\",\n long_description=\"Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure \"\n \"compatible dataset.\",\n\n # The project URL.\n url='https:\/\/github.com\/INCF\/BIDS2ISATab',\n\n # Choose your license\n license='BSD',\n\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production\/Stable\n 'Development Status :: 4 - Beta',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: BSD License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n ],\n\n # What does your project relate to?\n keywords='bids isatab',\n\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages.\n packages=[\"bids2isatab\"],\n\n # List run-time dependencies here. These will be installed by pip when your\n # project is installed.\n install_requires = [\"future\",\n \"pandas\",\n 'nibabel'],\n\n include_package_data=True,\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. 
Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n entry_points={\n 'console_scripts': [\n 'bids2isatab=bids2isatab.main:main',\n ],\n },\n)\n","license":"apache-2.0"} {"repo_name":"zooniverse\/aggregation","path":"experimental\/clusteringAlg\/adaptiveDBSCAN.py","copies":"2","size":"4734","content":"#!\/usr\/bin\/env python\n__author__ = 'greg'\nfrom sklearn.cluster import DBSCAN\nimport numpy as np\nimport math\n\ndef dist(c1,c2):\n return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)\n\nclass CannotSplit(Exception):\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return \"\"\nsamples_needed = 3\n\ndef adaptiveDBSCAN(XYpts,user_ids):\n if XYpts == []:\n return []\n\n pts_in_each_cluster = []\n users_in_each_cluster = []\n cluster_centers = []\n\n #increase the epsilon until we don't have any nearby clusters corresponding to non-overlapping\n #sets of users\n X = np.array(XYpts)\n #for epsilon in [5,10,15,20,25,30]:\n for first_epsilon in [100,200,300,400]:\n db = DBSCAN(eps=first_epsilon, min_samples=samples_needed).fit(X)\n\n labels = db.labels_\n pts_in_each_cluster = []\n users_in_each_cluster = []\n cluster_centers = []\n\n for k in sorted(set(labels)):\n if k == -1:\n continue\n\n class_member_mask = (labels == k)\n pts_in_cluster = list(X[class_member_mask])\n xSet,ySet = zip(*pts_in_cluster)\n\n cluster_centers.append((np.mean(xSet),np.mean(ySet)))\n pts_in_each_cluster.append(pts_in_cluster[:])\n users_in_each_cluster.append([u for u,l in zip(user_ids,labels) if l == k])\n\n #do we have any adjacent clusters with non-overlapping sets of users\n #if so, we should merge them by increasing the epsilon value\n cluster_compare = []\n for cluster_index, (c1,users) in enumerate(zip(cluster_centers,users_in_each_cluster)):\n for cluster_index, (c2,users2) in enumerate(zip(cluster_centers[cluster_index+1:],users_in_each_cluster[cluster_index+1:])):\n overlappingUsers = [u for u in users if u in users2]\n cluster_compare.append((dist(c1,c2),overlappingUsers))\n\n cluster_compare.sort(key = lambda x:x[0])\n needToMerge = [] in [c[1] for c in cluster_compare[:10]]\n if not(needToMerge):\n break\n #print epsilon\n #print [c[1] for c in cluster_compare[:10]]\n centers_to_return = []\n assert not(needToMerge)\n\n\n #do we need to split any clusters?\n for cluster_index in range(len(cluster_centers)):\n #print \"splitting\"\n needToSplit = (sorted(users_in_each_cluster[cluster_index]) != sorted(list(set(users_in_each_cluster[cluster_index]))))\n if needToSplit:\n subcluster_centers = []\n stillToSplit = []\n X = np.array(pts_in_each_cluster[cluster_index])\n #for epsilon in [30,25,20,15,10,5,1,0.1,0.01]:\n for second_epsilon in range(200,1,-2):#[400,300,200,100,80,75,65,60,50,25,24,23,22,21,20,19,18,17,16,15,14,13,10,5,1]:\n db = DBSCAN(eps=second_epsilon, min_samples=samples_needed).fit(X)\n\n labels = db.labels_\n subcluster_centers = []\n\n needToSplit = False\n\n for k in sorted(set(labels)):\n if k == -1:\n continue\n\n class_member_mask = (labels == k)\n users_in_subcluster = [u for u,l in zip(users_in_each_cluster[cluster_index],labels) if l == k]\n needToSplit = (sorted(users_in_subcluster) != sorted(list(set(users_in_subcluster))))\n if needToSplit:\n stillToSplit = list(X[class_member_mask])\n break\n\n pts_in_cluster = list(X[class_member_mask])\n xSet,ySet = zip(*pts_in_cluster)\n subcluster_centers.append((np.mean(xSet),np.mean(ySet)))\n\n if not(needToSplit):\n break\n\n\n if 
needToSplit:\n print \"second is \" + str(second_epsilon)\n print stillToSplit\n for i in range(len(stillToSplit)):\n p1 = stillToSplit[i]\n for j in range(len(stillToSplit[i+1:])):\n p2 = stillToSplit[j+i+1]\n print math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2),\n #print (i,j+i+1),\n print\n print X\n print users_in_each_cluster[cluster_index]\n raise CannotSplit(pts_in_each_cluster[cluster_index])\n centers_to_return.extend(subcluster_centers)\n\n #if needToSplit:\n # print pts_in_each_cluster[cluster_index]\n # print users_in_each_cluster[cluster_index]\n #else:\n\n else:\n centers_to_return.append(cluster_centers[cluster_index])\n\n\n\n return centers_to_return","license":"apache-2.0"} {"repo_name":"jrleja\/bsfh","path":"misc\/timings_pyfsps.py","copies":"3","size":"4274","content":"#compare a lookup table of spectra at ages and metallicities to\n#calls to fsps.sps.get_spectrum() for different metallicities\nimport time, os, subprocess, re, sys\nimport numpy as np\n#import matplotlib.pyplot as pl\nimport fsps\nfrom prospect import sources as sps_basis\nfrom prospect.models import sedmodel\n\n\ndef run_command(cmd):\n \"\"\"\n Open a child process, and return its exit status and stdout.\n\n \"\"\"\n child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out = [s for s in child.stdout]\n w = child.wait()\n return os.WEXITSTATUS(w), out\n\n\n# Check to make sure that the required environment variable is present.\ntry:\n ev = os.environ[\"SPS_HOME\"]\nexcept KeyError:\n raise ImportError(\"You need to have the SPS_HOME environment variable\")\n\n# Check the SVN revision number.\ncmd = [\"svnversion\", ev]\nstat, out = run_command(\" \".join(cmd))\nfsps_vers = int(re.match(\"^([0-9])+\", out[0]).group(0))\n\n\nsps = fsps.StellarPopulation(zcontinuous=True)\nprint('FSPS version = {}'.format(fsps_vers))\nprint('Zs={0}, N_lambda={1}'.format(sps.zlegend, len(sps.wavelengths)))\nprint('single age')\n\ndef spec_from_fsps(z, t, s):\n t0 = time.time()\n sps.params['logzsol'] = z\n sps.params['sigma_smooth'] = s\n sps.params['tage'] = t\n wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage'])\n #print(spec.shape)\n return time.time()-t0\n\ndef mags_from_fsps(z, t, s):\n t0 = time.time()\n sps.params['zred']=t\n sps.params['logzsol'] = z\n sps.params['sigma_smooth'] = s\n sps.params['tage'] = t\n mags = sps.get_mags(tage = sps.params['tage'], redshift=0.0)\n #print(spec.shape)\n return time.time()-t0\n\n\ndef spec_from_ztinterp(z, t, s):\n t0 = time.time()\n sps.params['logzsol'] = z\n sps.params['sigma_smooth'] = s\n sps.params['tage'] = t\n sps.params['imf3'] = s\n spec, m, l = sps.ztinterp(sps.params['logzsol'], sps.params['tage'], peraa=True)\n #print(spec.shape)\n return time.time()-t0\n \n\nif sys.argv[1] == 'mags':\n from_fsps = mags_from_fsps\n print('timing get_mags')\n print('nbands = {}'.format(len(sps.get_mags(tage=1.0))))\nelif sys.argv[1] == 'spec':\n from_fsps = spec_from_fsps\n print('timing get_spectrum')\nelif sys.argv[1] == 'ztinterp':\n from_fsps = spec_from_ztinterp\n print('timing get_spectrum')\nelif sys.argv[1] == 'sedpy':\n from sedpy import observate\n nbands = len(sps.get_mags(tage=1.0))\n fnames = nbands * ['sdss_r0']\n filters = observate.load_filters(fnames)\n \n def mags_from_sedpy(z, t, s):\n t0 = time.time()\n sps.params['logzsol'] = z\n sps.params['sigma_smooth'] = s\n sps.params['tage'] = t\n wave, spec = sps.get_spectrum(peraa=True,\n tage = sps.params['tage'])\n mags = 
observate.getSED(wave, spec, filters)\n return time.time()-t0\n \n from_fsps = mags_from_sedpy\n \nsps.params['add_neb_emission'] = False\nsps.params['smooth_velocity'] = True\nsps.params['sfh'] = 0\n\nntry = 30\nzz = np.random.uniform(-1,0,ntry)\ntt = np.random.uniform(0.1,4,ntry)\nss = np.random.uniform(1,2.5,ntry)\n\n#make sure all z's already compiled\n_ =[from_fsps(z, 1.0, 0.0) for z in [-1, -0.8, -0.6, -0.4, -0.2, 0.0]]\nall_dur = []\nprint('no neb emission:')\ndur_many = np.zeros(ntry)\nfor i in xrange(ntry):\n dur_many[i] = from_fsps(zz[i], tt[i], ss[i])\nprint('={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))\nall_dur += [dur_many]\n\nprint('no neb emission, no smooth:')\ndur_many = np.zeros(ntry)\nfor i in xrange(ntry):\n dur_many[i] = from_fsps(zz[i], tt[i], 0.0)\nprint('={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))\nall_dur += [dur_many]\n\nsps.params['add_neb_emission'] = True \nprint('neb emission:')\ndur_many = np.zeros(ntry)\nfor i in xrange(ntry):\n dur_many[i] = from_fsps(zz[i], tt[i], ss[i])\nprint('={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))\nall_dur += [dur_many]\n \nprint('neb emission, no smooth:')\ndur_many = np.zeros(ntry)\nfor i in xrange(ntry):\n dur_many[i] = from_fsps(zz[i], tt[i], 0.0)\nprint('={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))\nall_dur += [dur_many]\n\n\n","license":"mit"} {"repo_name":"ClinicalGraphics\/scikit-image","path":"doc\/examples\/xx_applications\/plot_morphology.py","copies":"6","size":"8329","content":"\"\"\"\n=======================\nMorphological Filtering\n=======================\n\nMorphological image processing is a collection of non-linear operations related\nto the shape or morphology of features in an image, such as boundaries,\nskeletons, etc. In any given technique, we probe an image with a small shape or\ntemplate called a structuring element, which defines the region of interest or\nneighborhood around a pixel.\n\nIn this document we outline the following basic morphological operations:\n\n1. Erosion\n2. Dilation\n3. Opening\n4. Closing\n5. White Tophat\n6. Black Tophat\n7. Skeletonize\n8. Convex Hull\n\n\nTo get started, let's load an image using ``io.imread``. Note that morphology\nfunctions only work on gray-scale or binary images, so we set ``as_grey=True``.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom skimage.data import data_dir\nfrom skimage.util import img_as_ubyte\nfrom skimage import io\n\nphantom = img_as_ubyte(io.imread(data_dir+'\/phantom.png', as_grey=True))\nfig, ax = plt.subplots()\nax.imshow(phantom, cmap=plt.cm.gray)\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nLet's also define a convenience function for plotting comparisons:\n\"\"\"\n\ndef plot_comparison(original, filtered, filter_name):\n\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)\n ax1.imshow(original, cmap=plt.cm.gray)\n ax1.set_title('original')\n ax1.axis('off')\n ax1.set_adjustable('box-forced')\n ax2.imshow(filtered, cmap=plt.cm.gray)\n ax2.set_title(filter_name)\n ax2.axis('off')\n ax2.set_adjustable('box-forced')\n\n\"\"\"\nErosion\n=======\n\nMorphological ``erosion`` sets a pixel at (i, j) to the *minimum over all\npixels in the neighborhood centered at (i, j)*. The structuring element,\n``selem``, passed to ``erosion`` is a boolean array that describes this\nneighborhood. 
Below, we use ``disk`` to create a circular structuring element,\nwhich we use for most of the following examples.\n\"\"\"\n\nfrom skimage.morphology import erosion, dilation, opening, closing, white_tophat\nfrom skimage.morphology import black_tophat, skeletonize, convex_hull_image\nfrom skimage.morphology import disk\n\nselem = disk(6)\neroded = erosion(phantom, selem)\nplot_comparison(phantom, eroded, 'erosion')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nNotice how the white boundary of the image disappears or gets eroded as we\nincrease the size of the disk. Also notice the increase in size of the two\nblack ellipses in the center and the disappearance of the 3 light grey\npatches in the lower part of the image.\n\n\nDilation\n========\n\nMorphological ``dilation`` sets a pixel at (i, j) to the *maximum over all\npixels in the neighborhood centered at (i, j)*. Dilation enlarges bright\nregions and shrinks dark regions.\n\"\"\"\n\ndilated = dilation(phantom, selem)\nplot_comparison(phantom, dilated, 'dilation')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nNotice how the white boundary of the image thickens, or gets dilated, as we\nincrease the size of the disk. Also notice the decrease in size of the two\nblack ellipses in the centre, and the thickening of the light grey circle in\nthe center and the 3 patches in the lower part of the image.\n\n\nOpening\n=======\n\nMorphological ``opening`` on an image is defined as an *erosion followed by a\ndilation*. Opening can remove small bright spots (i.e. \"salt\") and connect\nsmall dark cracks.\n\"\"\"\n\nopened = opening(phantom, selem)\nplot_comparison(phantom, opened, 'opening')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nSince ``opening`` an image starts with an erosion operation, light regions that\nare *smaller* than the structuring element are removed. The dilation operation\nthat follows ensures that light regions that are *larger* than the structuring\nelement retain their original size. Notice how the light and dark shapes in the\ncenter their original thickness but the 3 lighter patches in the bottom get\ncompletely eroded. The size dependence is highlighted by the outer white ring:\nThe parts of the ring thinner than the structuring element were completely\nerased, while the thicker region at the top retains its original thickness.\n\n\nClosing\n=======\n\nMorphological ``closing`` on an image is defined as a *dilation followed by an\nerosion*. Closing can remove small dark spots (i.e. \"pepper\") and connect\nsmall bright cracks.\n\nTo illustrate this more clearly, let's add a small crack to the white border:\n\"\"\"\n\nphantom = img_as_ubyte(io.imread(data_dir+'\/phantom.png', as_grey=True))\nphantom[10:30, 200:210] = 0\n\nclosed = closing(phantom, selem)\nplot_comparison(phantom, closed, 'closing')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nSince ``closing`` an image starts with an dilation operation, dark regions\nthat are *smaller* than the structuring element are removed. The dilation\noperation that follows ensures that dark regions that are *larger* than the\nstructuring element retain their original size. Notice how the white ellipses\nat the bottom get connected because of dilation, but other dark region retain\ntheir original sizes. Also notice how the crack we added is mostly removed.\n\n\nWhite tophat\n============\n\nThe ``white_tophat`` of an image is defined as the *image minus its\nmorphological opening*. 
This operation returns the bright spots of the image\nthat are smaller than the structuring element.\n\nTo make things interesting, we'll add bright and dark spots to the image:\n\"\"\"\n\nphantom = img_as_ubyte(io.imread(data_dir+'\/phantom.png', as_grey=True))\nphantom[340:350, 200:210] = 255\nphantom[100:110, 200:210] = 0\n\nw_tophat = white_tophat(phantom, selem)\nplot_comparison(phantom, w_tophat, 'white tophat')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nAs you can see, the 10-pixel wide white square is highlighted since it is\nsmaller than the structuring element. Also, the thin, white edges around most\nof the ellipse are retained because they're smaller than the structuring\nelement, but the thicker region at the top disappears.\n\n\nBlack tophat\n============\n\nThe ``black_tophat`` of an image is defined as its morphological **closing\nminus the original image**. This operation returns the *dark spots of the\nimage that are smaller than the structuring element*.\n\"\"\"\n\nb_tophat = black_tophat(phantom, selem)\nplot_comparison(phantom, b_tophat, 'black tophat')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nAs you can see, the 10-pixel wide black square is highlighted since it is\nsmaller than the structuring element.\n\n\nDuality\n-------\n\nAs you should have noticed, many of these operations are simply the reverse\nof another operation. This duality can be summarized as follows:\n\n1. Erosion <-> Dilation\n2. Opening <-> Closing\n3. White tophat <-> Black tophat\n\n\nSkeletonize\n===========\n\nThinning is used to reduce each connected component in a binary image to a\n*single-pixel wide skeleton*. It is important to note that this is performed\non binary images only.\n\n\"\"\"\n\nfrom skimage import img_as_bool\nhorse = ~img_as_bool(io.imread(data_dir+'\/horse.png', as_grey=True))\n\nsk = skeletonize(horse)\nplot_comparison(horse, sk, 'skeletonize')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nAs the name suggests, this technique is used to thin the image to 1-pixel wide\nskeleton by applying thinning successively.\n\n\nConvex hull\n===========\n\nThe ``convex_hull_image`` is the *set of pixels included in the smallest\nconvex polygon that surround all white pixels in the input image*. Again note\nthat this is also performed on binary images.\n\n\"\"\"\n\nhull1 = convex_hull_image(horse)\nplot_comparison(horse, hull1, 'convex hull')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nAs the figure illustrates, ``convex_hull_image`` gives the smallest polygon\nwhich covers the white or True completely in the image.\n\nIf we add a small grain to the image, we can see how the convex hull adapts to\nenclose that grain:\n\"\"\"\n\nimport numpy as np\n\nhorse2 = np.copy(horse)\nhorse2[45:50, 75:80] = 1\n\nhull2 = convex_hull_image(horse2)\nplot_comparison(horse2, hull2, 'convex hull')\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\n\nAdditional Resources\n====================\n\n1. `MathWorks tutorial on morphological processing\n`_\n2. `Auckland university's tutorial on Morphological Image Processing\n`_\n3. http:\/\/en.wikipedia.org\/wiki\/Mathematical_morphology\n\n\"\"\"\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"codenote\/chromium-test","path":"ppapi\/native_client\/tests\/breakpad_crash_test\/crash_dump_tester.py","copies":"6","size":"8213","content":"#!\/usr\/bin\/python\n# Copyright (c) 2012 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nscript_dir = os.path.dirname(__file__)\nsys.path.append(os.path.join(script_dir,\n '..\/..\/tools\/browser_tester'))\n\nimport browser_tester\nimport browsertester.browserlauncher\n\n# This script extends browser_tester to check for the presence of\n# Breakpad crash dumps.\n\n\n# This reads a file of lines containing 'key:value' pairs.\n# The file contains entries like the following:\n# plat:Win32\n# prod:Chromium\n# ptype:nacl-loader\n# rept:crash svc\ndef ReadDumpTxtFile(filename):\n dump_info = {}\n fh = open(filename, 'r')\n for line in fh:\n if ':' in line:\n key, value = line.rstrip().split(':', 1)\n dump_info[key] = value\n fh.close()\n return dump_info\n\n\ndef StartCrashService(browser_path, dumps_dir, windows_pipe_name,\n cleanup_funcs, crash_service_exe):\n # Find crash_service.exe relative to chrome.exe. This is a bit icky.\n browser_dir = os.path.dirname(browser_path)\n proc = subprocess.Popen([os.path.join(browser_dir, crash_service_exe),\n '--v=1', # Verbose output for debugging failures\n '--dumps-dir=%s' % dumps_dir,\n '--pipe-name=%s' % windows_pipe_name])\n\n def Cleanup():\n # Note that if the process has already exited, this will raise\n # an 'Access is denied' WindowsError exception, but\n # crash_service.exe is not supposed to do this and such\n # behaviour should make the test fail.\n proc.terminate()\n status = proc.wait()\n sys.stdout.write('crash_dump_tester: %s exited with status %s\\n'\n % (crash_service_exe, status))\n\n cleanup_funcs.append(Cleanup)\n\n\ndef ListPathsInDir(dir_path):\n if os.path.exists(dir_path):\n return [os.path.join(dir_path, name)\n for name in os.listdir(dir_path)]\n else:\n return []\n\n\ndef GetDumpFiles(dumps_dirs):\n all_files = [filename\n for dumps_dir in dumps_dirs\n for filename in ListPathsInDir(dumps_dir)]\n sys.stdout.write('crash_dump_tester: Found %i files\\n' % len(all_files))\n for dump_file in all_files:\n sys.stdout.write(' %s (size %i)\\n'\n % (dump_file, os.stat(dump_file).st_size))\n return [dump_file for dump_file in all_files\n if dump_file.endswith('.dmp')]\n\n\ndef Main(cleanup_funcs):\n parser = browser_tester.BuildArgParser()\n parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',\n type=int, default=0,\n help='The number of crash dumps that we should expect')\n parser.add_option('--expected_process_type_for_crash',\n dest='expected_process_type_for_crash',\n type=str, default='nacl-loader',\n help='The type of Chromium process that we expect the '\n 'crash dump to be for')\n # Ideally we would just query the OS here to find out whether we are\n # running x86-32 or x86-64 Windows, but Python's win32api module\n # does not contain a wrapper for GetNativeSystemInfo(), which is\n # what NaCl uses to check this, or for IsWow64Process(), which is\n # what Chromium uses. 
Instead, we just rely on the build system to\n # tell us.\n parser.add_option('--win64', dest='win64', action='store_true',\n help='Pass this if we are running tests for x86-64 Windows')\n options, args = parser.parse_args()\n\n temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')\n def CleanUpTempDir():\n browsertester.browserlauncher.RemoveDirectory(temp_dir)\n cleanup_funcs.append(CleanUpTempDir)\n\n # To get a guaranteed unique pipe name, use the base name of the\n # directory we just created.\n windows_pipe_name = r'\\\\.\\pipe\\%s_crash_service' % os.path.basename(temp_dir)\n\n # This environment variable enables Breakpad crash dumping in\n # non-official builds of Chromium.\n os.environ['CHROME_HEADLESS'] = '1'\n if sys.platform == 'win32':\n dumps_dir = temp_dir\n # Override the default (global) Windows pipe name that Chromium will\n # use for out-of-process crash reporting.\n os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name\n # Launch the x86-32 crash service so that we can handle crashes in\n # the browser process.\n StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,\n cleanup_funcs, 'crash_service.exe')\n if options.win64:\n # Launch the x86-64 crash service so that we can handle crashes\n # in the NaCl loader process (nacl64.exe).\n StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,\n cleanup_funcs, 'crash_service64.exe')\n # We add a delay because there is probably a race condition:\n # crash_service.exe might not have finished doing\n # CreateNamedPipe() before NaCl does a crash dump and tries to\n # connect to that pipe.\n # TODO(mseaborn): We could change crash_service.exe to report when\n # it has successfully created the named pipe.\n time.sleep(1)\n elif sys.platform == 'darwin':\n dumps_dir = temp_dir\n os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir\n elif sys.platform.startswith('linux'):\n # The \"--user-data-dir\" option is not effective for the Breakpad\n # setup in Linux Chromium, because Breakpad is initialized before\n # \"--user-data-dir\" is read. So we set HOME to redirect the crash\n # dumps to a temporary directory.\n home_dir = temp_dir\n os.environ['HOME'] = home_dir\n options.enable_crash_reporter = True\n\n result = browser_tester.Run(options.url, options)\n\n # Find crash dump results.\n if sys.platform.startswith('linux'):\n # Look in \"~\/.config\/*\/Crash Reports\". 
This will find crash\n # reports under ~\/.config\/chromium or ~\/.config\/google-chrome, or\n # under other subdirectories in case the branding is changed.\n dumps_dirs = [os.path.join(path, 'Crash Reports')\n for path in ListPathsInDir(os.path.join(home_dir, '.config'))]\n else:\n dumps_dirs = [dumps_dir]\n dmp_files = GetDumpFiles(dumps_dirs)\n\n failed = False\n msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\\n' %\n (len(dmp_files), options.expected_crash_dumps))\n if len(dmp_files) != options.expected_crash_dumps:\n sys.stdout.write(msg)\n failed = True\n\n for dump_file in dmp_files:\n # Sanity check: Make sure dumping did not fail after opening the file.\n msg = 'crash_dump_tester: ERROR: Dump file is empty\\n'\n if os.stat(dump_file).st_size == 0:\n sys.stdout.write(msg)\n failed = True\n\n # On Windows, the crash dumps should come in pairs of a .dmp and\n # .txt file.\n if sys.platform == 'win32':\n second_file = dump_file[:-4] + '.txt'\n msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '\n '%r file\\n' % (dump_file, second_file))\n if not os.path.exists(second_file):\n sys.stdout.write(msg)\n failed = True\n continue\n # Check that the crash dump comes from the NaCl process.\n dump_info = ReadDumpTxtFile(second_file)\n if 'ptype' in dump_info:\n msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\\n'\n % (dump_info['ptype'], options.expected_process_type_for_crash))\n if dump_info['ptype'] != options.expected_process_type_for_crash:\n sys.stdout.write(msg)\n failed = True\n else:\n sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\\n')\n failed = True\n # TODO(mseaborn): Ideally we would also check that a backtrace\n # containing an expected function name can be extracted from the\n # crash dump.\n\n if failed:\n sys.stdout.write('crash_dump_tester: FAILED\\n')\n result = 1\n else:\n sys.stdout.write('crash_dump_tester: PASSED\\n')\n\n return result\n\n\ndef MainWrapper():\n cleanup_funcs = []\n try:\n return Main(cleanup_funcs)\n finally:\n for func in cleanup_funcs:\n func()\n\n\nif __name__ == '__main__':\n sys.exit(MainWrapper())\n","license":"bsd-3-clause"} {"repo_name":"hainm\/dask","path":"dask\/dataframe\/shuffle.py","copies":"4","size":"2967","content":"from itertools import count\nfrom collections import Iterator\nfrom math import ceil\nfrom toolz import merge, accumulate, merge_sorted\nimport toolz\nfrom operator import getitem, setitem\nimport pandas as pd\nimport numpy as np\nfrom pframe import pframe\n\nfrom .. import threaded\nfrom .core import DataFrame, Series, get, names\nfrom ..compatibility import unicode\nfrom ..utils import ignoring\n\n\ntokens = ('-%d' % i for i in count(1))\n\n\ndef set_index(f, index, npartitions=None, **kwargs):\n \"\"\" Set DataFrame index to new column\n\n Sorts index and realigns Dataframe to new sorted order. 
This shuffles and\n repartitions your data.\n \"\"\"\n npartitions = npartitions or f.npartitions\n if not isinstance(index, Series):\n index2 = f[index]\n else:\n index2 = index\n\n divisions = (index2\n .quantiles(np.linspace(0, 100, npartitions+1)[1:-1])\n .compute())\n return f.set_partition(index, divisions, **kwargs)\n\n\npartition_names = ('set_partition-%d' % i for i in count(1))\n\ndef set_partition(f, index, divisions, get=threaded.get, **kwargs):\n \"\"\" Set new partitioning along index given divisions \"\"\"\n divisions = unique(divisions)\n name = next(names)\n if isinstance(index, Series):\n assert index.divisions == f.divisions\n dsk = dict(((name, i), (f._partition_type.set_index, block, ind))\n for i, (block, ind) in enumerate(zip(f._keys(), index._keys())))\n f2 = type(f)(merge(f.dask, index.dask, dsk), name,\n f.column_info, f.divisions)\n else:\n dsk = dict(((name, i), (f._partition_type.set_index, block, index))\n for i, block in enumerate(f._keys()))\n f2 = type(f)(merge(f.dask, dsk), name, f.column_info, f.divisions)\n\n head = f2.head()\n pf = pframe(like=head, divisions=divisions, **kwargs)\n\n def append(block):\n pf.append(block)\n return 0\n\n f2.map_blocks(append).compute(get=get)\n pf.flush()\n\n return from_pframe(pf)\n\n\ndef from_pframe(pf):\n \"\"\" Load dask.array from pframe \"\"\"\n name = next(names)\n dsk = dict(((name, i), (pframe.get_partition, pf, i))\n for i in range(pf.npartitions))\n\n return DataFrame(dsk, name, pf.columns, pf.divisions)\n\n\ndef unique(divisions):\n \"\"\" Polymorphic unique function\n\n >>> list(unique([1, 2, 3, 1, 2, 3]))\n [1, 2, 3]\n\n >>> unique(np.array([1, 2, 3, 1, 2, 3]))\n array([1, 2, 3])\n\n >>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))\n [Alice, Bob]\n Categories (2, object): [Alice, Bob]\n \"\"\"\n if isinstance(divisions, np.ndarray):\n return np.unique(divisions)\n if isinstance(divisions, pd.Categorical):\n return pd.Categorical.from_codes(np.unique(divisions.codes),\n divisions.categories, divisions.ordered)\n if isinstance(divisions, (tuple, list, Iterator)):\n return tuple(toolz.unique(divisions))\n raise NotImplementedError()\n","license":"bsd-3-clause"} {"repo_name":"allanino\/nupic","path":"external\/linux32\/lib\/python2.6\/site-packages\/matplotlib\/backends\/backend_tkagg.py","copies":"69","size":"24593","content":"# Todd Miller jmiller@stsci.edu\n\nfrom __future__ import division\n\nimport os, sys, math\n\nimport Tkinter as Tk, FileDialog\nimport tkagg # Paint image to Tk photo blitter extension\nfrom backend_agg import FigureCanvasAgg\n\nimport os.path\n\nimport matplotlib\nfrom matplotlib.cbook import is_string_like\nfrom matplotlib.backend_bases import RendererBase, GraphicsContextBase, \\\n FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors\n\nfrom matplotlib.figure import Figure\nfrom matplotlib._pylab_helpers import Gcf\n\nimport matplotlib.windowing as windowing\nfrom matplotlib.widgets import SubplotTool\n\nimport matplotlib.cbook as cbook\n\nrcParams = matplotlib.rcParams\nverbose = matplotlib.verbose\n\n\nbackend_version = Tk.TkVersion\n\n# the true dots per inch on the screen; should be display dependent\n# see http:\/\/groups.google.com\/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi\nPIXELS_PER_INCH = 75\n\ncursord = {\n cursors.MOVE: \"fleur\",\n cursors.HAND: \"hand2\",\n cursors.POINTER: \"arrow\",\n cursors.SELECT_REGION: \"tcross\",\n }\n\n\ndef round(x):\n 
return int(math.floor(x+0.5))\n\ndef raise_msg_to_str(msg):\n \"\"\"msg is a return arg from a raise. Join with new lines\"\"\"\n if not is_string_like(msg):\n msg = '\\n'.join(map(str, msg))\n return msg\n\ndef error_msg_tkpaint(msg, parent=None):\n import tkMessageBox\n tkMessageBox.showerror(\"matplotlib\", msg)\n\ndef draw_if_interactive():\n if matplotlib.is_interactive():\n figManager = Gcf.get_active()\n if figManager is not None:\n figManager.show()\n\n\ndef show():\n \"\"\"\n Show all the figures and enter the gtk mainloop\n\n This should be the last line of your script. This function sets\n interactive mode to True, as detailed on\n http:\/\/matplotlib.sf.net\/interactive.html\n \"\"\"\n for manager in Gcf.get_all_fig_managers():\n manager.show()\n import matplotlib\n matplotlib.interactive(True)\n if rcParams['tk.pythoninspect']:\n os.environ['PYTHONINSPECT'] = '1'\n if show._needmain:\n Tk.mainloop()\n show._needmain = False\nshow._needmain = True\n\ndef new_figure_manager(num, *args, **kwargs):\n \"\"\"\n Create a new figure manager instance\n \"\"\"\n _focus = windowing.FocusManager()\n FigureClass = kwargs.pop('FigureClass', Figure)\n figure = FigureClass(*args, **kwargs)\n window = Tk.Tk()\n canvas = FigureCanvasTkAgg(figure, master=window)\n figManager = FigureManagerTkAgg(canvas, num, window)\n if matplotlib.is_interactive():\n figManager.show()\n return figManager\n\n\nclass FigureCanvasTkAgg(FigureCanvasAgg):\n keyvald = {65507 : 'control',\n 65505 : 'shift',\n 65513 : 'alt',\n 65508 : 'control',\n 65506 : 'shift',\n 65514 : 'alt',\n 65361 : 'left',\n 65362 : 'up',\n 65363 : 'right',\n 65364 : 'down',\n 65307 : 'escape',\n 65470 : 'f1',\n 65471 : 'f2',\n 65472 : 'f3',\n 65473 : 'f4',\n 65474 : 'f5',\n 65475 : 'f6',\n 65476 : 'f7',\n 65477 : 'f8',\n 65478 : 'f9',\n 65479 : 'f10',\n 65480 : 'f11',\n 65481 : 'f12',\n 65300 : 'scroll_lock',\n 65299 : 'break',\n 65288 : 'backspace',\n 65293 : 'enter',\n 65379 : 'insert',\n 65535 : 'delete',\n 65360 : 'home',\n 65367 : 'end',\n 65365 : 'pageup',\n 65366 : 'pagedown',\n 65438 : '0',\n 65436 : '1',\n 65433 : '2',\n 65435 : '3',\n 65430 : '4',\n 65437 : '5',\n 65432 : '6',\n 65429 : '7',\n 65431 : '8',\n 65434 : '9',\n 65451 : '+',\n 65453 : '-',\n 65450 : '*',\n 65455 : '\/',\n 65439 : 'dec',\n 65421 : 'enter',\n }\n\n def __init__(self, figure, master=None, resize_callback=None):\n FigureCanvasAgg.__init__(self, figure)\n self._idle = True\n t1,t2,w,h = self.figure.bbox.bounds\n w, h = int(w), int(h)\n self._tkcanvas = Tk.Canvas(\n master=master, width=w, height=h, borderwidth=4)\n self._tkphoto = Tk.PhotoImage(\n master=self._tkcanvas, width=w, height=h)\n self._tkcanvas.create_image(w\/2, h\/2, image=self._tkphoto)\n self._resize_callback = resize_callback\n self._tkcanvas.bind(\"<Configure>\", self.resize)\n self._tkcanvas.bind(\"<Key>\", self.key_press)\n self._tkcanvas.bind(\"<Motion>\", self.motion_notify_event)\n self._tkcanvas.bind(\"<KeyRelease>\", self.key_release)\n for name in \"<Button-1>\", \"<Button-2>\", \"<Button-3>\":\n self._tkcanvas.bind(name, self.button_press_event)\n for name in \"<ButtonRelease-1>\", \"<ButtonRelease-2>\", \"<ButtonRelease-3>\":\n self._tkcanvas.bind(name, self.button_release_event)\n\n # Mouse wheel on Linux generates button 4\/5 events\n for name in \"<Button-4>\", \"<Button-5>\":\n self._tkcanvas.bind(name, self.scroll_event)\n # Mouse wheel for windows goes to the window with the focus.\n # Since the canvas won't usually have the focus, bind the\n # event to the window containing the canvas instead.\n # See http:\/\/wiki.tcl.tk\/3893 (mousewheel) for details\n root = self._tkcanvas.winfo_toplevel()\n root.bind(\"<MouseWheel>\", 
self.scroll_event_windows)\n\n self._master = master\n self._tkcanvas.focus_set()\n\n # a dict from func-> cbook.Scheduler threads\n self.sourced = dict()\n\n # call the idle handler\n def on_idle(*ignore):\n self.idle_event()\n return True\n\n # disable until you figure out how to handle threads and interrupts\n #t = cbook.Idle(on_idle)\n #self._tkcanvas.after_idle(lambda *ignore: t.start())\n\n def resize(self, event):\n width, height = event.width, event.height\n if self._resize_callback is not None:\n self._resize_callback(event)\n\n # compute desired figure size in inches\n\tdpival = self.figure.dpi\n winch = width\/dpival\n hinch = height\/dpival\n self.figure.set_size_inches(winch, hinch)\n\n\n self._tkcanvas.delete(self._tkphoto)\n self._tkphoto = Tk.PhotoImage(\n master=self._tkcanvas, width=width, height=height)\n self._tkcanvas.create_image(width\/2,height\/2,image=self._tkphoto)\n self.resize_event()\n self.show()\n\n def draw(self):\n FigureCanvasAgg.draw(self)\n tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)\n self._master.update_idletasks()\n\n def blit(self, bbox=None):\n tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)\n self._master.update_idletasks()\n\n show = draw\n\n def draw_idle(self):\n 'update drawing area only if idle'\n d = self._idle\n self._idle = False\n def idle_draw(*args):\n self.draw()\n self._idle = True\n\n if d: self._tkcanvas.after_idle(idle_draw)\n\n def get_tk_widget(self):\n \"\"\"returns the Tk widget used to implement FigureCanvasTkAgg.\n Although the initial implementation uses a Tk canvas, this routine\n is intended to hide that fact.\n \"\"\"\n return self._tkcanvas\n\n def motion_notify_event(self, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y\n FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)\n\n\n def button_press_event(self, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y\n num = getattr(event, 'num', None)\n\n if sys.platform=='darwin':\n # 2 and 3 were reversed on the OSX platform I\n # tested under tkagg\n if num==2: num=3\n elif num==3: num=2\n\n FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)\n\n def button_release_event(self, event):\n x = event.x\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y\n\n num = getattr(event, 'num', None)\n\n if sys.platform=='darwin':\n # 2 and 3 were reversed on the OSX platform I\n # tested under tkagg\n if num==2: num=3\n elif num==3: num=2\n\n FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)\n\n def scroll_event(self, event):\n x = event.x\n y = self.figure.bbox.height - event.y\n num = getattr(event, 'num', None)\n if num==4: step = -1\n elif num==5: step = +1\n else: step = 0\n\n FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)\n\n def scroll_event_windows(self, event):\n \"\"\"MouseWheel event processor\"\"\"\n # need to find the window that contains the mouse\n w = event.widget.winfo_containing(event.x_root, event.y_root)\n if w == self._tkcanvas:\n x = event.x_root - w.winfo_rootx()\n y = event.y_root - w.winfo_rooty()\n y = self.figure.bbox.height - y\n step = event.delta\/120.\n FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)\n\n def _get_key(self, event):\n val = event.keysym_num\n if val in self.keyvald:\n key = self.keyvald[val]\n elif val<256:\n key = chr(val)\n else:\n key = None\n return key\n\n\n def key_press(self, event):\n 
key = self._get_key(event)\n FigureCanvasBase.key_press_event(self, key, guiEvent=event)\n\n def key_release(self, event):\n key = self._get_key(event)\n FigureCanvasBase.key_release_event(self, key, guiEvent=event)\n\n def flush_events(self):\n self._master.update()\n\n def start_event_loop(self,timeout):\n FigureCanvasBase.start_event_loop_default(self,timeout)\n start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__\n\n def stop_event_loop(self):\n FigureCanvasBase.stop_event_loop_default(self)\n stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__\n\nclass FigureManagerTkAgg(FigureManagerBase):\n \"\"\"\n Public attributes\n\n canvas : The FigureCanvas instance\n num : The Figure number\n toolbar : The tk.Toolbar\n window : The tk.Window\n \"\"\"\n def __init__(self, canvas, num, window):\n FigureManagerBase.__init__(self, canvas, num)\n self.window = window\n self.window.withdraw()\n self.window.wm_title(\"Figure %d\" % num)\n self.canvas = canvas\n self._num = num\n t1,t2,w,h = canvas.figure.bbox.bounds\n w, h = int(w), int(h)\n self.window.minsize(int(w*3\/4),int(h*3\/4))\n if matplotlib.rcParams['toolbar']=='classic':\n self.toolbar = NavigationToolbar( canvas, self.window )\n elif matplotlib.rcParams['toolbar']=='toolbar2':\n self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )\n else:\n self.toolbar = None\n if self.toolbar is not None:\n self.toolbar.update()\n self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n self._shown = False\n\n def notify_axes_change(fig):\n 'this will be called whenever the current axes is changed'\n if self.toolbar != None: self.toolbar.update()\n self.canvas.figure.add_axobserver(notify_axes_change)\n\n\n\n # attach a show method to the figure for pylab ease of use\n self.canvas.figure.show = lambda *args: self.show()\n\n\n def resize(self, event):\n width, height = event.width, event.height\n self.toolbar.configure(width=width) # , height=height)\n\n\n def show(self):\n \"\"\"\n this function doesn't segfault but causes the\n PyEval_RestoreThread: NULL state bug on win32\n \"\"\"\n\n def destroy(*args):\n self.window = None\n Gcf.destroy(self._num)\n\n if not self._shown: self.canvas._tkcanvas.bind(\"<Destroy>\", destroy)\n _focus = windowing.FocusManager()\n if not self._shown:\n self.window.deiconify()\n # anim.py requires this\n if sys.platform=='win32' : self.window.update()\n else:\n self.canvas.draw()\n self._shown = True\n\n\n def destroy(self, *args):\n if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():\n if self.window is not None:\n self.window.quit()\n if self.window is not None:\n #self.toolbar.destroy()\n self.window.destroy()\n\n pass\n self.window = None\n\n def set_window_title(self, title):\n self.window.wm_title(title)\n\nclass AxisMenu:\n def __init__(self, master, naxes):\n self._master = master\n self._naxes = naxes\n self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)\n self._mbar.pack(side=Tk.LEFT)\n self._mbutton = Tk.Menubutton(\n master=self._mbar, text=\"Axes\", underline=0)\n self._mbutton.pack(side=Tk.LEFT, padx=\"2m\")\n self._mbutton.menu = Tk.Menu(self._mbutton)\n self._mbutton.menu.add_command(\n label=\"Select All\", command=self.select_all)\n self._mbutton.menu.add_command(\n label=\"Invert All\", command=self.invert_all)\n self._axis_var = []\n self._checkbutton = []\n for i in range(naxes):\n self._axis_var.append(Tk.IntVar())\n self._axis_var[i].set(1)\n self._checkbutton.append(self._mbutton.menu.add_checkbutton(\n label = 
\"Axis %d\" % (i+1),\n variable=self._axis_var[i],\n command=self.set_active))\n self._mbutton.menu.invoke(self._mbutton.menu.index(\"Select All\"))\n self._mbutton['menu'] = self._mbutton.menu\n self._mbar.tk_menuBar(self._mbutton)\n self.set_active()\n\n def adjust(self, naxes):\n if self._naxes < naxes:\n for i in range(self._naxes, naxes):\n self._axis_var.append(Tk.IntVar())\n self._axis_var[i].set(1)\n self._checkbutton.append( self._mbutton.menu.add_checkbutton(\n label = \"Axis %d\" % (i+1),\n variable=self._axis_var[i],\n command=self.set_active))\n elif self._naxes > naxes:\n for i in range(self._naxes-1, naxes-1, -1):\n del self._axis_var[i]\n self._mbutton.menu.forget(self._checkbutton[i])\n del self._checkbutton[i]\n self._naxes = naxes\n self.set_active()\n\n def get_indices(self):\n a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]\n return a\n\n def set_active(self):\n self._master.set_active(self.get_indices())\n\n def invert_all(self):\n for a in self._axis_var:\n a.set(not a.get())\n self.set_active()\n\n def select_all(self):\n for a in self._axis_var:\n a.set(1)\n self.set_active()\n\nclass NavigationToolbar(Tk.Frame):\n \"\"\"\n Public attriubutes\n\n canvas - the FigureCanvas (gtk.DrawingArea)\n win - the gtk.Window\n\n \"\"\"\n def _Button(self, text, file, command):\n file = os.path.join(rcParams['datapath'], 'images', file)\n im = Tk.PhotoImage(master=self, file=file)\n b = Tk.Button(\n master=self, text=text, padx=2, pady=2, image=im, command=command)\n b._ntimage = im\n b.pack(side=Tk.LEFT)\n return b\n\n def __init__(self, canvas, window):\n self.canvas = canvas\n self.window = window\n\n xmin, xmax = canvas.figure.bbox.intervalx\n height, width = 50, xmax-xmin\n Tk.Frame.__init__(self, master=self.window,\n width=width, height=height,\n borderwidth=2)\n\n self.update() # Make axes menu\n\n self.bLeft = self._Button(\n text=\"Left\", file=\"stock_left.ppm\",\n command=lambda x=-1: self.panx(x))\n\n self.bRight = self._Button(\n text=\"Right\", file=\"stock_right.ppm\",\n command=lambda x=1: self.panx(x))\n\n self.bZoomInX = self._Button(\n text=\"ZoomInX\",file=\"stock_zoom-in.ppm\",\n command=lambda x=1: self.zoomx(x))\n\n self.bZoomOutX = self._Button(\n text=\"ZoomOutX\", file=\"stock_zoom-out.ppm\",\n command=lambda x=-1: self.zoomx(x))\n\n self.bUp = self._Button(\n text=\"Up\", file=\"stock_up.ppm\",\n command=lambda y=1: self.pany(y))\n\n self.bDown = self._Button(\n text=\"Down\", file=\"stock_down.ppm\",\n command=lambda y=-1: self.pany(y))\n\n self.bZoomInY = self._Button(\n text=\"ZoomInY\", file=\"stock_zoom-in.ppm\",\n command=lambda y=1: self.zoomy(y))\n\n self.bZoomOutY = self._Button(\n text=\"ZoomOutY\",file=\"stock_zoom-out.ppm\",\n command=lambda y=-1: self.zoomy(y))\n\n self.bSave = self._Button(\n text=\"Save\", file=\"stock_save_as.ppm\",\n command=self.save_figure)\n\n self.pack(side=Tk.BOTTOM, fill=Tk.X)\n\n\n def set_active(self, ind):\n self._ind = ind\n self._active = [ self._axes[i] for i in self._ind ]\n\n def panx(self, direction):\n for a in self._active:\n a.xaxis.pan(direction)\n self.canvas.draw()\n\n def pany(self, direction):\n for a in self._active:\n a.yaxis.pan(direction)\n self.canvas.draw()\n\n def zoomx(self, direction):\n\n for a in self._active:\n a.xaxis.zoom(direction)\n self.canvas.draw()\n\n def zoomy(self, direction):\n\n for a in self._active:\n a.yaxis.zoom(direction)\n self.canvas.draw()\n\n def save_figure(self):\n fs = FileDialog.SaveFileDialog(master=self.window,\n title='Save the 
figure')\n try:\n self.lastDir\n except AttributeError:\n self.lastDir = os.curdir\n\n fname = fs.go(dir_or_file=self.lastDir) # , pattern=\"*.png\")\n if fname is None: # Cancel\n return\n\n self.lastDir = os.path.dirname(fname)\n try:\n self.canvas.print_figure(fname)\n except IOError, msg:\n err = '\\n'.join(map(str, msg))\n msg = 'Failed to save %s: Error msg was\\n\\n%s' % (\n fname, err)\n error_msg_tkpaint(msg)\n\n def update(self):\n _focus = windowing.FocusManager()\n self._axes = self.canvas.figure.axes\n naxes = len(self._axes)\n if not hasattr(self, \"omenu\"):\n self.set_active(range(naxes))\n self.omenu = AxisMenu(master=self, naxes=naxes)\n else:\n self.omenu.adjust(naxes)\n\nclass NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):\n \"\"\"\n Public attriubutes\n\n canvas - the FigureCanvas (gtk.DrawingArea)\n win - the gtk.Window\n \"\"\"\n def __init__(self, canvas, window):\n self.canvas = canvas\n self.window = window\n self._idle = True\n #Tk.Frame.__init__(self, master=self.canvas._tkcanvas)\n NavigationToolbar2.__init__(self, canvas)\n\n def destroy(self, *args):\n del self.message\n Tk.Frame.destroy(self, *args)\n\n def set_message(self, s):\n self.message.set(s)\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y0 = height-y0\n y1 = height-y1\n try: self.lastrect\n except AttributeError: pass\n else: self.canvas._tkcanvas.delete(self.lastrect)\n self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)\n\n #self.canvas.draw()\n\n def release(self, event):\n try: self.lastrect\n except AttributeError: pass\n else:\n self.canvas._tkcanvas.delete(self.lastrect)\n del self.lastrect\n\n def set_cursor(self, cursor):\n self.window.configure(cursor=cursord[cursor])\n\n def _Button(self, text, file, command):\n file = os.path.join(rcParams['datapath'], 'images', file)\n im = Tk.PhotoImage(master=self, file=file)\n b = Tk.Button(\n master=self, text=text, padx=2, pady=2, image=im, command=command)\n b._ntimage = im\n b.pack(side=Tk.LEFT)\n return b\n\n def _init_toolbar(self):\n xmin, xmax = self.canvas.figure.bbox.intervalx\n height, width = 50, xmax-xmin\n Tk.Frame.__init__(self, master=self.window,\n width=width, height=height,\n borderwidth=2)\n\n self.update() # Make axes menu\n\n self.bHome = self._Button( text=\"Home\", file=\"home.ppm\",\n command=self.home)\n\n self.bBack = self._Button( text=\"Back\", file=\"back.ppm\",\n command = self.back)\n\n self.bForward = self._Button(text=\"Forward\", file=\"forward.ppm\",\n command = self.forward)\n\n self.bPan = self._Button( text=\"Pan\", file=\"move.ppm\",\n command = self.pan)\n\n self.bZoom = self._Button( text=\"Zoom\",\n file=\"zoom_to_rect.ppm\",\n command = self.zoom)\n\n self.bsubplot = self._Button( text=\"Configure Subplots\", file=\"subplots.ppm\",\n command = self.configure_subplots)\n\n self.bsave = self._Button( text=\"Save\", file=\"filesave.ppm\",\n command = self.save_figure)\n self.message = Tk.StringVar(master=self)\n self._message_label = Tk.Label(master=self, textvariable=self.message)\n self._message_label.pack(side=Tk.RIGHT)\n self.pack(side=Tk.BOTTOM, fill=Tk.X)\n\n\n def configure_subplots(self):\n toolfig = Figure(figsize=(6,3))\n window = Tk.Tk()\n canvas = FigureCanvasTkAgg(toolfig, master=window)\n toolfig.subplots_adjust(top=0.9)\n tool = SubplotTool(self.canvas.figure, toolfig)\n canvas.show()\n canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n\n def save_figure(self):\n from tkFileDialog import 
asksaveasfilename\n from tkMessageBox import showerror\n filetypes = self.canvas.get_supported_filetypes().copy()\n default_filetype = self.canvas.get_default_filetype()\n\n # Tk doesn't provide a way to choose a default filetype,\n # so we just have to put it first\n default_filetype_name = filetypes[default_filetype]\n del filetypes[default_filetype]\n\n sorted_filetypes = filetypes.items()\n sorted_filetypes.sort()\n sorted_filetypes.insert(0, (default_filetype, default_filetype_name))\n\n tk_filetypes = [\n (name, '*.%s' % ext) for (ext, name) in sorted_filetypes]\n\n fname = asksaveasfilename(\n master=self.window,\n title='Save the figure',\n filetypes = tk_filetypes,\n defaultextension = self.canvas.get_default_filetype()\n )\n\n if fname == \"\" or fname == ():\n return\n else:\n try:\n # This method will handle the delegation to the correct type\n self.canvas.print_figure(fname)\n except Exception, e:\n showerror(\"Error saving file\", str(e))\n\n def set_active(self, ind):\n self._ind = ind\n self._active = [ self._axes[i] for i in self._ind ]\n\n def update(self):\n _focus = windowing.FocusManager()\n self._axes = self.canvas.figure.axes\n naxes = len(self._axes)\n #if not hasattr(self, \"omenu\"):\n # self.set_active(range(naxes))\n # self.omenu = AxisMenu(master=self, naxes=naxes)\n #else:\n # self.omenu.adjust(naxes)\n NavigationToolbar2.update(self)\n\n def dynamic_update(self):\n 'update drawing area only if idle'\n # legacy method; new method is canvas.draw_idle\n self.canvas.draw_idle()\n\n\nFigureManager = FigureManagerTkAgg\n\n","license":"agpl-3.0"} {"repo_name":"mhoffman\/kmos","path":"kmos\/cli.py","copies":"1","size":"16514","content":"#!\/usr\/bin\/env python\n\"\"\"Entry point module for the command-line\n interface. The kmos executable should be\n on the program path, import this modules\n main function and run it.\n\n To call kmos command as you would from the shell,\n use ::\n\n kmos.cli.main('...')\n\n Every command can be shortened as long as it is non-ambiguous, e.g. ::\n\n\n kmos ex \n\n instead of ::\n\n kmos export \n\n\n etc.\n\n\"\"\"\n\n# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)\n# This file is part of kmos.\n#\n# kmos is free software: you can redistribute it and\/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# kmos is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with kmos. If not, see .\n\nfrom __future__ import with_statement\nimport os\nimport shutil\n\nusage = {}\nusage['all'] = \"\"\"kmos help all\n Display documentation for all commands.\n \"\"\"\nusage['benchmark'] = \"\"\"kmos benchmark\n Run 1 mio. 
kMC steps on model in current directory\n and report runtime.\n \"\"\"\n\nusage['build'] = \"\"\"kmos build\n Build kmc_model.%s from *f90 files in the\n current directory.\n\n Additional Parameters ::\n -d\/--debug\n Turn on assertion statements in F90 code\n\n -n\/--no-compiler-optimization\n Do not send optimizing flags to compiler.\n \"\"\" % ('pyd' if os.name == 'nt' else 'so')\n\nusage['help'] = \"\"\"kmos help \n Print usage information for the given command.\n \"\"\"\n\nusage['export'] = \"\"\"kmos export []\n Take a kmos xml-file and export all generated\n source code to the export-path. There try to\n build the kmc_model.%s.\n\n Additional Parameters ::\n\n -s\/--source-only\n Export source only and don't build binary\n\n -b\/--backend (local_smart|lat_int)\n Choose backend. Default is \"local_smart\".\n lat_int is EXPERIMENTAL and not made\n for production, yet.\n\n -d\/--debug\n Turn on assertion statements in F90 code.\n (Only active in compile step)\n\n --acf\n Build the modules base_acf.f90 and proclist_acf.f90. Default is false.\n This both modules contain functions to calculate ACF (autocorrelation function) and MSD (mean squared displacement). \n \n -n\/--no-compiler-optimization\n Do not send optimizing flags to compiler.\n \"\"\" % ('pyd' if os.name == 'nt' else 'so')\n \nusage['settings-export'] = \"\"\"kmos settings-export []\n Take a kmos xml-file and export kmc_settings.py\n to the export-path.\n \"\"\"\n\nusage['edit'] = \"\"\"kmos edit \n Open the kmos xml-file in a GUI to edit\n the model.\n \"\"\"\n\nusage['import'] = \"\"\"kmos import \n Take a kmos xml-file and open an ipython shell\n with the project_tree imported as pt.\n \"\"\"\nusage['rebuild'] = \"\"\"kmos rebuild\n Export code and rebuild binary module from XML\n information included in kmc_settings.py in\n current directory.\n\n Additional Parameters ::\n -d\/--debug\n Turn on assertion statements in F90 code\n \"\"\"\n\nusage['shell'] = \"\"\"kmos shell\n Open an interactive shell and create a KMC_Model in it\n run == shell\n \"\"\"\nusage['run'] = \"\"\"kmos run\n Open an interactive shell and create a KMC_Model in it\n run == shell\n \"\"\"\n\nusage['version'] = \"\"\"kmos version\n Print version number and exit.\n \"\"\"\n\nusage['view'] = \"\"\"kmos view\n Take a kmc_model.%s and kmc_settings.py in the\n same directory and start to simulate the\n model visually.\n\n Additional Parameters ::\n -v\/--steps-per-frame \n Number of steps per frame\n\n \"\"\" % ('pyd' if os.name == 'nt' else 'so')\n\nusage['xml'] = \"\"\"kmos xml\n Print xml representation of model to stdout\n \"\"\"\n\n\ndef get_options(args=None, get_parser=False):\n import optparse\n import os\n from glob import glob\n import kmos\n\n parser = optparse.OptionParser(\n 'Usage: %prog [help] ('\n + '|'.join(sorted(usage.keys()))\n + ') [options]',\n version=kmos.__version__)\n\n parser.add_option('-s', '--source-only',\n dest='source_only',\n action='store_true',\n default=False)\n\n parser.add_option('-p', '--path-to-f2py',\n dest='path_to_f2py',\n default='f2py')\n\n parser.add_option('-b', '--backend',\n dest='backend',\n default='local_smart')\n parser.add_option('-a', '--avoid-default-state',\n dest='avoid_default_state',\n action='store_true',\n default=False,\n )\n\n parser.add_option('-v', '--steps-per-frame',\n dest='steps_per_frame',\n type='int',\n default='50000')\n\n parser.add_option('-d', '--debug',\n default=False,\n dest='debug',\n action='store_true')\n\n parser.add_option('-n', '--no-compiler-optimization',\n 
default=False,\n dest='no_optimize',\n action='store_true')\n\n parser.add_option('-o', '--overwrite',\n default=False,\n action='store_true')\n\n parser.add_option('-l', '--variable-length',\n dest='variable_length',\n default=95,\n type='int')\n\n parser.add_option('-c', '--catmap',\n default=False,\n action='store_true')\n \n parser.add_option('--acf',\n dest='acf',\n action='store_true',\n default=False,\n )\n \n try:\n from numpy.distutils.fcompiler import get_default_fcompiler\n from numpy.distutils import log\n log.set_verbosity(-1, True)\n fcompiler = get_default_fcompiler()\n except:\n fcompiler = 'gfortran'\n\n parser.add_option('-f', '--fcompiler',\n dest='fcompiler',\n default=os.environ.get('F2PY_FCOMPILER', fcompiler))\n\n if args is not None:\n options, args = parser.parse_args(args.split())\n else:\n options, args = parser.parse_args()\n if len(args) < 1:\n parser.error('Command expected')\n if get_parser:\n return options, args, parser\n else:\n return options, args\n\n\ndef match_keys(arg, usage, parser):\n \"\"\"Try to match part of a command against\n the set of commands from usage. Throws\n an error if not successful.\n\n \"\"\"\n possible_args = [key for key in usage if key.startswith(arg)]\n if len(possible_args) == 0:\n parser.error('Command \"%s\" not understood.' % arg)\n elif len(possible_args) > 1:\n parser.error(('Command \"%s\" ambiguous.\\n'\n 'Could be one of %s\\n\\n') % (arg, possible_args))\n else:\n return possible_args[0]\n\n\ndef main(args=None):\n \"\"\"The CLI main entry point function.\n\n The optional argument args, can be used to\n directly supply command line argument like\n\n $ kmos \n\n otherwise args will be taken from STDIN.\n\n \"\"\"\n\n from glob import glob\n\n options, args, parser = get_options(args, get_parser=True)\n\n global model, pt, np, cm_model\n\n if not args[0] in usage.keys():\n args[0] = match_keys(args[0], usage, parser)\n\n if args[0] == 'benchmark':\n from sys import path\n path.append(os.path.abspath(os.curdir))\n nsteps = 1000000\n from time import time\n from kmos.run import KMC_Model\n model = KMC_Model(print_rates=False, banner=False)\n time0 = time()\n try:\n model.proclist.do_kmc_steps(nsteps)\n except: # kmos < 0.3 had no model.proclist.do_kmc_steps\n model.do_steps(nsteps)\n\n needed_time = time() - time0\n print('Using the [%s] backend.' % model.get_backend())\n print('%s steps took %.2f seconds' % (nsteps, needed_time))\n print('Or %.2e steps\/s' % (1e6 \/ needed_time))\n model.deallocate()\n elif args[0] == 'build':\n from kmos.utils import build\n build(options)\n elif args[0] == 'edit':\n from kmos import gui\n gui.main()\n elif args[0] == 'settings-export':\n import kmos.types\n import kmos.io\n from kmos.io import ProcListWriter\n\n if len(args) < 2:\n parser.error('XML file and export path expected.')\n if len(args) < 3:\n out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)\n print('No export path provided. Exporting to %s' % out_dir)\n args.append(out_dir)\n\n xml_file = args[1]\n export_dir = args[2]\n project = kmos.types.Project()\n project.import_file(xml_file)\n\n writer = ProcListWriter(project, export_dir)\n writer.write_settings()\n\n elif args[0] == 'export':\n import kmos.types\n import kmos.io\n from kmos.utils import build\n if len(args) < 2:\n parser.error('XML file and export path expected.')\n if len(args) < 3:\n out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)\n\n print('No export path provided. 
Exporting to %s' % out_dir)\n args.append(out_dir)\n\n xml_file = args[1]\n export_dir = os.path.join(args[2], 'src')\n\n project = kmos.types.Project()\n project.import_file(xml_file)\n\n project.shorten_names(max_length=options.variable_length)\n\n kmos.io.export_source(project,\n export_dir,\n options=options)\n\n if ((os.name == 'posix'\n and os.uname()[0] in ['Linux', 'Darwin'])\n or os.name == 'nt') \\\n and not options.source_only:\n os.chdir(export_dir)\n build(options)\n for out in glob('kmc_*'):\n if os.path.exists('..\/%s' % out) :\n if options.overwrite :\n overwrite = 'y'\n else:\n overwrite = raw_input(('Should I overwrite existing %s ?'\n '[y\/N] ') % out).lower()\n if overwrite.startswith('y') :\n print('Overwriting {out}'.format(**locals()))\n os.remove('..\/%s' % out)\n shutil.move(out, '..')\n else :\n print('Skipping {out}'.format(**locals()))\n else:\n shutil.move(out, '..')\n\n elif args[0] == 'settings-export':\n import kmos.io\n pt = kmos.io.import_file(args[1])\n if len(args) < 3:\n out_dir = os.path.splitext(args[1])[0]\n print('No export path provided. Exporting kmc_settings.py to %s'\n % out_dir)\n args.append(out_dir)\n\n if not os.path.exists(args[2]):\n os.mkdir(args[2])\n elif not os.path.isdir(args[2]):\n raise UserWarning(\"Cannot overwrite %s; Exiting;\" % args[2])\n writer = kmos.io.ProcListWriter(pt, args[2])\n writer.write_settings()\n\n elif args[0] == 'help':\n if len(args) < 2:\n parser.error('Which help do you want?')\n if args[1] == 'all':\n for command in sorted(usage):\n print(usage[command])\n elif args[1] in usage:\n print('Usage: %s\\n' % usage[args[1]])\n else:\n arg = match_keys(args[1], usage, parser)\n print('Usage: %s\\n' % usage[arg])\n\n elif args[0] == 'import':\n import kmos.io\n if not len(args) >= 2:\n raise UserWarning('XML file name expected.')\n pt = kmos.io.import_xml_file(args[1])\n if len(args) == 2:\n sh(banner='Note: pt = kmos.io.import_xml(\\'%s\\')' % args[1])\n elif len(args) == 3: # if optional 3rd argument is given, store model there and exit\n pt.save(args[2])\n\n elif args[0] == 'rebuild':\n from time import sleep\n print('Will rebuild model from kmc_settings.py in current directory')\n print('Please do not interrupt,'\n ' build process, as you will most likely')\n print('loose the current model files.')\n sleep(2.)\n from sys import path\n path.append(os.path.abspath(os.curdir))\n from tempfile import mktemp\n if not os.path.exists('kmc_model.so') \\\n and not os.path.exists('kmc_model.pyd'):\n raise Exception('No kmc_model.so found.')\n if not os.path.exists('kmc_settings.py'):\n raise Exception('No kmc_settings.py found.')\n\n from kmos.run import KMC_Model\n\n model = KMC_Model(print_rates=False, banner=False)\n tempfile = mktemp()\n f = file(tempfile, 'w')\n f.write(model.xml())\n f.close()\n\n for kmc_model in glob('kmc_model.*'):\n os.remove(kmc_model)\n os.remove('kmc_settings.py')\n main('export %s -b %s .' 
% (tempfile, options.backend))\n os.remove(tempfile)\n model.deallocate()\n\n elif args[0] in ['run', 'shell']:\n from sys import path\n path.append(os.path.abspath(os.curdir))\n from kmos.run import KMC_Model\n\n # useful to have in interactive mode\n import numpy as np\n try:\n from matplotlib import pyplot as plt\n except:\n plt = None\n\n if options.catmap:\n import catmap\n import catmap.cli.kmc_runner\n seed = catmap.cli.kmc_runner.get_seed_from_path('.')\n cm_model = catmap.ReactionModel(setup_file='{seed}.mkm'.format(**locals()))\n catmap_message = '\\nSide-loaded catmap_model {seed}.mkm into cm_model = ReactionModel(setup_file=\"{seed}.mkm\")'.format(**locals())\n else:\n catmap_message = ''\n\n try:\n model = KMC_Model(print_rates=False)\n except:\n print(\"Warning: could not import kmc_model!\"\n \" Please make sure you are in the right directory\")\n sh(banner='Note: model = KMC_Model(print_rates=False){catmap_message}'.format(**locals()))\n try:\n model.deallocate()\n except:\n print(\"Warning: could not deallocate model. Was is allocated?\")\n\n elif args[0] == 'version':\n from kmos import VERSION\n print(VERSION)\n\n elif args[0] == 'view':\n from sys import path\n path.append(os.path.abspath(os.curdir))\n from kmos import view\n view.main(steps_per_frame=options.steps_per_frame)\n\n elif args[0] == 'xml':\n from sys import path\n path.append(os.path.abspath(os.curdir))\n from kmos.run import KMC_Model\n model = KMC_Model(banner=False, print_rates=False)\n print(model.xml())\n\n else:\n parser.error('Command \"%s\" not understood.' % args[0])\n\n\ndef sh(banner):\n \"\"\"Wrapper around interactive ipython shell\n that factors out ipython version depencies.\n\n \"\"\"\n\n from distutils.version import LooseVersion\n import IPython\n if hasattr(IPython, 'release'):\n try:\n from IPython.terminal.embed import InteractiveShellEmbed\n InteractiveShellEmbed(banner1=banner)()\n\n except ImportError:\n try:\n from IPython.frontend.terminal.embed \\\n import InteractiveShellEmbed\n InteractiveShellEmbed(banner1=banner)()\n\n except ImportError:\n from IPython.Shell import IPShellEmbed\n IPShellEmbed(banner=banner)()\n else:\n from IPython.Shell import IPShellEmbed\n IPShellEmbed(banner=banner)()\n","license":"gpl-3.0"} {"repo_name":"zorojean\/scikit-learn","path":"sklearn\/preprocessing\/data.py","copies":"113","size":"56747","content":"# Authors: Alexandre Gramfort \n# Mathieu Blondel \n# Olivier Grisel \n# Andreas Mueller \n# Eric Martin \n# License: BSD 3 clause\n\nfrom itertools import chain, combinations\nimport numbers\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..externals import six\nfrom ..utils import check_array\nfrom ..utils.extmath import row_norms\nfrom ..utils.fixes import combinations_with_replacement as combinations_w_r\nfrom ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,\n inplace_csr_row_normalize_l2)\nfrom ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,\n min_max_axis, inplace_row_scale)\nfrom ..utils.validation import check_is_fitted, FLOAT_DTYPES\n\n\nzip = six.moves.zip\nmap = six.moves.map\nrange = six.moves.range\n\n__all__ = [\n 'Binarizer',\n 'KernelCenterer',\n 'MinMaxScaler',\n 'MaxAbsScaler',\n 'Normalizer',\n 'OneHotEncoder',\n 'RobustScaler',\n 'StandardScaler',\n 'add_dummy_feature',\n 'binarize',\n 'normalize',\n 'scale',\n 'robust_scale',\n 'maxabs_scale',\n 'minmax_scale',\n]\n\n\ndef _mean_and_std(X, axis=0, with_mean=True, 
with_std=True):\n \"\"\"Compute mean and std deviation for centering, scaling.\n\n Zero valued std components are reset to 1.0 to avoid NaNs when scaling.\n \"\"\"\n X = np.asarray(X)\n Xr = np.rollaxis(X, axis)\n\n if with_mean:\n mean_ = Xr.mean(axis=0)\n else:\n mean_ = None\n\n if with_std:\n std_ = Xr.std(axis=0)\n std_ = _handle_zeros_in_scale(std_)\n else:\n std_ = None\n\n return mean_, std_\n\n\ndef _handle_zeros_in_scale(scale):\n ''' Makes sure that whenever scale is zero, we handle it correctly.\n\n This happens in most scalers when we have constant features.'''\n\n # if we are fitting on 1D arrays, scale might be a scalar\n if np.isscalar(scale):\n if scale == 0:\n scale = 1.\n elif isinstance(scale, np.ndarray):\n scale[scale == 0.0] = 1.0\n scale[~np.isfinite(scale)] = 1.0\n return scale\n\n\ndef scale(X, axis=0, with_mean=True, with_std=True, copy=True):\n \"\"\"Standardize a dataset along any axis\n\n Center to the mean and component wise scale to unit variance.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the means and standard deviations along. If 0,\n independently standardize each feature, otherwise (if 1) standardize\n each sample.\n\n with_mean : boolean, True by default\n If True, center the data before scaling.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_mean=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he\/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n :class:`sklearn.preprocessing.StandardScaler` to perform centering and\n scaling using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,\n warn_on_dtype=True, estimator='the scale function',\n dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` instead\"\n \" See docstring for motivation and alternatives.\")\n if axis != 0:\n raise ValueError(\"Can only scale sparse matrix on axis=0, \"\n \" got axis=%d\" % axis)\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n _, var = mean_variance_axis(X, axis=0)\n var = _handle_zeros_in_scale(var)\n inplace_column_scale(X, 1 \/ np.sqrt(var))\n else:\n X = np.asarray(X)\n mean_, std_ = _mean_and_std(\n X, axis, with_mean=with_mean, with_std=with_std)\n if copy:\n X = X.copy()\n # Xr is a view on the original array that enables easy use of\n # broadcasting on the axis in which we are interested in\n Xr = np.rollaxis(X, axis)\n if with_mean:\n Xr -= mean_\n mean_1 = Xr.mean(axis=0)\n # Verify that mean_1 is 'close to zero'. 
If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n if not np.allclose(mean_1, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n if with_std:\n Xr \/= std_\n if with_mean:\n mean_2 = Xr.mean(axis=0)\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1\/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again:\n if not np.allclose(mean_2, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0. \")\n Xr -= mean_2\n return X\n\n\nclass MinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Transforms features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) \/ (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n feature_range: tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n copy : boolean, optional, default True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array).\n\n Attributes\n ----------\n min_ : ndarray, shape (n_features,)\n Per feature adjustment for minimum.\n\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n \"\"\"\n\n def __init__(self, feature_range=(0, 1), copy=True):\n self.feature_range = feature_range\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n feature_range = self.feature_range\n if feature_range[0] >= feature_range[1]:\n raise ValueError(\"Minimum of desired feature range must be smaller\"\n \" than maximum. 
Got %s.\" % str(feature_range))\n data_min = np.min(X, axis=0)\n data_range = np.max(X, axis=0) - data_min\n data_range = _handle_zeros_in_scale(data_range)\n self.scale_ = (feature_range[1] - feature_range[0]) \/ data_range\n self.min_ = feature_range[0] - data_min * self.scale_\n self.data_range = data_range\n self.data_min = data_min\n return self\n\n def transform(self, X):\n \"\"\"Scaling features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.\n \"\"\"\n check_is_fitted(self, 'scale_')\n\n X = check_array(X, copy=self.copy, ensure_2d=False)\n X *= self.scale_\n X += self.min_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.\n \"\"\"\n check_is_fitted(self, 'scale_')\n\n X = check_array(X, copy=self.copy, ensure_2d=False)\n X -= self.min_\n X \/= self.scale_\n return X\n\n\ndef minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):\n \"\"\"Transforms features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) \/ (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n feature_range: tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n axis : int (0 by default)\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n \"\"\"\n s = MinMaxScaler(feature_range=feature_range, copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass StandardScaler(BaseEstimator, TransformerMixin):\n \"\"\"Standardize features by removing the mean and scaling to unit variance\n\n Centering and scaling happen independently on each feature by computing\n the relevant statistics on the samples in the training set. Mean and\n standard deviation are then stored to be used on later data using the\n `transform` method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators: they might behave badly if the\n individual feature do not more or less look like standard normally\n distributed data (e.g. Gaussian with 0 mean and unit variance).\n\n For instance many elements used in the objective function of\n a learning algorithm (such as the RBF kernel of Support Vector\n Machines or the L1 and L2 regularizers of linear models) assume that\n all features are centered around 0 and have variance in the same\n order. 
If a feature has a variance that is orders of magnitude larger\n that others, it might dominate the objective function and make the\n estimator unable to learn from other features correctly as expected.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n with_mean : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n Attributes\n ----------\n mean_ : array of floats with shape [n_features]\n The mean value for each feature in the training set.\n\n std_ : array of floats with shape [n_features]\n The standard deviation for each feature in the training set.\n Set to one if the standard deviation is zero for a given feature.\n\n See also\n --------\n :func:`sklearn.preprocessing.scale` to perform centering and\n scaling without using the ``Transformer`` object oriented API\n\n :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`\n to further remove the linear correlation across features.\n \"\"\"\n\n def __init__(self, copy=True, with_mean=True, with_std=True):\n self.with_mean = with_mean\n self.with_std = with_std\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the mean and std to be used for later scaling.\n\n Parameters\n ----------\n X : array-like or CSR matrix with shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, accept_sparse='csr', copy=self.copy,\n ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. See docstring for motivation and alternatives.\")\n self.mean_ = None\n\n if self.with_std:\n var = mean_variance_axis(X, axis=0)[1]\n self.std_ = np.sqrt(var)\n self.std_ = _handle_zeros_in_scale(self.std_)\n else:\n self.std_ = None\n return self\n else:\n self.mean_, self.std_ = _mean_and_std(\n X, axis=0, with_mean=self.with_mean, with_std=self.with_std)\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Perform standardization by centering and scaling\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to scale along the features axis.\n \"\"\"\n check_is_fitted(self, 'std_')\n\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr', copy=copy,\n ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. 
See docstring for motivation and alternatives.\")\n if self.std_ is not None:\n inplace_column_scale(X, 1 \/ self.std_)\n else:\n if self.with_mean:\n X -= self.mean_\n if self.with_std:\n X \/= self.std_\n return X\n\n def inverse_transform(self, X, copy=None):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to scale along the features axis.\n \"\"\"\n check_is_fitted(self, 'std_')\n\n copy = copy if copy is not None else self.copy\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot uncenter sparse matrices: pass `with_mean=False` \"\n \"instead See docstring for motivation and alternatives.\")\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n if self.std_ is not None:\n inplace_column_scale(X, self.std_)\n else:\n X = np.asarray(X)\n if copy:\n X = X.copy()\n if self.with_std:\n X *= self.std_\n if self.with_mean:\n X += self.mean_\n return X\n\n\nclass MaxAbsScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scale each feature by its maximum absolute value.\n\n This estimator scales and translates each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0. It does not shift\/center the data, and\n thus does not destroy any sparsity.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n\n Attributes\n ----------\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n \"\"\"\n\n def __init__(self, copy=True):\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n mins, maxs = min_max_axis(X, axis=0)\n scales = np.maximum(np.abs(mins), np.abs(maxs))\n else:\n scales = np.abs(X).max(axis=0)\n scales = np.array(scales)\n scales = scales.reshape(-1)\n self.scale_ = _handle_zeros_in_scale(scales)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Scale the data\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data that should be scaled.\n \"\"\"\n check_is_fitted(self, 'scale_')\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if X.shape[0] == 1:\n inplace_row_scale(X, 1.0 \/ self.scale_)\n else:\n inplace_column_scale(X, 1.0 \/ self.scale_)\n else:\n X \/= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data that should be transformed back.\n \"\"\"\n check_is_fitted(self, 'scale_')\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if X.shape[0] == 1:\n inplace_row_scale(X, self.scale_)\n else:\n inplace_column_scale(X, self.scale_)\n else:\n X *= self.scale_\n return X\n\n\ndef maxabs_scale(X, 
axis=0, copy=True):\n \"\"\"Scale each feature to the [-1, 1] range without breaking the sparsity.\n\n This estimator scales each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n axis : int (0 by default)\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n \"\"\"\n s = MaxAbsScaler(copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass RobustScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scale features using statistics that are robust to outliers.\n\n This Scaler removes the median and scales the data according to\n the Interquartile Range (IQR). The IQR is the range between the 1st\n quartile (25th quantile) and the 3rd quartile (75th quantile).\n\n Centering and scaling happen independently on each feature (or each\n sample, depending on the `axis` argument) by computing the relevant\n statistics on the samples in the training set. Median and interquartile\n range are then stored to be used on later data using the `transform`\n method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators. Typically this is done by removing the mean\n and scaling to unit variance. However, outliers can often influence the\n sample mean \/ variance in a negative way. In such cases, the median and\n the interquartile range often give better results.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n with_centering : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_scaling : boolean, True by default\n If True, scale the data to interquartile range.\n\n copy : boolean, optional, default is True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. 
if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n Attributes\n ----------\n center_ : array of floats\n The median value for each feature in the training set.\n\n scale_ : array of floats\n The (scaled) interquartile range for each feature in the training set.\n\n See also\n --------\n :class:`sklearn.preprocessing.StandardScaler` to perform centering\n and scaling using mean and variance.\n\n :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`\n to further remove the linear correlation across features.\n\n Notes\n -----\n See examples\/preprocessing\/plot_robust_scaling.py for an example.\n\n http:\/\/en.wikipedia.org\/wiki\/Median_(statistics)\n http:\/\/en.wikipedia.org\/wiki\/Interquartile_range\n \"\"\"\n\n def __init__(self, with_centering=True, with_scaling=True, copy=True):\n self.with_centering = with_centering\n self.with_scaling = with_scaling\n self.copy = copy\n\n def _check_array(self, X, copy):\n \"\"\"Makes sure centering is not enabled for sparse matrices.\"\"\"\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_centering:\n raise ValueError(\n \"Cannot center sparse matrices: use `with_centering=False`\"\n \" instead. See docstring for motivation and alternatives.\")\n return X\n\n def fit(self, X, y=None):\n \"\"\"Compute the median and quantiles to be used for scaling.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to compute the median and quantiles\n used for later scaling along the features axis.\n \"\"\"\n if sparse.issparse(X):\n raise TypeError(\"RobustScaler cannot be fitted on sparse inputs\")\n\n X = self._check_array(X, self.copy)\n if self.with_centering:\n self.center_ = np.median(X, axis=0)\n\n if self.with_scaling:\n q = np.percentile(X, (25, 75), axis=0)\n self.scale_ = (q[1] - q[0])\n self.scale_ = _handle_zeros_in_scale(self.scale_)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Center and scale the data\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data used to scale along the specified axis.\n \"\"\"\n if self.with_centering:\n check_is_fitted(self, 'center_')\n if self.with_scaling:\n check_is_fitted(self, 'scale_')\n X = self._check_array(X, self.copy)\n if sparse.issparse(X):\n if self.with_scaling:\n if X.shape[0] == 1:\n inplace_row_scale(X, 1.0 \/ self.scale_)\n elif self.axis == 0:\n inplace_column_scale(X, 1.0 \/ self.scale_)\n else:\n if self.with_centering:\n X -= self.center_\n if self.with_scaling:\n X \/= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data used to scale along the specified axis.\n \"\"\"\n if self.with_centering:\n check_is_fitted(self, 'center_')\n if self.with_scaling:\n check_is_fitted(self, 'scale_')\n X = self._check_array(X, self.copy)\n if sparse.issparse(X):\n if self.with_scaling:\n if X.shape[0] == 1:\n inplace_row_scale(X, self.scale_)\n else:\n inplace_column_scale(X, self.scale_)\n else:\n if self.with_scaling:\n X *= self.scale_\n if self.with_centering:\n X += self.center_\n return X\n\n\ndef robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):\n \"\"\"Standardize a dataset along any axis\n\n Center to the median and component wise scale\n according to the interquartile range.\n\n Read more in the 
:ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like.\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the medians and IQR along. If 0,\n independently scale each feature, otherwise (if 1) scale\n each sample.\n\n with_centering : boolean, True by default\n If True, center the data before scaling.\n\n with_scaling : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default is True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_centering=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he\/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n :class:`sklearn.preprocessing.RobustScaler` to perform centering and\n scaling using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,\n copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass PolynomialFeatures(BaseEstimator, TransformerMixin):\n \"\"\"Generate polynomial and interaction features.\n\n Generate a new feature matrix consisting of all polynomial combinations\n of the features with degree less than or equal to the specified degree.\n For example, if an input sample is two dimensional and of the form\n [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].\n\n Parameters\n ----------\n degree : integer\n The degree of the polynomial features. Default = 2.\n\n interaction_only : boolean, default = False\n If true, only interaction features are produced: features that are\n products of at most ``degree`` *distinct* input features (so not\n ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).\n\n include_bias : boolean\n If True (default), then include a bias column, the feature in which\n all polynomial powers are zero (i.e. a column of ones - acts as an\n intercept term in a linear model).\n\n Examples\n --------\n >>> X = np.arange(6).reshape(3, 2)\n >>> X\n array([[0, 1],\n [2, 3],\n [4, 5]])\n >>> poly = PolynomialFeatures(2)\n >>> poly.fit_transform(X)\n array([[ 1, 0, 1, 0, 0, 1],\n [ 1, 2, 3, 4, 6, 9],\n [ 1, 4, 5, 16, 20, 25]])\n >>> poly = PolynomialFeatures(interaction_only=True)\n >>> poly.fit_transform(X)\n array([[ 1, 0, 1, 0],\n [ 1, 2, 3, 6],\n [ 1, 4, 5, 20]])\n\n Attributes\n ----------\n powers_ : array, shape (n_input_features, n_output_features)\n powers_[i, j] is the exponent of the jth input in the ith output.\n\n n_input_features_ : int\n The total number of input features.\n\n n_output_features_ : int\n The total number of polynomial output features. The number of output\n features is computed by iterating over all suitably sized combinations\n of input features.\n\n Notes\n -----\n Be aware that the number of features in the output array scales\n polynomially in the number of features of the input array, and\n exponentially in the degree. 
High degrees can cause overfitting.\n\n See :ref:`examples\/linear_model\/plot_polynomial_interpolation.py\n `\n \"\"\"\n def __init__(self, degree=2, interaction_only=False, include_bias=True):\n self.degree = degree\n self.interaction_only = interaction_only\n self.include_bias = include_bias\n\n @staticmethod\n def _combinations(n_features, degree, interaction_only, include_bias):\n comb = (combinations if interaction_only else combinations_w_r)\n start = int(not include_bias)\n return chain.from_iterable(comb(range(n_features), i)\n for i in range(start, degree + 1))\n\n @property\n def powers_(self):\n check_is_fitted(self, 'n_input_features_')\n\n combinations = self._combinations(self.n_input_features_, self.degree,\n self.interaction_only,\n self.include_bias)\n return np.vstack(np.bincount(c, minlength=self.n_input_features_)\n for c in combinations)\n\n def fit(self, X, y=None):\n \"\"\"\n Compute number of output features.\n \"\"\"\n n_samples, n_features = check_array(X).shape\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n self.n_input_features_ = n_features\n self.n_output_features_ = sum(1 for _ in combinations)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Transform data to polynomial features\n\n Parameters\n ----------\n X : array with shape [n_samples, n_features]\n The data to transform, row by row.\n\n Returns\n -------\n XP : np.ndarray shape [n_samples, NP]\n The matrix of features, where NP is the number of polynomial\n features generated from the combination of inputs.\n \"\"\"\n check_is_fitted(self, ['n_input_features_', 'n_output_features_'])\n\n X = check_array(X)\n n_samples, n_features = X.shape\n\n if n_features != self.n_input_features_:\n raise ValueError(\"X shape does not match training shape\")\n\n # allocate output data\n XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)\n\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n for i, c in enumerate(combinations):\n XP[:, i] = X[:, c].prod(1)\n\n return XP\n\n\ndef normalize(X, norm='l2', axis=1, copy=True):\n \"\"\"Scale input vectors individually to unit norm (vector length).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to normalize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : 0 or 1, optional (1 by default)\n axis used to normalize the data along. If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n See also\n --------\n :class:`sklearn.preprocessing.Normalizer` to perform normalization\n using the ``Transformer`` API (e.g. 
as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n if norm not in ('l1', 'l2', 'max'):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = 'csc'\n elif axis == 1:\n sparse_format = 'csr'\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,\n estimator='the normalize function', dtype=FLOAT_DTYPES)\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if norm == 'l1':\n inplace_csr_row_normalize_l1(X)\n elif norm == 'l2':\n inplace_csr_row_normalize_l2(X)\n elif norm == 'max':\n _, norms = min_max_axis(X, 1)\n norms = norms.repeat(np.diff(X.indptr))\n mask = norms != 0\n X.data[mask] \/= norms[mask]\n else:\n if norm == 'l1':\n norms = np.abs(X).sum(axis=1)\n elif norm == 'l2':\n norms = row_norms(X)\n elif norm == 'max':\n norms = np.max(X, axis=1)\n norms = _handle_zeros_in_scale(norms)\n X \/= norms[:, np.newaxis]\n\n if axis == 0:\n X = X.T\n\n return X\n\n\nclass Normalizer(BaseEstimator, TransformerMixin):\n \"\"\"Normalize samples individually to unit norm.\n\n Each sample (i.e. each row of the data matrix) with at least one\n non zero component is rescaled independently of other samples so\n that its norm (l1 or l2) equals one.\n\n This transformer is able to work both with dense numpy arrays and\n scipy.sparse matrix (use CSR format if you want to avoid the burden of\n a copy \/ conversion).\n\n Scaling inputs to unit norms is a common operation for text\n classification or clustering for instance. For instance the dot\n product of two l2-normalized TF-IDF vectors is the cosine similarity\n of the vectors and is the base similarity metric for the Vector\n Space Model commonly used by the Information Retrieval community.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample.\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix).\n\n Notes\n -----\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n\n See also\n --------\n :func:`sklearn.preprocessing.normalize` equivalent function\n without the object oriented API\n \"\"\"\n\n def __init__(self, norm='l2', copy=True):\n self.norm = norm\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Scale each non zero row of X to unit norm\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to normalize, row by row. 
scipy.sparse matrices should be\n in CSR format to avoid an un-necessary copy.\n \"\"\"\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr')\n return normalize(X, norm=self.norm, axis=1, copy=copy)\n\n\ndef binarize(X, threshold=0.0, copy=True):\n \"\"\"Boolean thresholding of array-like or scipy.sparse matrix\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR or CSC format to avoid an\n un-necessary copy.\n\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n set to False to perform inplace binarization and avoid a copy\n (if the input is already a numpy array or a scipy.sparse CSR \/ CSC\n matrix and if axis is 1).\n\n See also\n --------\n :class:`sklearn.preprocessing.Binarizer` to perform binarization\n using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)\n if sparse.issparse(X):\n if threshold < 0:\n raise ValueError('Cannot binarize a sparse matrix with threshold '\n '< 0')\n cond = X.data > threshold\n not_cond = np.logical_not(cond)\n X.data[cond] = 1\n X.data[not_cond] = 0\n X.eliminate_zeros()\n else:\n cond = X > threshold\n not_cond = np.logical_not(cond)\n X[cond] = 1\n X[not_cond] = 0\n return X\n\n\nclass Binarizer(BaseEstimator, TransformerMixin):\n \"\"\"Binarize data (set feature values to 0 or 1) according to a threshold\n\n Values greater than the threshold map to 1, while values less than\n or equal to the threshold map to 0. With the default threshold of 0,\n only positive values map to 1.\n\n Binarization is a common operation on text count data where the\n analyst can decide to only consider the presence or absence of a\n feature rather than a quantified number of occurrences for instance.\n\n It can also be used as a pre-processing step for estimators that\n consider boolean random variables (e.g. 
modelled using the Bernoulli\n distribution in a Bayesian setting).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n set to False to perform inplace binarization and avoid a copy (if\n the input is already a numpy array or a scipy.sparse CSR matrix).\n\n Notes\n -----\n If the input is a sparse matrix, only the non-zero values are subject\n to update by the Binarizer class.\n\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n \"\"\"\n\n def __init__(self, threshold=0.0, copy=True):\n self.threshold = threshold\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n check_array(X, accept_sparse='csr')\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Binarize each element of X\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n \"\"\"\n copy = copy if copy is not None else self.copy\n return binarize(X, threshold=self.threshold, copy=copy)\n\n\nclass KernelCenterer(BaseEstimator, TransformerMixin):\n \"\"\"Center a kernel matrix\n\n Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a\n function mapping x to a Hilbert space. KernelCenterer centers (i.e.,\n normalize to have zero mean) the data without explicitly computing phi(x).\n It is equivalent to centering phi(x) with\n sklearn.preprocessing.StandardScaler(with_std=False).\n\n Read more in the :ref:`User Guide `.\n \"\"\"\n\n def fit(self, K, y=None):\n \"\"\"Fit KernelCenterer\n\n Parameters\n ----------\n K : numpy array of shape [n_samples, n_samples]\n Kernel matrix.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n K = check_array(K)\n n_samples = K.shape[0]\n self.K_fit_rows_ = np.sum(K, axis=0) \/ n_samples\n self.K_fit_all_ = self.K_fit_rows_.sum() \/ n_samples\n return self\n\n def transform(self, K, y=None, copy=True):\n \"\"\"Center kernel matrix.\n\n Parameters\n ----------\n K : numpy array of shape [n_samples1, n_samples2]\n Kernel matrix.\n\n copy : boolean, optional, default True\n Set to False to perform inplace computation.\n\n Returns\n -------\n K_new : numpy array of shape [n_samples1, n_samples2]\n \"\"\"\n check_is_fitted(self, 'K_fit_all_')\n\n K = check_array(K)\n if copy:\n K = K.copy()\n\n K_pred_cols = (np.sum(K, axis=1) \/\n self.K_fit_rows_.shape[0])[:, np.newaxis]\n\n K -= self.K_fit_rows_\n K -= K_pred_cols\n K += self.K_fit_all_\n\n return K\n\n\ndef add_dummy_feature(X, value=1.0):\n \"\"\"Augment dataset with an additional dummy feature.\n\n This is useful for fitting an intercept term with implementations which\n cannot otherwise fit it directly.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n Data.\n\n value : float\n Value to use for the dummy feature.\n\n Returns\n -------\n\n X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]\n Same data with dummy feature added as first column.\n\n Examples\n --------\n\n >>> from 
sklearn.preprocessing import add_dummy_feature\n >>> add_dummy_feature([[0, 1], [1, 0]])\n array([[ 1., 0., 1.],\n [ 1., 1., 0.]])\n \"\"\"\n X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])\n n_samples, n_features = X.shape\n shape = (n_samples, n_features + 1)\n if sparse.issparse(X):\n if sparse.isspmatrix_coo(X):\n # Shift columns to the right.\n col = X.col + 1\n # Column indices of dummy feature are 0 everywhere.\n col = np.concatenate((np.zeros(n_samples), col))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n row = np.concatenate((np.arange(n_samples), X.row))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.ones(n_samples) * value, X.data))\n return sparse.coo_matrix((data, (row, col)), shape)\n elif sparse.isspmatrix_csc(X):\n # Shift index pointers since we need to add n_samples elements.\n indptr = X.indptr + n_samples\n # indptr[0] must be 0.\n indptr = np.concatenate((np.array([0]), indptr))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n indices = np.concatenate((np.arange(n_samples), X.indices))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.ones(n_samples) * value, X.data))\n return sparse.csc_matrix((data, indices, indptr), shape)\n else:\n klass = X.__class__\n return klass(add_dummy_feature(X.tocoo(), value))\n else:\n return np.hstack((np.ones((n_samples, 1)) * value, X))\n\n\ndef _transform_selected(X, transform, selected=\"all\", copy=True):\n \"\"\"Apply a transform function to portion of selected features\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Dense array or sparse matrix.\n\n transform : callable\n A callable transform(X) -> X_transformed\n\n copy : boolean, optional\n Copy X even if it could be avoided.\n\n selected: \"all\" or array of indices or mask\n Specify which features to apply the transform to.\n\n Returns\n -------\n X : array or sparse matrix, shape=(n_samples, n_features_new)\n \"\"\"\n if selected == \"all\":\n return transform(X)\n\n X = check_array(X, accept_sparse='csc', copy=copy)\n\n if len(selected) == 0:\n return X\n\n n_features = X.shape[1]\n ind = np.arange(n_features)\n sel = np.zeros(n_features, dtype=bool)\n sel[np.asarray(selected)] = True\n not_sel = np.logical_not(sel)\n n_selected = np.sum(sel)\n\n if n_selected == 0:\n # No features selected.\n return X\n elif n_selected == n_features:\n # All features selected.\n return transform(X)\n else:\n X_sel = transform(X[:, ind[sel]])\n X_not_sel = X[:, ind[not_sel]]\n\n if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):\n return sparse.hstack((X_sel, X_not_sel))\n else:\n return np.hstack((X_sel, X_not_sel))\n\n\nclass OneHotEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Encode categorical integer features using a one-hot aka one-of-K scheme.\n\n The input to this transformer should be a matrix of integers, denoting\n the values taken on by categorical (discrete) features. The output will be\n a sparse matrix where each column corresponds to one possible value of one\n feature. 
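# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): illustrative use of the
# categorical_features argument described below, with made-up values. Only
# column 0 is one-hot encoded; the remaining (non-categorical) column is
# stacked, untouched, to the right of the encoded block.
from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(categorical_features=[0], sparse=False)
enc.fit_transform([[0, 7.2], [1, 3.5], [2, 1.0]])
# -> array([[ 1. ,  0. ,  0. ,  7.2],
#           [ 0. ,  1. ,  0. ,  3.5],
#           [ 0. ,  0. ,  1. ,  1. ]])
# ---------------------------------------------------------------------------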
It is assumed that input features take on values in the range\n [0, n_values).\n\n This encoding is needed for feeding categorical data to many scikit-learn\n estimators, notably linear models and SVMs with the standard kernels.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_values : 'auto', int or array of ints\n Number of values per feature.\n\n - 'auto' : determine value range from training data.\n - int : maximum value for all features.\n - array : maximum value per feature.\n\n categorical_features: \"all\" or array of indices or mask\n Specify what features are treated as categorical.\n\n - 'all' (default): All features are treated as categorical.\n - array of indices: Array of categorical feature indices.\n - mask: Array of length n_features and with dtype=bool.\n\n Non-categorical features are always stacked to the right of the matrix.\n\n dtype : number type, default=np.float\n Desired dtype of output.\n\n sparse : boolean, default=True\n Will return sparse matrix if set True else will return an array.\n\n handle_unknown : str, 'error' or 'ignore'\n Whether to raise an error or ignore if a unknown categorical feature is\n present during transform.\n\n Attributes\n ----------\n active_features_ : array\n Indices for active features, meaning values that actually occur\n in the training set. Only available when n_values is ``'auto'``.\n\n feature_indices_ : array of shape (n_features,)\n Indices to feature ranges.\n Feature ``i`` in the original data is mapped to features\n from ``feature_indices_[i]`` to ``feature_indices_[i+1]``\n (and then potentially masked by `active_features_` afterwards)\n\n n_values_ : array of shape (n_features,)\n Maximum number of values per feature.\n\n Examples\n --------\n Given a dataset with three features and two samples, we let the encoder\n find the maximum value per feature and transform the data to a binary\n one-hot encoding.\n\n >>> from sklearn.preprocessing import OneHotEncoder\n >>> enc = OneHotEncoder()\n >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \\\n[1, 0, 2]]) # doctest: +ELLIPSIS\n OneHotEncoder(categorical_features='all', dtype=<... 
'float'>,\n handle_unknown='error', n_values='auto', sparse=True)\n >>> enc.n_values_\n array([2, 3, 4])\n >>> enc.feature_indices_\n array([0, 2, 5, 9])\n >>> enc.transform([[0, 1, 1]]).toarray()\n array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])\n\n See also\n --------\n sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of\n dictionary items (also handles string-valued features).\n sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot\n encoding of dictionary items or strings.\n \"\"\"\n def __init__(self, n_values=\"auto\", categorical_features=\"all\",\n dtype=np.float, sparse=True, handle_unknown='error'):\n self.n_values = n_values\n self.categorical_features = categorical_features\n self.dtype = dtype\n self.sparse = sparse\n self.handle_unknown = handle_unknown\n\n def fit(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_feature)\n Input array of type int.\n\n Returns\n -------\n self\n \"\"\"\n self.fit_transform(X)\n return self\n\n def _fit_transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"X needs to contain only non-negative integers.\")\n n_samples, n_features = X.shape\n if self.n_values == 'auto':\n n_values = np.max(X, axis=0) + 1\n elif isinstance(self.n_values, numbers.Integral):\n if (np.max(X, axis=0) >= self.n_values).any():\n raise ValueError(\"Feature out of bounds for n_values=%d\"\n % self.n_values)\n n_values = np.empty(n_features, dtype=np.int)\n n_values.fill(self.n_values)\n else:\n try:\n n_values = np.asarray(self.n_values, dtype=int)\n except (ValueError, TypeError):\n raise TypeError(\"Wrong type for parameter `n_values`. Expected\"\n \" 'auto', int or array of ints, got %r\"\n % type(X))\n if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:\n raise ValueError(\"Shape mismatch: if n_values is an array,\"\n \" it has to be of shape (n_features,).\")\n\n self.n_values_ = n_values\n n_values = np.hstack([[0], n_values])\n indices = np.cumsum(n_values)\n self.feature_indices_ = indices\n\n column_indices = (X + indices[:-1]).ravel()\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)\n data = np.ones(n_samples * n_features)\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n\n if self.n_values == 'auto':\n mask = np.array(out.sum(axis=0)).ravel() != 0\n active_features = np.where(mask)[0]\n out = out[:, active_features]\n self.active_features_ = active_features\n\n return out if self.sparse else out.toarray()\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X, then transform X.\n\n Equivalent to self.fit(X).transform(X), but more convenient and more\n efficient. 
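# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the equivalence stated
# above, checked on made-up data. Both spellings produce the same encoding.
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([[0, 1], [1, 0], [1, 1]])
a = OneHotEncoder(sparse=False).fit_transform(X)
b = OneHotEncoder(sparse=False).fit(X).transform(X)
assert np.array_equal(a, b)
# ---------------------------------------------------------------------------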
See fit for the parameters, transform for the return value.\n \"\"\"\n return _transform_selected(X, self._fit_transform,\n self.categorical_features, copy=True)\n\n def _transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"X needs to contain only non-negative integers.\")\n n_samples, n_features = X.shape\n\n indices = self.feature_indices_\n if n_features != indices.shape[0] - 1:\n raise ValueError(\"X has different shape than during fitting.\"\n \" Expected %d, got %d.\"\n % (indices.shape[0] - 1, n_features))\n\n # We use only those catgorical features of X that are known using fit.\n # i.e lesser than n_values_ using mask.\n # This means, if self.handle_unknown is \"ignore\", the row_indices and\n # col_indices corresponding to the unknown categorical feature are\n # ignored.\n mask = (X < self.n_values_).ravel()\n if np.any(~mask):\n if self.handle_unknown not in ['error', 'ignore']:\n raise ValueError(\"handle_unknown should be either error or \"\n \"unknown got %s\" % self.handle_unknown)\n if self.handle_unknown == 'error':\n raise ValueError(\"unknown categorical feature present %s \"\n \"during transform.\" % X[~mask])\n\n column_indices = (X + indices[:-1]).ravel()[mask]\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(np.sum(mask))\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if self.n_values == 'auto':\n out = out[:, self.active_features_]\n\n return out if self.sparse else out.toarray()\n\n def transform(self, X):\n \"\"\"Transform X using one-hot encoding.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_features)\n Input array of type int.\n\n Returns\n -------\n X_out : sparse matrix if sparse=True else a 2-d array, dtype=int\n Transformed input.\n \"\"\"\n return _transform_selected(X, self._transform,\n self.categorical_features, copy=True)\n","license":"bsd-3-clause"} {"repo_name":"Tong-Chen\/scikit-learn","path":"sklearn\/cluster\/bicluster\/tests\/test_utils.py","copies":"10","size":"1427","content":"\"\"\"Tests for bicluster utilities.\"\"\"\n\nimport numpy as np\n\nfrom scipy.sparse import csr_matrix, issparse\n\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_true\n\nfrom sklearn.cluster.bicluster.utils import get_indicators\nfrom sklearn.cluster.bicluster.utils import get_shape\nfrom sklearn.cluster.bicluster.utils import get_submatrix\n\n\ndef test_get_indicators():\n rows = [2, 4, 5]\n columns = [0, 1, 3]\n shape = (6, 4)\n row_ind, col_ind = get_indicators(rows, columns, shape)\n assert_array_equal(row_ind, [False, False, True, False, True, True])\n assert_array_equal(col_ind, [True, True, False, True])\n\n\ndef test_get_shape():\n rows = [True, True, False, False]\n cols = [True, False, True, True]\n assert_equal(get_shape(rows, cols), (2, 3))\n\n\ndef test_get_submatrix():\n data = np.arange(20).reshape(5, 4)\n rows = [True, True, False, False, True]\n cols = [False, False, True, True]\n for X in (data, csr_matrix(data)):\n submatrix = get_submatrix(rows, cols, X)\n if issparse(submatrix):\n submatrix = submatrix.todense()\n assert_array_equal(submatrix, [[2, 3],\n [6, 7],\n [18, 19]])\n submatrix[:] = -1\n if issparse(X):\n X = X.todense()\n assert_true(np.all(X != -1))\n","license":"bsd-3-clause"} 
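A short aside on the handle_unknown behaviour of the OneHotEncoder shown earlier: with handle_unknown='ignore', a category value never seen during fit simply produces all-zero indicator columns instead of raising an error. A minimal sketch with made-up values:

    from sklearn.preprocessing import OneHotEncoder

    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit([[0], [1], [2]])       # the known categories are 0, 1 and 2
    enc.transform([[1], [5]])      # 5 never appeared during fit
    # -> array([[ 0.,  1.,  0.],
    #           [ 0.,  0.,  0.]])  # the unknown value maps to an all-zero row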
{"repo_name":"doutib\/lobpredict","path":"lobpredictrst\/execute_model.py","copies":"1","size":"5878","content":"import sys\nimport imp\nimport yaml\nimport csv\nimport pandas as pd\nimport re\nfrom rf import *\nfrom svm import *\nmodl = imp.load_source('read_model_yaml', 'read_model_yaml.py')\n\n# Parse the YAML file location as the first parameter\ninp_yaml = sys.argv[1]\n\ndef write_results_txt(filename, result):\n \"\"\"\n Write results into csv file.\n\n Parameters\n ----------\n filename : string\n filename to output the result\n labels : list\n labels for the results, i.e. names of parameters and metrics\n \"\"\"\n with open(filename, \"w\") as fp:\n for item in result:\n fp.write(\"%s\\n\\n\" % item)\n\n\ndef execute_model(inp_yaml):\n \"\"\"Apply trees in the forest to X, return leaf indices.\n Parameters\n ----------\n inp_yaml : A yaml file with model specifications\n\n Returns\n -------\n parameters_dict : A python dictionary with the model specifications\n to be used to encode metadata for the model\n and pass into specific model functions e.g. random\n forest\n \"\"\"\n\n # Read in and parse all parameters from the YAML file\n yaml_params = modl.read_model_yaml(inp_yaml)\n\n # Define output file name based on input\n folder_name = re.split(\"\/\", inp_yaml)[2]\n file_name = re.split(\"\/\", inp_yaml)[3][:-5]\n output_txt_file = 'data\/output\/' + folder_name + '\/' + file_name + '.txt'\n\n #-------------------------------------------------\n # Create Train and Test Datasets\n #-------------------------------------------------\n\n data_source_dir = yaml_params[\"data_source_dir\"]\n test_type = yaml_params[\"test_type\"]\n\n print('data source dir is: %s' % (data_source_dir))\n print('test type is: %s' % (test_type))\n\n if test_type == \"test\":\n train_ds_name = \"train.tar.gz\"\n test_ds_name = \"test.tar.gz\"\n elif test_type == \"validation\":\n train_ds_name = \"train_test.tar.gz\"\n test_ds_name = \"validation.tar.gz\"\n else:\n train_ds_name = \"train_test_validation.tar.gz\"\n test_ds_name = \"strategy_validation.tar.gz\"\n\n train_ds_ref = \"data\/output\/model_clean_data\/\" + data_source_dir + \"\/\" + train_ds_name\n test_ds_ref = \"data\/output\/model_clean_data\/\" + data_source_dir + \"\/\" + test_ds_name\n\n print('training dataset is: %s' % (train_ds_ref))\n print('test dataset is: %s' % (test_ds_ref))\n\n # Open test and train sets\n df_train = pd.read_csv(train_ds_ref\n , compression='gzip', index_col = None)\n df_test = pd.read_csv(test_ds_ref\n , compression='gzip', index_col = None)\n\n # Drop the first columns - they are not useful\n df_train_clean = df_train.iloc[:,1:]\n df_test_clean = df_test.iloc[:,1:]\n\n # Traning data column names - used for variale importance\n X_train_cols = list(df_train_clean.drop(['labels', 'index', 'Time'], axis=1).columns.values)\n\n # Define test\/training set\n X_train = np.array(df_train_clean.drop(['labels', 'index', 'Time'], axis = 1))\n Y_train = np.array(df_train_clean[['labels']])[:,0]\n X_test = np.array(df_test_clean.drop(['labels', 'index', 'Time'], axis = 1))\n Y_test = np.array(df_test_clean[['labels']])[:,0]\n\n\n #-------------------------------------------------\n # Run RF (RANDOM FOREST)\n #-------------------------------------------------\n\n if yaml_params[\"model_type\"] == \"RF\":\n\n # Extract the RF model variables from the YAML file\n n_estimators = yaml_params[\"parameters\"][\"n_estimators\"]\n criterion = yaml_params[\"parameters\"][\"criterion\"]\n max_features = 
yaml_params[\"parameters\"][\"max_features\"]\n max_depth = yaml_params[\"parameters\"][\"max_depth\"]\n n_jobs = yaml_params[\"parameters\"][\"n_jobs\"]\n\n print('number of trees is: %d' % (n_estimators))\n print('max depth is: %d' % (max_depth))\n\n print(\"running RF WITHOUT simulation...\")\n\n # Run simulation\n result = rf(X_train_cols = X_train_cols\n , X_train = X_train\n , Y_train = Y_train\n , X_test = X_test\n , Y_test = Y_test\n , n_estimators = n_estimators\n , criterion = criterion\n , max_features = max_features\n , max_depth = max_depth)\n\n print(\"finished - rf without simulation\")\n\n # Write into text file\n write_results_txt(output_txt_file, result)\n\n #-------------------------------------------------\n # Run SVM (SUPPORT VECTOR MACHINE)\n #-------------------------------------------------\n\n # Extract the SVM model variables from the YAML file\n if yaml_params[\"model_type\"] == \"SVM\":\n kernel = yaml_params[\"parameters\"][\"kernel\"]\n degree = yaml_params[\"parameters\"][\"degree\"]\n gamma = yaml_params[\"parameters\"][\"gamma\"]\n tol = yaml_params[\"parameters\"][\"tol\"]\n C = yaml_params[\"parameters\"][\"C\"]\n\n print('The value of C is: %.2f' % (C))\n\n print(\"running SVM WITHOUT simulation...\")\n\n # Run a single simulation\n result = svm(X_train = X_train\n , Y_train = Y_train\n , X_test = X_test\n , Y_test = Y_test\n , kernel = kernel\n , C = C\n , degree = degree\n , gamma = gamma\n , tol = tol\n , decision_function_shape='ovr')\n\n # Write into text file\n write_results_txt(output_txt_file, result)\n\n print(\"finished - SVM without simulation\")\n\n# Run the execute model code\nexecute_model(inp_yaml)\n","license":"isc"} {"repo_name":"hep-gc\/panda-autopyfactory","path":"bin\/factory.py","copies":"1","size":"6335","content":"#! \/usr\/bin\/env python\n#\n# Simple(ish) python condor_g factory for panda pilots\n#\n# $Id$\n#\n#\n# Copyright (C) 2007,2008,2009 Graeme Andrew Stewart\n#\n# This program is free software: you can redistribute it and\/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\nfrom optparse import OptionParser\nimport logging\nimport logging.handlers\nimport time\nimport os\nimport sys\nimport traceback\n\n# Need to set PANDA_URL_MAP before the Client module is loaded (which happens\n# when the Factory module is loaded). Unfortunately this means that logging\n# is not yet available.\nif not 'APF_NOSQUID' in os.environ:\n if not 'PANDA_URL_MAP' in os.environ:\n os.environ['PANDA_URL_MAP'] = 'CERN,http:\/\/pandaserver.cern.ch:25085\/server\/panda,https:\/\/pandaserver.cern.ch:25443\/server\/panda'\n print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL_MAP to %s' % os.environ['PANDA_URL_MAP']\n else:\n print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL_MAP set to %s. Not changed.' 
% os.environ['PANDA_URL_MAP']\n if not 'PANDA_URL' in os.environ:\n os.environ['PANDA_URL'] = 'http:\/\/pandaserver.cern.ch:25085\/server\/panda'\n print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL to %s' % os.environ['PANDA_URL']\n else:\n print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL set to %s. Not changed.' % os.environ['PANDA_URL']\nelse:\n print >>sys.stderr, 'FACTORY DEBUG: Found APF_NOSQUID set. Not changing\/setting panda client environment.'\n\n\nfrom autopyfactory.Factory import factory\nfrom autopyfactory.Exceptions import FactoryConfigurationFailure\n\ndef main():\n parser = OptionParser(usage='''%prog [OPTIONS]\n\n autopyfactory is an ATLAS pilot factory.\n\n This program is licenced under the GPL, as set out in LICENSE file.\n\n Author(s):\n Graeme A Stewart , Peter Love \n ''', version=\"%prog $Id$\")\n\n parser.add_option(\"--verbose\", \"--debug\", dest=\"logLevel\", default=logging.INFO,\n action=\"store_const\", const=logging.DEBUG, help=\"Set logging level to DEBUG [default INFO]\")\n parser.add_option(\"--quiet\", dest=\"logLevel\",\n action=\"store_const\", const=logging.WARNING, help=\"Set logging level to WARNING [default INFO]\")\n parser.add_option(\"--test\", \"--dry-run\", dest=\"dryRun\", default=False,\n action=\"store_true\", help=\"Dry run - supress job submission\")\n parser.add_option(\"--oneshot\", \"--one-shot\", dest=\"cyclesToDo\", default=0,\n action=\"store_const\", const=1, help=\"Run one cycle only\")\n parser.add_option(\"--cycles\", dest=\"cyclesToDo\",\n action=\"store\", type=\"int\", metavar=\"CYCLES\", help=\"Run CYCLES times, then exit [default infinite]\")\n parser.add_option(\"--sleep\", dest=\"sleepTime\", default=120,\n action=\"store\", type=\"int\", metavar=\"TIME\", help=\"Sleep TIME seconds between cycles [default %default]\")\n parser.add_option(\"--conf\", dest=\"confFiles\", default=\"factory.conf\",\n action=\"store\", metavar=\"FILE1[,FILE2,FILE3]\", help=\"Load configuration from FILEs (comma separated list)\")\n parser.add_option(\"--log\", dest=\"logfile\", default=\"syslog\", metavar=\"LOGFILE\", action=\"store\", \n help=\"Send logging output to LOGFILE or SYSLOG or stdout [default ]\")\n (options, args) = parser.parse_args()\n\n options.confFiles = options.confFiles.split(',')\n \n # Setup logging\n factoryLogger = logging.getLogger('main')\n if options.logfile == \"stdout\":\n logStream = logging.StreamHandler()\n elif options.logfile == 'syslog':\n logStream = logging.handlers.SysLogHandler('\/dev\/log')\n else:\n logStream = logging.handlers.RotatingFileHandler(filename=options.logfile, maxBytes=10000000, backupCount=5) \n\n formatter = logging.Formatter('%(asctime)s - %(name)s: %(levelname)s %(message)s')\n logStream.setFormatter(formatter)\n factoryLogger.addHandler(logStream)\n factoryLogger.setLevel(options.logLevel)\n\n factoryLogger.debug('logging initialised')\n \n # Main loop\n try:\n f = factory(factoryLogger, options.dryRun, options.confFiles)\n cyclesDone = 0\n while True:\n factoryLogger.info('\\nStarting factory cycle %d at %s', cyclesDone, time.asctime(time.localtime()))\n f.factorySubmitCycle(cyclesDone)\n factoryLogger.info('Factory cycle %d done' % cyclesDone)\n cyclesDone += 1\n if cyclesDone == options.cyclesToDo:\n break\n factoryLogger.info('Sleeping %ds' % options.sleepTime)\n time.sleep(options.sleepTime)\n f.updateConfig(cyclesDone)\n except KeyboardInterrupt:\n factoryLogger.info('Caught keyboard interrupt - exiting')\n except FactoryConfigurationFailure, errMsg:\n 
factoryLogger.error('Factory configuration failure: %s', errMsg)\n except ImportError, errorMsg:\n factoryLogger.error('Failed to import necessary python module: %s' % errorMsg)\n except:\n # TODO - make this a logger.exception() call\n factoryLogger.error('''Unexpected exception! There was an exception\n raised which the factory was not expecting and did not know how to\n handle. You may have discovered a new bug or an unforseen error\n condition. Please report this exception to Graeme\n . The factory will now re-raise this\n exception so that the python stack trace is printed, which will allow\n it to be debugged - please send output from this message\n onwards. Exploding in 5...4...3...2...1... Have a nice day!''')\n # The following line prints the exception to the logging module\n factoryLogger.error(traceback.format_exc(None))\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","license":"gpl-3.0"} {"repo_name":"gandalfcode\/gandalf","path":"tests\/paper_tests\/binaryorbit.py","copies":"1","size":"3711","content":"#==============================================================================\n# freefalltest.py\n# Run the freefall collapse test using initial conditions specified in the\n# file 'freefall.dat'.\n#==============================================================================\nfrom gandalf.analysis.facade import *\nfrom gandalf.analysis.data_fetcher import *\nfrom gandalf.analysis.compute import particle_data\nfrom gandalf.analysis.SimBuffer import SimBuffer, BufferException\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom matplotlib import rc\nfrom mpl_toolkits.axes_grid1 import AxesGrid\n\n\n\n#--------------------------------------------------------------------------------------------------\nrc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 16})\nrc('text', usetex=True)\n\n\n# Binary parameters\nm1 = 0.5\nm2 = 0.5\nabin = 1.0\nebin = 0.5\netot0 = -0.5*m1*m2\/abin\nperiod = 2.0*math.pi*math.sqrt(abin*abin*abin\/(m1 + m2))\n\nxmin = -0.6\nxmax = 2.1\nymin = -0.85\nymax = 0.85\nxsize = xmax - xmin\nysize = ymax - ymin\n\n\n\nCreateTimeData('x',particle_data,quantity='x')\nCreateTimeData('y',particle_data,quantity='y')\n\n\n# Leapfrog KDK\nkdksim = newsim('binaryorbit.dat')\nkdksim.SetParam('nbody','lfkdk')\nsetupsim()\nrun()\nx_kdk = get_time_data(\"t\",\"x\")\ny_kdk = get_time_data(\"t\",\"y\")\n\n\n# Leapfrog DKD\ndkdsim = newsim('binaryorbit.dat')\ndkdsim.SetParam('nbody','lfdkd')\nsetupsim()\nrun()\nx_dkd = get_time_data(\"t\",\"x\")\ny_dkd = get_time_data(\"t\",\"y\")\n\n\n# 4th-order Hermite\nhermite4sim = newsim('binaryorbit.dat')\nhermite4sim.SetParam('nbody','hermite4')\nsetupsim()\nrun()\nx_hermite4 = get_time_data(\"t\",\"x\")\ny_hermite4 = get_time_data(\"t\",\"y\")\n\n\n# 4th-order Hermite TS\nhermite4tssim = newsim('binaryorbit.dat')\nhermite4tssim.SetParam('nbody','hermite4ts')\nhermite4tssim.SetParam('Npec',5)\nsetupsim()\nrun()\nx_4ts = get_time_data(\"t\",\"x\")\ny_4ts = get_time_data(\"t\",\"y\")\n\n\n# 6th-order Hermite\n#hermite6tssim = newsim('binaryorbit.dat')\n#hermite6tssim.SetParam('nbody','hermite6ts')\n#hermite6tssim.SetParam('Npec',5)\n#setupsim()\n#run()\n#x_6ts = get_time_data(\"t\",\"x\")\n#y_6ts = get_time_data(\"t\",\"y\")\n\n\n\n# Create matplotlib figure object with shared x-axis\n#--------------------------------------------------------------------------------------------------\n#fig, axarr = plt.subplots(2, 1, sharex='col', sharey='row', figsize=(10,4))\nfig, axarr = plt.subplots(4, 1, 
figsize=(6,11), sharex='col', sharey='row')\nfig.subplots_adjust(hspace=0.001, wspace=0.001)\nfig.subplots_adjust(bottom=0.06, top=0.98, left=0.14, right=0.98)\n\naxarr[0].set_ylabel(r\"$y$\")\naxarr[0].set_ylim([ymin, ymax])\naxarr[0].set_xlim([xmin, xmax])\naxarr[0].plot(x_kdk.y_data, y_kdk.y_data, color=\"black\", linestyle='-', label='Leapfrog KDK', lw=1.0)\naxarr[0].text(xmin + 0.02*xsize, ymax - 0.1*ysize, \"(a) Leapfrog-KDK\", fontsize=12)\n\naxarr[1].set_ylabel(r\"$y$\")\naxarr[1].set_ylim([ymin, ymax])\naxarr[1].plot(x_dkd.y_data, y_dkd.y_data, color=\"black\", linestyle='-', label='Leapfrog DKD', lw=1.0)\naxarr[1].text(xmin + 0.02*xsize, ymax - 0.1*ysize, \"(b) Leapfrog-DKD\", fontsize=12)\n\naxarr[2].set_ylabel(r\"$y$\")\naxarr[2].set_ylim([ymin, ymax])\naxarr[2].plot(x_hermite4.y_data, y_hermite4.y_data, color=\"black\", linestyle='-', label='4H', lw=1.0)\naxarr[2].text(xmin + 0.02*xsize, ymax - 0.1*ysize, \"(c) 4th-order Hermite\", fontsize=12)\n\naxarr[3].set_xlabel(r\"$x$\")\naxarr[3].set_ylabel(r\"$y$\")\naxarr[3].set_ylim([ymin, ymax])\naxarr[3].plot(x_4ts.y_data, y_4ts.y_data, color=\"black\", linestyle='-', label='4TS', lw=1.0)\naxarr[3].text(xmin + 0.02*xsize, ymax - 0.1*ysize, \"(d) 4th-order Hermite TS\", fontsize=12)\n\n\nplt.show()\nfig.savefig('binaryorbit.pdf', dpi=50)\n\n\n# Prevent program from closing before showing plot window\nblock()\n","license":"gpl-2.0"} {"repo_name":"IssamLaradji\/scikit-learn","path":"sklearn\/linear_model\/ransac.py","copies":"16","size":"13870","content":"# coding: utf-8\n\n# Author: Johannes Sch\u00f6nberger\n#\n# License: BSD 3 clause\n\nimport numpy as np\n\nfrom ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone\nfrom ..utils import check_random_state, check_array, check_consistent_length\nfrom ..utils.random import sample_without_replacement\nfrom .base import LinearRegression\n\n\n_EPSILON = np.spacing(1)\n\n\ndef _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):\n \"\"\"Determine number trials such that at least one outlier-free subset is\n sampled for the given inlier\/outlier ratio.\n\n Parameters\n ----------\n n_inliers : int\n Number of inliers in the data.\n\n n_samples : int\n Total number of samples in the data.\n\n min_samples : int\n Minimum number of samples chosen randomly from original data.\n\n probability : float\n Probability (confidence) that one outlier-free sample is generated.\n\n Returns\n -------\n trials : int\n Number of trials.\n\n \"\"\"\n inlier_ratio = n_inliers \/ float(n_samples)\n nom = max(_EPSILON, 1 - probability)\n denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)\n if nom == 1:\n return 0\n if denom == 1:\n return float('inf')\n return abs(float(np.ceil(np.log(nom) \/ np.log(denom))))\n\n\nclass RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):\n \"\"\"RANSAC (RANdom SAmple Consensus) algorithm.\n\n RANSAC is an iterative algorithm for the robust estimation of parameters\n from a subset of inliers from the complete data set. 
More information can\n be found in the general documentation of linear models.\n\n A detailed description of the algorithm can be found in the documentation\n of the ``linear_model`` sub-package.\n\n Parameters\n ----------\n base_estimator : object, optional\n Base estimator object which implements the following methods:\n\n * `fit(X, y)`: Fit model to given training data and target values.\n * `score(X, y)`: Returns the mean accuracy on the given test data,\n which is used for the stop criterion defined by `stop_score`.\n Additionally, the score is used to decide which of two equally\n large consensus sets is chosen as the better one.\n\n If `base_estimator` is None, then\n ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for\n target values of dtype float.\n\n Note that the current implementation only supports regression\n estimators.\n\n min_samples : int (>= 1) or float ([0, 1]), optional\n Minimum number of samples chosen randomly from original data. Treated\n as an absolute number of samples for `min_samples >= 1`, treated as a\n relative number `ceil(min_samples * X.shape[0]`) for\n `min_samples < 1`. This is typically chosen as the minimal number of\n samples necessary to estimate the given `base_estimator`. By default a\n ``sklearn.linear_model.LinearRegression()`` estimator is assumed and\n `min_samples` is chosen as ``X.shape[1] + 1``.\n\n residual_threshold : float, optional\n Maximum residual for a data sample to be classified as an inlier.\n By default the threshold is chosen as the MAD (median absolute\n deviation) of the target values `y`.\n\n is_data_valid : callable, optional\n This function is called with the randomly selected data before the\n model is fitted to it: `is_data_valid(X, y)`. If its return value is\n False the current randomly chosen sub-sample is skipped.\n\n is_model_valid : callable, optional\n This function is called with the estimated model and the randomly\n selected data: `is_model_valid(model, X, y)`. If its return value is\n False the current randomly chosen sub-sample is skipped.\n Rejecting samples with this function is computationally costlier than\n with `is_data_valid`. `is_model_valid` should therefore only be used if\n the estimated model is needed for making the rejection decision.\n\n max_trials : int, optional\n Maximum number of iterations for random sample selection.\n\n stop_n_inliers : int, optional\n Stop iteration if at least this number of inliers are found.\n\n stop_score : float, optional\n Stop iteration if score is greater equal than this threshold.\n\n stop_probability : float in range [0, 1], optional\n RANSAC iteration stops if at least one outlier-free set of the training\n data is sampled in RANSAC. This requires to generate at least N\n samples (iterations)::\n\n N >= log(1 - probability) \/ log(1 - e**m)\n\n where the probability (confidence) is typically set to high value such\n as 0.99 (the default) and e is the current fraction of inliers w.r.t.\n the total number of samples.\n\n residual_metric : callable, optional\n Metric to reduce the dimensionality of the residuals to 1 for\n multi-dimensional target values ``y.shape[1] > 1``. By default the sum\n of absolute differences is used::\n\n lambda dy: np.sum(np.abs(dy), axis=1)\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. 
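# ---------------------------------------------------------------------------
# Editor's note (not part of the original docstring): a tiny worked instance of
# the stopping criterion quoted under ``stop_probability`` above, with made-up
# numbers. For an inlier fraction e = 0.5, a minimal sample size m = 2 and a
# desired confidence of 0.99:
#
#     N >= log(1 - 0.99) / log(1 - 0.5**2)  ~  16.0,
#
# so about 17 random subsets suffice; _dynamic_max_trials above returns exactly
# this bound, rounded up.
import numpy as np
np.ceil(np.log(1 - 0.99) / np.log(1 - 0.5 ** 2))   # -> 17.0
# ---------------------------------------------------------------------------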
Defaults to the global numpy random\n number generator.\n\n Attributes\n ----------\n estimator_ : object\n Best fitted model (copy of the `base_estimator` object).\n\n n_trials_ : int\n Number of random selection trials until one of the stop criteria is\n met. It is always ``<= max_trials``.\n\n inlier_mask_ : bool array of shape [n_samples]\n Boolean mask of inliers classified as ``True``.\n\n References\n ----------\n .. [1] http:\/\/en.wikipedia.org\/wiki\/RANSAC\n .. [2] http:\/\/www.cs.columbia.edu\/~belhumeur\/courses\/compPhoto\/ransac.pdf\n .. [3] http:\/\/www.bmva.org\/bmvc\/2009\/Papers\/Paper355\/Paper355.pdf\n \"\"\"\n\n def __init__(self, base_estimator=None, min_samples=None,\n residual_threshold=None, is_data_valid=None,\n is_model_valid=None, max_trials=100,\n stop_n_inliers=np.inf, stop_score=np.inf,\n stop_probability=0.99, residual_metric=None,\n random_state=None):\n\n self.base_estimator = base_estimator\n self.min_samples = min_samples\n self.residual_threshold = residual_threshold\n self.is_data_valid = is_data_valid\n self.is_model_valid = is_model_valid\n self.max_trials = max_trials\n self.stop_n_inliers = stop_n_inliers\n self.stop_score = stop_score\n self.stop_probability = stop_probability\n self.residual_metric = residual_metric\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"Fit estimator using RANSAC algorithm.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape [n_samples, n_features]\n Training data.\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values.\n\n Raises\n ------\n ValueError\n If no valid consensus set could be found. This occurs if\n `is_data_valid` and `is_model_valid` return False for all\n `max_trials` randomly chosen sub-samples.\n\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n y = check_array(y, ensure_2d=False)\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n check_consistent_length(X, y)\n\n if self.base_estimator is not None:\n base_estimator = clone(self.base_estimator)\n else:\n base_estimator = LinearRegression()\n\n if self.min_samples is None:\n # assume linear model by default\n min_samples = X.shape[1] + 1\n elif 0 < self.min_samples < 1:\n min_samples = np.ceil(self.min_samples * X.shape[0])\n elif self.min_samples >= 1:\n if self.min_samples % 1 != 0:\n raise ValueError(\"Absolute number of samples must be an \"\n \"integer value.\")\n min_samples = self.min_samples\n else:\n raise ValueError(\"Value for `min_samples` must be scalar and \"\n \"positive.\")\n if min_samples > X.shape[0]:\n raise ValueError(\"`min_samples` may not be larger than number \"\n \"of samples ``X.shape[0]``.\")\n\n if self.stop_probability < 0 or self.stop_probability > 1:\n raise ValueError(\"`stop_probability` must be in range [0, 1].\")\n\n if self.residual_threshold is None:\n # MAD (median absolute deviation)\n residual_threshold = np.median(np.abs(y - np.median(y)))\n else:\n residual_threshold = self.residual_threshold\n\n if self.residual_metric is None:\n residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)\n else:\n residual_metric = self.residual_metric\n\n random_state = check_random_state(self.random_state)\n\n try: # Not all estimator accept a random_state\n base_estimator.set_params(random_state=random_state)\n except ValueError:\n pass\n\n n_inliers_best = 0\n score_best = np.inf\n inlier_mask_best = None\n X_inlier_best = None\n y_inlier_best = None\n\n # number of data samples\n n_samples = X.shape[0]\n sample_idxs = np.arange(n_samples)\n\n n_samples, _ = 
X.shape\n\n for self.n_trials_ in range(1, self.max_trials + 1):\n\n # choose random sample set\n subset_idxs = sample_without_replacement(n_samples, min_samples,\n random_state=random_state)\n X_subset = X[subset_idxs]\n y_subset = y[subset_idxs]\n\n # check if random sample set is valid\n if (self.is_data_valid is not None\n and not self.is_data_valid(X_subset, y_subset)):\n continue\n\n # fit model for current random sample set\n base_estimator.fit(X_subset, y_subset)\n\n # check if estimated model is valid\n if (self.is_model_valid is not None and not\n self.is_model_valid(base_estimator, X_subset, y_subset)):\n continue\n\n # residuals of all data for current random sample model\n y_pred = base_estimator.predict(X)\n if y_pred.ndim == 1:\n y_pred = y_pred[:, None]\n\n residuals_subset = residual_metric(y_pred - y)\n\n # classify data into inliers and outliers\n inlier_mask_subset = residuals_subset < residual_threshold\n n_inliers_subset = np.sum(inlier_mask_subset)\n\n # less inliers -> skip current random sample\n if n_inliers_subset < n_inliers_best:\n continue\n\n # extract inlier data set\n inlier_idxs_subset = sample_idxs[inlier_mask_subset]\n X_inlier_subset = X[inlier_idxs_subset]\n y_inlier_subset = y[inlier_idxs_subset]\n\n # score of inlier data set\n score_subset = base_estimator.score(X_inlier_subset,\n y_inlier_subset)\n\n # same number of inliers but worse score -> skip current random\n # sample\n if (n_inliers_subset == n_inliers_best\n and score_subset < score_best):\n continue\n\n # save current random sample as best sample\n n_inliers_best = n_inliers_subset\n score_best = score_subset\n inlier_mask_best = inlier_mask_subset\n X_inlier_best = X_inlier_subset\n y_inlier_best = y_inlier_subset\n\n # break if sufficient number of inliers or score is reached\n if (n_inliers_best >= self.stop_n_inliers\n or score_best >= self.stop_score\n or self.n_trials_\n >= _dynamic_max_trials(n_inliers_best, n_samples,\n min_samples,\n self.stop_probability)):\n break\n\n # if none of the iterations met the required criteria\n if inlier_mask_best is None:\n raise ValueError(\n \"RANSAC could not find valid consensus set, because\"\n \" either the `residual_threshold` rejected all the samples or\"\n \" `is_data_valid` and `is_model_valid` returned False for all\"\n \" `max_trials` randomly \"\"chosen sub-samples. 
Consider \"\n \"relaxing the \"\"constraints.\")\n\n # estimate final model using all inliers\n base_estimator.fit(X_inlier_best, y_inlier_best)\n\n self.estimator_ = base_estimator\n self.inlier_mask_ = inlier_mask_best\n return self\n\n def predict(self, X):\n \"\"\"Predict using the estimated model.\n\n This is a wrapper for `estimator_.predict(X)`.\n\n Parameters\n ----------\n X : numpy array of shape [n_samples, n_features]\n\n Returns\n -------\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Returns predicted values.\n \"\"\"\n return self.estimator_.predict(X)\n\n def score(self, X, y):\n \"\"\"Returns the score of the prediction.\n\n This is a wrapper for `estimator_.score(X, y)`.\n\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples, n_features]\n Training data.\n\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Target values.\n\n Returns\n -------\n z : float\n Score of the prediction.\n \"\"\"\n return self.estimator_.score(X, y)\n","license":"bsd-3-clause"} {"repo_name":"hilaskis\/UAV_MissionPlanner","path":"Lib\/site-packages\/numpy\/linalg\/linalg.py","copies":"53","size":"61098","content":"\"\"\"Lite version of scipy.linalg.\n\nNotes\n-----\nThis module is a lite version of the linalg.py module in SciPy which\ncontains high-level Python interface to the LAPACK library. The lite\nversion only accesses the following LAPACK functions: dgesv, zgesv,\ndgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,\nzgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.\n\"\"\"\n\n__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',\n 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',\n 'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',\n 'LinAlgError']\n\nimport sys\nfrom numpy.core import array, asarray, zeros, empty, transpose, \\\n intc, single, double, csingle, cdouble, inexact, complexfloating, \\\n newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \\\n maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \\\n isfinite, size, finfo, absolute, log, exp\nfrom numpy.lib import triu\nfrom numpy.linalg import lapack_lite\nfrom numpy.matrixlib.defmatrix import matrix_power\nfrom numpy.compat import asbytes\n\n# For Python2\/3 compatibility\n_N = asbytes('N')\n_V = asbytes('V')\n_A = asbytes('A')\n_S = asbytes('S')\n_L = asbytes('L')\n\nfortran_int = intc\n\n# Error object\nclass LinAlgError(Exception):\n \"\"\"\n Generic Python-exception-derived object raised by linalg functions.\n\n General purpose exception class, derived from Python's exception.Exception\n class, programmatically raised in linalg functions when a Linear\n Algebra-related condition would prevent further correct execution of the\n function.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> LA.inv(np.zeros((2,2)))\n Traceback (most recent call last):\n File \"\", line 1, in \n File \"...linalg.py\", line 350,\n in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))\n File \"...linalg.py\", line 249,\n in solve\n raise LinAlgError, 'Singular matrix'\n numpy.linalg.linalg.LinAlgError: Singular matrix\n\n \"\"\"\n pass\n\ndef _makearray(a):\n new = asarray(a)\n wrap = getattr(a, \"__array_prepare__\", new.__array_wrap__)\n return new, wrap\n\ndef isComplexType(t):\n return issubclass(t, complexfloating)\n\n_real_types_map = {single : single,\n double : double,\n csingle : single,\n cdouble : double}\n\n_complex_types_map = 
{single : csingle,\n double : cdouble,\n csingle : csingle,\n cdouble : cdouble}\n\ndef _realType(t, default=double):\n return _real_types_map.get(t, default)\n\ndef _complexType(t, default=cdouble):\n return _complex_types_map.get(t, default)\n\ndef _linalgRealType(t):\n \"\"\"Cast the type t to either double or cdouble.\"\"\"\n return double\n\n_complex_types_map = {single : csingle,\n double : cdouble,\n csingle : csingle,\n cdouble : cdouble}\n\ndef _commonType(*arrays):\n # in lite version, use higher precision (always double or cdouble)\n result_type = single\n is_complex = False\n for a in arrays:\n if issubclass(a.dtype.type, inexact):\n if isComplexType(a.dtype.type):\n is_complex = True\n rt = _realType(a.dtype.type, default=None)\n if rt is None:\n # unsupported inexact scalar\n raise TypeError(\"array type %s is unsupported in linalg\" %\n (a.dtype.name,))\n else:\n rt = double\n if rt is double:\n result_type = double\n if is_complex:\n t = cdouble\n result_type = _complex_types_map[result_type]\n else:\n t = double\n return t, result_type\n\n# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).\n\n_fastCT = fastCopyAndTranspose\n\ndef _to_native_byte_order(*arrays):\n ret = []\n for arr in arrays:\n if arr.dtype.byteorder not in ('=', '|'):\n ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))\n else:\n ret.append(arr)\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\ndef _fastCopyAndTranspose(type, *arrays):\n cast_arrays = ()\n for a in arrays:\n if a.dtype.type is type:\n cast_arrays = cast_arrays + (_fastCT(a),)\n else:\n cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)\n if len(cast_arrays) == 1:\n return cast_arrays[0]\n else:\n return cast_arrays\n\ndef _assertRank2(*arrays):\n for a in arrays:\n if len(a.shape) != 2:\n raise LinAlgError, '%d-dimensional array given. Array must be \\\n two-dimensional' % len(a.shape)\n\ndef _assertSquareness(*arrays):\n for a in arrays:\n if max(a.shape) != min(a.shape):\n raise LinAlgError, 'Array must be square'\n\ndef _assertFinite(*arrays):\n for a in arrays:\n if not (isfinite(a).all()):\n raise LinAlgError, \"Array must not contain infs or NaNs\"\n\ndef _assertNonEmpty(*arrays):\n for a in arrays:\n if size(a) == 0:\n raise LinAlgError(\"Arrays cannot be empty\")\n\n\n# Linear equations\n\ndef tensorsolve(a, b, axes=None):\n \"\"\"\n Solve the tensor equation ``a x = b`` for x.\n\n It is assumed that all indices of `x` are summed over in the product,\n together with the rightmost indices of `a`, as is done in, for example,\n ``tensordot(a, x, axes=len(b.shape))``.\n\n Parameters\n ----------\n a : array_like\n Coefficient tensor, of shape ``b.shape + Q``. 
`Q`, a tuple, equals\n the shape of that sub-tensor of `a` consisting of the appropriate\n number of its rightmost indices, and must be such that\n ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be\n 'square').\n b : array_like\n Right-hand tensor, which can be of any shape.\n axes : tuple of ints, optional\n Axes in `a` to reorder to the right, before inversion.\n If None (default), no reordering is done.\n\n Returns\n -------\n x : ndarray, shape Q\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not 'square' (in the above sense).\n\n See Also\n --------\n tensordot, tensorinv\n\n Examples\n --------\n >>> a = np.eye(2*3*4)\n >>> a.shape = (2*3, 4, 2, 3, 4)\n >>> b = np.random.randn(2*3, 4)\n >>> x = np.linalg.tensorsolve(a, b)\n >>> x.shape\n (2, 3, 4)\n >>> np.allclose(np.tensordot(a, x, axes=3), b)\n True\n\n \"\"\"\n a,wrap = _makearray(a)\n b = asarray(b)\n an = a.ndim\n\n if axes is not None:\n allaxes = range(0, an)\n for k in axes:\n allaxes.remove(k)\n allaxes.insert(an, k)\n a = a.transpose(allaxes)\n\n oldshape = a.shape[-(an-b.ndim):]\n prod = 1\n for k in oldshape:\n prod *= k\n\n a = a.reshape(-1, prod)\n b = b.ravel()\n res = wrap(solve(a, b))\n res.shape = oldshape\n return res\n\ndef solve(a, b):\n \"\"\"\n Solve a linear matrix equation, or system of linear scalar equations.\n\n Computes the \"exact\" solution, `x`, of the well-determined, i.e., full\n rank, linear matrix equation `ax = b`.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n Coefficient matrix.\n b : array_like, shape (M,) or (M, N)\n Ordinate or \"dependent variable\" values.\n\n Returns\n -------\n x : ndarray, shape (M,) or (M, N) depending on b\n Solution to the system a x = b\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not square.\n\n Notes\n -----\n `solve` is a wrapper for the LAPACK routines `dgesv`_ and\n `zgesv`_, the former being used if `a` is real-valued, the latter if\n it is complex-valued. The solution to the system of linear equations\n is computed using an LU decomposition [1]_ with partial pivoting and\n row interchanges.\n\n .. _dgesv: http:\/\/www.netlib.org\/lapack\/double\/dgesv.f\n\n .. _zgesv: http:\/\/www.netlib.org\/lapack\/complex16\/zgesv.f\n\n `a` must be square and of full-rank, i.e., all rows (or, equivalently,\n columns) must be linearly independent; if either is not true, use\n `lstsq` for the least-squares best \"solution\" of the\n system\/equation.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pg. 
22.\n\n Examples\n --------\n Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:\n\n >>> a = np.array([[3,1], [1,2]])\n >>> b = np.array([9,8])\n >>> x = np.linalg.solve(a, b)\n >>> x\n array([ 2., 3.])\n\n Check that the solution is correct:\n\n >>> (np.dot(a, x) == b).all()\n True\n\n \"\"\"\n a, _ = _makearray(a)\n b, wrap = _makearray(b)\n one_eq = len(b.shape) == 1\n if one_eq:\n b = b[:, newaxis]\n _assertRank2(a, b)\n _assertSquareness(a)\n n_eq = a.shape[0]\n n_rhs = b.shape[1]\n if n_eq != b.shape[0]:\n raise LinAlgError, 'Incompatible dimensions'\n t, result_t = _commonType(a, b)\n# lapack_routine = _findLapackRoutine('gesv', t)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgesv\n else:\n lapack_routine = lapack_lite.dgesv\n a, b = _fastCopyAndTranspose(t, a, b)\n a, b = _to_native_byte_order(a, b)\n pivots = zeros(n_eq, fortran_int)\n results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)\n if results['info'] > 0:\n raise LinAlgError, 'Singular matrix'\n if one_eq:\n return wrap(b.ravel().astype(result_t))\n else:\n return wrap(b.transpose().astype(result_t))\n\n\ndef tensorinv(a, ind=2):\n \"\"\"\n Compute the 'inverse' of an N-dimensional array.\n\n The result is an inverse for `a` relative to the tensordot operation\n ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,\n ``tensordot(tensorinv(a), a, ind)`` is the \"identity\" tensor for the\n tensordot operation.\n\n Parameters\n ----------\n a : array_like\n Tensor to 'invert'. Its shape must be 'square', i. e.,\n ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.\n ind : int, optional\n Number of first indices that are involved in the inverse sum.\n Must be a positive integer, default is 2.\n\n Returns\n -------\n b : ndarray\n `a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not 'square' (in the above sense).\n\n See Also\n --------\n tensordot, tensorsolve\n\n Examples\n --------\n >>> a = np.eye(4*6)\n >>> a.shape = (4, 6, 8, 3)\n >>> ainv = np.linalg.tensorinv(a, ind=2)\n >>> ainv.shape\n (8, 3, 4, 6)\n >>> b = np.random.randn(4, 6)\n >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))\n True\n\n >>> a = np.eye(4*6)\n >>> a.shape = (24, 8, 3)\n >>> ainv = np.linalg.tensorinv(a, ind=1)\n >>> ainv.shape\n (8, 3, 24)\n >>> b = np.random.randn(24)\n >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))\n True\n\n \"\"\"\n a = asarray(a)\n oldshape = a.shape\n prod = 1\n if ind > 0:\n invshape = oldshape[ind:] + oldshape[:ind]\n for k in oldshape[ind:]:\n prod *= k\n else:\n raise ValueError, \"Invalid ind argument.\"\n a = a.reshape(prod, -1)\n ia = inv(a)\n return ia.reshape(*invshape)\n\n\n# Matrix inversion\n\ndef inv(a):\n \"\"\"\n Compute the (multiplicative) inverse of a matrix.\n\n Given a square matrix `a`, return the matrix `ainv` satisfying\n ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n Matrix to be inverted.\n\n Returns\n -------\n ainv : ndarray or matrix, shape (M, M)\n (Multiplicative) inverse of the matrix `a`.\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not square.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1., 2.], [3., 4.]])\n >>> ainv = LA.inv(a)\n >>> np.allclose(np.dot(a, ainv), np.eye(2))\n True\n >>> np.allclose(np.dot(ainv, a), np.eye(2))\n True\n\n If a is a matrix object, then the return value is a matrix as well:\n\n >>> 
ainv = LA.inv(np.matrix(a))\n >>> ainv\n matrix([[-2. , 1. ],\n [ 1.5, -0.5]])\n\n \"\"\"\n a, wrap = _makearray(a)\n return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))\n\n\n# Cholesky decomposition\n\ndef cholesky(a):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n\n Returns\n -------\n L : ndarray, or matrix object if `a` is, shape (M, M)\n Lower-triangular Cholesky factor of a.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian\/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> A = np.array([[1,-2j],[2j,5]])\n >>> A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = np.linalg.cholesky(A)\n >>> L\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> np.dot(L, L.T.conj()) # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> np.linalg.cholesky(A) # an ndarray object is returned\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> # But a matrix object is returned if A is a matrix object\n >>> LA.cholesky(np.matrix(A))\n matrix([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertSquareness(a)\n t, result_t = _commonType(a)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n m = a.shape[0]\n n = a.shape[1]\n if isComplexType(t):\n lapack_routine = lapack_lite.zpotrf\n else:\n lapack_routine = lapack_lite.dpotrf\n results = lapack_routine(_L, n, a, m, 0)\n if results['info'] > 0:\n raise LinAlgError, 'Matrix is not positive definite - \\\n Cholesky decomposition cannot be computed'\n s = triu(a, k=0).transpose()\n if (s.dtype != result_t):\n s = s.astype(result_t)\n return wrap(s)\n\n# QR decompostion\n\ndef qr(a, mode='full'):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is\n upper-triangular.\n\n Parameters\n ----------\n a : array_like\n Matrix to be factored, of shape (M, N).\n mode : {'full', 'r', 'economic'}, optional\n Specifies the values to be returned. 'full' is the default.\n Economic mode is slightly faster then 'r' mode if only `r` is needed.\n\n Returns\n -------\n q : ndarray of float or complex, optional\n The orthonormal matrix, of shape (M, K). 
Only returned if\n ``mode='full'``.\n r : ndarray of float or complex, optional\n The upper-triangular matrix, of shape (K, N) with K = min(M, N).\n Only returned when ``mode='full'`` or ``mode='r'``.\n a2 : ndarray of float or complex, optional\n Array of shape (M, N), only returned when ``mode='economic``'.\n The diagonal and the upper triangle of `a2` contains `r`, while\n the rest of the matrix is undefined.\n\n Raises\n ------\n LinAlgError\n If factoring fails.\n\n Notes\n -----\n This is an interface to the LAPACK routines dgeqrf, zgeqrf,\n dorgqr, and zungqr.\n\n For more information on the qr factorization, see for example:\n http:\/\/en.wikipedia.org\/wiki\/QR_factorization\n\n Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,\n all the return values will be matrices too.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6)\n >>> q, r = np.linalg.qr(a)\n >>> np.allclose(a, np.dot(q, r)) # a does equal qr\n True\n >>> r2 = np.linalg.qr(a, mode='r')\n >>> r3 = np.linalg.qr(a, mode='economic')\n >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'\n True\n >>> # But only triu parts are guaranteed equal when mode='economic'\n >>> np.allclose(r, np.triu(r3[:6,:6], k=0))\n True\n\n Example illustrating a common use of `qr`: solving of least squares\n problems\n\n What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for\n the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points\n and you'll see that it should be y0 = 0, m = 1.) The answer is provided\n by solving the over-determined matrix equation ``Ax = b``, where::\n\n A = array([[0, 1], [1, 1], [1, 1], [2, 1]])\n x = array([[y0], [m]])\n b = array([[1], [0], [2], [1]])\n\n If A = qr such that q is orthonormal (which is always possible via\n Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,\n however, we simply use `lstsq`.)\n\n >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])\n >>> A\n array([[0, 1],\n [1, 1],\n [1, 1],\n [2, 1]])\n >>> b = np.array([1, 0, 2, 1])\n >>> q, r = LA.qr(A)\n >>> p = np.dot(q.T, b)\n >>> np.dot(LA.inv(r), p)\n array([ 1.1e-16, 1.0e+00])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRank2(a)\n m, n = a.shape\n t, result_t = _commonType(a)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n mn = min(m, n)\n tau = zeros((mn,), t)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgeqrf\n routine_name = 'zgeqrf'\n else:\n lapack_routine = lapack_lite.dgeqrf\n routine_name = 'dgeqrf'\n\n # calculate optimal size of work data 'work'\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, a, m, tau, work, -1, 0)\n if results['info'] != 0:\n raise LinAlgError, '%s returns %d' % (routine_name, results['info'])\n\n # do qr decomposition\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, a, m, tau, work, lwork, 0)\n\n if results['info'] != 0:\n raise LinAlgError, '%s returns %d' % (routine_name, results['info'])\n\n # economic mode. 
Isn't actually economic.\n if mode[0] == 'e':\n if t != result_t :\n a = a.astype(result_t)\n return a.T\n\n # generate r\n r = _fastCopyAndTranspose(result_t, a[:,:mn])\n for i in range(mn):\n r[i,:i].fill(0.0)\n\n # 'r'-mode, that is, calculate only r\n if mode[0] == 'r':\n return r\n\n # from here on: build orthonormal matrix q from a\n\n if isComplexType(t):\n lapack_routine = lapack_lite.zungqr\n routine_name = 'zungqr'\n else:\n lapack_routine = lapack_lite.dorgqr\n routine_name = 'dorgqr'\n\n # determine optimal lwork\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)\n if results['info'] != 0:\n raise LinAlgError, '%s returns %d' % (routine_name, results['info'])\n\n # compute q\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)\n if results['info'] != 0:\n raise LinAlgError, '%s returns %d' % (routine_name, results['info'])\n\n q = _fastCopyAndTranspose(result_t, a[:mn,:])\n\n return wrap(q), wrap(r)\n\n\n# Eigenvalues\n\n\ndef eigvals(a):\n \"\"\"\n Compute the eigenvalues of a general matrix.\n\n Main difference between `eigvals` and `eig`: the eigenvectors aren't\n returned.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n A complex- or real-valued matrix whose eigenvalues will be computed.\n\n Returns\n -------\n w : ndarray, shape (M,)\n The eigenvalues, each repeated according to its multiplicity.\n They are not necessarily ordered, nor are they necessarily\n real for real matrices.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eig : eigenvalues and right eigenvectors of general arrays\n eigvalsh : eigenvalues of symmetric or Hermitian arrays.\n eigh : eigenvalues and eigenvectors of symmetric\/Hermitian arrays.\n\n Notes\n -----\n This is a simple interface to the LAPACK routines dgeev and zgeev\n that sets those routines' flags to return only the eigenvalues of\n general real and complex arrays, respectively.\n\n Examples\n --------\n Illustration, using the fact that the eigenvalues of a diagonal matrix\n are its diagonal elements, that multiplying a matrix on the left\n by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose\n of `Q`), preserves the eigenvalues of the \"middle\" matrix. 
In other words,\n if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as\n ``A``:\n\n >>> from numpy import linalg as LA\n >>> x = np.random.random()\n >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])\n >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])\n (1.0, 1.0, 0.0)\n\n Now multiply a diagonal matrix by Q on one side and by Q.T on the other:\n\n >>> D = np.diag((-1,1))\n >>> LA.eigvals(D)\n array([-1., 1.])\n >>> A = np.dot(Q, D)\n >>> A = np.dot(A, Q.T)\n >>> LA.eigvals(A)\n array([ 1., -1.])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertSquareness(a)\n _assertFinite(a)\n t, result_t = _commonType(a)\n real_t = _linalgRealType(t)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n n = a.shape[0]\n dummy = zeros((1,), t)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgeev\n w = zeros((n,), t)\n rwork = zeros((n,), real_t)\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _N, n, a, n, w,\n dummy, 1, dummy, 1, work, -1, rwork, 0)\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _N, n, a, n, w,\n dummy, 1, dummy, 1, work, lwork, rwork, 0)\n else:\n lapack_routine = lapack_lite.dgeev\n wr = zeros((n,), t)\n wi = zeros((n,), t)\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _N, n, a, n, wr, wi,\n dummy, 1, dummy, 1, work, -1, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _N, n, a, n, wr, wi,\n dummy, 1, dummy, 1, work, lwork, 0)\n if all(wi == 0.):\n w = wr\n result_t = _realType(result_t)\n else:\n w = wr+1j*wi\n result_t = _complexType(result_t)\n if results['info'] > 0:\n raise LinAlgError, 'Eigenvalues did not converge'\n return w.astype(result_t)\n\n\ndef eigvalsh(a, UPLO='L'):\n \"\"\"\n Compute the eigenvalues of a Hermitian or real symmetric matrix.\n\n Main difference from eigh: the eigenvectors are not computed.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n A complex- or real-valued matrix whose eigenvalues are to be\n computed.\n UPLO : {'L', 'U'}, optional\n Specifies whether the calculation is done with the lower triangular\n part of `a` ('L', default) or the upper triangular part ('U').\n\n Returns\n -------\n w : ndarray, shape (M,)\n The eigenvalues, not necessarily ordered, each repeated according to\n its multiplicity.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigh : eigenvalues and eigenvectors of symmetric\/Hermitian arrays.\n eigvals : eigenvalues of general real or complex arrays.\n eig : eigenvalues and right eigenvectors of general real or complex\n arrays.\n\n Notes\n -----\n This is a simple interface to the LAPACK routines dsyevd and zheevd\n that sets those routines' flags to return only the eigenvalues of\n real symmetric and complex Hermitian arrays, respectively.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, -2j], [2j, 5]])\n >>> LA.eigvalsh(a)\n array([ 0.17157288+0.j, 5.82842712+0.j])\n\n \"\"\"\n UPLO = asbytes(UPLO)\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertSquareness(a)\n t, result_t = _commonType(a)\n real_t = _linalgRealType(t)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n n = a.shape[0]\n liwork = 5*n+3\n iwork = zeros((liwork,), fortran_int)\n if isComplexType(t):\n lapack_routine = lapack_lite.zheevd\n w = zeros((n,), real_t)\n lwork = 1\n work = zeros((lwork,), t)\n lrwork = 1\n rwork = 
zeros((lrwork,), real_t)\n results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,\n rwork, -1, iwork, liwork, 0)\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n lrwork = int(rwork[0])\n rwork = zeros((lrwork,), real_t)\n results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,\n rwork, lrwork, iwork, liwork, 0)\n else:\n lapack_routine = lapack_lite.dsyevd\n w = zeros((n,), t)\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,\n iwork, liwork, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,\n iwork, liwork, 0)\n if results['info'] > 0:\n raise LinAlgError, 'Eigenvalues did not converge'\n return w.astype(result_t)\n\ndef _convertarray(a):\n t, result_t = _commonType(a)\n a = _fastCT(a.astype(t))\n return a, t, result_t\n\n\n# Eigenvectors\n\n\ndef eig(a):\n \"\"\"\n Compute the eigenvalues and right eigenvectors of a square array.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n A square array of real or complex elements.\n\n Returns\n -------\n w : ndarray, shape (M,)\n The eigenvalues, each repeated according to its multiplicity.\n The eigenvalues are not necessarily ordered, nor are they\n necessarily real for real arrays (though for real arrays\n complex-valued eigenvalues should occur in conjugate pairs).\n\n v : ndarray, shape (M, M)\n The normalized (unit \"length\") eigenvectors, such that the\n column ``v[:,i]`` is the eigenvector corresponding to the\n eigenvalue ``w[i]``.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)\n array.\n\n eigvals : eigenvalues of a non-symmetric array.\n\n Notes\n -----\n This is a simple interface to the LAPACK routines dgeev and zgeev\n which compute the eigenvalues and eigenvectors of, respectively,\n general real- and complex-valued square arrays.\n\n The number `w` is an eigenvalue of `a` if there exists a vector\n `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and\n `v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]``\n for :math:`i \\\\in \\\\{0,...,M-1\\\\}`.\n\n The array `v` of eigenvectors may not be of maximum rank, that is, some\n of the columns may be linearly dependent, although round-off error may\n obscure that fact. If the eigenvalues are all different, then theoretically\n the eigenvectors are linearly independent. Likewise, the (complex-valued)\n matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,\n if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate\n transpose of `a`.\n\n Finally, it is emphasized that `v` consists of the *right* (as in\n right-hand side) eigenvectors of `a`. A vector `y` satisfying\n ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*\n eigenvector of `a`, and, in general, the left and right eigenvectors\n of a matrix are not necessarily the (perhaps conjugate) transposes\n of each other.\n\n References\n ----------\n G. 
Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,\n Academic Press, Inc., 1980, Various pp.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n\n (Almost) trivial example with real e-values and e-vectors.\n\n >>> w, v = LA.eig(np.diag((1, 2, 3)))\n >>> w; v\n array([ 1., 2., 3.])\n array([[ 1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]])\n\n Real matrix possessing complex e-values and e-vectors; note that the\n e-values are complex conjugates of each other.\n\n >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))\n >>> w; v\n array([ 1. + 1.j, 1. - 1.j])\n array([[ 0.70710678+0.j , 0.70710678+0.j ],\n [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])\n\n Complex-valued matrix with real e-values (but complex-valued e-vectors);\n note that a.conj().T = a, i.e., a is Hermitian.\n\n >>> a = np.array([[1, 1j], [-1j, 1]])\n >>> w, v = LA.eig(a)\n >>> w; v\n array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}\n array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],\n [ 0.70710678+0.j , 0.00000000+0.70710678j]])\n\n Be careful about round-off error!\n\n >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])\n >>> # Theor. e-values are 1 +\/- 1e-9\n >>> w, v = LA.eig(a)\n >>> w; v\n array([ 1., 1.])\n array([[ 1., 0.],\n [ 0., 1.]])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertSquareness(a)\n _assertFinite(a)\n a, t, result_t = _convertarray(a) # convert to double or cdouble type\n a = _to_native_byte_order(a)\n real_t = _linalgRealType(t)\n n = a.shape[0]\n dummy = zeros((1,), t)\n if isComplexType(t):\n # Complex routines take different arguments\n lapack_routine = lapack_lite.zgeev\n w = zeros((n,), t)\n v = zeros((n, n), t)\n lwork = 1\n work = zeros((lwork,), t)\n rwork = zeros((2*n,), real_t)\n results = lapack_routine(_N, _V, n, a, n, w,\n dummy, 1, v, n, work, -1, rwork, 0)\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _V, n, a, n, w,\n dummy, 1, v, n, work, lwork, rwork, 0)\n else:\n lapack_routine = lapack_lite.dgeev\n wr = zeros((n,), t)\n wi = zeros((n,), t)\n vr = zeros((n, n), t)\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _V, n, a, n, wr, wi,\n dummy, 1, vr, n, work, -1, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(_N, _V, n, a, n, wr, wi,\n dummy, 1, vr, n, work, lwork, 0)\n if all(wi == 0.0):\n w = wr\n v = vr\n result_t = _realType(result_t)\n else:\n w = wr+1j*wi\n v = array(vr, w.dtype)\n ind = flatnonzero(wi != 0.0) # indices of complex e-vals\n for i in range(len(ind)\/\/2):\n v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]\n v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]\n result_t = _complexType(result_t)\n\n if results['info'] > 0:\n raise LinAlgError, 'Eigenvalues did not converge'\n vt = v.transpose().astype(result_t)\n return w.astype(result_t), wrap(vt)\n\n\ndef eigh(a, UPLO='L'):\n \"\"\"\n Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.\n\n Returns two objects, a 1-D array containing the eigenvalues of `a`, and\n a 2-D square array or matrix (depending on the input type) of the\n corresponding eigenvectors (in columns).\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n A complex Hermitian or real symmetric matrix.\n UPLO : {'L', 'U'}, optional\n Specifies whether the calculation is done with the lower triangular\n part of `a` ('L', default) or the upper triangular part ('U').\n\n Returns\n -------\n w : ndarray, shape (M,)\n The eigenvalues, not necessarily ordered.\n v : ndarray, 
or matrix object if `a` is, shape (M, M)\n The column ``v[:, i]`` is the normalized eigenvector corresponding\n to the eigenvalue ``w[i]``.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigvalsh : eigenvalues of symmetric or Hermitian arrays.\n eig : eigenvalues and right eigenvectors for non-symmetric arrays.\n eigvals : eigenvalues of non-symmetric arrays.\n\n Notes\n -----\n This is a simple interface to the LAPACK routines dsyevd and zheevd,\n which compute the eigenvalues and eigenvectors of real symmetric and\n complex Hermitian arrays, respectively.\n\n The eigenvalues of real symmetric or complex Hermitian matrices are\n always real. [1]_ The array `v` of (column) eigenvectors is unitary\n and `a`, `w`, and `v` satisfy the equations\n ``dot(a, v[:, i]) = w[i] * v[:, i]``.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pg. 222.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, -2j], [2j, 5]])\n >>> a\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> w, v = LA.eigh(a)\n >>> w; v\n array([ 0.17157288, 5.82842712])\n array([[-0.92387953+0.j , -0.38268343+0.j ],\n [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])\n\n >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val\/vec pair\n array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])\n >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val\/vec pair\n array([ 0.+0.j, 0.+0.j])\n\n >>> A = np.matrix(a) # what happens if input is a matrix object\n >>> A\n matrix([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> w, v = LA.eigh(A)\n >>> w; v\n array([ 0.17157288, 5.82842712])\n matrix([[-0.92387953+0.j , -0.38268343+0.j ],\n [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])\n\n \"\"\"\n UPLO = asbytes(UPLO)\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertSquareness(a)\n t, result_t = _commonType(a)\n real_t = _linalgRealType(t)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n n = a.shape[0]\n liwork = 5*n+3\n iwork = zeros((liwork,), fortran_int)\n if isComplexType(t):\n lapack_routine = lapack_lite.zheevd\n w = zeros((n,), real_t)\n lwork = 1\n work = zeros((lwork,), t)\n lrwork = 1\n rwork = zeros((lrwork,), real_t)\n results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,\n rwork, -1, iwork, liwork, 0)\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n lrwork = int(rwork[0])\n rwork = zeros((lrwork,), real_t)\n results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,\n rwork, lrwork, iwork, liwork, 0)\n else:\n lapack_routine = lapack_lite.dsyevd\n w = zeros((n,), t)\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,\n iwork, liwork, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,\n iwork, liwork, 0)\n if results['info'] > 0:\n raise LinAlgError, 'Eigenvalues did not converge'\n at = a.transpose().astype(result_t)\n return w.astype(_realType(result_t)), wrap(at)\n\n\n# Singular value decomposition\n\ndef svd(a, full_matrices=1, compute_uv=1):\n \"\"\"\n Singular Value Decomposition.\n\n Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`\n are unitary and `s` is a 1-d array of `a`'s singular values.\n\n Parameters\n ----------\n a : array_like\n A real or complex matrix of shape (`M`, `N`) .\n full_matrices : bool, optional\n If True (default), `u` and `v` have the shapes 
(`M`, `M`) and\n (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)\n and (`K`, `N`), respectively, where `K` = min(`M`, `N`).\n compute_uv : bool, optional\n Whether or not to compute `u` and `v` in addition to `s`. True\n by default.\n\n Returns\n -------\n u : ndarray\n Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)\n depending on value of ``full_matrices``.\n s : ndarray\n The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is\n a 1-d array of length min(`M`, `N`).\n v : ndarray\n Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on\n ``full_matrices``.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n Notes\n -----\n The SVD is commonly written as ``a = U S V.H``. The `v` returned\n by this function is ``V.H`` and ``u = U``.\n\n If ``U`` is a unitary matrix, it means that it\n satisfies ``U.H = inv(U)``.\n\n The rows of `v` are the eigenvectors of ``a.H a``. The columns\n of `u` are the eigenvectors of ``a a.H``. For row ``i`` in\n `v` and column ``i`` in `u`, the corresponding eigenvalue is\n ``s[i]**2``.\n\n If `a` is a `matrix` object (as opposed to an `ndarray`), then so\n are all the return values.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)\n\n Reconstruction based on full SVD:\n\n >>> U, s, V = np.linalg.svd(a, full_matrices=True)\n >>> U.shape, V.shape, s.shape\n ((9, 6), (6, 6), (6,))\n >>> S = np.zeros((9, 6), dtype=complex)\n >>> S[:6, :6] = np.diag(s)\n >>> np.allclose(a, np.dot(U, np.dot(S, V)))\n True\n\n Reconstruction based on reduced SVD:\n\n >>> U, s, V = np.linalg.svd(a, full_matrices=False)\n >>> U.shape, V.shape, s.shape\n ((9, 6), (6, 6), (6,))\n >>> S = np.diag(s)\n >>> np.allclose(a, np.dot(U, np.dot(S, V)))\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertNonEmpty(a)\n m, n = a.shape\n t, result_t = _commonType(a)\n real_t = _linalgRealType(t)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n s = zeros((min(n, m),), real_t)\n if compute_uv:\n if full_matrices:\n nu = m\n nvt = n\n option = _A\n else:\n nu = min(n, m)\n nvt = min(n, m)\n option = _S\n u = zeros((nu, m), t)\n vt = zeros((n, nvt), t)\n else:\n option = _N\n nu = 1\n nvt = 1\n u = empty((1, 1), t)\n vt = empty((1, 1), t)\n\n iwork = zeros((8*min(m, n),), fortran_int)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgesdd\n rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,\n work, -1, rwork, iwork, 0)\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,\n work, lwork, rwork, iwork, 0)\n else:\n lapack_routine = lapack_lite.dgesdd\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,\n work, -1, iwork, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,\n work, lwork, iwork, 0)\n if results['info'] > 0:\n raise LinAlgError, 'SVD did not converge'\n s = s.astype(_realType(result_t))\n if compute_uv:\n u = u.transpose().astype(result_t)\n vt = vt.transpose().astype(result_t)\n return wrap(u), s, wrap(vt)\n else:\n return s\n\ndef cond(x, p=None):\n \"\"\"\n Compute the condition number of a matrix.\n\n This function is capable of returning the condition number using\n one of seven different norms, depending on the value of `p` (see\n Parameters 
below).\n\n Parameters\n ----------\n x : array_like, shape (M, N)\n The matrix whose condition number is sought.\n p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional\n Order of the norm:\n\n ===== ============================\n p norm for matrices\n ===== ============================\n None 2-norm, computed directly using the ``SVD``\n 'fro' Frobenius norm\n inf max(sum(abs(x), axis=1))\n -inf min(sum(abs(x), axis=1))\n 1 max(sum(abs(x), axis=0))\n -1 min(sum(abs(x), axis=0))\n 2 2-norm (largest sing. value)\n -2 smallest singular value\n ===== ============================\n\n inf means the numpy.inf object, and the Frobenius norm is\n the root-of-sum-of-squares norm.\n\n Returns\n -------\n c : {float, inf}\n The condition number of the matrix. May be infinite.\n\n See Also\n --------\n numpy.linalg.linalg.norm\n\n Notes\n -----\n The condition number of `x` is defined as the norm of `x` times the\n norm of the inverse of `x` [1]_; the norm can be the usual L2-norm\n (root-of-sum-of-squares) or one of a number of other matrix norms.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,\n Academic Press, Inc., 1980, pg. 285.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])\n >>> a\n array([[ 1, 0, -1],\n [ 0, 1, 0],\n [ 1, 0, 1]])\n >>> LA.cond(a)\n 1.4142135623730951\n >>> LA.cond(a, 'fro')\n 3.1622776601683795\n >>> LA.cond(a, np.inf)\n 2.0\n >>> LA.cond(a, -np.inf)\n 1.0\n >>> LA.cond(a, 1)\n 2.0\n >>> LA.cond(a, -1)\n 1.0\n >>> LA.cond(a, 2)\n 1.4142135623730951\n >>> LA.cond(a, -2)\n 0.70710678118654746\n >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))\n 0.70710678118654746\n\n \"\"\"\n x = asarray(x) # in case we have a matrix\n if p is None:\n s = svd(x,compute_uv=False)\n return s[0]\/s[-1]\n else:\n return norm(x,p)*norm(inv(x),p)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"\n Return matrix rank of array using SVD method\n\n Rank of the array is the number of SVD singular values of the\n array that are greater than `tol`.\n\n Parameters\n ----------\n M : array_like\n array of <=2 dimensions\n tol : {None, float}\n threshold below which SVD values are considered zero. If `tol` is\n None, and ``S`` is an array with singular values for `M`, and\n ``eps`` is the epsilon value for datatype of ``S``, then `tol` is\n set to ``S.max() * eps``.\n\n Notes\n -----\n Golub and van Loan [1]_ define \"numerical rank deficiency\" as using\n tol=eps*S[0] (where S[0] is the maximum singular value and thus the\n 2-norm of the matrix). This is one definition of rank deficiency,\n and the one we use here. When floating point roundoff is the main\n concern, then \"numerical rank deficiency\" is a reasonable choice. In\n some cases you may prefer other definitions. The most useful measure\n of the tolerance depends on the operations you intend to use on your\n matrix. For example, if your data come from uncertain measurements\n with uncertainties greater than floating point epsilon, choosing a\n tolerance near that uncertainty may be preferable. The tolerance\n may be absolute if the uncertainties are absolute rather than\n relative.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.\n Baltimore: Johns Hopkins University Press, 1996.\n\n Examples\n --------\n >>> matrix_rank(np.eye(4)) # Full rank matrix\n 4\n >>> I=np.eye(4); I[-1,-1] = 0. 
# rank deficient matrix\n >>> matrix_rank(I)\n 3\n >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0\n 1\n >>> matrix_rank(np.zeros((4,)))\n 0\n\n \"\"\"\n M = asarray(M)\n if M.ndim > 2:\n raise TypeError('array should have 2 or fewer dimensions')\n if M.ndim < 2:\n return int(not all(M==0))\n S = svd(M, compute_uv=False)\n if tol is None:\n tol = S.max() * finfo(S.dtype).eps\n return sum(S > tol)\n\n\n# Generalized inverse\n\ndef pinv(a, rcond=1e-15 ):\n \"\"\"\n Compute the (Moore-Penrose) pseudo-inverse of a matrix.\n\n Calculate the generalized inverse of a matrix using its\n singular-value decomposition (SVD) and including all\n *large* singular values.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be pseudo-inverted.\n rcond : float\n Cutoff for small singular values.\n Singular values smaller (in modulus) than\n `rcond` * largest_singular_value (again, in modulus)\n are set to zero.\n\n Returns\n -------\n B : ndarray, shape (N, M)\n The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so\n is `B`.\n\n Raises\n ------\n LinAlgError\n If the SVD computation does not converge.\n\n Notes\n -----\n The pseudo-inverse of a matrix A, denoted :math:`A^+`, is\n defined as: \"the matrix that 'solves' [the least-squares problem]\n :math:`Ax = b`,\" i.e., if :math:`\\\\bar{x}` is said solution, then\n :math:`A^+` is that matrix such that :math:`\\\\bar{x} = A^+b`.\n\n It can be shown that if :math:`Q_1 \\\\Sigma Q_2^T = A` is the singular\n value decomposition of A, then\n :math:`A^+ = Q_2 \\\\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are\n orthogonal matrices, :math:`\\\\Sigma` is a diagonal matrix consisting\n of A's so-called singular values, (followed, typically, by\n zeros), and then :math:`\\\\Sigma^+` is simply the diagonal matrix\n consisting of the reciprocals of A's singular values\n (again, followed by zeros). [1]_\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pp. 139-142.\n\n Examples\n --------\n The following example checks that ``a * a+ * a == a`` and\n ``a+ * a * a+ == a+``:\n\n >>> a = np.random.randn(9, 6)\n >>> B = np.linalg.pinv(a)\n >>> np.allclose(a, np.dot(a, np.dot(B, a)))\n True\n >>> np.allclose(B, np.dot(B, np.dot(a, B)))\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertNonEmpty(a)\n a = a.conjugate()\n u, s, vt = svd(a, 0)\n m = u.shape[0]\n n = vt.shape[1]\n cutoff = rcond*maximum.reduce(s)\n for i in range(min(n, m)):\n if s[i] > cutoff:\n s[i] = 1.\/s[i]\n else:\n s[i] = 0.;\n res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))\n return wrap(res)\n\n# Determinant\n\ndef slogdet(a):\n \"\"\"\n Compute the sign and (natural) logarithm of the determinant of an array.\n\n If an array has a very small or very large determinant, than a call to\n `det` may overflow or underflow. This routine is more robust against such\n issues, because it computes the logarithm of the determinant rather than\n the determinant itself.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n Input array.\n\n Returns\n -------\n sign : float or complex\n A number representing the sign of the determinant. For a real matrix,\n this is 1, 0, or -1. For a complex matrix, this is a complex number\n with absolute value 1 (i.e., it is on the unit circle), or else 0.\n logdet : float\n The natural log of the absolute value of the determinant.\n\n If the determinant is zero, then `sign` will be 0 and `logdet` will be\n -Inf. 
In all cases, the determinant is equal to `sign * np.exp(logdet)`.\n\n Notes\n -----\n The determinant is computed via LU factorization using the LAPACK\n routine z\/dgetrf.\n\n .. versionadded:: 2.0.0.\n\n Examples\n --------\n The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:\n\n >>> a = np.array([[1, 2], [3, 4]])\n >>> (sign, logdet) = np.linalg.slogdet(a)\n >>> (sign, logdet)\n (-1, 0.69314718055994529)\n >>> sign * np.exp(logdet)\n -2.0\n\n This routine succeeds where ordinary `det` does not:\n\n >>> np.linalg.det(np.eye(500) * 0.1)\n 0.0\n >>> np.linalg.slogdet(np.eye(500) * 0.1)\n (1, -1151.2925464970228)\n\n See Also\n --------\n det\n\n \"\"\"\n a = asarray(a)\n _assertRank2(a)\n _assertSquareness(a)\n t, result_t = _commonType(a)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n n = a.shape[0]\n if isComplexType(t):\n lapack_routine = lapack_lite.zgetrf\n else:\n lapack_routine = lapack_lite.dgetrf\n pivots = zeros((n,), fortran_int)\n results = lapack_routine(n, n, a, n, pivots, 0)\n info = results['info']\n if (info < 0):\n raise TypeError, \"Illegal input to Fortran routine\"\n elif (info > 0):\n return (t(0.0), _realType(t)(-Inf))\n sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)\n d = diagonal(a)\n absd = absolute(d)\n sign *= multiply.reduce(d \/ absd)\n log(absd, absd)\n logdet = add.reduce(absd, axis=-1)\n return sign, logdet\n\ndef det(a):\n \"\"\"\n Compute the determinant of an array.\n\n Parameters\n ----------\n a : array_like, shape (M, M)\n Input array.\n\n Returns\n -------\n det : ndarray\n Determinant of `a`.\n\n Notes\n -----\n The determinant is computed via LU factorization using the LAPACK\n routine z\/dgetrf.\n\n Examples\n --------\n The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:\n\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.linalg.det(a)\n -2.0\n\n See Also\n --------\n slogdet : Another way to representing the determinant, more suitable\n for large matrices where underflow\/overflow may occur.\n\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * exp(logdet)\n\n# Linear Least Squares\n\ndef lstsq(a, b, rcond=-1):\n \"\"\"\n Return the least-squares solution to a linear matrix equation.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n \"Coefficient\" matrix.\n b : array_like, shape (M,) or (M, K)\n Ordinate or \"dependent variable\" values. If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n rcond : float, optional\n Cut-off ratio for small singular values of `a`.\n Singular values are set to zero if they are smaller than `rcond`\n times the largest singular value of `a`.\n\n Returns\n -------\n x : ndarray, shape (N,) or (N, K)\n Least-squares solution. 
The shape of `x` depends on the shape of\n `b`.\n residues : ndarray, shape (), (1,), or (K,)\n Sums of residues; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If the rank of `a` is < N or > M, this is an empty array.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : int\n Rank of matrix `a`.\n s : ndarray, shape (min(M,N),)\n Singular values of `a`.\n\n Raises\n ------\n LinAlgError\n If computation does not converge.\n\n Notes\n -----\n If `b` is a matrix, then all array results are returned as matrices.\n\n Examples\n --------\n Fit a line, ``y = mx + c``, through some noisy data-points:\n\n >>> x = np.array([0, 1, 2, 3])\n >>> y = np.array([-1, 0.2, 0.9, 2.1])\n\n By examining the coefficients, we see that the line should have a\n gradient of roughly 1 and cut the y-axis at, more or less, -1.\n\n We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``\n and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:\n\n >>> A = np.vstack([x, np.ones(len(x))]).T\n >>> A\n array([[ 0., 1.],\n [ 1., 1.],\n [ 2., 1.],\n [ 3., 1.]])\n\n >>> m, c = np.linalg.lstsq(A, y)[0]\n >>> print m, c\n 1.0 -0.95\n\n Plot the data along with the fitted line:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o', label='Original data', markersize=10)\n >>> plt.plot(x, m*x + c, 'r', label='Fitted line')\n >>> plt.legend()\n >>> plt.show()\n\n \"\"\"\n import math\n a, _ = _makearray(a)\n b, wrap = _makearray(b)\n is_1d = len(b.shape) == 1\n if is_1d:\n b = b[:, newaxis]\n _assertRank2(a, b)\n m = a.shape[0]\n n = a.shape[1]\n n_rhs = b.shape[1]\n ldb = max(n, m)\n if m != b.shape[0]:\n raise LinAlgError, 'Incompatible dimensions'\n t, result_t = _commonType(a, b)\n result_real_t = _realType(result_t)\n real_t = _linalgRealType(t)\n bstar = zeros((ldb, n_rhs), t)\n bstar[:b.shape[0],:n_rhs] = b.copy()\n a, bstar = _fastCopyAndTranspose(t, a, bstar)\n a, bstar = _to_native_byte_order(a, bstar)\n s = zeros((min(m, n),), real_t)\n nlvl = max( 0, int( math.log( float(min(m, n))\/2. 
) ) + 1 )\n iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgelsd\n lwork = 1\n rwork = zeros((lwork,), real_t)\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, -1, rwork, iwork, 0)\n lwork = int(abs(work[0]))\n rwork = zeros((lwork,), real_t)\n a_real = zeros((m, n), real_t)\n bstar_real = zeros((ldb, n_rhs,), real_t)\n results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,\n bstar_real, ldb, s, rcond,\n 0, rwork, -1, iwork, 0)\n lrwork = int(rwork[0])\n work = zeros((lwork,), t)\n rwork = zeros((lrwork,), real_t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, lwork, rwork, iwork, 0)\n else:\n lapack_routine = lapack_lite.dgelsd\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, -1, iwork, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, lwork, iwork, 0)\n if results['info'] > 0:\n raise LinAlgError, 'SVD did not converge in Linear Least Squares'\n resids = array([], result_real_t)\n if is_1d:\n x = array(ravel(bstar)[:n], dtype=result_t, copy=True)\n if results['rank'] == n and m > n:\n if isComplexType(t):\n resids = array([sum(abs(ravel(bstar)[n:])**2)],\n dtype=result_real_t)\n else:\n resids = array([sum((ravel(bstar)[n:])**2)],\n dtype=result_real_t)\n else:\n x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)\n if results['rank'] == n and m > n:\n if isComplexType(t):\n resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(\n result_real_t)\n else:\n resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(\n result_real_t)\n\n st = s[:min(n, m)].copy().astype(result_real_t)\n return wrap(x), wrap(resids), results['rank'], st\n\ndef norm(x, ord=None):\n \"\"\"\n Matrix or vector norm.\n\n This function is able to return one of seven different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like, shape (M,) or (M, N)\n Input array.\n ord : {non-zero int, inf, -inf, 'fro'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object.\n\n Returns\n -------\n n : float\n Norm of the matrix or vector.\n\n Notes\n -----\n For values of ``ord <= 0``, the result is, strictly speaking, not a\n mathematical 'norm', but it may still be useful for various numerical\n purposes.\n\n The following norms can be calculated:\n\n ===== ============================ ==========================\n ord norm for matrices norm for vectors\n ===== ============================ ==========================\n None Frobenius norm 2-norm\n 'fro' Frobenius norm --\n inf max(sum(abs(x), axis=1)) max(abs(x))\n -inf min(sum(abs(x), axis=1)) min(abs(x))\n 0 -- sum(x != 0)\n 1 max(sum(abs(x), axis=0)) as below\n -1 min(sum(abs(x), axis=0)) as below\n 2 2-norm (largest sing. value) as below\n -2 smallest singular value as below\n other -- sum(abs(x)**ord)**(1.\/ord)\n ===== ============================ ==========================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1\/2}`\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, -1, 0, 1, 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> LA.norm(a)\n 7.745966692414834\n >>> LA.norm(b)\n 7.745966692414834\n >>> LA.norm(b, 'fro')\n 7.745966692414834\n >>> LA.norm(a, np.inf)\n 4\n >>> LA.norm(b, np.inf)\n 9\n >>> LA.norm(a, -np.inf)\n 0\n >>> LA.norm(b, -np.inf)\n 2\n\n >>> LA.norm(a, 1)\n 20\n >>> LA.norm(b, 1)\n 7\n >>> LA.norm(a, -1)\n -4.6566128774142013e-010\n >>> LA.norm(b, -1)\n 6\n >>> LA.norm(a, 2)\n 7.745966692414834\n >>> LA.norm(b, 2)\n 7.3484692283495345\n\n >>> LA.norm(a, -2)\n nan\n >>> LA.norm(b, -2)\n 1.8570331885190563e-016\n >>> LA.norm(a, 3)\n 5.8480354764257312\n >>> LA.norm(a, -3)\n nan\n\n \"\"\"\n x = asarray(x)\n if ord is None: # check the default case first and handle it immediately\n return sqrt(add.reduce((x.conj() * x).ravel().real))\n\n nd = x.ndim\n if nd == 1:\n if ord == Inf:\n return abs(x).max()\n elif ord == -Inf:\n return abs(x).min()\n elif ord == 0:\n return (x != 0).sum() # Zero norm\n elif ord == 1:\n return abs(x).sum() # special case for speedup\n elif ord == 2:\n return sqrt(((x.conj()*x).real).sum()) # special case for speedup\n else:\n try:\n ord + 1\n except TypeError:\n raise ValueError, \"Invalid norm order for vectors.\"\n return ((abs(x)**ord).sum())**(1.0\/ord)\n elif nd == 2:\n if ord == 2:\n return svd(x, compute_uv=0).max()\n elif ord == -2:\n return svd(x, compute_uv=0).min()\n elif ord == 1:\n return abs(x).sum(axis=0).max()\n elif ord == Inf:\n return abs(x).sum(axis=1).max()\n elif ord == -1:\n return abs(x).sum(axis=0).min()\n elif ord == -Inf:\n return abs(x).sum(axis=1).min()\n elif ord in ['fro','f']:\n return sqrt(add.reduce((x.conj() * x).real.ravel()))\n else:\n raise ValueError, \"Invalid norm order for matrices.\"\n else:\n raise ValueError, \"Improper number of dimensions to norm.\"\n","license":"gpl-2.0"} {"repo_name":"ishanic\/scikit-learn","path":"sklearn\/preprocessing\/data.py","copies":"113","size":"56747","content":"# Authors: Alexandre Gramfort \n# Mathieu Blondel \n# Olivier Grisel \n# Andreas Mueller \n# Eric Martin \n# License: BSD 3 clause\n\nfrom itertools import chain, combinations\nimport numbers\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..externals import six\nfrom ..utils import check_array\nfrom ..utils.extmath import row_norms\nfrom ..utils.fixes import combinations_with_replacement as combinations_w_r\nfrom ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,\n inplace_csr_row_normalize_l2)\nfrom ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,\n min_max_axis, inplace_row_scale)\nfrom ..utils.validation import check_is_fitted, FLOAT_DTYPES\n\n\nzip = six.moves.zip\nmap = six.moves.map\nrange = six.moves.range\n\n__all__ = [\n 'Binarizer',\n 'KernelCenterer',\n 'MinMaxScaler',\n 'MaxAbsScaler',\n 'Normalizer',\n 'OneHotEncoder',\n 'RobustScaler',\n 'StandardScaler',\n 'add_dummy_feature',\n 'binarize',\n 'normalize',\n 'scale',\n 'robust_scale',\n 'maxabs_scale',\n 'minmax_scale',\n]\n\n\ndef _mean_and_std(X, axis=0, with_mean=True, with_std=True):\n \"\"\"Compute mean and std deviation for centering, scaling.\n\n Zero valued std components are reset to 1.0 to avoid NaNs when scaling.\n \"\"\"\n X = np.asarray(X)\n Xr = np.rollaxis(X, axis)\n\n if with_mean:\n mean_ = Xr.mean(axis=0)\n else:\n 
mean_ = None\n\n if with_std:\n std_ = Xr.std(axis=0)\n std_ = _handle_zeros_in_scale(std_)\n else:\n std_ = None\n\n return mean_, std_\n\n\ndef _handle_zeros_in_scale(scale):\n ''' Makes sure that whenever scale is zero, we handle it correctly.\n\n This happens in most scalers when we have constant features.'''\n\n # if we are fitting on 1D arrays, scale might be a scalar\n if np.isscalar(scale):\n if scale == 0:\n scale = 1.\n elif isinstance(scale, np.ndarray):\n scale[scale == 0.0] = 1.0\n scale[~np.isfinite(scale)] = 1.0\n return scale\n\n\ndef scale(X, axis=0, with_mean=True, with_std=True, copy=True):\n \"\"\"Standardize a dataset along any axis\n\n Center to the mean and component wise scale to unit variance.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the means and standard deviations along. If 0,\n independently standardize each feature, otherwise (if 1) standardize\n each sample.\n\n with_mean : boolean, True by default\n If True, center the data before scaling.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_mean=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he\/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n :class:`sklearn.preprocessing.StandardScaler` to perform centering and\n scaling using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,\n warn_on_dtype=True, estimator='the scale function',\n dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` instead\"\n \" See docstring for motivation and alternatives.\")\n if axis != 0:\n raise ValueError(\"Can only scale sparse matrix on axis=0, \"\n \" got axis=%d\" % axis)\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n _, var = mean_variance_axis(X, axis=0)\n var = _handle_zeros_in_scale(var)\n inplace_column_scale(X, 1 \/ np.sqrt(var))\n else:\n X = np.asarray(X)\n mean_, std_ = _mean_and_std(\n X, axis, with_mean=with_mean, with_std=with_std)\n if copy:\n X = X.copy()\n # Xr is a view on the original array that enables easy use of\n # broadcasting on the axis in which we are interested in\n Xr = np.rollaxis(X, axis)\n if with_mean:\n Xr -= mean_\n mean_1 = Xr.mean(axis=0)\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. 
In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n if not np.allclose(mean_1, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n if with_std:\n Xr \/= std_\n if with_mean:\n mean_2 = Xr.mean(axis=0)\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1\/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again:\n if not np.allclose(mean_2, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0. \")\n Xr -= mean_2\n return X\n\n\nclass MinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Transforms features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) \/ (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n feature_range: tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n copy : boolean, optional, default True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array).\n\n Attributes\n ----------\n min_ : ndarray, shape (n_features,)\n Per feature adjustment for minimum.\n\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n \"\"\"\n\n def __init__(self, feature_range=(0, 1), copy=True):\n self.feature_range = feature_range\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n feature_range = self.feature_range\n if feature_range[0] >= feature_range[1]:\n raise ValueError(\"Minimum of desired feature range must be smaller\"\n \" than maximum. 
Got %s.\" % str(feature_range))\n data_min = np.min(X, axis=0)\n data_range = np.max(X, axis=0) - data_min\n data_range = _handle_zeros_in_scale(data_range)\n self.scale_ = (feature_range[1] - feature_range[0]) \/ data_range\n self.min_ = feature_range[0] - data_min * self.scale_\n self.data_range = data_range\n self.data_min = data_min\n return self\n\n def transform(self, X):\n \"\"\"Scaling features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.\n \"\"\"\n check_is_fitted(self, 'scale_')\n\n X = check_array(X, copy=self.copy, ensure_2d=False)\n X *= self.scale_\n X += self.min_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.\n \"\"\"\n check_is_fitted(self, 'scale_')\n\n X = check_array(X, copy=self.copy, ensure_2d=False)\n X -= self.min_\n X \/= self.scale_\n return X\n\n\ndef minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):\n \"\"\"Transforms features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) \/ (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n feature_range: tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n axis : int (0 by default)\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n \"\"\"\n s = MinMaxScaler(feature_range=feature_range, copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass StandardScaler(BaseEstimator, TransformerMixin):\n \"\"\"Standardize features by removing the mean and scaling to unit variance\n\n Centering and scaling happen independently on each feature by computing\n the relevant statistics on the samples in the training set. Mean and\n standard deviation are then stored to be used on later data using the\n `transform` method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators: they might behave badly if the\n individual feature do not more or less look like standard normally\n distributed data (e.g. Gaussian with 0 mean and unit variance).\n\n For instance many elements used in the objective function of\n a learning algorithm (such as the RBF kernel of Support Vector\n Machines or the L1 and L2 regularizers of linear models) assume that\n all features are centered around 0 and have variance in the same\n order. 
If a feature has a variance that is orders of magnitude larger\n that others, it might dominate the objective function and make the\n estimator unable to learn from other features correctly as expected.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n with_mean : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n Attributes\n ----------\n mean_ : array of floats with shape [n_features]\n The mean value for each feature in the training set.\n\n std_ : array of floats with shape [n_features]\n The standard deviation for each feature in the training set.\n Set to one if the standard deviation is zero for a given feature.\n\n See also\n --------\n :func:`sklearn.preprocessing.scale` to perform centering and\n scaling without using the ``Transformer`` object oriented API\n\n :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`\n to further remove the linear correlation across features.\n \"\"\"\n\n def __init__(self, copy=True, with_mean=True, with_std=True):\n self.with_mean = with_mean\n self.with_std = with_std\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the mean and std to be used for later scaling.\n\n Parameters\n ----------\n X : array-like or CSR matrix with shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, accept_sparse='csr', copy=self.copy,\n ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. See docstring for motivation and alternatives.\")\n self.mean_ = None\n\n if self.with_std:\n var = mean_variance_axis(X, axis=0)[1]\n self.std_ = np.sqrt(var)\n self.std_ = _handle_zeros_in_scale(self.std_)\n else:\n self.std_ = None\n return self\n else:\n self.mean_, self.std_ = _mean_and_std(\n X, axis=0, with_mean=self.with_mean, with_std=self.with_std)\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Perform standardization by centering and scaling\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to scale along the features axis.\n \"\"\"\n check_is_fitted(self, 'std_')\n\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr', copy=copy,\n ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. 
See docstring for motivation and alternatives.\")\n if self.std_ is not None:\n inplace_column_scale(X, 1 \/ self.std_)\n else:\n if self.with_mean:\n X -= self.mean_\n if self.with_std:\n X \/= self.std_\n return X\n\n def inverse_transform(self, X, copy=None):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to scale along the features axis.\n \"\"\"\n check_is_fitted(self, 'std_')\n\n copy = copy if copy is not None else self.copy\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot uncenter sparse matrices: pass `with_mean=False` \"\n \"instead See docstring for motivation and alternatives.\")\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n if self.std_ is not None:\n inplace_column_scale(X, self.std_)\n else:\n X = np.asarray(X)\n if copy:\n X = X.copy()\n if self.with_std:\n X *= self.std_\n if self.with_mean:\n X += self.mean_\n return X\n\n\nclass MaxAbsScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scale each feature by its maximum absolute value.\n\n This estimator scales and translates each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0. It does not shift\/center the data, and\n thus does not destroy any sparsity.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n\n Attributes\n ----------\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n \"\"\"\n\n def __init__(self, copy=True):\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n mins, maxs = min_max_axis(X, axis=0)\n scales = np.maximum(np.abs(mins), np.abs(maxs))\n else:\n scales = np.abs(X).max(axis=0)\n scales = np.array(scales)\n scales = scales.reshape(-1)\n self.scale_ = _handle_zeros_in_scale(scales)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Scale the data\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data that should be scaled.\n \"\"\"\n check_is_fitted(self, 'scale_')\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if X.shape[0] == 1:\n inplace_row_scale(X, 1.0 \/ self.scale_)\n else:\n inplace_column_scale(X, 1.0 \/ self.scale_)\n else:\n X \/= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data that should be transformed back.\n \"\"\"\n check_is_fitted(self, 'scale_')\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if X.shape[0] == 1:\n inplace_row_scale(X, self.scale_)\n else:\n inplace_column_scale(X, self.scale_)\n else:\n X *= self.scale_\n return X\n\n\ndef maxabs_scale(X, 
axis=0, copy=True):\n \"\"\"Scale each feature to the [-1, 1] range without breaking the sparsity.\n\n This estimator scales each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n axis : int (0 by default)\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n \"\"\"\n s = MaxAbsScaler(copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass RobustScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scale features using statistics that are robust to outliers.\n\n This Scaler removes the median and scales the data according to\n the Interquartile Range (IQR). The IQR is the range between the 1st\n quartile (25th quantile) and the 3rd quartile (75th quantile).\n\n Centering and scaling happen independently on each feature (or each\n sample, depending on the `axis` argument) by computing the relevant\n statistics on the samples in the training set. Median and interquartile\n range are then stored to be used on later data using the `transform`\n method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators. Typically this is done by removing the mean\n and scaling to unit variance. However, outliers can often influence the\n sample mean \/ variance in a negative way. In such cases, the median and\n the interquartile range often give better results.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n with_centering : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_scaling : boolean, True by default\n If True, scale the data to interquartile range.\n\n copy : boolean, optional, default is True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. 
if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n Attributes\n ----------\n center_ : array of floats\n The median value for each feature in the training set.\n\n scale_ : array of floats\n The (scaled) interquartile range for each feature in the training set.\n\n See also\n --------\n :class:`sklearn.preprocessing.StandardScaler` to perform centering\n and scaling using mean and variance.\n\n :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`\n to further remove the linear correlation across features.\n\n Notes\n -----\n See examples\/preprocessing\/plot_robust_scaling.py for an example.\n\n http:\/\/en.wikipedia.org\/wiki\/Median_(statistics)\n http:\/\/en.wikipedia.org\/wiki\/Interquartile_range\n \"\"\"\n\n def __init__(self, with_centering=True, with_scaling=True, copy=True):\n self.with_centering = with_centering\n self.with_scaling = with_scaling\n self.copy = copy\n\n def _check_array(self, X, copy):\n \"\"\"Makes sure centering is not enabled for sparse matrices.\"\"\"\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_centering:\n raise ValueError(\n \"Cannot center sparse matrices: use `with_centering=False`\"\n \" instead. See docstring for motivation and alternatives.\")\n return X\n\n def fit(self, X, y=None):\n \"\"\"Compute the median and quantiles to be used for scaling.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to compute the median and quantiles\n used for later scaling along the features axis.\n \"\"\"\n if sparse.issparse(X):\n raise TypeError(\"RobustScaler cannot be fitted on sparse inputs\")\n\n X = self._check_array(X, self.copy)\n if self.with_centering:\n self.center_ = np.median(X, axis=0)\n\n if self.with_scaling:\n q = np.percentile(X, (25, 75), axis=0)\n self.scale_ = (q[1] - q[0])\n self.scale_ = _handle_zeros_in_scale(self.scale_)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Center and scale the data\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data used to scale along the specified axis.\n \"\"\"\n if self.with_centering:\n check_is_fitted(self, 'center_')\n if self.with_scaling:\n check_is_fitted(self, 'scale_')\n X = self._check_array(X, self.copy)\n if sparse.issparse(X):\n if self.with_scaling:\n if X.shape[0] == 1:\n inplace_row_scale(X, 1.0 \/ self.scale_)\n elif self.axis == 0:\n inplace_column_scale(X, 1.0 \/ self.scale_)\n else:\n if self.with_centering:\n X -= self.center_\n if self.with_scaling:\n X \/= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data used to scale along the specified axis.\n \"\"\"\n if self.with_centering:\n check_is_fitted(self, 'center_')\n if self.with_scaling:\n check_is_fitted(self, 'scale_')\n X = self._check_array(X, self.copy)\n if sparse.issparse(X):\n if self.with_scaling:\n if X.shape[0] == 1:\n inplace_row_scale(X, self.scale_)\n else:\n inplace_column_scale(X, self.scale_)\n else:\n if self.with_scaling:\n X *= self.scale_\n if self.with_centering:\n X += self.center_\n return X\n\n\ndef robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):\n \"\"\"Standardize a dataset along any axis\n\n Center to the median and component wise scale\n according to the interquartile range.\n\n Read more in the 
:ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like.\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the medians and IQR along. If 0,\n independently scale each feature, otherwise (if 1) scale\n each sample.\n\n with_centering : boolean, True by default\n If True, center the data before scaling.\n\n with_scaling : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default is True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_centering=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he\/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n :class:`sklearn.preprocessing.RobustScaler` to perform centering and\n scaling using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,\n copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass PolynomialFeatures(BaseEstimator, TransformerMixin):\n \"\"\"Generate polynomial and interaction features.\n\n Generate a new feature matrix consisting of all polynomial combinations\n of the features with degree less than or equal to the specified degree.\n For example, if an input sample is two dimensional and of the form\n [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].\n\n Parameters\n ----------\n degree : integer\n The degree of the polynomial features. Default = 2.\n\n interaction_only : boolean, default = False\n If true, only interaction features are produced: features that are\n products of at most ``degree`` *distinct* input features (so not\n ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).\n\n include_bias : boolean\n If True (default), then include a bias column, the feature in which\n all polynomial powers are zero (i.e. a column of ones - acts as an\n intercept term in a linear model).\n\n Examples\n --------\n >>> X = np.arange(6).reshape(3, 2)\n >>> X\n array([[0, 1],\n [2, 3],\n [4, 5]])\n >>> poly = PolynomialFeatures(2)\n >>> poly.fit_transform(X)\n array([[ 1, 0, 1, 0, 0, 1],\n [ 1, 2, 3, 4, 6, 9],\n [ 1, 4, 5, 16, 20, 25]])\n >>> poly = PolynomialFeatures(interaction_only=True)\n >>> poly.fit_transform(X)\n array([[ 1, 0, 1, 0],\n [ 1, 2, 3, 6],\n [ 1, 4, 5, 20]])\n\n Attributes\n ----------\n powers_ : array, shape (n_input_features, n_output_features)\n powers_[i, j] is the exponent of the jth input in the ith output.\n\n n_input_features_ : int\n The total number of input features.\n\n n_output_features_ : int\n The total number of polynomial output features. The number of output\n features is computed by iterating over all suitably sized combinations\n of input features.\n\n Notes\n -----\n Be aware that the number of features in the output array scales\n polynomially in the number of features of the input array, and\n exponentially in the degree. 
High degrees can cause overfitting.\n\n See :ref:`examples\/linear_model\/plot_polynomial_interpolation.py\n `\n \"\"\"\n def __init__(self, degree=2, interaction_only=False, include_bias=True):\n self.degree = degree\n self.interaction_only = interaction_only\n self.include_bias = include_bias\n\n @staticmethod\n def _combinations(n_features, degree, interaction_only, include_bias):\n comb = (combinations if interaction_only else combinations_w_r)\n start = int(not include_bias)\n return chain.from_iterable(comb(range(n_features), i)\n for i in range(start, degree + 1))\n\n @property\n def powers_(self):\n check_is_fitted(self, 'n_input_features_')\n\n combinations = self._combinations(self.n_input_features_, self.degree,\n self.interaction_only,\n self.include_bias)\n return np.vstack(np.bincount(c, minlength=self.n_input_features_)\n for c in combinations)\n\n def fit(self, X, y=None):\n \"\"\"\n Compute number of output features.\n \"\"\"\n n_samples, n_features = check_array(X).shape\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n self.n_input_features_ = n_features\n self.n_output_features_ = sum(1 for _ in combinations)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Transform data to polynomial features\n\n Parameters\n ----------\n X : array with shape [n_samples, n_features]\n The data to transform, row by row.\n\n Returns\n -------\n XP : np.ndarray shape [n_samples, NP]\n The matrix of features, where NP is the number of polynomial\n features generated from the combination of inputs.\n \"\"\"\n check_is_fitted(self, ['n_input_features_', 'n_output_features_'])\n\n X = check_array(X)\n n_samples, n_features = X.shape\n\n if n_features != self.n_input_features_:\n raise ValueError(\"X shape does not match training shape\")\n\n # allocate output data\n XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)\n\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n for i, c in enumerate(combinations):\n XP[:, i] = X[:, c].prod(1)\n\n return XP\n\n\ndef normalize(X, norm='l2', axis=1, copy=True):\n \"\"\"Scale input vectors individually to unit norm (vector length).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to normalize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : 0 or 1, optional (1 by default)\n axis used to normalize the data along. If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n See also\n --------\n :class:`sklearn.preprocessing.Normalizer` to perform normalization\n using the ``Transformer`` API (e.g. 
as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n if norm not in ('l1', 'l2', 'max'):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = 'csc'\n elif axis == 1:\n sparse_format = 'csr'\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,\n estimator='the normalize function', dtype=FLOAT_DTYPES)\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if norm == 'l1':\n inplace_csr_row_normalize_l1(X)\n elif norm == 'l2':\n inplace_csr_row_normalize_l2(X)\n elif norm == 'max':\n _, norms = min_max_axis(X, 1)\n norms = norms.repeat(np.diff(X.indptr))\n mask = norms != 0\n X.data[mask] \/= norms[mask]\n else:\n if norm == 'l1':\n norms = np.abs(X).sum(axis=1)\n elif norm == 'l2':\n norms = row_norms(X)\n elif norm == 'max':\n norms = np.max(X, axis=1)\n norms = _handle_zeros_in_scale(norms)\n X \/= norms[:, np.newaxis]\n\n if axis == 0:\n X = X.T\n\n return X\n\n\nclass Normalizer(BaseEstimator, TransformerMixin):\n \"\"\"Normalize samples individually to unit norm.\n\n Each sample (i.e. each row of the data matrix) with at least one\n non zero component is rescaled independently of other samples so\n that its norm (l1 or l2) equals one.\n\n This transformer is able to work both with dense numpy arrays and\n scipy.sparse matrix (use CSR format if you want to avoid the burden of\n a copy \/ conversion).\n\n Scaling inputs to unit norms is a common operation for text\n classification or clustering for instance. For instance the dot\n product of two l2-normalized TF-IDF vectors is the cosine similarity\n of the vectors and is the base similarity metric for the Vector\n Space Model commonly used by the Information Retrieval community.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample.\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix).\n\n Notes\n -----\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n\n See also\n --------\n :func:`sklearn.preprocessing.normalize` equivalent function\n without the object oriented API\n \"\"\"\n\n def __init__(self, norm='l2', copy=True):\n self.norm = norm\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Scale each non zero row of X to unit norm\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to normalize, row by row. 
scipy.sparse matrices should be\n in CSR format to avoid an un-necessary copy.\n \"\"\"\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr')\n return normalize(X, norm=self.norm, axis=1, copy=copy)\n\n\ndef binarize(X, threshold=0.0, copy=True):\n \"\"\"Boolean thresholding of array-like or scipy.sparse matrix\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR or CSC format to avoid an\n un-necessary copy.\n\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n set to False to perform inplace binarization and avoid a copy\n (if the input is already a numpy array or a scipy.sparse CSR \/ CSC\n matrix and if axis is 1).\n\n See also\n --------\n :class:`sklearn.preprocessing.Binarizer` to perform binarization\n using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)\n if sparse.issparse(X):\n if threshold < 0:\n raise ValueError('Cannot binarize a sparse matrix with threshold '\n '< 0')\n cond = X.data > threshold\n not_cond = np.logical_not(cond)\n X.data[cond] = 1\n X.data[not_cond] = 0\n X.eliminate_zeros()\n else:\n cond = X > threshold\n not_cond = np.logical_not(cond)\n X[cond] = 1\n X[not_cond] = 0\n return X\n\n\nclass Binarizer(BaseEstimator, TransformerMixin):\n \"\"\"Binarize data (set feature values to 0 or 1) according to a threshold\n\n Values greater than the threshold map to 1, while values less than\n or equal to the threshold map to 0. With the default threshold of 0,\n only positive values map to 1.\n\n Binarization is a common operation on text count data where the\n analyst can decide to only consider the presence or absence of a\n feature rather than a quantified number of occurrences for instance.\n\n It can also be used as a pre-processing step for estimators that\n consider boolean random variables (e.g. 
modelled using the Bernoulli\n distribution in a Bayesian setting).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n set to False to perform inplace binarization and avoid a copy (if\n the input is already a numpy array or a scipy.sparse CSR matrix).\n\n Notes\n -----\n If the input is a sparse matrix, only the non-zero values are subject\n to update by the Binarizer class.\n\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n \"\"\"\n\n def __init__(self, threshold=0.0, copy=True):\n self.threshold = threshold\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n check_array(X, accept_sparse='csr')\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Binarize each element of X\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n \"\"\"\n copy = copy if copy is not None else self.copy\n return binarize(X, threshold=self.threshold, copy=copy)\n\n\nclass KernelCenterer(BaseEstimator, TransformerMixin):\n \"\"\"Center a kernel matrix\n\n Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a\n function mapping x to a Hilbert space. KernelCenterer centers (i.e.,\n normalize to have zero mean) the data without explicitly computing phi(x).\n It is equivalent to centering phi(x) with\n sklearn.preprocessing.StandardScaler(with_std=False).\n\n Read more in the :ref:`User Guide `.\n \"\"\"\n\n def fit(self, K, y=None):\n \"\"\"Fit KernelCenterer\n\n Parameters\n ----------\n K : numpy array of shape [n_samples, n_samples]\n Kernel matrix.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n K = check_array(K)\n n_samples = K.shape[0]\n self.K_fit_rows_ = np.sum(K, axis=0) \/ n_samples\n self.K_fit_all_ = self.K_fit_rows_.sum() \/ n_samples\n return self\n\n def transform(self, K, y=None, copy=True):\n \"\"\"Center kernel matrix.\n\n Parameters\n ----------\n K : numpy array of shape [n_samples1, n_samples2]\n Kernel matrix.\n\n copy : boolean, optional, default True\n Set to False to perform inplace computation.\n\n Returns\n -------\n K_new : numpy array of shape [n_samples1, n_samples2]\n \"\"\"\n check_is_fitted(self, 'K_fit_all_')\n\n K = check_array(K)\n if copy:\n K = K.copy()\n\n K_pred_cols = (np.sum(K, axis=1) \/\n self.K_fit_rows_.shape[0])[:, np.newaxis]\n\n K -= self.K_fit_rows_\n K -= K_pred_cols\n K += self.K_fit_all_\n\n return K\n\n\ndef add_dummy_feature(X, value=1.0):\n \"\"\"Augment dataset with an additional dummy feature.\n\n This is useful for fitting an intercept term with implementations which\n cannot otherwise fit it directly.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n Data.\n\n value : float\n Value to use for the dummy feature.\n\n Returns\n -------\n\n X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]\n Same data with dummy feature added as first column.\n\n Examples\n --------\n\n >>> from 
sklearn.preprocessing import add_dummy_feature\n >>> add_dummy_feature([[0, 1], [1, 0]])\n array([[ 1., 0., 1.],\n [ 1., 1., 0.]])\n \"\"\"\n X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])\n n_samples, n_features = X.shape\n shape = (n_samples, n_features + 1)\n if sparse.issparse(X):\n if sparse.isspmatrix_coo(X):\n # Shift columns to the right.\n col = X.col + 1\n # Column indices of dummy feature are 0 everywhere.\n col = np.concatenate((np.zeros(n_samples), col))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n row = np.concatenate((np.arange(n_samples), X.row))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.ones(n_samples) * value, X.data))\n return sparse.coo_matrix((data, (row, col)), shape)\n elif sparse.isspmatrix_csc(X):\n # Shift index pointers since we need to add n_samples elements.\n indptr = X.indptr + n_samples\n # indptr[0] must be 0.\n indptr = np.concatenate((np.array([0]), indptr))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n indices = np.concatenate((np.arange(n_samples), X.indices))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.ones(n_samples) * value, X.data))\n return sparse.csc_matrix((data, indices, indptr), shape)\n else:\n klass = X.__class__\n return klass(add_dummy_feature(X.tocoo(), value))\n else:\n return np.hstack((np.ones((n_samples, 1)) * value, X))\n\n\ndef _transform_selected(X, transform, selected=\"all\", copy=True):\n \"\"\"Apply a transform function to portion of selected features\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Dense array or sparse matrix.\n\n transform : callable\n A callable transform(X) -> X_transformed\n\n copy : boolean, optional\n Copy X even if it could be avoided.\n\n selected: \"all\" or array of indices or mask\n Specify which features to apply the transform to.\n\n Returns\n -------\n X : array or sparse matrix, shape=(n_samples, n_features_new)\n \"\"\"\n if selected == \"all\":\n return transform(X)\n\n X = check_array(X, accept_sparse='csc', copy=copy)\n\n if len(selected) == 0:\n return X\n\n n_features = X.shape[1]\n ind = np.arange(n_features)\n sel = np.zeros(n_features, dtype=bool)\n sel[np.asarray(selected)] = True\n not_sel = np.logical_not(sel)\n n_selected = np.sum(sel)\n\n if n_selected == 0:\n # No features selected.\n return X\n elif n_selected == n_features:\n # All features selected.\n return transform(X)\n else:\n X_sel = transform(X[:, ind[sel]])\n X_not_sel = X[:, ind[not_sel]]\n\n if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):\n return sparse.hstack((X_sel, X_not_sel))\n else:\n return np.hstack((X_sel, X_not_sel))\n\n\nclass OneHotEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Encode categorical integer features using a one-hot aka one-of-K scheme.\n\n The input to this transformer should be a matrix of integers, denoting\n the values taken on by categorical (discrete) features. The output will be\n a sparse matrix where each column corresponds to one possible value of one\n feature. 
It is assumed that input features take on values in the range\n [0, n_values).\n\n This encoding is needed for feeding categorical data to many scikit-learn\n estimators, notably linear models and SVMs with the standard kernels.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_values : 'auto', int or array of ints\n Number of values per feature.\n\n - 'auto' : determine value range from training data.\n - int : maximum value for all features.\n - array : maximum value per feature.\n\n categorical_features: \"all\" or array of indices or mask\n Specify what features are treated as categorical.\n\n - 'all' (default): All features are treated as categorical.\n - array of indices: Array of categorical feature indices.\n - mask: Array of length n_features and with dtype=bool.\n\n Non-categorical features are always stacked to the right of the matrix.\n\n dtype : number type, default=np.float\n Desired dtype of output.\n\n sparse : boolean, default=True\n Will return sparse matrix if set True else will return an array.\n\n handle_unknown : str, 'error' or 'ignore'\n Whether to raise an error or ignore if a unknown categorical feature is\n present during transform.\n\n Attributes\n ----------\n active_features_ : array\n Indices for active features, meaning values that actually occur\n in the training set. Only available when n_values is ``'auto'``.\n\n feature_indices_ : array of shape (n_features,)\n Indices to feature ranges.\n Feature ``i`` in the original data is mapped to features\n from ``feature_indices_[i]`` to ``feature_indices_[i+1]``\n (and then potentially masked by `active_features_` afterwards)\n\n n_values_ : array of shape (n_features,)\n Maximum number of values per feature.\n\n Examples\n --------\n Given a dataset with three features and two samples, we let the encoder\n find the maximum value per feature and transform the data to a binary\n one-hot encoding.\n\n >>> from sklearn.preprocessing import OneHotEncoder\n >>> enc = OneHotEncoder()\n >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \\\n[1, 0, 2]]) # doctest: +ELLIPSIS\n OneHotEncoder(categorical_features='all', dtype=<... 
'float'>,\n handle_unknown='error', n_values='auto', sparse=True)\n >>> enc.n_values_\n array([2, 3, 4])\n >>> enc.feature_indices_\n array([0, 2, 5, 9])\n >>> enc.transform([[0, 1, 1]]).toarray()\n array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])\n\n See also\n --------\n sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of\n dictionary items (also handles string-valued features).\n sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot\n encoding of dictionary items or strings.\n \"\"\"\n def __init__(self, n_values=\"auto\", categorical_features=\"all\",\n dtype=np.float, sparse=True, handle_unknown='error'):\n self.n_values = n_values\n self.categorical_features = categorical_features\n self.dtype = dtype\n self.sparse = sparse\n self.handle_unknown = handle_unknown\n\n def fit(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_feature)\n Input array of type int.\n\n Returns\n -------\n self\n \"\"\"\n self.fit_transform(X)\n return self\n\n def _fit_transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"X needs to contain only non-negative integers.\")\n n_samples, n_features = X.shape\n if self.n_values == 'auto':\n n_values = np.max(X, axis=0) + 1\n elif isinstance(self.n_values, numbers.Integral):\n if (np.max(X, axis=0) >= self.n_values).any():\n raise ValueError(\"Feature out of bounds for n_values=%d\"\n % self.n_values)\n n_values = np.empty(n_features, dtype=np.int)\n n_values.fill(self.n_values)\n else:\n try:\n n_values = np.asarray(self.n_values, dtype=int)\n except (ValueError, TypeError):\n raise TypeError(\"Wrong type for parameter `n_values`. Expected\"\n \" 'auto', int or array of ints, got %r\"\n % type(X))\n if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:\n raise ValueError(\"Shape mismatch: if n_values is an array,\"\n \" it has to be of shape (n_features,).\")\n\n self.n_values_ = n_values\n n_values = np.hstack([[0], n_values])\n indices = np.cumsum(n_values)\n self.feature_indices_ = indices\n\n column_indices = (X + indices[:-1]).ravel()\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)\n data = np.ones(n_samples * n_features)\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n\n if self.n_values == 'auto':\n mask = np.array(out.sum(axis=0)).ravel() != 0\n active_features = np.where(mask)[0]\n out = out[:, active_features]\n self.active_features_ = active_features\n\n return out if self.sparse else out.toarray()\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X, then transform X.\n\n Equivalent to self.fit(X).transform(X), but more convenient and more\n efficient. 
See fit for the parameters, transform for the return value.\n \"\"\"\n return _transform_selected(X, self._fit_transform,\n self.categorical_features, copy=True)\n\n def _transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"X needs to contain only non-negative integers.\")\n n_samples, n_features = X.shape\n\n indices = self.feature_indices_\n if n_features != indices.shape[0] - 1:\n raise ValueError(\"X has different shape than during fitting.\"\n \" Expected %d, got %d.\"\n % (indices.shape[0] - 1, n_features))\n\n # We use only those catgorical features of X that are known using fit.\n # i.e lesser than n_values_ using mask.\n # This means, if self.handle_unknown is \"ignore\", the row_indices and\n # col_indices corresponding to the unknown categorical feature are\n # ignored.\n mask = (X < self.n_values_).ravel()\n if np.any(~mask):\n if self.handle_unknown not in ['error', 'ignore']:\n raise ValueError(\"handle_unknown should be either error or \"\n \"unknown got %s\" % self.handle_unknown)\n if self.handle_unknown == 'error':\n raise ValueError(\"unknown categorical feature present %s \"\n \"during transform.\" % X[~mask])\n\n column_indices = (X + indices[:-1]).ravel()[mask]\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(np.sum(mask))\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if self.n_values == 'auto':\n out = out[:, self.active_features_]\n\n return out if self.sparse else out.toarray()\n\n def transform(self, X):\n \"\"\"Transform X using one-hot encoding.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_features)\n Input array of type int.\n\n Returns\n -------\n X_out : sparse matrix if sparse=True else a 2-d array, dtype=int\n Transformed input.\n \"\"\"\n return _transform_selected(X, self._transform,\n self.categorical_features, copy=True)\n","license":"bsd-3-clause"} {"repo_name":"mueller-lab\/PyFRAP","path":"pyfrp\/modules\/pyfrp_optimization_module.py","copies":"2","size":"6867","content":"#=====================================================================================================================================\n#Copyright\n#=====================================================================================================================================\n\n#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society\n#This software is distributed under the terms of the GNU General Public License.\n\n#This file is part of PyFRAP.\n\n#PyFRAP is free software: you can redistribute it and\/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation, either version 3 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License\n#along with this program. 
If not, see .\n\n#===========================================================================================================================================================================\n#Module Description\n#===========================================================================================================================================================================\n\n\"\"\"Optimization module for PyFRAP toolbox.\n\nCurrently contains all functions necessary to transform a constrained FRAP optimization problem into\na unconstrained one, making it suitable to Nelder-Mead optimization algorithm. \n\n\"\"\"\n\n#===========================================================================================================================================================================\n#Importing necessary modules\n#===========================================================================================================================================================================\n\n#Numpy\/Scipy\nimport numpy as np\n\n#PyFRAP\nimport pyfrp_fit_module \n\nfrom pyfrp_term_module import *\n\n#===========================================================================================================================================================================\n#Module Functions\n#===========================================================================================================================================================================\n\ndef constrObjFunc(x,fit,debug,ax,returnFit):\n\t\n\t\"\"\"Objective function when using Constrained Nelder-Mead.\n\t\n\tCalls :py:func:`pyfrp.modules.pyfrp_optimization_module.xTransform` to transform x into\n\tconstrained version, then uses :py:func:`pyfrp.modules.pyfrp_fit_module.FRAPObjFunc` to \n\tfind SSD.\n\t\n\tArgs:\n\t\tx (list): Input vector, consiting of [D,(prod),(degr)].\n\t\tfit (pyfrp.subclasses.pyfrp_fit): Fit object.\n\t\tdebug (bool): Display debugging output and plots.\n\t\tax (matplotlib.axes): Axes to display plots in.\n\t\treturnFit (bool): Return fit instead of SSD.\n\t\n\tReturns:\n\t\t float: SSD of fit. Except ``returnFit==True``, then will return fit itself. \n\t\"\"\"\n\t\n\t\n\tLBs, UBs = buildBoundLists(fit)\n\t\n\tx=xTransform(x,LBs,UBs)\n\n\tssd=pyfrp_fit_module.FRAPObjFunc(x,fit,debug,ax,returnFit)\n\t\n\treturn ssd\n\ndef xTransform(x,LB,UB):\n\t\n\t\"\"\"Transforms ``x`` into constrained form, obeying upper \n\tbounds ``UB`` and lower bounds ``LB``.\n\t\n\t.. note:: Will add tiny offset to LB(D), to avoid singularities.\n\t\n\tIdea taken from http:\/\/www.mathworks.com\/matlabcentral\/fileexchange\/8277-fminsearchbnd--fminsearchcon\n\t\n\tArgs:\n\t\tx (list): Input vector, consiting of [D,(prod),(degr)].\n\t\tLB (list): List of lower bounds for ``D,prod,degr``.\n\t\tUB (list): List of upper bounds for ``D,prod,degr``.\n\t\n\tReturns:\n\t\tlist: Transformed x-values. 
\n\t\"\"\"\n\t\n\t#Make sure everything is float\n\tx=np.asarray(x,dtype=np.float64)\n\tLB=np.asarray(LB,dtype=np.float64)\n\tUB=np.asarray(UB,dtype=np.float64)\n\t\n\t#Check if LB_D==0, then add a little noise to it so we do not end up with xtrans[D]==0 and later have singularities when scaling tvec\n\tif LB[0]==0:\n\t\tLB[0]=1E-10\n\t\n\t#Determine number of parameters to be fitted\n\tnparams=len(x)\n\n\t#Make empty vector\n\txtrans = np.zeros(np.shape(x))\n\t\n\t# k allows some variables to be fixed, thus dropped from the\n\t# optimization.\n\tk=0\n\n\tfor i in range(nparams):\n\n\t\t#Upper bound only\n\t\tif UB[i]!=None and LB[i]==None:\n\t\t\n\t\t\txtrans[i]=UB[i]-x[k]**2\n\t\t\tk=k+1\n\t\t\t\n\t\t#Lower bound only\t\n\t\telif UB[i]==None and LB[i]!=None:\n\t\t\t\n\t\t\txtrans[i]=LB[i]+x[k]**2\n\t\t\tk=k+1\n\t\t\n\t\t#Both bounds\n\t\telif UB[i]!=None and LB[i]!=None:\n\t\t\t\n\t\t\txtrans[i] = (np.sin(x[k])+1.)\/2.*(UB[i] - LB[i]) + LB[i]\n\t\t\txtrans[i] = max([LB[i],min([UB[i],xtrans[i]])])\n\t\t\tk=k+1\n\t\t\n\t\t#No bounds\n\t\telif UB[i]==None and LB[i]==None:\n\t\t\n\t\t\txtrans[i] = x[k]\n\t\t\tk=k+1\n\t\t\t\n\t\t#Note: The original file has here another case for fixed variable, but since we made the decision earlier which when we call frap_fitting, we don't need this here.\n\t\n\treturn xtrans\t\n\t\t\ndef transformX0(x0,LB,UB):\n\t\n\t\"\"\"Transforms ``x0`` into constrained form, obeying upper \n\tbounds ``UB`` and lower bounds ``LB``.\n\t\n\tIdea taken from http:\/\/www.mathworks.com\/matlabcentral\/fileexchange\/8277-fminsearchbnd--fminsearchcon\n\t\n\tArgs:\n\t\tx0 (list): Input initial vector, consiting of [D,(prod),(degr)].\n\t\tLB (list): List of lower bounds for ``D,prod,degr``.\n\t\tUB (list): List of upper bounds for ``D,prod,degr``.\n\t\n\tReturns:\n\t\tlist: Transformed x-values. 
\n\t\"\"\"\n\t\n\tx0u = list(x0)\n\t\n\tnparams=len(x0)\n\t\n\tk=0\n\tfor i in range(nparams):\n\t\t\n\t\t#Upper bound only\n\t\tif UB[i]!=None and LB[i]==None:\n\t\t\tif UB[i]<=x0[i]:\n\t\t\t\tx0u[k]=0\n\t\t\telse:\n\t\t\t\tx0u[k]=sqrt(UB[i]-x0[i])\t\n\t\t\tk=k+1\n\t\t\t\n\t\t#Lower bound only\n\t\telif UB[i]==None and LB[i]!=None:\n\t\t\tif LB[i]>=x0[i]:\n\t\t\t\tx0u[k]=0\n\t\t\telse:\n\t\t\t\tx0u[k]=np.sqrt(x0[i]-LB[i])\t\n\t\t\tk=k+1\n\t\t\n\t\t\n\t\t#Both bounds\n\t\telif UB[i]!=None and LB[i]!=None:\n\t\t\tif UB[i]<=x0[i]:\n\t\t\t\tx0u[k]=np.pi\/2\n\t\t\telif LB[i]>=x0[i]:\n\t\t\t\tx0u[k]=-np.pi\/2\n\t\t\telse:\n\t\t\t\tx0u[k] = 2*(x0[i] - LB[i])\/(UB[i]-LB[i]) - 1;\n\t\t\t\t#shift by 2*pi to avoid problems at zero in fminsearch otherwise, the initial simplex is vanishingly small\n\t\t\t\tx0u[k] = 2*np.pi+np.arcsin(max([-1,min(1,x0u[k])]));\n\t\t\tk=k+1\n\t\t\n\t\t#No bounds\n\t\telif UB[i]==None and LB[i]==None:\n\t\t\tx0u[k] = x[i]\n\t\t\tk=k+1\n\t\n\treturn x0u\n\ndef buildBoundLists(fit):\n\t\n\t\"\"\"Builds list of lower bounds and upper bounds.\n\t\n\tArgs:\n\t\tfit (pyfrp.subclasses.pyfrp_fit): Fit object.\n\t\t\n\tReturns:\n\t\ttuple: Tuple containing: \n\t\t\n\t\t\t* LBs (list): List of lower bounds.\n\t\t\t* UBs (list): List of upper bounds.\n\t\t\t\n\t\n\t\n\t\"\"\"\n\t\n\tLBs=[fit.LBD]+int(fit.fitProd)*[fit.LBProd]+int(fit.fitDegr)*[fit.LBDegr]+len(fit.ROIsFitted)*[fit.LBEqu]\n\tUBs=[fit.UBD]+int(fit.fitProd)*[fit.UBProd]+int(fit.fitDegr)*[fit.UBDegr]+len(fit.ROIsFitted)*[fit.UBEqu]\n\t\n\treturn LBs,UBs","license":"gpl-3.0"} {"repo_name":"ryanraaum\/african-mtdna","path":"popdata_sources\/coelho2009\/process.py","copies":"1","size":"2502","content":"from oldowan.mtconvert import seq2sites, sites2seq, str2sites\nfrom string import translate\nimport pandas as pd\nimport sys\n\nsys.path.append('..\/..\/scripts')\nfrom utils import *\n\n## load metadata\nmetadata = pd.read_csv('metadata.csv', index_col=0)\n\nregionparts = metadata.ix[0,'SeqRange'].split(';')\nregion1 = range2region(regionparts[0])\nregion2 = range2region(regionparts[1])\n\nwith open('coelho2009_haplotypes.csv', 'rU') as f:\n\tf.readline() # skip past header\n\tdata = f.readlines()\n\nhids = []\nhvr1sites = []\nhvr2sites = []\n\nfor l in data:\n\tparts = l.strip().split(',')\n\tif int(parts[3]) == 377 and int(parts[7]) == 268: \n\t\thids.append(parts[0])\n\t\thvr1sites.append(parts[4])\n\t\thvr2sites.append(parts[8])\n\n## need to preprocess sites data for some nonstandard notation in hvr2\nhvr1 = []\nhvr2 = []\nfor i in range(len(hids)):\n\ts1 = str2sites(hvr1sites[i], add16k=True)\n\thvr1.append(s1)\n\n\ts2 = hvr2sites[i].split()\n\ts2new = []\n\tfor j in range(len(s2)):\n\t\tif s2[j].endswith('.2C'):\n\t\t\tparts = s2[j].split('.')\n\t\t\ts2new.append('%s.1C' % parts[0])\n\t\t\ts2new.append('%s.2C' % parts[0])\n\t\telse:\n\t\t\ts2new.append(s2[j])\n\ts2 = str2sites(' '.join(s2new))\n\thvr2.append(s2)\n\nnewsites = []\nfor i in range(len(hvr1)):\n\tnewsites.append(hvr1[i] + hvr2[i])\n\n## Validate\npassed_validation = True\n\nfor i in range(len(newsites)):\n\tcurr_sites = newsites[i]\n\tseq1 = translate(sites2seq(curr_sites, region1), None, '-')\n\tseq2 = translate(sites2seq(curr_sites, region2), None, '-')\n\tmysites = seq2sites(seq1) + seq2sites(seq1)\n\tif not mysites == curr_sites:\n\t\tmyseq1 = translate(sites2seq(mysites, region1), None, '-')\n\t\tmyseq2 = translate(sites2seq(mysites, region2), None, '-')\n\t\tif not seq1 == myseq1 and seq2 == myseq2:\n\t\t\tpassed_validation = False\n\t\t\tprint i, 
hids[i]\n\nif passed_validation:\n\tcounts = pd.read_csv('coelho2009_counts.csv', index_col=0)\n\tcounts = counts.fillna(0)\n\tcounter = [0] * 5\n\twith open('processed.csv', 'w') as f:\n\t\tfor i in range(len(newsites)):\n\t\t\thid = hids[i]\n\t\t\tcurr_sites = newsites[i]\n\t\t\tseq1 = translate(sites2seq(curr_sites, region1), None, '-')\n\t\t\tseq2 = translate(sites2seq(curr_sites, region2), None, '-')\n\t\t\tmysites = seq2sites(seq1) + seq2sites(seq2)\n\t\t\tmysites = ' '.join([str(x) for x in mysites])\n\t\t\tfor j in range(len(metadata.index)):\n\t\t\t\tprefix = metadata.ix[metadata.index[j],'NewPrefix']\n\t\t\t\tfor k in range(int(counts.ix[hid, metadata.index[j]])):\n\t\t\t\t\tcounter[j] += 1\n\t\t\t\t\tnum = str(counter[j]).zfill(3)\n\t\t\t\t\tnewid = prefix + num\n\t\t\t\t\tf.write('%s,%s,%s\\n' % (newid, hid, mysites))","license":"cc0-1.0"} {"repo_name":"koobonil\/Boss2D","path":"Boss2D\/addon\/_old\/webrtc-qt5.11.2_for_boss\/tools_webrtc\/cpu\/cpu_mon.py","copies":"6","size":"2057","content":"#!\/usr\/bin\/env python\n#\n# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.\n#\n# Use of this source code is governed by a BSD-style license\n# that can be found in the LICENSE file in the root of the source\n# tree. An additional intellectual property rights grant can be found\n# in the file PATENTS. All contributing project authors may\n# be found in the AUTHORS file in the root of the source tree.\n\n\nimport psutil\nimport sys\n\nimport numpy\nfrom matplotlib import pyplot\n\n\nclass CpuSnapshot(object):\n def __init__(self, label):\n self.label = label\n self.samples = []\n\n def Capture(self, sample_count):\n print ('Capturing %d CPU samples for %s...' %\n ((sample_count - len(self.samples)), self.label))\n while len(self.samples) < sample_count:\n self.samples.append(psutil.cpu_percent(1.0, False))\n\n def Text(self):\n return ('%s: avg=%s, median=%s, min=%s, max=%s' %\n (self.label, numpy.average(self.samples),\n numpy.median(self.samples),\n numpy.min(self.samples), numpy.max(self.samples)))\n\n def Max(self):\n return numpy.max(self.samples)\n\n\ndef GrabCpuSamples(sample_count):\n print 'Label for snapshot (enter to quit): '\n label = raw_input().strip()\n if len(label) == 0:\n return None\n\n snapshot = CpuSnapshot(label)\n snapshot.Capture(sample_count)\n\n return snapshot\n\n\ndef main():\n print 'How many seconds to capture per snapshot (enter for 60)?'\n sample_count = raw_input().strip()\n if len(sample_count) > 0 and int(sample_count) > 0:\n sample_count = int(sample_count)\n else:\n print 'Defaulting to 60 samples.'\n sample_count = 60\n\n snapshots = []\n while True:\n snapshot = GrabCpuSamples(sample_count)\n if snapshot == None:\n break\n snapshots.append(snapshot)\n\n if len(snapshots) == 0:\n print 'no samples captured'\n return -1\n\n pyplot.title('CPU usage')\n\n for s in snapshots:\n pyplot.plot(s.samples, label=s.Text(), linewidth=2)\n\n pyplot.legend()\n\n pyplot.show()\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n","license":"mit"} {"repo_name":"aemerick\/galaxy_analysis","path":"method_paper_plots\/star_abundances.py","copies":"1","size":"26128","content":"from galaxy_analysis.plot.plot_styles import *\nimport matplotlib.pyplot as plt\nimport glob\nimport deepdish as dd\nimport yt\nfrom galaxy_analysis.utilities import utilities\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter\nfrom galaxy_analysis.particle_analysis.abundances import single_MDF\n#\nfrom galaxy_analysis.analysis import Galaxy\n\nfrom 
mpl_toolkits.axes_grid1 import make_axes_locatable\nimport h5py\n\n# grab the most recent file\nworkdir = '\/mnt\/ceph\/users\/emerick\/enzo_runs\/pleiades\/starIC\/run11_30km\/final_sndriving\/'\n#workdir = '\/home\/emerick\/work\/enzo_runs\/pleiades\/starIC\/run11_30km\/final_sndriving\/'\ndata_files = np.sort(glob.glob(workdir + 'DD????'))\nname = data_files[-1].split('final_sndriving\/')[1]\ngal = Galaxy(name, wdir = workdir)\n\n#\n#\n#\ndef plot_alpha_vs_fe():\n fig,ax = plt.subplots()\n fig.set_size_inches(8,7)\n\n ptype = gal.df['particle_type']\n fe_over_h = gal.df[('io','particle_Fe_over_H')]\n alpha = gal.df[('io','particle_alpha_over_Fe')]\n age = (gal.ds.current_time - gal.df[('io','creation_time')]).convert_to_units('Myr')\n\n age = age - np.min(age)\n\n p = ax.scatter(fe_over_h[ptype==11], alpha[ptype==11],\n s = point_size, lw = 2, c = age[ptype==11], cmap = 'plasma_r', alpha = 0.75)\n p.set_clim([0.0, np.max(age)])\n cb = fig.colorbar(p)\n cb.set_label(r'Stellar Age (Myr)')\n\n ax.set_xlim(-9,-1)\n ax.set_ylim(-1.75,1.75)\n\n ax.set_xlabel(r'[Fe\/H]')\n ax.set_ylabel(r'[$\\rm \\alpha$\/Fe]')\n\n plt.minorticks_on()\n plt.tight_layout()\n fig.savefig('alpha_over_fe.png')\n plt.close()\n\n return\n\ndef plot_alpha_vs_fe_movie():\n times = np.arange(0, 245, 1)\n for i, t in enumerate(times):\n plot_alpha_vs_fe_with_histograms(t_f = t, image_num = i)\n\ndef plot_alpha_vs_fe_with_histograms(t_f = None, image_num = 0):\n\n sep = 0.02\n left, width = 0.125, 0.65\n bottom, height = 0.1, 0.65\n left_h = left + width + sep\n bottom_h = bottom + height + sep\n\n rect_scatter = [left,bottom,width,height]\n# rect_colorbar =\n# rect_histx = [left, bottom_h, width, 0.95 - bottom_h - (left-bottom)]\n# rect_histy = [left_h, bottom, 0.95 - left_h, height]\n\n# fig,ax = plt.subplots()\n fig = plt.figure(1, figsize=(8,8))\n# fig.set_size_inches(8,8)\n\n ax_scatter = plt.axes(rect_scatter)\n# ax_hist_x = plt.axes(rect_histx)\n# ax_hist_y = plt.axes(rect_histy)\n# ax_color = plt.axes(rect_colorbar)\n\n ptype = gal.df['particle_type']\n fe_over_h = gal.df[('io','particle_Fe_over_H')]\n alpha = gal.df[('io','particle_alpha_over_Fe')]\n creation_time = gal.df[('io','creation_time')].convert_to_units('Myr')\n age = (gal.ds.current_time - creation_time)\n\n if t_f is None: # plot normally all MS stars\n age = age - np.min(age)\n\n # scatter plot\n p = ax_scatter.scatter(fe_over_h[ptype==11], alpha[ptype==11],\n s = point_size, lw = 2, c = age[ptype==11], cmap = 'plasma_r', alpha = 0.75)\n p.set_clim([0.0, np.max(age)])\n else:\n min_clim = 0.0\n max_clim = np.max( age - np.min(age))\n\n particle_lifetimes = gal.df[('io','particle_model_lifetime')].convert_to_units('Myr')\n selection = (t_f >= creation_time) * ( t_f < creation_time + particle_lifetimes)\n age = t_f - creation_time\n\n if np.size(fe_over_h[selection]) < 1:\n plot_fe_over_h = np.ones(np.size(fe_over_h))*(-10000) # make dummy values so plot still diplays, but is empty\n plot_alpha = np.ones(np.size(alpha))*(-10000)\n plot_age = np.ones(np.size(age))*(-10000)\n else:\n plot_fe_over_h = fe_over_h[selection]\n plot_alpha = alpha[selection]\n plot_age = age[selection]\n\n p = ax_scatter.scatter(plot_fe_over_h, plot_alpha, s = point_size, lw = 2,\n c = plot_age, cmap = 'plasma_r', alpha = 0.75)\n p.set_clim([min_clim,max_clim])\n\n cb = fig.colorbar(p, ax = ax_scatter, orientation = 'horizontal', pad = 0.125, fraction = 0.046,\n aspect = 40)\n cb.set_label(r'Stellar Age (Myr)')\n#\n#\n ax_scatter.set_xlim(-9,-1)\n 
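    # Note (added comment): the axis ranges and the colorbar limits above are fixed rather than
    # autoscaled, presumably so the frames written by plot_alpha_vs_fe_movie() share a common scale.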
ax_scatter.set_ylim(-1.75,1.75)\n ax_scatter.tick_params(axis='x',which='minor',bottom='on')\n ax_scatter.tick_params(axis='y',which='minor',bottom='on')\n\n ax_scatter.set_xlabel(r'[Fe\/H]')\n ax_scatter.set_ylabel(r'[$\\rm \\alpha$\/Fe]')\n plt.minorticks_on()\n ax_scatter.plot( ax_scatter.get_xlim(), [0.0,0.0], lw = line_width, color = 'black', ls = '--')\n\n #\n # find main plot and construct histograms\n #\n divider = make_axes_locatable(ax_scatter)\n left, bottom, width, height = divider.get_position()\n# width, height = divider.get_horizontal(), divider.get_vertical()\n sep = 0.01\n thickness = np.min( np.array([0.95 - left - width - sep, 0.95 - bottom - height - sep]))\n rect_histx = [left, bottom + height + sep, width, thickness]\n rect_histy = [left + width + sep, bottom, thickness, height]\n ax_hist_x = plt.axes(rect_histx)\n ax_hist_y = plt.axes(rect_histy)\n\n\n nbins = 100\n hist,bins = np.histogram(fe_over_h, bins = nbins)\n weights = np.ones(np.size(fe_over_h)) * (1.0 \/ (1.0*np.max(hist)))\n ax_hist_x.hist(fe_over_h, color = 'C0', bins = nbins, weights = weights)\n if not (t_f is None):\n if np.max(plot_fe_over_h) > -1000:\n hist,bins = np.histogram(plot_fe_over_h, bins = nbins)\n weights = np.ones(np.size(plot_fe_over_h)) * (1.0 \/ (1.0*np.max(hist)))\n ax_hist_x.hist(plot_fe_over_h, color = 'black', bins = nbins, weights = weights, \n histtype = 'step', lw = 2.0)\n\n# plot_histogram(ax_hist_x, bins, hist \/ (1.0*np.max(hist)), color = 'black')\n plt.minorticks_on()\n# hist,bins = np.histogram(alpha, bins = 24)\n# plot_histogram(ax_hist_y, bins, hist \/ (1.0*np.max(hist)), color = 'black', orientation = 'horizontal')\n nbins = 50\n hist,bins = np.histogram(alpha, bins = nbins)\n weights = np.ones(np.size(fe_over_h)) * (1.0 \/ (1.0*np.max(hist)))\n ax_hist_y.hist(alpha, orientation='horizontal', color = 'C0', bins = nbins, weights = weights)\n if not (t_f is None):\n if np.max(plot_alpha) > -1000:\n hist,bins = np.histogram(plot_alpha, bins = nbins)\n weights = np.ones(np.size(plot_alpha)) * (1.0 \/ (1.0*np.max(hist)))\n ax_hist_y.hist(plot_alpha, orientation = 'horizontal', color = 'black', bins = nbins,\n weights = weights, histtype='step', lw = 2.0)\n\n ax_hist_x.xaxis.set_major_formatter(NullFormatter())\n ax_hist_y.yaxis.set_major_formatter(NullFormatter())\n ax_hist_x.set_xlim(ax_scatter.get_xlim())\n ax_hist_y.set_ylim(ax_scatter.get_ylim())\n ticks = [0.0,0.25,0.5,0.75,1.0]\n ax_hist_x.set_yticks(ticks)\n ax_hist_y.set_xticks(ticks)\n ax_hist_y.set_xticklabels(ticks, rotation = 270)\n\n plt.minorticks_on()\n# plt.tight_layout()\n if t_f is None:\n fig.savefig('alpha_over_fe_hist.png')\n else:\n fig.savefig('alpha_movie\/alpha_over_fe_hist_%0004i.png'%(image_num))\n\n plt.close()\n\n return\n\ndef plot_panel(A = 'Fe', B = 'Fe', C = 'H', color = True):\n \"\"\"\n Make panel plots of X\/A vs. B\/C where \"X\" is a loop through all elements available,\n and A, B, C are fixed for all plots, chosen by user. Defualt will plot\n [X\/Fe] vs. [Fe\/H]. 
Default behavior is to color points by age.\n \"\"\"\n filename = workdir + '\/abundances\/abundances\/abundances.h5'\n\n hdf5_data = h5py.File(filename, 'r')\n dfiles = hdf5_data.keys()\n dfile = dfiles[-1] # do this with most recent data file\n\n data = dd.io.load(filename, '\/' + str(dfile))\n elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])\n elements = elements + ['alpha']\n age = data['Time'] - data['creation_time'] # age of all particles in this data set\n\n for base in ['H','Fe']:\n fig, ax = plt.subplots(4,4, sharex = True, sharey = True)\n fig.set_size_inches(4*4,4*4)\n fig.subplots_adjust(hspace=0.0, wspace = 0.0)\n\n if base == 'Fe':\n bins = np.arange(-3,3.1,0.1)\n else:\n bins = np.arange(-9,0,0.1)\n\n i,j = 0,0\n for e in elements:\n if (A == e): # skip\n continue\n\n index = (i,j)\n y = np.array(data['abundances'][e][A])\n x = np.array(data['abundances'][B][C])\n\n p = ax[index].scatter(x, y, s = point_size*0.5,\n lw = 2, c = age, cmap = 'plasma_r', alpha = 0.75)\n p.set_clim([0.0, np.max(age)])\n xy = (0.8,0.8)\n ax[index].annotate(e, xy=xy, xytext=xy, xycoords = 'axes fraction',\n textcoords = 'axes fraction')\n# cb = fig.colorbar(p)\n# cb.set_label(r'Stellar Age (Myr)')\n j = j + 1\n if j >= 4:\n j = 0\n i = i + 1\n\n for i in np.arange(4):\n ax[(3,i)].set_xlabel(r'log([' + B + '\/' + C + '])')\n ax[(i,0)].set_ylabel(r'log([X\/' + A + '])')\n\n if C == 'H':\n ax[(i,0)].set_xlim(-10.25, 0.125)\n else:\n ax[(i,0)].set_xlim(-3.25, 3.25)\n\n if A == 'H':\n ax[(0,i)].set_ylim(-10.25, 0.125)\n else:\n ax[(0,i)].set_ylim(-3.25, 3.25)\n for j in np.arange(4):\n ax[(j,i)].plot([-10,10], [0.0,0.0], lw = 0.5 * line_width, ls = ':', color = 'black')\n\n plt.minorticks_on()\n fig.savefig('X_over_' + A +'_vs_' + B + '_over_' + C + '_panel.png')\n plt.close()\n\n return\n\n\ndef plot_spatial_profiles(field = 'metallicity', abundance = False,\n bins = None, spatial_type = 'cylindrical_radius'):\n\n filename = workdir + '\/abundances\/abundances\/abundances.h5'\n\n hdf5_data = h5py.File(filename, 'r')\n dfiles = hdf5_data.keys()\n dfile = dfiles[-1] # do this with most recent data file\n\n data = dd.io.load(filename, '\/' + str(dfile))\n elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])\n elements = elements + ['alpha']\n\n if spatial_type == 'cylindrical_radius':\n bin_field = np.sqrt(data['kinematics']['x']**2 + data['kinematics']['y']**2)\n xlabel = r'Radius (pc)'\n elif spatial_type == 'z':\n bin_field = np.abs( data['kinematics']['z'] )\n xlabel = r'Z (pc)'\n\n if bins is None:\n bins = np.linspace(np.floor(np.min(bin_field)), np.ceil(np.max(bin_field)), 100)\n centers = 0.5 * (bins[1:] + bins[:-1])\n nbins = np.size(bins)\n\n hist_index = np.digitize(bin_field, bins = bins)\n median, q1, q3 = np.zeros(nbins-1), np.zeros(nbins-1), np.zeros(nbins-1)\n\n if field == 'metallicity':\n # make a single plot\n # bin the data\n for i in np.arange(nbins-1):\n x = data['metallicity'][hist_index == i + 1]\n median[i] = np.median(x)\n\n if np.size(x) > 1:\n q1[i] = np.percentile(x, 25.0)\n q3[i] = np.percentile(x, 75.0)\n elif np.size(x) == 1:\n q1[i] = median[i]\n q3[i] = median[i]\n\n # now plot\n fig, ax = plt.subplots()\n fig.set_size_inches(8,8)\n\n plot_histogram(ax, bins, median, lw = line_width, color = 'black', ls = '-')\n ax.fill_between(centers, q1, q3, lw = 1.5, color = 'grey')\n\n ax.set_ylabel(r'Metallicity Fraction')\n ax.set_xlabel(xlabel)\n ax.set_xlim( np.min(bins), np.max(bins))\n 
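        # (added comment) the stepped line drawn above is the per-bin median metallicity profile;
        # the grey band spans the 25th-75th percentiles (q1, q3) computed in the binning loop.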
plt.tight_layout()\n plt.minorticks_on()\n fig.savefig('metallicity_' + spatial_type + '_profile.png')\n plt.close()\n\n elif abundance:\n\n fig, ax = plt.subplots(4,4, sharex = True, sharey = True)\n fig.set_size_inches(16,16)\n fig.subplots_adjust(hspace = 0.0, wspace = 0.0)\n\n axi, axj = 0,0\n for e in elements:\n if field == e:\n continue\n index = (axi,axj)\n\n for i in np.arange(nbins-1):\n x = np.array(data['abundances'][e][field])\n x = x[ hist_index == (i + 1)]\n\n if np.size(x) > 0:\n median[i] = np.median(x)\n q1[i] = np.percentile(x, 25)\n q3[i] = np.percentile(x, 75)\n else:\n median[i] = None; q1[i] = None; q3[i] = None\n\n ax[index].annotate(e, xy=(0.8,0.8),xytext=(0.8,0.8),\n xycoords='axes fraction',textcoords = 'axes fraction')\n plot_histogram(ax[index], bins, median, lw = line_width, color = 'black', ls = '-')\n ax[index].fill_between(centers,q1,q3,lw=1.5,color='grey')\n\n axj = axj+1\n if axj>=4:\n axj = 0\n axi = axi + 1\n\n for i in np.arange(4):\n ax[(3,i)].set_xlabel(xlabel)\n ax[(i,0)].set_ylabel(r'log[X\/' + field +'])')\n\n if field == 'H':\n ax[(0,i)].set_ylim(-10.25,0.125)\n else:\n ax[(0,i)].set_ylim(-3.25,3.25)\n for j in np.arange(4):\n ax[(j,i)].plot([bins[0],bins[-1]], [0.0,0.0], lw = 0.5 * line_width, ls = '--',color ='black')\n\n ax[(i,0)].set_xlim(np.min(bins), np.max(bins))\n\n plt.minorticks_on()\n fig.savefig(field + '_' + spatial_type + '_profile_panel.png')\n\n plt.close()\n return\n\ndef plot_MDF(plot_base = ['H','Fe']):\n \"\"\"\n Make a panel plot of the time evolution of all elemental abundance ratios\n with respect to both H and Fe (as separate plots)\n \"\"\"\n if (not (type(plot_base) is list)):\n plot_base = [plot_base]\n\n filename = workdir + '\/abundances\/abundances\/abundances.h5'\n\n hdf5_data = h5py.File(filename, 'r')\n dfiles = hdf5_data.keys()\n dfile = dfiles[-1] # do this with most recent data file\n\n data = dd.io.load(filename, '\/' + str(dfile))\n elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])\n elements = elements + ['alpha']\n\n for base in plot_base:\n fig, ax = plt.subplots(4,4, sharex = True, sharey = True)\n fig.set_size_inches(4*4,4*4)\n fig.subplots_adjust(hspace=0.0, wspace = 0.0)\n\n if base == 'Fe':\n bins = np.arange(-3,3.1,0.1)\n else:\n bins = np.arange(-9,0,0.1)\n\n i,j = 0,0\n for e in elements:\n if (base == e):\n continue\n index = (i,j)\n\n points = np.array(data['abundances'][e][base])\n\n single_MDF(points, bins = bins, norm = 'peak', ax = ax[index],\n label = False, lw = line_width)\n x = np.max(bins) - (0.25\/6.0 * (bins[-1] - bins[0]))\n y = 0.9\n ax[index].annotate(e, xy = (x,y), xytext =(x,y))\n ax[index].plot([0,0], [0.0,1.0], ls = ':', lw = 0.5 * line_width, color = 'black')\n\n j = j + 1\n if j >= 4:\n j = 0\n i = i + 1\n\n for i in np.arange(4):\n ax[(3,i)].set_xlabel(r'log([X\/' + base + '])')\n ax[(i,0)].set_ylabel(r'N\/N$_{\\rm peak}$')\n\n if base == 'H':\n ax[(i,0)].set_xlim(-10.25, 0.125)\n elif base == 'Fe':\n ax[(i,0)].set_xlim(-3.25, 3.25)\n\n plt.minorticks_on()\n fig.savefig(base + '_MDF.png')\n plt.close()\n\n return\n\ndef plot_time_evolution():\n \"\"\"\n Make a panel plot of the time evolution of all elemental abundance ratios\n with respect to both H and Fe (as separate plots)\n \"\"\"\n filename = workdir + '\/abundances\/abundances\/abundances.h5'\n\n hdf5_data = h5py.File(filename, 'r')\n dfiles = hdf5_data.keys()\n dfile = dfiles[-1] # do this with most recent data file\n\n data = dd.io.load(filename, '\/' + str(dfile))\n 
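    # (added comment) deepdish returns a nested dict here; within plot_MDF only
    # data['abundances'][X][base] (per-star log abundance ratios) is read and binned below.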
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])\n elements = elements + ['alpha']\n\n\n for time_type in ['cumulative','10Myr']:\n for base in ['H','Fe']:\n fig, ax = plt.subplots(4,4, sharex = True, sharey = True)\n fig.set_size_inches(4*4,4*4)\n fig.subplots_adjust(hspace=0.0, wspace = 0.0)\n\n\n i,j = 0,0\n for e in elements:\n if (base == e):\n continue\n print(\"plotting \" + e + \"\/\" + base + \" time evolution\")\n index = (i,j)\n\n t = data['statistics'][time_type]['bins']\n y = data['statistics'][time_type][e][base]['median']\n Q1 = data['statistics'][time_type][e][base]['Q1']\n Q3 = data['statistics'][time_type][e][base]['Q3']\n select = (y*0 == 0) # remove nan values\n\n t = t[select]\n t = t - t[0]\n\n ax[index].plot( t, y[select], lw = line_width, ls = '-', color = 'black', label = r' ' + e +' ')\n ax[index].fill_between(t, Q1[select], Q3[select], color = 'black', alpha = 0.5, lw = 0.5 * line_width)\n ax[index].set_xlim(0.0, np.max(t))\n ax[index].plot( [0.0,1000.0], [0.0,0.0], ls = ':', color = 'black', lw = line_width)\n ax[index].legend(loc = 'upper right')\n\n j = j + 1\n if j >= 4:\n j = 0\n i = i + 1\n\n for i in np.arange(4):\n ax[(3,i)].set_xlabel(r'Time (Myr)')\n ax[(i,0)].set_ylabel(r'[X\/' + base +']')\n\n if base == 'H':\n ax[(i,0)].set_ylim(-12.25, 0.125)\n elif base == 'Fe':\n ax[(i,0)].set_ylim(-3.25, 3.25)\n\n# for j in np.arange(3):\n# ax[(j,i)].set_xticklabels([])\n# ax[(i,j+1)].set_yticklabels([])\n# ax[(3,i)].set_xticklabels(np.arange(0,np.max(t)+20,20))\n# if base == 'Fe':\n# ax[(i,0)].set_yticklabels([-3,-2,-1,0,1,2,3,])\n# else:\n# ax[(i,0)].set_yticklabels([-12, -10, -8, -6, -4, -2, 0])\n\n plt.minorticks_on()\n fig.savefig('stellar_x_over_' + base + '_' + time_type +'_evolution.png')\n plt.close()\n\n return\n\ndef plot_mass_fraction_time_evolution():\n \"\"\"\n Make a panel plot of the time evolution of all elemental abundance ratios\n with respect to both H and Fe (as separate plots)\n \"\"\"\n filename = workdir + '\/abundances\/abundances\/abundances.h5'\n\n hdf5_data = h5py.File(filename, 'r')\n dfiles = hdf5_data.keys()\n dfile = dfiles[-1] # do this with most recent data file\n\n data = dd.io.load(filename, '\/' + str(dfile))\n elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])\n# elements = elements + ['alpha']\n\n\n for time_type in ['cumulative','10Myr']:\n fig, ax = plt.subplots(4,4, sharex = True, sharey = True)\n fig.set_size_inches(4*4,4*4)\n fig.subplots_adjust(hspace=0.0, wspace = 0.0)\n\n\n i,j = 0,0\n for e in elements:\n print(\"plotting \" + e + \"mass fraction time evolution\")\n index = (i,j)\n\n t = data['mass_fraction_statistics'][time_type]['bins']\n y = data['mass_fraction_statistics'][time_type][e]['median']\n Q1 = data['mass_fraction_statistics'][time_type][e]['Q1']\n Q3 = data['mass_fraction_statistics'][time_type][e]['Q3']\n select = (y*0 == 0) # remove nan values\n\n t = t[select]\n t = t - t[0]\n\n ax[index].plot( t, y[select], lw = line_width, ls = '-', color = 'black', label = r' ' + e +' ')\n ax[index].fill_between(t, Q1[select], Q3[select], color = 'black', alpha = 0.5, lw = 0.5 * line_width)\n ax[index].set_xlim(0.0, np.max(t))\n ax[index].plot( [0.0,1000.0], [0.0,0.0], ls = ':', color = 'black', lw = line_width)\n ax[index].legend(loc = 'upper right')\n\n j = j + 1\n if j >= 4:\n j = 0\n i = i + 1\n\n for i in np.arange(4):\n ax[(3,i)].set_xlabel(r'Time (Myr)')\n ax[(i,0)].set_ylabel(r'log(X Mass Fraction)')\n\n 
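# use a fixed logarithmic y-range so mass fractions are comparable across panels\n            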
ax[(i,0)].set_ylim(1.0E-10, 1.0E-4)\n ax[(i,0)].semilogy()\n\n# for j in np.arange(3):\n# ax[(j,i)].set_xticklabels([])\n# ax[(i,j+1)].set_yticklabels([])\n# ax[(3,i)].set_xticklabels(np.arange(0,np.max(t)+20,20))\n# if base == 'Fe':\n# ax[(i,0)].set_yticklabels([-3,-2,-1,0,1,2,3,])\n# else:\n# ax[(i,0)].set_yticklabels([-12, -10, -8, -6, -4, -2, 0])\n\n plt.minorticks_on()\n fig.savefig('stellar_mass_fraction_' + time_type +'_evolution.png')\n plt.close()\n\n return\n\ndef plot_ratios_with_histograms(X='alpha',A='Fe',B='Fe',C='H'):\n filename = workdir + '\/abundances\/abundances\/abundances.h5'\n\n hdf5_data = h5py.File(filename, 'r')\n dfiles = hdf5_data.keys()\n dfile = dfiles[-1] # do this with most recent data file\n\n data = dd.io.load(filename, '\/' + str(dfile))\n elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])\n elements = elements + ['alpha'] + ['H']\n age = data['Time'] - data['creation_time'] # age of all particles in this data set\n\n # --------------------\n check_elements = [x for x in [X,A,B,C] if (not (x in elements))]\n if len(check_elements) > 0:\n print(check_elements, \" not in elements list\")\n print(\"available: \", elements)\n raise ValueError\n\n sep = 0.02\n left, width = 0.125, 0.65\n bottom, height = 0.1, 0.65\n left_h = left + width + sep\n bottom_h = bottom + height + sep\n\n rect_scatter = [left,bottom,width,height]\n# rect_colorbar =\n# rect_histx = [left, bottom_h, width, 0.95 - bottom_h - (left-bottom)]\n# rect_histy = [left_h, bottom, 0.95 - left_h, height]\n\n# fig,ax = plt.subplots()\n fig = plt.figure(1, figsize=(8,8))\n# fig.set_size_inches(8,8)\n\n ax_scatter = plt.axes(rect_scatter)\n# ax_hist_x = plt.axes(rect_histx)\n# ax_hist_y = plt.axes(rect_histy)\n# ax_color = plt.axes(rect_colorbar)\n\n x_values = data['abundances'][B][C]\n y_values = data['abundances'][X][A]\n\n age = age - np.min(age) # normalize\n\n # scatter plot\n p = ax_scatter.scatter(x_values, y_values,\n s = point_size, lw = 2, c = age, cmap = 'plasma_r', alpha = 0.75)\n p.set_clim([0.0, np.max(age)])\n\n cb = fig.colorbar(p, ax = ax_scatter, orientation = 'horizontal', pad = 0.125, fraction = 0.046,\n aspect = 40)\n cb.set_label(r'Stellar Age (Myr)')\n#\n#\n#\n ax_scatter.set_xlim(-9,-1)\n ax_scatter.set_ylim(-1.75,1.75)\n ax_scatter.tick_params(axis='x',which='minor',bottom='on')\n ax_scatter.tick_params(axis='y',which='minor',bottom='on')\n\n ax_scatter.set_xlabel(r'log([' + B + '\/' + C + '])')\n ax_scatter.set_ylabel(r'log([' + X + '\/' + A + '])')\n plt.minorticks_on()\n\n #\n # find main plot and construct histograms\n #\n divider = make_axes_locatable(ax_scatter)\n left, bottom, width, height = divider.get_position()\n# width, height = divider.get_horizontal(), divider.get_vertical()\n sep = 0.01\n thickness = np.min( np.array([0.95 - left - width - sep, 0.95 - bottom - height - sep]))\n rect_histx = [left, bottom + height + sep, width, thickness]\n rect_histy = [left + width + sep, bottom, thickness, height]\n ax_hist_x = plt.axes(rect_histx)\n ax_hist_y = plt.axes(rect_histy)\n\n # construct the histogram for the horizontal axis (goes up top)\n nbins = 100\n hist,bins = np.histogram(x_values, bins = nbins)\n weights = np.ones(np.size(x_values)) * (1.0 \/ (1.0*np.max(hist)))\n ax_hist_x.hist(x_values, color = 'C0', bins = nbins, weights = weights)\n# plot_histogram(ax_hist_x, bins, hist \/ (1.0*np.max(hist)), color = 'black')\n plt.minorticks_on()\n# hist,bins = np.histogram(alpha, bins = 24)\n# plot_histogram(ax_hist_y, 
bins, hist \/ (1.0*np.max(hist)), color = 'black', orientation = 'horizontal')\n\n # now do the same for the vertical axis histogram\n nbins = 50\n hist,bins = np.histogram(y_values, bins = nbins)\n weights = np.ones(np.size(y_values)) * (1.0 \/ (1.0*np.max(hist)))\n ax_hist_y.hist(y_values, orientation='horizontal', color = 'C0', bins = nbins, weights = weights)\n\n ax_hist_x.xaxis.set_major_formatter(NullFormatter())\n ax_hist_y.yaxis.set_major_formatter(NullFormatter())\n ax_hist_x.set_xlim(ax_scatter.get_xlim())\n ax_hist_y.set_ylim(ax_scatter.get_ylim())\n ticks = [0.0,0.25,0.5,0.75,1.0]\n ax_hist_x.set_yticks(ticks)\n ax_hist_y.set_xticks(ticks)\n ax_hist_y.set_xticklabels(ticks, rotation = 270)\n\n plt.minorticks_on()\n# plt.tight_layout()\n fig.savefig(X + '_over_' + A + '_vs_' + B + '_over_' + C + '_hist.png')\n\n plt.close()\n\n return\n\nif __name__ == '__main__':\n plot_mass_fraction_time_evolution() # \n\n# plot_ratios_with_histograms('C','O','Fe','H') # C\/O vs Fe\/H\n# plot_ratios_with_histograms('alpha','Mg','Mg','H')\n# plot_ratios_with_histograms('alpha','Fe','Fe','H')\n\n# plot_panel() # default [X\/Fe] vs [Fe\/H]\n# plot_panel(A = 'Mg', B = 'Fe', C = 'H')\n# plot_panel(A = 'Mg', B = 'Mg', C = 'Fe')\n# plot_panel(A = 'O', B = 'Fe', C = 'H')\n# plot_panel(A = 'O', B = 'O', C = 'Fe')\n# plot_panel(A = 'Ba', B = 'Ba', C = 'Fe')\n\n# plot_MDF(plot_base = ['H','Fe','O','Ba'])\n\n# plot_time_evolution()\n\n# plot_alpha_vs_fe_with_histograms()\n\n# plot_alpha_vs_fe()\n\n# plot_alpha_vs_fe_movie()\n# plot_spatial_profiles(bins=np.arange(0,505,10))\n# plot_spatial_profiles(field = 'Fe',abundance=True, bins = np.arange(0,505,10))\n# plot_spatial_profiles(field = 'H', abundance=True, bins = np.arange(0,505,10))\n\n","license":"mit"} {"repo_name":"sernst\/cauldron","path":"cauldron\/session\/display\/__init__.py","copies":"1","size":"23013","content":"import json as _json_io\nimport textwrap\nimport typing\nfrom datetime import timedelta\n\nimport cauldron as _cd\nfrom cauldron import environ\nfrom cauldron import render\nfrom cauldron.render import plots as render_plots\nfrom cauldron.render import texts as render_texts\nfrom cauldron.session import report\n\n\ndef _get_report() -> 'report.Report':\n \"\"\"Fetches the report associated with the currently running step.\"\"\"\n return _cd.project.get_internal_project().current_step.report\n\n\ndef inspect(source: dict):\n \"\"\"\n Inspects the data and structure of the source dictionary object and\n adds the results to the display for viewing.\n\n :param source:\n A dictionary object to be inspected.\n :return:\n \"\"\"\n r = _get_report()\n r.append_body(render.inspect(source))\n\n\ndef header(header_text: str, level: int = 1, expand_full: bool = False):\n \"\"\"\n Adds a text header to the display with the specified level.\n\n :param header_text:\n The text to display in the header.\n :param level:\n The level of the header, which corresponds to the html header\n levels, such as
<h1>, <h2>
, ...\n :param expand_full:\n Whether or not the header will expand to fill the width of the entire\n notebook page, or be constrained by automatic maximum page width. The\n default value of False lines the header up with text displays.\n \"\"\"\n r = _get_report()\n r.append_body(render.header(\n header_text,\n level=level,\n expand_full=expand_full\n ))\n\n\ndef text(value: str, preformatted: bool = False):\n \"\"\"\n Adds text to the display. If the text is not preformatted, it will be\n displayed in paragraph format. Preformatted text will be displayed\n inside a pre tag with a monospace font.\n\n :param value:\n The text to display.\n :param preformatted:\n Whether or not to preserve the whitespace display of the text.\n \"\"\"\n if preformatted:\n result = render_texts.preformatted_text(value)\n else:\n result = render_texts.text(value)\n r = _get_report()\n r.append_body(result)\n r.stdout_interceptor.write_source(\n '{}\\n'.format(textwrap.dedent(value))\n )\n\n\ndef markdown(\n source: str = None,\n source_path: str = None,\n preserve_lines: bool = False,\n font_size: float = None,\n **kwargs\n):\n \"\"\"\n Renders the specified source string or source file using markdown and\n adds the resulting HTML to the notebook display.\n\n :param source:\n A markdown formatted string.\n :param source_path:\n A file containing markdown text.\n :param preserve_lines:\n If True, all line breaks will be treated as hard breaks. Use this\n for pre-formatted markdown text where newlines should be retained\n during rendering.\n :param font_size:\n Specifies a relative font size adjustment. The default value is 1.0,\n which preserves the inherited font size values. Set it to a value\n below 1.0 for smaller font-size rendering and greater than 1.0 for\n larger font size rendering.\n :param kwargs:\n Any variable replacements to make within the string using Jinja2\n templating syntax.\n \"\"\"\n r = _get_report()\n\n result = render_texts.markdown(\n source=source,\n source_path=source_path,\n preserve_lines=preserve_lines,\n font_size=font_size,\n **kwargs\n )\n r.library_includes += result['library_includes']\n\n r.append_body(result['body'])\n r.stdout_interceptor.write_source(\n '{}\\n'.format(textwrap.dedent(result['rendered']))\n )\n\n\ndef json(**kwargs):\n \"\"\"\n Adds the specified data to the the output display window with the\n specified key. This allows the user to make available arbitrary\n JSON-compatible data to the display for runtime use.\n\n :param kwargs:\n Each keyword argument is added to the CD.data object with the\n specified key and value.\n \"\"\"\n r = _get_report()\n r.append_body(render.json(**kwargs))\n r.stdout_interceptor.write_source(\n '{}\\n'.format(_json_io.dumps(kwargs, indent=2))\n )\n\n\ndef plotly(\n data: typing.Union[dict, list, typing.Any] = None,\n layout: typing.Union[dict, typing.Any] = None,\n scale: float = 0.5,\n figure: typing.Union[dict, typing.Any] = None,\n static: bool = False\n):\n \"\"\"\n Creates a Plotly plot in the display with the specified data and\n layout.\n\n :param data:\n The Plotly trace data to be plotted.\n :param layout:\n The layout data used for the plot.\n :param scale:\n The display scale with units of fractional screen height. A value\n of 0.5 constrains the output to a maximum height equal to half the\n height of browser window when viewed. 
Values below 1.0 are usually\n recommended so the entire output can be viewed without scrolling.\n :param figure:\n In cases where you need to create a figure instead of separate data\n and layout information, you can pass the figure here and leave the\n data and layout values as None.\n :param static:\n If true, the plot will be created without interactivity.\n This is useful if you have a lot of plots in your notebook.\n \"\"\"\n r = _get_report()\n\n if not figure and not isinstance(data, (list, tuple)):\n data = [data]\n\n if 'plotly' not in r.library_includes:\n r.library_includes.append('plotly')\n\n r.append_body(render.plotly(\n data=data,\n layout=layout,\n scale=scale,\n figure=figure,\n static=static\n ))\n r.stdout_interceptor.write_source('[ADDED] Plotly plot\\n')\n\n\ndef table(\n data_frame,\n scale: float = 0.7,\n include_index: bool = False,\n max_rows: int = 500,\n sample_rows: typing.Optional[int] = None,\n formats: typing.Union[\n str,\n typing.Callable[[typing.Any], str],\n typing.Dict[\n str,\n typing.Union[str, typing.Callable[[typing.Any], str]]\n ]\n ] = None\n):\n \"\"\"\n Adds the specified data frame to the display in a nicely formatted\n scrolling table.\n\n :param data_frame:\n The pandas data frame to be rendered to a table.\n :param scale:\n The display scale with units of fractional screen height. A value\n of 0.5 constrains the output to a maximum height equal to half the\n height of browser window when viewed. Values below 1.0 are usually\n recommended so the entire output can be viewed without scrolling.\n :param include_index:\n Whether or not the index column should be included in the displayed\n output. The index column is not included by default because it is\n often unnecessary extra information in the display of the data.\n :param max_rows:\n This argument exists to prevent accidentally writing very large data\n frames to a table, which can cause the notebook display to become\n sluggish or unresponsive. If you want to display large tables, you need\n only increase the value of this argument.\n :param sample_rows:\n When set to a positive integer value, the DataFrame will be randomly\n sampled to the specified number of rows when displayed in the table.\n If the value here is larger than the number of rows in the DataFrame,\n the sampling will have no effect and the entire DataFrame will be\n displayed instead.\n :param formats:\n An optional dictionary that, when specified, should contain a mapping\n between column names and formatting strings to apply to that column\n for display purposes. For example, ``{'foo': '{:,.2f}%'}`` would\n transform a column ``foo = [12.2121, 34.987123, 42.72839]`` to\n display as ``foo = [12.21%, 34.99%, 42.73%]``. The formatters should\n follow the standard Python string formatting guidelines the same as\n the ``str.format()`` command having the value of the column as the only\n positional argument in the format arguments. A string value can also\n be specified for uniform formatting of all columns (or if displaying\n a series with only a single value).\n \"\"\"\n r = _get_report()\n r.append_body(render.table(\n data_frame=data_frame,\n scale=scale,\n include_index=include_index,\n max_rows=max_rows,\n sample_rows=sample_rows,\n formats=formats\n ))\n r.stdout_interceptor.write_source('[ADDED] Table\\n')\n\n\ndef svg(svg_dom: str, filename: str = None):\n \"\"\"\n Adds the specified SVG string to the display. 
If a filename is\n included, the SVG data will also be saved to that filename within the\n project results folder.\n\n :param svg_dom:\n The SVG string data to add to the display.\n :param filename:\n An optional filename where the SVG data should be saved within\n the project results folder.\n \"\"\"\n r = _get_report()\n r.append_body(render.svg(svg_dom))\n r.stdout_interceptor.write_source('[ADDED] SVG\\n')\n\n if not filename:\n return\n\n if not filename.endswith('.svg'):\n filename += '.svg'\n\n r.files[filename] = svg_dom\n\n\ndef jinja(path: str, **kwargs):\n \"\"\"\n Renders the specified Jinja2 template to HTML and adds the output to the\n display.\n\n :param path:\n The fully-qualified path to the template to be rendered.\n :param kwargs:\n Any keyword arguments that will be use as variable replacements within\n the template.\n \"\"\"\n r = _get_report()\n r.append_body(render.jinja(path, **kwargs))\n r.stdout_interceptor.write_source('[ADDED] Jinja2 rendered HTML\\n')\n\n\ndef whitespace(lines: float = 1.0):\n \"\"\"\n Adds the specified number of lines of whitespace.\n\n :param lines:\n The number of lines of whitespace to show.\n \"\"\"\n r = _get_report()\n r.append_body(render.whitespace(lines))\n r.stdout_interceptor.write_source('\\n')\n\n\ndef image(\n filename: str,\n width: int = None,\n height: int = None,\n justify: str = 'left'\n):\n \"\"\"\n Adds an image to the display. The image must be located within the\n assets directory of the Cauldron notebook's folder.\n\n :param filename:\n Name of the file within the assets directory,\n :param width:\n Optional width in pixels for the image.\n :param height:\n Optional height in pixels for the image.\n :param justify:\n One of 'left', 'center' or 'right', which specifies how the image\n is horizontally justified within the notebook display.\n \"\"\"\n r = _get_report()\n path = '\/'.join(['reports', r.project.uuid, 'latest', 'assets', filename])\n r.append_body(render.image(path, width, height, justify))\n r.stdout_interceptor.write_source('[ADDED] Image\\n')\n\n\ndef html(dom: str):\n \"\"\"\n A string containing a valid HTML snippet.\n\n :param dom:\n The HTML string to add to the display.\n \"\"\"\n r = _get_report()\n r.append_body(render.html(dom))\n r.stdout_interceptor.write_source('[ADDED] HTML\\n')\n\n\ndef workspace(show_values: bool = True, show_types: bool = True):\n \"\"\"\n Adds a list of the shared variables currently stored in the project\n workspace.\n\n :param show_values:\n When true the values for each variable will be shown in addition to\n their name.\n :param show_types:\n When true the data types for each shared variable will be shown in\n addition to their name.\n \"\"\"\n r = _get_report()\n\n data = {}\n for key, value in r.project.shared.fetch(None).items():\n if key.startswith('__cauldron_'):\n continue\n data[key] = value\n\n r.append_body(render.status(data, values=show_values, types=show_types))\n\n\ndef pyplot(\n figure=None,\n scale: float = 0.8,\n clear: bool = True,\n aspect_ratio: typing.Union[list, tuple] = None\n):\n \"\"\"\n Creates a matplotlib plot in the display for the specified figure. The size\n of the plot is determined automatically to best fit the notebook.\n\n :param figure:\n The matplotlib figure to plot. If omitted, the currently active\n figure will be used.\n :param scale:\n The display scale with units of fractional screen height. A value\n of 0.5 constrains the output to a maximum height equal to half the\n height of browser window when viewed. 
Values below 1.0 are usually\n recommended so the entire output can be viewed without scrolling.\n :param clear:\n Clears the figure after it has been rendered. This is useful to\n prevent persisting old plot data between repeated runs of the\n project files. This can be disabled if the plot is going to be\n used later in the project files.\n :param aspect_ratio:\n The aspect ratio for the displayed plot as a two-element list or\n tuple. The first element is the width and the second element the\n height. The units are \"inches,\" which is an important consideration\n for the display of text within the figure. If no aspect ratio is\n specified, the currently assigned values to the plot will be used\n instead.\n \"\"\"\n r = _get_report()\n r.append_body(render_plots.pyplot(\n figure,\n scale=scale,\n clear=clear,\n aspect_ratio=aspect_ratio\n ))\n r.stdout_interceptor.write_source('[ADDED] PyPlot plot\\n')\n\n\ndef bokeh(model, scale: float = 0.7, responsive: bool = True):\n \"\"\"\n Adds a Bokeh plot object to the notebook display.\n\n :param model:\n The plot object to be added to the notebook display.\n :param scale:\n How tall the plot should be in the notebook as a fraction of screen\n height. A number between 0.1 and 1.0. The default value is 0.7.\n :param responsive:\n Whether or not the plot should responsively scale to fill the width\n of the notebook. The default is True.\n \"\"\"\n r = _get_report()\n\n if 'bokeh' not in r.library_includes:\n r.library_includes.append('bokeh')\n\n r.append_body(render_plots.bokeh_plot(\n model=model,\n scale=scale,\n responsive=responsive\n ))\n r.stdout_interceptor.write_source('[ADDED] Bokeh plot\\n')\n\n\ndef listing(\n source: list,\n ordered: bool = False,\n expand_full: bool = False\n):\n \"\"\"\n An unordered or ordered list of the specified *source* iterable where\n each element is converted to a string representation for display.\n\n :param source:\n The iterable to display as a list.\n :param ordered:\n Whether or not the list should be ordered. If False, which is the\n default, an unordered bulleted list is created.\n :param expand_full:\n Whether or not the list should expand to fill the screen horizontally.\n When defaulted to False, the list is constrained to the center view\n area of the screen along with other text. This can be useful to keep\n lists aligned with the text flow.\n \"\"\"\n r = _get_report()\n r.append_body(render.listing(\n source=source,\n ordered=ordered,\n expand_full=expand_full\n ))\n r.stdout_interceptor.write_source('[ADDED] Listing\\n')\n\n\ndef list_grid(\n source: list,\n expand_full: bool = False,\n column_count: int = 2,\n row_spacing: float = 1.0\n):\n \"\"\"\n An multi-column list of the specified *source* iterable where\n each element is converted to a string representation for display.\n\n :param source:\n The iterable to display as a list.\n :param expand_full:\n Whether or not the list should expand to fill the screen horizontally.\n When defaulted to False, the list is constrained to the center view\n area of the screen along with other text. This can be useful to keep\n lists aligned with the text flow.\n :param column_count:\n The number of columns to display. The specified count is applicable to\n high-definition screens. For Lower definition screens the actual count\n displayed may be fewer as the layout responds to less available\n horizontal screen space.\n :param row_spacing:\n The number of lines of whitespace to include between each row in the\n grid. 
Set this to 0 for tightly displayed lists.\n \"\"\"\n r = _get_report()\n r.append_body(render.list_grid(\n source=source,\n expand_full=expand_full,\n column_count=column_count,\n row_spacing=row_spacing\n ))\n r.stdout_interceptor.write_source('[ADDED] List grid\\n')\n\n\ndef latex(source: str):\n \"\"\"\n Add a mathematical equation in latex math-mode syntax to the display.\n Instead of the traditional backslash escape character, the @ character is\n used instead to prevent backslash conflicts with Python strings. For\n example, \\\\delta would be @delta.\n\n :param source:\n The string representing the latex equation to be rendered.\n \"\"\"\n r = _get_report()\n if 'katex' not in r.library_includes:\n r.library_includes.append('katex')\n\n r.append_body(render_texts.latex(source.replace('@', '\\\\')))\n r.stdout_interceptor.write_source('[ADDED] Latex equation\\n')\n\n\ndef head(source, count: int = 5):\n \"\"\"\n Displays a specified number of elements in a source object of many\n different possible types.\n\n :param source:\n DataFrames will show *count* rows of that DataFrame. A list, tuple or\n other iterable, will show the first *count* rows. Dictionaries will\n show *count* keys from the dictionary, which will be randomly selected\n unless you are using an OrderedDict. Strings will show the first\n *count* characters.\n :param count:\n The number of elements to show from the source.\n \"\"\"\n r = _get_report()\n r.append_body(render_texts.head(source, count=count))\n r.stdout_interceptor.write_source('[ADDED] Head\\n')\n\n\ndef tail(source, count: int = 5):\n \"\"\"\n The opposite of the head function. Displays the last *count* elements of\n the *source* object.\n\n :param source:\n DataFrames will show the last *count* rows of that DataFrame. A list,\n tuple or other iterable, will show the last *count* rows. Dictionaries\n will show *count* keys from the dictionary, which will be randomly\n selected unless you are using an OrderedDict. Strings will show the\n last *count* characters.\n :param count:\n The number of elements to show from the source.\n \"\"\"\n r = _get_report()\n r.append_body(render_texts.tail(source, count=count))\n r.stdout_interceptor.write_source('[ADDED] Tail\\n')\n\n\ndef status(\n message: str = None,\n progress: float = None,\n section_message: str = None,\n section_progress: float = None,\n):\n \"\"\"\n Updates the status display, which is only visible while a step is running.\n This is useful for providing feedback and information during long-running\n steps.\n\n A section progress is also available for cases where long running tasks\n consist of multiple tasks and you want to display sub-progress messages\n within the context of the larger status.\n\n Note: this is only supported when running in the Cauldron desktop\n application.\n\n :param message:\n The status message you want to display. If left blank the previously\n set status message will be retained. Should you desire to remove an\n existing message, specify a blank string for this argument.\n :param progress:\n A number between zero and one that indicates the overall progress for\n the current status. If no value is specified, the previously assigned\n progress will be retained.\n :param section_message:\n The status message you want to display for a particular task within a\n long-running step. If left blank the previously set section message\n will be retained. 
Should you desire to remove an existing message,\n specify a blank string for this argument.\n :param section_progress:\n A number between zero and one that indicates the progress for the\n current section status. If no value is specified, the previously\n assigned section progress value will be retained.\n \"\"\"\n environ.abort_thread()\n r = _get_report()\n step = _cd.project.get_internal_project().current_step\n\n changes = 0\n has_changed = step.progress_message != message\n if message is not None and has_changed:\n changes += 1\n step.progress_message = message\n\n has_changed = step.progress_message != max(0, min(1, progress or 0))\n if progress is not None and has_changed:\n changes += 1\n step.progress = max(0.0, min(1.0, progress))\n\n has_changed = step.sub_progress_message != section_message\n if section_message is not None and has_changed:\n changes += 1\n step.sub_progress_message = section_message\n\n has_changed = step.sub_progress != max(0, min(1, section_progress or 0))\n if section_progress is not None and has_changed:\n changes += 1\n step.sub_progress = section_progress\n\n if changes > 0:\n # update the timestamp to inform rendering that a status\n # has changed and should be re-rendered into the step.\n r.update_last_modified()\n\n\ndef code_block(\n code: str = None,\n path: str = None,\n language_id: str = None,\n title: str = None,\n caption: str = None\n):\n \"\"\"\n Adds a block of syntax highlighted code to the display from either\n the supplied code argument, or from the code file specified\n by the path argument.\n\n :param code:\n A string containing the code to be added to the display\n :param path:\n A path to a file containing code to be added to the display\n :param language_id:\n The language identifier that indicates what language should\n be used by the syntax highlighter. 
Valid values are any of the\n languages supported by the Pygments highlighter.\n :param title:\n If specified, the code block will include a title bar with the\n value of this argument\n :param caption:\n If specified, the code block will include a caption box below the code\n that contains the value of this argument\n \"\"\"\n environ.abort_thread()\n r = _get_report()\n r.append_body(render.code_block(\n block=code,\n path=path,\n language=language_id,\n title=title,\n caption=caption\n ))\n r.stdout_interceptor.write_source('{}\\n'.format(code))\n\n\ndef elapsed():\n \"\"\"\n Displays the elapsed time since the step started running.\n \"\"\"\n environ.abort_thread()\n step = _cd.project.get_internal_project().current_step\n r = _get_report()\n r.append_body(render.elapsed_time(step.elapsed_time))\n\n result = '[ELAPSED]: {}\\n'.format(timedelta(seconds=step.elapsed_time))\n r.stdout_interceptor.write_source(result)\n","license":"mit"} {"repo_name":"rwgdrummer\/maskgen","path":"maskgen\/analytics\/dctAnalytic.py","copies":"1","size":"17525","content":"# =============================================================================\n# Authors: PAR Government\n# Organization: DARPA\n#\n# Copyright (c) 2016 PAR Government\n# All rights reserved.\n#\n#\n# adapted from https:\/\/github.com\/enmasse\/jpeg_read\n#==============================================================================\n\n\n\n\nimport sys\nfrom math import *\nfrom Tkinter import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport logging\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\n\ndef memoize (function):\n # http:\/\/programmingzen.com\/2009\/05\/18\/memoization-in-ruby-and-python\/\n cache = {}\n\n def decorated_function (*args):\n try:\n return cache[args]\n except KeyError:\n val = function (*args)\n cache[args] = val\n return val\n\n return decorated_function\n\n\n@memoize\ndef decodeBits (len, val):\n \"\"\" Calculate the value from the \"additional\" bits in the huffman data. \"\"\"\n\n return val if (val & (1 << len - 1)) else val - ((1 << len) - 1)\n\n\ndef extractCoeffs (data):\n dclum = []\n dcchr1 = []\n dcchr2 = []\n aclum = []\n acchr1 = []\n acchr2 = []\n for MCU in data:\n lum = MCU[0]\n chr1 = MCU[1]\n chr2 = MCU[2]\n for MCU_component in lum:\n if len (MCU_component):\n dclum.append (MCU_component[0])\n aclum.extend (MCU_component[1:])\n for MCU_component in chr1:\n if len (MCU_component):\n dcchr1.append (MCU_component[0])\n acchr1.extend (MCU_component[1:])\n for MCU_component in chr2:\n if len (MCU_component):\n dcchr2.append (MCU_component[0])\n acchr2.extend (MCU_component[1:])\n\n return (dclum, dcchr1, dcchr2, aclum, acchr1, acchr2)\n \n\ndef generateHuffmanCodes (huffsize):\n \"\"\" Calculate the huffman code of each length. \"\"\"\n huffcode = []\n k = 0\n code = 0\n\n # Magic\n for i in range (len (huffsize)):\n si = huffsize[i]\n for k in range (si):\n huffcode.append ((i + 1, code))\n code += 1\n\n code <<= 1\n\n return huffcode\n\n\ndef getBits (num, gen):\n \"\"\" Get \"num\" bits from gen. \"\"\"\n out = 0\n for i in range (num):\n out <<= 1\n val = gen.next ()\n if val != []:\n out += val & 0x01\n else:\n return []\n\n return out\n\n\ndef mapHuffmanCodes (codes, values):\n \"\"\" Map the huffman code to the right value. \"\"\"\n out = {}\n\n for i in range (len (codes)):\n out[codes[i]] = values[i]\n\n return out\n\n\ndef readAPP (type, file):\n \"\"\" Read APP marker. 
\"\"\"\n Lp = readWord (file)\n Lp -= 2\n\n # If APP0 try to read the JFIF header\n # Not really necessary\n if type == 0:\n identifier = file.read (5)\n Lp -= 5\n version = file.read (2)\n Lp -= 2\n units = ord (file.read (1))\n Lp -= 1\n Xdensity = ord (file.read (1)) << 8\n Xdensity |= ord (file.read (1))\n Lp -= 2\n Ydensity = ord (file.read (1)) << 8\n Ydensity |= ord (file.read (1))\n Lp -= 2\n\n file.seek (Lp, 1)\n\n\ndef readByte (file):\n \"\"\" Read a byte from file. \"\"\"\n return ord (file.read (1))\n\n\ndef readWord (file):\n \"\"\" Read a 16 bit word from file. \"\"\"\n return ord (file.read (1)) << 8 | ord (file.read (1))\n\n\ndef restoreDC (data):\n \"\"\" Restore the DC values. They are coded as the difference from the\n previous DC value of the same component.\n \"\"\"\n\n out = []\n dc_prev = [0 for x in range (len (data[0]))]\n\n # For each MCU\n for mcu in data:\n # For each component\n for comp_num in range (len (mcu)):\n # For each DU\n for du in range (len (mcu[comp_num])):\n if mcu[comp_num][du]:\n mcu[comp_num][du][0] += dc_prev[comp_num]\n dc_prev[comp_num] = mcu[comp_num][du][0]\n\n out.append (mcu)\n\n return out\n\n\nclass JPEG_Reader:\n \"\"\" Class for reading DCT coefficients from JPEG files. \"\"\"\n\n def __init__ (self):\n self.huffman_ac_tables = [{}, {}, {}, {}]\n self.huffman_dc_tables = [{}, {}, {}, {}]\n self.q_table = [[], [], [], []]\n\n self.XYP = 0, 0, 0\n self.component = {}\n self.num_components = 0\n self.mcus_read = 0\n self.dc = []\n self.inline_dc = 0\n self.bit_stream = []\n self.EOI = False\n \t\n def readDCT_Coeffs (self, filename):\n \"\"\" Reads and returns DCT coefficients from the supplied JPEG file. \"\"\"\n\n self.__init__ ()\n\tdata = []\n\n with open (filename, \"rb\") as inputFile:\n in_char = inputFile.read (1)\n while in_char:\n if in_char == chr (0xff):\n in_char = inputFile.read (1)\n in_num = ord (in_char)\n if 0xe0 <= in_num <= 0xef:\n readAPP (in_num - 0xe0, inputFile)\n elif in_num == 0xdb:\n self.__readDQT (inputFile)\n elif in_num == 0xdc:\n self.__readDNL (inputFile)\n elif in_num == 0xc4:\n self.__readDHT (inputFile)\n elif in_num == 0xc8:\n print \"JPG\"\n elif 0xc0 <= in_num <= 0xcf:\n self.__readSOF (in_num - 0xc0, inputFile)\n elif in_num == 0xda:\n self.__readSOS (inputFile)\n self.bit_stream = self.__readBit (inputFile)\n while not self.EOI:\n data.append (self.__readMCU ())\n in_char = inputFile.read (1)\n\n return extractCoeffs (data if self.inline_dc else restoreDC (data))\n\n def __readBit (self, file):\n \"\"\" A generator that reads one bit from file and handles markers and\n byte stuffing.\n \"\"\"\n\n input = file.read (1)\n while input and not self.EOI:\n if input == chr (0xFF):\n cmd = file.read (1)\n if cmd:\n # Byte stuffing\n if cmd == chr (0x00):\n input = chr (0xFF)\n # End of image marker\n elif cmd == chr (0xD9):\n self.EOI = True\n # Restart markers\n elif 0xD0 <= ord (cmd) <= 0xD7 and self.inline_dc:\n # Reset dc value\n self.dc = [0 for i in range (self.num_components + 1)]\n input = file.read (1)\n else:\n input = file.read (1)\n #print \"CMD: %x\" % ord(cmd)\n\n if not self.EOI:\n for i in range (7, -1, -1):\n # Output next bit\n yield (ord (input) >> i) & 0x01\n\n input = file.read (1)\n\n while True:\n yield []\n\n def __readDHT (self, file):\n \"\"\" Read and compute the huffman tables. 
\"\"\"\n\n # Read the marker length\n Lh = readWord (file)\n Lh -= 2\n while Lh > 0:\n huffsize = []\n huffval = []\n T = readByte (file)\n Th = T & 0x0F\n Tc = (T >> 4) & 0x0F\n #print \"Lh: %d Th: %d Tc: %d\" % (Lh, Th, Tc)\n Lh -= 1\n\n # Read how many symbols of each length\n # up to 16 bits\n for i in range (16):\n huffsize.append (readByte (file))\n Lh -= 1\n\n # Generate the huffman codes\n huffcode = generateHuffmanCodes (huffsize)\n #print \"Huffcode\", huffcode\n\n # Read the values that should be mapped to huffman codes\n for i in huffcode:\n #print i\n try:\n huffval.append (readByte (file))\n Lh -= 1\n except TypeError:\n continue\n\n # Generate lookup tables\n if Tc == 0:\n self.huffman_dc_tables[Th] = mapHuffmanCodes (huffcode, huffval)\n else:\n self.huffman_ac_tables[Th] = mapHuffmanCodes (huffcode, huffval)\n\n def __readDNL (self, file):\n \"\"\" Read the DNL marker. Changes the number of lines. \"\"\"\n\n Ld = readWord (file)\n Ld -= 2\n NL = readWord (file)\n Ld -= 2\n\n X, Y, P = self.XYP\n\n if Y == 0:\n self.XYP = X, NL, P\n\n def __readDQT (self, file):\n \"\"\" Read the quantization table. The table is in zigzag order. \"\"\"\n\n Lq = readWord (file)\n Lq -= 2\n while Lq > 0:\n table = []\n Tq = readByte (file)\n Pq = Tq >> 4\n Tq &= 0xF\n Lq -= 1\n\n if Pq == 0:\n for i in range (64):\n table.append (readByte (file))\n Lq -= 1\n else:\n for i in range (64):\n val = readWord (file)\n table.append (val)\n Lq -= 2\n\n self.q_table[Tq] = table\n\n def __readDU (self, comp_num):\n \"\"\" Read one data unit with component index comp_num. \"\"\"\n\n data = [] \n comp = self.component[comp_num]\n huff_tbl = self.huffman_dc_tables[comp['Td']]\n\n # Fill data with 64 coefficients\n while len (data) < 64:\n key = 0\n\n for bits in range (1, 17):\n key_len = []\n key <<= 1\n # Get one bit from bit_stream\n val = getBits (1, self.bit_stream)\n if val == []:\n break\n key |= val\n # If huffman code exists\n if huff_tbl.has_key ((bits, key)):\n key_len = huff_tbl[(bits, key)]\n break\n\n # After getting the DC value switch to the AC table\n huff_tbl = self.huffman_ac_tables[comp['Ta']]\n\n if key_len == []:\n #print (bits, key, bin(key)), \"key not found\"\n break\n # If ZRL fill with 16 zero coefficients\n elif key_len == 0xF0:\n for i in range (16):\n data.append (0)\n continue\n\n # If not DC coefficient\n if len (data) != 0:\n # If End of block\n if key_len == 0x00:\n # Fill the rest of the DU with zeros\n while len (data) < 64:\n data.append (0)\n break\n\n # The first part of the AC key_len is the number of leading\n # zeros\n for i in range (key_len >> 4):\n if len (data) < 64:\n data.append (0)\n key_len &= 0x0F\n\n if len (data) >= 64:\n break\n\n if key_len != 0:\n # The rest of key_len is the number of \"additional\" bits\n val = getBits (key_len, self.bit_stream)\n if val == []:\n break\n # Decode the additional bits\n num = decodeBits (key_len, val)\n\n # Experimental, doesn't work right\n if len (data) == 0 and self.inline_dc:\n # The DC coefficient value is added to the DC value from\n # the corresponding DU in the previous MCU\n num += self.dc[comp_num]\n self.dc[comp_num] = num\n\n data.append (num)\n else:\n data.append (0)\n\n #if len(data) != 64:\n #print \"Wrong size\", len(data)\n\n return data\n\n def __readMCU (self):\n \"\"\" Read an MCU. 
\"\"\"\n\n comp_num = mcu = range (self.num_components)\n\n # For each component\n for i in comp_num:\n comp = self.component[i + 1]\n mcu[i] = []\n # For each DU\n for j in range (comp['H'] * comp['V']):\n if not self.EOI:\n mcu[i].append (self.__readDU (i + 1))\n\n self.mcus_read += 1\n\n return mcu\n\n def __readSOF (self, type, file):\n \"\"\" Read the start of frame marker. \"\"\"\n\n Lf = readWord (file) # Read the marker length\n Lf -= 2\n P = readByte (file) # Read the sample precision\n Lf -= 1\n Y = readWord (file) # Read number of lines\n Lf -= 2\n X = readWord (file) # Read the number of samples per line\n Lf -= 2\n Nf = readByte (file) # Read number of components\n Lf -= 1\n\n self.XYP = X, Y, P\n #print self.XYP\n\n while Lf > 0:\n C = readByte (file) # Read component identifier\n V = readByte (file) # Read sampling factors\n Tq = readByte (file)\n Lf -= 3\n H = V >> 4\n V &= 0xF\n # Assign horizontal & vertical sampling factors and qtable\n self.component[C] = { 'H' : H, 'V' : V, 'Tq' : Tq }\n\n def __readSOS (self, file):\n \"\"\" Read the start of scan marker. \"\"\"\n\n Ls = readWord (file)\n Ls -= 2\n\n Ns = readByte (file) # Read number of components in scan\n Ls -= 1\n\n for i in range (Ns):\n Cs = readByte (file) # Read the scan component selector\n Ls -= 1\n Ta = readByte (file) # Read the huffman table selectors\n Ls -= 1\n Td = Ta >> 4\n Ta &= 0xF\n # Assign the DC huffman table\n self.component[Cs]['Td'] = Td\n # Assign the AC huffman table\n self.component[Cs]['Ta'] = Ta\n\n Ss = readByte (file) # Should be zero if baseline DCT\n Ls -= 1\n Se = readByte (file) # Should be 63 if baseline DCT\n Ls -= 1\n A = readByte (file) # Should be zero if baseline DCT\n Ls -= 1\n\n #print \"Ns:%d Ss:%d Se:%d A:%02X\" % (Ns, Ss, Se, A)\n self.num_components = Ns\n self.dc = [0 for i in range (self.num_components + 1)]\n\n def dequantize (self, mcu):\n \"\"\" Dequantize an MCU. \"\"\"\n\n out = mcu\n\n # For each coefficient in each DU in each component, multiply by the\n # corresponding value in the quantization table.\n for c in range (len (out)):\n for du in range (len (out[c])):\n for i in range (len (out[c][du])):\n out[c][du][i] *= self.q_table[self.component[c + 1]['Tq']][i]\n\n return out\n\ndef getHist(filename):\n try:\n import JPEG_MetaInfoPy\n hist, lowValue = JPEG_MetaInfoPy.generateHistogram(filename)\n return np.asarray(hist),np.asarray(range(lowValue,lowValue+len(hist)+1))\n except Exception as ex:\n logging.getLogger('maskgen').warn('External JPEG_MetaInfoPy failed: {}'.format(str(ex)))\n DC = JPEG_Reader().readDCT_Coeffs(filename)[0]\n minDC = min(DC)\n maxDC = max(DC)\n binCount = maxDC - minDC + 1\n return np.histogram (DC, bins=binCount,\n range=(minDC, maxDC + 1))\nclass JPEG_View:\n def appliesTo (self, filename):\n return filename.lower ().endswith (('jpg', 'jpeg'))\n\n def draw (self, frame, filename):\n\n fig = plt.figure ();\n self._plotHistogram (fig, getHist(filename))\n canvas = FigureCanvasTkAgg (fig, frame)\n canvas.show ()\n canvas.get_tk_widget ().pack (side=BOTTOM, fill=BOTH, expand=True)\n\n def _labelSigma (self, figure, sigma):\n \"\"\" Add a label of the value of sigma to the histogram plot. 
\"\"\"\n\n props = dict (boxstyle='round', facecolor='wheat', alpha=0.5)\n figure.text (0.25, 0.85, '$\\sigma=%.2f$' % (sigma),\n fontsize=14, verticalalignment='top', bbox=props)\n\n\nclass DCTView (JPEG_View):\n def screenName (self):\n return 'JPG DCT Histogram'\n\n def _plotHistogram (self, figure, histogram):\n ordinates, abscissae = histogram\n plt.bar (abscissae[:-1], ordinates, 1);\n self._labelSigma (figure, ordinates.std ())\n\n\nclass FFT_DCTView (JPEG_View):\n def screenName (self):\n return 'FFT(JPG DCT Histogram)'\n\n def _plotHistogram (self, figure, histogram):\n\n # Calculate the DFT of the zero-meaned histogram values. The n\/2+1\n # positive frequencies are returned by rfft. Mirror the result back\n # into ordinates.\n #\n mean = histogram[0].mean ()\n posFreqs = abs (np.fft.rfft ([i - mean for i in histogram[0]]))\n ordinates = list (reversed (posFreqs))\n ordinates.extend (posFreqs[1:])\n n = len (posFreqs)\n abscissae = range (1 - n, n)\n\n plt.plot (abscissae, ordinates, 'k')\n plt.plot (abscissae, self.__hat (ordinates), 'r')\n self._labelSigma (figure, np.std (ordinates))\n\n def __hat (self, data):\n length = len (data)\n intercept1 = int (length * 0.425)\n intercept2 = int (length * 0.575)\n amp = max (data)\n threshold = amp * 0.15\n\n arr = np.full (length, threshold)\n arr[intercept1:intercept2] = amp\n\n return arr\n\n\nif __name__ == \"__main__\":\n DCTView ().draw (None, sys.argv[1])\n FFT_DCTView ().draw (None, sys.argv[1])","license":"bsd-3-clause"} {"repo_name":"DataCanvasIO\/example-modules","path":"modules\/modeling\/basic\/linear_svc_estimator\/main.py","copies":"2","size":"1630","content":"#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n\nimport random\nfrom specparser import get_settings_from_file\nfrom pprint import pprint\nimport csv\nfrom sklearn.svm import LinearSVC\nimport numpy as np\nfrom sklearn.externals import joblib\n\nimport matplotlib\nmatplotlib.use('Agg')\n \nimport datetime\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\n\ndef drawPrecisionRecall(X,Y,output_file):\n pdf = PdfPages(output_file) \n plt.figure(figsize=(len(Y), len(X))) \n plt.plot(Y, X, 'r-o') \n plt.title('Precision\/Recall') \n pdf.savefig() # saves the current figure into a pdf page \n plt.close() \n pdf.close()\n\ndef readcolumn(filename):\n column = []\n with open(filename,\"r\") as fconcl:\n for line in fconcl:\n column.append(line.rstrip('\\n'))\n return column\n\ndef main():\n settings = get_settings_from_file(\"spec.json\")\n print(settings)\n X = np.genfromtxt(settings.Input.X, delimiter=',', skip_header=1)\n svc = joblib.load(settings.Input.MODEL)\n Y_out = svc.predict(X)\n Y_list = [Y_out]\n np.savetxt(\".\/conclusion.csv\", Y_out, fmt=\"%d\", delimiter=\",\")\n \n conclusion = readcolumn(\".\/conclusion.csv\")\n label = readcolumn(settings.Input.Y) \n \n precision_list = []\n recall_list = []\n \n hits = 0\n for i in range(len(label)):\n if conclusion[i] == label[i]:\n hits+=1\n precision_list.append(1.0*hits\/(i+1)) \n recall_list.append(1.0*hits\/(len(label)))\n\n drawPrecisionRecall(precision_list,recall_list,settings.Output.report)\n print(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n","license":"bsd-3-clause"} {"repo_name":"bjlittle\/iris","path":"docs\/gallery_code\/oceanography\/plot_atlantic_profiles.py","copies":"2","size":"3317","content":"\"\"\"\nOceanographic Profiles and T-S Diagrams\n=======================================\n\nThis example demonstrates how to plot vertical profiles of 
different\nvariables in the same axes, and how to make a scatter plot of two\nvariables. There is an oceanographic theme but the same techniques are\nequally applicable to atmospheric or other kinds of data.\n\nThe data used are profiles of potential temperature and salinity in the\nEquatorial and South Atlantic, output from an ocean model.\n\nThe y-axis of the first plot produced will be automatically inverted due to the\npresence of the attribute positive=down on the depth coordinate. This means\ndepth values intuitively increase downward on the y-axis.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nimport iris\nimport iris.iterate\nimport iris.plot as iplt\n\n\ndef main():\n # Load the gridded temperature and salinity data.\n fname = iris.sample_data_path(\"atlantic_profiles.nc\")\n cubes = iris.load(fname)\n (theta,) = cubes.extract(\"sea_water_potential_temperature\")\n (salinity,) = cubes.extract(\"sea_water_practical_salinity\")\n\n # Extract profiles of temperature and salinity from a particular point in\n # the southern portion of the domain, and limit the depth of the profile\n # to 1000m.\n lon_cons = iris.Constraint(longitude=330.5)\n lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)\n depth_cons = iris.Constraint(depth=lambda d: d <= 1000)\n theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)\n salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)\n\n # Plot these profiles on the same set of axes. Depth is automatically\n # recognised as a vertical coordinate and placed on the y-axis.\n # The first plot is in the default axes. We'll use the same color for the\n # curve and its axes\/tick labels.\n plt.figure(figsize=(5, 6))\n temperature_color = (0.3, 0.4, 0.5)\n ax1 = plt.gca()\n iplt.plot(\n theta_1000m,\n linewidth=2,\n color=temperature_color,\n alpha=0.75,\n )\n ax1.set_xlabel(\"Potential Temperature \/ K\", color=temperature_color)\n ax1.set_ylabel(\"Depth \/ m\")\n for ticklabel in ax1.get_xticklabels():\n ticklabel.set_color(temperature_color)\n\n # To plot salinity in the same axes we use twiny(). We'll use a different\n # color to identify salinity.\n salinity_color = (0.6, 0.1, 0.15)\n ax2 = plt.gca().twiny()\n iplt.plot(\n salinity_1000m,\n linewidth=2,\n color=salinity_color,\n alpha=0.75,\n )\n ax2.set_xlabel(\"Salinity \/ PSU\", color=salinity_color)\n for ticklabel in ax2.get_xticklabels():\n ticklabel.set_color(salinity_color)\n plt.tight_layout()\n iplt.show()\n\n # Now plot a T-S diagram using scatter. 
We'll use all the profiles here,\n # and each point will be coloured according to its depth.\n plt.figure(figsize=(6, 6))\n depth_values = theta.coord(\"depth\").points\n for s, t in iris.iterate.izip(salinity, theta, coords=\"depth\"):\n iplt.scatter(s, t, c=depth_values, marker=\"+\", cmap=\"RdYlBu_r\")\n ax = plt.gca()\n ax.set_xlabel(\"Salinity \/ PSU\")\n ax.set_ylabel(\"Potential Temperature \/ K\")\n cb = plt.colorbar(orientation=\"horizontal\")\n cb.set_label(\"Depth \/ m\")\n plt.tight_layout()\n iplt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","license":"lgpl-3.0"} {"repo_name":"janmtl\/pypsych","path":"tests\/data\/generators\/eprime.py","copies":"1","size":"2106","content":"#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nScript for generating mock EPrime test data\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport io\npd.set_option('display.max_rows', 50)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\nfrom pypsych.config import Config\n\n\ndef generate_mock_eprime_data(config_path, task_name, begaze_data, sched_path):\n \"\"\"Generate mock eprime data based on mock begaze data.\"\"\"\n superconfig = Config(path=config_path)\n superconfig.load()\n config = superconfig.get_subconfig(task_name, 'EPrime')\n\n bg = begaze_data['merged_labels'][['Condition', 'ID']]\n ed = np.random.randint(0, 10, (bg.shape[0], len(config['channels'])))\n ep = pd.DataFrame(data=ed, index=bg.index, columns=config['channels'])\n df = pd.concat([bg, ep], axis=1, join='inner')\n\n df.rename(columns={'ID': 'Img'}, inplace=True)\n\n result = []\n for _, row in df.iterrows():\n props = [\"\\t\" + str(idx) + ': ' + str(val)\n for idx, val in zip(list(row.index), list(row))]\n result.append(\"\\n\\n\".join(props))\n result = ('\\n\\n\\t*** LogFrame End ***\\n\\n'\n '\\tLevel: 2\\n\\n'\n '\\t*** LogFrame Start ***\\n\\n').join(result)\n prestring = ('*** Header Start ***\\n\\n'\n 'GARBAGE\\n\\n'\n '*** Header End ***\\n\\n'\n '\\tLevel: 2\\n\\n'\n '\\t*** LogFrame Start ***\\n\\n')\n result = prestring + result + '\\n\\n\\t*** LogFrame End ***'\n return {'df': df, 'raw': result}\n\n\ndef save_mock_eprime_data(output_path, data, subject_id, task_order, task_name):\n \"\"\"Save the mock eprime files to output_path.\"\"\"\n base_path = ''.join([output_path,\n task_name,\n '_',\n str(subject_id),\n str(task_order)])\n raw_path = ''.join([base_path, '_eprime.txt'])\n df_path = ''.join([base_path, '_eprime_df.txt'])\n\n with io.open(raw_path, 'w', encoding=\"utf-16\") as f:\n f.write(unicode(data['raw']))\n\n data['df'].to_csv(df_path, sep=\"\\t\")\n\n pass\n","license":"bsd-3-clause"} {"repo_name":"AlexanderFabisch\/scikit-learn","path":"sklearn\/metrics\/pairwise.py","copies":"9","size":"45248","content":"# -*- coding: utf-8 -*-\n\n# Authors: Alexandre Gramfort \n# Mathieu Blondel \n# Robert Layton \n# Andreas Mueller \n# Philippe Gervais \n# Lars Buitinck \n# Joel Nothman \n# License: BSD 3 clause\n\nimport itertools\n\nimport numpy as np\nfrom scipy.spatial import distance\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import issparse\n\nfrom ..utils import check_array\nfrom ..utils import gen_even_slices\nfrom ..utils import gen_batches\nfrom ..utils.fixes import partial\nfrom ..utils.extmath import row_norms, safe_sparse_dot\nfrom ..preprocessing import normalize\nfrom ..externals.joblib import Parallel\nfrom ..externals.joblib import delayed\nfrom ..externals.joblib.parallel import cpu_count\n\nfrom .pairwise_fast import 
_chi2_kernel_fast, _sparse_manhattan\n\n\n# Utility Functions\ndef _return_float_dtype(X, Y):\n \"\"\"\n 1. If dtype of X and Y is float32, then dtype float32 is returned.\n 2. Else dtype float is returned.\n \"\"\"\n if not issparse(X) and not isinstance(X, np.ndarray):\n X = np.asarray(X)\n\n if Y is None:\n Y_dtype = X.dtype\n elif not issparse(Y) and not isinstance(Y, np.ndarray):\n Y = np.asarray(Y)\n Y_dtype = Y.dtype\n else:\n Y_dtype = Y.dtype\n\n if X.dtype == Y_dtype == np.float32:\n dtype = np.float32\n else:\n dtype = np.float\n\n return X, Y, dtype\n\n\ndef check_pairwise_arrays(X, Y, precomputed=False):\n \"\"\" Set X and Y appropriately and checks inputs\n\n If Y is None, it is set as a pointer to X (i.e. not a copy).\n If Y is given, this does not happen.\n All distance metrics should use this function first to assert that the\n given parameters are correct and safe to use.\n\n Specifically, this function first ensures that both X and Y are arrays,\n then checks that they are at least two dimensional while ensuring that\n their elements are floats. Finally, the function checks that the size\n of the second dimension of the two arrays is equal, or the equivalent\n check for a precomputed distance matrix.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples_a, n_features)\n\n Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)\n\n precomputed : bool\n True if X is to be treated as precomputed distances to the samples in\n Y.\n\n Returns\n -------\n safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)\n An array equal to X, guaranteed to be a numpy array.\n\n safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)\n An array equal to Y if Y was not None, guaranteed to be a numpy array.\n If Y was None, safe_Y will be a pointer to X.\n\n \"\"\"\n X, Y, dtype = _return_float_dtype(X, Y)\n\n if Y is X or Y is None:\n X = Y = check_array(X, accept_sparse='csr', dtype=dtype)\n else:\n X = check_array(X, accept_sparse='csr', dtype=dtype)\n Y = check_array(Y, accept_sparse='csr', dtype=dtype)\n\n if precomputed:\n if X.shape[1] != Y.shape[0]:\n raise ValueError(\"Precomputed metric requires shape \"\n \"(n_queries, n_indexed). Got (%d, %d) \"\n \"for %d indexed.\" %\n (X.shape[0], X.shape[1], Y.shape[0]))\n elif X.shape[1] != Y.shape[1]:\n raise ValueError(\"Incompatible dimension for X and Y matrices: \"\n \"X.shape[1] == %d while Y.shape[1] == %d\" % (\n X.shape[1], Y.shape[1]))\n\n return X, Y\n\n\ndef check_paired_arrays(X, Y):\n \"\"\" Set X and Y appropriately and checks inputs for paired distances\n\n All paired distance metrics should use this function first to assert that\n the given parameters are correct and safe to use.\n\n Specifically, this function first ensures that both X and Y are arrays,\n then checks that they are at least two dimensional while ensuring that\n their elements are floats. 
Finally, the function checks that the size\n of the dimensions of the two arrays are equal.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples_a, n_features)\n\n Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)\n\n Returns\n -------\n safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)\n An array equal to X, guaranteed to be a numpy array.\n\n safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)\n An array equal to Y if Y was not None, guaranteed to be a numpy array.\n If Y was None, safe_Y will be a pointer to X.\n\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if X.shape != Y.shape:\n raise ValueError(\"X and Y should be of same shape. They were \"\n \"respectively %r and %r long.\" % (X.shape, Y.shape))\n return X, Y\n\n\n# Pairwise distances\ndef euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,\n X_norm_squared=None):\n \"\"\"\n Considering the rows of X (and Y=X) as vectors, compute the\n distance matrix between each pair of vectors.\n\n For efficiency reasons, the euclidean distance between a pair of row\n vector x and y is computed as::\n\n dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))\n\n This formulation has two advantages over other ways of computing distances.\n First, it is computationally efficient when dealing with sparse data.\n Second, if one argument varies but the other remains unchanged, then\n `dot(x, x)` and\/or `dot(y, y)` can be pre-computed.\n\n However, this is not the most precise way of doing this computation, and\n the distance matrix returned by this function may not be exactly\n symmetric as required by, e.g., ``scipy.spatial.distance`` functions.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples_1, n_features)\n\n Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)\n\n Y_norm_squared : array-like, shape (n_samples_2, ), optional\n Pre-computed dot-products of vectors in Y (e.g.,\n ``(Y**2).sum(axis=1)``)\n\n squared : boolean, optional\n Return squared Euclidean distances.\n\n X_norm_squared : array-like, shape = [n_samples_1], optional\n Pre-computed dot-products of vectors in X (e.g.,\n ``(X**2).sum(axis=1)``)\n\n Returns\n -------\n distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import euclidean_distances\n >>> X = [[0, 1], [1, 1]]\n >>> # distance between rows of X\n >>> euclidean_distances(X, X)\n array([[ 0., 1.],\n [ 1., 0.]])\n >>> # get distance to origin\n >>> euclidean_distances(X, [[0, 0]])\n array([[ 1. 
],\n [ 1.41421356]])\n\n See also\n --------\n paired_distances : distances betweens pairs of elements of X and Y.\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n\n if X_norm_squared is not None:\n XX = check_array(X_norm_squared)\n if XX.shape == (1, X.shape[0]):\n XX = XX.T\n elif XX.shape != (X.shape[0], 1):\n raise ValueError(\n \"Incompatible dimensions for X and X_norm_squared\")\n else:\n XX = row_norms(X, squared=True)[:, np.newaxis]\n\n if X is Y: # shortcut in the common case euclidean_distances(X, X)\n YY = XX.T\n elif Y_norm_squared is not None:\n YY = np.atleast_2d(Y_norm_squared)\n\n if YY.shape != (1, Y.shape[0]):\n raise ValueError(\n \"Incompatible dimensions for Y and Y_norm_squared\")\n else:\n YY = row_norms(Y, squared=True)[np.newaxis, :]\n\n distances = safe_sparse_dot(X, Y.T, dense_output=True)\n distances *= -2\n distances += XX\n distances += YY\n np.maximum(distances, 0, out=distances)\n\n if X is Y:\n # Ensure that distances between vectors and themselves are set to 0.0.\n # This may not be the case due to floating point rounding errors.\n distances.flat[::distances.shape[0] + 1] = 0.0\n\n return distances if squared else np.sqrt(distances, out=distances)\n\n\ndef pairwise_distances_argmin_min(X, Y, axis=1, metric=\"euclidean\",\n batch_size=500, metric_kwargs=None):\n \"\"\"Compute minimum distances between one point and a set of points.\n\n This function computes for each row in X, the index of the row of Y which\n is closest (according to the specified distance). The minimal distances are\n also returned.\n\n This is mostly equivalent to calling:\n\n (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),\n pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))\n\n but uses much less memory, and is faster for large arrays.\n\n Parameters\n ----------\n X, Y : {array-like, sparse matrix}\n Arrays containing points. Respective shapes (n_samples1, n_features)\n and (n_samples2, n_features)\n\n batch_size : integer\n To reduce memory consumption over the naive solution, data are\n processed in batches, comprising batch_size rows of X and\n batch_size rows of Y. The default value is quite conservative, but\n can be changed for fine-tuning. The larger the number, the larger the\n memory usage.\n\n metric : string or callable, default 'euclidean'\n metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. 
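# --- Illustrative sketch (addition, not part of the original module) ---
# The expansion used by euclidean_distances above,
#     ||x - y||^2 = ||x||^2 - 2*dot(x, y) + ||y||^2,
# checked against a direct computation with scipy.spatial.distance.cdist:
import numpy as np
from scipy.spatial import distance

rng = np.random.RandomState(0)
X = rng.rand(5, 3)
Y = rng.rand(4, 3)

XX = (X ** 2).sum(axis=1)[:, np.newaxis]
YY = (Y ** 2).sum(axis=1)[np.newaxis, :]
D2 = XX - 2 * np.dot(X, Y.T) + YY
np.maximum(D2, 0, out=D2)    # clip tiny negative values from round-off
print(np.allclose(np.sqrt(D2), distance.cdist(X, Y)))    # True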
This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n Distance matrices are not supported.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n 'sqeuclidean', 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n metric_kwargs : dict, optional\n Keyword arguments to pass to specified metric function.\n\n axis : int, optional, default 1\n Axis along which the argmin and distances are to be computed.\n\n Returns\n -------\n argmin : numpy.ndarray\n Y[argmin[i], :] is the row in Y that is closest to X[i, :].\n\n distances : numpy.ndarray\n distances[i] is the distance between the i-th row in X and the\n argmin[i]-th row in Y.\n\n See also\n --------\n sklearn.metrics.pairwise_distances\n sklearn.metrics.pairwise_distances_argmin\n \"\"\"\n dist_func = None\n if metric in PAIRWISE_DISTANCE_FUNCTIONS:\n dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]\n elif not callable(metric) and not isinstance(metric, str):\n raise ValueError(\"'metric' must be a string or a callable\")\n\n X, Y = check_pairwise_arrays(X, Y)\n\n if metric_kwargs is None:\n metric_kwargs = {}\n\n if axis == 0:\n X, Y = Y, X\n\n # Allocate output arrays\n indices = np.empty(X.shape[0], dtype=np.intp)\n values = np.empty(X.shape[0])\n values.fill(np.infty)\n\n for chunk_x in gen_batches(X.shape[0], batch_size):\n X_chunk = X[chunk_x, :]\n\n for chunk_y in gen_batches(Y.shape[0], batch_size):\n Y_chunk = Y[chunk_y, :]\n\n if dist_func is not None:\n if metric == 'euclidean': # special case, for speed\n d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,\n dense_output=True)\n d_chunk *= -2\n d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]\n d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]\n np.maximum(d_chunk, 0, d_chunk)\n else:\n d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)\n else:\n d_chunk = pairwise_distances(X_chunk, Y_chunk,\n metric=metric, **metric_kwargs)\n\n # Update indices and minimum values using chunk\n min_indices = d_chunk.argmin(axis=1)\n min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),\n min_indices]\n\n flags = values[chunk_x] > min_values\n indices[chunk_x][flags] = min_indices[flags] + chunk_y.start\n values[chunk_x][flags] = min_values[flags]\n\n if metric == \"euclidean\" and not metric_kwargs.get(\"squared\", False):\n np.sqrt(values, values)\n return indices, values\n\n\ndef pairwise_distances_argmin(X, Y, axis=1, metric=\"euclidean\",\n batch_size=500, metric_kwargs=None):\n \"\"\"Compute minimum distances between one point and a set of points.\n\n This function computes for each row in X, the index of the row of Y which\n is closest (according to the specified distance).\n\n This is mostly equivalent to calling:\n\n pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)\n\n but uses much less memory, and is faster for large arrays.\n\n This function works with dense 2D arrays only.\n\n Parameters\n ----------\n X : array-like\n Arrays containing points. Respective shapes (n_samples1, n_features)\n and (n_samples2, n_features)\n\n Y : array-like\n Arrays containing points. 
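# --- Illustrative sketch (addition, not part of the original module) ---
# pairwise_distances_argmin_min returns the same result as the naive argmin
# and min over the full distance matrix, while only materialising
# batch_size-sized blocks of that matrix at a time.
import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min

rng = np.random.RandomState(0)
X = rng.rand(6, 2)
Y = rng.rand(9, 2)

idx, dist = pairwise_distances_argmin_min(X, Y)
D = pairwise_distances(X, Y)
print(np.array_equal(idx, D.argmin(axis=1)))    # True
print(np.allclose(dist, D.min(axis=1)))         # True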
Respective shapes (n_samples1, n_features)\n and (n_samples2, n_features)\n\n batch_size : integer\n To reduce memory consumption over the naive solution, data are\n processed in batches, comprising batch_size rows of X and\n batch_size rows of Y. The default value is quite conservative, but\n can be changed for fine-tuning. The larger the number, the larger the\n memory usage.\n\n metric : string or callable\n metric to use for distance computation. Any metric from scikit-learn\n or scipy.spatial.distance can be used.\n\n If metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays as input and return one value indicating the\n distance between them. This works for Scipy's metrics, but is less\n efficient than passing the metric name as a string.\n\n Distance matrices are not supported.\n\n Valid values for metric are:\n\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']\n\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n 'sqeuclidean', 'yule']\n\n See the documentation for scipy.spatial.distance for details on these\n metrics.\n\n metric_kwargs : dict\n keyword arguments to pass to specified metric function.\n\n axis : int, optional, default 1\n Axis along which the argmin and distances are to be computed.\n\n Returns\n -------\n argmin : numpy.ndarray\n Y[argmin[i], :] is the row in Y that is closest to X[i, :].\n\n See also\n --------\n sklearn.metrics.pairwise_distances\n sklearn.metrics.pairwise_distances_argmin_min\n \"\"\"\n if metric_kwargs is None:\n metric_kwargs = {}\n\n return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,\n metric_kwargs)[0]\n\n\ndef manhattan_distances(X, Y=None, sum_over_features=True,\n size_threshold=5e8):\n \"\"\" Compute the L1 distances between the vectors in X and Y.\n\n With sum_over_features equal to False it returns the componentwise\n distances.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array_like\n An array with shape (n_samples_X, n_features).\n\n Y : array_like, optional\n An array with shape (n_samples_Y, n_features).\n\n sum_over_features : bool, default=True\n If True the function returns the pairwise distance matrix\n else it returns the componentwise L1 pairwise-distances.\n Not supported for sparse matrix inputs.\n\n size_threshold : int, default=5e8\n Unused parameter.\n\n Returns\n -------\n D : array\n If sum_over_features is False shape is\n (n_samples_X * n_samples_Y, n_features) and D contains the\n componentwise L1 pairwise-distances (ie. 
absolute difference),\n else shape is (n_samples_X, n_samples_Y) and D contains\n the pairwise L1 distances.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import manhattan_distances\n >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS\n array([[ 0.]])\n >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS\n array([[ 1.]])\n >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS\n array([[ 1.]])\n >>> manhattan_distances([[1, 2], [3, 4]],\\\n [[1, 2], [0, 3]])#doctest:+ELLIPSIS\n array([[ 0., 2.],\n [ 4., 4.]])\n >>> import numpy as np\n >>> X = np.ones((1, 2))\n >>> y = 2 * np.ones((2, 2))\n >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS\n array([[ 1., 1.],\n [ 1., 1.]]...)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n\n if issparse(X) or issparse(Y):\n if not sum_over_features:\n raise TypeError(\"sum_over_features=%r not supported\"\n \" for sparse matrices\" % sum_over_features)\n\n X = csr_matrix(X, copy=False)\n Y = csr_matrix(Y, copy=False)\n D = np.zeros((X.shape[0], Y.shape[0]))\n _sparse_manhattan(X.data, X.indices, X.indptr,\n Y.data, Y.indices, Y.indptr,\n X.shape[1], D)\n return D\n\n if sum_over_features:\n return distance.cdist(X, Y, 'cityblock')\n\n D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]\n D = np.abs(D, D)\n return D.reshape((-1, X.shape[1]))\n\n\ndef cosine_distances(X, Y=None):\n \"\"\"Compute cosine distance between samples in X and Y.\n\n Cosine distance is defined as 1.0 minus the cosine similarity.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array_like, sparse matrix\n with shape (n_samples_X, n_features).\n\n Y : array_like, sparse matrix (optional)\n with shape (n_samples_Y, n_features).\n\n Returns\n -------\n distance matrix : array\n An array with shape (n_samples_X, n_samples_Y).\n\n See also\n --------\n sklearn.metrics.pairwise.cosine_similarity\n scipy.spatial.distance.cosine (dense matrices only)\n \"\"\"\n # 1.0 - cosine_similarity(X, Y) without copy\n S = cosine_similarity(X, Y)\n S *= -1\n S += 1\n return S\n\n\n# Paired distances\ndef paired_euclidean_distances(X, Y):\n \"\"\"\n Computes the paired euclidean distances between X and Y\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n Y : array-like, shape (n_samples, n_features)\n\n Returns\n -------\n distances : ndarray (n_samples, )\n \"\"\"\n X, Y = check_paired_arrays(X, Y)\n return row_norms(X - Y)\n\n\ndef paired_manhattan_distances(X, Y):\n \"\"\"Compute the L1 distances between the vectors in X and Y.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n Y : array-like, shape (n_samples, n_features)\n\n Returns\n -------\n distances : ndarray (n_samples, )\n \"\"\"\n X, Y = check_paired_arrays(X, Y)\n diff = X - Y\n if issparse(diff):\n diff.data = np.abs(diff.data)\n return np.squeeze(np.array(diff.sum(axis=1)))\n else:\n return np.abs(diff).sum(axis=-1)\n\n\ndef paired_cosine_distances(X, Y):\n \"\"\"\n Computes the paired cosine distances between X and Y\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n Y : array-like, shape (n_samples, n_features)\n\n Returns\n -------\n distances : ndarray, shape (n_samples, )\n\n Notes\n ------\n The cosine distance is equivalent to the half the squared\n euclidean distance if each sample is normalized to unit norm\n \"\"\"\n X, Y = check_paired_arrays(X, Y)\n 
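    # Note (added): for rows rescaled to unit L2 norm, u = normalize(x) and
    # v = normalize(y), we have ||u - v||^2 = 2 - 2*cos(x, y), so half of the
    # squared distance below is exactly 1 - cosine_similarity, i.e. the
    # cosine distance described in the Notes section above.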
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)\n\n\nPAIRED_DISTANCES = {\n 'cosine': paired_cosine_distances,\n 'euclidean': paired_euclidean_distances,\n 'l2': paired_euclidean_distances,\n 'l1': paired_manhattan_distances,\n 'manhattan': paired_manhattan_distances,\n 'cityblock': paired_manhattan_distances}\n\n\ndef paired_distances(X, Y, metric=\"euclidean\", **kwds):\n \"\"\"\n Computes the paired distances between X and Y.\n\n Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray (n_samples, n_features)\n Array 1 for distance computation.\n\n Y : ndarray (n_samples, n_features)\n Array 2 for distance computation.\n\n metric : string or callable\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n specified in PAIRED_DISTANCES, including \"euclidean\",\n \"manhattan\", or \"cosine\".\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n Returns\n -------\n distances : ndarray (n_samples, )\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import paired_distances\n >>> X = [[0, 1], [1, 1]]\n >>> Y = [[0, 1], [2, 1]]\n >>> paired_distances(X, Y)\n array([ 0., 1.])\n\n See also\n --------\n pairwise_distances : pairwise distances.\n \"\"\"\n\n if metric in PAIRED_DISTANCES:\n func = PAIRED_DISTANCES[metric]\n return func(X, Y)\n elif callable(metric):\n # Check the matrix first (it is usually done by the metric)\n X, Y = check_paired_arrays(X, Y)\n distances = np.zeros(len(X))\n for i in range(len(X)):\n distances[i] = metric(X[i], Y[i])\n return distances\n else:\n raise ValueError('Unknown distance %s' % metric)\n\n\n# Kernels\ndef linear_kernel(X, Y=None):\n \"\"\"\n Compute the linear kernel between X and Y.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array of shape (n_samples_1, n_features)\n\n Y : array of shape (n_samples_2, n_features)\n\n Returns\n -------\n Gram matrix : array of shape (n_samples_1, n_samples_2)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n return safe_sparse_dot(X, Y.T, dense_output=True)\n\n\ndef polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):\n \"\"\"\n Compute the polynomial kernel between X and Y::\n\n K(X, Y) = (gamma + coef0)^degree\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_1, n_features)\n\n Y : ndarray of shape (n_samples_2, n_features)\n\n coef0 : int, default 1\n\n degree : int, default 3\n\n Returns\n -------\n Gram matrix : array of shape (n_samples_1, n_samples_2)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 \/ X.shape[1]\n\n K = safe_sparse_dot(X, Y.T, dense_output=True)\n K *= gamma\n K += coef0\n K **= degree\n return K\n\n\ndef sigmoid_kernel(X, Y=None, gamma=None, coef0=1):\n \"\"\"\n Compute the sigmoid kernel between X and Y::\n\n K(X, Y) = tanh(gamma + coef0)\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_1, n_features)\n\n Y : ndarray of shape (n_samples_2, n_features)\n\n coef0 : int, default 1\n\n Returns\n -------\n Gram matrix: array of shape (n_samples_1, n_samples_2)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma 
= 1.0 \/ X.shape[1]\n\n K = safe_sparse_dot(X, Y.T, dense_output=True)\n K *= gamma\n K += coef0\n np.tanh(K, K) # compute tanh in-place\n return K\n\n\ndef rbf_kernel(X, Y=None, gamma=None):\n \"\"\"\n Compute the rbf (gaussian) kernel between X and Y::\n\n K(x, y) = exp(-gamma ||x-y||^2)\n\n for each pair of rows x in X and y in Y.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array of shape (n_samples_X, n_features)\n\n Y : array of shape (n_samples_Y, n_features)\n\n gamma : float\n\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 \/ X.shape[1]\n\n K = euclidean_distances(X, Y, squared=True)\n K *= -gamma\n np.exp(K, K) # exponentiate K in-place\n return K\n\n\ndef laplacian_kernel(X, Y=None, gamma=None):\n \"\"\"Compute the laplacian kernel between X and Y.\n\n The laplacian kernel is defined as::\n\n K(x, y) = exp(-gamma ||x-y||_1)\n\n for each pair of rows x in X and y in Y.\n Read more in the :ref:`User Guide `.\n\n .. versionadded:: 0.17\n\n Parameters\n ----------\n X : array of shape (n_samples_X, n_features)\n Y : array of shape (n_samples_Y, n_features)\n gamma : float\n\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 \/ X.shape[1]\n\n K = -gamma * manhattan_distances(X, Y)\n np.exp(K, K) # exponentiate K in-place\n return K\n\n\ndef cosine_similarity(X, Y=None, dense_output=True):\n \"\"\"Compute cosine similarity between samples in X and Y.\n\n Cosine similarity, or the cosine kernel, computes similarity as the\n normalized dot product of X and Y:\n\n K(X, Y) = \/ (||X||*||Y||)\n\n On L2-normalized data, this function is equivalent to linear_kernel.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray or sparse array, shape: (n_samples_X, n_features)\n Input data.\n\n Y : ndarray or sparse array, shape: (n_samples_Y, n_features)\n Input data. If ``None``, the output will be the pairwise\n similarities between all samples in ``X``.\n\n dense_output : boolean (optional), default True\n Whether to return dense output even when the input is sparse. If\n ``False``, the output is sparse if both input arrays are sparse.\n\n .. versionadded:: 0.17\n parameter *dense_output* for sparse output.\n\n Returns\n -------\n kernel matrix : array\n An array with shape (n_samples_X, n_samples_Y).\n \"\"\"\n # to avoid recursive import\n\n X, Y = check_pairwise_arrays(X, Y)\n\n X_normalized = normalize(X, copy=True)\n if X is Y:\n Y_normalized = X_normalized\n else:\n Y_normalized = normalize(Y, copy=True)\n\n K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)\n\n return K\n\n\ndef additive_chi2_kernel(X, Y=None):\n \"\"\"Computes the additive chi-squared kernel between observations in X and Y\n\n The chi-squared kernel is computed between each pair of rows in X and Y. X\n and Y have to be non-negative. 
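# --- Illustrative sketch (addition, not part of the original module) ---
# As the cosine_similarity docstring above notes, on L2-normalized rows the
# cosine kernel reduces to a plain dot product (linear_kernel):
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel

rng = np.random.RandomState(0)
X = rng.rand(5, 4)
Xn = normalize(X)    # rows scaled to unit L2 norm
print(np.allclose(cosine_similarity(X, X), linear_kernel(Xn, Xn)))    # True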
This kernel is most commonly applied to\n histograms.\n\n The chi-squared kernel is given by::\n\n k(x, y) = -Sum [(x - y)^2 \/ (x + y)]\n\n It can be interpreted as a weighted difference per entry.\n\n Read more in the :ref:`User Guide `.\n\n Notes\n -----\n As the negative of a distance, this kernel is only conditionally positive\n definite.\n\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n\n Y : array of shape (n_samples_Y, n_features)\n\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n\n References\n ----------\n * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.\n Local features and kernels for classification of texture and object\n categories: A comprehensive study\n International Journal of Computer Vision 2007\n http:\/\/research.microsoft.com\/en-us\/um\/people\/manik\/projects\/trade-off\/papers\/ZhangIJCV06.pdf\n\n\n See also\n --------\n chi2_kernel : The exponentiated version of the kernel, which is usually\n preferable.\n\n sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation\n to this kernel.\n \"\"\"\n if issparse(X) or issparse(Y):\n raise ValueError(\"additive_chi2 does not support sparse matrices.\")\n X, Y = check_pairwise_arrays(X, Y)\n if (X < 0).any():\n raise ValueError(\"X contains negative values.\")\n if Y is not X and (Y < 0).any():\n raise ValueError(\"Y contains negative values.\")\n\n result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)\n _chi2_kernel_fast(X, Y, result)\n return result\n\n\ndef chi2_kernel(X, Y=None, gamma=1.):\n \"\"\"Computes the exponential chi-squared kernel X and Y.\n\n The chi-squared kernel is computed between each pair of rows in X and Y. X\n and Y have to be non-negative. This kernel is most commonly applied to\n histograms.\n\n The chi-squared kernel is given by::\n\n k(x, y) = exp(-gamma Sum [(x - y)^2 \/ (x + y)])\n\n It can be interpreted as a weighted difference per entry.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n\n Y : array of shape (n_samples_Y, n_features)\n\n gamma : float, default=1.\n Scaling parameter of the chi2 kernel.\n\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n\n References\n ----------\n * Zhang, J. and Marszalek, M. and Lazebnik, S. 
and Schmid, C.\n Local features and kernels for classification of texture and object\n categories: A comprehensive study\n International Journal of Computer Vision 2007\n http:\/\/research.microsoft.com\/en-us\/um\/people\/manik\/projects\/trade-off\/papers\/ZhangIJCV06.pdf\n\n See also\n --------\n additive_chi2_kernel : The additive version of this kernel\n\n sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation\n to the additive version of this kernel.\n \"\"\"\n K = additive_chi2_kernel(X, Y)\n K *= gamma\n return np.exp(K, K)\n\n\n# Helper functions - distance\nPAIRWISE_DISTANCE_FUNCTIONS = {\n # If updating this dictionary, update the doc in both distance_metrics()\n # and also in pairwise_distances()!\n 'cityblock': manhattan_distances,\n 'cosine': cosine_distances,\n 'euclidean': euclidean_distances,\n 'l2': euclidean_distances,\n 'l1': manhattan_distances,\n 'manhattan': manhattan_distances,\n 'precomputed': None, # HACK: precomputed is always allowed, never called\n}\n\n\ndef distance_metrics():\n \"\"\"Valid metrics for pairwise_distances.\n\n This function simply returns the valid pairwise distance metrics.\n It exists to allow for a description of the mapping for\n each of the valid strings.\n\n The valid distance metrics, and the function they map to, are:\n\n ============ ====================================\n metric Function\n ============ ====================================\n 'cityblock' metrics.pairwise.manhattan_distances\n 'cosine' metrics.pairwise.cosine_distances\n 'euclidean' metrics.pairwise.euclidean_distances\n 'l1' metrics.pairwise.manhattan_distances\n 'l2' metrics.pairwise.euclidean_distances\n 'manhattan' metrics.pairwise.manhattan_distances\n ============ ====================================\n\n Read more in the :ref:`User Guide `.\n\n \"\"\"\n return PAIRWISE_DISTANCE_FUNCTIONS\n\n\ndef _parallel_pairwise(X, Y, func, n_jobs, **kwds):\n \"\"\"Break the pairwise matrix in n_jobs even slices\n and compute them in parallel\"\"\"\n if n_jobs < 0:\n n_jobs = max(cpu_count() + 1 + n_jobs, 1)\n\n if Y is None:\n Y = X\n\n if n_jobs == 1:\n # Special case to avoid picklability checks in delayed\n return func(X, Y, **kwds)\n\n # TODO: in some cases, backend='threading' may be appropriate\n fd = delayed(func)\n ret = Parallel(n_jobs=n_jobs, verbose=0)(\n fd(X, Y[s], **kwds)\n for s in gen_even_slices(Y.shape[0], n_jobs))\n\n return np.hstack(ret)\n\n\ndef _pairwise_callable(X, Y, metric, **kwds):\n \"\"\"Handle the callable case for pairwise_{distances,kernels}\n \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n\n if X is Y:\n # Only calculate metric for upper triangle\n out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')\n iterator = itertools.combinations(range(X.shape[0]), 2)\n for i, j in iterator:\n out[i, j] = metric(X[i], Y[j], **kwds)\n\n # Make symmetric\n # NB: out += out.T will produce incorrect results\n out = out + out.T\n\n # Calculate diagonal\n # NB: nonzero diagonals are allowed for both metrics and kernels\n for i in range(X.shape[0]):\n x = X[i]\n out[i, i] = metric(x, x, **kwds)\n\n else:\n # Calculate all cells\n out = np.empty((X.shape[0], Y.shape[0]), dtype='float')\n iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))\n for i, j in iterator:\n out[i, j] = metric(X[i], Y[j], **kwds)\n\n return out\n\n\n_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',\n 'braycurtis', 'canberra', 'chebyshev', 'correlation',\n 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',\n 'mahalanobis', 'matching', 
'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener',\n 'sokalsneath', 'sqeuclidean', 'yule', \"wminkowski\"]\n\n\ndef pairwise_distances(X, Y=None, metric=\"euclidean\", n_jobs=1, **kwds):\n \"\"\" Compute the distance matrix from a vector array X and optional Y.\n\n This method takes either a vector array or a distance matrix, and returns\n a distance matrix. If the input is a vector array, the distances are\n computed. If the input is a distances matrix, it is returned instead.\n\n This method provides a safe way to take a distance matrix as input, while\n preserving compatibility with many other algorithms that take a vector\n array.\n\n If Y is given (default is None), then the returned matrix is the pairwise\n distance between the arrays from both X and Y.\n\n Valid values for metric are:\n\n - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\n 'manhattan']. These metrics support sparse matrix inputs.\n\n - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\n 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n See the documentation for scipy.spatial.distance for details on these\n metrics. These metrics do not support sparse matrix inputs.\n\n Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\n valid scipy.spatial.distance metrics), the scikit-learn implementation\n will be used, which is faster and has support for sparse matrices (except\n for 'cityblock'). For a verbose description of the metrics from\n scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics\n function.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array [n_samples_a, n_samples_a] if metric == \"precomputed\", or, \\\n [n_samples_a, n_features] otherwise\n Array of pairwise distances between samples, or a feature array.\n\n Y : array [n_samples_b, n_features], optional\n An optional second feature array. Only allowed if metric != \"precomputed\".\n\n metric : string, or callable\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\n a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.\n If metric is \"precomputed\", X is assumed to be a distance matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n n_jobs : int\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debugging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n `**kwds` : optional keyword parameters\n Any further parameters are passed directly to the distance function.\n If using a scipy.spatial.distance metric, the parameters are still\n metric dependent. 
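# --- Illustrative sketch (addition, not part of the original module) ---
# pairwise_distances with a built-in metric name versus a user callable.
# The callable path runs a Python-level double loop, so it is much slower,
# but it accepts any function of two rows.
import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X = rng.rand(4, 3)

D_name = pairwise_distances(X, metric='manhattan')
D_call = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
print(np.allclose(D_name, D_call))    # True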
See the scipy docs for usage examples.\n\n Returns\n -------\n D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]\n A distance matrix D such that D_{i, j} is the distance between the\n ith and jth vectors of the given matrix X, if Y is None.\n If Y is not None, then D_{i, j} is the distance between the ith array\n from X and the jth array from Y.\n\n \"\"\"\n if (metric not in _VALID_METRICS and\n not callable(metric) and metric != \"precomputed\"):\n raise ValueError(\"Unknown metric %s. \"\n \"Valid metrics are %s, or 'precomputed', or a \"\n \"callable\" % (metric, _VALID_METRICS))\n\n if metric == \"precomputed\":\n X, _ = check_pairwise_arrays(X, Y, precomputed=True)\n return X\n elif metric in PAIRWISE_DISTANCE_FUNCTIONS:\n func = PAIRWISE_DISTANCE_FUNCTIONS[metric]\n elif callable(metric):\n func = partial(_pairwise_callable, metric=metric, **kwds)\n else:\n if issparse(X) or issparse(Y):\n raise TypeError(\"scipy distance metrics do not\"\n \" support sparse matrices.\")\n X, Y = check_pairwise_arrays(X, Y)\n if n_jobs == 1 and X is Y:\n return distance.squareform(distance.pdist(X, metric=metric,\n **kwds))\n func = partial(distance.cdist, metric=metric, **kwds)\n\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\n\n\n# Helper functions - distance\nPAIRWISE_KERNEL_FUNCTIONS = {\n # If updating this dictionary, update the doc in both distance_metrics()\n # and also in pairwise_distances()!\n 'additive_chi2': additive_chi2_kernel,\n 'chi2': chi2_kernel,\n 'linear': linear_kernel,\n 'polynomial': polynomial_kernel,\n 'poly': polynomial_kernel,\n 'rbf': rbf_kernel,\n 'laplacian': laplacian_kernel,\n 'sigmoid': sigmoid_kernel,\n 'cosine': cosine_similarity, }\n\n\ndef kernel_metrics():\n \"\"\" Valid metrics for pairwise_kernels\n\n This function simply returns the valid pairwise distance metrics.\n It exists, however, to allow for a verbose description of the mapping for\n each of the valid strings.\n\n The valid distance metrics, and the function they map to, are:\n =============== ========================================\n metric Function\n =============== ========================================\n 'additive_chi2' sklearn.pairwise.additive_chi2_kernel\n 'chi2' sklearn.pairwise.chi2_kernel\n 'linear' sklearn.pairwise.linear_kernel\n 'poly' sklearn.pairwise.polynomial_kernel\n 'polynomial' sklearn.pairwise.polynomial_kernel\n 'rbf' sklearn.pairwise.rbf_kernel\n 'laplacian' sklearn.pairwise.laplacian_kernel\n 'sigmoid' sklearn.pairwise.sigmoid_kernel\n 'cosine' sklearn.pairwise.cosine_similarity\n =============== ========================================\n\n Read more in the :ref:`User Guide `.\n \"\"\"\n return PAIRWISE_KERNEL_FUNCTIONS\n\n\nKERNEL_PARAMS = {\n \"additive_chi2\": (),\n \"chi2\": (),\n \"cosine\": (),\n \"exp_chi2\": frozenset([\"gamma\"]),\n \"linear\": (),\n \"poly\": frozenset([\"gamma\", \"degree\", \"coef0\"]),\n \"polynomial\": frozenset([\"gamma\", \"degree\", \"coef0\"]),\n \"rbf\": frozenset([\"gamma\"]),\n \"laplacian\": frozenset([\"gamma\"]),\n \"sigmoid\": frozenset([\"gamma\", \"coef0\"]),\n}\n\n\ndef pairwise_kernels(X, Y=None, metric=\"linear\", filter_params=False,\n n_jobs=1, **kwds):\n \"\"\"Compute the kernel between arrays X and optional array Y.\n\n This method takes either a vector array or a kernel matrix, and returns\n a kernel matrix. If the input is a vector array, the kernels are\n computed. 
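# --- Illustrative sketch (addition, not part of the original module) ---
# pairwise_kernels dispatches on the metric name through
# PAIRWISE_KERNEL_FUNCTIONS, so the string form matches a direct call to the
# kernel; extra keyword arguments such as gamma are forwarded unchanged
# (filter_params=True would drop those the chosen kernel does not accept).
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels, rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(5, 3)

K1 = pairwise_kernels(X, metric='rbf', gamma=0.5)
K2 = rbf_kernel(X, gamma=0.5)
print(np.allclose(K1, K2))    # True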
If the input is a kernel matrix, it is returned instead.\n\n This method provides a safe way to take a kernel matrix as input, while\n preserving compatibility with many other algorithms that take a vector\n array.\n\n If Y is given (default is None), then the returned matrix is the pairwise\n kernel between the arrays from both X and Y.\n\n Valid values for metric are::\n ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array [n_samples_a, n_samples_a] if metric == \"precomputed\", or, \\\n [n_samples_a, n_features] otherwise\n Array of pairwise kernels between samples, or a feature array.\n\n Y : array [n_samples_b, n_features]\n A second feature array only if X has shape [n_samples_a, n_features].\n\n metric : string, or callable\n The metric to use when calculating kernel between instances in a\n feature array. If metric is a string, it must be one of the metrics\n in pairwise.PAIRWISE_KERNEL_FUNCTIONS.\n If metric is \"precomputed\", X is assumed to be a kernel matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n n_jobs : int\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debugging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n filter_params: boolean\n Whether to filter invalid parameters or not.\n\n `**kwds` : optional keyword parameters\n Any further parameters are passed directly to the kernel function.\n\n Returns\n -------\n K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]\n A kernel matrix K such that K_{i, j} is the kernel between the\n ith and jth vectors of the given matrix X, if Y is None.\n If Y is not None, then K_{i, j} is the kernel between the ith array\n from X and the jth array from Y.\n\n Notes\n -----\n If metric is 'precomputed', Y is ignored and X is returned.\n\n \"\"\"\n # import GPKernel locally to prevent circular imports\n from ..gaussian_process.kernels import Kernel as GPKernel\n\n if metric == \"precomputed\":\n X, _ = check_pairwise_arrays(X, Y, precomputed=True)\n return X\n elif isinstance(metric, GPKernel):\n func = metric.__call__\n elif metric in PAIRWISE_KERNEL_FUNCTIONS:\n if filter_params:\n kwds = dict((k, kwds[k]) for k in kwds\n if k in KERNEL_PARAMS[metric])\n func = PAIRWISE_KERNEL_FUNCTIONS[metric]\n elif callable(metric):\n func = partial(_pairwise_callable, metric=metric, **kwds)\n else:\n raise ValueError(\"Unknown kernel %r\" % metric)\n\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\n","license":"bsd-3-clause"} {"repo_name":"dingocuster\/scikit-learn","path":"examples\/applications\/plot_species_distribution_modeling.py","copies":"254","size":"7434","content":"\"\"\"\n=============================\nSpecies distribution modeling\n=============================\n\nModeling species' geographic distributions is an important\nproblem in conservation biology. In this example we\nmodel the geographic distribution of two south american\nmammals given past observations and 14 environmental\nvariables. 
Since we have only positive examples (there are\nno unsuccessful observations), we cast this problem as a\ndensity estimation problem and use the `OneClassSVM` provided\nby the package `sklearn.svm` as our modeling tool.\nThe dataset is provided by Phillips et. al. (2006).\nIf available, the example uses\n`basemap `_\nto plot the coast lines and national boundaries of South America.\n\nThe two species are:\n\n - `\"Bradypus variegatus\"\n `_ ,\n the Brown-throated Sloth.\n\n - `\"Microryzomys minutus\"\n `_ ,\n also known as the Forest Small Rice Rat, a rodent that lives in Peru,\n Colombia, Ecuador, Peru, and Venezuela.\n\nReferences\n----------\n\n * `\"Maximum entropy modeling of species geographic distributions\"\n `_\n S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,\n 190:231-259, 2006.\n\"\"\"\n\n# Authors: Peter Prettenhofer \n# Jake Vanderplas \n#\n# License: BSD 3 clause\n\nfrom __future__ import print_function\n\nfrom time import time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.datasets import fetch_species_distributions\nfrom sklearn.datasets.species_distributions import construct_grids\nfrom sklearn import svm, metrics\n\n# if basemap is available, we'll use it.\n# otherwise, we'll improvise later...\ntry:\n from mpl_toolkits.basemap import Basemap\n basemap = True\nexcept ImportError:\n basemap = False\n\nprint(__doc__)\n\n\ndef create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):\n \"\"\"Create a bunch with information about a particular organism\n\n This will use the test\/train record arrays to extract the\n data specific to the given species name.\n \"\"\"\n bunch = Bunch(name=' '.join(species_name.split(\"_\")[:2]))\n species_name = species_name.encode('ascii')\n points = dict(test=test, train=train)\n\n for label, pts in points.items():\n # choose points associated with the desired species\n pts = pts[pts['species'] == species_name]\n bunch['pts_%s' % label] = pts\n\n # determine coverage values for each of the training & testing points\n ix = np.searchsorted(xgrid, pts['dd long'])\n iy = np.searchsorted(ygrid, pts['dd lat'])\n bunch['cov_%s' % label] = coverages[:, -iy, ix].T\n\n return bunch\n\n\ndef plot_species_distribution(species=(\"bradypus_variegatus_0\",\n \"microryzomys_minutus_0\")):\n \"\"\"\n Plot the species distribution.\n \"\"\"\n if len(species) > 2:\n print(\"Note: when more than two species are provided,\"\n \" only the first two will be used\")\n\n t0 = time()\n\n # Load the compressed data\n data = fetch_species_distributions()\n\n # Set up the data grid\n xgrid, ygrid = construct_grids(data)\n\n # The grid in x,y coordinates\n X, Y = np.meshgrid(xgrid, ygrid[::-1])\n\n # create a bunch for each species\n BV_bunch = create_species_bunch(species[0],\n data.train, data.test,\n data.coverages, xgrid, ygrid)\n MM_bunch = create_species_bunch(species[1],\n data.train, data.test,\n data.coverages, xgrid, ygrid)\n\n # background points (grid coordinates) for evaluation\n np.random.seed(13)\n background_points = np.c_[np.random.randint(low=0, high=data.Ny,\n size=10000),\n np.random.randint(low=0, high=data.Nx,\n size=10000)].T\n\n # We'll make use of the fact that coverages[6] has measurements at all\n # land points. 
This will help us decide between land and water.\n land_reference = data.coverages[6]\n\n # Fit, predict, and plot for each species.\n for i, species in enumerate([BV_bunch, MM_bunch]):\n print(\"_\" * 80)\n print(\"Modeling distribution of species '%s'\" % species.name)\n\n # Standardize features\n mean = species.cov_train.mean(axis=0)\n std = species.cov_train.std(axis=0)\n train_cover_std = (species.cov_train - mean) \/ std\n\n # Fit OneClassSVM\n print(\" - fit OneClassSVM ... \", end='')\n clf = svm.OneClassSVM(nu=0.1, kernel=\"rbf\", gamma=0.5)\n clf.fit(train_cover_std)\n print(\"done.\")\n\n # Plot map of South America\n plt.subplot(1, 2, i + 1)\n if basemap:\n print(\" - plot coastlines using basemap\")\n m = Basemap(projection='cyl', llcrnrlat=Y.min(),\n urcrnrlat=Y.max(), llcrnrlon=X.min(),\n urcrnrlon=X.max(), resolution='c')\n m.drawcoastlines()\n m.drawcountries()\n else:\n print(\" - plot coastlines from coverage\")\n plt.contour(X, Y, land_reference,\n levels=[-9999], colors=\"k\",\n linestyles=\"solid\")\n plt.xticks([])\n plt.yticks([])\n\n print(\" - predict species distribution\")\n\n # Predict species distribution using the training data\n Z = np.ones((data.Ny, data.Nx), dtype=np.float64)\n\n # We'll predict only for the land points.\n idx = np.where(land_reference > -9999)\n coverages_land = data.coverages[:, idx[0], idx[1]].T\n\n pred = clf.decision_function((coverages_land - mean) \/ std)[:, 0]\n Z *= pred.min()\n Z[idx[0], idx[1]] = pred\n\n levels = np.linspace(Z.min(), Z.max(), 25)\n Z[land_reference == -9999] = -9999\n\n # plot contours of the prediction\n plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)\n plt.colorbar(format='%.2f')\n\n # scatter training\/testing points\n plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],\n s=2 ** 2, c='black',\n marker='^', label='train')\n plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],\n s=2 ** 2, c='black',\n marker='x', label='test')\n plt.legend()\n plt.title(species.name)\n plt.axis('equal')\n\n # Compute AUC with regards to background points\n pred_background = Z[background_points[0], background_points[1]]\n pred_test = clf.decision_function((species.cov_test - mean)\n \/ std)[:, 0]\n scores = np.r_[pred_test, pred_background]\n y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]\n fpr, tpr, thresholds = metrics.roc_curve(y, scores)\n roc_auc = metrics.auc(fpr, tpr)\n plt.text(-35, -70, \"AUC: %.3f\" % roc_auc, ha=\"right\")\n print(\"\\n Area under the ROC curve : %f\" % roc_auc)\n\n print(\"\\ntime elapsed: %.2fs\" % (time() - t0))\n\n\nplot_species_distribution()\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"nguy\/brawl4d","path":"LMA\/controller.py","copies":"1","size":"10240","content":"\"\"\" Support for LMA data display in brawl4d.\n \n These are meant to be lightweight wrappers to coordinate data formats \n understood by the lmatools package.\n\n\"\"\"\nimport numpy as np\n\nfrom lmatools.flashsort.autosort.LMAarrayFile import LMAdataFile\n\nfrom stormdrain.bounds import Bounds, BoundsFilter\nfrom stormdrain.data import NamedArrayDataset, indexed\nfrom stormdrain.pipeline import Branchpoint, coroutine, ItemModifier\nfrom stormdrain.support.matplotlib.artistupdaters import PanelsScatterController\nfrom stormdrain.support.matplotlib.poly_lasso import LassoPayloadController\n\nclass LMAAnimator(object):\n \n \n def __init__(self, duration, variable='time'):\n self.tstart = time.time()\n self.duration = duration\n \n def draw_frame(self, 
animator, time_fraction):\n pass\n \n \n def init_draw(self, animator):\n pass\n\n\nclass LMAController(object):\n \"\"\" Manages bounds object with LMA-specific criteria. Convenience functions for loading LMA data.\n \"\"\"\n \n z_alt_mapping = {'z':('alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) ) }\n \n def __init__(self, *args, **kwargs):\n super(LMAController, self).__init__(*args, **kwargs)\n self.bounds = Bounds(chi2=(0.0, 1.0), stations=(6, 99))\n self.default_color_bounds = Bounds(parent=self.bounds, charge=(-1,1))\n self.datasets = set()\n self.flash_datasets = set()\n \n def pipeline_for_dataset(self, d, panels, \n names4d=('lon', 'lat', 'alt', 'time'),\n transform_mapping=None,\n scatter_kwargs = {}\n ):\n \"\"\" Set 4d_names to the spatial coordinate names in d that provide \n longitude, latitude, altitude, and time. Default of \n lon, lat, alt, and time which are assumed to be in deg, deg, meters, seconds\n \n entries in the scatter_kwargs dictionary are passed as kwargs to the matplotlib\n scatter call.\n \"\"\"\n # Set up dataset -> time-height bound filter -> brancher\n branch = Branchpoint([])\n brancher = branch.broadcast()\n \n # strictly speaking, z in the map projection and MSL alt aren't the same - z is somewhat distorted by the projection.\n # therefore, add some padding. filtered again later after projection.\n \n quality_filter = BoundsFilter(target=brancher, bounds=self.bounds).filter()\n if transform_mapping is None:\n transform_mapping = self.z_alt_mapping\n # Use 'time', which is the name in panels.bounds, and not names4d[3], which should\n # is linked to 'time' by transform_mapping if necessary\n bound_filter = BoundsFilter(target=quality_filter, bounds=panels.bounds, \n restrict_to=('time'), transform_mapping=transform_mapping)\n filterer = bound_filter.filter()\n d.target = filterer\n \n # Set up brancher -> coordinate transform -> final_filter -> mutli-axis scatter updater\n scatter_ctrl = PanelsScatterController(\n panels=panels, \n color_field=names4d[3], \n default_color_bounds=self.default_color_bounds,\n **scatter_kwargs)\n scatter_outlet_broadcaster = scatter_ctrl.branchpoint\n scatter_updater = scatter_outlet_broadcaster.broadcast() \n final_bound_filter = BoundsFilter(target=scatter_updater, bounds=panels.bounds)\n final_filterer = final_bound_filter.filter()\n cs_transformer = panels.cs.project_points(\n target=final_filterer, \n x_coord='x', y_coord='y', z_coord='z', \n lat_coord=names4d[1], lon_coord=names4d[0], alt_coord=names4d[2],\n distance_scale_factor=1.0e-3)\n branch.targets.add(cs_transformer)\n \n # return each broadcaster so that other things can tap into results of transformation of this dataset\n return branch, scatter_ctrl\n \n @coroutine\n def flash_stat_printer(self, min_points=10):\n while True:\n ev, fl = (yield)\n template = \"{0} of {1} flashes have > {3} points. 
Their average area = {2:5.1f} km^2\"\n N = len(fl)\n good = (fl['n_points'] >= min_points)\n N_good = len(fl[good])\n area = np.mean(fl['area'][good])\n print template.format(N_good, N, area, min_points)\n \n def flash_stats_for_dataset(self, d, selection_broadcaster):\n \n flash_stat_branchpoint = Branchpoint([self.flash_stat_printer()])\n flash_stat_brancher = flash_stat_branchpoint.broadcast()\n \n @coroutine\n def flash_data_for_selection(target, flash_id_key = 'flash_id'):\n \"\"\" Accepts an array of event data from the pipeline, and sends \n event and flash data.\n \"\"\"\n while True:\n ev = (yield) # array of event data\n fl_dat = d.flash_data\n \n flash_ids = set(ev[flash_id_key])\n flashes = np.fromiter(\n (fl for fl in fl_dat if fl[flash_id_key] in flash_ids), \n dtype=fl_dat.dtype)\n target.send((ev, flashes))\n \n selection_broadcaster.targets.add(flash_data_for_selection(flash_stat_brancher))\n return flash_stat_branchpoint\n \n \n \n \n @indexed()\n def read_dat(self, *args, **kwargs):\n \"\"\" All args and kwargs are passed to the LMAdataFile object from lmatools\"\"\"\n lma = LMAdataFile(*args, **kwargs)\n stn = lma.stations # adds stations to lma.data as a side-effect\n d = NamedArrayDataset(lma.data)\n self.datasets.add(d)\n return d\n \n def load_dat_to_panels(self, panels, *args, **kwargs):\n \"\"\" All args and kwargs are passed to the LMAdataFile object from lmatools\"\"\"\n d = self.read_dat(*args, **kwargs)\n post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels)\n branch_to_scatter_artists = scatter_ctrl.branchpoint\n # ask for a copy of the array from each selection operation, so that\n # it's saved and ready for any lasso operations\n \n charge_lasso = LassoChargeController(\n target=ItemModifier(\n target=d.update(field_names=['charge']), \n item_name='charge').modify())\n branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())\n \n return d, post_filter_brancher, scatter_ctrl, charge_lasso\n \n @indexed(index_name='hdf_row_idx') \n def read_hdf5(self, LMAfileHDF):\n try:\n import tables\n except ImportError:\n print \"couldn't import pytables\"\n return None\n from hdf5_lma import HDF5Dataset\n \n # get the HDF5 table name\n LMAh5 = tables.openFile(LMAfileHDF, 'r')\n table_names = LMAh5.root.events._v_children.keys()\n table_path = '\/events\/' + table_names[0]\n LMAh5.close()\n d = HDF5Dataset(LMAfileHDF, table_path=table_path, mode='a')\n self.datasets.add(d)\n \n if d.flash_table is not None:\n print \"found flash data\"\n \n return d\n \n def load_hdf5_to_panels(self, panels, LMAfileHDF, scatter_kwargs={}):\n d = self.read_hdf5(LMAfileHDF)\n post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels, scatter_kwargs=scatter_kwargs)\n branch_to_scatter_artists = scatter_ctrl.branchpoint\n charge_lasso = LassoChargeController(\n target=ItemModifier(\n target=d.update(index_name='hdf_row_idx',\n field_names=['charge']), \n item_name='charge').modify())\n branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment()) \n \n return d, post_filter_brancher, scatter_ctrl, charge_lasso\n \n def load_hdf5_flashes_to_panels(self, panels, hdf5dataset, min_points=10):\n \"\"\" Set up a flash dataset display. 
The sole argument is usually the HDF5 \n LMA dataset returned by a call to self.load_hdf5_to_panels \"\"\"\n from hdf5_lma import HDF5FlashDataset\n if hdf5dataset.flash_table is not None:\n point_count_dtype = hdf5dataset.flash_data['n_points'].dtype\n self.bounds.n_points = (min_points, np.iinfo(point_count_dtype))\n flash_d = HDF5FlashDataset(hdf5dataset)\n transform_mapping = {}\n transform_mapping['time'] = ('start', (lambda v: (v[0], v[1])) )\n transform_mapping['lat'] = ('init_lat', (lambda v: (v[0], v[1])) )\n transform_mapping['lon'] = ('init_lon', (lambda v: (v[0], v[1])) )\n transform_mapping['z'] = ('init_alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) )\n flash_post_filter_brancher, flash_scatter_ctrl = self.pipeline_for_dataset(flash_d, panels, \n transform_mapping=transform_mapping, \n names4d=('init_lon', 'init_lat', 'init_alt', 'start') )\n for art in flash_scatter_ctrl.artist_outlet_controllers:\n # there is no time variable, but the artist updater is set to expect\n # time. Patch that up.\n if art.coords == ('time', 'z'):\n art.coords = ('start', 'z')\n # Draw flash markers in a different style\n art.artist.set_edgecolor('k')\n self.flash_datasets.add(flash_d)\n return flash_d, flash_post_filter_brancher, flash_scatter_ctrl\n\nclass LassoChargeController(LassoPayloadController):\n \"\"\" The \"charge\" attribute is one of {-1, 0, 1} to set \n negative, unclassified, or positive charge, or None\n to do nothing.\n \"\"\"\n charge = LassoPayloadController.Payload() ","license":"bsd-2-clause"} {"repo_name":"ashokpant\/clandmark","path":"python_interface\/bin\/flandmark_demo.py","copies":"6","size":"2152","content":"import numpy as np\nimport os\nfrom fnmatch import fnmatch\nfrom py_flandmark import PyFlandmark\nfrom PIL import Image\nimport ImageDraw\nimport matplotlib.pyplot as plt\n\n\ndef rgb2gray(rgb):\n\t\"\"\"\n\tconverts rgb array to grey scale variant\n\taccordingly to fomula taken from wiki\n\t(this function is missing in python)\n\t\"\"\"\t\n\treturn np.dot(rgb[...,:3], [0.299, 0.587, 0.144])\n\ndef read_bbox_from_txt(file_name):\n\t\"\"\"\n\t\treturns 2x2 matrix coordinates of \n\t\tleft upper and right lower corners\n\t\tof rectangle that contains face stored\n\t\tin columns of matrix\n\t\"\"\"\n\tf = open(file_name)\n\tstr = f.read().replace(',', ' ')\t\t\n\tf.close()\n\tret = np.array(map(int,str.split()) ,dtype=np.int32)\t\n\tret = ret.reshape((2,2), order='F')\t\n\treturn ret\n\n\nDIR = '..\/..\/..\/data\/Images\/'\nJPGS = [f for f in os.listdir(DIR) if fnmatch(f, '*.jpg')]\nflmrk = PyFlandmark(\"..\/..\/..\/data\/flandmark_model.xml\", False)\n\n\nfor jpg_name in JPGS:\n\n\tfile_name = jpg_name[:-4]\n\timg = Image.open(DIR + jpg_name)\t \t\t\n\tarr = rgb2gray(np.asarray(img))\t\n\tbbox = read_bbox_from_txt(DIR + jpg_name[:-4] + '.det')\n\n\td_landmarks = flmrk.detect(arr, bbox)\n\tn = d_landmarks.shape[1]\n\n\tprint \"test detect method\"\n\n\tim = Image.fromarray(arr)\n\timg_dr = ImageDraw.Draw(im)\n\timg_dr.rectangle([tuple(bbox[:,0]), tuple(bbox[:,1])], outline=\"#FF00FF\")\n\tr = 2.\n\tfor i in xrange(n):\n\t\tx = d_landmarks[0,i]\n\t\ty = d_landmarks[1,i]\n\t\timg_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)\n\n\tplt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))\n\tplt.show()\n\n\tprint \"test detect method\"\n\n\tframe = flmrk.get_normalized_frame(arr, bbox)[0]\n\tframe = frame.astype(np.double)\n\tim = Image.fromarray(frame)\n\tplt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))\n\tplt.show()\n\n\tprint \"test detect_base 
method\"\n\n\tlandmarks = flmrk.detect_base(frame)\n\t\n\tim = Image.fromarray(frame)\n\timg_dr = ImageDraw.Draw(im)\n\t\n\tr = 2.\n\tfor i in xrange(n):\n\t\tx = landmarks[0,i]\n\t\ty = landmarks[1,i]\n\t\timg_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)\n\n\tplt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))\n\tplt.show()\n\n\tprint \"test psi method\"\t\t\n\tpsi = flmrk.get_psi(frame, landmarks.astype(np.int32), bbox)\n\n#flmrk.get_psi(d_landmarks, arr, bbox)\n\n\tbreak","license":"gpl-3.0"} {"repo_name":"abbeymiles\/aima-python","path":"submissions\/Blue\/myNN.py","copies":"10","size":"3071","content":"from sklearn import datasets\nfrom sklearn.neural_network import MLPClassifier\nimport traceback\nfrom submissions.Blue import music\n\n\nclass DataFrame:\n data = []\n feature_names = []\n target = []\n target_names = []\n\nmusicATRB = DataFrame()\nmusicATRB.data = []\ntargetData = []\n'''\nExtract data from the CORGIS Music Library.\n\nMost 'hit' songs average 48-52 bars and no more than ~3 minutes (180 seconds)...\n'''\n\nallSongs = music.get_songs()\nfor song in allSongs:\n try:\n length = float(song['song'][\"duration\"])\n targetData.append(length)\n\n genre = song['artist']['terms'] #String\n title = song['song']['title'] #String\n # release = float(song['song']['Release'])\n\n musicATRB.data.append([genre, title])\n\n except:\n traceback.print_exc()\n\nmusicATRB.feature_names = [\n 'Genre',\n 'Title',\n 'Release',\n 'Length',\n]\n\nmusicATRB.target = []\n\ndef musicTarget(release):\n if (song['song']['duration'] <= 210\n ): #if the song is less that 3.5 minutes (210 seconds) long\n return 1\n return 0\n\nfor i in targetData:\n tt = musicTarget(i)\n musicATRB.target.append(tt)\n\nmusicATRB.target_names = [\n 'Not a hit song',\n 'Could be a hit song',\n]\n\nExamples = {\n 'Music': musicATRB,\n}\n\n'''\nMake a customn classifier,\n'''\nmlpc = MLPClassifier(\n hidden_layer_sizes = (100,),\n activation = 'relu',\n solver='sgd', # 'adam',\n alpha = 0.0001,\n # batch_size='auto',\n learning_rate = 'adaptive', # 'constant',\n # power_t = 0.5,\n max_iter = 1000, # 200,\n shuffle = True,\n # random_state = None,\n # tol = 1e-4,\n # verbose = False,\n # warm_start = False,\n # momentum = 0.9,\n # nesterovs_momentum = True,\n # early_stopping = False,\n # validation_fraction = 0.1,\n # beta_1 = 0.9,\n # beta_2 = 0.999,\n # epsilon = 1e-8,\n)\n\n'''\nTry scaling the data.\n'''\nmusicScaled = DataFrame()\n\ndef setupScales(grid):\n global min, max\n min = list(grid[0])\n max = list(grid[0])\n for row in range(1, len(grid)):\n for col in range(len(grid[row])):\n cell = grid[row][col]\n if cell < min[col]:\n min[col] = cell\n if cell > max[col]:\n max[col] = cell\n\ndef scaleGrid(grid):\n newGrid = []\n for row in range(len(grid)):\n newRow = []\n for col in range(len(grid[row])):\n try:\n cell = grid[row][col]\n scaled = (cell - min[col]) \\\n \/ (max[col] - min[col])\n newRow.append(scaled)\n except:\n pass\n newGrid.append(newRow)\n return newGrid\n\nsetupScales(musicATRB.data)\nmusicScaled.data = scaleGrid(musicATRB.data)\nmusicScaled.feature_names = musicATRB.feature_names\nmusicScaled.target = musicATRB.target\nmusicScaled.target_names = musicATRB.target_names\n\nExamples = {\n 'musicDefault': {\n 'frame': musicATRB,\n },\n 'MusicSGD': {\n 'frame': musicATRB,\n 'mlpc': mlpc\n },\n 'MusisScaled': {\n 'frame': musicScaled,\n },\n}","license":"mit"} {"repo_name":"jblackburne\/scikit-learn","path":"sklearn\/manifold\/setup.py","copies":"24","size":"1279","content":"import os\nfrom os.path 
import join\n\nimport numpy\nfrom numpy.distutils.misc_util import Configuration\nfrom sklearn._build_utils import get_blas_info\n\n\ndef configuration(parent_package=\"\", top_path=None):\n config = Configuration(\"manifold\", parent_package, top_path)\n libraries = []\n if os.name == 'posix':\n libraries.append('m')\n config.add_extension(\"_utils\",\n sources=[\"_utils.c\"],\n include_dirs=[numpy.get_include()],\n libraries=libraries,\n extra_compile_args=[\"-O3\"])\n cblas_libs, blas_info = get_blas_info()\n eca = blas_info.pop('extra_compile_args', [])\n eca.append(\"-O4\")\n config.add_extension(\"_barnes_hut_tsne\",\n libraries=cblas_libs,\n sources=[\"_barnes_hut_tsne.c\"],\n include_dirs=[join('..', 'src', 'cblas'),\n numpy.get_include(),\n blas_info.pop('include_dirs', [])],\n extra_compile_args=eca, **blas_info)\n\n config.add_subpackage('tests')\n\n return config\n\nif __name__ == \"__main__\":\n from numpy.distutils.core import setup\n setup(**configuration().todict())\n","license":"bsd-3-clause"} {"repo_name":"MartinDelzant\/scikit-learn","path":"sklearn\/utils\/tests\/test_random.py","copies":"230","size":"7344","content":"from __future__ import division\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.misc import comb as combinations\nfrom numpy.testing import assert_array_almost_equal\nfrom sklearn.utils.random import sample_without_replacement\nfrom sklearn.utils.random import random_choice_csc\n\nfrom sklearn.utils.testing import (\n assert_raises,\n assert_equal,\n assert_true)\n\n\n###############################################################################\n# test custom sampling without replacement algorithm\n###############################################################################\ndef test_invalid_sample_without_replacement_algorithm():\n assert_raises(ValueError, sample_without_replacement, 5, 4, \"unknown\")\n\n\ndef test_sample_without_replacement_algorithms():\n methods = (\"auto\", \"tracking_selection\", \"reservoir_sampling\", \"pool\")\n\n for m in methods:\n def sample_without_replacement_method(n_population, n_samples,\n random_state=None):\n return sample_without_replacement(n_population, n_samples,\n method=m,\n random_state=random_state)\n\n check_edge_case_of_sample_int(sample_without_replacement_method)\n check_sample_int(sample_without_replacement_method)\n check_sample_int_distribution(sample_without_replacement_method)\n\n\ndef check_edge_case_of_sample_int(sample_without_replacement):\n\n # n_poluation < n_sample\n assert_raises(ValueError, sample_without_replacement, 0, 1)\n assert_raises(ValueError, sample_without_replacement, 1, 2)\n\n # n_population == n_samples\n assert_equal(sample_without_replacement(0, 0).shape, (0, ))\n\n assert_equal(sample_without_replacement(1, 1).shape, (1, ))\n\n # n_population >= n_samples\n assert_equal(sample_without_replacement(5, 0).shape, (0, ))\n assert_equal(sample_without_replacement(5, 1).shape, (1, ))\n\n # n_population < 0 or n_samples < 0\n assert_raises(ValueError, sample_without_replacement, -1, 5)\n assert_raises(ValueError, sample_without_replacement, 5, -1)\n\n\ndef check_sample_int(sample_without_replacement):\n # This test is heavily inspired from test_random.py of python-core.\n #\n # For the entire allowable range of 0 <= k <= N, validate that\n # the sample is of the correct length and contains only unique items\n n_population = 100\n\n for n_samples in range(n_population + 1):\n s = sample_without_replacement(n_population, n_samples)\n assert_equal(len(s), n_samples)\n 
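        # Note (added): np.unique both sorts and de-duplicates, so requiring
        # its size to still equal n_samples shows that no index was drawn
        # more than once.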
unique = np.unique(s)\n assert_equal(np.size(unique), n_samples)\n assert_true(np.all(unique < n_population))\n\n # test edge case n_population == n_samples == 0\n assert_equal(np.size(sample_without_replacement(0, 0)), 0)\n\n\ndef check_sample_int_distribution(sample_without_replacement):\n # This test is heavily inspired from test_random.py of python-core.\n #\n # For the entire allowable range of 0 <= k <= N, validate that\n # sample generates all possible permutations\n n_population = 10\n\n # a large number of trials prevents false negatives without slowing normal\n # case\n n_trials = 10000\n\n for n_samples in range(n_population):\n # Counting the number of combinations is not as good as counting the\n # the number of permutations. However, it works with sampling algorithm\n # that does not provide a random permutation of the subset of integer.\n n_expected = combinations(n_population, n_samples, exact=True)\n\n output = {}\n for i in range(n_trials):\n output[frozenset(sample_without_replacement(n_population,\n n_samples))] = None\n\n if len(output) == n_expected:\n break\n else:\n raise AssertionError(\n \"number of combinations != number of expected (%s != %s)\" %\n (len(output), n_expected))\n\n\ndef test_random_choice_csc(n_samples=10000, random_state=24):\n # Explicit class probabilities\n classes = [np.array([0, 1]), np.array([0, 1, 2])]\n class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]\n\n got = random_choice_csc(n_samples, classes, class_probabilites,\n random_state)\n assert_true(sp.issparse(got))\n\n for k in range(len(classes)):\n p = np.bincount(got.getcol(k).toarray().ravel()) \/ float(n_samples)\n assert_array_almost_equal(class_probabilites[k], p, decimal=1)\n\n # Implicit class probabilities\n classes = [[0, 1], [1, 2]] # test for array-like support\n class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1\/2, 1\/2])]\n\n got = random_choice_csc(n_samples=n_samples,\n classes=classes,\n random_state=random_state)\n assert_true(sp.issparse(got))\n\n for k in range(len(classes)):\n p = np.bincount(got.getcol(k).toarray().ravel()) \/ float(n_samples)\n assert_array_almost_equal(class_probabilites[k], p, decimal=1)\n\n # Edge case proabilites 1.0 and 0.0\n classes = [np.array([0, 1]), np.array([0, 1, 2])]\n class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]\n\n got = random_choice_csc(n_samples, classes, class_probabilites,\n random_state)\n assert_true(sp.issparse(got))\n\n for k in range(len(classes)):\n p = np.bincount(got.getcol(k).toarray().ravel(),\n minlength=len(class_probabilites[k])) \/ n_samples\n assert_array_almost_equal(class_probabilites[k], p, decimal=1)\n\n # One class target data\n classes = [[1], [0]] # test for array-like support\n class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]\n\n got = random_choice_csc(n_samples=n_samples,\n classes=classes,\n random_state=random_state)\n assert_true(sp.issparse(got))\n\n for k in range(len(classes)):\n p = np.bincount(got.getcol(k).toarray().ravel()) \/ n_samples\n assert_array_almost_equal(class_probabilites[k], p, decimal=1)\n\n\ndef test_random_choice_csc_errors():\n # the length of an array in classes and class_probabilites is mismatched\n classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]\n class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]\n assert_raises(ValueError, random_choice_csc, 4, classes,\n class_probabilites, 1)\n\n # the class dtype is not supported\n classes = [np.array([\"a\", \"1\"]), np.array([\"z\", \"1\", 
\"2\"])]\n class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]\n assert_raises(ValueError, random_choice_csc, 4, classes,\n class_probabilites, 1)\n\n # the class dtype is not supported\n classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]\n class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]\n assert_raises(ValueError, random_choice_csc, 4, classes,\n class_probabilites, 1)\n\n # Given proabilites don't sum to 1\n classes = [np.array([0, 1]), np.array([0, 1, 2])]\n class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]\n assert_raises(ValueError, random_choice_csc, 4, classes,\n class_probabilites, 1)\n","license":"bsd-3-clause"} {"repo_name":"juanshishido\/okcupid","path":"utils\/permutation.py","copies":"1","size":"2439","content":"import numpy as np\nfrom scipy.stats import ttest_ind\nfrom sklearn.metrics import accuracy_score\n\n\ndef _diff_means(m, arr):\n \"\"\"Calculate the difference-in-means statistic.\n This is based on an input array, `arr`, where the first\n `m` observations correspond to a particular class.\n\n Parameters\n ----------\n m : int\n Number of samples in the first class\n arr : np.ndarray\n Data for both classes\n\n Returns\n -------\n float\n \"\"\"\n return np.mean(arr[:m]) - np.mean(arr[m:])\n\ndef _permute(a, b, comparison='predictions', permutations=10000):\n \"\"\"Estimate of the permutation-based p-value\n\n Parameters\n ----------\n a : np.ndarray\n Data for one class or\n ground truth (correct) labels\n b : np.ndarray\n Data for another class or\n predicted labels, as returned by a classifier\n comparison : str\n {'predictions', 'means'}\n permutations : int, optional\n Number of permutations\n\n Returns\n -------\n p_value : float\n The proportion of times a value as extreme\n as the observed estimate is seen\n\n Notes\n -----\n This calculates the two-tailed p-value\n \"\"\"\n assert comparison in ('predictions', 'means')\n np.random.seed(42)\n if comparison == 'predictions':\n c = b.copy()\n compare = accuracy_score\n else:\n c = np.append(a, b)\n a = a.shape[0]\n compare = _diff_means\n baseline = compare(a, c)\n v = []\n for _ in range(permutations):\n np.random.shuffle(c)\n v.append(compare(a, c))\n p_value = (np.abs(np.array(v)) >= np.abs(baseline)).sum() \/ permutations\n return p_value\n\ndef print_pvalues(a, b):\n \"\"\"Wrapper function for printing meand and p-values\n both permutation-based and classical\n\n Parameters\n ----------\n a : np.ndarray\n Data for one class or\n ground truth (correct) labels\n b : np.ndarray\n Data for another class or\n predicted labels, as returned by a classifier\n\n Returns\n -------\n None\n \"\"\"\n assert isinstance(a, np.ndarray) and isinstance(b, np.ndarray)\n rnd = lambda x: np.round(x, 8)\n permutation = _permute(a, b, 'means')\n classical = ttest_ind(a, b, equal_var=False)[1]\n print(\"[means] 'a':\", rnd(a.mean()), \"'b':\", rnd(b.mean()))\n print(\"p-values:\")\n print(\" [permutation]:\", rnd(permutation))\n print(\" [classical]: \", rnd(classical))\n","license":"mit"} {"repo_name":"jrbourbeau\/cr-composition","path":"processing\/legacy\/anisotropy\/random_trials\/process_kstest.py","copies":"2","size":"7627","content":"#!\/usr\/bin\/env python\n\nimport os\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport pycondor\n\nimport comptools as comp\n\n\nif __name__ == \"__main__\":\n\n p = argparse.ArgumentParser(\n description='Extracts and saves desired information from simulation\/data .i3 files')\n p.add_argument('-c', 
'--config', dest='config',\n default='IC86.2012',\n choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'],\n help='Detector configuration')\n p.add_argument('--low_energy', dest='low_energy',\n default=False, action='store_true',\n help='Only use events with energy < 10**6.75 GeV')\n p.add_argument('--n_side', dest='n_side', type=int,\n default=64,\n help='Number of times to split the DataFrame')\n p.add_argument('--chunksize', dest='chunksize', type=int,\n default=1000,\n help='Number of lines used when reading in DataFrame')\n p.add_argument('--n_batches', dest='n_batches', type=int,\n default=50,\n help='Number batches running in parallel for each ks-test trial')\n p.add_argument('--ks_trials', dest='ks_trials', type=int,\n default=100,\n help='Number of random maps to generate')\n p.add_argument('--overwrite', dest='overwrite',\n default=False, action='store_true',\n help='Option to overwrite reference map file, '\n 'if it alreadu exists')\n p.add_argument('--test', dest='test',\n default=False, action='store_true',\n help='Option to run small test version')\n args = p.parse_args()\n\n if args.test:\n args.ks_trials = 20\n args.n_batches = 10000\n args.chunksize = 100\n\n # Define output directories\n error = comp.paths.condor_data_dir + '\/ks_test_{}\/error'.format(args.config)\n output = comp.paths.condor_data_dir + '\/ks_test_{}\/output'.format(args.config)\n log = comp.paths.condor_scratch_dir + '\/ks_test_{}\/log'.format(args.config)\n submit = comp.paths.condor_scratch_dir + '\/ks_test_{}\/submit'.format(args.config)\n\n # Define path to executables\n make_maps_ex = os.path.join(comp.paths.project_home,\n 'processing\/anisotropy\/ks_test_multipart',\n 'make_maps.py')\n merge_maps_ex = os.path.join(comp.paths.project_home,\n 'processing\/anisotropy\/ks_test_multipart',\n 'merge_maps.py')\n save_pvals_ex = os.path.join(comp.paths.project_home,\n 'processing\/anisotropy\/ks_test_multipart',\n 'save_pvals.py')\n\n # Create Dagman instance\n dag_name = 'anisotropy_kstest_{}'.format(args.config)\n if args.test:\n dag_name += '_test'\n dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)\n\n # Create Job for saving ks-test p-values for each trial\n save_pvals_name = 'save_pvals_{}'.format(args.config)\n if args.low_energy:\n save_pvals_name += '_lowenergy'\n save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex,\n error=error, output=output,\n log=log, submit=submit,\n verbose=1)\n\n save_pvals_infiles_0 = []\n save_pvals_infiles_1 = []\n\n dagman.add_job(save_pvals_job)\n\n outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data',\n 'anisotropy', 'random_splits')\n if args.test:\n outdir = os.path.join(outdir, 'test')\n for trial_num in range(args.ks_trials):\n # Create map_maps jobs for this ks_trial\n make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num)\n if args.low_energy:\n make_maps_name += '_lowenergy'\n make_maps_job = pycondor.Job(make_maps_name, make_maps_ex,\n error=error, output=output,\n log=log, submit=submit,\n verbose=1)\n dagman.add_job(make_maps_job)\n\n merge_maps_infiles_0 = []\n merge_maps_infiles_1 = []\n for batch_idx in range(args.n_batches):\n if args.test and batch_idx > 2:\n break\n\n outfile_sample_1 = os.path.join(outdir,\n 'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))\n outfile_sample_0 = os.path.join(outdir,\n 'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))\n\n make_maps_arg_list = []\n make_maps_arg_list.append('--config {}'.format(args.config))\n 
make_maps_arg_list.append('--n_side {}'.format(args.n_side))\n make_maps_arg_list.append('--chunksize {}'.format(args.chunksize))\n make_maps_arg_list.append('--n_batches {}'.format(args.n_batches))\n make_maps_arg_list.append('--batch_idx {}'.format(batch_idx))\n make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0))\n make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1))\n make_maps_arg = ' '.join(make_maps_arg_list)\n if args.low_energy:\n make_maps_arg += ' --low_energy'\n\n make_maps_job.add_arg(make_maps_arg)\n\n # Add this outfile to the list of infiles for merge_maps_job\n merge_maps_infiles_0.append(outfile_sample_0)\n merge_maps_infiles_1.append(outfile_sample_1)\n\n for sample_idx, input_file_list in enumerate([merge_maps_infiles_0,\n merge_maps_infiles_1]):\n merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx)\n if args.low_energy:\n merge_maps_name += '_lowenergy'\n merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex,\n error=error, output=output,\n log=log, submit=submit,\n verbose=1)\n\n # Ensure that make_maps_job completes before merge_maps_job begins\n make_maps_job.add_child(merge_maps_job)\n merge_maps_job.add_child(save_pvals_job)\n dagman.add_job(merge_maps_job)\n\n merge_infiles_str = ' '.join(input_file_list)\n # Assemble merged output file path\n merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num))\n\n merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile)\n merge_maps_job.add_arg(merge_maps_arg)\n\n if sample_idx == 0:\n save_pvals_infiles_0.append(merge_outfile)\n else:\n save_pvals_infiles_1.append(merge_outfile)\n\n save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0)\n save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1)\n if args.low_energy:\n outfile_basename = 'ks_test_dataframe_lowenergy.hdf'\n else:\n outfile_basename = 'ks_test_dataframe.hdf'\n outfile = os.path.join(outdir, outfile_basename)\n save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \\\n '--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile)\n save_pvals_job.add_arg(save_pvals_arg)\n\n dagman.build_submit(fancyname=True)\n","license":"mit"} {"repo_name":"bradleyhd\/netsim","path":"nodes_vs_routing_speed.py","copies":"1","size":"2878","content":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy.optimize import curve_fit\n\ndef linear(x, a, b):\n return a * x + b\n\ndef quadratic(x, a, b, c):\n return a * x**2 + b * x + c\n\ndef exponential(x, a, b, c):\n return a * x**b + c\n\nfig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k')\n\nxs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]]\nys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 
0.019908546500118973, 0.039582631500252319, 0.054390303499531001]]\n\nys = np.array(ys) * 1000\n\ndef graph(i, label, color, marker, l_marker):\n\n y = np.array(ys[i])\n x = np.array(xs[i])\n\n xl = np.linspace(np.min(x), np.max(x), 500)\n\n popt, pcov = curve_fit(exponential, x, y)\n\n plt.scatter(x, y, label=label, color=color, marker=marker)\n plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker)\n\nblue = '#5738FF'\npurple = '#E747E7'\norange = '#E7A725'\ngreen = '#A1FF47'\nred = '#FF1E43'\ngray = '#333333'\nwhite = 'w'\n\ngraph(0, 'EDS5 - original graph', red, 'o', '--')\ngraph(1, 'N5 - original graph', purple, 's', '--')\ngraph(2, 'EDS5 - decision graph', blue, '^', '--')\ngraph(3, 'N5 - decision graph', white, 'D', '--')\n\nax = fig.gca()\nplt.title('Effects of Node Ordering on Routing Speed', color=white)\nplt.xlabel('Effective $\\\\vert V\\\/\\\\vert$')\nplt.ylabel('Routing Time (ms)')\nplt.axes().set_axis_bgcolor('black')\nax.xaxis.label.set_color(white)\nax.yaxis.label.set_color(white)\nax.tick_params(axis='x', colors=white)\nax.tick_params(axis='y', colors=white)\nax.spines['bottom'].set_color(white)\nax.spines['top'].set_color(white)\nax.spines['left'].set_color(white)\nax.spines['right'].set_color(white)\nlegend = plt.legend(loc=0, numpoints=1, framealpha=0.0)\nlegend.get_frame().set_facecolor('k')\n\nmax_x = np.max(np.array(xs))\nmax_y = np.max(np.array(ys))\nmin_x = np.min(np.array(xs))\n\nmin_y = 0 - (max_y * 0.01)\nmin_x = 0 - (max_x * 0.01)\nmax_x *= 1.01\nmax_y *= 1.01\nplt.axes().set_xlim([min_x, max_x])\nplt.axes().set_ylim([min_y, max_y])\n\nfor text in legend.get_texts():\n text.set_color(white)\n\n# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n\nplt.savefig('nodes_vs_routing_speed.png', transparent=True)\n#plt.show()","license":"gpl-3.0"} {"repo_name":"esc\/dask","path":"dask\/dataframe\/shuffle.py","copies":"4","size":"2967","content":"from itertools import count\nfrom collections import Iterator\nfrom math import ceil\nfrom toolz import merge, accumulate, merge_sorted\nimport toolz\nfrom operator import getitem, setitem\nimport pandas as pd\nimport numpy as np\nfrom pframe import pframe\n\nfrom .. import threaded\nfrom .core import DataFrame, Series, get, names\nfrom ..compatibility import unicode\nfrom ..utils import ignoring\n\n\ntokens = ('-%d' % i for i in count(1))\n\n\ndef set_index(f, index, npartitions=None, **kwargs):\n \"\"\" Set DataFrame index to new column\n\n Sorts index and realigns Dataframe to new sorted order. 
This shuffles and\n repartitions your data.\n \"\"\"\n npartitions = npartitions or f.npartitions\n if not isinstance(index, Series):\n index2 = f[index]\n else:\n index2 = index\n\n divisions = (index2\n .quantiles(np.linspace(0, 100, npartitions+1)[1:-1])\n .compute())\n return f.set_partition(index, divisions, **kwargs)\n\n\npartition_names = ('set_partition-%d' % i for i in count(1))\n\ndef set_partition(f, index, divisions, get=threaded.get, **kwargs):\n \"\"\" Set new partitioning along index given divisions \"\"\"\n divisions = unique(divisions)\n name = next(names)\n if isinstance(index, Series):\n assert index.divisions == f.divisions\n dsk = dict(((name, i), (f._partition_type.set_index, block, ind))\n for i, (block, ind) in enumerate(zip(f._keys(), index._keys())))\n f2 = type(f)(merge(f.dask, index.dask, dsk), name,\n f.column_info, f.divisions)\n else:\n dsk = dict(((name, i), (f._partition_type.set_index, block, index))\n for i, block in enumerate(f._keys()))\n f2 = type(f)(merge(f.dask, dsk), name, f.column_info, f.divisions)\n\n head = f2.head()\n pf = pframe(like=head, divisions=divisions, **kwargs)\n\n def append(block):\n pf.append(block)\n return 0\n\n f2.map_blocks(append).compute(get=get)\n pf.flush()\n\n return from_pframe(pf)\n\n\ndef from_pframe(pf):\n \"\"\" Load dask.array from pframe \"\"\"\n name = next(names)\n dsk = dict(((name, i), (pframe.get_partition, pf, i))\n for i in range(pf.npartitions))\n\n return DataFrame(dsk, name, pf.columns, pf.divisions)\n\n\ndef unique(divisions):\n \"\"\" Polymorphic unique function\n\n >>> list(unique([1, 2, 3, 1, 2, 3]))\n [1, 2, 3]\n\n >>> unique(np.array([1, 2, 3, 1, 2, 3]))\n array([1, 2, 3])\n\n >>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))\n [Alice, Bob]\n Categories (2, object): [Alice, Bob]\n \"\"\"\n if isinstance(divisions, np.ndarray):\n return np.unique(divisions)\n if isinstance(divisions, pd.Categorical):\n return pd.Categorical.from_codes(np.unique(divisions.codes),\n divisions.categories, divisions.ordered)\n if isinstance(divisions, (tuple, list, Iterator)):\n return tuple(toolz.unique(divisions))\n raise NotImplementedError()\n","license":"bsd-3-clause"} {"repo_name":"q1ang\/scikit-learn","path":"examples\/ensemble\/plot_forest_importances_faces.py","copies":"403","size":"1519","content":"\"\"\"\n=================================================\nPixel importances with a parallel forest of trees\n=================================================\n\nThis example shows the use of forests of trees to evaluate the importance\nof the pixels in an image classification task (faces). 
The hotter the pixel,\nthe more important.\n\nThe code below also illustrates how the construction and the computation\nof the predictions can be parallelized within multiple jobs.\n\"\"\"\nprint(__doc__)\n\nfrom time import time\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.ensemble import ExtraTreesClassifier\n\n# Number of cores to use to perform parallel fitting of the forest model\nn_jobs = 1\n\n# Load the faces dataset\ndata = fetch_olivetti_faces()\nX = data.images.reshape((len(data.images), -1))\ny = data.target\n\nmask = y < 5 # Limit to 5 classes\nX = X[mask]\ny = y[mask]\n\n# Build a forest and compute the pixel importances\nprint(\"Fitting ExtraTreesClassifier on faces data with %d cores...\" % n_jobs)\nt0 = time()\nforest = ExtraTreesClassifier(n_estimators=1000,\n max_features=128,\n n_jobs=n_jobs,\n random_state=0)\n\nforest.fit(X, y)\nprint(\"done in %0.3fs\" % (time() - t0))\nimportances = forest.feature_importances_\nimportances = importances.reshape(data.images[0].shape)\n\n# Plot pixel importances\nplt.matshow(importances, cmap=plt.cm.hot)\nplt.title(\"Pixel importances with forests of trees\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"yavalvas\/yav_com","path":"build\/matplotlib\/doc\/mpl_toolkits\/axes_grid\/examples\/demo_parasite_axes2.py","copies":"16","size":"1208","content":"from mpl_toolkits.axes_grid1 import host_subplot\nimport mpl_toolkits.axisartist as AA\nimport matplotlib.pyplot as plt\n\nif 1:\n\n host = host_subplot(111, axes_class=AA.Axes)\n plt.subplots_adjust(right=0.75)\n\n par1 = host.twinx()\n par2 = host.twinx()\n\n offset = 60\n new_fixed_axis = par2.get_grid_helper().new_fixed_axis\n par2.axis[\"right\"] = new_fixed_axis(loc=\"right\",\n axes=par2,\n offset=(offset, 0))\n \n par2.axis[\"right\"].toggle(all=True)\n\n\n\n host.set_xlim(0, 2)\n host.set_ylim(0, 2)\n\n host.set_xlabel(\"Distance\")\n host.set_ylabel(\"Density\")\n par1.set_ylabel(\"Temperature\")\n par2.set_ylabel(\"Velocity\")\n\n p1, = host.plot([0, 1, 2], [0, 1, 2], label=\"Density\")\n p2, = par1.plot([0, 1, 2], [0, 3, 2], label=\"Temperature\")\n p3, = par2.plot([0, 1, 2], [50, 30, 15], label=\"Velocity\")\n\n par1.set_ylim(0, 4)\n par2.set_ylim(1, 65)\n\n host.legend()\n\n host.axis[\"left\"].label.set_color(p1.get_color())\n par1.axis[\"right\"].label.set_color(p2.get_color())\n par2.axis[\"right\"].label.set_color(p3.get_color())\n\n plt.draw()\n plt.show()\n\n #plt.savefig(\"Test\")\n","license":"mit"} {"repo_name":"woozzu\/pylearn2","path":"pylearn2\/scripts\/tests\/test_print_monitor_cv.py","copies":"48","size":"1927","content":"\"\"\"\nTest print_monitor_cv.py by training on a short TrainCV YAML file and\nanalyzing the output pickle.\n\"\"\"\nimport os\nimport tempfile\n\nfrom pylearn2.config import yaml_parse\nfrom pylearn2.scripts import print_monitor_cv\nfrom pylearn2.testing.skip import skip_if_no_sklearn\n\n\ndef test_print_monitor_cv():\n \"\"\"Test print_monitor_cv.py.\"\"\"\n skip_if_no_sklearn()\n handle, filename = tempfile.mkstemp()\n trainer = yaml_parse.load(test_print_monitor_cv_yaml %\n {'filename': filename})\n trainer.main_loop()\n\n # run print_monitor_cv.py main\n print_monitor_cv.main(filename)\n\n # run print_monitor_cv.py main with all=True\n print_monitor_cv.main(filename, all=True)\n\n # cleanup\n os.remove(filename)\n\ntest_print_monitor_cv_yaml = \"\"\"\n!obj:pylearn2.cross_validation.TrainCV {\n dataset_iterator:\n !obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold 
{\n dataset:\n !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix\n {\n rng: !obj:numpy.random.RandomState { seed: 1 },\n num_examples: 10,\n dim: 10,\n num_classes: 2,\n },\n },\n model: !obj:pylearn2.models.mlp.MLP {\n layers: [\n !obj:pylearn2.models.mlp.Sigmoid {\n layer_name: h0,\n dim: 8,\n irange: 0.05,\n },\n !obj:pylearn2.models.mlp.Softmax {\n layer_name: y,\n n_classes: 2,\n irange: 0.05,\n },\n ],\n nvis: 10,\n },\n algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {\n batch_size: 5,\n line_search_mode: 'exhaustive',\n conjugate: 1,\n termination_criterion:\n !obj:pylearn2.termination_criteria.EpochCounter {\n max_epochs: 1,\n },\n },\n save_path: %(filename)s,\n}\n\"\"\"\n","license":"bsd-3-clause"} {"repo_name":"vortex-ape\/scikit-learn","path":"examples\/bicluster\/plot_bicluster_newsgroups.py","copies":"39","size":"5911","content":"\"\"\"\n================================================================\nBiclustering documents with the Spectral Co-clustering algorithm\n================================================================\n\nThis example demonstrates the Spectral Co-clustering algorithm on the\ntwenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is\nexcluded because it contains many posts containing nothing but data.\n\nThe TF-IDF vectorized posts form a word frequency matrix, which is\nthen biclustered using Dhillon's Spectral Co-Clustering algorithm. The\nresulting document-word biclusters indicate subsets words used more\noften in those subsets documents.\n\nFor a few of the best biclusters, its most common document categories\nand its ten most important words get printed. The best biclusters are\ndetermined by their normalized cut. The best words are determined by\ncomparing their sums inside and outside the bicluster.\n\nFor comparison, the documents are also clustered using\nMiniBatchKMeans. The document clusters derived from the biclusters\nachieve a better V-measure than clusters found by MiniBatchKMeans.\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport operator\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.cluster.bicluster import SpectralCoclustering\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.externals.six import iteritems\nfrom sklearn.datasets.twenty_newsgroups import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.cluster import v_measure_score\n\nprint(__doc__)\n\n\ndef number_normalizer(tokens):\n \"\"\" Map all numeric tokens to a placeholder.\n\n For many applications, tokens that begin with a number are not directly\n useful, but the fact that such a token exists can be relevant. 
By applying\n this form of dimensionality reduction, some methods may perform better.\n \"\"\"\n return (\"#NUMBER\" if token[0].isdigit() else token for token in tokens)\n\n\nclass NumberNormalizingVectorizer(TfidfVectorizer):\n def build_tokenizer(self):\n tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()\n return lambda doc: list(number_normalizer(tokenize(doc)))\n\n\n# exclude 'comp.os.ms-windows.misc'\ncategories = ['alt.atheism', 'comp.graphics',\n 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',\n 'comp.windows.x', 'misc.forsale', 'rec.autos',\n 'rec.motorcycles', 'rec.sport.baseball',\n 'rec.sport.hockey', 'sci.crypt', 'sci.electronics',\n 'sci.med', 'sci.space', 'soc.religion.christian',\n 'talk.politics.guns', 'talk.politics.mideast',\n 'talk.politics.misc', 'talk.religion.misc']\nnewsgroups = fetch_20newsgroups(categories=categories)\ny_true = newsgroups.target\n\nvectorizer = NumberNormalizingVectorizer(stop_words='english', min_df=5)\ncocluster = SpectralCoclustering(n_clusters=len(categories),\n svd_method='arpack', random_state=0)\nkmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,\n random_state=0)\n\nprint(\"Vectorizing...\")\nX = vectorizer.fit_transform(newsgroups.data)\n\nprint(\"Coclustering...\")\nstart_time = time()\ncocluster.fit(X)\ny_cocluster = cocluster.row_labels_\nprint(\"Done in {:.2f}s. V-measure: {:.4f}\".format(\n time() - start_time,\n v_measure_score(y_cocluster, y_true)))\n\nprint(\"MiniBatchKMeans...\")\nstart_time = time()\ny_kmeans = kmeans.fit_predict(X)\nprint(\"Done in {:.2f}s. V-measure: {:.4f}\".format(\n time() - start_time,\n v_measure_score(y_kmeans, y_true)))\n\nfeature_names = vectorizer.get_feature_names()\ndocument_names = list(newsgroups.target_names[i] for i in newsgroups.target)\n\n\ndef bicluster_ncut(i):\n rows, cols = cocluster.get_indices(i)\n if not (np.any(rows) and np.any(cols)):\n import sys\n return sys.float_info.max\n row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]\n col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]\n # Note: the following is identical to X[rows[:, np.newaxis],\n # cols].sum() but much faster in scipy <= 0.16\n weight = X[rows][:, cols].sum()\n cut = (X[row_complement][:, cols].sum() +\n X[rows][:, col_complement].sum())\n return cut \/ weight\n\n\ndef most_common(d):\n \"\"\"Items of a defaultdict(int) with the highest values.\n\n Like Counter.most_common in Python >=2.7.\n \"\"\"\n return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)\n\n\nbicluster_ncuts = list(bicluster_ncut(i)\n for i in range(len(newsgroups.target_names)))\nbest_idx = np.argsort(bicluster_ncuts)[:5]\n\nprint()\nprint(\"Best biclusters:\")\nprint(\"----------------\")\nfor idx, cluster in enumerate(best_idx):\n n_rows, n_cols = cocluster.get_shape(cluster)\n cluster_docs, cluster_words = cocluster.get_indices(cluster)\n if not len(cluster_docs) or not len(cluster_words):\n continue\n\n # categories\n counter = defaultdict(int)\n for i in cluster_docs:\n counter[document_names[i]] += 1\n cat_string = \", \".join(\"{:.0f}% {}\".format(float(c) \/ n_rows * 100, name)\n for name, c in most_common(counter)[:3])\n\n # words\n out_of_cluster_docs = cocluster.row_labels_ != cluster\n out_of_cluster_docs = np.where(out_of_cluster_docs)[0]\n word_col = X[:, cluster_words]\n word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -\n word_col[out_of_cluster_docs, :].sum(axis=0))\n word_scores = word_scores.ravel()\n important_words = 
list(feature_names[cluster_words[i]]\n for i in word_scores.argsort()[:-11:-1])\n\n print(\"bicluster {} : {} documents, {} words\".format(\n idx, n_rows, n_cols))\n print(\"categories : {}\".format(cat_string))\n print(\"words : {}\\n\".format(', '.join(important_words)))\n","license":"bsd-3-clause"} {"repo_name":"fmacias64\/spyre","path":"setup.py","copies":"3","size":"1217","content":"from setuptools import setup, find_packages\n\nsetup(\n name='DataSpyre',\n version='0.2.0',\n description='Spyre makes it easy to build interactive web applications, and requires no knowledge of HTML, CSS, or Javascript.',\n url='https:\/\/github.com\/adamhajari\/spyre',\n author='Adam Hajari',\n author_email='adam@nextbigsound.com',\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: CherryPy',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science\/Research',\n 'Environment :: Web Environment',\n 'Topic :: Scientific\/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords='web application template data visualization',\n include_package_data = True, # include everything in source control\n packages = ['spyre'], # include all packages under src\n package_data = {\n '': ['*.js','*.css','*.html'],\n 'public': ['js\/*.js','css\/*.css'],\n },\n install_requires=[\n \"numpy\",\n \"pandas\",\n \"cherrypy\",\n \"jinja2\",\n \"matplotlib\",\n ]\n)\n","license":"mit"} {"repo_name":"davidgardenier\/frbpoppy","path":"tests\/lognlogs\/local.py","copies":"1","size":"1611","content":"\"\"\"Check the log N log F slope of a local population.\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom frbpoppy import CosmicPopulation, Survey, SurveyPopulation\nfrom frbpoppy.population import unpickle\n\nfrom tests.convenience import plot_aa_style, rel_path\n\nMAKE = True\n\n\nif MAKE:\n population = CosmicPopulation.simple(1e5, generate=True)\n survey = Survey('perfect')\n surv_pop = SurveyPopulation(population, survey)\n surv_pop.name = 'lognlogflocal'\n surv_pop.save()\nelse:\n surv_pop = unpickle('lognlogflocal')\n\n# Get parameter\nparms = surv_pop.frbs.fluence\nmin_p = min(parms)\nmax_p = max(parms)\n\n# Bin up\nmin_f = np.log10(min(parms))\nmax_f = np.log10(max(parms))\nlog_bins = np.logspace(min_f, max_f, 50)\nhist, edges = np.histogram(parms, bins=log_bins)\nn_gt_s = np.cumsum(hist[::-1])[::-1]\n\n# Calculate alpha\nalpha, alpha_err, norm = surv_pop.frbs.calc_logn_logs(parameter='fluence',\n min_p=min_p,\n max_p=max_p)\n\nprint(alpha, alpha_err, norm)\nxs = 10**((np.log10(edges[:-1]) + np.log10(edges[1:])) \/ 2)\nxs = xs[xs >= min_p]\nxs = xs[xs <= max_p]\nys = [norm*x**(alpha) for x in xs]\n\nplot_aa_style()\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.step(edges[:-1], n_gt_s, where='post')\nplt.plot(xs, ys, linestyle='--',\n label=rf'$\\alpha$ = {alpha:.3} $\\pm$ {round(abs(alpha_err), 2)}')\n\nplt.xlabel('Fluence (Jy ms)')\nplt.ylabel(r'N(${>}Fluence$)')\nplt.xscale('log')\nplt.yscale('log')\nplt.legend()\nplt.tight_layout()\nplt.savefig(rel_path('plots\/logn_logf_local.pdf'))\n","license":"mit"} {"repo_name":"wooga\/airflow","path":"tests\/providers\/presto\/hooks\/test_presto.py","copies":"5","size":"4331","content":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport unittest\nfrom unittest import mock\nfrom unittest.mock import patch\n\nfrom prestodb.transaction import IsolationLevel\n\nfrom airflow.models import Connection\nfrom airflow.providers.presto.hooks.presto import PrestoHook\n\n\nclass TestPrestoHookConn(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.connection = Connection(\n login='login',\n password='password',\n host='host',\n schema='hive',\n )\n\n class UnitTestPrestoHook(PrestoHook):\n conn_name_attr = 'presto_conn_id'\n\n self.db_hook = UnitTestPrestoHook()\n self.db_hook.get_connection = mock.Mock()\n self.db_hook.get_connection.return_value = self.connection\n\n @patch('airflow.providers.presto.hooks.presto.prestodb.auth.BasicAuthentication')\n @patch('airflow.providers.presto.hooks.presto.prestodb.dbapi.connect')\n def test_get_conn(self, mock_connect, mock_basic_auth):\n self.db_hook.get_conn()\n mock_connect.assert_called_once_with(catalog='hive', host='host', port=None, http_scheme='http',\n schema='hive', source='airflow', user='login', isolation_level=0,\n auth=mock_basic_auth('login', 'password'))\n\n\nclass TestPrestoHook(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.cur = mock.MagicMock()\n self.conn = mock.MagicMock()\n self.conn.cursor.return_value = self.cur\n conn = self.conn\n\n class UnitTestPrestoHook(PrestoHook):\n conn_name_attr = 'test_conn_id'\n\n def get_conn(self):\n return conn\n\n def get_isolation_level(self):\n return IsolationLevel.READ_COMMITTED\n\n self.db_hook = UnitTestPrestoHook()\n\n @patch('airflow.hooks.dbapi_hook.DbApiHook.insert_rows')\n def test_insert_rows(self, mock_insert_rows):\n table = \"table\"\n rows = [(\"hello\",),\n (\"world\",)]\n target_fields = None\n commit_every = 10\n self.db_hook.insert_rows(table, rows, target_fields, commit_every)\n mock_insert_rows.assert_called_once_with(table, rows, None, 10)\n\n def test_get_first_record(self):\n statement = 'SQL'\n result_sets = [('row1',), ('row2',)]\n self.cur.fetchone.return_value = result_sets[0]\n\n self.assertEqual(result_sets[0], self.db_hook.get_first(statement))\n self.conn.close.assert_called_once_with()\n self.cur.close.assert_called_once_with()\n self.cur.execute.assert_called_once_with(statement)\n\n def test_get_records(self):\n statement = 'SQL'\n result_sets = [('row1',), ('row2',)]\n self.cur.fetchall.return_value = result_sets\n\n self.assertEqual(result_sets, self.db_hook.get_records(statement))\n self.conn.close.assert_called_once_with()\n self.cur.close.assert_called_once_with()\n self.cur.execute.assert_called_once_with(statement)\n\n def test_get_pandas_df(self):\n statement = 'SQL'\n column = 'col'\n result_sets = [('row1',), ('row2',)]\n self.cur.description = [(column,)]\n self.cur.fetchall.return_value = result_sets\n df = self.db_hook.get_pandas_df(statement)\n\n self.assertEqual(column, df.columns[0])\n\n self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])\n 
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])\n\n self.cur.execute.assert_called_once_with(statement, None)\n","license":"apache-2.0"} {"repo_name":"billy-inn\/scikit-learn","path":"examples\/linear_model\/lasso_dense_vs_sparse_data.py","copies":"348","size":"1862","content":"\"\"\"\n==============================\nLasso on dense and sparse data\n==============================\n\nWe show that linear_model.Lasso provides the same results for dense and sparse\ndata and that in the case of sparse data the speed is improved.\n\n\"\"\"\nprint(__doc__)\n\nfrom time import time\nfrom scipy import sparse\nfrom scipy import linalg\n\nfrom sklearn.datasets.samples_generator import make_regression\nfrom sklearn.linear_model import Lasso\n\n\n###############################################################################\n# The two Lasso implementations on Dense data\nprint(\"--- Dense matrices\")\n\nX, y = make_regression(n_samples=200, n_features=5000, random_state=0)\nX_sp = sparse.coo_matrix(X)\n\nalpha = 1\nsparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)\ndense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)\n\nt0 = time()\nsparse_lasso.fit(X_sp, y)\nprint(\"Sparse Lasso done in %fs\" % (time() - t0))\n\nt0 = time()\ndense_lasso.fit(X, y)\nprint(\"Dense Lasso done in %fs\" % (time() - t0))\n\nprint(\"Distance between coefficients : %s\"\n % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))\n\n###############################################################################\n# The two Lasso implementations on Sparse data\nprint(\"--- Sparse matrices\")\n\nXs = X.copy()\nXs[Xs < 2.5] = 0.0\nXs = sparse.coo_matrix(Xs)\nXs = Xs.tocsc()\n\nprint(\"Matrix density : %s %%\" % (Xs.nnz \/ float(X.size) * 100))\n\nalpha = 0.1\nsparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)\ndense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)\n\nt0 = time()\nsparse_lasso.fit(Xs, y)\nprint(\"Sparse Lasso done in %fs\" % (time() - t0))\n\nt0 = time()\ndense_lasso.fit(Xs.toarray(), y)\nprint(\"Dense Lasso done in %fs\" % (time() - t0))\n\nprint(\"Distance between coefficients : %s\"\n % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))\n","license":"bsd-3-clause"} {"repo_name":"florentchandelier\/zipline","path":"tests\/data\/bundles\/test_csvdir.py","copies":"1","size":"5092","content":"from __future__ import division\n\nimport numpy as np\nimport pandas as pd\n\nfrom zipline.utils.calendars import get_calendar\nfrom zipline.data.bundles import ingest, load, bundles\nfrom zipline.testing import test_resource_path\nfrom zipline.testing.fixtures import ZiplineTestCase\nfrom zipline.testing.predicates import assert_equal\nfrom zipline.utils.functional import apply\n\n\nclass CSVDIRBundleTestCase(ZiplineTestCase):\n symbols = 'AAPL', 'IBM', 'KO', 'MSFT'\n asset_start = pd.Timestamp('2012-01-03', tz='utc')\n asset_end = pd.Timestamp('2014-12-31', tz='utc')\n bundle = bundles['csvdir']\n calendar = get_calendar(bundle.calendar_name)\n start_date = calendar.first_session\n end_date = calendar.last_session\n api_key = 'ayylmao'\n columns = 'open', 'high', 'low', 'close', 'volume'\n\n def _expected_data(self, asset_finder):\n sids = {\n symbol: asset_finder.lookup_symbol(\n symbol,\n self.asset_start,\n ).sid\n for symbol in self.symbols\n }\n\n def per_symbol(symbol):\n df = pd.read_csv(\n test_resource_path('csvdir_samples', 'csvdir',\n 'daily', symbol + '.csv.gz'),\n parse_dates=['date'],\n index_col='date',\n usecols=[\n 'open',\n 
'high',\n 'low',\n 'close',\n 'volume',\n 'date',\n 'dividend',\n 'split',\n ],\n na_values=['NA'],\n )\n df['sid'] = sids[symbol]\n return df\n\n all_ = pd.concat(map(per_symbol, self.symbols)).set_index(\n 'sid',\n append=True,\n ).unstack()\n\n # fancy list comprehension with statements\n @list\n @apply\n def pricing():\n for column in self.columns:\n vs = all_[column].values\n if column == 'volume':\n vs = np.nan_to_num(vs)\n yield vs\n\n adjustments = [[5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,\n 5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,\n 5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,\n 5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,\n 6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,\n 6223, 6263, 6271, 6277],\n [5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,\n 5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,\n 5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,\n 5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,\n 6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,\n 6223, 6263, 6271, 6277],\n [5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,\n 5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,\n 5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,\n 5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,\n 6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,\n 6223, 6263, 6271, 6277],\n [5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,\n 5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,\n 5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,\n 5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,\n 6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,\n 6223, 6263, 6271, 6277],\n [5701, 6157]]\n\n return pricing, adjustments\n\n def test_bundle(self):\n environ = {\n 'CSVDIR': test_resource_path('csvdir_samples', 'csvdir')\n }\n\n ingest('csvdir', environ=environ)\n bundle = load('csvdir', environ=environ)\n sids = 0, 1, 2, 3\n assert_equal(set(bundle.asset_finder.sids), set(sids))\n\n for equity in bundle.asset_finder.retrieve_all(sids):\n assert_equal(equity.start_date, self.asset_start, msg=equity)\n assert_equal(equity.end_date, self.asset_end, msg=equity)\n\n sessions = self.calendar.all_sessions\n actual = bundle.equity_daily_bar_reader.load_raw_arrays(\n self.columns,\n sessions[sessions.get_loc(self.asset_start, 'bfill')],\n sessions[sessions.get_loc(self.asset_end, 'ffill')],\n sids,\n )\n\n expected_pricing, expected_adjustments = self._expected_data(\n bundle.asset_finder,\n )\n assert_equal(actual, expected_pricing, array_decimal=2)\n\n adjustments_for_cols = bundle.adjustment_reader.load_adjustments(\n self.columns,\n sessions,\n pd.Index(sids),\n )\n assert_equal([sorted(adj.keys()) for adj in adjustments_for_cols],\n expected_adjustments)\n","license":"apache-2.0"} {"repo_name":"milankl\/swm","path":"calc\/misc\/c_diss_plot.py","copies":"1","size":"3966","content":"from __future__ import print_function\npath = '\/home\/mkloewer\/python\/swm\/'\nimport os; os.chdir(path) # change working directory\nimport numpy as np\nfrom scipy import sparse\nimport time as tictoc\nfrom netCDF4 import Dataset\nimport glob\nimport matplotlib.pyplot as plt\n\n# OPTIONS\nrunfolder = [2,3]\n\n## read data\nfor r,i in zip(runfolder,range(len(runfolder))):\n runpath = path+'data\/run%04i' % r\n \n if i == 0:\n u = np.load(runpath+'\/u_sub.npy')\n v = np.load(runpath+'\/v_sub.npy')\n h = np.load(runpath+'\/h_sub.npy')\n time = np.load(runpath+'\/t_sub.npy')\n print('run %i read.' 
% r)\n\n else:\n u = np.concatenate((u,np.load(runpath+'\/u_sub.npy')))\n v = np.concatenate((v,np.load(runpath+'\/v_sub.npy')))\n h = np.concatenate((h,np.load(runpath+'\/h_sub.npy')))\n time = np.hstack((time,np.load(runpath+'\/t_sub.npy')))\n print('run %i read.' % r)\n\nt = time \/ 3600. \/ 24. # in days\n## read param\nglobal param\nparam = np.load(runpath+'\/param.npy').all()\nparam['dat_type'] = np.float32\n\n# import functions\nexec(open(path+'swm_param.py').read())\nexec(open(path+'swm_operators.py').read())\nexec(open(path+'swm_output.py').read())\nparam['output'] = 0\n\nset_grad_mat()\nset_interp_mat()\nset_lapl_mat()\nset_coriolis()\n\ntlen = len(time)\n## create ouputfolder\ntry:\n os.mkdir(runpath+'\/analysis')\nexcept:\n pass\n \n## reshape u,v\nu = u.reshape((tlen,param['Nu'])).T\nv = v.reshape((tlen,param['Nv'])).T\nh = h.reshape((tlen,param['NT'])).T\nprint('Reshape done.')\n\n##\ndudx = Gux.dot(u)\ndudy = Guy.dot(u)\ndvdx = Gvx.dot(v)\ndvdy = Gvy.dot(v)\n\nn = 2\n\nD = np.sqrt((dudx - dvdy)**2 + IqT.dot((dudy + dvdx)**2))\nRo = (D.T\/f_T)\nRom = Ro.mean(axis=0)\nc = (1\/(1+Ro)**n).mean(axis=0)\n\n# REYNOLDS, ROSSBY, EKMAN NUMBER MEAN\nu_T = IuT.dot(u)\nv_T = IvT.dot(v)\nprint('u,v interpolation done.')\n\n#advective term\nadv_u = u_T*Gux.dot(u) + v_T*IqT.dot(Guy.dot(u))\nadv_v = u_T*IqT.dot(Gvx.dot(v)) + v_T*Gvy.dot(v)\ndel u_T,v_T\nadv_term = np.sqrt(adv_u**2 + adv_v**2)\ndel adv_u, adv_v\nprint('Advection term done.')\n\n#coriolis term\ncor_term = (f_T*np.sqrt(IuT.dot(u**2) + IvT.dot(v**2)).T).T\nprint('Coriolis term done.')\n\nRo2 = adv_term \/ cor_term\nc2 = (1\/(1+Ro2)**n).mean(axis=1)\nRo2m = Ro2.mean(axis=1)\n\n##\nlevs1 = np.linspace(0,.2,21)\nlevs2 = np.linspace(0.5,1,21)\n\nfig,axs = plt.subplots(2,3,sharex=True,sharey=True,figsize=(9,5.5))\nplt.tight_layout(rect=[-.02,-.03,1.12,.97],w_pad=0.1)\n\naxs[0,0].contourf(param['x_T'],param['y_T'],h2mat(Ro2m),levs1)\naxs[0,1].contourf(param['x_T'],param['y_T'],h2mat(Rom),levs1,extend='max')\nm1 = axs[0,2].contourf(param['x_T'],param['y_T'],h2mat(Ro[-1,:]),levs1,extend='max')\nplt.colorbar(m1,ax=(axs[0,0],axs[0,1],axs[0,2]),ticks=np.arange(0,.22,.04))\n\naxs[1,0].contourf(param['x_T'],param['y_T'],h2mat(c2),levs2)\nm21 = axs[1,0].contour(param['x_T'],param['y_T'],h2mat(c2),[0.8],linewidths=0.7)\naxs[1,1].contourf(param['x_T'],param['y_T'],h2mat(c),levs2)\nm2 = axs[1,2].contourf(param['x_T'],param['y_T'],h2mat(1\/(1+Ro[-1,:])**n),levs2,extend='min')\naxs[1,2].contour(param['x_T'],param['y_T'],h2mat(1\/(1+Ro[-1,:])**n),[0.8],linewidths=0.7)\nm22 = axs[1,1].contour(param['x_T'],param['y_T'],h2mat(c),[0.8],linewidths=0.7)\nplt.colorbar(m2,ax=(axs[1,0],axs[1,1],axs[1,2]),ticks=np.arange(0.5,1.05,.05))\nplt.clabel(m22, inline=1, fontsize=5,fmt='%.1f')\nplt.clabel(m21, inline=1, fontsize=5,fmt='%.1f')\n\naxs[0,0].set_xticks([])\naxs[0,0].set_yticks([])\n\naxs[0,0].set_title(r'$\\overline{R_o} = \\overline{\\frac{|(\\mathbf{u} \\cdot \\nabla)\\mathbf{u}|}{|f\\mathbf{u}|}}$')\naxs[0,1].set_title(r'$\\overline{R_o^*} = \\overline{\\frac{|D|}{f}}$')\naxs[0,2].set_title(r'snapshot: $R_o^*$')\n\naxs[1,0].set_title(r'$(1+\\overline{R_o})^{-2}$')\naxs[1,1].set_title(r'$(1+\\overline{R_o}^*)^{-2}$')\naxs[1,2].set_title(r'$(1+R_o^*)^{-2}$')\n\naxs[0,0].set_ylabel('y')\naxs[1,0].set_ylabel('y')\naxs[1,0].set_xlabel('x')\naxs[1,1].set_xlabel('x')\n\nplt.savefig(path+'compare\/Ro_scaling.png',dpi=150)\nplt.close(fig)\n#plt.show()\n\n\n","license":"gpl-3.0"} 
{"repo_name":"Aasmi\/scikit-learn","path":"sklearn\/feature_selection\/variance_threshold.py","copies":"238","size":"2594","content":"# Author: Lars Buitinck \n# License: 3-clause BSD\n\nimport numpy as np\nfrom ..base import BaseEstimator\nfrom .base import SelectorMixin\nfrom ..utils import check_array\nfrom ..utils.sparsefuncs import mean_variance_axis\nfrom ..utils.validation import check_is_fitted\n\n\nclass VarianceThreshold(BaseEstimator, SelectorMixin):\n \"\"\"Feature selector that removes all low-variance features.\n\n This feature selection algorithm looks only at the features (X), not the\n desired outputs (y), and can thus be used for unsupervised learning.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n threshold : float, optional\n Features with a training-set variance lower than this threshold will\n be removed. The default is to keep all features with non-zero variance,\n i.e. remove the features that have the same value in all samples.\n\n Attributes\n ----------\n variances_ : array, shape (n_features,)\n Variances of individual features.\n\n Examples\n --------\n The following dataset has integer features, two of which are the same\n in every sample. These are removed with the default setting for threshold::\n\n >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]\n >>> selector = VarianceThreshold()\n >>> selector.fit_transform(X)\n array([[2, 0],\n [1, 4],\n [1, 1]])\n \"\"\"\n\n def __init__(self, threshold=0.):\n self.threshold = threshold\n\n def fit(self, X, y=None):\n \"\"\"Learn empirical variances from X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Sample vectors from which to compute variances.\n\n y : any\n Ignored. This parameter exists only for compatibility with\n sklearn.pipeline.Pipeline.\n\n Returns\n -------\n self\n \"\"\"\n X = check_array(X, ('csr', 'csc'), dtype=np.float64)\n\n if hasattr(X, \"toarray\"): # sparse matrix\n _, self.variances_ = mean_variance_axis(X, axis=0)\n else:\n self.variances_ = np.var(X, axis=0)\n\n if np.all(self.variances_ <= self.threshold):\n msg = \"No feature in X meets the variance threshold {0:.5f}\"\n if X.shape[0] == 1:\n msg += \" (X contains only one sample)\"\n raise ValueError(msg.format(self.threshold))\n\n return self\n\n def _get_support_mask(self):\n check_is_fitted(self, 'variances_')\n\n return self.variances_ > self.threshold\n","license":"bsd-3-clause"} {"repo_name":"OpenMined\/PySyft","path":"packages\/syft\/src\/syft\/lib\/pandas\/categorical_dtype.py","copies":"1","size":"1173","content":"# third party\nimport pandas as pd\n\n# syft relative\nfrom ...generate_wrapper import GenerateWrapper\nfrom ...lib.python.list import List\nfrom ...lib.python.primitive_factory import PrimitiveFactory\nfrom ...proto.lib.pandas.categorical_pb2 import (\n PandasCategoricalDtype as PandasCategoricalDtype_PB,\n)\n\n\ndef object2proto(obj: pd.CategoricalDtype) -> PandasCategoricalDtype_PB:\n # since pd.Index type is not integrated converted obj.categories to List\n pd_cat_list = PrimitiveFactory.generate_primitive(value=obj.categories.tolist())\n cat_list_proto = pd_cat_list._object2proto()\n\n return PandasCategoricalDtype_PB(\n id=cat_list_proto.id, categories=cat_list_proto, ordered=obj.ordered\n )\n\n\ndef proto2object(proto: PandasCategoricalDtype_PB) -> pd.CategoricalDtype:\n categories = List._proto2object(proto.categories).upcast()\n ordered = proto.ordered\n return pd.CategoricalDtype(categories=categories, 
ordered=ordered)\n\n\nGenerateWrapper(\n wrapped_type=pd.CategoricalDtype,\n import_path=\"pandas.CategoricalDtype\",\n protobuf_scheme=PandasCategoricalDtype_PB,\n type_object2proto=object2proto,\n type_proto2object=proto2object,\n)\n","license":"apache-2.0"} {"repo_name":"poojavade\/Genomics_Docker","path":"Dockerfiles\/gedlab-khmer-filter-abund\/pymodules\/python2.7\/lib\/python\/statsmodels-0.5.0-py2.7-linux-x86_64.egg\/statsmodels\/datasets\/statecrime\/data.py","copies":"3","size":"2985","content":"#! \/usr\/bin\/env python\n\n\"\"\"Statewide Crime Data\"\"\"\n\n__docformat__ = 'restructuredtext'\n\nCOPYRIGHT = \"\"\"Public domain.\"\"\"\nTITLE = \"\"\"Statewide Crime Data 2009\"\"\"\nSOURCE = \"\"\"\nAll data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.\n\"\"\"\n\nDESCRSHORT = \"\"\"State crime data 2009\"\"\"\n\nDESCRLONG = DESCRSHORT\n\n#suggested notes\nNOTE = \"\"\"\nNumber of observations: 51\nNumber of variables: 8\nVariable name definitions:\n\nstate\n All 50 states plus DC.\nviolent\n Rate of violent crimes \/ 100,000 population. Includes murder, forcible\n rape, robbery, and aggravated assault. Numbers for Illinois and Minnesota\n do not include forcible rapes. Footnote included with the American\n Statistical Abstract table reads:\n \"The data collection methodology for the offense of forcible\n rape used by the Illinois and the Minnesota state Uniform Crime Reporting\n (UCR) Programs (with the exception of Rockford, Illinois, and Minneapolis\n and St. Paul, Minnesota) does not comply with national UCR guidelines.\n Consequently, their state figures for forcible rape and violent crime (of\n which forcible rape is a part) are not published in this table.\"\nmurder\n Rate of murders \/ 100,000 population.\nhs_grad\n Precent of population having graduated from high school or higher.\npoverty\n % of individuals below the poverty line\nwhite\n Percent of population that is one race - white only. From 2009 American\n Community Survey\nsingle\n Calculated from 2009 1-year American Community Survey obtained obtained\n from Census. Variable is Male householder, no wife present, family\n household combined with Female household, no husband prsent, family\n household, divided by the total number of Family households.\nurban\n % of population in Urbanized Areas as of 2010 Census. 
Urbanized Areas are\n area of 50,000 or more people.\"\"\"\n\nimport numpy as np\nfrom statsmodels.datasets import utils as du\nfrom os.path import dirname, abspath\n\ndef load():\n \"\"\"\n Load the statecrime data and return a Dataset class instance.\n\n Returns\n -------\n Dataset instance:\n See DATASET_PROPOSAL.txt for more information.\n \"\"\"\n data = _get_data()\n ##### SET THE INDICES #####\n #NOTE: None for exog_idx is the complement of endog_idx\n return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],\n dtype=float)\n\ndef load_pandas():\n data = _get_data()\n ##### SET THE INDICES #####\n #NOTE: None for exog_idx is the complement of endog_idx\n return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],\n dtype=float, index_idx=0)\n\ndef _get_data():\n filepath = dirname(abspath(__file__))\n ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = np.recfromtxt(open(filepath + '\/statecrime.csv', 'rb'),\n delimiter=\",\", names=True, dtype=None)\n return data\n","license":"apache-2.0"} {"repo_name":"asadziach\/tensorflow","path":"tensorflow\/contrib\/learn\/python\/learn\/learn_io\/pandas_io.py","copies":"92","size":"4535","content":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Methods to allow pandas.DataFrame.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn\n\ntry:\n # pylint: disable=g-import-not-at-top\n import pandas as pd\n HAS_PANDAS = True\nexcept IOError:\n # Pandas writes a temporary file during import. If it fails, don't use pandas.\n HAS_PANDAS = False\nexcept ImportError:\n HAS_PANDAS = False\n\nPANDAS_DTYPES = {\n 'int8': 'int',\n 'int16': 'int',\n 'int32': 'int',\n 'int64': 'int',\n 'uint8': 'int',\n 'uint16': 'int',\n 'uint32': 'int',\n 'uint64': 'int',\n 'float16': 'float',\n 'float32': 'float',\n 'float64': 'float',\n 'bool': 'i'\n}\n\n\ndef pandas_input_fn(x,\n y=None,\n batch_size=128,\n num_epochs=1,\n shuffle=True,\n queue_capacity=1000,\n num_threads=1,\n target_column='target'):\n \"\"\"This input_fn diffs from the core version with default `shuffle`.\"\"\"\n return core_pandas_input_fn(x=x,\n y=y,\n batch_size=batch_size,\n shuffle=shuffle,\n num_epochs=num_epochs,\n queue_capacity=queue_capacity,\n num_threads=num_threads,\n target_column=target_column)\n\n\ndef extract_pandas_data(data):\n \"\"\"Extract data from pandas.DataFrame for predictors.\n\n Given a DataFrame, will extract the values and cast them to float. 
The\n DataFrame is expected to contain values of type int, float or bool.\n\n Args:\n data: `pandas.DataFrame` containing the data to be extracted.\n\n Returns:\n A numpy `ndarray` of the DataFrame's values as floats.\n\n Raises:\n ValueError: if data contains types other than int, float or bool.\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n return data\n\n bad_data = [column for column in data\n if data[column].dtype.name not in PANDAS_DTYPES]\n\n if not bad_data:\n return data.values.astype('float')\n else:\n error_report = [(\"'\" + str(column) + \"' type='\" +\n data[column].dtype.name + \"'\") for column in bad_data]\n raise ValueError('Data types for extracting pandas data must be int, '\n 'float, or bool. Found: ' + ', '.join(error_report))\n\n\ndef extract_pandas_matrix(data):\n \"\"\"Extracts numpy matrix from pandas DataFrame.\n\n Args:\n data: `pandas.DataFrame` containing the data to be extracted.\n\n Returns:\n A numpy `ndarray` of the DataFrame's values.\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n return data\n\n return data.as_matrix()\n\n\ndef extract_pandas_labels(labels):\n \"\"\"Extract data from pandas.DataFrame for labels.\n\n Args:\n labels: `pandas.DataFrame` or `pandas.Series` containing one column of\n labels to be extracted.\n\n Returns:\n A numpy `ndarray` of labels from the DataFrame.\n\n Raises:\n ValueError: if more than one column is found or type is not int, float or\n bool.\n \"\"\"\n if isinstance(labels,\n pd.DataFrame): # pandas.Series also belongs to DataFrame\n if len(labels.columns) > 1:\n raise ValueError('Only one column for labels is allowed.')\n\n bad_data = [column for column in labels\n if labels[column].dtype.name not in PANDAS_DTYPES]\n if not bad_data:\n return labels.values\n else:\n error_report = [\"'\" + str(column) + \"' type=\"\n + str(labels[column].dtype.name) for column in bad_data]\n raise ValueError('Data types for extracting labels must be int, '\n 'float, or bool. 
Found: ' + ', '.join(error_report))\n else:\n return labels\n","license":"apache-2.0"} {"repo_name":"cbertinato\/pandas","path":"pandas\/io\/excel\/_openpyxl.py","copies":"1","size":"14098","content":"from pandas.io.excel._base import ExcelWriter\nfrom pandas.io.excel._util import _validate_freeze_panes\n\n\nclass _OpenpyxlWriter(ExcelWriter):\n engine = 'openpyxl'\n supported_extensions = ('.xlsx', '.xlsm')\n\n def __init__(self, path, engine=None, mode='w', **engine_kwargs):\n # Use the openpyxl module as the Excel writer.\n from openpyxl.workbook import Workbook\n\n super().__init__(path, mode=mode, **engine_kwargs)\n\n if self.mode == 'a': # Load from existing workbook\n from openpyxl import load_workbook\n book = load_workbook(self.path)\n self.book = book\n else:\n # Create workbook object with default optimized_write=True.\n self.book = Workbook()\n\n if self.book.worksheets:\n try:\n self.book.remove(self.book.worksheets[0])\n except AttributeError:\n\n # compat - for openpyxl <= 2.4\n self.book.remove_sheet(self.book.worksheets[0])\n\n def save(self):\n \"\"\"\n Save workbook to disk.\n \"\"\"\n return self.book.save(self.path)\n\n @classmethod\n def _convert_to_style(cls, style_dict):\n \"\"\"\n converts a style_dict to an openpyxl style object\n Parameters\n ----------\n style_dict : style dictionary to convert\n \"\"\"\n\n from openpyxl.style import Style\n xls_style = Style()\n for key, value in style_dict.items():\n for nk, nv in value.items():\n if key == \"borders\":\n (xls_style.borders.__getattribute__(nk)\n .__setattr__('border_style', nv))\n else:\n xls_style.__getattribute__(key).__setattr__(nk, nv)\n\n return xls_style\n\n @classmethod\n def _convert_to_style_kwargs(cls, style_dict):\n \"\"\"\n Convert a style_dict to a set of kwargs suitable for initializing\n or updating-on-copy an openpyxl v2 style object\n Parameters\n ----------\n style_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'font'\n 'fill'\n 'border' ('borders')\n 'alignment'\n 'number_format'\n 'protection'\n Returns\n -------\n style_kwargs : dict\n A dict with the same, normalized keys as ``style_dict`` but each\n value has been replaced with a native openpyxl style object of the\n appropriate class.\n \"\"\"\n\n _style_key_map = {\n 'borders': 'border',\n }\n\n style_kwargs = {}\n for k, v in style_dict.items():\n if k in _style_key_map:\n k = _style_key_map[k]\n _conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),\n lambda x: None)\n new_v = _conv_to_x(v)\n if new_v:\n style_kwargs[k] = new_v\n\n return style_kwargs\n\n @classmethod\n def _convert_to_color(cls, color_spec):\n \"\"\"\n Convert ``color_spec`` to an openpyxl v2 Color object\n Parameters\n ----------\n color_spec : str, dict\n A 32-bit ARGB hex string, or a dict with zero or more of the\n following keys.\n 'rgb'\n 'indexed'\n 'auto'\n 'theme'\n 'tint'\n 'index'\n 'type'\n Returns\n -------\n color : openpyxl.styles.Color\n \"\"\"\n\n from openpyxl.styles import Color\n\n if isinstance(color_spec, str):\n return Color(color_spec)\n else:\n return Color(**color_spec)\n\n @classmethod\n def _convert_to_font(cls, font_dict):\n \"\"\"\n Convert ``font_dict`` to an openpyxl v2 Font object\n Parameters\n ----------\n font_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'name'\n 'size' ('sz')\n 'bold' ('b')\n 'italic' ('i')\n 'underline' ('u')\n 'strikethrough' ('strike')\n 'color'\n 'vertAlign' ('vertalign')\n 'charset'\n 'scheme'\n 'family'\n 'outline'\n 'shadow'\n 
'condense'\n Returns\n -------\n font : openpyxl.styles.Font\n \"\"\"\n\n from openpyxl.styles import Font\n\n _font_key_map = {\n 'sz': 'size',\n 'b': 'bold',\n 'i': 'italic',\n 'u': 'underline',\n 'strike': 'strikethrough',\n 'vertalign': 'vertAlign',\n }\n\n font_kwargs = {}\n for k, v in font_dict.items():\n if k in _font_key_map:\n k = _font_key_map[k]\n if k == 'color':\n v = cls._convert_to_color(v)\n font_kwargs[k] = v\n\n return Font(**font_kwargs)\n\n @classmethod\n def _convert_to_stop(cls, stop_seq):\n \"\"\"\n Convert ``stop_seq`` to a list of openpyxl v2 Color objects,\n suitable for initializing the ``GradientFill`` ``stop`` parameter.\n Parameters\n ----------\n stop_seq : iterable\n An iterable that yields objects suitable for consumption by\n ``_convert_to_color``.\n Returns\n -------\n stop : list of openpyxl.styles.Color\n \"\"\"\n\n return map(cls._convert_to_color, stop_seq)\n\n @classmethod\n def _convert_to_fill(cls, fill_dict):\n \"\"\"\n Convert ``fill_dict`` to an openpyxl v2 Fill object\n Parameters\n ----------\n fill_dict : dict\n A dict with one or more of the following keys (or their synonyms),\n 'fill_type' ('patternType', 'patterntype')\n 'start_color' ('fgColor', 'fgcolor')\n 'end_color' ('bgColor', 'bgcolor')\n or one or more of the following keys (or their synonyms).\n 'type' ('fill_type')\n 'degree'\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'stop'\n Returns\n -------\n fill : openpyxl.styles.Fill\n \"\"\"\n\n from openpyxl.styles import PatternFill, GradientFill\n\n _pattern_fill_key_map = {\n 'patternType': 'fill_type',\n 'patterntype': 'fill_type',\n 'fgColor': 'start_color',\n 'fgcolor': 'start_color',\n 'bgColor': 'end_color',\n 'bgcolor': 'end_color',\n }\n\n _gradient_fill_key_map = {\n 'fill_type': 'type',\n }\n\n pfill_kwargs = {}\n gfill_kwargs = {}\n for k, v in fill_dict.items():\n pk = gk = None\n if k in _pattern_fill_key_map:\n pk = _pattern_fill_key_map[k]\n if k in _gradient_fill_key_map:\n gk = _gradient_fill_key_map[k]\n if pk in ['start_color', 'end_color']:\n v = cls._convert_to_color(v)\n if gk == 'stop':\n v = cls._convert_to_stop(v)\n if pk:\n pfill_kwargs[pk] = v\n elif gk:\n gfill_kwargs[gk] = v\n else:\n pfill_kwargs[k] = v\n gfill_kwargs[k] = v\n\n try:\n return PatternFill(**pfill_kwargs)\n except TypeError:\n return GradientFill(**gfill_kwargs)\n\n @classmethod\n def _convert_to_side(cls, side_spec):\n \"\"\"\n Convert ``side_spec`` to an openpyxl v2 Side object\n Parameters\n ----------\n side_spec : str, dict\n A string specifying the border style, or a dict with zero or more\n of the following keys (or their synonyms).\n 'style' ('border_style')\n 'color'\n Returns\n -------\n side : openpyxl.styles.Side\n \"\"\"\n\n from openpyxl.styles import Side\n\n _side_key_map = {\n 'border_style': 'style',\n }\n\n if isinstance(side_spec, str):\n return Side(style=side_spec)\n\n side_kwargs = {}\n for k, v in side_spec.items():\n if k in _side_key_map:\n k = _side_key_map[k]\n if k == 'color':\n v = cls._convert_to_color(v)\n side_kwargs[k] = v\n\n return Side(**side_kwargs)\n\n @classmethod\n def _convert_to_border(cls, border_dict):\n \"\"\"\n Convert ``border_dict`` to an openpyxl v2 Border object\n Parameters\n ----------\n border_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'diagonal'\n 'diagonal_direction'\n 'vertical'\n 'horizontal'\n 'diagonalUp' ('diagonalup')\n 'diagonalDown' ('diagonaldown')\n 'outline'\n Returns\n -------\n border : 
openpyxl.styles.Border\n \"\"\"\n\n from openpyxl.styles import Border\n\n _border_key_map = {\n 'diagonalup': 'diagonalUp',\n 'diagonaldown': 'diagonalDown',\n }\n\n border_kwargs = {}\n for k, v in border_dict.items():\n if k in _border_key_map:\n k = _border_key_map[k]\n if k == 'color':\n v = cls._convert_to_color(v)\n if k in ['left', 'right', 'top', 'bottom', 'diagonal']:\n v = cls._convert_to_side(v)\n border_kwargs[k] = v\n\n return Border(**border_kwargs)\n\n @classmethod\n def _convert_to_alignment(cls, alignment_dict):\n \"\"\"\n Convert ``alignment_dict`` to an openpyxl v2 Alignment object\n Parameters\n ----------\n alignment_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'horizontal'\n 'vertical'\n 'text_rotation'\n 'wrap_text'\n 'shrink_to_fit'\n 'indent'\n Returns\n -------\n alignment : openpyxl.styles.Alignment\n \"\"\"\n\n from openpyxl.styles import Alignment\n\n return Alignment(**alignment_dict)\n\n @classmethod\n def _convert_to_number_format(cls, number_format_dict):\n \"\"\"\n Convert ``number_format_dict`` to an openpyxl v2.1.0 number format\n initializer.\n Parameters\n ----------\n number_format_dict : dict\n A dict with zero or more of the following keys.\n 'format_code' : str\n Returns\n -------\n number_format : str\n \"\"\"\n return number_format_dict['format_code']\n\n @classmethod\n def _convert_to_protection(cls, protection_dict):\n \"\"\"\n Convert ``protection_dict`` to an openpyxl v2 Protection object.\n Parameters\n ----------\n protection_dict : dict\n A dict with zero or more of the following keys.\n 'locked'\n 'hidden'\n Returns\n -------\n \"\"\"\n\n from openpyxl.styles import Protection\n\n return Protection(**protection_dict)\n\n def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,\n freeze_panes=None):\n # Write the frame cells using openpyxl.\n sheet_name = self._get_sheet_name(sheet_name)\n\n _style_cache = {}\n\n if sheet_name in self.sheets:\n wks = self.sheets[sheet_name]\n else:\n wks = self.book.create_sheet()\n wks.title = sheet_name\n self.sheets[sheet_name] = wks\n\n if _validate_freeze_panes(freeze_panes):\n wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,\n column=freeze_panes[1] + 1)\n\n for cell in cells:\n xcell = wks.cell(\n row=startrow + cell.row + 1,\n column=startcol + cell.col + 1\n )\n xcell.value, fmt = self._value_with_fmt(cell.val)\n if fmt:\n xcell.number_format = fmt\n\n style_kwargs = {}\n if cell.style:\n key = str(cell.style)\n style_kwargs = _style_cache.get(key)\n if style_kwargs is None:\n style_kwargs = self._convert_to_style_kwargs(cell.style)\n _style_cache[key] = style_kwargs\n\n if style_kwargs:\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n if cell.mergestart is not None and cell.mergeend is not None:\n\n wks.merge_cells(\n start_row=startrow + cell.row + 1,\n start_column=startcol + cell.col + 1,\n end_column=startcol + cell.mergeend + 1,\n end_row=startrow + cell.mergestart + 1\n )\n\n # When cells are merged only the top-left cell is preserved\n # The behaviour of the other cells in a merged range is\n # undefined\n if style_kwargs:\n first_row = startrow + cell.row + 1\n last_row = startrow + cell.mergestart + 1\n first_col = startcol + cell.col + 1\n last_col = startcol + cell.mergeend + 1\n\n for row in range(first_row, last_row + 1):\n for col in range(first_col, last_col + 1):\n if row == first_row and col == first_col:\n # Ignore first cell. 
It is already handled.\n continue\n xcell = wks.cell(column=col, row=row)\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n","license":"bsd-3-clause"} {"repo_name":"moreati\/pandashells","path":"pandashells\/lib\/arg_lib.py","copies":"7","size":"6681","content":"from pandashells.lib import config_lib\n\n\ndef _check_for_recognized_args(*args):\n \"\"\"\n Raise an error if unrecognized argset is specified\n \"\"\"\n allowed_arg_set = set([\n 'io_in',\n 'io_out',\n 'example',\n 'xy_plotting',\n 'decorating',\n ])\n\n in_arg_set = set(args)\n unrecognized_set = in_arg_set - allowed_arg_set\n if unrecognized_set:\n msg = '{} not in allowed set {}'.format(unrecognized_set,\n allowed_arg_set)\n raise ValueError(msg)\n\n\ndef _io_in_adder(parser, config_dict, *args):\n \"\"\"\n Add input options to the parser\n \"\"\"\n in_arg_set = set(args)\n if 'io_in' in in_arg_set:\n group = parser.add_argument_group('Input Options')\n # define the valid components\n io_opt_list = ['csv', 'table', 'header', 'noheader']\n\n # allow the option of supplying input column names\n msg = 'Overwrite input column names with this list'\n group.add_argument(\n '--names', nargs='+', type=str, dest='names',\n metavar=\"name\", help=msg)\n\n default_for_input = [\n config_dict['io_input_type'],\n config_dict['io_input_header']\n ]\n msg = 'Must be one of {}'.format(repr(io_opt_list))\n group.add_argument(\n '-i', '--input_options', nargs='+', type=str, dest='input_options',\n metavar='option', default=default_for_input, choices=io_opt_list,\n help=msg)\n\n\ndef _io_out_adder(parser, config_dict, *args):\n \"\"\"\n Add output options to the parser\n \"\"\"\n in_arg_set = set(args)\n if 'io_out' in in_arg_set:\n group = parser.add_argument_group('Output Options')\n # define the valid components\n io_opt_list = [\n 'csv', 'table', 'html', 'header', 'noheader', 'index', 'noindex',\n ]\n\n # define the current defaults\n default_for_output = [\n config_dict['io_output_type'],\n config_dict['io_output_header'],\n config_dict['io_output_index']\n ]\n\n # show the current defaults in the arg parser\n msg = 'Must be one of {}'.format(repr(io_opt_list))\n group.add_argument(\n '-o', '--output_options', nargs='+',\n type=str, dest='output_options', metavar='option',\n default=default_for_output, help=msg)\n\n msg = (\n 'Replace NaNs with this string. '\n 'A string containing \\'nan\\' will set na_rep to numpy NaN. 
'\n 'Current default is {}'\n ).format(repr(str(config_dict['io_output_na_rep'])))\n group.add_argument(\n '--output_na_rep', nargs=1, type=str, dest='io_output_na_rep',\n help=msg)\n\n\ndef _decorating_adder(parser, *args):\n in_arg_set = set(args)\n if 'decorating' in in_arg_set:\n # get a list of valid plot styling info\n context_list = [t for t in config_lib.CONFIG_OPTS if\n t[0] == 'plot_context'][0][1]\n theme_list = [t for t in config_lib.CONFIG_OPTS if\n t[0] == 'plot_theme'][0][1]\n palette_list = [t for t in config_lib.CONFIG_OPTS if\n t[0] == 'plot_palette'][0][1]\n\n group = parser.add_argument_group('Plot specific Options')\n msg = \"Set the x-limits for the plot\"\n group.add_argument(\n '--xlim', nargs=2, type=float, dest='xlim',\n metavar=('XMIN', 'XMAX'), help=msg)\n msg = \"Set the y-limits for the plot\"\n group.add_argument(\n '--ylim', nargs=2, type=float, dest='ylim',\n metavar=('YMIN', 'YMAX'), help=msg)\n msg = \"Draw x axis with log scale\"\n group.add_argument(\n '--xlog', action='store_true', dest='xlog', default=False,\n help=msg)\n msg = \"Draw y axis with log scale\"\n group.add_argument(\n '--ylog', action='store_true', dest='ylog', default=False,\n help=msg)\n msg = \"Set the x-label for the plot\"\n group.add_argument(\n '--xlabel', nargs=1, type=str, dest='xlabel', help=msg)\n msg = \"Set the y-label for the plot\"\n group.add_argument(\n '--ylabel', nargs=1, type=str, dest='ylabel', help=msg)\n msg = \"Set the title for the plot\"\n group.add_argument(\n '--title', nargs=1, type=str, dest='title', help=msg)\n msg = \"Specify legend location\"\n group.add_argument(\n '--legend', nargs=1, type=str, dest='legend',\n choices=['1', '2', '3', '4', 'best'], help=msg)\n msg = \"Specify whether hide the grid or not\"\n group.add_argument(\n '--nogrid', action='store_true', dest='no_grid', default=False,\n help=msg)\n msg = \"Specify plot context. Default = '{}' \".format(context_list[0])\n group.add_argument(\n '--context', nargs=1, type=str, dest='plot_context',\n default=[context_list[0]], choices=context_list, help=msg)\n msg = \"Specify plot theme. Default = '{}' \".format(theme_list[0])\n group.add_argument(\n '--theme', nargs=1, type=str, dest='plot_theme',\n default=[theme_list[0]], choices=theme_list, help=msg)\n msg = \"Specify plot palette. 
Default = '{}' \".format(palette_list[0])\n group.add_argument(\n '--palette', nargs=1, type=str, dest='plot_palette',\n default=[palette_list[0]], choices=palette_list, help=msg)\n msg = \"Save the figure to this file\"\n group.add_argument('--savefig', nargs=1, type=str, help=msg)\n\n\ndef _xy_adder(parser, *args):\n in_arg_set = set(args)\n if 'xy_plotting' in in_arg_set:\n\n msg = 'Column to plot on x-axis'\n parser.add_argument(\n '-x', nargs=1, type=str, dest='x', metavar='col', help=msg)\n\n msg = 'List of columns to plot on y-axis'\n parser.add_argument(\n '-y', nargs='+', type=str, dest='y', metavar='col', help=msg)\n\n msg = \"Plot style(s) defaults to .-\"\n parser.add_argument(\n '-s', '--style', nargs='+', type=str, dest='style', default=['.-'],\n help=msg, metavar='style')\n\n\ndef add_args(parser, *args):\n \"\"\"Adds argument blocks to the arg parser\n\n :type parser: argparse instance\n :param parser: The argarse instance to use in adding arguments\n\n Additinional arguments are the names of argument blocks to add\n \"\"\"\n config_dict = config_lib.get_config()\n _check_for_recognized_args(*args)\n _io_in_adder(parser, config_dict, *args)\n _io_out_adder(parser, config_dict, *args)\n _decorating_adder(parser, *args)\n _xy_adder(parser, *args)\n","license":"bsd-2-clause"} {"repo_name":"mykoz\/ThinkStats2","path":"code\/thinkstats2.py","copies":"68","size":"68825","content":"\"\"\"This file contains code for use with \"Think Stats\" and\n\"Think Bayes\", both by Allen B. Downey, available from greenteapress.com\n\nCopyright 2014 Allen B. Downey\nLicense: GNU GPLv3 http:\/\/www.gnu.org\/licenses\/gpl.html\n\"\"\"\n\nfrom __future__ import print_function, division\n\n\"\"\"This file contains class definitions for:\n\nHist: represents a histogram (map from values to integer frequencies).\n\nPmf: represents a probability mass function (map from values to probs).\n\n_DictWrapper: private parent class for Hist and Pmf.\n\nCdf: represents a discrete cumulative distribution function\n\nPdf: represents a continuous probability density function\n\n\"\"\"\n\nimport bisect\nimport copy\nimport logging\nimport math\nimport random\nimport re\n\nfrom collections import Counter\nfrom operator import itemgetter\n\nimport thinkplot\n\nimport numpy as np\nimport pandas\n\nimport scipy\nfrom scipy import stats\nfrom scipy import special\nfrom scipy import ndimage\n\nfrom io import open\n\nROOT2 = math.sqrt(2)\n\ndef RandomSeed(x):\n \"\"\"Initialize the random and np.random generators.\n\n x: int seed\n \"\"\"\n random.seed(x)\n np.random.seed(x)\n \n\ndef Odds(p):\n \"\"\"Computes odds for a given probability.\n\n Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.\n\n Note: when p=1, the formula for odds divides by zero, which is\n normally undefined. 
But I think it is reasonable to define Odds(1)\n to be infinity, so that's what this function does.\n\n p: float 0-1\n\n Returns: float odds\n \"\"\"\n if p == 1:\n return float('inf')\n return p \/ (1 - p)\n\n\ndef Probability(o):\n \"\"\"Computes the probability corresponding to given odds.\n\n Example: o=2 means 2:1 odds in favor, or 2\/3 probability\n\n o: float odds, strictly positive\n\n Returns: float probability\n \"\"\"\n return o \/ (o + 1)\n\n\ndef Probability2(yes, no):\n \"\"\"Computes the probability corresponding to given odds.\n\n Example: yes=2, no=1 means 2:1 odds in favor, or 2\/3 probability.\n \n yes, no: int or float odds in favor\n \"\"\"\n return yes \/ (yes + no)\n\n\nclass Interpolator(object):\n \"\"\"Represents a mapping between sorted sequences; performs linear interp.\n\n Attributes:\n xs: sorted list\n ys: sorted list\n \"\"\"\n\n def __init__(self, xs, ys):\n self.xs = xs\n self.ys = ys\n\n def Lookup(self, x):\n \"\"\"Looks up x and returns the corresponding value of y.\"\"\"\n return self._Bisect(x, self.xs, self.ys)\n\n def Reverse(self, y):\n \"\"\"Looks up y and returns the corresponding value of x.\"\"\"\n return self._Bisect(y, self.ys, self.xs)\n\n def _Bisect(self, x, xs, ys):\n \"\"\"Helper function.\"\"\"\n if x <= xs[0]:\n return ys[0]\n if x >= xs[-1]:\n return ys[-1]\n i = bisect.bisect(xs, x)\n frac = 1.0 * (x - xs[i - 1]) \/ (xs[i] - xs[i - 1])\n y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])\n return y\n\n\nclass _DictWrapper(object):\n \"\"\"An object that contains a dictionary.\"\"\"\n\n def __init__(self, obj=None, label=None):\n \"\"\"Initializes the distribution.\n\n obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs\n label: string label\n \"\"\"\n self.label = label if label is not None else '_nolegend_'\n self.d = {}\n\n # flag whether the distribution is under a log transform\n self.log = False\n\n if obj is None:\n return\n\n if isinstance(obj, (_DictWrapper, Cdf, Pdf)):\n self.label = label if label is not None else obj.label\n\n if isinstance(obj, dict):\n self.d.update(obj.items())\n elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):\n self.d.update(obj.Items())\n elif isinstance(obj, pandas.Series):\n self.d.update(obj.value_counts().iteritems())\n else:\n # finally, treat it like a list\n self.d.update(Counter(obj))\n\n if len(self) > 0 and isinstance(self, Pmf):\n self.Normalize()\n\n def __hash__(self):\n return id(self)\n\n def __str__(self):\n cls = self.__class__.__name__\n return '%s(%s)' % (cls, str(self.d))\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return self.d == other.d\n\n def __len__(self):\n return len(self.d)\n\n def __iter__(self):\n return iter(self.d)\n\n def iterkeys(self):\n \"\"\"Returns an iterator over keys.\"\"\"\n return iter(self.d)\n\n def __contains__(self, value):\n return value in self.d\n\n def __getitem__(self, value):\n return self.d.get(value, 0)\n\n def __setitem__(self, value, prob):\n self.d[value] = prob\n\n def __delitem__(self, value):\n del self.d[value]\n\n def Copy(self, label=None):\n \"\"\"Returns a copy.\n\n Make a shallow copy of d. 
If you want a deep copy of d,\n use copy.deepcopy on the whole object.\n\n label: string label for the new Hist\n\n returns: new _DictWrapper with the same type\n \"\"\"\n new = copy.copy(self)\n new.d = copy.copy(self.d)\n new.label = label if label is not None else self.label\n return new\n\n def Scale(self, factor):\n \"\"\"Multiplies the values by a factor.\n\n factor: what to multiply by\n\n Returns: new object\n \"\"\"\n new = self.Copy()\n new.d.clear()\n\n for val, prob in self.Items():\n new.Set(val * factor, prob)\n return new\n\n def Log(self, m=None):\n \"\"\"Log transforms the probabilities.\n \n Removes values with probability 0.\n\n Normalizes so that the largest logprob is 0.\n \"\"\"\n if self.log:\n raise ValueError(\"Pmf\/Hist already under a log transform\")\n self.log = True\n\n if m is None:\n m = self.MaxLike()\n\n for x, p in self.d.items():\n if p:\n self.Set(x, math.log(p \/ m))\n else:\n self.Remove(x)\n\n def Exp(self, m=None):\n \"\"\"Exponentiates the probabilities.\n\n m: how much to shift the ps before exponentiating\n\n If m is None, normalizes so that the largest prob is 1.\n \"\"\"\n if not self.log:\n raise ValueError(\"Pmf\/Hist not under a log transform\")\n self.log = False\n\n if m is None:\n m = self.MaxLike()\n\n for x, p in self.d.items():\n self.Set(x, math.exp(p - m))\n\n def GetDict(self):\n \"\"\"Gets the dictionary.\"\"\"\n return self.d\n\n def SetDict(self, d):\n \"\"\"Sets the dictionary.\"\"\"\n self.d = d\n\n def Values(self):\n \"\"\"Gets an unsorted sequence of values.\n\n Note: one source of confusion is that the keys of this\n dictionary are the values of the Hist\/Pmf, and the\n values of the dictionary are frequencies\/probabilities.\n \"\"\"\n return self.d.keys()\n\n def Items(self):\n \"\"\"Gets an unsorted sequence of (value, freq\/prob) pairs.\"\"\"\n return self.d.items()\n\n def Render(self, **options):\n \"\"\"Generates a sequence of points suitable for plotting.\n\n Note: options are ignored\n\n Returns:\n tuple of (sorted value sequence, freq\/prob sequence)\n \"\"\"\n if min(self.d.keys()) is np.nan:\n logging.warning('Hist: contains NaN, may not render correctly.')\n\n return zip(*sorted(self.Items()))\n\n def MakeCdf(self, label=None):\n \"\"\"Makes a Cdf.\"\"\"\n label = label if label is not None else self.label\n return Cdf(self, label=label)\n\n def Print(self):\n \"\"\"Prints the values and freqs\/probs in ascending order.\"\"\"\n for val, prob in sorted(self.d.items()):\n print(val, prob)\n\n def Set(self, x, y=0):\n \"\"\"Sets the freq\/prob associated with the value x.\n\n Args:\n x: number value\n y: number freq or prob\n \"\"\"\n self.d[x] = y\n\n def Incr(self, x, term=1):\n \"\"\"Increments the freq\/prob associated with the value x.\n\n Args:\n x: number value\n term: how much to increment by\n \"\"\"\n self.d[x] = self.d.get(x, 0) + term\n\n def Mult(self, x, factor):\n \"\"\"Scales the freq\/prob associated with the value x.\n\n Args:\n x: number value\n factor: how much to multiply by\n \"\"\"\n self.d[x] = self.d.get(x, 0) * factor\n\n def Remove(self, x):\n \"\"\"Removes a value.\n\n Throws an exception if the value is not there.\n\n Args:\n x: value to remove\n \"\"\"\n del self.d[x]\n\n def Total(self):\n \"\"\"Returns the total of the frequencies\/probabilities in the map.\"\"\"\n total = sum(self.d.values())\n return total\n\n def MaxLike(self):\n \"\"\"Returns the largest frequency\/probability in the map.\"\"\"\n return max(self.d.values())\n\n def Largest(self, n=10):\n \"\"\"Returns the 
largest n values, with frequency\/probability.\n\n n: number of items to return\n \"\"\"\n return sorted(self.d.items(), reverse=True)[:n]\n\n def Smallest(self, n=10):\n \"\"\"Returns the smallest n values, with frequency\/probability.\n\n n: number of items to return\n \"\"\"\n return sorted(self.d.items(), reverse=False)[:n]\n\n\nclass Hist(_DictWrapper):\n \"\"\"Represents a histogram, which is a map from values to frequencies.\n\n Values can be any hashable type; frequencies are integer counters.\n \"\"\"\n def Freq(self, x):\n \"\"\"Gets the frequency associated with the value x.\n\n Args:\n x: number value\n\n Returns:\n int frequency\n \"\"\"\n return self.d.get(x, 0)\n\n def Freqs(self, xs):\n \"\"\"Gets frequencies for a sequence of values.\"\"\"\n return [self.Freq(x) for x in xs]\n\n def IsSubset(self, other):\n \"\"\"Checks whether the values in this histogram are a subset of\n the values in the given histogram.\"\"\"\n for val, freq in self.Items():\n if freq > other.Freq(val):\n return False\n return True\n\n def Subtract(self, other):\n \"\"\"Subtracts the values in the given histogram from this histogram.\"\"\"\n for val, freq in other.Items():\n self.Incr(val, -freq)\n\n\nclass Pmf(_DictWrapper):\n \"\"\"Represents a probability mass function.\n \n Values can be any hashable type; probabilities are floating-point.\n Pmfs are not necessarily normalized.\n \"\"\"\n\n def Prob(self, x, default=0):\n \"\"\"Gets the probability associated with the value x.\n\n Args:\n x: number value\n default: value to return if the key is not there\n\n Returns:\n float probability\n \"\"\"\n return self.d.get(x, default)\n\n def Probs(self, xs):\n \"\"\"Gets probabilities for a sequence of values.\"\"\"\n return [self.Prob(x) for x in xs]\n\n def Percentile(self, percentage):\n \"\"\"Computes a percentile of a given Pmf.\n\n Note: this is not super efficient. 
If you are planning\n to compute more than a few percentiles, compute the Cdf.\n\n percentage: float 0-100\n\n returns: value from the Pmf\n \"\"\"\n p = percentage \/ 100.0\n total = 0\n for val, prob in sorted(self.Items()):\n total += prob\n if total >= p:\n return val\n\n def ProbGreater(self, x):\n \"\"\"Probability that a sample from this Pmf exceeds x.\n\n x: number\n\n returns: float probability\n \"\"\"\n if isinstance(x, _DictWrapper):\n return PmfProbGreater(self, x)\n else:\n t = [prob for (val, prob) in self.d.items() if val > x]\n return sum(t)\n\n def ProbLess(self, x):\n \"\"\"Probability that a sample from this Pmf is less than x.\n\n x: number\n\n returns: float probability\n \"\"\"\n if isinstance(x, _DictWrapper):\n return PmfProbLess(self, x)\n else:\n t = [prob for (val, prob) in self.d.items() if val < x]\n return sum(t)\n\n def __lt__(self, obj):\n \"\"\"Less than.\n\n obj: number or _DictWrapper\n\n returns: float probability\n \"\"\"\n return self.ProbLess(obj)\n\n def __gt__(self, obj):\n \"\"\"Greater than.\n\n obj: number or _DictWrapper\n\n returns: float probability\n \"\"\"\n return self.ProbGreater(obj)\n\n def __ge__(self, obj):\n \"\"\"Greater than or equal.\n\n obj: number or _DictWrapper\n\n returns: float probability\n \"\"\"\n return 1 - (self < obj)\n\n def __le__(self, obj):\n \"\"\"Less than or equal.\n\n obj: number or _DictWrapper\n\n returns: float probability\n \"\"\"\n return 1 - (self > obj)\n\n def Normalize(self, fraction=1.0):\n \"\"\"Normalizes this PMF so the sum of all probs is fraction.\n\n Args:\n fraction: what the total should be after normalization\n\n Returns: the total probability before normalizing\n \"\"\"\n if self.log:\n raise ValueError(\"Normalize: Pmf is under a log transform\")\n\n total = self.Total()\n if total == 0.0:\n raise ValueError('Normalize: total probability is zero.')\n #logging.warning('Normalize: total probability is zero.')\n #return total\n\n factor = fraction \/ total\n for x in self.d:\n self.d[x] *= factor\n\n return total\n\n def Random(self):\n \"\"\"Chooses a random element from this PMF.\n\n Note: this is not very efficient. 
If you plan to call\n this more than a few times, consider converting to a CDF.\n\n Returns:\n float value from the Pmf\n \"\"\"\n target = random.random()\n total = 0.0\n for x, p in self.d.items():\n total += p\n if total >= target:\n return x\n\n # we shouldn't get here\n raise ValueError('Random: Pmf might not be normalized.')\n\n def Mean(self):\n \"\"\"Computes the mean of a PMF.\n\n Returns:\n float mean\n \"\"\"\n mean = 0.0\n for x, p in self.d.items():\n mean += p * x\n return mean\n\n def Var(self, mu=None):\n \"\"\"Computes the variance of a PMF.\n\n mu: the point around which the variance is computed;\n if omitted, computes the mean\n\n returns: float variance\n \"\"\"\n if mu is None:\n mu = self.Mean()\n\n var = 0.0\n for x, p in self.d.items():\n var += p * (x - mu) ** 2\n return var\n\n def Std(self, mu=None):\n \"\"\"Computes the standard deviation of a PMF.\n\n mu: the point around which the variance is computed;\n if omitted, computes the mean\n\n returns: float standard deviation\n \"\"\"\n var = self.Var(mu)\n return math.sqrt(var)\n\n def MaximumLikelihood(self):\n \"\"\"Returns the value with the highest probability.\n\n Returns: float probability\n \"\"\"\n _, val = max((prob, val) for val, prob in self.Items())\n return val\n\n def CredibleInterval(self, percentage=90):\n \"\"\"Computes the central credible interval.\n\n If percentage=90, computes the 90% CI.\n\n Args:\n percentage: float between 0 and 100\n\n Returns:\n sequence of two floats, low and high\n \"\"\"\n cdf = self.MakeCdf()\n return cdf.CredibleInterval(percentage)\n\n def __add__(self, other):\n \"\"\"Computes the Pmf of the sum of values drawn from self and other.\n\n other: another Pmf or a scalar\n\n returns: new Pmf\n \"\"\"\n try:\n return self.AddPmf(other)\n except AttributeError:\n return self.AddConstant(other)\n\n def AddPmf(self, other):\n \"\"\"Computes the Pmf of the sum of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n pmf = Pmf()\n for v1, p1 in self.Items():\n for v2, p2 in other.Items():\n pmf.Incr(v1 + v2, p1 * p2)\n return pmf\n\n def AddConstant(self, other):\n \"\"\"Computes the Pmf of the sum a constant and values from self.\n\n other: a number\n\n returns: new Pmf\n \"\"\"\n pmf = Pmf()\n for v1, p1 in self.Items():\n pmf.Set(v1 + other, p1)\n return pmf\n\n def __sub__(self, other):\n \"\"\"Computes the Pmf of the diff of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n try:\n return self.SubPmf(other)\n except AttributeError:\n return self.AddConstant(-other)\n\n def SubPmf(self, other):\n \"\"\"Computes the Pmf of the diff of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n pmf = Pmf()\n for v1, p1 in self.Items():\n for v2, p2 in other.Items():\n pmf.Incr(v1 - v2, p1 * p2)\n return pmf\n\n def __mul__(self, other):\n \"\"\"Computes the Pmf of the product of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n try:\n return self.MulPmf(other)\n except AttributeError:\n return self.MulConstant(other)\n\n def MulPmf(self, other):\n \"\"\"Computes the Pmf of the diff of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n pmf = Pmf()\n for v1, p1 in self.Items():\n for v2, p2 in other.Items():\n pmf.Incr(v1 * v2, p1 * p2)\n return pmf\n\n def MulConstant(self, other):\n \"\"\"Computes the Pmf of the product of a constant and values from self.\n\n other: a number\n\n returns: new Pmf\n 
\"\"\"\n pmf = Pmf()\n for v1, p1 in self.Items():\n pmf.Set(v1 * other, p1)\n return pmf\n\n def __div__(self, other):\n \"\"\"Computes the Pmf of the ratio of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n try:\n return self.DivPmf(other)\n except AttributeError:\n return self.MulConstant(1\/other)\n\n __truediv__ = __div__\n\n def DivPmf(self, other):\n \"\"\"Computes the Pmf of the ratio of values drawn from self and other.\n\n other: another Pmf\n\n returns: new Pmf\n \"\"\"\n pmf = Pmf()\n for v1, p1 in self.Items():\n for v2, p2 in other.Items():\n pmf.Incr(v1 \/ v2, p1 * p2)\n return pmf\n\n def Max(self, k):\n \"\"\"Computes the CDF of the maximum of k selections from this dist.\n\n k: int\n\n returns: new Cdf\n \"\"\"\n cdf = self.MakeCdf()\n return cdf.Max(k)\n\n\nclass Joint(Pmf):\n \"\"\"Represents a joint distribution.\n\n The values are sequences (usually tuples)\n \"\"\"\n\n def Marginal(self, i, label=None):\n \"\"\"Gets the marginal distribution of the indicated variable.\n\n i: index of the variable we want\n\n Returns: Pmf\n \"\"\"\n pmf = Pmf(label=label)\n for vs, prob in self.Items():\n pmf.Incr(vs[i], prob)\n return pmf\n\n def Conditional(self, i, j, val, label=None):\n \"\"\"Gets the conditional distribution of the indicated variable.\n\n Distribution of vs[i], conditioned on vs[j] = val.\n\n i: index of the variable we want\n j: which variable is conditioned on\n val: the value the jth variable has to have\n\n Returns: Pmf\n \"\"\"\n pmf = Pmf(label=label)\n for vs, prob in self.Items():\n if vs[j] != val:\n continue\n pmf.Incr(vs[i], prob)\n\n pmf.Normalize()\n return pmf\n\n def MaxLikeInterval(self, percentage=90):\n \"\"\"Returns the maximum-likelihood credible interval.\n\n If percentage=90, computes a 90% CI containing the values\n with the highest likelihoods.\n\n percentage: float between 0 and 100\n\n Returns: list of values from the suite\n \"\"\"\n interval = []\n total = 0\n\n t = [(prob, val) for val, prob in self.Items()]\n t.sort(reverse=True)\n\n for prob, val in t:\n interval.append(val)\n total += prob\n if total >= percentage \/ 100.0:\n break\n\n return interval\n\n\ndef MakeJoint(pmf1, pmf2):\n \"\"\"Joint distribution of values from pmf1 and pmf2.\n\n Assumes that the PMFs represent independent random variables.\n\n Args:\n pmf1: Pmf object\n pmf2: Pmf object\n\n Returns:\n Joint pmf of value pairs\n \"\"\"\n joint = Joint()\n for v1, p1 in pmf1.Items():\n for v2, p2 in pmf2.Items():\n joint.Set((v1, v2), p1 * p2)\n return joint\n\n\ndef MakeHistFromList(t, label=None):\n \"\"\"Makes a histogram from an unsorted sequence of values.\n\n Args:\n t: sequence of numbers\n label: string label for this histogram\n\n Returns:\n Hist object\n \"\"\"\n return Hist(t, label=label)\n\n\ndef MakeHistFromDict(d, label=None):\n \"\"\"Makes a histogram from a map from values to frequencies.\n\n Args:\n d: dictionary that maps values to frequencies\n label: string label for this histogram\n\n Returns:\n Hist object\n \"\"\"\n return Hist(d, label)\n\n\ndef MakePmfFromList(t, label=None):\n \"\"\"Makes a PMF from an unsorted sequence of values.\n\n Args:\n t: sequence of numbers\n label: string label for this PMF\n\n Returns:\n Pmf object\n \"\"\"\n return Pmf(t, label=label)\n\n\ndef MakePmfFromDict(d, label=None):\n \"\"\"Makes a PMF from a map from values to probabilities.\n\n Args:\n d: dictionary that maps values to probabilities\n label: string label for this PMF\n\n Returns:\n Pmf object\n \"\"\"\n return 
Pmf(d, label=label)\n\n\ndef MakePmfFromItems(t, label=None):\n \"\"\"Makes a PMF from a sequence of value-probability pairs\n\n Args:\n t: sequence of value-probability pairs\n label: string label for this PMF\n\n Returns:\n Pmf object\n \"\"\"\n return Pmf(dict(t), label=label)\n\n\ndef MakePmfFromHist(hist, label=None):\n \"\"\"Makes a normalized PMF from a Hist object.\n\n Args:\n hist: Hist object\n label: string label\n\n Returns:\n Pmf object\n \"\"\"\n if label is None:\n label = hist.label\n\n return Pmf(hist, label=label)\n\n\ndef MakeMixture(metapmf, label='mix'):\n \"\"\"Make a mixture distribution.\n\n Args:\n metapmf: Pmf that maps from Pmfs to probs.\n label: string label for the new Pmf.\n\n Returns: Pmf object.\n \"\"\"\n mix = Pmf(label=label)\n for pmf, p1 in metapmf.Items():\n for x, p2 in pmf.Items():\n mix.Incr(x, p1 * p2)\n return mix\n\n\ndef MakeUniformPmf(low, high, n):\n \"\"\"Make a uniform Pmf.\n\n low: lowest value (inclusive)\n high: highest value (inclusize)\n n: number of values\n \"\"\"\n pmf = Pmf()\n for x in np.linspace(low, high, n):\n pmf.Set(x, 1)\n pmf.Normalize()\n return pmf\n\n\nclass Cdf(object):\n \"\"\"Represents a cumulative distribution function.\n\n Attributes:\n xs: sequence of values\n ps: sequence of probabilities\n label: string used as a graph label.\n \"\"\"\n def __init__(self, obj=None, ps=None, label=None):\n \"\"\"Initializes.\n \n If ps is provided, obj must be the corresponding list of values.\n\n obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs\n ps: list of cumulative probabilities\n label: string label\n \"\"\"\n self.label = label if label is not None else '_nolegend_'\n\n if isinstance(obj, (_DictWrapper, Cdf, Pdf)):\n if not label:\n self.label = label if label is not None else obj.label\n\n if obj is None:\n # caller does not provide obj, make an empty Cdf\n self.xs = np.asarray([])\n self.ps = np.asarray([])\n if ps is not None:\n logging.warning(\"Cdf: can't pass ps without also passing xs.\")\n return\n else:\n # if the caller provides xs and ps, just store them \n if ps is not None:\n if isinstance(ps, str):\n logging.warning(\"Cdf: ps can't be a string\")\n\n self.xs = np.asarray(obj)\n self.ps = np.asarray(ps)\n return\n\n # caller has provided just obj, not ps\n if isinstance(obj, Cdf):\n self.xs = copy.copy(obj.xs)\n self.ps = copy.copy(obj.ps)\n return\n\n if isinstance(obj, _DictWrapper):\n dw = obj\n else:\n dw = Hist(obj)\n\n if len(dw) == 0:\n self.xs = np.asarray([])\n self.ps = np.asarray([])\n return\n\n xs, freqs = zip(*sorted(dw.Items()))\n self.xs = np.asarray(xs)\n self.ps = np.cumsum(freqs, dtype=np.float)\n self.ps \/= self.ps[-1]\n\n def __str__(self):\n return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))\n\n __repr__ = __str__\n\n def __len__(self):\n return len(self.xs)\n\n def __getitem__(self, x):\n return self.Prob(x)\n\n def __setitem__(self):\n raise UnimplementedMethodException()\n\n def __delitem__(self):\n raise UnimplementedMethodException()\n\n def __eq__(self, other):\n return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)\n\n def Copy(self, label=None):\n \"\"\"Returns a copy of this Cdf.\n\n label: string label for the new Cdf\n \"\"\"\n if label is None:\n label = self.label\n return Cdf(list(self.xs), list(self.ps), label=label)\n\n def MakePmf(self, label=None):\n \"\"\"Makes a Pmf.\"\"\"\n if label is None:\n label = self.label\n return Pmf(self, label=label)\n\n def Values(self):\n \"\"\"Returns a sorted list of values.\n \"\"\"\n return self.xs\n\n def 
Items(self):\n \"\"\"Returns a sorted sequence of (value, probability) pairs.\n\n Note: in Python3, returns an iterator.\n \"\"\"\n a = self.ps\n b = np.roll(a, 1)\n b[0] = 0\n return zip(self.xs, a-b)\n\n def Shift(self, term):\n \"\"\"Adds a term to the xs.\n\n term: how much to add\n \"\"\"\n new = self.Copy()\n # don't use +=, or else an int array + float yields int array\n new.xs = new.xs + term\n return new\n\n def Scale(self, factor):\n \"\"\"Multiplies the xs by a factor.\n\n factor: what to multiply by\n \"\"\"\n new = self.Copy()\n # don't use *=, or else an int array * float yields int array\n new.xs = new.xs * factor\n return new\n\n def Prob(self, x):\n \"\"\"Returns CDF(x), the probability that corresponds to value x.\n\n Args:\n x: number\n\n Returns:\n float probability\n \"\"\"\n if x < self.xs[0]:\n return 0.0\n index = bisect.bisect(self.xs, x)\n p = self.ps[index-1]\n return p\n\n def Probs(self, xs):\n \"\"\"Gets probabilities for a sequence of values.\n\n xs: any sequence that can be converted to NumPy array\n\n returns: NumPy array of cumulative probabilities\n \"\"\"\n xs = np.asarray(xs)\n index = np.searchsorted(self.xs, xs, side='right')\n ps = self.ps[index-1]\n ps[xs < self.xs[0]] = 0.0\n return ps\n\n ProbArray = Probs\n\n def Value(self, p):\n \"\"\"Returns InverseCDF(p), the value that corresponds to probability p.\n\n Args:\n p: number in the range [0, 1]\n\n Returns:\n number value\n \"\"\"\n if p < 0 or p > 1:\n raise ValueError('Probability p must be in range [0, 1]')\n\n index = bisect.bisect_left(self.ps, p)\n return self.xs[index]\n\n def ValueArray(self, ps):\n \"\"\"Returns InverseCDF(p), the value that corresponds to probability p.\n\n Args:\n ps: NumPy array of numbers in the range [0, 1]\n\n Returns:\n NumPy array of values\n \"\"\"\n ps = np.asarray(ps)\n if np.any(ps < 0) or np.any(ps > 1):\n raise ValueError('Probability p must be in range [0, 1]')\n\n index = np.searchsorted(self.ps, ps, side='left')\n return self.xs[index]\n\n def Percentile(self, p):\n \"\"\"Returns the value that corresponds to percentile p.\n\n Args:\n p: number in the range [0, 100]\n\n Returns:\n number value\n \"\"\"\n return self.Value(p \/ 100.0)\n\n def PercentileRank(self, x):\n \"\"\"Returns the percentile rank of the value x.\n\n x: potential value in the CDF\n\n returns: percentile rank in the range 0 to 100\n \"\"\"\n return self.Prob(x) * 100.0\n\n def Random(self):\n \"\"\"Chooses a random value from this distribution.\"\"\"\n return self.Value(random.random())\n\n def Sample(self, n):\n \"\"\"Generates a random sample from this distribution.\n \n n: int length of the sample\n returns: NumPy array\n \"\"\"\n ps = np.random.random(n)\n return self.ValueArray(ps)\n\n def Mean(self):\n \"\"\"Computes the mean of a CDF.\n\n Returns:\n float mean\n \"\"\"\n old_p = 0\n total = 0.0\n for x, new_p in zip(self.xs, self.ps):\n p = new_p - old_p\n total += p * x\n old_p = new_p\n return total\n\n def CredibleInterval(self, percentage=90):\n \"\"\"Computes the central credible interval.\n\n If percentage=90, computes the 90% CI.\n\n Args:\n percentage: float between 0 and 100\n\n Returns:\n sequence of two floats, low and high\n \"\"\"\n prob = (1 - percentage \/ 100.0) \/ 2\n interval = self.Value(prob), self.Value(1 - prob)\n return interval\n\n ConfidenceInterval = CredibleInterval\n\n def _Round(self, multiplier=1000.0):\n \"\"\"\n An entry is added to the cdf only if the percentile differs\n from the previous value in a significant digit, where the number\n of 
significant digits is determined by multiplier. The\n default is 1000, which keeps log10(1000) = 3 significant digits.\n \"\"\"\n # TODO(write this method)\n raise UnimplementedMethodException()\n\n def Render(self, **options):\n \"\"\"Generates a sequence of points suitable for plotting.\n\n An empirical CDF is a step function; linear interpolation\n can be misleading.\n\n Note: options are ignored\n\n Returns:\n tuple of (xs, ps)\n \"\"\"\n def interleave(a, b):\n c = np.empty(a.shape[0] + b.shape[0])\n c[::2] = a\n c[1::2] = b\n return c\n\n a = np.array(self.xs)\n xs = interleave(a, a)\n shift_ps = np.roll(self.ps, 1)\n shift_ps[0] = 0\n ps = interleave(shift_ps, self.ps)\n return xs, ps\n\n def Max(self, k):\n \"\"\"Computes the CDF of the maximum of k selections from this dist.\n\n k: int\n\n returns: new Cdf\n \"\"\"\n cdf = self.Copy()\n cdf.ps **= k\n return cdf\n\n\ndef MakeCdfFromItems(items, label=None):\n \"\"\"Makes a cdf from an unsorted sequence of (value, frequency) pairs.\n\n Args:\n items: unsorted sequence of (value, frequency) pairs\n label: string label for this CDF\n\n Returns:\n cdf: list of (value, fraction) pairs\n \"\"\"\n return Cdf(dict(items), label=label)\n\n\ndef MakeCdfFromDict(d, label=None):\n \"\"\"Makes a CDF from a dictionary that maps values to frequencies.\n\n Args:\n d: dictionary that maps values to frequencies.\n label: string label for the data.\n\n Returns:\n Cdf object\n \"\"\"\n return Cdf(d, label=label)\n\n\ndef MakeCdfFromList(seq, label=None):\n \"\"\"Creates a CDF from an unsorted sequence.\n\n Args:\n seq: unsorted sequence of sortable values\n label: string label for the cdf\n\n Returns:\n Cdf object\n \"\"\"\n return Cdf(seq, label=label)\n\n\ndef MakeCdfFromHist(hist, label=None):\n \"\"\"Makes a CDF from a Hist object.\n\n Args:\n hist: Pmf.Hist object\n label: string label for the data.\n\n Returns:\n Cdf object\n \"\"\"\n if label is None:\n label = hist.label\n\n return Cdf(hist, label=label)\n\n\ndef MakeCdfFromPmf(pmf, label=None):\n \"\"\"Makes a CDF from a Pmf object.\n\n Args:\n pmf: Pmf.Pmf object\n label: string label for the data.\n\n Returns:\n Cdf object\n \"\"\"\n if label is None:\n label = pmf.label\n\n return Cdf(pmf, label=label)\n\n\nclass UnimplementedMethodException(Exception):\n \"\"\"Exception if someone calls a method that should be overridden.\"\"\"\n\n\nclass Suite(Pmf):\n \"\"\"Represents a suite of hypotheses and their probabilities.\"\"\"\n\n def Update(self, data):\n \"\"\"Updates each hypothesis based on the data.\n\n data: any representation of the data\n\n returns: the normalizing constant\n \"\"\"\n for hypo in self.Values():\n like = self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n return self.Normalize()\n\n def LogUpdate(self, data):\n \"\"\"Updates a suite of hypotheses based on new data.\n\n Modifies the suite directly; if you want to keep the original, make\n a copy.\n\n Note: unlike Update, LogUpdate does not normalize.\n\n Args:\n data: any representation of the data\n \"\"\"\n for hypo in self.Values():\n like = self.LogLikelihood(data, hypo)\n self.Incr(hypo, like)\n\n def UpdateSet(self, dataset):\n \"\"\"Updates each hypothesis based on the dataset.\n\n This is more efficient than calling Update repeatedly because\n it waits until the end to Normalize.\n\n Modifies the suite directly; if you want to keep the original, make\n a copy.\n\n dataset: a sequence of data\n\n returns: the normalizing constant\n \"\"\"\n for data in dataset:\n for hypo in self.Values():\n like = 
self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n return self.Normalize()\n\n def LogUpdateSet(self, dataset):\n \"\"\"Updates each hypothesis based on the dataset.\n\n Modifies the suite directly; if you want to keep the original, make\n a copy.\n\n dataset: a sequence of data\n\n returns: None\n \"\"\"\n for data in dataset:\n self.LogUpdate(data)\n\n def Likelihood(self, data, hypo):\n \"\"\"Computes the likelihood of the data under the hypothesis.\n\n hypo: some representation of the hypothesis\n data: some representation of the data\n \"\"\"\n raise UnimplementedMethodException()\n\n def LogLikelihood(self, data, hypo):\n \"\"\"Computes the log likelihood of the data under the hypothesis.\n\n hypo: some representation of the hypothesis\n data: some representation of the data\n \"\"\"\n raise UnimplementedMethodException()\n\n def Print(self):\n \"\"\"Prints the hypotheses and their probabilities.\"\"\"\n for hypo, prob in sorted(self.Items()):\n print(hypo, prob)\n\n def MakeOdds(self):\n \"\"\"Transforms from probabilities to odds.\n\n Values with prob=0 are removed.\n \"\"\"\n for hypo, prob in self.Items():\n if prob:\n self.Set(hypo, Odds(prob))\n else:\n self.Remove(hypo)\n\n def MakeProbs(self):\n \"\"\"Transforms from odds to probabilities.\"\"\"\n for hypo, odds in self.Items():\n self.Set(hypo, Probability(odds))\n\n\ndef MakeSuiteFromList(t, label=None):\n \"\"\"Makes a suite from an unsorted sequence of values.\n\n Args:\n t: sequence of numbers\n label: string label for this suite\n\n Returns:\n Suite object\n \"\"\"\n hist = MakeHistFromList(t, label=label)\n d = hist.GetDict()\n return MakeSuiteFromDict(d)\n\n\ndef MakeSuiteFromHist(hist, label=None):\n \"\"\"Makes a normalized suite from a Hist object.\n\n Args:\n hist: Hist object\n label: string label\n\n Returns:\n Suite object\n \"\"\"\n if label is None:\n label = hist.label\n\n # make a copy of the dictionary\n d = dict(hist.GetDict())\n return MakeSuiteFromDict(d, label)\n\n\ndef MakeSuiteFromDict(d, label=None):\n \"\"\"Makes a suite from a map from values to probabilities.\n\n Args:\n d: dictionary that maps values to probabilities\n label: string label for this suite\n\n Returns:\n Suite object\n \"\"\"\n suite = Suite(label=label)\n suite.SetDict(d)\n suite.Normalize()\n return suite\n\n\nclass Pdf(object):\n \"\"\"Represents a probability density function (PDF).\"\"\"\n\n def Density(self, x):\n \"\"\"Evaluates this Pdf at x.\n\n Returns: float or NumPy array of probability density\n \"\"\"\n raise UnimplementedMethodException()\n\n def GetLinspace(self):\n \"\"\"Get a linspace for plotting.\n\n Not all subclasses of Pdf implement this.\n\n Returns: numpy array\n \"\"\"\n raise UnimplementedMethodException()\n\n def MakePmf(self, **options):\n \"\"\"Makes a discrete version of this Pdf.\n\n options can include\n label: string\n low: low end of range\n high: high end of range\n n: number of places to evaluate\n\n Returns: new Pmf\n \"\"\"\n label = options.pop('label', '')\n xs, ds = self.Render(**options)\n return Pmf(dict(zip(xs, ds)), label=label)\n\n def Render(self, **options):\n \"\"\"Generates a sequence of points suitable for plotting.\n\n If options includes low and high, it must also include n;\n in that case the density is evaluated an n locations between\n low and high, including both.\n\n If options includes xs, the density is evaluate at those location.\n\n Otherwise, self.GetLinspace is invoked to provide the locations.\n\n Returns:\n tuple of (xs, densities)\n \"\"\"\n low, high = 
options.pop('low', None), options.pop('high', None)\n if low is not None and high is not None:\n n = options.pop('n', 101)\n xs = np.linspace(low, high, n)\n else:\n xs = options.pop('xs', None)\n if xs is None:\n xs = self.GetLinspace()\n \n ds = self.Density(xs)\n return xs, ds\n\n def Items(self):\n \"\"\"Generates a sequence of (value, probability) pairs.\n \"\"\"\n return zip(*self.Render())\n\n\nclass NormalPdf(Pdf):\n \"\"\"Represents the PDF of a Normal distribution.\"\"\"\n\n def __init__(self, mu=0, sigma=1, label=None):\n \"\"\"Constructs a Normal Pdf with given mu and sigma.\n\n mu: mean\n sigma: standard deviation\n label: string\n \"\"\"\n self.mu = mu\n self.sigma = sigma\n self.label = label if label is not None else '_nolegend_'\n\n def __str__(self):\n return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)\n\n def GetLinspace(self):\n \"\"\"Get a linspace for plotting.\n\n Returns: numpy array\n \"\"\"\n low, high = self.mu-3*self.sigma, self.mu+3*self.sigma\n return np.linspace(low, high, 101)\n\n def Density(self, xs):\n \"\"\"Evaluates this Pdf at xs.\n\n xs: scalar or sequence of floats\n\n returns: float or NumPy array of probability density\n \"\"\"\n return stats.norm.pdf(xs, self.mu, self.sigma)\n\n\nclass ExponentialPdf(Pdf):\n \"\"\"Represents the PDF of an exponential distribution.\"\"\"\n\n def __init__(self, lam=1, label=None):\n \"\"\"Constructs an exponential Pdf with given parameter.\n\n lam: rate parameter\n label: string\n \"\"\"\n self.lam = lam\n self.label = label if label is not None else '_nolegend_'\n\n def __str__(self):\n return 'ExponentialPdf(%f)' % (self.lam)\n\n def GetLinspace(self):\n \"\"\"Get a linspace for plotting.\n\n Returns: numpy array\n \"\"\"\n low, high = 0, 5.0\/self.lam\n return np.linspace(low, high, 101)\n\n def Density(self, xs):\n \"\"\"Evaluates this Pdf at xs.\n\n xs: scalar or sequence of floats\n\n returns: float or NumPy array of probability density\n \"\"\"\n return stats.expon.pdf(xs, scale=1.0\/self.lam)\n\n\nclass EstimatedPdf(Pdf):\n \"\"\"Represents a PDF estimated by KDE.\"\"\"\n\n def __init__(self, sample, label=None):\n \"\"\"Estimates the density function based on a sample.\n\n sample: sequence of data\n label: string\n \"\"\"\n self.label = label if label is not None else '_nolegend_'\n self.kde = stats.gaussian_kde(sample)\n low = min(sample)\n high = max(sample)\n self.linspace = np.linspace(low, high, 101)\n\n def __str__(self):\n return 'EstimatedPdf(label=%s)' % str(self.label)\n\n def GetLinspace(self):\n \"\"\"Get a linspace for plotting.\n\n Returns: numpy array\n \"\"\"\n return self.linspace\n\n def Density(self, xs):\n \"\"\"Evaluates this Pdf at xs.\n\n returns: float or NumPy array of probability density\n \"\"\"\n return self.kde.evaluate(xs)\n\n\ndef CredibleInterval(pmf, percentage=90):\n \"\"\"Computes a credible interval for a given distribution.\n\n If percentage=90, computes the 90% CI.\n\n Args:\n pmf: Pmf object representing a posterior distribution\n percentage: float between 0 and 100\n\n Returns:\n sequence of two floats, low and high\n \"\"\"\n cdf = pmf.MakeCdf()\n prob = (1 - percentage \/ 100.0) \/ 2\n interval = cdf.Value(prob), cdf.Value(1 - prob)\n return interval\n\n\ndef PmfProbLess(pmf1, pmf2):\n \"\"\"Probability that a value from pmf1 is less than a value from pmf2.\n\n Args:\n pmf1: Pmf object\n pmf2: Pmf object\n\n Returns:\n float probability\n \"\"\"\n total = 0.0\n for v1, p1 in pmf1.Items():\n for v2, p2 in pmf2.Items():\n if v1 < v2:\n total += p1 * p2\n return 
total\n\n\ndef PmfProbGreater(pmf1, pmf2):\n \"\"\"Probability that a value from pmf1 is less than a value from pmf2.\n\n Args:\n pmf1: Pmf object\n pmf2: Pmf object\n\n Returns:\n float probability\n \"\"\"\n total = 0.0\n for v1, p1 in pmf1.Items():\n for v2, p2 in pmf2.Items():\n if v1 > v2:\n total += p1 * p2\n return total\n\n\ndef PmfProbEqual(pmf1, pmf2):\n \"\"\"Probability that a value from pmf1 equals a value from pmf2.\n\n Args:\n pmf1: Pmf object\n pmf2: Pmf object\n\n Returns:\n float probability\n \"\"\"\n total = 0.0\n for v1, p1 in pmf1.Items():\n for v2, p2 in pmf2.Items():\n if v1 == v2:\n total += p1 * p2\n return total\n\n\ndef RandomSum(dists):\n \"\"\"Chooses a random value from each dist and returns the sum.\n\n dists: sequence of Pmf or Cdf objects\n\n returns: numerical sum\n \"\"\"\n total = sum(dist.Random() for dist in dists)\n return total\n\n\ndef SampleSum(dists, n):\n \"\"\"Draws a sample of sums from a list of distributions.\n\n dists: sequence of Pmf or Cdf objects\n n: sample size\n\n returns: new Pmf of sums\n \"\"\"\n pmf = Pmf(RandomSum(dists) for i in range(n))\n return pmf\n\n\ndef EvalNormalPdf(x, mu, sigma):\n \"\"\"Computes the unnormalized PDF of the normal distribution.\n\n x: value\n mu: mean\n sigma: standard deviation\n \n returns: float probability density\n \"\"\"\n return stats.norm.pdf(x, mu, sigma)\n\n\ndef MakeNormalPmf(mu, sigma, num_sigmas, n=201):\n \"\"\"Makes a PMF discrete approx to a Normal distribution.\n \n mu: float mean\n sigma: float standard deviation\n num_sigmas: how many sigmas to extend in each direction\n n: number of values in the Pmf\n\n returns: normalized Pmf\n \"\"\"\n pmf = Pmf()\n low = mu - num_sigmas * sigma\n high = mu + num_sigmas * sigma\n\n for x in np.linspace(low, high, n):\n p = EvalNormalPdf(x, mu, sigma)\n pmf.Set(x, p)\n pmf.Normalize()\n return pmf\n\n\ndef EvalBinomialPmf(k, n, p):\n \"\"\"Evaluates the binomial PMF.\n\n Returns the probabily of k successes in n trials with probability p.\n \"\"\"\n return stats.binom.pmf(k, n, p)\n \n\ndef EvalHypergeomPmf(k, N, K, n):\n \"\"\"Evaluates the hypergeometric PMF.\n\n Returns the probabily of k successes in n trials from a population\n N with K successes in it.\n \"\"\"\n return stats.hypergeom.pmf(k, N, K, n)\n \n\ndef EvalPoissonPmf(k, lam):\n \"\"\"Computes the Poisson PMF.\n\n k: number of events\n lam: parameter lambda in events per unit time\n\n returns: float probability\n \"\"\"\n # don't use the scipy function (yet). 
for lam=0 it returns NaN;\n # should be 0.0\n # return stats.poisson.pmf(k, lam)\n return lam ** k * math.exp(-lam) \/ special.gamma(k+1)\n\n\ndef MakePoissonPmf(lam, high, step=1):\n \"\"\"Makes a PMF discrete approx to a Poisson distribution.\n\n lam: parameter lambda in events per unit time\n high: upper bound of the Pmf\n\n returns: normalized Pmf\n \"\"\"\n pmf = Pmf()\n for k in range(0, high + 1, step):\n p = EvalPoissonPmf(k, lam)\n pmf.Set(k, p)\n pmf.Normalize()\n return pmf\n\n\ndef EvalExponentialPdf(x, lam):\n \"\"\"Computes the exponential PDF.\n\n x: value\n lam: parameter lambda in events per unit time\n\n returns: float probability density\n \"\"\"\n return lam * math.exp(-lam * x)\n\n\ndef EvalExponentialCdf(x, lam):\n \"\"\"Evaluates CDF of the exponential distribution with parameter lam.\"\"\"\n return 1 - math.exp(-lam * x)\n\n\ndef MakeExponentialPmf(lam, high, n=200):\n \"\"\"Makes a PMF discrete approx to an exponential distribution.\n\n lam: parameter lambda in events per unit time\n high: upper bound\n n: number of values in the Pmf\n\n returns: normalized Pmf\n \"\"\"\n pmf = Pmf()\n for x in np.linspace(0, high, n):\n p = EvalExponentialPdf(x, lam)\n pmf.Set(x, p)\n pmf.Normalize()\n return pmf\n\n\ndef StandardNormalCdf(x):\n \"\"\"Evaluates the CDF of the standard Normal distribution.\n \n See http:\/\/en.wikipedia.org\/wiki\/Normal_distribution\n #Cumulative_distribution_function\n\n Args:\n x: float\n \n Returns:\n float\n \"\"\"\n return (math.erf(x \/ ROOT2) + 1) \/ 2\n\n\ndef EvalNormalCdf(x, mu=0, sigma=1):\n \"\"\"Evaluates the CDF of the normal distribution.\n \n Args:\n x: float\n\n mu: mean parameter\n \n sigma: standard deviation parameter\n \n Returns:\n float\n \"\"\"\n return stats.norm.cdf(x, loc=mu, scale=sigma)\n\n\ndef EvalNormalCdfInverse(p, mu=0, sigma=1):\n \"\"\"Evaluates the inverse CDF of the normal distribution.\n\n See http:\/\/en.wikipedia.org\/wiki\/Normal_distribution#Quantile_function \n\n Args:\n p: float\n\n mu: mean parameter\n \n sigma: standard deviation parameter\n \n Returns:\n float\n \"\"\"\n return stats.norm.ppf(p, loc=mu, scale=sigma)\n\n\ndef EvalLognormalCdf(x, mu=0, sigma=1):\n \"\"\"Evaluates the CDF of the lognormal distribution.\n \n x: float or sequence\n mu: mean parameter\n sigma: standard deviation parameter\n \n Returns: float or sequence\n \"\"\"\n return stats.lognorm.cdf(x, loc=mu, scale=sigma)\n\n\ndef RenderExpoCdf(lam, low, high, n=101):\n \"\"\"Generates sequences of xs and ps for an exponential CDF.\n\n lam: parameter\n low: float\n high: float\n n: number of points to render\n\n returns: numpy arrays (xs, ps)\n \"\"\"\n xs = np.linspace(low, high, n)\n ps = 1 - np.exp(-lam * xs)\n #ps = stats.expon.cdf(xs, scale=1.0\/lam)\n return xs, ps\n\n\ndef RenderNormalCdf(mu, sigma, low, high, n=101):\n \"\"\"Generates sequences of xs and ps for a Normal CDF.\n\n mu: parameter\n sigma: parameter\n low: float\n high: float\n n: number of points to render\n\n returns: numpy arrays (xs, ps)\n \"\"\"\n xs = np.linspace(low, high, n)\n ps = stats.norm.cdf(xs, mu, sigma)\n return xs, ps\n\n\ndef RenderParetoCdf(xmin, alpha, low, high, n=50):\n \"\"\"Generates sequences of xs and ps for a Pareto CDF.\n\n xmin: parameter\n alpha: parameter\n low: float\n high: float\n n: number of points to render\n\n returns: numpy arrays (xs, ps)\n \"\"\"\n if low < xmin:\n low = xmin\n xs = np.linspace(low, high, n)\n ps = 1 - (xs \/ xmin) ** -alpha\n #ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)\n return xs, ps\n\n\nclass 
Beta(object):\n \"\"\"Represents a Beta distribution.\n\n See http:\/\/en.wikipedia.org\/wiki\/Beta_distribution\n \"\"\"\n def __init__(self, alpha=1, beta=1, label=None):\n \"\"\"Initializes a Beta distribution.\"\"\"\n self.alpha = alpha\n self.beta = beta\n self.label = label if label is not None else '_nolegend_'\n\n def Update(self, data):\n \"\"\"Updates a Beta distribution.\n\n data: pair of int (heads, tails)\n \"\"\"\n heads, tails = data\n self.alpha += heads\n self.beta += tails\n\n def Mean(self):\n \"\"\"Computes the mean of this distribution.\"\"\"\n return self.alpha \/ (self.alpha + self.beta)\n\n def Random(self):\n \"\"\"Generates a random variate from this distribution.\"\"\"\n return random.betavariate(self.alpha, self.beta)\n\n def Sample(self, n):\n \"\"\"Generates a random sample from this distribution.\n\n n: int sample size\n \"\"\"\n size = n,\n return np.random.beta(self.alpha, self.beta, size)\n\n def EvalPdf(self, x):\n \"\"\"Evaluates the PDF at x.\"\"\"\n return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)\n\n def MakePmf(self, steps=101, label=None):\n \"\"\"Returns a Pmf of this distribution.\n\n Note: Normally, we just evaluate the PDF at a sequence\n of points and treat the probability density as a probability\n mass.\n\n But if alpha or beta is less than one, we have to be\n more careful because the PDF goes to infinity at x=0\n and x=1. In that case we evaluate the CDF and compute\n differences.\n \"\"\"\n if self.alpha < 1 or self.beta < 1:\n cdf = self.MakeCdf()\n pmf = cdf.MakePmf()\n return pmf\n\n xs = [i \/ (steps - 1.0) for i in range(steps)]\n probs = [self.EvalPdf(x) for x in xs]\n pmf = Pmf(dict(zip(xs, probs)), label=label)\n return pmf\n\n def MakeCdf(self, steps=101):\n \"\"\"Returns the CDF of this distribution.\"\"\"\n xs = [i \/ (steps - 1.0) for i in range(steps)]\n ps = [special.betainc(self.alpha, self.beta, x) for x in xs]\n cdf = Cdf(xs, ps)\n return cdf\n\n\nclass Dirichlet(object):\n \"\"\"Represents a Dirichlet distribution.\n\n See http:\/\/en.wikipedia.org\/wiki\/Dirichlet_distribution\n \"\"\"\n\n def __init__(self, n, conc=1, label=None):\n \"\"\"Initializes a Dirichlet distribution.\n\n n: number of dimensions\n conc: concentration parameter (smaller yields more concentration)\n label: string label\n \"\"\"\n if n < 2:\n raise ValueError('A Dirichlet distribution with '\n 'n<2 makes no sense')\n\n self.n = n\n self.params = np.ones(n, dtype=np.float) * conc\n self.label = label if label is not None else '_nolegend_'\n\n def Update(self, data):\n \"\"\"Updates a Dirichlet distribution.\n\n data: sequence of observations, in order corresponding to params\n \"\"\"\n m = len(data)\n self.params[:m] += data\n\n def Random(self):\n \"\"\"Generates a random variate from this distribution.\n\n Returns: normalized vector of fractions\n \"\"\"\n p = np.random.gamma(self.params)\n return p \/ p.sum()\n\n def Likelihood(self, data):\n \"\"\"Computes the likelihood of the data.\n\n Selects a random vector of probabilities from this distribution.\n\n Returns: float probability\n \"\"\"\n m = len(data)\n if self.n < m:\n return 0\n\n x = data\n p = self.Random()\n q = p[:m] ** x\n return q.prod()\n\n def LogLikelihood(self, data):\n \"\"\"Computes the log likelihood of the data.\n\n Selects a random vector of probabilities from this distribution.\n\n Returns: float log probability\n \"\"\"\n m = len(data)\n if self.n < m:\n return float('-inf')\n\n x = self.Random()\n y = np.log(x[:m]) * data\n return y.sum()\n\n def 
MarginalBeta(self, i):\n \"\"\"Computes the marginal distribution of the ith element.\n\n See http:\/\/en.wikipedia.org\/wiki\/Dirichlet_distribution\n #Marginal_distributions\n\n i: int\n\n Returns: Beta object\n \"\"\"\n alpha0 = self.params.sum()\n alpha = self.params[i]\n return Beta(alpha, alpha0 - alpha)\n\n def PredictivePmf(self, xs, label=None):\n \"\"\"Makes a predictive distribution.\n\n xs: values to go into the Pmf\n\n Returns: Pmf that maps from x to the mean prevalence of x\n \"\"\"\n alpha0 = self.params.sum()\n ps = self.params \/ alpha0\n return Pmf(zip(xs, ps), label=label)\n\n\ndef BinomialCoef(n, k):\n \"\"\"Compute the binomial coefficient \"n choose k\".\n\n n: number of trials\n k: number of successes\n\n Returns: float\n \"\"\"\n return scipy.misc.comb(n, k)\n\n\ndef LogBinomialCoef(n, k):\n \"\"\"Computes the log of the binomial coefficient.\n\n http:\/\/math.stackexchange.com\/questions\/64716\/\n approximating-the-logarithm-of-the-binomial-coefficient\n\n n: number of trials\n k: number of successes\n\n Returns: float\n \"\"\"\n return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)\n\n\ndef NormalProbability(ys, jitter=0.0):\n \"\"\"Generates data for a normal probability plot.\n\n ys: sequence of values\n jitter: float magnitude of jitter added to the ys \n\n returns: numpy arrays xs, ys\n \"\"\"\n n = len(ys)\n xs = np.random.normal(0, 1, n)\n xs.sort()\n \n if jitter:\n ys = Jitter(ys, jitter)\n else:\n ys = np.array(ys)\n ys.sort()\n\n return xs, ys\n\n\ndef Jitter(values, jitter=0.5):\n \"\"\"Jitters the values by adding a uniform variate in (-jitter, jitter).\n\n values: sequence\n jitter: scalar magnitude of jitter\n \n returns: new numpy array\n \"\"\"\n n = len(values)\n return np.random.uniform(-jitter, +jitter, n) + values\n\n\ndef NormalProbabilityPlot(sample, fit_color='0.8', **options):\n \"\"\"Makes a normal probability plot with a fitted line.\n\n sample: sequence of numbers\n fit_color: color string for the fitted line\n options: passed along to Plot\n \"\"\"\n xs, ys = NormalProbability(sample)\n mean, var = MeanVar(sample)\n std = math.sqrt(var)\n\n fit = FitLine(xs, mean, std)\n thinkplot.Plot(*fit, color=fit_color, label='model')\n\n xs, ys = NormalProbability(sample)\n thinkplot.Plot(xs, ys, **options)\n\n \ndef Mean(xs):\n \"\"\"Computes mean.\n\n xs: sequence of values\n\n returns: float mean\n \"\"\"\n return np.mean(xs)\n\n\ndef Var(xs, mu=None, ddof=0):\n \"\"\"Computes variance.\n\n xs: sequence of values\n mu: option known mean\n ddof: delta degrees of freedom\n\n returns: float\n \"\"\"\n xs = np.asarray(xs)\n\n if mu is None:\n mu = xs.mean()\n\n ds = xs - mu\n return np.dot(ds, ds) \/ (len(xs) - ddof)\n\n\ndef Std(xs, mu=None, ddof=0):\n \"\"\"Computes standard deviation.\n\n xs: sequence of values\n mu: option known mean\n ddof: delta degrees of freedom\n\n returns: float\n \"\"\"\n var = Var(xs, mu, ddof)\n return math.sqrt(var)\n\n\ndef MeanVar(xs, ddof=0):\n \"\"\"Computes mean and variance.\n\n Based on http:\/\/stackoverflow.com\/questions\/19391149\/\n numpy-mean-and-variance-from-single-function\n\n xs: sequence of values\n ddof: delta degrees of freedom\n \n returns: pair of float, mean and var\n \"\"\"\n xs = np.asarray(xs)\n mean = xs.mean()\n s2 = Var(xs, mean, ddof)\n return mean, s2\n\n\ndef Trim(t, p=0.01):\n \"\"\"Trims the largest and smallest elements of t.\n\n Args:\n t: sequence of numbers\n p: fraction of values to trim off each end\n\n Returns:\n sequence of values\n \"\"\"\n n = int(p * 
len(t))\n t = sorted(t)[n:-n]\n return t\n\n\ndef TrimmedMean(t, p=0.01):\n \"\"\"Computes the trimmed mean of a sequence of numbers.\n\n Args:\n t: sequence of numbers\n p: fraction of values to trim off each end\n\n Returns:\n float\n \"\"\"\n t = Trim(t, p)\n return Mean(t)\n\n\ndef TrimmedMeanVar(t, p=0.01):\n \"\"\"Computes the trimmed mean and variance of a sequence of numbers.\n\n Side effect: sorts the list.\n\n Args:\n t: sequence of numbers\n p: fraction of values to trim off each end\n\n Returns:\n float\n \"\"\"\n t = Trim(t, p)\n mu, var = MeanVar(t)\n return mu, var\n\n\ndef CohenEffectSize(group1, group2):\n \"\"\"Compute Cohen's d.\n\n group1: Series or NumPy array\n group2: Series or NumPy array\n\n returns: float\n \"\"\"\n diff = group1.mean() - group2.mean()\n\n n1, n2 = len(group1), len(group2)\n var1 = group1.var()\n var2 = group2.var()\n\n pooled_var = (n1 * var1 + n2 * var2) \/ (n1 + n2)\n d = diff \/ math.sqrt(pooled_var)\n return d\n\n\ndef Cov(xs, ys, meanx=None, meany=None):\n \"\"\"Computes Cov(X, Y).\n\n Args:\n xs: sequence of values\n ys: sequence of values\n meanx: optional float mean of xs\n meany: optional float mean of ys\n\n Returns:\n Cov(X, Y)\n \"\"\"\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n\n if meanx is None:\n meanx = np.mean(xs)\n if meany is None:\n meany = np.mean(ys)\n\n cov = np.dot(xs-meanx, ys-meany) \/ len(xs)\n return cov\n\n\ndef Corr(xs, ys):\n \"\"\"Computes Corr(X, Y).\n\n Args:\n xs: sequence of values\n ys: sequence of values\n\n Returns:\n Corr(X, Y)\n \"\"\"\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n\n meanx, varx = MeanVar(xs)\n meany, vary = MeanVar(ys)\n\n corr = Cov(xs, ys, meanx, meany) \/ math.sqrt(varx * vary)\n\n return corr\n\n\ndef SerialCorr(series, lag=1):\n \"\"\"Computes the serial correlation of a series.\n\n series: Series\n lag: integer number of intervals to shift\n\n returns: float correlation\n \"\"\"\n xs = series[lag:]\n ys = series.shift(lag)[lag:]\n corr = Corr(xs, ys)\n return corr\n\n\ndef SpearmanCorr(xs, ys):\n \"\"\"Computes Spearman's rank correlation.\n\n Args:\n xs: sequence of values\n ys: sequence of values\n\n Returns:\n float Spearman's correlation\n \"\"\"\n xranks = pandas.Series(xs).rank()\n yranks = pandas.Series(ys).rank()\n return Corr(xranks, yranks)\n\n\ndef MapToRanks(t):\n \"\"\"Returns a list of ranks corresponding to the elements in t.\n\n Args:\n t: sequence of numbers\n \n Returns:\n list of integer ranks, starting at 1\n \"\"\"\n # pair up each value with its index\n pairs = enumerate(t)\n \n # sort by value\n sorted_pairs = sorted(pairs, key=itemgetter(1))\n\n # pair up each pair with its rank\n ranked = enumerate(sorted_pairs)\n\n # sort by index\n resorted = sorted(ranked, key=lambda trip: trip[1][0])\n\n # extract the ranks\n ranks = [trip[0]+1 for trip in resorted]\n return ranks\n\n\ndef LeastSquares(xs, ys):\n \"\"\"Computes a linear least squares fit for ys as a function of xs.\n\n Args:\n xs: sequence of values\n ys: sequence of values\n\n Returns:\n tuple of (intercept, slope)\n \"\"\"\n meanx, varx = MeanVar(xs)\n meany = Mean(ys)\n\n slope = Cov(xs, ys, meanx, meany) \/ varx\n inter = meany - slope * meanx\n\n return inter, slope\n\n\ndef FitLine(xs, inter, slope):\n \"\"\"Fits a line to the given data.\n\n xs: sequence of x\n\n returns: tuple of numpy arrays (sorted xs, fit ys)\n \"\"\"\n fit_xs = np.sort(xs)\n fit_ys = inter + slope * fit_xs\n return fit_xs, fit_ys\n\n\ndef Residuals(xs, ys, inter, slope):\n \"\"\"Computes residuals for a linear fit with 
parameters inter and slope.\n\n Args:\n xs: independent variable\n ys: dependent variable\n inter: float intercept\n slope: float slope\n\n Returns:\n list of residuals\n \"\"\"\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n res = ys - (inter + slope * xs)\n return res\n\n\ndef CoefDetermination(ys, res):\n \"\"\"Computes the coefficient of determination (R^2) for given residuals.\n\n Args:\n ys: dependent variable\n res: residuals\n \n Returns:\n float coefficient of determination\n \"\"\"\n return 1 - Var(res) \/ Var(ys)\n\n\ndef CorrelatedGenerator(rho):\n \"\"\"Generates standard normal variates with serial correlation.\n\n rho: target coefficient of correlation\n\n Returns: iterable\n \"\"\"\n x = random.gauss(0, 1)\n yield x\n\n sigma = math.sqrt(1 - rho**2)\n while True:\n x = random.gauss(x * rho, sigma)\n yield x\n\n\ndef CorrelatedNormalGenerator(mu, sigma, rho):\n \"\"\"Generates normal variates with serial correlation.\n\n mu: mean of variate\n sigma: standard deviation of variate\n rho: target coefficient of correlation\n\n Returns: iterable\n \"\"\"\n for x in CorrelatedGenerator(rho):\n yield x * sigma + mu\n\n\ndef RawMoment(xs, k):\n \"\"\"Computes the kth raw moment of xs.\n \"\"\"\n return sum(x**k for x in xs) \/ len(xs)\n\n\ndef CentralMoment(xs, k):\n \"\"\"Computes the kth central moment of xs.\n \"\"\"\n mean = RawMoment(xs, 1)\n return sum((x - mean)**k for x in xs) \/ len(xs)\n\n\ndef StandardizedMoment(xs, k):\n \"\"\"Computes the kth standardized moment of xs.\n \"\"\"\n var = CentralMoment(xs, 2)\n std = math.sqrt(var)\n return CentralMoment(xs, k) \/ std**k\n\n\ndef Skewness(xs):\n \"\"\"Computes skewness.\n \"\"\"\n return StandardizedMoment(xs, 3)\n\n\ndef Median(xs):\n \"\"\"Computes the median (50th percentile) of a sequence.\n\n xs: sequence or anything else that can initialize a Cdf\n\n returns: float\n \"\"\"\n cdf = Cdf(xs)\n return cdf.Value(0.5)\n\n\ndef IQR(xs):\n \"\"\"Computes the interquartile of a sequence.\n\n xs: sequence or anything else that can initialize a Cdf\n\n returns: pair of floats\n \"\"\"\n cdf = Cdf(xs)\n return cdf.Value(0.25), cdf.Value(0.75)\n\n\ndef PearsonMedianSkewness(xs):\n \"\"\"Computes the Pearson median skewness.\n \"\"\"\n median = Median(xs)\n mean = RawMoment(xs, 1)\n var = CentralMoment(xs, 2)\n std = math.sqrt(var)\n gp = 3 * (mean - median) \/ std\n return gp\n\n\nclass FixedWidthVariables(object):\n \"\"\"Represents a set of variables in a fixed width file.\"\"\"\n\n def __init__(self, variables, index_base=0):\n \"\"\"Initializes.\n\n variables: DataFrame\n index_base: are the indices 0 or 1 based?\n\n Attributes:\n colspecs: list of (start, end) index tuples\n names: list of string variable names\n \"\"\"\n self.variables = variables\n\n # note: by default, subtract 1 from colspecs\n self.colspecs = variables[['start', 'end']] - index_base\n\n # convert colspecs to a list of pair of int\n self.colspecs = self.colspecs.astype(np.int).values.tolist()\n self.names = variables['name']\n\n def ReadFixedWidth(self, filename, **options):\n \"\"\"Reads a fixed width ASCII file.\n\n filename: string filename\n\n returns: DataFrame\n \"\"\"\n df = pandas.read_fwf(filename,\n colspecs=self.colspecs, \n names=self.names,\n **options)\n return df\n\n\ndef ReadStataDct(dct_file, **options):\n \"\"\"Reads a Stata dictionary file.\n\n dct_file: string filename\n options: dict of options passed to open()\n\n returns: FixedWidthVariables object\n \"\"\"\n type_map = dict(byte=int, int=int, long=int, float=float, 
double=float)\n\n var_info = []\n for line in open(dct_file, **options):\n match = re.search( r'_column\\(([^)]*)\\)', line)\n if match:\n start = int(match.group(1))\n t = line.split()\n vtype, name, fstring = t[1:4]\n name = name.lower()\n if vtype.startswith('str'):\n vtype = str\n else:\n vtype = type_map[vtype]\n long_desc = ' '.join(t[4:]).strip('\"')\n var_info.append((start, vtype, name, fstring, long_desc))\n \n columns = ['start', 'type', 'name', 'fstring', 'desc']\n variables = pandas.DataFrame(var_info, columns=columns)\n\n # fill in the end column by shifting the start column\n variables['end'] = variables.start.shift(-1)\n variables.loc[len(variables)-1, 'end'] = 0\n\n dct = FixedWidthVariables(variables, index_base=1)\n return dct\n\n\ndef Resample(xs, n=None):\n \"\"\"Draw a sample from xs with the same length as xs.\n\n xs: sequence\n n: sample size (default: len(xs))\n\n returns: NumPy array\n \"\"\"\n if n is None:\n n = len(xs)\n return np.random.choice(xs, n, replace=True)\n\n\ndef SampleRows(df, nrows, replace=False):\n \"\"\"Choose a sample of rows from a DataFrame.\n\n df: DataFrame\n nrows: number of rows\n replace: whether to sample with replacement\n\n returns: DataDf\n \"\"\"\n indices = np.random.choice(df.index, nrows, replace=replace)\n sample = df.loc[indices]\n return sample\n\n\ndef ResampleRows(df):\n \"\"\"Resamples rows from a DataFrame.\n\n df: DataFrame\n\n returns: DataFrame\n \"\"\"\n return SampleRows(df, len(df), replace=True)\n\n\ndef ResampleRowsWeighted(df, column='finalwgt'):\n \"\"\"Resamples a DataFrame using probabilities proportional to given column.\n\n df: DataFrame\n column: string column name to use as weights\n\n returns: DataFrame\n \"\"\"\n weights = df[column]\n cdf = Cdf(dict(weights))\n indices = cdf.Sample(len(weights))\n sample = df.loc[indices]\n return sample\n\n\ndef PercentileRow(array, p):\n \"\"\"Selects the row from a sorted array that maps to percentile p.\n\n p: float 0--100\n\n returns: NumPy array (one row)\n \"\"\"\n rows, cols = array.shape\n index = int(rows * p \/ 100)\n return array[index,]\n\n\ndef PercentileRows(ys_seq, percents):\n \"\"\"Given a collection of lines, selects percentiles along vertical axis.\n\n For example, if ys_seq contains simulation results like ys as a\n function of time, and percents contains (5, 95), the result would\n be a 90% CI for each vertical slice of the simulation results.\n\n ys_seq: sequence of lines (y values)\n percents: list of percentiles (0-100) to select\n\n returns: list of NumPy arrays, one for each percentile\n \"\"\"\n nrows = len(ys_seq)\n ncols = len(ys_seq[0])\n array = np.zeros((nrows, ncols))\n\n for i, ys in enumerate(ys_seq):\n array[i,] = ys\n\n array = np.sort(array, axis=0)\n\n rows = [PercentileRow(array, p) for p in percents]\n return rows\n\n\ndef Smooth(xs, sigma=2, **options):\n \"\"\"Smooths a NumPy array with a Gaussian filter.\n\n xs: sequence\n sigma: standard deviation of the filter\n \"\"\"\n return ndimage.filters.gaussian_filter1d(xs, sigma, **options)\n\n\nclass HypothesisTest(object):\n \"\"\"Represents a hypothesis test.\"\"\"\n\n def __init__(self, data):\n \"\"\"Initializes.\n\n data: data in whatever form is relevant\n \"\"\"\n self.data = data\n self.MakeModel()\n self.actual = self.TestStatistic(data)\n self.test_stats = None\n self.test_cdf = None\n\n def PValue(self, iters=1000):\n \"\"\"Computes the distribution of the test statistic and p-value.\n\n iters: number of iterations\n\n returns: float p-value\n \"\"\"\n self.test_stats = 
[self.TestStatistic(self.RunModel()) \n for _ in range(iters)]\n self.test_cdf = Cdf(self.test_stats)\n\n count = sum(1 for x in self.test_stats if x >= self.actual)\n return count \/ iters\n\n def MaxTestStat(self):\n \"\"\"Returns the largest test statistic seen during simulations.\n \"\"\"\n return max(self.test_stats)\n\n def PlotCdf(self, label=None):\n \"\"\"Draws a Cdf with vertical lines at the observed test stat.\n \"\"\"\n def VertLine(x):\n \"\"\"Draws a vertical line at x.\"\"\"\n thinkplot.Plot([x, x], [0, 1], color='0.8')\n\n VertLine(self.actual)\n thinkplot.Cdf(self.test_cdf, label=label)\n\n def TestStatistic(self, data):\n \"\"\"Computes the test statistic.\n\n data: data in whatever form is relevant \n \"\"\"\n raise UnimplementedMethodException()\n\n def MakeModel(self):\n \"\"\"Build a model of the null hypothesis.\n \"\"\"\n pass\n\n def RunModel(self):\n \"\"\"Run the model of the null hypothesis.\n\n returns: simulated data\n \"\"\"\n raise UnimplementedMethodException()\n\n\ndef main():\n pass\n \n\nif __name__ == '__main__':\n main()\n","license":"gpl-3.0"} {"repo_name":"paulorauber\/nn","path":"examples\/rnn.py","copies":"1","size":"2389","content":"import numpy as np\nfrom sklearn.utils import check_random_state\n\nfrom nn.model.recurrent import RecurrentNetwork\n\nrandom_state = check_random_state(None)\n\ndef nback(n, k, length):\n \"\"\"Random n-back targets given n, number of digits k and sequence length\"\"\"\n Xi = random_state.randint(k, size=length)\n yi = np.zeros(length, dtype=int)\n \n for t in range(n, length):\n yi[t] = (Xi[t - n] == Xi[t]) \n \n return Xi, yi\n \ndef one_of_k(Xi_, k):\n Xi = np.zeros((len(Xi_), k))\n for t, Xit in np.ndenumerate(Xi_):\n Xi[t, Xit] = 1 \n \n return Xi\n \ndef nback_dataset(n_sequences, mean_length, std_length, n, k):\n X, y = [], []\n \n for _ in range(n_sequences):\n length = random_state.normal(loc=mean_length, scale=std_length)\n length = int(max(n + 1, length))\n \n Xi_, yi = nback(n, k, length)\n Xi = one_of_k(Xi_, k)\n \n X.append(Xi)\n y.append(yi)\n \n return X, y\n \ndef nback_example():\n # Input dimension\n k = 4\n # n-back\n n = 3\n \n n_sequences = 100\n mean_length = 20\n std_length = 5\n \n # Training \n Xtrain, ytrain = nback_dataset(n_sequences, mean_length, std_length, n, k)\n \n rnn = RecurrentNetwork(64, learning_rate=2.0, n_epochs=30, \n lmbda=0.0, mu=0.2, output_activation='softmax', \n random_state=None, verbose=1)\n \n rnn.fit(Xtrain, ytrain)\n \n # Evaluating\n Xtest, ytest = nback_dataset(5*n_sequences, 5*mean_length, 5*std_length, n, k)\n \n print('Average accuracy: {0:.3f}'.format(rnn.score(Xtest, ytest)))\n \n acc_zeros = 0.0\n for yi in ytest:\n acc_zeros += float((yi == 0).sum()) \/ len(yi)\n acc_zeros \/= len(ytest)\n print('Negative guess accuracy: {0:.3f}'.format(acc_zeros))\n \n # Example\n Xi_ = [3, 2, 1, 3, 2, 1, 3, 2, 2, 1, 2, 3, 1, 2, 0, 0, 2, 0]\n print('\\nExample sequence: {0}'.format(Xi_))\n yi = np.zeros(len(Xi_), dtype=int)\n for t in range(n, len(Xi_)):\n yi[t] = (Xi_[t - n] == Xi_[t]) \n \n Xi = one_of_k(Xi_, k)\n \n yipred = rnn.predict([Xi])[0]\n print('Correct: \\t{0}'.format(yi))\n print('Predicted: \\t{0}'.format(yipred))\n print('Accuracy: {0:.3f}'.format(float((yi == yipred).sum())\/len(yi)))\n\ndef main():\n nback_example()\n\nif __name__ == \"__main__\":\n main()","license":"mit"} {"repo_name":"agopalak\/football_pred","path":"pre_proc\/proc_data.py","copies":"1","size":"4667","content":"\nimport sys\nimport yaml\nimport re\nimport datetime as DT\n\nimport 
logging\nfrom rainbow_logging_handler import RainbowLoggingHandler\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn_pandas import DataFrameMapper\n\n# Capturing current module. Needed to call getattr on this module\nthis_module = sys.modules[__name__]\n\n# Setup logging module\n# TODO: Figure out a standard way to install\/handle logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(filename)s:%(lineno)4s - %(funcName)15s()] %(levelname)8s: %(message)s')\n\n# Setup RainbowLoggingHandler\nhandler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True))\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n# Converting Boolean to String during YAML load\n# Done to workaround quirkness with PyYAML\n\ndef bool_constructor(self, node):\n value = self.construct_yaml_bool(node)\n if value == False:\n return 'False'\n else:\n return 'True'\nyaml.Loader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)\nyaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)\n\n# Load data from CSV, configuration file\n# Process data and provide input\/output data frames\n\ndef load_data(data_csv, data_cfg):\n\n # Load Data YAML configuration file\n with open(data_cfg, 'r') as yf:\n data = yaml.load(yf)\n\n # Read CSV into data frame\n df = pd.read_csv(data_csv)\n # Filling holes with zeros\n df.fillna(0, inplace=True)\n\n # Process Columns\n for item in data:\n if item['include'] == False:\n continue\n else:\n colnum = item['column']\n logger.info('Processing Column %s', colnum)\n\n # Create a column data frame\n col_df = df.iloc[:, [colnum-1]].copy()\n logger.debug(col_df.columns)\n logger.debug('Preprocess Column Input\\n%s', col_df.head())\n\n # Apply transformations\n col_df = do_transform(col_df, item['transform'])\n logger.debug('Preprocess Column Output\\n%s', col_df.head())\n\n# Perform Data Transformations\ndef do_transform(df, tf):\n for func in tf:\n funckey, funcval = func.items()[0]\n\n # Getting transformation call name\n transform = getattr(this_module, funckey, None)\n\n # Splitting funcval to individual function arguments\n # First argument is True\/False to indicate if transform is called\n try:\n pattern = re.compile('\\s*,\\s*')\n funcvals = pattern.split(funcval)\n logger.debug('Funcvals --> %s', funcvals)\n except AttributeError:\n funcvals = [funcval]\n\n # Calling transformation\n if funcvals[0] == 'True':\n try:\n logger.debug('Funckey --> %s', funckey)\n df = transform(df, funcvals[1:])\n except AttributeError:\n logger.error('Function %s has not been implemented!', funckey)\n return df\n\n# Performs feature scaling on data frame\n# TODO: scale - Add implementation to handle val\ndef scale(df, val):\n logger.info('Function %s called..', sys._getframe().f_code.co_name)\n mms = preprocessing.MinMaxScaler()\n return pd.DataFrame(mms.fit_transform(df.values.ravel().reshape(-1, 1)), columns=df.columns)\n\n# conv2num: Converts column data to ordered integers\n# TODO: conv2num - Add implementation to handle args\ndef conv2num(df, args):\n logger.info('Function %s called..', sys._getframe().f_code.co_name)\n le = preprocessing.LabelEncoder()\n return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)\n\n# conv2bin: Converts column data to binary\n# TODO: conv2bin - Add implementation to handle args\ndef conv2bin(df, args):\n logger.info('Function %s called..', sys._getframe().f_code.co_name)\n le = 
preprocessing.LabelBinarizer()\n return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)\n\n# conv2timedelta: Converts column data to age\n# TODO: conv2timedelta - Current returns in years. May need to make it more scalable\ndef conv2timedelta(df, args):\n logger.info('Function %s called..', sys._getframe().f_code.co_name)\n if args[1] == 'now':\n refdate = pd.Timestamp(DT.datetime.now())\n else:\n refdate = pd.Timestamp(DT.datetime.strptime(args[1], args[0]))\n logger.debug('Reference date is: %s', refdate)\n df = pd.DataFrame((refdate - pd.to_datetime(df.values.ravel())), columns=df.columns)\n return df.apply(lambda x: (x\/np.timedelta64(1, 'Y')).astype(int))\n\n# Main Program\nif __name__ == '__main__':\n load_data('nflData.csv', 'datacfg.yaml')\n","license":"mit"} {"repo_name":"alexeyum\/scikit-learn","path":"sklearn\/decomposition\/tests\/test_incremental_pca.py","copies":"297","size":"8265","content":"\"\"\"Tests for Incremental PCA.\"\"\"\nimport numpy as np\n\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_raises\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA, IncrementalPCA\n\niris = datasets.load_iris()\n\n\ndef test_incremental_pca():\n # Incremental PCA on dense arrays.\n X = iris.data\n batch_size = X.shape[0] \/\/ 3\n ipca = IncrementalPCA(n_components=2, batch_size=batch_size)\n pca = PCA(n_components=2)\n pca.fit_transform(X)\n\n X_transformed = ipca.fit_transform(X)\n\n np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))\n assert_almost_equal(ipca.explained_variance_ratio_.sum(),\n pca.explained_variance_ratio_.sum(), 1)\n\n for n_components in [1, 2, X.shape[1]]:\n ipca = IncrementalPCA(n_components, batch_size=batch_size)\n ipca.fit(X)\n cov = ipca.get_covariance()\n precision = ipca.get_precision()\n assert_array_almost_equal(np.dot(cov, precision),\n np.eye(X.shape[1]))\n\n\ndef test_incremental_pca_check_projection():\n # Test that the projection of data is correct.\n rng = np.random.RandomState(1999)\n n, p = 100, 3\n X = rng.randn(n, p) * .1\n X[:10] += np.array([3, 4, 5])\n Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])\n\n # Get the reconstruction of the generated data X\n # Note that Xt has the same \"components\" as X, just separated\n # This is what we want to ensure is recreated correctly\n Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)\n\n # Normalize\n Yt \/= np.sqrt((Yt ** 2).sum())\n\n # Make sure that the first element of Yt is ~1, this means\n # the reconstruction worked as expected\n assert_almost_equal(np.abs(Yt[0][0]), 1., 1)\n\n\ndef test_incremental_pca_inverse():\n # Test that the projection of data can be inverted.\n rng = np.random.RandomState(1999)\n n, p = 50, 3\n X = rng.randn(n, p) # spherical data\n X[:, 1] *= .00001 # make middle component relatively small\n X += [5, 4, 3] # make a large mean\n\n # same check that we can find the original data from the transformed\n # signal (since the data is almost of rank n_components)\n ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)\n Y = ipca.transform(X)\n Y_inverse = ipca.inverse_transform(Y)\n assert_almost_equal(X, Y_inverse, decimal=3)\n\n\ndef test_incremental_pca_validation():\n # Test that n_components is >=1 and <= n_features.\n X = [[0, 1], [1, 0]]\n for n_components in [-1, 0, .99, 3]:\n assert_raises(ValueError, IncrementalPCA(n_components,\n batch_size=10).fit, X)\n\n\ndef 
test_incremental_pca_set_params():\n # Test that components_ sign is stable over batch sizes.\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 20\n X = rng.randn(n_samples, n_features)\n X2 = rng.randn(n_samples, n_features)\n X3 = rng.randn(n_samples, n_features)\n ipca = IncrementalPCA(n_components=20)\n ipca.fit(X)\n # Decreasing number of components\n ipca.set_params(n_components=10)\n assert_raises(ValueError, ipca.partial_fit, X2)\n # Increasing number of components\n ipca.set_params(n_components=15)\n assert_raises(ValueError, ipca.partial_fit, X3)\n # Returning to original setting\n ipca.set_params(n_components=20)\n ipca.partial_fit(X)\n\n\ndef test_incremental_pca_num_features_change():\n # Test that changing n_components will raise an error.\n rng = np.random.RandomState(1999)\n n_samples = 100\n X = rng.randn(n_samples, 20)\n X2 = rng.randn(n_samples, 50)\n ipca = IncrementalPCA(n_components=None)\n ipca.fit(X)\n assert_raises(ValueError, ipca.partial_fit, X2)\n\n\ndef test_incremental_pca_batch_signs():\n # Test that components_ sign is stable over batch sizes.\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 3\n X = rng.randn(n_samples, n_features)\n all_components = []\n batch_sizes = np.arange(10, 20)\n for batch_size in batch_sizes:\n ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)\n all_components.append(ipca.components_)\n\n for i, j in zip(all_components[:-1], all_components[1:]):\n assert_almost_equal(np.sign(i), np.sign(j), decimal=6)\n\n\ndef test_incremental_pca_batch_values():\n # Test that components_ values are stable over batch sizes.\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 3\n X = rng.randn(n_samples, n_features)\n all_components = []\n batch_sizes = np.arange(20, 40, 3)\n for batch_size in batch_sizes:\n ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)\n all_components.append(ipca.components_)\n\n for i, j in zip(all_components[:-1], all_components[1:]):\n assert_almost_equal(i, j, decimal=1)\n\n\ndef test_incremental_pca_partial_fit():\n # Test that fit and partial_fit get equivalent results.\n rng = np.random.RandomState(1999)\n n, p = 50, 3\n X = rng.randn(n, p) # spherical data\n X[:, 1] *= .00001 # make middle component relatively small\n X += [5, 4, 3] # make a large mean\n\n # same check that we can find the original data from the transformed\n # signal (since the data is almost of rank n_components)\n batch_size = 10\n ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)\n pipca = IncrementalPCA(n_components=2, batch_size=batch_size)\n # Add one to make sure endpoint is included\n batch_itr = np.arange(0, n + 1, batch_size)\n for i, j in zip(batch_itr[:-1], batch_itr[1:]):\n pipca.partial_fit(X[i:j, :])\n assert_almost_equal(ipca.components_, pipca.components_, decimal=3)\n\n\ndef test_incremental_pca_against_pca_iris():\n # Test that IncrementalPCA and PCA are approximate (to a sign flip).\n X = iris.data\n\n Y_pca = PCA(n_components=2).fit_transform(X)\n Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)\n\n assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)\n\n\ndef test_incremental_pca_against_pca_random_data():\n # Test that IncrementalPCA and PCA are approximate (to a sign flip).\n rng = np.random.RandomState(1999)\n n_samples = 100\n n_features = 3\n X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)\n\n Y_pca = PCA(n_components=3).fit_transform(X)\n Y_ipca = 
IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)\n\n assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)\n\n\ndef test_explained_variances():\n # Test that PCA and IncrementalPCA calculations match\n X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,\n effective_rank=10, random_state=1999)\n prec = 3\n n_samples, n_features = X.shape\n for nc in [None, 99]:\n pca = PCA(n_components=nc).fit(X)\n ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)\n assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,\n decimal=prec)\n assert_almost_equal(pca.explained_variance_ratio_,\n ipca.explained_variance_ratio_, decimal=prec)\n assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,\n decimal=prec)\n\n\ndef test_whitening():\n # Test that PCA and IncrementalPCA transforms match to sign flip.\n X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,\n effective_rank=2, random_state=1999)\n prec = 3\n n_samples, n_features = X.shape\n for nc in [None, 9]:\n pca = PCA(whiten=True, n_components=nc).fit(X)\n ipca = IncrementalPCA(whiten=True, n_components=nc,\n batch_size=250).fit(X)\n\n Xt_pca = pca.transform(X)\n Xt_ipca = ipca.transform(X)\n assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)\n Xinv_ipca = ipca.inverse_transform(Xt_ipca)\n Xinv_pca = pca.inverse_transform(Xt_pca)\n assert_almost_equal(X, Xinv_ipca, decimal=prec)\n assert_almost_equal(X, Xinv_pca, decimal=prec)\n assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)\n","license":"bsd-3-clause"} {"repo_name":"Akshay0724\/scikit-learn","path":"sklearn\/gaussian_process\/tests\/test_kernels.py","copies":"3","size":"12567","content":"\"\"\"Testing for kernels for Gaussian processes.\"\"\"\n\n# Author: Jan Hendrik Metzen \n# License: BSD 3 clause\n\nfrom sklearn.externals.funcsigs import signature\n\nimport numpy as np\n\nfrom sklearn.gaussian_process.kernels import _approx_fprime\n\nfrom sklearn.metrics.pairwise \\\n import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels\nfrom sklearn.gaussian_process.kernels \\\n import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,\n ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,\n Exponentiation)\nfrom sklearn.base import clone\n\nfrom sklearn.utils.testing import (assert_equal, assert_almost_equal,\n assert_not_equal, assert_array_equal,\n assert_array_almost_equal)\n\n\nX = np.random.RandomState(0).normal(0, 1, (5, 2))\nY = np.random.RandomState(0).normal(0, 1, (6, 2))\n\nkernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)\nkernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),\n ConstantKernel(constant_value=10.0),\n 2.0 * RBF(length_scale=0.33, length_scale_bounds=\"fixed\"),\n 2.0 * RBF(length_scale=0.5), kernel_white,\n 2.0 * RBF(length_scale=[0.5, 2.0]),\n 2.0 * Matern(length_scale=0.33, length_scale_bounds=\"fixed\"),\n 2.0 * Matern(length_scale=0.5, nu=0.5),\n 2.0 * Matern(length_scale=1.5, nu=1.5),\n 2.0 * Matern(length_scale=2.5, nu=2.5),\n 2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),\n 3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),\n 4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),\n RationalQuadratic(length_scale=0.5, alpha=1.5),\n ExpSineSquared(length_scale=0.5, periodicity=1.5),\n DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,\n RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]\nfor metric in PAIRWISE_KERNEL_FUNCTIONS:\n if metric in [\"additive_chi2\", \"chi2\"]:\n continue\n kernels.append(PairwiseKernel(gamma=1.0, 
metric=metric))\n\n\ndef test_kernel_gradient():\n # Compare analytic and numeric gradient of kernels.\n for kernel in kernels:\n K, K_gradient = kernel(X, eval_gradient=True)\n\n assert_equal(K_gradient.shape[0], X.shape[0])\n assert_equal(K_gradient.shape[1], X.shape[0])\n assert_equal(K_gradient.shape[2], kernel.theta.shape[0])\n\n def eval_kernel_for_theta(theta):\n kernel_clone = kernel.clone_with_theta(theta)\n K = kernel_clone(X, eval_gradient=False)\n return K\n\n K_gradient_approx = \\\n _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)\n\n assert_almost_equal(K_gradient, K_gradient_approx, 4)\n\n\ndef test_kernel_theta():\n # Check that parameter vector theta of kernel is set correctly.\n for kernel in kernels:\n if isinstance(kernel, KernelOperator) \\\n or isinstance(kernel, Exponentiation): # skip non-basic kernels\n continue\n theta = kernel.theta\n _, K_gradient = kernel(X, eval_gradient=True)\n\n # Determine kernel parameters that contribute to theta\n init_sign = signature(kernel.__class__.__init__).parameters.values()\n args = [p.name for p in init_sign if p.name != 'self']\n theta_vars = map(lambda s: s[0:-len(\"_bounds\")],\n filter(lambda s: s.endswith(\"_bounds\"), args))\n assert_equal(\n set(hyperparameter.name\n for hyperparameter in kernel.hyperparameters),\n set(theta_vars))\n\n # Check that values returned in theta are consistent with\n # hyperparameter values (being their logarithms)\n for i, hyperparameter in enumerate(kernel.hyperparameters):\n assert_equal(theta[i],\n np.log(getattr(kernel, hyperparameter.name)))\n\n # Fixed kernel parameters must be excluded from theta and gradient.\n for i, hyperparameter in enumerate(kernel.hyperparameters):\n # create copy with certain hyperparameter fixed\n params = kernel.get_params()\n params[hyperparameter.name + \"_bounds\"] = \"fixed\"\n kernel_class = kernel.__class__\n new_kernel = kernel_class(**params)\n # Check that theta and K_gradient are identical with the fixed\n # dimension left out\n _, K_gradient_new = new_kernel(X, eval_gradient=True)\n assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)\n assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)\n if i > 0:\n assert_equal(theta[:i], new_kernel.theta[:i])\n assert_array_equal(K_gradient[..., :i],\n K_gradient_new[..., :i])\n if i + 1 < len(kernel.hyperparameters):\n assert_equal(theta[i + 1:], new_kernel.theta[i:])\n assert_array_equal(K_gradient[..., i + 1:],\n K_gradient_new[..., i:])\n\n # Check that values of theta are modified correctly\n for i, hyperparameter in enumerate(kernel.hyperparameters):\n theta[i] = np.log(42)\n kernel.theta = theta\n assert_almost_equal(getattr(kernel, hyperparameter.name), 42)\n\n setattr(kernel, hyperparameter.name, 43)\n assert_almost_equal(kernel.theta[i], np.log(43))\n\n\ndef test_auto_vs_cross():\n # Auto-correlation and cross-correlation should be consistent.\n for kernel in kernels:\n if kernel == kernel_white:\n continue # Identity is not satisfied on diagonal\n K_auto = kernel(X)\n K_cross = kernel(X, X)\n assert_almost_equal(K_auto, K_cross, 5)\n\n\ndef test_kernel_diag():\n # Test that diag method of kernel returns consistent results.\n for kernel in kernels:\n K_call_diag = np.diag(kernel(X))\n K_diag = kernel.diag(X)\n assert_almost_equal(K_call_diag, K_diag, 5)\n\n\ndef test_kernel_operator_commutative():\n # Adding kernels and multiplying kernels should be commutative.\n # Check addition\n assert_almost_equal((RBF(2.0) + 1.0)(X),\n (1.0 + RBF(2.0))(X))\n\n # Check multiplication\n 
assert_almost_equal((3.0 * RBF(2.0))(X),\n (RBF(2.0) * 3.0)(X))\n\n\ndef test_kernel_anisotropic():\n # Anisotropic kernel should be consistent with isotropic kernels.\n kernel = 3.0 * RBF([0.5, 2.0])\n\n K = kernel(X)\n X1 = np.array(X)\n X1[:, 0] *= 4\n K1 = 3.0 * RBF(2.0)(X1)\n assert_almost_equal(K, K1)\n\n X2 = np.array(X)\n X2[:, 1] \/= 4\n K2 = 3.0 * RBF(0.5)(X2)\n assert_almost_equal(K, K2)\n\n # Check getting and setting via theta\n kernel.theta = kernel.theta + np.log(2)\n assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))\n assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])\n\n\ndef test_kernel_stationary():\n # Test stationarity of kernels.\n for kernel in kernels:\n if not kernel.is_stationary():\n continue\n K = kernel(X, X + 1)\n assert_almost_equal(K[0, 0], np.diag(K))\n\n\ndef check_hyperparameters_equal(kernel1, kernel2):\n # Check that hyperparameters of two kernels are equal\n for attr in set(dir(kernel1) + dir(kernel2)):\n if attr.startswith(\"hyperparameter_\"):\n attr_value1 = getattr(kernel1, attr)\n attr_value2 = getattr(kernel2, attr)\n assert_equal(attr_value1, attr_value2)\n\n\ndef test_kernel_clone():\n # Test that sklearn's clone works correctly on kernels.\n bounds = (1e-5, 1e5)\n for kernel in kernels:\n kernel_cloned = clone(kernel)\n\n # XXX: Should this be fixed?\n # This differs from the sklearn's estimators equality check.\n assert_equal(kernel, kernel_cloned)\n assert_not_equal(id(kernel), id(kernel_cloned))\n\n # Check that all constructor parameters are equal.\n assert_equal(kernel.get_params(), kernel_cloned.get_params())\n\n # Check that all hyperparameters are equal.\n yield check_hyperparameters_equal, kernel, kernel_cloned\n\n # This test is to verify that using set_params does not\n # break clone on kernels.\n # This used to break because in kernels such as the RBF, non-trivial\n # logic that modified the length scale used to be in the constructor\n # See https:\/\/github.com\/scikit-learn\/scikit-learn\/issues\/6961\n # for more details.\n params = kernel.get_params()\n # RationalQuadratic kernel is isotropic.\n isotropic_kernels = (ExpSineSquared, RationalQuadratic)\n if 'length_scale' in params and not isinstance(kernel,\n isotropic_kernels):\n length_scale = params['length_scale']\n if np.iterable(length_scale):\n params['length_scale'] = length_scale[0]\n params['length_scale_bounds'] = bounds\n else:\n params['length_scale'] = [length_scale] * 2\n params['length_scale_bounds'] = bounds * 2\n kernel_cloned.set_params(**params)\n kernel_cloned_clone = clone(kernel_cloned)\n assert_equal(kernel_cloned_clone.get_params(),\n kernel_cloned.get_params())\n assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))\n yield (check_hyperparameters_equal, kernel_cloned,\n kernel_cloned_clone)\n\n\ndef test_matern_kernel():\n # Test consistency of Matern kernel for special values of nu.\n K = Matern(nu=1.5, length_scale=1.0)(X)\n # the diagonal elements of a matern kernel are 1\n assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))\n # matern kernel for coef0==0.5 is equal to absolute exponential kernel\n K_absexp = np.exp(-euclidean_distances(X, X, squared=False))\n K = Matern(nu=0.5, length_scale=1.0)(X)\n assert_array_almost_equal(K, K_absexp)\n # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])\n # result in nearly identical results as the general case for coef0 in\n # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]\n tiny = 1e-10\n for nu in [0.5, 1.5, 2.5]:\n K1 = Matern(nu=nu, length_scale=1.0)(X)\n K2 = Matern(nu=nu 
+ tiny, length_scale=1.0)(X)\n assert_array_almost_equal(K1, K2)\n\n\ndef test_kernel_versus_pairwise():\n # Check that GP kernels can also be used as pairwise kernels.\n for kernel in kernels:\n # Test auto-kernel\n if kernel != kernel_white:\n # For WhiteKernel: k(X) != k(X,X). This is assumed by\n # pairwise_kernels\n K1 = kernel(X)\n K2 = pairwise_kernels(X, metric=kernel)\n assert_array_almost_equal(K1, K2)\n\n # Test cross-kernel\n K1 = kernel(X, Y)\n K2 = pairwise_kernels(X, Y, metric=kernel)\n assert_array_almost_equal(K1, K2)\n\n\ndef test_set_get_params():\n # Check that set_params()\/get_params() is consistent with kernel.theta.\n for kernel in kernels:\n # Test get_params()\n index = 0\n params = kernel.get_params()\n for hyperparameter in kernel.hyperparameters:\n if hyperparameter.bounds == \"fixed\":\n continue\n size = hyperparameter.n_elements\n if size > 1: # anisotropic kernels\n assert_almost_equal(np.exp(kernel.theta[index:index + size]),\n params[hyperparameter.name])\n index += size\n else:\n assert_almost_equal(np.exp(kernel.theta[index]),\n params[hyperparameter.name])\n index += 1\n # Test set_params()\n index = 0\n value = 10 # arbitrary value\n for hyperparameter in kernel.hyperparameters:\n if hyperparameter.bounds == \"fixed\":\n continue\n size = hyperparameter.n_elements\n if size > 1: # anisotropic kernels\n kernel.set_params(**{hyperparameter.name: [value] * size})\n assert_almost_equal(np.exp(kernel.theta[index:index + size]),\n [value] * size)\n index += size\n else:\n kernel.set_params(**{hyperparameter.name: value})\n assert_almost_equal(np.exp(kernel.theta[index]), value)\n index += 1\n\n\ndef test_repr_kernels():\n # Smoke-test for repr in kernels.\n\n for kernel in kernels:\n repr(kernel)\n","license":"bsd-3-clause"} {"repo_name":"wilsonkichoi\/zipline","path":"zipline\/data\/data_portal.py","copies":"1","size":"64491","content":"#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom operator import mul\n\nimport bcolz\nfrom logbook import Logger\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tslib import normalize_date\nfrom six import iteritems\nfrom six.moves import reduce\n\nfrom zipline.assets import Asset, Future, Equity\nfrom zipline.data.us_equity_pricing import NoDataOnDate\nfrom zipline.data.us_equity_loader import (\n USEquityDailyHistoryLoader,\n USEquityMinuteHistoryLoader,\n)\n\nfrom zipline.utils import tradingcalendar\nfrom zipline.utils.math_utils import (\n nansum,\n nanmean,\n nanstd\n)\nfrom zipline.utils.memoize import remember_last, weak_lru_cache\nfrom zipline.errors import (\n NoTradeDataAvailableTooEarly,\n NoTradeDataAvailableTooLate,\n HistoryWindowStartsBeforeData,\n)\n\nlog = Logger('DataPortal')\n\nBASE_FIELDS = frozenset([\n \"open\", \"high\", \"low\", \"close\", \"volume\", \"price\", \"last_traded\"\n])\n\nOHLCV_FIELDS = frozenset([\n \"open\", \"high\", \"low\", \"close\", \"volume\"\n])\n\nOHLCVP_FIELDS = frozenset([\n \"open\", \"high\", \"low\", \"close\", 
\"volume\", \"price\"\n])\n\nHISTORY_FREQUENCIES = set([\"1m\", \"1d\"])\n\n\nclass DailyHistoryAggregator(object):\n \"\"\"\n Converts minute pricing data into a daily summary, to be used for the\n last slot in a call to history with a frequency of `1d`.\n\n This summary is the same as a daily bar rollup of minute data, with the\n distinction that the summary is truncated to the `dt` requested.\n i.e. the aggregation slides forward during a the course of simulation day.\n\n Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.\n The aggregation rules for each price type is documented in their respective\n\n \"\"\"\n\n def __init__(self, market_opens, minute_reader):\n self._market_opens = market_opens\n self._minute_reader = minute_reader\n\n # The caches are structured as (date, market_open, entries), where\n # entries is a dict of asset -> (last_visited_dt, value)\n #\n # Whenever an aggregation method determines the current value,\n # the entry for the respective asset should be overwritten with a new\n # entry for the current dt.value (int) and aggregation value.\n #\n # When the requested dt's date is different from date the cache is\n # flushed, so that the cache entries do not grow unbounded.\n #\n # Example cache:\n # cache = (date(2016, 3, 17),\n # pd.Timestamp('2016-03-17 13:31', tz='UTC'),\n # {\n # 1: (1458221460000000000, np.nan),\n # 2: (1458221460000000000, 42.0),\n # })\n self._caches = {\n 'open': None,\n 'high': None,\n 'low': None,\n 'close': None,\n 'volume': None\n }\n\n # The int value is used for deltas to avoid extra computation from\n # creating new Timestamps.\n self._one_min = pd.Timedelta('1 min').value\n\n def _prelude(self, dt, field):\n date = dt.date()\n dt_value = dt.value\n cache = self._caches[field]\n if cache is None or cache[0] != date:\n market_open = self._market_opens.loc[date]\n cache = self._caches[field] = (dt.date(), market_open, {})\n\n _, market_open, entries = cache\n if dt != market_open:\n prev_dt = dt_value - self._one_min\n else:\n prev_dt = None\n return market_open, prev_dt, dt_value, entries\n\n def opens(self, assets, dt):\n \"\"\"\n The open field's aggregation returns the first value that occurs\n for the day, if there has been no data on or before the `dt` the open\n is `nan`.\n\n Once the first non-nan open is seen, that value remains constant per\n asset for the remainder of the day.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')\n\n opens = []\n normalized_date = normalize_date(dt)\n\n for asset in assets:\n if not asset._is_alive(normalized_date, True):\n opens.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'open')\n entries[asset] = (dt_value, val)\n opens.append(val)\n continue\n else:\n try:\n last_visited_dt, first_open = entries[asset]\n if last_visited_dt == dt_value:\n opens.append(first_open)\n continue\n elif not pd.isnull(first_open):\n opens.append(first_open)\n entries[asset] = (dt_value, first_open)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['open'],\n after_last,\n dt,\n [asset],\n )[0]\n nonnan = window[~pd.isnull(window)]\n if len(nonnan):\n val = nonnan[0]\n else:\n val = np.nan\n entries[asset] = (dt_value, val)\n opens.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['open'],\n 
market_open,\n dt,\n [asset],\n )[0]\n nonnan = window[~pd.isnull(window)]\n if len(nonnan):\n val = nonnan[0]\n else:\n val = np.nan\n entries[asset] = (dt_value, val)\n opens.append(val)\n continue\n return np.array(opens)\n\n def highs(self, assets, dt):\n \"\"\"\n The high field's aggregation returns the largest high seen between\n the market open and the current dt.\n If there has been no data on or before the `dt` the high is `nan`.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')\n\n highs = []\n normalized_date = normalize_date(dt)\n\n for asset in assets:\n if not asset._is_alive(normalized_date, True):\n highs.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'high')\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n else:\n try:\n last_visited_dt, last_max = entries[asset]\n if last_visited_dt == dt_value:\n highs.append(last_max)\n continue\n elif last_visited_dt == prev_dt:\n curr_val = self._minute_reader.get_value(\n asset, dt, 'high')\n if pd.isnull(curr_val):\n val = last_max\n elif pd.isnull(last_max):\n val = curr_val\n else:\n val = max(last_max, curr_val)\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['high'],\n after_last,\n dt,\n [asset],\n )[0].T\n val = max(last_max, np.nanmax(window))\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['high'],\n market_open,\n dt,\n [asset],\n )[0].T\n val = np.nanmax(window)\n entries[asset] = (dt_value, val)\n highs.append(val)\n continue\n return np.array(highs)\n\n def lows(self, assets, dt):\n \"\"\"\n The low field's aggregation returns the smallest low seen between\n the market open and the current dt.\n If there has been no data on or before the `dt` the low is `nan`.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')\n\n lows = []\n normalized_date = normalize_date(dt)\n\n for asset in assets:\n if not asset._is_alive(normalized_date, True):\n lows.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'low')\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n else:\n try:\n last_visited_dt, last_min = entries[asset]\n if last_visited_dt == dt_value:\n lows.append(last_min)\n continue\n elif last_visited_dt == prev_dt:\n curr_val = self._minute_reader.get_value(\n asset, dt, 'low')\n val = np.nanmin([last_min, curr_val])\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['low'],\n after_last,\n dt,\n [asset],\n )[0].T\n window_min = np.nanmin(window)\n if pd.isnull(window_min):\n val = last_min\n else:\n val = min(last_min, window_min)\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['low'],\n market_open,\n dt,\n [asset],\n )[0].T\n val = np.nanmin(window)\n entries[asset] = (dt_value, val)\n lows.append(val)\n continue\n return np.array(lows)\n\n def closes(self, assets, dt):\n \"\"\"\n The close field's aggregation 
returns the latest close at the given\n dt.\n If the close for the given dt is `nan`, the most recent non-nan\n `close` is used.\n If there has been no data on or before the `dt` the close is `nan`.\n\n Returns\n -------\n np.array with dtype=float64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')\n\n closes = []\n normalized_dt = normalize_date(dt)\n\n for asset in assets:\n if not asset._is_alive(normalized_dt, True):\n closes.append(np.NaN)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'close')\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n else:\n try:\n last_visited_dt, last_close = entries[asset]\n if last_visited_dt == dt_value:\n closes.append(last_close)\n continue\n elif last_visited_dt == prev_dt:\n val = self._minute_reader.get_value(\n asset, dt, 'close')\n if pd.isnull(val):\n val = last_close\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n else:\n val = self._minute_reader.get_value(\n asset, dt, 'close')\n if pd.isnull(val):\n val = self.closes(\n [asset],\n pd.Timestamp(prev_dt, tz='UTC'))[0]\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n except KeyError:\n val = self._minute_reader.get_value(\n asset, dt, 'close')\n if pd.isnull(val):\n val = self.closes([asset],\n pd.Timestamp(prev_dt, tz='UTC'))[0]\n entries[asset] = (dt_value, val)\n closes.append(val)\n continue\n return np.array(closes)\n\n def volumes(self, assets, dt):\n \"\"\"\n The volume field's aggregation returns the sum of all volumes\n between the market open and the `dt`\n If there has been no data on or before the `dt` the volume is 0.\n\n Returns\n -------\n np.array with dtype=int64, in order of assets parameter.\n \"\"\"\n market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')\n\n volumes = []\n normalized_date = normalize_date(dt)\n\n for asset in assets:\n if not asset._is_alive(normalized_date, True):\n volumes.append(0)\n continue\n\n if prev_dt is None:\n val = self._minute_reader.get_value(asset, dt, 'volume')\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n else:\n try:\n last_visited_dt, last_total = entries[asset]\n if last_visited_dt == dt_value:\n volumes.append(last_total)\n continue\n elif last_visited_dt == prev_dt:\n val = self._minute_reader.get_value(\n asset, dt, 'volume')\n val += last_total\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n else:\n after_last = pd.Timestamp(\n last_visited_dt + self._one_min, tz='UTC')\n window = self._minute_reader.load_raw_arrays(\n ['volume'],\n after_last,\n dt,\n [asset],\n )[0]\n val = np.nansum(window) + last_total\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n except KeyError:\n window = self._minute_reader.load_raw_arrays(\n ['volume'],\n market_open,\n dt,\n [asset],\n )[0]\n val = np.nansum(window)\n entries[asset] = (dt_value, val)\n volumes.append(val)\n continue\n return np.array(volumes)\n\n\nclass DataPortal(object):\n \"\"\"Interface to all of the data that a zipline simulation needs.\n\n This is used by the simulation runner to answer questions about the data,\n like getting the prices of assets on a given day or to service history\n calls.\n\n Parameters\n ----------\n env : TradingEnvironment\n The trading environment for the simulation. 
This includes the trading\n calendar and benchmark data.\n first_trading_day : pd.Timestamp\n The first trading day for the simulation.\n equity_daily_reader : BcolzDailyBarReader, optional\n The daily bar reader for equities. This will be used to service\n daily data backtests or daily history calls in a minute backetest.\n If a daily bar reader is not provided but a minute bar reader is,\n the minutes will be rolled up to serve the daily requests.\n equity_minute_reader : BcolzMinuteBarReader, optional\n The minute bar reader for equities. This will be used to service\n minute data backtests or minute history calls. This can be used\n to serve daily calls if no daily bar reader is provided.\n future_daily_reader : BcolzDailyBarReader, optional\n The daily bar ready for futures. This will be used to service\n daily data backtests or daily history calls in a minute backetest.\n If a daily bar reader is not provided but a minute bar reader is,\n the minutes will be rolled up to serve the daily requests.\n future_minute_reader : BcolzMinuteBarReader, optional\n The minute bar reader for futures. This will be used to service\n minute data backtests or minute history calls. This can be used\n to serve daily calls if no daily bar reader is provided.\n adjustment_reader : SQLiteAdjustmentWriter, optional\n The adjustment reader. This is used to apply splits, dividends, and\n other adjustment data to the raw data from the readers.\n \"\"\"\n def __init__(self,\n env,\n first_trading_day,\n equity_daily_reader=None,\n equity_minute_reader=None,\n future_daily_reader=None,\n future_minute_reader=None,\n adjustment_reader=None):\n self.env = env\n\n self.views = {}\n\n self._asset_finder = env.asset_finder\n\n self._carrays = {\n 'open': {},\n 'high': {},\n 'low': {},\n 'close': {},\n 'volume': {},\n 'sid': {},\n }\n\n self._adjustment_reader = adjustment_reader\n\n # caches of sid -> adjustment list\n self._splits_dict = {}\n self._mergers_dict = {}\n self._dividends_dict = {}\n\n # Cache of sid -> the first trading day of an asset.\n self._asset_start_dates = {}\n self._asset_end_dates = {}\n\n # Handle extra sources, like Fetcher.\n self._augmented_sources_map = {}\n self._extra_source_df = None\n\n self._equity_daily_reader = equity_daily_reader\n if self._equity_daily_reader is not None:\n self._equity_history_loader = USEquityDailyHistoryLoader(\n self.env,\n self._equity_daily_reader,\n self._adjustment_reader\n )\n self._equity_minute_reader = equity_minute_reader\n self._future_daily_reader = future_daily_reader\n self._future_minute_reader = future_minute_reader\n\n self._first_trading_day = first_trading_day\n\n if self._equity_minute_reader is not None:\n self._equity_daily_aggregator = DailyHistoryAggregator(\n self.env.open_and_closes.market_open,\n self._equity_minute_reader)\n self._equity_minute_history_loader = USEquityMinuteHistoryLoader(\n self.env,\n self._equity_minute_reader,\n self._adjustment_reader\n )\n self.MINUTE_PRICE_ADJUSTMENT_FACTOR = \\\n self._equity_minute_reader._ohlc_inverse\n\n def _reindex_extra_source(self, df, source_date_index):\n return df.reindex(index=source_date_index, method='ffill')\n\n def handle_extra_source(self, source_df, sim_params):\n \"\"\"\n Extra sources always have a sid column.\n\n We expand the given data (by forward filling) to the full range of\n the simulation dates, so that lookup is fast during simulation.\n \"\"\"\n if source_df is None:\n return\n\n # Normalize all the dates in the df\n source_df.index = 
source_df.index.normalize()\n\n # source_df's sid column can either consist of assets we know about\n # (such as sid(24)) or of assets we don't know about (such as\n # palladium).\n #\n # In both cases, we break up the dataframe into individual dfs\n # that only contain a single asset's information. ie, if source_df\n # has data for PALLADIUM and GOLD, we split source_df into two\n # dataframes, one for each. (same applies if source_df has data for\n # AAPL and IBM).\n #\n # We then take each child df and reindex it to the simulation's date\n # range by forward-filling missing values. this makes reads simpler.\n #\n # Finally, we store the data. For each column, we store a mapping in\n # self.augmented_sources_map from the column to a dictionary of\n # asset -> df. In other words,\n # self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df\n # holding that data.\n source_date_index = self.env.days_in_range(\n start=sim_params.period_start,\n end=sim_params.period_end\n )\n\n # Break the source_df up into one dataframe per sid. This lets\n # us (more easily) calculate accurate start\/end dates for each sid,\n # de-dup data, and expand the data to fit the backtest start\/end date.\n grouped_by_sid = source_df.groupby([\"sid\"])\n group_names = grouped_by_sid.groups.keys()\n group_dict = {}\n for group_name in group_names:\n group_dict[group_name] = grouped_by_sid.get_group(group_name)\n\n # This will be the dataframe which we query to get fetcher assets at\n # any given time. Get's overwritten every time there's a new fetcher\n # call\n extra_source_df = pd.DataFrame()\n\n for identifier, df in iteritems(group_dict):\n # Before reindexing, save the earliest and latest dates\n earliest_date = df.index[0]\n latest_date = df.index[-1]\n\n # Since we know this df only contains a single sid, we can safely\n # de-dupe by the index (dt). 
If minute granularity, will take the\n # last data point on any given day\n df = df.groupby(level=0).last()\n\n # Reindex the dataframe based on the backtest start\/end date.\n # This makes reads easier during the backtest.\n df = self._reindex_extra_source(df, source_date_index)\n\n if not isinstance(identifier, Asset):\n # for fake assets we need to store a start\/end date\n self._asset_start_dates[identifier] = earliest_date\n self._asset_end_dates[identifier] = latest_date\n\n for col_name in df.columns.difference(['sid']):\n if col_name not in self._augmented_sources_map:\n self._augmented_sources_map[col_name] = {}\n\n self._augmented_sources_map[col_name][identifier] = df\n\n # Append to extra_source_df the reindexed dataframe for the single\n # sid\n extra_source_df = extra_source_df.append(df)\n\n self._extra_source_df = extra_source_df\n\n def _open_minute_file(self, field, asset):\n sid_str = str(int(asset))\n\n try:\n carray = self._carrays[field][sid_str]\n except KeyError:\n carray = self._carrays[field][sid_str] = \\\n self._get_ctable(asset)[field]\n\n return carray\n\n def _get_ctable(self, asset):\n sid = int(asset)\n\n if isinstance(asset, Future):\n if self._future_minute_reader.sid_path_func is not None:\n path = self._future_minute_reader.sid_path_func(\n self._future_minute_reader.rootdir, sid\n )\n else:\n path = \"{0}\/{1}.bcolz\".format(\n self._future_minute_reader.rootdir, sid)\n elif isinstance(asset, Equity):\n if self._equity_minute_reader.sid_path_func is not None:\n path = self._equity_minute_reader.sid_path_func(\n self._equity_minute_reader.rootdir, sid\n )\n else:\n path = \"{0}\/{1}.bcolz\".format(\n self._equity_minute_reader.rootdir, sid)\n\n else:\n # TODO: Figure out if assets should be allowed if neither, and\n # why this code path is being hit.\n if self._equity_minute_reader.sid_path_func is not None:\n path = self._equity_minute_reader.sid_path_func(\n self._equity_minute_reader.rootdir, sid\n )\n else:\n path = \"{0}\/{1}.bcolz\".format(\n self._equity_minute_reader.rootdir, sid)\n\n return bcolz.open(path, mode='r')\n\n def get_last_traded_dt(self, asset, dt, data_frequency):\n \"\"\"\n Given an asset and dt, returns the last traded dt from the viewpoint\n of the given dt.\n\n If there is a trade on the dt, the answer is dt provided.\n \"\"\"\n if data_frequency == 'minute':\n return self._equity_minute_reader.get_last_traded_dt(asset, dt)\n elif data_frequency == 'daily':\n return self._equity_daily_reader.get_last_traded_dt(asset, dt)\n\n @staticmethod\n def _is_extra_source(asset, field, map):\n \"\"\"\n Internal method that determines if this asset\/field combination\n represents a fetcher value or a regular OHLCVP lookup.\n \"\"\"\n # If we have an extra source with a column called \"price\", only look\n # at it if it's on something like palladium and not AAPL (since our\n # own price data always wins when dealing with assets).\n\n return not (field in BASE_FIELDS and isinstance(asset, Asset))\n\n def _get_fetcher_value(self, asset, field, dt):\n day = normalize_date(dt)\n\n try:\n return \\\n self._augmented_sources_map[field][asset].loc[day, field]\n except KeyError:\n return np.NaN\n\n def get_spot_value(self, asset, field, dt, data_frequency):\n \"\"\"\n Public API method that returns a scalar value representing the value\n of the desired asset's field at either the given dt.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n field : {'open', 'high', 'low', 'close', 'volume',\n 'price', 'last_traded'}\n 
The desired field of the asset.\n dt : pd.Timestamp\n The timestamp for the desired value.\n data_frequency : str\n The frequency of the data to query; i.e. whether the data is\n 'daily' or 'minute' bars\n\n Returns\n -------\n value : float, int, or pd.Timestamp\n The spot value of ``field`` for ``asset`` The return type is based\n on the ``field`` requested. If the field is one of 'open', 'high',\n 'low', 'close', or 'price', the value will be a float. If the\n ``field`` is 'volume' the value will be a int. If the ``field`` is\n 'last_traded' the value will be a Timestamp.\n \"\"\"\n if self._is_extra_source(asset, field, self._augmented_sources_map):\n return self._get_fetcher_value(asset, field, dt)\n\n if field not in BASE_FIELDS:\n raise KeyError(\"Invalid column: \" + str(field))\n\n if dt < asset.start_date or \\\n (data_frequency == \"daily\" and dt > asset.end_date) or \\\n (data_frequency == \"minute\" and\n normalize_date(dt) > asset.end_date):\n if field == \"volume\":\n return 0\n elif field != \"last_traded\":\n return np.NaN\n\n if data_frequency == \"daily\":\n day_to_use = dt\n day_to_use = normalize_date(day_to_use)\n return self._get_daily_data(asset, field, day_to_use)\n else:\n if isinstance(asset, Future):\n return self._get_minute_spot_value_future(\n asset, field, dt)\n else:\n if field == \"last_traded\":\n return self._equity_minute_reader.get_last_traded_dt(\n asset, dt\n )\n elif field == \"price\":\n return self._get_minute_spot_value(asset, \"close\", dt,\n True)\n else:\n return self._get_minute_spot_value(asset, field, dt)\n\n def get_adjustments(self, assets, field, dt, perspective_dt):\n \"\"\"\n Returns a list of adjustments between the dt and perspective_dt for the\n given field and list of assets\n\n Parameters\n ----------\n assets : list of type Asset, or Asset\n The asset, or assets whose adjustments are desired.\n field : {'open', 'high', 'low', 'close', 'volume', \\\n 'price', 'last_traded'}\n The desired field of the asset.\n dt : pd.Timestamp\n The timestamp for the desired value.\n perspective_dt : pd.Timestamp\n The timestamp from which the data is being viewed back from.\n data_frequency : str\n The frequency of the data to query; i.e. 
whether the data is\n 'daily' or 'minute' bars\n\n Returns\n -------\n adjustments : list[Adjustment]\n The adjustments to that field.\n \"\"\"\n if isinstance(assets, Asset):\n assets = [assets]\n\n adjustment_ratios_per_asset = []\n split_adj_factor = lambda x: x if field != 'volume' else 1.0 \/ x\n\n for asset in assets:\n adjustments_for_asset = []\n split_adjustments = self._get_adjustment_list(\n asset, self._splits_dict, \"SPLITS\"\n )\n for adj_dt, adj in split_adjustments:\n if dt <= adj_dt <= perspective_dt:\n adjustments_for_asset.append(split_adj_factor(adj))\n elif adj_dt > perspective_dt:\n break\n\n if field != 'volume':\n merger_adjustments = self._get_adjustment_list(\n asset, self._mergers_dict, \"MERGERS\"\n )\n for adj_dt, adj in merger_adjustments:\n if dt <= adj_dt <= perspective_dt:\n adjustments_for_asset.append(adj)\n elif adj_dt > perspective_dt:\n break\n\n dividend_adjustments = self._get_adjustment_list(\n asset, self._dividends_dict, \"DIVIDENDS\",\n )\n for adj_dt, adj in dividend_adjustments:\n if dt <= adj_dt <= perspective_dt:\n adjustments_for_asset.append(adj)\n elif adj_dt > perspective_dt:\n break\n\n ratio = reduce(mul, adjustments_for_asset, 1.0)\n adjustment_ratios_per_asset.append(ratio)\n\n return adjustment_ratios_per_asset\n\n def get_adjusted_value(self, asset, field, dt,\n perspective_dt,\n data_frequency,\n spot_value=None):\n \"\"\"\n Returns a scalar value representing the value\n of the desired asset's field at the given dt with adjustments applied.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n field : {'open', 'high', 'low', 'close', 'volume', \\\n 'price', 'last_traded'}\n The desired field of the asset.\n dt : pd.Timestamp\n The timestamp for the desired value.\n perspective_dt : pd.Timestamp\n The timestamp from which the data is being viewed back from.\n data_frequency : str\n The frequency of the data to query; i.e. whether the data is\n 'daily' or 'minute' bars\n\n Returns\n -------\n value : float, int, or pd.Timestamp\n The value of the given ``field`` for ``asset`` at ``dt`` with any\n adjustments known by ``perspective_dt`` applied. The return type is\n based on the ``field`` requested. If the field is one of 'open',\n 'high', 'low', 'close', or 'price', the value will be a float. If\n the ``field`` is 'volume' the value will be a int. 
If the ``field``\n is 'last_traded' the value will be a Timestamp.\n \"\"\"\n if spot_value is None:\n # if this a fetcher field, we want to use perspective_dt (not dt)\n # because we want the new value as of midnight (fetcher only works\n # on a daily basis, all timestamps are on midnight)\n if self._is_extra_source(asset, field,\n self._augmented_sources_map):\n spot_value = self.get_spot_value(asset, field, perspective_dt,\n data_frequency)\n else:\n spot_value = self.get_spot_value(asset, field, dt,\n data_frequency)\n\n if isinstance(asset, Equity):\n ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]\n spot_value *= ratio\n\n return spot_value\n\n def _get_minute_spot_value_future(self, asset, column, dt):\n # Futures bcolz files have 1440 bars per day (24 hours), 7 days a week.\n # The file attributes contain the \"start_dt\" and \"last_dt\" fields,\n # which represent the time period for this bcolz file.\n\n # The start_dt is midnight of the first day that this future started\n # trading.\n\n # figure out the # of minutes between dt and this asset's start_dt\n start_date = self._get_asset_start_date(asset)\n minute_offset = int((dt - start_date).total_seconds() \/ 60)\n\n if minute_offset < 0:\n # asking for a date that is before the asset's start date, no dice\n return 0.0\n\n # then just index into the bcolz carray at that offset\n carray = self._open_minute_file(column, asset)\n result = carray[minute_offset]\n\n # if there's missing data, go backwards until we run out of file\n while result == 0 and minute_offset > 0:\n minute_offset -= 1\n result = carray[minute_offset]\n\n if column != 'volume':\n # FIXME switch to a futures reader\n return result * 0.001\n else:\n return result\n\n def _get_minute_spot_value(self, asset, column, dt, ffill=False):\n result = self._equity_minute_reader.get_value(\n asset.sid, dt, column\n )\n\n if column == \"volume\":\n if result == 0:\n return 0\n elif not ffill or not np.isnan(result):\n # if we're not forward filling, or we found a result, return it\n return result\n\n # we are looking for price, and didn't find one. 
have to go hunting.\n last_traded_dt = \\\n self._equity_minute_reader.get_last_traded_dt(asset, dt)\n\n if last_traded_dt is pd.NaT:\n # no last traded dt, bail\n return np.nan\n\n # get the value as of the last traded dt\n result = self._equity_minute_reader.get_value(\n asset.sid,\n last_traded_dt,\n column\n )\n\n if np.isnan(result):\n return np.nan\n\n if dt == last_traded_dt or dt.date() == last_traded_dt.date():\n return result\n\n # the value we found came from a different day, so we have to adjust\n # the data if there are any adjustments on that day barrier\n return self.get_adjusted_value(\n asset, column, last_traded_dt,\n dt, \"minute\", spot_value=result\n )\n\n def _get_daily_data(self, asset, column, dt):\n if column == \"last_traded\":\n last_traded_dt = \\\n self._equity_daily_reader.get_last_traded_dt(asset, dt)\n\n if pd.isnull(last_traded_dt):\n return pd.NaT\n else:\n return last_traded_dt\n elif column in OHLCV_FIELDS:\n # don't forward fill\n try:\n val = self._equity_daily_reader.spot_price(asset, dt, column)\n if val == -1:\n if column == \"volume\":\n return 0\n else:\n return np.nan\n else:\n return val\n except NoDataOnDate:\n return np.nan\n elif column == \"price\":\n found_dt = dt\n while True:\n try:\n value = self._equity_daily_reader.spot_price(\n asset, found_dt, \"close\"\n )\n if value != -1:\n if dt == found_dt:\n return value\n else:\n # adjust if needed\n return self.get_adjusted_value(\n asset, column, found_dt, dt, \"minute\",\n spot_value=value\n )\n else:\n found_dt -= tradingcalendar.trading_day\n except NoDataOnDate:\n return np.nan\n\n @remember_last\n def _get_days_for_window(self, end_date, bar_count):\n tds = self.env.trading_days\n end_loc = self.env.trading_days.get_loc(end_date)\n start_loc = end_loc - bar_count + 1\n if start_loc < 0:\n raise HistoryWindowStartsBeforeData(\n first_trading_day=self.env.first_trading_day.date(),\n bar_count=bar_count,\n suggested_start_day=tds[bar_count].date(),\n )\n return tds[start_loc:end_loc + 1]\n\n def _get_history_daily_window(self, assets, end_dt, bar_count,\n field_to_use):\n \"\"\"\n Internal method that returns a dataframe containing history bars\n of daily frequency for the given sids.\n \"\"\"\n days_for_window = self._get_days_for_window(end_dt.date(), bar_count)\n\n if len(assets) == 0:\n return pd.DataFrame(None,\n index=days_for_window,\n columns=None)\n\n future_data = []\n eq_assets = []\n\n for asset in assets:\n if isinstance(asset, Future):\n future_data.append(self._get_history_daily_window_future(\n asset, days_for_window, end_dt, field_to_use\n ))\n else:\n eq_assets.append(asset)\n eq_data = self._get_history_daily_window_equities(\n eq_assets, days_for_window, end_dt, field_to_use\n )\n if future_data:\n # TODO: This case appears to be uncovered by testing.\n data = np.concatenate(eq_data, np.array(future_data).T)\n else:\n data = eq_data\n return pd.DataFrame(\n data,\n index=days_for_window,\n columns=assets\n )\n\n def _get_history_daily_window_future(self, asset, days_for_window,\n end_dt, column):\n # Since we don't have daily bcolz files for futures (yet), use minute\n # bars to calculate the daily values.\n data = []\n data_groups = []\n\n # get all the minutes for the days NOT including today\n for day in days_for_window[:-1]:\n minutes = self.env.market_minutes_for_day(day)\n\n values_for_day = np.zeros(len(minutes), dtype=np.float64)\n\n for idx, minute in enumerate(minutes):\n minute_val = self._get_minute_spot_value_future(\n asset, column, minute\n )\n\n 
values_for_day[idx] = minute_val\n\n data_groups.append(values_for_day)\n\n # get the minutes for today\n last_day_minutes = pd.date_range(\n start=self.env.get_open_and_close(end_dt)[0],\n end=end_dt,\n freq=\"T\"\n )\n\n values_for_last_day = np.zeros(len(last_day_minutes), dtype=np.float64)\n\n for idx, minute in enumerate(last_day_minutes):\n minute_val = self._get_minute_spot_value_future(\n asset, column, minute\n )\n\n values_for_last_day[idx] = minute_val\n\n data_groups.append(values_for_last_day)\n\n for group in data_groups:\n if len(group) == 0:\n continue\n\n if column == 'volume':\n data.append(np.sum(group))\n elif column == 'open':\n data.append(group[0])\n elif column == 'close':\n data.append(group[-1])\n elif column == 'high':\n data.append(np.amax(group))\n elif column == 'low':\n data.append(np.amin(group))\n\n return data\n\n def _get_history_daily_window_equities(\n self, assets, days_for_window, end_dt, field_to_use):\n ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0\n\n if ends_at_midnight:\n # two cases where we use daily data for the whole range:\n # 1) the history window ends at midnight utc.\n # 2) the last desired day of the window is after the\n # last trading day, use daily data for the whole range.\n return self._get_daily_window_for_sids(\n assets,\n field_to_use,\n days_for_window,\n extra_slot=False\n )\n else:\n # minute mode, requesting '1d'\n daily_data = self._get_daily_window_for_sids(\n assets,\n field_to_use,\n days_for_window[0:-1]\n )\n\n if field_to_use == 'open':\n minute_value = self._equity_daily_aggregator.opens(\n assets, end_dt)\n elif field_to_use == 'high':\n minute_value = self._equity_daily_aggregator.highs(\n assets, end_dt)\n elif field_to_use == 'low':\n minute_value = self._equity_daily_aggregator.lows(\n assets, end_dt)\n elif field_to_use == 'close':\n minute_value = self._equity_daily_aggregator.closes(\n assets, end_dt)\n elif field_to_use == 'volume':\n minute_value = self._equity_daily_aggregator.volumes(\n assets, end_dt)\n\n # append the partial day.\n daily_data[-1] = minute_value\n\n return daily_data\n\n def _get_history_minute_window(self, assets, end_dt, bar_count,\n field_to_use):\n \"\"\"\n Internal method that returns a dataframe containing history bars\n of minute frequency for the given sids.\n \"\"\"\n # get all the minutes for this window\n mm = self.env.market_minutes\n end_loc = mm.get_loc(end_dt)\n start_loc = end_loc - bar_count + 1\n if start_loc < 0:\n suggested_start_day = (mm[bar_count] + self.env.trading_day).date()\n raise HistoryWindowStartsBeforeData(\n first_trading_day=self.env.first_trading_day.date(),\n bar_count=bar_count,\n suggested_start_day=suggested_start_day,\n )\n minutes_for_window = mm[start_loc:end_loc + 1]\n\n asset_minute_data = self._get_minute_window_for_assets(\n assets,\n field_to_use,\n minutes_for_window,\n )\n\n return pd.DataFrame(\n asset_minute_data,\n index=minutes_for_window,\n columns=assets\n )\n\n def get_history_window(self, assets, end_dt, bar_count, frequency, field,\n ffill=True):\n \"\"\"\n Public API method that returns a dataframe containing the requested\n history window. Data is fully adjusted.\n\n Parameters\n ----------\n assets : list of zipline.data.Asset objects\n The assets whose data is desired.\n\n bar_count: int\n The number of bars desired.\n\n frequency: string\n \"1d\" or \"1m\"\n\n field: string\n The desired field of the asset.\n\n ffill: boolean\n Forward-fill missing values. 
Only has effect if field\n is 'price'.\n\n Returns\n -------\n A dataframe containing the requested data.\n \"\"\"\n if field not in OHLCVP_FIELDS:\n raise ValueError(\"Invalid field: {0}\".format(field))\n\n if frequency == \"1d\":\n if field == \"price\":\n df = self._get_history_daily_window(assets, end_dt, bar_count,\n \"close\")\n else:\n df = self._get_history_daily_window(assets, end_dt, bar_count,\n field)\n elif frequency == \"1m\":\n if field == \"price\":\n df = self._get_history_minute_window(assets, end_dt, bar_count,\n \"close\")\n else:\n df = self._get_history_minute_window(assets, end_dt, bar_count,\n field)\n else:\n raise ValueError(\"Invalid frequency: {0}\".format(frequency))\n\n # forward-fill price\n if field == \"price\":\n if frequency == \"1m\":\n data_frequency = 'minute'\n elif frequency == \"1d\":\n data_frequency = 'daily'\n else:\n raise Exception(\n \"Only 1d and 1m are supported for forward-filling.\")\n\n dt_to_fill = df.index[0]\n\n perspective_dt = df.index[-1]\n assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0]\n for missing_loc in assets_with_leading_nan:\n asset = assets[missing_loc]\n previous_dt = self.get_last_traded_dt(\n asset, dt_to_fill, data_frequency)\n if pd.isnull(previous_dt):\n continue\n previous_value = self.get_adjusted_value(\n asset,\n field,\n previous_dt,\n perspective_dt,\n data_frequency,\n )\n df.iloc[0, missing_loc] = previous_value\n\n df.fillna(method='ffill', inplace=True)\n\n for asset in df.columns:\n if df.index[-1] >= asset.end_date:\n # if the window extends past the asset's end date, set\n # all post-end-date values to NaN in that asset's series\n series = df[asset]\n series[series.index.normalize() > asset.end_date] = np.NaN\n\n return df\n\n def _get_minute_window_for_assets(self, assets, field, minutes_for_window):\n \"\"\"\n Internal method that gets a window of adjusted minute data for an asset\n and specified date range. Used to support the history API method for\n minute bars.\n\n Missing bars are filled with NaN.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n\n field: string\n The specific field to return. \"open\", \"high\", \"close_price\", etc.\n\n minutes_for_window: pd.DateTimeIndex\n The list of minutes representing the desired window. Each minute\n is a pd.Timestamp.\n\n Returns\n -------\n A numpy array with requested values.\n \"\"\"\n if isinstance(assets, Future):\n return self._get_minute_window_for_future([assets], field,\n minutes_for_window)\n else:\n # TODO: Make caller accept assets.\n window = self._get_minute_window_for_equities(assets, field,\n minutes_for_window)\n return window\n\n def _get_minute_window_for_future(self, asset, field, minutes_for_window):\n # THIS IS TEMPORARY. For now, we are only exposing futures within\n # equity trading hours (9:30 am to 4pm, Eastern). 
The easiest way to\n # do this is to simply do a spot lookup for each desired minute.\n return_data = np.zeros(len(minutes_for_window), dtype=np.float64)\n for idx, minute in enumerate(minutes_for_window):\n return_data[idx] = \\\n self._get_minute_spot_value_future(asset, field, minute)\n\n # Note: an improvement could be to find the consecutive runs within\n # minutes_for_window, and use them to read the underlying ctable\n # more efficiently.\n\n # Once futures are on 24-hour clock, then we can just grab all the\n # requested minutes in one shot from the ctable.\n\n # no adjustments for futures, yay.\n return return_data\n\n def _get_minute_window_for_equities(\n self, assets, field, minutes_for_window):\n return self._equity_minute_history_loader.history(assets,\n minutes_for_window,\n field)\n\n def _apply_all_adjustments(self, data, asset, dts, field,\n price_adj_factor=1.0):\n \"\"\"\n Internal method that applies all the necessary adjustments on the\n given data array.\n\n The adjustments are:\n - splits\n - if field != \"volume\":\n - mergers\n - dividends\n - * 0.001\n - any zero fields replaced with NaN\n - all values rounded to 3 digits after the decimal point.\n\n Parameters\n ----------\n data : np.array\n The data to be adjusted.\n\n asset: Asset\n The asset whose data is being adjusted.\n\n dts: pd.DateTimeIndex\n The list of minutes or days representing the desired window.\n\n field: string\n The field whose values are in the data array.\n\n price_adj_factor: float\n Factor with which to adjust OHLC values.\n Returns\n -------\n None. The data array is modified in place.\n \"\"\"\n self._apply_adjustments_to_window(\n self._get_adjustment_list(\n asset, self._splits_dict, \"SPLITS\"\n ),\n data,\n dts,\n field != 'volume'\n )\n\n if field != 'volume':\n self._apply_adjustments_to_window(\n self._get_adjustment_list(\n asset, self._mergers_dict, \"MERGERS\"\n ),\n data,\n dts,\n True\n )\n\n self._apply_adjustments_to_window(\n self._get_adjustment_list(\n asset, self._dividends_dict, \"DIVIDENDS\"\n ),\n data,\n dts,\n True\n )\n\n if price_adj_factor is not None:\n data *= price_adj_factor\n np.around(data, 3, out=data)\n\n def _get_daily_window_for_sids(\n self, assets, field, days_in_window, extra_slot=True):\n \"\"\"\n Internal method that gets a window of adjusted daily data for a sid\n and specified date range. Used to support the history API method for\n daily bars.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n\n start_dt: pandas.Timestamp\n The start of the desired window of data.\n\n bar_count: int\n The number of days of data to return.\n\n field: string\n The specific field to return. \"open\", \"high\", \"close_price\", etc.\n\n extra_slot: boolean\n Whether to allocate an extra slot in the returned numpy array.\n This extra slot will hold the data for the last partial day. It's\n much better to create it here than to create a copy of the array\n later just to add a slot.\n\n Returns\n -------\n A numpy array with requested values. 
Any missing slots filled with\n nan.\n\n \"\"\"\n bar_count = len(days_in_window)\n # create an np.array of size bar_count\n if extra_slot:\n return_array = np.zeros((bar_count + 1, len(assets)))\n else:\n return_array = np.zeros((bar_count, len(assets)))\n\n if field != \"volume\":\n # volumes default to 0, so we don't need to put NaNs in the array\n return_array[:] = np.NAN\n\n if bar_count != 0:\n data = self._equity_history_loader.history(assets,\n days_in_window,\n field)\n if extra_slot:\n return_array[:len(return_array) - 1, :] = data\n else:\n return_array[:len(data)] = data\n return return_array\n\n @staticmethod\n def _apply_adjustments_to_window(adjustments_list, window_data,\n dts_in_window, multiply):\n if len(adjustments_list) == 0:\n return\n\n # advance idx to the correct spot in the adjustments list, based on\n # when the window starts\n idx = 0\n\n while idx < len(adjustments_list) and dts_in_window[0] >\\\n adjustments_list[idx][0]:\n idx += 1\n\n # if we've advanced through all the adjustments, then there's nothing\n # to do.\n if idx == len(adjustments_list):\n return\n\n while idx < len(adjustments_list):\n adjustment_to_apply = adjustments_list[idx]\n\n if adjustment_to_apply[0] > dts_in_window[-1]:\n break\n\n range_end = dts_in_window.searchsorted(adjustment_to_apply[0])\n if multiply:\n window_data[0:range_end] *= adjustment_to_apply[1]\n else:\n window_data[0:range_end] \/= adjustment_to_apply[1]\n\n idx += 1\n\n def _get_adjustment_list(self, asset, adjustments_dict, table_name):\n \"\"\"\n Internal method that returns a list of adjustments for the given sid.\n\n Parameters\n ----------\n asset : Asset\n The asset for which to return adjustments.\n\n adjustments_dict: dict\n A dictionary of sid -> list that is used as a cache.\n\n table_name: string\n The table that contains this data in the adjustments db.\n\n Returns\n -------\n adjustments: list\n A list of [multiplier, pd.Timestamp], earliest first\n\n \"\"\"\n if self._adjustment_reader is None:\n return []\n\n sid = int(asset)\n\n try:\n adjustments = adjustments_dict[sid]\n except KeyError:\n adjustments = adjustments_dict[sid] = self._adjustment_reader.\\\n get_adjustments_for_sid(table_name, sid)\n\n return adjustments\n\n def _check_is_currently_alive(self, asset, dt):\n sid = int(asset)\n\n if sid not in self._asset_start_dates:\n self._get_asset_start_date(asset)\n\n start_date = self._asset_start_dates[sid]\n if self._asset_start_dates[sid] > dt:\n raise NoTradeDataAvailableTooEarly(\n sid=sid,\n dt=normalize_date(dt),\n start_dt=start_date\n )\n\n end_date = self._asset_end_dates[sid]\n if self._asset_end_dates[sid] < dt:\n raise NoTradeDataAvailableTooLate(\n sid=sid,\n dt=normalize_date(dt),\n end_dt=end_date\n )\n\n def _get_asset_start_date(self, asset):\n self._ensure_asset_dates(asset)\n return self._asset_start_dates[asset]\n\n def _get_asset_end_date(self, asset):\n self._ensure_asset_dates(asset)\n return self._asset_end_dates[asset]\n\n def _ensure_asset_dates(self, asset):\n sid = int(asset)\n\n if sid not in self._asset_start_dates:\n if self._first_trading_day is not None:\n self._asset_start_dates[sid] = \\\n max(asset.start_date, self._first_trading_day)\n else:\n self._asset_start_dates[sid] = asset.start_date\n\n self._asset_end_dates[sid] = asset.end_date\n\n def get_splits(self, sids, dt):\n \"\"\"\n Returns any splits for the given sids and the given dt.\n\n Parameters\n ----------\n sids : container\n Sids for which we want splits.\n dt : pd.Timestamp\n The date for which we 
are checking for splits. Note: this is\n expected to be midnight UTC.\n\n Returns\n -------\n splits : list[(int, float)]\n List of splits, where each split is a (sid, ratio) tuple.\n \"\"\"\n if self._adjustment_reader is None or not sids:\n return {}\n\n # convert dt to # of seconds since epoch, because that's what we use\n # in the adjustments db\n seconds = int(dt.value \/ 1e9)\n\n splits = self._adjustment_reader.conn.execute(\n \"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?\",\n (seconds,)).fetchall()\n\n splits = [split for split in splits if split[0] in sids]\n\n return splits\n\n def get_stock_dividends(self, sid, trading_days):\n \"\"\"\n Returns all the stock dividends for a specific sid that occur\n in the given trading range.\n\n Parameters\n ----------\n sid: int\n The asset whose stock dividends should be returned.\n\n trading_days: pd.DatetimeIndex\n The trading range.\n\n Returns\n -------\n list: A list of objects with all relevant attributes populated.\n All timestamp fields are converted to pd.Timestamps.\n \"\"\"\n\n if self._adjustment_reader is None:\n return []\n\n if len(trading_days) == 0:\n return []\n\n start_dt = trading_days[0].value \/ 1e9\n end_dt = trading_days[-1].value \/ 1e9\n\n dividends = self._adjustment_reader.conn.execute(\n \"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND \"\n \"ex_date > ? AND pay_date < ?\", (int(sid), start_dt, end_dt,)).\\\n fetchall()\n\n dividend_info = []\n for dividend_tuple in dividends:\n dividend_info.append({\n \"declared_date\": dividend_tuple[1],\n \"ex_date\": pd.Timestamp(dividend_tuple[2], unit=\"s\"),\n \"pay_date\": pd.Timestamp(dividend_tuple[3], unit=\"s\"),\n \"payment_sid\": dividend_tuple[4],\n \"ratio\": dividend_tuple[5],\n \"record_date\": pd.Timestamp(dividend_tuple[6], unit=\"s\"),\n \"sid\": dividend_tuple[7]\n })\n\n return dividend_info\n\n def contains(self, asset, field):\n return field in BASE_FIELDS or \\\n (field in self._augmented_sources_map and\n asset in self._augmented_sources_map[field])\n\n def get_fetcher_assets(self, dt):\n \"\"\"\n Returns a list of assets for the current date, as defined by the\n fetcher data.\n\n Returns\n -------\n list: a list of Asset objects.\n \"\"\"\n # return a list of assets for the current date, as defined by the\n # fetcher source\n if self._extra_source_df is None:\n return []\n\n day = normalize_date(dt)\n\n if day in self._extra_source_df.index:\n assets = self._extra_source_df.loc[day]['sid']\n else:\n return []\n\n if isinstance(assets, pd.Series):\n return [x for x in assets if isinstance(x, Asset)]\n else:\n return [assets] if isinstance(assets, Asset) else []\n\n @weak_lru_cache(20)\n def _get_minute_count_for_transform(self, ending_minute, days_count):\n # cache size picked somewhat loosely. this code exists purely to\n # handle deprecated API.\n\n # bars is the number of days desired. we have to translate that\n # into the number of minutes we want.\n # we get all the minutes for the last (bars - 1) days, then add\n # all the minutes so far today. 
the +2 is to account for ignoring\n # today, and the previous day, in doing the math.\n previous_day = self.env.previous_trading_day(ending_minute)\n days = self.env.days_in_range(\n self.env.add_trading_days(-days_count + 2, previous_day),\n previous_day,\n )\n\n minutes_count = \\\n sum(210 if day in self.env.early_closes else 390 for day in days)\n\n # add the minutes for today\n today_open = self.env.get_open_and_close(ending_minute)[0]\n minutes_count += \\\n ((ending_minute - today_open).total_seconds() \/\/ 60) + 1\n\n return minutes_count\n\n def get_simple_transform(self, asset, transform_name, dt, data_frequency,\n bars=None):\n if transform_name == \"returns\":\n # returns is always calculated over the last 2 days, regardless\n # of the simulation's data frequency.\n hst = self.get_history_window(\n [asset], dt, 2, \"1d\", \"price\", ffill=True\n )[asset]\n\n return (hst.iloc[-1] - hst.iloc[0]) \/ hst.iloc[0]\n\n if bars is None:\n raise ValueError(\"bars cannot be None!\")\n\n if data_frequency == \"minute\":\n freq_str = \"1m\"\n calculated_bar_count = self._get_minute_count_for_transform(\n dt, bars\n )\n else:\n freq_str = \"1d\"\n calculated_bar_count = bars\n\n price_arr = self.get_history_window(\n [asset], dt, calculated_bar_count, freq_str, \"price\", ffill=True\n )[asset]\n\n if transform_name == \"mavg\":\n return nanmean(price_arr)\n elif transform_name == \"stddev\":\n return nanstd(price_arr, ddof=1)\n elif transform_name == \"vwap\":\n volume_arr = self.get_history_window(\n [asset], dt, calculated_bar_count, freq_str, \"volume\",\n ffill=True\n )[asset]\n\n vol_sum = nansum(volume_arr)\n\n try:\n ret = nansum(price_arr * volume_arr) \/ vol_sum\n except ZeroDivisionError:\n ret = np.nan\n\n return ret\n","license":"apache-2.0"} {"repo_name":"schets\/scikit-learn","path":"examples\/mixture\/plot_gmm_sin.py","copies":"248","size":"2747","content":"\"\"\"\n=================================\nGaussian Mixture Model Sine Curve\n=================================\n\nThis example highlights the advantages of the Dirichlet Process:\ncomplexity control and dealing with sparse data. The dataset is formed\nby 100 points loosely spaced following a noisy sine curve. The fit by\nthe GMM class, using the expectation-maximization algorithm to fit a\nmixture of 10 Gaussian components, finds too-small components and very\nlittle structure. 
The fits by the Dirichlet process, however, show\nthat the model can either learn a global structure for the data (small\nalpha) or easily interpolate to finding relevant local structure\n(large alpha), never falling into the problems shown by the GMM class.\n\"\"\"\n\nimport itertools\n\nimport numpy as np\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom sklearn import mixture\nfrom sklearn.externals.six.moves import xrange\n\n# Number of samples per component\nn_samples = 100\n\n# Generate random sample following a sine curve\nnp.random.seed(0)\nX = np.zeros((n_samples, 2))\nstep = 4 * np.pi \/ n_samples\n\nfor i in xrange(X.shape[0]):\n x = i * step - 6\n X[i, 0] = x + np.random.normal(0, 0.1)\n X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))\n\n\ncolor_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])\n\n\nfor i, (clf, title) in enumerate([\n (mixture.GMM(n_components=10, covariance_type='full', n_iter=100),\n \"Expectation-maximization\"),\n (mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,\n n_iter=100),\n \"Dirichlet Process,alpha=0.01\"),\n (mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,\n n_iter=100),\n \"Dirichlet Process,alpha=100.\")]):\n\n clf.fit(X)\n splot = plt.subplot(3, 1, 1 + i)\n Y_ = clf.predict(X)\n for i, (mean, covar, color) in enumerate(zip(\n clf.means_, clf._get_covars(), color_iter)):\n v, w = linalg.eigh(covar)\n u = w[0] \/ linalg.norm(w[0])\n # as the DP will not use every component it has access to\n # unless it needs it, we shouldn't plot the redundant\n # components.\n if not np.any(Y_ == i):\n continue\n plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan(u[1] \/ u[0])\n angle = 180 * angle \/ np.pi # convert to degrees\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)\n ell.set_clip_box(splot.bbox)\n ell.set_alpha(0.5)\n splot.add_artist(ell)\n\n plt.xlim(-6, 4 * np.pi - 6)\n plt.ylim(-5, 5)\n plt.title(title)\n plt.xticks(())\n plt.yticks(())\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"waterponey\/scikit-learn","path":"examples\/semi_supervised\/plot_label_propagation_structure.py","copies":"55","size":"2433","content":"\"\"\"\n==============================================\nLabel Propagation learning a complex structure\n==============================================\n\nExample of LabelPropagation learning a complex internal structure\nto demonstrate \"manifold learning\". The outer circle should be\nlabeled \"red\" and the inner circle \"blue\". 
Because both label groups\nlie inside their own distinct shape, we can see that the labels\npropagate correctly around the circle.\n\"\"\"\nprint(__doc__)\n\n# Authors: Clay Woolam \n# Andreas Mueller \n# License: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.semi_supervised import label_propagation\nfrom sklearn.datasets import make_circles\n\n# generate ring with inner box\nn_samples = 200\nX, y = make_circles(n_samples=n_samples, shuffle=False)\nouter, inner = 0, 1\nlabels = -np.ones(n_samples)\nlabels[0] = outer\nlabels[-1] = inner\n\n###############################################################################\n# Learn with LabelSpreading\nlabel_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)\nlabel_spread.fit(X, labels)\n\n###############################################################################\n# Plot output labels\noutput_labels = label_spread.transduction_\nplt.figure(figsize=(8.5, 4))\nplt.subplot(1, 2, 1)\nplt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',\n marker='s', lw=0, label=\"outer labeled\", s=10)\nplt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',\n marker='s', lw=0, label='inner labeled', s=10)\nplt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',\n marker='.', label='unlabeled')\nplt.legend(scatterpoints=1, shadow=False, loc='upper right')\nplt.title(\"Raw data (2 classes=outer and inner)\")\n\nplt.subplot(1, 2, 2)\noutput_label_array = np.asarray(output_labels)\nouter_numbers = np.where(output_label_array == outer)[0]\ninner_numbers = np.where(output_label_array == inner)[0]\nplt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',\n marker='s', lw=0, s=10, label=\"outer learned\")\nplt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',\n marker='s', lw=0, s=10, label=\"inner learned\")\nplt.legend(scatterpoints=1, shadow=False, loc='upper right')\nplt.title(\"Labels learned with Label Spreading (KNN)\")\n\nplt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"shangwuhencc\/scikit-learn","path":"examples\/cluster\/plot_kmeans_assumptions.py","copies":"270","size":"2040","content":"\"\"\"\n====================================\nDemonstration of k-means assumptions\n====================================\n\nThis example is meant to illustrate situations where k-means will produce\nunintuitive and possibly unexpected clusters. In the first three plots, the\ninput data does not conform to some implicit assumption that k-means makes and\nundesirable clusters are produced as a result. 
In the last plot, k-means\nreturns intuitive clusters despite unevenly sized blobs.\n\"\"\"\nprint(__doc__)\n\n# Author: Phil Roth \n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import make_blobs\n\nplt.figure(figsize=(12, 12))\n\nn_samples = 1500\nrandom_state = 170\nX, y = make_blobs(n_samples=n_samples, random_state=random_state)\n\n# Incorrect number of clusters\ny_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)\n\nplt.subplot(221)\nplt.scatter(X[:, 0], X[:, 1], c=y_pred)\nplt.title(\"Incorrect Number of Blobs\")\n\n# Anisotropicly distributed data\ntransformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]\nX_aniso = np.dot(X, transformation)\ny_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)\n\nplt.subplot(222)\nplt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)\nplt.title(\"Anisotropicly Distributed Blobs\")\n\n# Different variance\nX_varied, y_varied = make_blobs(n_samples=n_samples,\n cluster_std=[1.0, 2.5, 0.5],\n random_state=random_state)\ny_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)\n\nplt.subplot(223)\nplt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)\nplt.title(\"Unequal Variance\")\n\n# Unevenly sized blobs\nX_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))\ny_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)\n\nplt.subplot(224)\nplt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)\nplt.title(\"Unevenly Sized Blobs\")\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"winklerand\/pandas","path":"pandas\/tests\/test_errors.py","copies":"9","size":"1147","content":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom warnings import catch_warnings\nimport pandas # noqa\nimport pandas as pd\n\n\n@pytest.mark.parametrize(\n \"exc\", ['UnsupportedFunctionCall', 'UnsortedIndexError',\n 'OutOfBoundsDatetime',\n 'ParserError', 'PerformanceWarning', 'DtypeWarning',\n 'EmptyDataError', 'ParserWarning', 'MergeError'])\ndef test_exception_importable(exc):\n from pandas import errors\n e = getattr(errors, exc)\n assert e is not None\n\n # check that we can raise on them\n with pytest.raises(e):\n raise e()\n\n\ndef test_catch_oob():\n from pandas import errors\n\n try:\n pd.Timestamp('15000101')\n except errors.OutOfBoundsDatetime:\n pass\n\n\ndef test_error_rename():\n # see gh-12665\n from pandas.errors import ParserError\n from pandas.io.common import CParserError\n\n try:\n raise CParserError()\n except ParserError:\n pass\n\n try:\n raise ParserError()\n except CParserError:\n pass\n\n with catch_warnings(record=True):\n try:\n raise ParserError()\n except pd.parser.CParserError:\n pass\n","license":"bsd-3-clause"} {"repo_name":"samuel1208\/scikit-learn","path":"sklearn\/metrics\/scorer.py","copies":"211","size":"13141","content":"\"\"\"\nThe :mod:`sklearn.metrics.scorer` submodule implements a flexible\ninterface for model selection and evaluation using\narbitrary score functions.\n\nA scorer object is a callable that can be passed to\n:class:`sklearn.grid_search.GridSearchCV` or\n:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,\nto specify how a model should be evaluated.\n\nThe signature of the call is ``(estimator, X, y)`` where ``estimator``\nis the model to be evaluated, ``X`` is the test data and ``y`` is the\nground truth labeling (or ``None`` in the case of unsupervised models).\n\"\"\"\n\n# 
Authors: Andreas Mueller \n# Lars Buitinck \n# Arnaud Joly \n# License: Simplified BSD\n\nfrom abc import ABCMeta, abstractmethod\nfrom functools import partial\n\nimport numpy as np\n\nfrom . import (r2_score, median_absolute_error, mean_absolute_error,\n mean_squared_error, accuracy_score, f1_score,\n roc_auc_score, average_precision_score,\n precision_score, recall_score, log_loss)\nfrom .cluster import adjusted_rand_score\nfrom ..utils.multiclass import type_of_target\nfrom ..externals import six\nfrom ..base import is_regressor\n\n\nclass _BaseScorer(six.with_metaclass(ABCMeta, object)):\n def __init__(self, score_func, sign, kwargs):\n self._kwargs = kwargs\n self._score_func = score_func\n self._sign = sign\n\n @abstractmethod\n def __call__(self, estimator, X, y, sample_weight=None):\n pass\n\n def __repr__(self):\n kwargs_string = \"\".join([\", %s=%s\" % (str(k), str(v))\n for k, v in self._kwargs.items()])\n return (\"make_scorer(%s%s%s%s)\"\n % (self._score_func.__name__,\n \"\" if self._sign > 0 else \", greater_is_better=False\",\n self._factory_args(), kwargs_string))\n\n def _factory_args(self):\n \"\"\"Return non-default make_scorer arguments for repr.\"\"\"\n return \"\"\n\n\nclass _PredictScorer(_BaseScorer):\n def __call__(self, estimator, X, y_true, sample_weight=None):\n \"\"\"Evaluate predicted target values for X relative to y_true.\n\n Parameters\n ----------\n estimator : object\n Trained estimator to use for scoring. Must have a predict_proba\n method; the output of that is used to compute the score.\n\n X : array-like or sparse matrix\n Test data that will be fed to estimator.predict.\n\n y_true : array-like\n Gold standard target values for X.\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n y_pred = estimator.predict(X)\n if sample_weight is not None:\n return self._sign * self._score_func(y_true, y_pred,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y_true, y_pred,\n **self._kwargs)\n\n\nclass _ProbaScorer(_BaseScorer):\n def __call__(self, clf, X, y, sample_weight=None):\n \"\"\"Evaluate predicted probabilities for X relative to y_true.\n\n Parameters\n ----------\n clf : object\n Trained classifier to use for scoring. Must have a predict_proba\n method; the output of that is used to compute the score.\n\n X : array-like or sparse matrix\n Test data that will be fed to clf.predict_proba.\n\n y : array-like\n Gold standard target values for X. These must be class labels,\n not probabilities.\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n y_pred = clf.predict_proba(X)\n if sample_weight is not None:\n return self._sign * self._score_func(y, y_pred,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y, y_pred, **self._kwargs)\n\n def _factory_args(self):\n return \", needs_proba=True\"\n\n\nclass _ThresholdScorer(_BaseScorer):\n def __call__(self, clf, X, y, sample_weight=None):\n \"\"\"Evaluate decision function output for X relative to y_true.\n\n Parameters\n ----------\n clf : object\n Trained classifier to use for scoring. 
Must have either a\n decision_function method or a predict_proba method; the output of\n that is used to compute the score.\n\n X : array-like or sparse matrix\n Test data that will be fed to clf.decision_function or\n clf.predict_proba.\n\n y : array-like\n Gold standard target values for X. These must be class labels,\n not decision function values.\n\n sample_weight : array-like, optional (default=None)\n Sample weights.\n\n Returns\n -------\n score : float\n Score function applied to prediction of estimator on X.\n \"\"\"\n y_type = type_of_target(y)\n if y_type not in (\"binary\", \"multilabel-indicator\"):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if is_regressor(clf):\n y_pred = clf.predict(X)\n else:\n try:\n y_pred = clf.decision_function(X)\n\n # For multi-output multi-class estimator\n if isinstance(y_pred, list):\n y_pred = np.vstack(p for p in y_pred).T\n\n except (NotImplementedError, AttributeError):\n y_pred = clf.predict_proba(X)\n\n if y_type == \"binary\":\n y_pred = y_pred[:, 1]\n elif isinstance(y_pred, list):\n y_pred = np.vstack([p[:, -1] for p in y_pred]).T\n\n if sample_weight is not None:\n return self._sign * self._score_func(y, y_pred,\n sample_weight=sample_weight,\n **self._kwargs)\n else:\n return self._sign * self._score_func(y, y_pred, **self._kwargs)\n\n def _factory_args(self):\n return \", needs_threshold=True\"\n\n\ndef get_scorer(scoring):\n if isinstance(scoring, six.string_types):\n try:\n scorer = SCORERS[scoring]\n except KeyError:\n raise ValueError('%r is not a valid scoring value. '\n 'Valid options are %s'\n % (scoring, sorted(SCORERS.keys())))\n else:\n scorer = scoring\n return scorer\n\n\ndef _passthrough_scorer(estimator, *args, **kwargs):\n \"\"\"Function that wraps estimator.score\"\"\"\n return estimator.score(*args, **kwargs)\n\n\ndef check_scoring(estimator, scoring=None, allow_none=False):\n \"\"\"Determine scorer from user options.\n\n A TypeError will be thrown if the estimator cannot be scored.\n\n Parameters\n ----------\n estimator : estimator object implementing 'fit'\n The object to use to fit the data.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object \/ function with signature\n ``scorer(estimator, X, y)``.\n\n allow_none : boolean, optional, default: False\n If no scoring is specified and the estimator has no score function, we\n can either return None or raise an exception.\n\n Returns\n -------\n scoring : callable\n A scorer callable object \/ function with signature\n ``scorer(estimator, X, y)``.\n \"\"\"\n has_scoring = scoring is not None\n if not hasattr(estimator, 'fit'):\n raise TypeError(\"estimator should a be an estimator implementing \"\n \"'fit' method, %r was passed\" % estimator)\n elif has_scoring:\n return get_scorer(scoring)\n elif hasattr(estimator, 'score'):\n return _passthrough_scorer\n elif allow_none:\n return None\n else:\n raise TypeError(\n \"If no scoring is specified, the estimator passed should \"\n \"have a 'score' method. The estimator %r does not.\" % estimator)\n\n\ndef make_scorer(score_func, greater_is_better=True, needs_proba=False,\n needs_threshold=False, **kwargs):\n \"\"\"Make a scorer from a performance metric or loss function.\n\n This factory function wraps scoring functions for use in GridSearchCV\n and cross_val_score. 
It takes a score function, such as ``accuracy_score``,\n ``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``\n and returns a callable that scores an estimator's output.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n score_func : callable,\n Score function (or loss function) with signature\n ``score_func(y, y_pred, **kwargs)``.\n\n greater_is_better : boolean, default=True\n Whether score_func is a score function (default), meaning high is good,\n or a loss function, meaning low is good. In the latter case, the\n scorer object will sign-flip the outcome of the score_func.\n\n needs_proba : boolean, default=False\n Whether score_func requires predict_proba to get probability estimates\n out of a classifier.\n\n needs_threshold : boolean, default=False\n Whether score_func takes a continuous decision certainty.\n This only works for binary classification using estimators that\n have either a decision_function or predict_proba method.\n\n For example ``average_precision`` or the area under the roc curve\n can not be computed using discrete predictions alone.\n\n **kwargs : additional arguments\n Additional parameters to be passed to score_func.\n\n Returns\n -------\n scorer : callable\n Callable object that returns a scalar score; greater is better.\n\n Examples\n --------\n >>> from sklearn.metrics import fbeta_score, make_scorer\n >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)\n >>> ftwo_scorer\n make_scorer(fbeta_score, beta=2)\n >>> from sklearn.grid_search import GridSearchCV\n >>> from sklearn.svm import LinearSVC\n >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},\n ... scoring=ftwo_scorer)\n \"\"\"\n sign = 1 if greater_is_better else -1\n if needs_proba and needs_threshold:\n raise ValueError(\"Set either needs_proba or needs_threshold to True,\"\n \" but not both.\")\n if needs_proba:\n cls = _ProbaScorer\n elif needs_threshold:\n cls = _ThresholdScorer\n else:\n cls = _PredictScorer\n return cls(score_func, sign, kwargs)\n\n\n# Standard regression scores\nr2_scorer = make_scorer(r2_score)\nmean_squared_error_scorer = make_scorer(mean_squared_error,\n greater_is_better=False)\nmean_absolute_error_scorer = make_scorer(mean_absolute_error,\n greater_is_better=False)\nmedian_absolute_error_scorer = make_scorer(median_absolute_error,\n greater_is_better=False)\n\n# Standard Classification Scores\naccuracy_scorer = make_scorer(accuracy_score)\nf1_scorer = make_scorer(f1_score)\n\n# Score functions that need decision values\nroc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,\n needs_threshold=True)\naverage_precision_scorer = make_scorer(average_precision_score,\n needs_threshold=True)\nprecision_scorer = make_scorer(precision_score)\nrecall_scorer = make_scorer(recall_score)\n\n# Score function for probabilistic classification\nlog_loss_scorer = make_scorer(log_loss, greater_is_better=False,\n needs_proba=True)\n\n# Clustering scores\nadjusted_rand_scorer = make_scorer(adjusted_rand_score)\n\nSCORERS = dict(r2=r2_scorer,\n median_absolute_error=median_absolute_error_scorer,\n mean_absolute_error=mean_absolute_error_scorer,\n mean_squared_error=mean_squared_error_scorer,\n accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,\n average_precision=average_precision_scorer,\n log_loss=log_loss_scorer,\n adjusted_rand_score=adjusted_rand_scorer)\n\nfor name, metric in [('precision', precision_score),\n ('recall', recall_score), ('f1', f1_score)]:\n SCORERS[name] = make_scorer(metric)\n for average in ['macro', 
'micro', 'samples', 'weighted']:\n qualified_name = '{0}_{1}'.format(name, average)\n SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,\n average=average))\n","license":"bsd-3-clause"} {"repo_name":"vigilv\/scikit-learn","path":"sklearn\/datasets\/base.py","copies":"196","size":"18554","content":"\"\"\"\nBase IO code for all datasets\n\"\"\"\n\n# Copyright (c) 2007 David Cournapeau \n# 2010 Fabian Pedregosa \n# 2010 Olivier Grisel \n# License: BSD 3 clause\n\nimport os\nimport csv\nimport shutil\nfrom os import environ\nfrom os.path import dirname\nfrom os.path import join\nfrom os.path import exists\nfrom os.path import expanduser\nfrom os.path import isdir\nfrom os import listdir\nfrom os import makedirs\n\nimport numpy as np\n\nfrom ..utils import check_random_state\n\n\nclass Bunch(dict):\n \"\"\"Container object for datasets\n\n Dictionary-like object that exposes its keys as attributes.\n\n >>> b = Bunch(a=1, b=2)\n >>> b['b']\n 2\n >>> b.b\n 2\n >>> b.a = 3\n >>> b['a']\n 3\n >>> b.c = 6\n >>> b['c']\n 6\n\n \"\"\"\n\n def __init__(self, **kwargs):\n dict.__init__(self, kwargs)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n raise AttributeError(key)\n\n def __getstate__(self):\n return self.__dict__\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the scikit-learn data dir.\n\n This folder is used by some large dataset loaders to avoid\n downloading the data several times.\n\n By default the data dir is set to a folder named 'scikit_learn_data'\n in the user home folder.\n\n Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment\n variable or programmatically by giving an explicit folder path. The\n '~' symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n \"\"\"\n if data_home is None:\n data_home = environ.get('SCIKIT_LEARN_DATA',\n join('~', 'scikit_learn_data'))\n data_home = expanduser(data_home)\n if not exists(data_home):\n makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\n\ndef load_files(container_path, description=None, categories=None,\n load_content=True, shuffle=True, encoding=None,\n decode_error='strict', random_state=0):\n \"\"\"Load text files with categories as subfolder names.\n\n Individual samples are assumed to be files stored a two levels folder\n structure such as the following:\n\n container_folder\/\n category_1_folder\/\n file_1.txt\n file_2.txt\n ...\n file_42.txt\n category_2_folder\/\n file_43.txt\n file_44.txt\n ...\n\n The folder names are used as supervised signal label names. The\n individual file names are not important.\n\n This function does not try to extract features into a numpy array or\n scipy sparse matrix. In addition, if load_content is false it\n does not try to load the files in memory.\n\n To use text files in a scikit-learn classification or clustering\n algorithm, you will need to use the `sklearn.feature_extraction.text`\n module to build a feature extraction transformer that suits your\n problem.\n\n If you set load_content=True, you should also specify the encoding of\n the text using the 'encoding' parameter. For many modern text files,\n 'utf-8' will be the correct encoding. 
If you leave encoding equal to None,\n then the content will be made of bytes instead of Unicode, and you will\n not be able to use most functions in `sklearn.feature_extraction.text`.\n\n Similar feature extractors should be built for other kind of unstructured\n data input such as images, audio, video, ...\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n container_path : string or unicode\n Path to the main folder holding one subfolder per category\n\n description: string or unicode, optional (default=None)\n A paragraph describing the characteristic of the dataset: its source,\n reference, etc.\n\n categories : A collection of strings or None, optional (default=None)\n If None (default), load all the categories.\n If not None, list of category names to load (other categories ignored).\n\n load_content : boolean, optional (default=True)\n Whether to load or not the content of the different files. If\n true a 'data' attribute containing the text information is present\n in the data structure returned. If not, a filenames attribute\n gives the path to the files.\n\n encoding : string or None (default is None)\n If None, do not try to decode the content of the files (e.g. for\n images or other non-text content).\n If not None, encoding to use to decode text files to Unicode if\n load_content is True.\n\n decode_error: {'strict', 'ignore', 'replace'}, optional\n Instruction on what to do if a byte sequence is given to analyze that\n contains characters not of the given `encoding`. Passed as keyword\n argument 'errors' to bytes.decode.\n\n shuffle : bool, optional (default=True)\n Whether or not to shuffle the data: might be important for models that\n make the assumption that the samples are independent and identically\n distributed (i.i.d.), such as stochastic gradient descent.\n\n random_state : int, RandomState instance or None, optional (default=0)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interesting attributes are: either\n data, the raw text data to learn, or 'filenames', the files\n holding it, 'target', the classification labels (integer index),\n 'target_names', the meaning of the labels, and 'DESCR', the full\n description of the dataset.\n \"\"\"\n target = []\n target_names = []\n filenames = []\n\n folders = [f for f in sorted(listdir(container_path))\n if isdir(join(container_path, f))]\n\n if categories is not None:\n folders = [f for f in folders if f in categories]\n\n for label, folder in enumerate(folders):\n target_names.append(folder)\n folder_path = join(container_path, folder)\n documents = [join(folder_path, d)\n for d in sorted(listdir(folder_path))]\n target.extend(len(documents) * [label])\n filenames.extend(documents)\n\n # convert to array for fancy indexing\n filenames = np.array(filenames)\n target = np.array(target)\n\n if shuffle:\n random_state = check_random_state(random_state)\n indices = np.arange(filenames.shape[0])\n random_state.shuffle(indices)\n filenames = filenames[indices]\n target = target[indices]\n\n if load_content:\n data = []\n for filename in filenames:\n with open(filename, 'rb') as f:\n data.append(f.read())\n if encoding is not None:\n data = [d.decode(encoding, decode_error) for d in data]\n return Bunch(data=data,\n filenames=filenames,\n 
target_names=target_names,\n target=target,\n DESCR=description)\n\n return Bunch(filenames=filenames,\n target_names=target_names,\n target=target,\n DESCR=description)\n\n\ndef load_iris():\n \"\"\"Load and return the iris dataset (classification).\n\n The iris dataset is a classic and very easy multi-class classification\n dataset.\n\n ================= ==============\n Classes 3\n Samples per class 50\n Samples total 150\n Dimensionality 4\n Features real, positive\n ================= ==============\n\n Read more in the :ref:`User Guide `.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interesting attributes are:\n 'data', the data to learn, 'target', the classification labels,\n 'target_names', the meaning of the labels, 'feature_names', the\n meaning of the features, and 'DESCR', the\n full description of the dataset.\n\n Examples\n --------\n Let's say you are interested in the samples 10, 25, and 50, and want to\n know their class name.\n\n >>> from sklearn.datasets import load_iris\n >>> data = load_iris()\n >>> data.target[[10, 25, 50]]\n array([0, 0, 1])\n >>> list(data.target_names)\n ['setosa', 'versicolor', 'virginica']\n \"\"\"\n module_path = dirname(__file__)\n with open(join(module_path, 'data', 'iris.csv')) as csv_file:\n data_file = csv.reader(csv_file)\n temp = next(data_file)\n n_samples = int(temp[0])\n n_features = int(temp[1])\n target_names = np.array(temp[2:])\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,), dtype=np.int)\n\n for i, ir in enumerate(data_file):\n data[i] = np.asarray(ir[:-1], dtype=np.float)\n target[i] = np.asarray(ir[-1], dtype=np.int)\n\n with open(join(module_path, 'descr', 'iris.rst')) as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=data, target=target,\n target_names=target_names,\n DESCR=fdescr,\n feature_names=['sepal length (cm)', 'sepal width (cm)',\n 'petal length (cm)', 'petal width (cm)'])\n\n\ndef load_digits(n_class=10):\n \"\"\"Load and return the digits dataset (classification).\n\n Each datapoint is a 8x8 image of a digit.\n\n ================= ==============\n Classes 10\n Samples per class ~180\n Samples total 1797\n Dimensionality 64\n Features integers 0-16\n ================= ==============\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_class : integer, between 0 and 10, optional (default=10)\n The number of classes to return.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interesting attributes are:\n 'data', the data to learn, 'images', the images corresponding\n to each sample, 'target', the classification labels for each\n sample, 'target_names', the meaning of the labels, and 'DESCR',\n the full description of the dataset.\n\n Examples\n --------\n To load the data and visualize the images::\n\n >>> from sklearn.datasets import load_digits\n >>> digits = load_digits()\n >>> print(digits.data.shape)\n (1797, 64)\n >>> import pylab as pl #doctest: +SKIP\n >>> pl.gray() #doctest: +SKIP\n >>> pl.matshow(digits.images[0]) #doctest: +SKIP\n >>> pl.show() #doctest: +SKIP\n \"\"\"\n module_path = dirname(__file__)\n data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),\n delimiter=',')\n with open(join(module_path, 'descr', 'digits.rst')) as f:\n descr = f.read()\n target = data[:, -1]\n flat_data = data[:, :-1]\n images = flat_data.view()\n images.shape = (-1, 8, 8)\n\n if n_class < 10:\n idx = target < n_class\n flat_data, target = flat_data[idx], target[idx]\n images = images[idx]\n\n return 
Bunch(data=flat_data,\n target=target.astype(np.int),\n target_names=np.arange(10),\n images=images,\n DESCR=descr)\n\n\ndef load_diabetes():\n \"\"\"Load and return the diabetes dataset (regression).\n\n ============== ==================\n Samples total 442\n Dimensionality 10\n Features real, -.2 < x < .2\n Targets integer 25 - 346\n ============== ==================\n\n Read more in the :ref:`User Guide `.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interesting attributes are:\n 'data', the data to learn and 'target', the regression target for each\n sample.\n \"\"\"\n base_dir = join(dirname(__file__), 'data')\n data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))\n target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))\n return Bunch(data=data, target=target)\n\n\ndef load_linnerud():\n \"\"\"Load and return the linnerud dataset (multivariate regression).\n\n Samples total: 20\n Dimensionality: 3 for both data and targets\n Features: integer\n Targets: integer\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interesting attributes are: 'data' and\n 'targets', the two multivariate datasets, with 'data' corresponding to\n the exercise and 'targets' corresponding to the physiological\n measurements, as well as 'feature_names' and 'target_names'.\n \"\"\"\n base_dir = join(dirname(__file__), 'data\/')\n # Read data\n data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)\n data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',\n skiprows=1)\n # Read header\n with open(base_dir + 'linnerud_exercise.csv') as f:\n header_exercise = f.readline().split()\n with open(base_dir + 'linnerud_physiological.csv') as f:\n header_physiological = f.readline().split()\n with open(dirname(__file__) + '\/descr\/linnerud.rst') as f:\n descr = f.read()\n\n return Bunch(data=data_exercise, feature_names=header_exercise,\n target=data_physiological,\n target_names=header_physiological,\n DESCR=descr)\n\n\ndef load_boston():\n \"\"\"Load and return the boston house-prices dataset (regression).\n\n ============== ==============\n Samples total 506\n Dimensionality 13\n Features real, positive\n Targets real 5. 
- 50.\n ============== ==============\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interesting attributes are:\n 'data', the data to learn, 'target', the regression targets,\n and 'DESCR', the full description of the dataset.\n\n Examples\n --------\n >>> from sklearn.datasets import load_boston\n >>> boston = load_boston()\n >>> print(boston.data.shape)\n (506, 13)\n \"\"\"\n module_path = dirname(__file__)\n\n fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')\n with open(fdescr_name) as f:\n descr_text = f.read()\n\n data_file_name = join(module_path, 'data', 'boston_house_prices.csv')\n with open(data_file_name) as f:\n data_file = csv.reader(f)\n temp = next(data_file)\n n_samples = int(temp[0])\n n_features = int(temp[1])\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,))\n temp = next(data_file) # names of features\n feature_names = np.array(temp)\n\n for i, d in enumerate(data_file):\n data[i] = np.asarray(d[:-1], dtype=np.float)\n target[i] = np.asarray(d[-1], dtype=np.float)\n\n return Bunch(data=data,\n target=target,\n # last column is target value\n feature_names=feature_names[:-1],\n DESCR=descr_text)\n\n\ndef load_sample_images():\n \"\"\"Load sample images for image manipulation.\n Loads both, ``china`` and ``flower``.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object with the following attributes :\n 'images', the two sample images, 'filenames', the file\n names for the images, and 'DESCR'\n the full description of the dataset.\n\n Examples\n --------\n To load the data and visualize the images:\n\n >>> from sklearn.datasets import load_sample_images\n >>> dataset = load_sample_images() #doctest: +SKIP\n >>> len(dataset.images) #doctest: +SKIP\n 2\n >>> first_img_data = dataset.images[0] #doctest: +SKIP\n >>> first_img_data.shape #doctest: +SKIP\n (427, 640, 3)\n >>> first_img_data.dtype #doctest: +SKIP\n dtype('uint8')\n \"\"\"\n # Try to import imread from scipy. 
We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n module_path = join(dirname(__file__), \"images\")\n with open(join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)\n\n\ndef load_sample_image(image_name):\n \"\"\"Load the numpy array of a single sample image\n\n Parameters\n -----------\n image_name: {`china.jpg`, `flower.jpg`}\n The name of the sample image loaded\n\n Returns\n -------\n img: 3D array\n The image as a numpy array: height x width x color\n\n Examples\n ---------\n\n >>> from sklearn.datasets import load_sample_image\n >>> china = load_sample_image('china.jpg') # doctest: +SKIP\n >>> china.dtype # doctest: +SKIP\n dtype('uint8')\n >>> china.shape # doctest: +SKIP\n (427, 640, 3)\n >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP\n >>> flower.dtype # doctest: +SKIP\n dtype('uint8')\n >>> flower.shape # doctest: +SKIP\n (427, 640, 3)\n \"\"\"\n images = load_sample_images()\n index = None\n for i, filename in enumerate(images.filenames):\n if filename.endswith(image_name):\n index = i\n break\n if index is None:\n raise AttributeError(\"Cannot find sample image: %s\" % image_name)\n return images.images[index]\n","license":"bsd-3-clause"} {"repo_name":"refstudycentre\/versification","path":"util.py","copies":"1","size":"11774","content":"\nimport numpy as np\n\nimport unicodecsv\nimport codecs\nimport goslate\nimport sqlite3\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\n\ndef imp_load(filename):\n\n texts = []\n books = []\n chapters = []\n verses = []\n\n # Read in a whole bible\n with codecs.open(filename,encoding='utf-8') as f:\n bibletext = f.read()\n\n # Split by verse\n bible_verses = bibletext.split('$$$')\n\n # Process verses\n for verse in bible_verses:\n try:\n verse = verse.split('\\n',1)\n ref = verse[0].strip()\n text = verse[1].strip()\n ref = ref.split('.')\n book = ref[0].strip()\n cnum = ref[1].strip()\n vnum = ref[2].strip()\n\n texts.append(text)\n books.append(book)\n chapters.append(cnum)\n verses.append(vnum)\n\n except IndexError:\n pass\n\n return books, chapters, verses, texts\n\n\ndef calculate_similarity(texts, translations):\n\n # Train the tf-idf thingy on the translated texts\n tfidf = TfidfVectorizer().fit_transform(texts)\n\n # Build a matrix representation of the similarities between verses\n # This will yield a simmetrical matrix\n # TODO: For performance and logical reasons: Only calculate similarity for nearby verses, assume others 0 ?\n M = np.array([linear_kernel(tfidf[j:j+1], tfidf).flatten() for j in range(len(texts))])\n\n # Hack(ish): Set similarity with verses of same translation to 0\n for i in range(len(M)):\n for j in range(i+1):\n if translations[i] == translations[j]:\n M[i][j] = M[j][i] = 0\n\n # print np.round(M*100,0)\n\n return M\n\n\ndef find_best_couple(M,t):\n \"\"\"\n find best couple in similarity matrix M\n the translation(s) of each verse is given in t\n \"\"\"\n\n # assume 
values are 0 for verses in same translation\n i_max, j_max = np.unravel_index(M.argmax(), M.shape)\n P_max = M[i_max, j_max]\n\n return i_max, j_max, P_max\n\n\ndef merge_nodes(M,a,b):\n \"\"\"\n merge indices a and b in similarity matrix M into one supernode,\n averaging similarity values between the supernode and other verses\n \"\"\"\n \n N = len(M)\n \n # calculate a new row (and column) for the supernode\n supernode_similarity = [np.average([M[k][a],M[k][b]]) for k in range(N)]\n \n # append the row (this will jumble the verse order...)\n newM = np.append(M, np.array(supernode_similarity)[None,:], axis=0)\n \n # append 0 (supernode's similarity with itself) to the row and add it as a column\n supernode_similarity.append(0.)\n newM = np.append(newM, np.array(supernode_similarity)[:,None], axis=1)\n \n # to preserve verse indices, don't delete\n # newM = np.delete(newM,[a,b],axis=0)\n # rather make rows a and b 0\n # to preserve verse indices, don't delete\n # newM = np.delete(newM,[a,b],axis=1)\n # rather make columns a and b 0\n \n newM[:,a] = np.zeros_like(newM[:,a])\n newM[:,b] = np.zeros_like(newM[:,b])\n newM[a,:] = np.zeros_like(newM[a,:])\n newM[b,:] = np.zeros_like(newM[b,:])\n \n return newM\n\n\ndef group_verses(M, t, numT, P_min = 0.1):\n \"\"\"\n Automatically group verses\n t = the translation of each verse\n numT = max number of verses in a group = number of translations\n \"\"\"\n\n t = [[val] for val in t]\n N = len(M)\n groups = {} # keyed by supernode index\n iteration = 0\n max_iteration = N\n \n while iteration < max_iteration:\n iteration += 1\n #print \"\\t\\tGrouping: iteration \",iteration\n\n i,j,P = find_best_couple(M, t)\n #print \"\\t\\tbest couple: \",i,j,P\n\n # Stop iterating if similarity gets too low...\n if P < P_min:\n break;\n \n group = []\n \n # merge supernodes if they exist, else merge nodes:\n \n if i in groups:\n group.extend(groups[i])\n else:\n group.append(i)\n \n if j in groups:\n group.extend(groups[j])\n else:\n group.append(j)\n \n # group now contains all of the verses for the new supernode\n\n if len(group) > numT:\n # this grouping is invalid\n # prevent it from happening again by making P 0\n M[i][j] = 0\n else:\n # valid grouping. 
save it.\n\n # Remove the previous supernode groups\n if i in groups:\n del groups[i]\n\n if j in groups:\n del groups[j]\n\n # Create the supernode\n M = merge_nodes(M,i,j)\n t.append(t[i] + t[j])\n\n # Save the index of the new supernode\n supernode_index = len(M)-1\n groups[supernode_index] = group\n\n print \"\\r\\t\\t\",len(groups),\n\n print\n\n return groups\n\n\ndef align(input_translations, input_filenames, output_filename):\n \"\"\"\n Load one csv file for each translation\n Group, align and sort the verses\n Export a csv file containing a column for each translation\n \"\"\"\n\n if len(input_translations) != len(input_filenames):\n raise ValueError(\"Number of translations and number of files must be the same\")\n\n M = len(input_translations)\n\n # Load pre-translated data\n print \"\\tLoading data from files...\"\n #translations,books,chapters,verses,texts_original,texts_en = load_translated_verses(input_translations, input_filenames)\n translations,chapters,verses,texts_original,texts_en = csv_import_translated_books(input_filenames, input_translations)\n\n # Calculate similarity between verses\n print \"\\tCalculating similarity matrix...\"\n similarity = calculate_similarity(texts_en, translations)\n\n def canonical_group_cmp(a, b):\n \"\"\"\n Define sort order for groups of verses\n \"\"\"\n\n # find two verses from the same translation to compare their canonical order\n for i in a:\n for j in b:\n if translations[i] == translations[j]:\n return i - j\n\n # Group the verses\n print \"\\tGrouping verses...\"\n groups = group_verses(similarity, translations, 3).values()\n # print groups\n\n # Put groups back into canonical order\n print \"\\tSorting verses...\"\n groups.sort(canonical_group_cmp)\n\n # prepare data for csv export\n print \"\\tPreparing csv data...\"\n csv_rows = []\n csv_rows.append(input_translations) # headers\n\n for group in groups:\n\n # create a row in the csv file for every group\n if len(group) == M:\n # rows where all translations are present, are quick:\n group.sort()\n row = [u\"{0}:{1}:{2}\".format(chapters[verse],verses[verse],texts_original[verse]) for verse in group]\n else:\n # for other rows, we have to find the missing translation, and substitute it with a blank\n row = []\n for translation in input_translations:\n found = False\n for verse in group:\n if translation == translations[verse]:\n # verse found for this translation\n row.append(u\"{0}:{1}:{2}\".format(chapters[verse],verses[verse],texts_original[verse]))\n found = True\n break\n if not found:\n # fill in a blank\n row.append(\"\")\n\n csv_rows.append(row)\n\n # print csv_rows\n\n # Export to csv file\n print \"\\tWriting csv file...\"\n with open(output_filename,'wb') as f:\n cw = unicodecsv.writer(f, encoding='utf-8')\n cw.writerows(csv_rows)\n\n print \"\\tDone!\"\n\n\ndef translate_csv(in_filename, language, out_filename):\n \"\"\"\n Load a bible book from csv file\n translate it\n save it as a new file\n \"\"\"\n\n # Create a translator object\n gs = goslate.Goslate(retry_times=100, timeout=100)\n\n # Load the bible book to be translated\n chapters,verses,texts_original = csv_import_book(in_filename)\n\n # Batch translate the verses if necessary\n if language != 'en':\n print \"Batch translating {0} verses from '{1}' to 'en'\".format(len(texts_original), language)\n texts_translated = gs.translate(texts_original, 'en', language)\n else:\n print \"Not translating {0} verses already in 'en'\".format(len(texts_original))\n texts_translated = texts_original\n\n # Write to CSV 
file\n rows = zip(chapters, verses, texts_original, texts_translated)\n with open(out_filename,'wb') as f:\n cw = unicodecsv.writer(f, encoding='utf-8')\n cw.writerow(['chapter','verse','text_original','text_english'])\n cw.writerows(rows)\n\n\ndef csv_import_book(filename):\n \"\"\"\n load bible book from csv file\n \"\"\"\n\n texts = []\n chapters = []\n verses = []\n\n # Read in a whole file of verses\n with open(filename,'rb') as f:\n cr = unicodecsv.reader(f, encoding='utf-8')\n header = cr.next() # skip header\n\n # Process verses\n for cnum,vnum,text in cr:\n chapters.append(int(cnum)) # parse integer\n verses.append(int(vnum)) # parse integer\n texts.append(text.strip()) # remove surrounding whitespace\n\n # return results\n return chapters,verses,texts\n\n\ndef csv_export_book(filename, rows=[], chapters=[], verses=[], texts=[]):\n\n if not len(rows) > 0:\n rows = zip(chapters, verses, texts)\n\n with open(filename,'wb') as f:\n cw = unicodecsv.writer(f,encoding='utf-8')\n cw.writerow(['chapter','verse','text'])\n cw.writerows(rows)\n\n\ndef csv_import_translated_book(input_file):\n \"\"\"\n import a single translated book from a single translation from single csv file\n \"\"\"\n\n texts_en = []\n texts_original = []\n chapters = []\n verses = []\n\n # Read in a whole (Google translated) file of verses\n with open(input_file, 'rb') as f:\n cr = unicodecsv.reader(f, encoding='utf-8')\n header = cr.next() # skip header\n\n # Process verses\n for cnum,vnum,text_original,text_en in cr:\n chapters.append(int(cnum))\n verses.append(int(vnum))\n texts_original.append(text_original.strip())\n texts_en.append(text_en.strip())\n\n # return results\n return chapters,verses,texts_original,texts_en\n\n\ndef csv_import_translated_books(input_files, input_translations):\n \"\"\"\n import a single book from M translations from M csv files\n \"\"\"\n\n if len(input_files) != len(input_translations):\n raise ValueError(\"Number of input files and translations are not the same\")\n\n translations = []\n chapters = []\n verses = []\n texts_original = []\n texts_en = []\n\n for in_file,translation in zip(input_files,input_translations):\n c,v,o,e = csv_import_translated_book(in_file)\n chapters.extend(c)\n verses.extend(v)\n texts_original.extend(o)\n texts_en.extend(e)\n translations.extend([translation]*len(e))\n\n return translations,chapters,verses,texts_original,texts_en\n\n\ndef csv_import_aligned_book(input_file):\n \"\"\"\n Import a single aligned book (e.g. after it is checked by humans)\n \"\"\"\n\n groups = []\n\n with open(input_file, 'rb') as f:\n cr = unicodecsv.reader(f, encoding='utf-8')\n\n translations = cr.next() # header contains translation names\n\n for row in cr:\n group = {}\n for i in range(len(translations)):\n verse = row[i].split(':',3)\n group[translations[i]] = {\n 'chapternum':int(verse[0]),\n 'versenum':int(verse[1]),\n 'text':verse[2].strip()\n }\n groups.append(group)\n\n return groups","license":"gpl-2.0"} {"repo_name":"oesteban\/mriqc","path":"mriqc\/qc\/anatomical.py","copies":"1","size":"21553","content":"#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# pylint: disable=no-member\n\nr\"\"\"\n\nMeasures based on noise measurements\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
_iqms_cjv:\n\n- :py:func:`~mriqc.qc.anatomical.cjv` -- **coefficient of joint variation**\n (:abbr:`CJV (coefficient of joint variation)`):\n The ``cjv`` of GM and WM was proposed as objective function by [Ganzetti2016]_ for\n the optimization of :abbr:`INU (intensity non-uniformity)` correction algorithms.\n Higher values are related to the presence of heavy head motion and large\n :abbr:`INU (intensity non-uniformity)` artifacts. Lower values are better.\n\n.. _iqms_cnr:\n\n- :py:func:`~mriqc.qc.anatomical.cnr` -- **contrast-to-noise ratio**\n (:abbr:`CNR (contrast-to-noise ratio)`): The ``cnr`` [Magnota2006]_,\n is an extension of the :abbr:`SNR (signal-to-noise Ratio)` calculation\n to evaluate how separated the tissue distributions of GM and WM are.\n Higher values indicate better quality.\n\n.. _iqms_snr:\n\n- :py:func:`~mriqc.qc.anatomical.snr` -- **signal-to-noise ratio**\n (:abbr:`SNR (signal-to-noise ratio)`): calculated within the\n tissue mask.\n\n.. _iqms_snrd:\n\n- :py:func:`~mriqc.qc.anatomical.snr_dietrich`: **Dietrich's SNR**\n (:abbr:`SNRd (signal-to-noise ratio, Dietrich 2007)`) as proposed\n by [Dietrich2007]_, using the air background as reference.\n\n.. _iqms_qi2:\n\n- :py:func:`~mriqc.qc.anatomical.art_qi2`: **Mortamet's quality index 2**\n (:abbr:`QI2 (quality index 2)`) is a calculation of the goodness-of-fit\n of a :math:`\\chi^2` distribution on the air mask,\n once the artifactual intensities detected for computing\n the :abbr:`QI1 (quality index 1)` index have been removed [Mortamet2009]_.\n\nMeasures based on information theory\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. _iqms_efc:\n\n- :py:func:`~mriqc.qc.anatomical.efc`:\n The :abbr:`EFC (Entropy Focus Criterion)`\n [Atkinson1997]_ uses the Shannon entropy of voxel intensities as\n an indication of ghosting and blurring induced by head motion.\n Lower values are better.\n\n The original equation is normalized by the maximum entropy, so that the\n :abbr:`EFC (Entropy Focus Criterion)` can be compared across images with\n different dimensions.\n\n.. _iqms_fber:\n\n- :py:func:`~mriqc.qc.anatomical.fber`:\n The :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,\n defined as the mean energy of image values within the head relative\n to outside the head [QAP-measures]_.\n Higher values are better.\n\nMeasures targeting specific artifacts\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. _iqms_inu:\n\n- **inu_\\*** (*nipype interface to N4ITK*): summary statistics (max, min and median)\n of the :abbr:`INU (intensity non-uniformity)` field as extracted by the N4ITK algorithm\n [Tustison2010]_. Values closer to 1.0 are better.\n\n.. _iqms_qi:\n\n- :py:func:`~mriqc.qc.anatomical.art_qi1`:\n Detect artifacts in the image using the method described in [Mortamet2009]_.\n The :abbr:`QI1 (quality index 1)` is the proportion of voxels with intensity\n corrupted by artifacts normalized by the number of voxels in the background.\n Lower values are better.\n\n .. figure:: ..\/resources\/mortamet-mrm2009.png\n\n The workflow to compute the artifact detection from [Mortamet2009]_.\n\n.. _iqms_wm2max:\n\n- :py:func:`~mriqc.qc.anatomical.wm2max`:\n The white-matter to maximum intensity ratio is the median intensity\n within the WM mask over the 95% percentile of the full intensity\n distribution, that captures the existence of long tails due to\n hyper-intensity of the carotid vessels and fat. Values\n should be around the interval [0.6, 0.8].\n\n\nOther measures\n^^^^^^^^^^^^^^\n\n.. 
_iqms_fwhm:\n\n- **fwhm** (*nipype interface to AFNI*): The :abbr:`FWHM (full-width half maximum)` of\n the spatial distribution of the image intensity values in units of voxels [Forman1995]_.\n Lower values are better. Uses the gaussian width estimator filter implemented in\n AFNI's ``3dFWHMx``:\n\n .. math ::\n\n \\text{FWHM} = \\sqrt{-{\\left[4 \\ln{(1-\\frac{\\sigma^2_{X^m_{i+1,j}-X^m_{i,j}}}\n {2\\sigma^2_{X^m_{i,j}}}})\\right]}^{-1}}\n\n\n.. _iqms_icvs:\n\n- :py:func:`~mriqc.qc.anatomical.volume_fraction` (**icvs_\\***):\n the\n :abbr:`ICV (intracranial volume)` fractions of :abbr:`CSF (cerebrospinal fluid)`,\n :abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. They should move within\n a normative range.\n\n.. _iqms_rpve:\n\n- :py:func:`~mriqc.qc.anatomical.rpve` (**rpve_\\***): the\n :abbr:`rPVe (residual partial voluming error)` of :abbr:`CSF (cerebrospinal fluid)`,\n :abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. Lower values are better.\n\n.. _iqms_summary:\n\n- :py:func:`~mriqc.qc.anatomical.summary_stats` (**summary_\\*_\\***):\n Mean, standard deviation, 5% percentile and 95% percentile of the distribution\n of background, :abbr:`CSF (cerebrospinal fluid)`, :abbr:`GM (gray-matter)` and\n :abbr:`WM (white-matter)`.\n\n.. _iqms_tpm:\n\n- **overlap_\\*_\\***:\n The overlap of the :abbr:`TPMs (tissue probability maps)` estimated from the image and\n the corresponding maps from the ICBM nonlinear-asymmetric 2009c template.\n\n .. math ::\n\n \\text{JI}^k = \\frac{\\sum_i \\min{(\\text{TPM}^k_i, \\text{MNI}^k_i)}}\n {\\sum_i \\max{(\\text{TPM}^k_i, \\text{MNI}^k_i)}}\n\n\n.. topic:: References\n\n .. [Dietrich2007] Dietrich et al., *Measurement of SNRs in MR images: influence\n of multichannel coils, parallel imaging and reconstruction filters*, JMRI 26(2):375--385.\n 2007. doi:`10.1002\/jmri.20969 `_.\n\n .. [Ganzetti2016] Ganzetti et al., *Intensity inhomogeneity correction of structural MR images:\n a data-driven approach to define input algorithm parameters*. Front Neuroinform 10:10. 2016.\n doi:`10.3389\/finf.201600010 `_.\n\n .. [Magnota2006] Magnotta, VA., & Friedman, L., *Measurement of signal-to-noise\n and contrast-to-noise in the fBIRN multicenter imaging study*.\n J Dig Imag 19(2):140-147, 2006. doi:`10.1007\/s10278-006-0264-x\n `_.\n\n .. [Mortamet2009] Mortamet B et al., *Automatic quality assessment in\n structural brain magnetic resonance imaging*, Mag Res Med 62(2):365-372,\n 2009. doi:`10.1002\/mrm.21992 `_.\n\n .. [Tustison2010] Tustison NJ et al., *N4ITK: improved N3 bias correction*,\n IEEE Trans Med Imag, 29(6):1310-20,\n 2010. doi:`10.1109\/TMI.2010.2046908 `_.\n\n .. [Shehzad2015] Shehzad Z et al., *The Preprocessed Connectomes Project\n Quality Assessment Protocol - a resource for measuring the quality of MRI data*,\n Front. Neurosci. Conference Abstract: Neuroinformatics 2015.\n doi:`10.3389\/conf.fnins.2015.91.00047 `_.\n\n .. [Forman1995] Forman SD et al., *Improved assessment of significant activation in functional\n magnetic resonance imaging (fMRI): use of a cluster-size threshold*,\n Magn. Reson. Med. 
33 (5), 636\u2013647, 1995.\n doi:`10.1002\/mrm.1910330508 `_.\n\n\nmriqc.qc.anatomical module\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\"\"\"\nimport os.path as op\nfrom sys import version_info\nfrom math import pi, sqrt\nimport numpy as np\nimport scipy.ndimage as nd\nfrom scipy.stats import kurtosis # pylint: disable=E0611\n\nfrom io import open # pylint: disable=W0622\nfrom builtins import zip, range # pylint: disable=W0622\nfrom six import string_types\n\nDIETRICH_FACTOR = 1.0 \/ sqrt(2 \/ (4 - pi))\nFSL_FAST_LABELS = {'csf': 1, 'gm': 2, 'wm': 3, 'bg': 0}\nPY3 = version_info[0] > 2\n\n\ndef snr(mu_fg, sigma_fg, n):\n r\"\"\"\n Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.\n The estimation may be provided with only one foreground region in\n which the noise is computed as follows:\n\n .. math::\n\n \\text{SNR} = \\frac{\\mu_F}{\\sigma_F\\sqrt{n\/(n-1)}},\n\n where :math:`\\mu_F` is the mean intensity of the foreground and\n :math:`\\sigma_F` is the standard deviation of the same region.\n\n :param float mu_fg: mean of foreground.\n :param float sigma_fg: standard deviation of foreground.\n :param int n: number of voxels in foreground mask.\n\n :return: the computed SNR\n\n \"\"\"\n return float(mu_fg \/ (sigma_fg * sqrt(n \/ (n - 1))))\n\n\ndef snr_dietrich(mu_fg, sigma_air):\n r\"\"\"\n Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.\n\n This must be an air mask around the head, and it should not contain artifacts.\n The computation is done following the eq. A.12 of [Dietrich2007]_, which\n includes a correction factor in the estimation of the standard deviation of\n air and its Rayleigh distribution:\n\n .. math::\n\n \\text{SNR} = \\frac{\\mu_F}{\\sqrt{\\frac{2}{4-\\pi}}\\,\\sigma_\\text{air}}.\n\n\n :param float mu_fg: mean of foreground.\n :param float sigma_air: standard deviation of the air surrounding the head (\"hat\" mask).\n\n :return: the computed SNR for the foreground segmentation\n\n \"\"\"\n if sigma_air < 1.0:\n from .. import MRIQC_LOG\n MRIQC_LOG.warning('SNRd - background sigma is too small (%f)', sigma_air)\n sigma_air += 1.0\n\n return float(DIETRICH_FACTOR * mu_fg \/ sigma_air)\n\n\ndef cnr(mu_wm, mu_gm, sigma_air):\n r\"\"\"\n Calculate the :abbr:`CNR (Contrast-to-Noise Ratio)` [Magnota2006]_.\n Higher values are better.\n\n .. math::\n\n \\text{CNR} = \\frac{|\\mu_\\text{GM} - \\mu_\\text{WM} |}{\\sqrt{\\sigma_B^2 +\n \\sigma_\\text{WM}^2 + \\sigma_\\text{GM}^2}},\n\n where :math:`\\sigma_B` is the standard deviation of the noise distribution within\n the air (background) mask.\n\n\n :param float mu_wm: mean of signal within white-matter mask.\n :param float mu_gm: mean of signal within gray-matter mask.\n :param float sigma_air: standard deviation of the air surrounding the head (\"hat\" mask).\n\n :return: the computed CNR\n\n \"\"\"\n return float(abs(mu_wm - mu_gm) \/ sigma_air)\n\n\ndef cjv(mu_wm, mu_gm, sigma_wm, sigma_gm):\n r\"\"\"\n Calculate the :abbr:`CJV (coefficient of joint variation)`, a measure\n related to :abbr:`SNR (Signal-to-Noise Ratio)` and\n :abbr:`CNR (Contrast-to-Noise Ratio)` that is presented as a proxy for\n the :abbr:`INU (intensity non-uniformity)` artifact [Ganzetti2016]_.\n Lower is better.\n\n .. 
math::\n\n \\text{CJV} = \\frac{\\sigma_\\text{WM} + \\sigma_\\text{GM}}{|\\mu_\\text{WM} - \\mu_\\text{GM}|}.\n\n :param float mu_wm: mean of signal within white-matter mask.\n :param float mu_gm: mean of signal within gray-matter mask.\n :param float sigma_wm: standard deviation of signal within white-matter mask.\n :param float sigma_gm: standard deviation of signal within gray-matter mask.\n\n :return: the computed CJV\n\n\n \"\"\"\n return float((sigma_wm + sigma_gm) \/ abs(mu_wm - mu_gm))\n\n\ndef fber(img, headmask, rotmask=None):\n r\"\"\"\n Calculate the :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,\n defined as the mean energy of image values within the head relative\n to outside the head. Higher values are better.\n\n .. math::\n\n \\text{FBER} = \\frac{E[|F|^2]}{E[|B|^2]}\n\n\n :param numpy.ndarray img: input data\n :param numpy.ndarray headmask: a mask of the head (including skull, skin, etc.)\n :param numpy.ndarray rotmask: a mask of empty voxels inserted after a rotation of\n data\n\n \"\"\"\n\n fg_mu = np.median(np.abs(img[headmask > 0]) ** 2)\n\n airmask = np.ones_like(headmask, dtype=np.uint8)\n airmask[headmask > 0] = 0\n if rotmask is not None:\n airmask[rotmask > 0] = 0\n bg_mu = np.median(np.abs(img[airmask == 1]) ** 2)\n if bg_mu < 1.0e-3:\n return 0\n return float(fg_mu \/ bg_mu)\n\n\ndef efc(img, framemask=None):\n r\"\"\"\n Calculate the :abbr:`EFC (Entropy Focus Criterion)` [Atkinson1997]_.\n Uses the Shannon entropy of voxel intensities as an indication of ghosting\n and blurring induced by head motion. A range of low values is better,\n with EFC = 0 for all the energy concentrated in one pixel.\n\n .. math::\n\n \\text{E} = - \\sum_{j=1}^N \\frac{x_j}{x_\\text{max}}\n \\ln \\left[\\frac{x_j}{x_\\text{max}}\\right]\n\n with :math:`x_\\text{max} = \\sqrt{\\sum_{j=1}^N x^2_j}`.\n\n The original equation is normalized by the maximum entropy, so that the\n :abbr:`EFC (Entropy Focus Criterion)` can be compared across images with\n different dimensions:\n\n .. math::\n\n \\text{EFC} = \\left( \\frac{N}{\\sqrt{N}} \\, \\log{\\sqrt{N}^{-1}} \\right) \\text{E}\n\n :param numpy.ndarray img: input data\n :param numpy.ndarray framemask: a mask of empty voxels inserted after a rotation of\n data\n\n \"\"\"\n\n if framemask is None:\n framemask = np.zeros_like(img, dtype=np.uint8)\n\n n_vox = np.sum(1 - framemask)\n # Calculate the maximum value of the EFC (which occurs any time all\n # voxels have the same value)\n efc_max = 1.0 * n_vox * (1.0 \/ np.sqrt(n_vox)) * \\\n np.log(1.0 \/ np.sqrt(n_vox))\n\n # Calculate the total image energy\n b_max = np.sqrt((img[framemask == 0]**2).sum())\n\n # Calculate EFC (add 1e-16 to the image data to keep log happy)\n return float((1.0 \/ efc_max) * np.sum((img[framemask == 0] \/ b_max) * np.log(\n (img[framemask == 0] + 1e-16) \/ b_max)))\n\n\ndef wm2max(img, mu_wm):\n r\"\"\"\n Calculate the :abbr:`WM2MAX (white-matter-to-max ratio)`,\n defined as the maximum intensity found in the volume w.r.t. the\n mean value of the white matter tissue. Values close to 1.0 are\n better:\n\n .. math ::\n\n \\text{WM2MAX} = \\frac{\\mu_\\text{WM}}{P_{99.95}(X)}\n\n \"\"\"\n return float(mu_wm \/ np.percentile(img.reshape(-1), 99.95))\n\n\ndef art_qi1(airmask, artmask):\n r\"\"\"\n Detect artifacts in the image using the method described in [Mortamet2009]_.\n Caculates :math:`\\text{QI}_1`, as the proportion of voxels with intensity\n corrupted by artifacts normalized by the number of voxels in the background:\n\n .. 
math ::\n\n \\text{QI}_1 = \\frac{1}{N} \\sum\\limits_{x\\in X_\\text{art}} 1\n\n Lower values are better.\n\n :param numpy.ndarray airmask: input air mask, without artifacts\n :param numpy.ndarray artmask: input artifacts mask\n\n \"\"\"\n\n # Count the number of voxels that remain after the opening operation.\n # These are artifacts.\n return float(artmask.sum() \/ (airmask.sum() + artmask.sum()))\n\n\ndef art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):\n r\"\"\"\n Calculates :math:`\\text{QI}_2`, based on the goodness-of-fit of a centered\n :math:`\\chi^2` distribution onto the intensity distribution of\n non-artifactual background (within the \"hat\" mask):\n\n\n .. math ::\n\n \\chi^2_n = \\frac{2}{(\\sigma \\sqrt{2})^{2n} \\, (n - 1)!}x^{2n - 1}\\, e^{-\\frac{x}{2}}\n\n where :math:`n` is the number of coil elements.\n\n :param numpy.ndarray img: input data\n :param numpy.ndarray airmask: input air mask without artifacts\n\n \"\"\"\n\n from sklearn.neighbors import KernelDensity\n from scipy.stats import chi2\n from mriqc.viz.misc import plot_qi2\n\n # S. Ogawa was born\n np.random.seed(1191935)\n\n data = img[airmask > 0]\n data = data[data > 0]\n\n # Write out figure of the fitting\n out_file = op.abspath('error.svg')\n with open(out_file, 'w') as ofh:\n ofh.write('

Background noise fitting could not be plotted.<\/p>')\n\n if len(data) < min_voxels:\n return 0.0, out_file\n\n modelx = data if len(data) < max_voxels else np.random.choice(\n data, size=max_voxels)\n\n x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)\n\n # Estimate data pdf with KDE on a random subsample\n kde_skl = KernelDensity(bandwidth=0.05 * np.percentile(data, 98),\n kernel='gaussian').fit(modelx[:, np.newaxis])\n kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))\n\n # Find cutoff\n kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)\n\n # Fit X^2\n param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)\n chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])\n\n # Compute goodness-of-fit (gof)\n gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())\n if save_plot:\n out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)\n\n return gof, out_file\n\n\ndef volume_fraction(pvms):\n r\"\"\"\n Computes the :abbr:`ICV (intracranial volume)` fractions\n corresponding to the (partial volume maps).\n\n .. math ::\n\n \\text{ICV}^k = \\frac{\\sum_i p^k_i}{\\sum\\limits_{x \\in X_\\text{brain}} 1}\n\n :param list pvms: list of :code:`numpy.ndarray` of partial volume maps.\n\n \"\"\"\n tissue_vfs = {}\n total = 0\n for k, lid in list(FSL_FAST_LABELS.items()):\n if lid == 0:\n continue\n tissue_vfs[k] = pvms[lid - 1].sum()\n total += tissue_vfs[k]\n\n for k in list(tissue_vfs.keys()):\n tissue_vfs[k] \/= total\n return {k: float(v) for k, v in list(tissue_vfs.items())}\n\n\ndef rpve(pvms, seg):\n \"\"\"\n Computes the :abbr:`rPVe (residual partial voluming error)`\n of each tissue class.\n\n .. math ::\n\n \\\\text{rPVE}^k = \\\\frac{1}{N} \\\\left[ \\\\sum\\\\limits_{p^k_i \\\n\\\\in [0.5, P_{98}]} p^k_i + \\\\sum\\\\limits_{p^k_i \\\\in [P_{2}, 0.5)} 1 - p^k_i \\\\right]\n\n \"\"\"\n\n pvfs = {}\n for k, lid in list(FSL_FAST_LABELS.items()):\n if lid == 0:\n continue\n pvmap = pvms[lid - 1]\n pvmap[pvmap < 0.] = 0.\n pvmap[pvmap >= 1.] = 1.\n totalvol = np.sum(pvmap > 0.0)\n upth = np.percentile(pvmap[pvmap > 0], 98)\n loth = np.percentile(pvmap[pvmap > 0], 2)\n pvmap[pvmap < loth] = 0\n pvmap[pvmap > upth] = 0\n pvfs[k] = (pvmap[pvmap > 0.5].sum() + (1.0 - pvmap[pvmap <= 0.5]).sum()) \/ totalvol\n return {k: float(v) for k, v in list(pvfs.items())}\n\n\ndef summary_stats(img, pvms, airmask=None, erode=True):\n r\"\"\"\n Estimates the mean, the standard deviation, the 95\\%\n and the 5\\% percentiles of each tissue distribution.\n\n .. warning ::\n\n Sometimes (with datasets that have been partially processed), the air\n mask will be empty. In those cases, the background stats will be zero\n for the mean, median, percentiles and kurtosis, the sum of voxels in\n the other remaining labels for ``n``, and finally the MAD and the\n :math:`\\sigma` will be calculated as:\n\n .. math ::\n\n \\sigma_\\text{BG} = \\sqrt{\\sum \\sigma_\\text{i}^2}\n\n\n \"\"\"\n from .. 
import MRIQC_LOG\n from statsmodels.robust.scale import mad\n\n # Check type of input masks\n dims = np.squeeze(np.array(pvms)).ndim\n if dims == 4:\n # If pvms is from FSL FAST, create the bg mask\n stats_pvms = [np.zeros_like(img)] + pvms\n elif dims == 3:\n stats_pvms = [np.ones_like(pvms) - pvms, pvms]\n else:\n raise RuntimeError('Incorrect image dimensions ({0:d})'.format(\n np.array(pvms).ndim))\n\n if airmask is not None:\n stats_pvms[0] = airmask\n\n labels = list(FSL_FAST_LABELS.items())\n if len(stats_pvms) == 2:\n labels = list(zip(['bg', 'fg'], list(range(2))))\n\n output = {}\n for k, lid in labels:\n mask = np.zeros_like(img, dtype=np.uint8)\n mask[stats_pvms[lid] > 0.85] = 1\n\n if erode:\n struc = nd.generate_binary_structure(3, 2)\n mask = nd.binary_erosion(\n mask, structure=struc).astype(np.uint8)\n\n nvox = float(mask.sum())\n if nvox < 1e3:\n MRIQC_LOG.warning('calculating summary stats of label \"%s\" in a very small '\n 'mask (%d voxels)', k, int(nvox))\n if k == 'bg':\n continue\n\n output[k] = {\n 'mean': float(img[mask == 1].mean()),\n 'stdv': float(img[mask == 1].std()),\n 'median': float(np.median(img[mask == 1])),\n 'mad': float(mad(img[mask == 1])),\n 'p95': float(np.percentile(img[mask == 1], 95)),\n 'p05': float(np.percentile(img[mask == 1], 5)),\n 'k': float(kurtosis(img[mask == 1])),\n 'n': nvox,\n }\n\n if 'bg' not in output:\n output['bg'] = {\n 'mean': 0.,\n 'median': 0.,\n 'p95': 0.,\n 'p05': 0.,\n 'k': 0.,\n 'stdv': sqrt(sum(val['stdv']**2\n for _, val in list(output.items()))),\n 'mad': sqrt(sum(val['mad']**2\n for _, val in list(output.items()))),\n 'n': sum(val['n'] for _, val in list(output.items()))\n }\n\n if 'bg' in output and output['bg']['mad'] == 0.0 and output['bg']['stdv'] > 1.0:\n MRIQC_LOG.warning('estimated MAD in the background was too small ('\n 'MAD=%f)', output['bg']['mad'])\n output['bg']['mad'] = output['bg']['stdv'] \/ DIETRICH_FACTOR\n return output\n\n\ndef _prepare_mask(mask, label, erode=True):\n fgmask = mask.copy()\n\n if np.issubdtype(fgmask.dtype, np.integer):\n if isinstance(label, string_types):\n label = FSL_FAST_LABELS[label]\n\n fgmask[fgmask != label] = 0\n fgmask[fgmask == label] = 1\n else:\n fgmask[fgmask > .95] = 1.\n fgmask[fgmask < 1.] = 0\n\n if erode:\n # Create a structural element to be used in an opening operation.\n struc = nd.generate_binary_structure(3, 2)\n # Perform an opening operation on the background data.\n fgmask = nd.binary_opening(fgmask, structure=struc).astype(np.uint8)\n\n return fgmask\n","license":"bsd-3-clause"} {"repo_name":"analogdevicesinc\/gnuradio","path":"gr-analog\/examples\/fmtest.py","copies":"40","size":"7941","content":"#!\/usr\/bin\/env python\n#\n# Copyright 2009,2012,2013 Free Software Foundation, Inc.\n#\n# This file is part of GNU Radio\n#\n# GNU Radio is free software; you can redistribute it and\/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n#\n# GNU Radio is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with GNU Radio; see the file COPYING. 
If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n#\n\nfrom gnuradio import gr\nfrom gnuradio import blocks\nfrom gnuradio import filter\nfrom gnuradio import analog\nfrom gnuradio import channels\nimport sys, math, time\n\ntry:\n import scipy\n from scipy import fftpack\nexcept ImportError:\n print \"Error: Program requires scipy (see: www.scipy.org).\"\n sys.exit(1)\n\ntry:\n import pylab\nexcept ImportError:\n print \"Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\"\n sys.exit(1)\n\n\nclass fmtx(gr.hier_block2):\n def __init__(self, lo_freq, audio_rate, if_rate):\n\n gr.hier_block2.__init__(self, \"build_fm\",\n gr.io_signature(1, 1, gr.sizeof_float),\n gr.io_signature(1, 1, gr.sizeof_gr_complex))\n\n fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)\n\n # Local oscillator\n lo = analog.sig_source_c(if_rate, # sample rate\n analog.GR_SIN_WAVE, # waveform type\n lo_freq, # frequency\n 1.0, # amplitude\n 0) # DC Offset\n mixer = blocks.multiply_cc()\n\n self.connect(self, fmtx, (mixer, 0))\n self.connect(lo, (mixer, 1))\n self.connect(mixer, self)\n\nclass fmtest(gr.top_block):\n def __init__(self):\n gr.top_block.__init__(self)\n\n self._nsamples = 1000000\n self._audio_rate = 8000\n\n # Set up N channels with their own baseband and IF frequencies\n self._N = 5\n chspacing = 16000\n freq = [10, 20, 30, 40, 50]\n f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]\n\n self._if_rate = 4*self._N*self._audio_rate\n\n # Create a signal source and frequency modulate it\n self.sum = blocks.add_cc()\n for n in xrange(self._N):\n sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)\n fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)\n self.connect(sig, fm)\n self.connect(fm, (self.sum, n))\n\n self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)\n self.snk_tx = blocks.vector_sink_c()\n self.channel = channels.channel_model(0.1)\n\n self.connect(self.sum, self.head, self.channel, self.snk_tx)\n\n\n # Design the channlizer\n self._M = 10\n bw = chspacing\/2.0\n t_bw = chspacing\/10.0\n self._chan_rate = self._if_rate \/ self._M\n self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,\n attenuation_dB=100,\n window=filter.firdes.WIN_BLACKMAN_hARRIS)\n tpc = math.ceil(float(len(self._taps)) \/ float(self._M))\n\n print \"Number of taps: \", len(self._taps)\n print \"Number of channels: \", self._M\n print \"Taps per channel: \", tpc\n\n self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)\n\n self.connect(self.channel, self.pfb)\n\n # Create a file sink for each of M output channels of the filter and connect it\n self.fmdet = list()\n self.squelch = list()\n self.snks = list()\n for i in xrange(self._M):\n self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))\n self.squelch.append(analog.standard_squelch(self._audio_rate*10))\n self.snks.append(blocks.vector_sink_f())\n self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])\n\n def num_tx_channels(self):\n return self._N\n\n def num_rx_channels(self):\n return self._M\n\ndef main():\n\n fm = fmtest()\n\n tstart = time.time()\n fm.run()\n tend = time.time()\n\n if 1:\n fig1 = pylab.figure(1, figsize=(12,10), facecolor=\"w\")\n fig2 = pylab.figure(2, figsize=(12,10), facecolor=\"w\")\n fig3 = pylab.figure(3, figsize=(12,10), facecolor=\"w\")\n\n Ns = 10000\n Ne = 100000\n\n fftlen = 8192\n winfunc = scipy.blackman\n\n # Plot transmitted signal\n fs = 
fm._if_rate\n\n d = fm.snk_tx.data()[Ns:Ns+Ne]\n sp1_f = fig1.add_subplot(2, 1, 1)\n\n X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen\/4, Fs=fs,\n window = lambda d: d*winfunc(fftlen),\n visible=False)\n X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))\n f_in = scipy.arange(-fs\/2.0, fs\/2.0, fs\/float(X_in.size))\n p1_f = sp1_f.plot(f_in, X_in, \"b\")\n sp1_f.set_xlim([min(f_in), max(f_in)+1])\n sp1_f.set_ylim([-120.0, 20.0])\n\n sp1_f.set_title(\"Input Signal\", weight=\"bold\")\n sp1_f.set_xlabel(\"Frequency (Hz)\")\n sp1_f.set_ylabel(\"Power (dBW)\")\n\n Ts = 1.0\/fs\n Tmax = len(d)*Ts\n\n t_in = scipy.arange(0, Tmax, Ts)\n x_in = scipy.array(d)\n sp1_t = fig1.add_subplot(2, 1, 2)\n p1_t = sp1_t.plot(t_in, x_in.real, \"b-o\")\n #p1_t = sp1_t.plot(t_in, x_in.imag, \"r-o\")\n sp1_t.set_ylim([-5, 5])\n\n # Set up the number of rows and columns for plotting the subfigures\n Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))\n Nrows = int(scipy.floor(fm.num_rx_channels() \/ Ncols))\n if(fm.num_rx_channels() % Ncols != 0):\n Nrows += 1\n\n # Plot each of the channels outputs. Frequencies on Figure 2 and\n # time signals on Figure 3\n fs_o = fm._audio_rate\n for i in xrange(len(fm.snks)):\n # remove issues with the transients at the beginning\n # also remove some corruption at the end of the stream\n # this is a bug, probably due to the corner cases\n d = fm.snks[i].data()[Ns:Ne]\n\n sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)\n X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen\/4, Fs=fs_o,\n window = lambda d: d*winfunc(fftlen),\n visible=False)\n #X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))\n X_o = 10.0*scipy.log10(abs(X))\n #f_o = scipy.arange(-fs_o\/2.0, fs_o\/2.0, fs_o\/float(X_o.size))\n f_o = scipy.arange(0, fs_o\/2.0, fs_o\/2.0\/float(X_o.size))\n p2_f = sp2_f.plot(f_o, X_o, \"b\")\n sp2_f.set_xlim([min(f_o), max(f_o)+0.1])\n sp2_f.set_ylim([-120.0, 20.0])\n sp2_f.grid(True)\n\n sp2_f.set_title((\"Channel %d\" % i), weight=\"bold\")\n sp2_f.set_xlabel(\"Frequency (kHz)\")\n sp2_f.set_ylabel(\"Power (dBW)\")\n\n\n Ts = 1.0\/fs_o\n Tmax = len(d)*Ts\n t_o = scipy.arange(0, Tmax, Ts)\n\n x_t = scipy.array(d)\n sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)\n p2_t = sp2_t.plot(t_o, x_t.real, \"b\")\n p2_t = sp2_t.plot(t_o, x_t.imag, \"r\")\n sp2_t.set_xlim([min(t_o), max(t_o)+1])\n sp2_t.set_ylim([-1, 1])\n\n sp2_t.set_xlabel(\"Time (s)\")\n sp2_t.set_ylabel(\"Amplitude\")\n\n\n pylab.show()\n\n\nif __name__ == \"__main__\":\n main()\n","license":"gpl-3.0"} {"repo_name":"ghchinoy\/tensorflow","path":"tensorflow\/contrib\/timeseries\/examples\/known_anomaly.py","copies":"24","size":"7880","content":"# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Example of using an exogenous feature to ignore a known anomaly.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nfrom os import path\n\nimport numpy as np\nimport tensorflow as tf\n\n\ntry:\n import matplotlib # pylint: disable=g-import-not-at-top\n matplotlib.use(\"TkAgg\") # Need Tk for interactive plots.\n from matplotlib import pyplot # pylint: disable=g-import-not-at-top\n HAS_MATPLOTLIB = True\nexcept ImportError:\n # Plotting requires matplotlib, but the unit test running this code may\n # execute in an environment without it (i.e. matplotlib is not a build\n # dependency). We'd still like to test the TensorFlow-dependent parts of this\n # example, namely train_and_predict.\n HAS_MATPLOTLIB = False\n\n_MODULE_PATH = path.dirname(__file__)\n_DATA_FILE = path.join(_MODULE_PATH, \"data\/changepoints.csv\")\n\n\ndef state_space_estimator(exogenous_feature_columns):\n \"\"\"Constructs a StructuralEnsembleRegressor.\"\"\"\n\n def _exogenous_update_condition(times, features):\n del times # unused\n # Make exogenous updates sparse by setting an update condition. This in\n # effect allows missing exogenous features: if the condition evaluates to\n # False, no update is performed. Otherwise we sometimes end up with \"leaky\"\n # updates which add unnecessary uncertainty to the model even when there is\n # no changepoint.\n return tf.equal(tf.squeeze(features[\"is_changepoint\"], axis=-1), \"yes\")\n\n return (\n tf.contrib.timeseries.StructuralEnsembleRegressor(\n periodicities=12,\n # Extract a smooth period by constraining the number of latent values\n # being cycled between.\n cycle_num_latent_values=3,\n num_features=1,\n exogenous_feature_columns=exogenous_feature_columns,\n exogenous_update_condition=_exogenous_update_condition),\n # Use truncated backpropagation with a window size of 64, batching\n # together 4 of these windows (random offsets) per training step. 
Training\n # with exogenous features often requires somewhat larger windows.\n 4, 64)\n\n\ndef autoregressive_estimator(exogenous_feature_columns):\n input_window_size = 8\n output_window_size = 2\n return (\n tf.contrib.timeseries.ARRegressor(\n periodicities=12,\n num_features=1,\n input_window_size=input_window_size,\n output_window_size=output_window_size,\n exogenous_feature_columns=exogenous_feature_columns),\n 64, input_window_size + output_window_size)\n\n\ndef train_and_evaluate_exogenous(\n estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):\n \"\"\"Training, evaluating, and predicting on a series with changepoints.\"\"\"\n # Indicate the format of our exogenous feature, in this case a string\n # representing a boolean value.\n string_feature = tf.feature_column.categorical_column_with_vocabulary_list(\n key=\"is_changepoint\", vocabulary_list=[\"no\", \"yes\"])\n # Specify the way this feature is presented to the model, here using a one-hot\n # encoding.\n one_hot_feature = tf.feature_column.indicator_column(\n categorical_column=string_feature)\n\n estimator, batch_size, window_size = estimator_fn(\n exogenous_feature_columns=[one_hot_feature])\n reader = tf.contrib.timeseries.CSVReader(\n csv_file_name,\n # Indicate the format of our CSV file. First we have two standard columns,\n # one for times and one for values. The third column is a custom exogenous\n # feature indicating whether each timestep is a changepoint. The\n # changepoint feature name must match the string_feature column name\n # above.\n column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,\n tf.contrib.timeseries.TrainEvalFeatures.VALUES,\n \"is_changepoint\"),\n # Indicate dtypes for our features.\n column_dtypes=(tf.int64, tf.float32, tf.string),\n # This CSV has a header line; here we just ignore it.\n skip_header_lines=1)\n train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(\n reader, batch_size=batch_size, window_size=window_size)\n estimator.train(input_fn=train_input_fn, steps=train_steps)\n evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)\n evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)\n # Create an input_fn for prediction, with a simulated changepoint. 
Since all\n # of the anomalies in the training data are explained by the exogenous\n # feature, we should get relatively confident predictions before the indicated\n # changepoint (since we are telling the model that no changepoint exists at\n # those times) and relatively uncertain predictions after.\n (predictions,) = tuple(estimator.predict(\n input_fn=tf.contrib.timeseries.predict_continuation_input_fn(\n evaluation, steps=100,\n exogenous_features={\n \"is_changepoint\": [[\"no\"] * 49 + [\"yes\"] + [\"no\"] * 50]})))\n times = evaluation[\"times\"][0]\n observed = evaluation[\"observed\"][0, :, 0]\n mean = np.squeeze(np.concatenate(\n [evaluation[\"mean\"][0], predictions[\"mean\"]], axis=0))\n variance = np.squeeze(np.concatenate(\n [evaluation[\"covariance\"][0], predictions[\"covariance\"]], axis=0))\n all_times = np.concatenate([times, predictions[\"times\"]], axis=0)\n upper_limit = mean + np.sqrt(variance)\n lower_limit = mean - np.sqrt(variance)\n # Indicate the locations of the changepoints for plotting vertical lines.\n anomaly_locations = []\n with open(csv_file_name, \"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n if row[\"is_changepoint\"] == \"yes\":\n anomaly_locations.append(int(row[\"time\"]))\n anomaly_locations.append(predictions[\"times\"][49])\n return (times, observed, all_times, mean, upper_limit, lower_limit,\n anomaly_locations)\n\n\ndef make_plot(name, training_times, observed, all_times, mean,\n upper_limit, lower_limit, anomaly_locations):\n \"\"\"Plot the time series and anomalies in a new figure.\"\"\"\n pyplot.figure()\n pyplot.plot(training_times, observed, \"b\", label=\"training series\")\n pyplot.plot(all_times, mean, \"r\", label=\"forecast\")\n pyplot.axvline(anomaly_locations[0], linestyle=\"dotted\", label=\"changepoints\")\n for anomaly_location in anomaly_locations[1:]:\n pyplot.axvline(anomaly_location, linestyle=\"dotted\")\n pyplot.fill_between(all_times, lower_limit, upper_limit, color=\"grey\",\n alpha=\"0.2\")\n pyplot.axvline(training_times[-1], color=\"k\", linestyle=\"--\")\n pyplot.xlabel(\"time\")\n pyplot.ylabel(\"observations\")\n pyplot.legend(loc=0)\n pyplot.title(name)\n\n\ndef main(unused_argv):\n if not HAS_MATPLOTLIB:\n raise ImportError(\n \"Please install matplotlib to generate a plot from this example.\")\n make_plot(\"Ignoring a known anomaly (state space)\",\n *train_and_evaluate_exogenous(\n estimator_fn=state_space_estimator))\n make_plot(\"Ignoring a known anomaly (autoregressive)\",\n *train_and_evaluate_exogenous(\n estimator_fn=autoregressive_estimator, train_steps=3000))\n pyplot.show()\n\n\nif __name__ == \"__main__\":\n tf.app.run(main=main)\n","license":"apache-2.0"} {"repo_name":"leesavide\/pythonista-docs","path":"Documentation\/matplotlib\/mpl_examples\/api\/custom_scale_example.py","copies":"9","size":"6401","content":"from __future__ import unicode_literals\n\nimport numpy as np\nfrom numpy import ma\nfrom matplotlib import scale as mscale\nfrom matplotlib import transforms as mtransforms\nfrom matplotlib.ticker import Formatter, FixedLocator\n\n\nclass MercatorLatitudeScale(mscale.ScaleBase):\n \"\"\"\n Scales data in range -pi\/2 to pi\/2 (-90 to 90 degrees) using\n the system used to scale latitudes in a Mercator projection.\n\n The scale function:\n ln(tan(y) + sec(y))\n\n The inverse scale function:\n atan(sinh(y))\n\n Since the Mercator scale tends to infinity at +\/- 90 degrees,\n there is user-defined threshold, above and below which nothing\n will be plotted. 
This defaults to +\/- 85 degrees.\n\n source:\n http:\/\/en.wikipedia.org\/wiki\/Mercator_projection\n \"\"\"\n\n # The scale class must have a member ``name`` that defines the\n # string used to select the scale. For example,\n # ``gca().set_yscale(\"mercator\")`` would be used to select this\n # scale.\n name = 'mercator'\n\n\n def __init__(self, axis, **kwargs):\n \"\"\"\n Any keyword arguments passed to ``set_xscale`` and\n ``set_yscale`` will be passed along to the scale's\n constructor.\n\n thresh: The degree above which to crop the data.\n \"\"\"\n mscale.ScaleBase.__init__(self)\n thresh = kwargs.pop(\"thresh\", (85 \/ 180.0) * np.pi)\n if thresh >= np.pi \/ 2.0:\n raise ValueError(\"thresh must be less than pi\/2\")\n self.thresh = thresh\n\n def get_transform(self):\n \"\"\"\n Override this method to return a new instance that does the\n actual transformation of the data.\n\n The MercatorLatitudeTransform class is defined below as a\n nested class of this one.\n \"\"\"\n return self.MercatorLatitudeTransform(self.thresh)\n\n def set_default_locators_and_formatters(self, axis):\n \"\"\"\n Override to set up the locators and formatters to use with the\n scale. This is only required if the scale requires custom\n locators and formatters. Writing custom locators and\n formatters is rather outside the scope of this example, but\n there are many helpful examples in ``ticker.py``.\n\n In our case, the Mercator example uses a fixed locator from\n -90 to 90 degrees and a custom formatter class to put convert\n the radians to degrees and put a degree symbol after the\n value::\n \"\"\"\n class DegreeFormatter(Formatter):\n def __call__(self, x, pos=None):\n # \\u00b0 : degree symbol\n return \"%d\\u00b0\" % ((x \/ np.pi) * 180.0)\n\n deg2rad = np.pi \/ 180.0\n axis.set_major_locator(FixedLocator(\n np.arange(-90, 90, 10) * deg2rad))\n axis.set_major_formatter(DegreeFormatter())\n axis.set_minor_formatter(DegreeFormatter())\n\n def limit_range_for_scale(self, vmin, vmax, minpos):\n \"\"\"\n Override to limit the bounds of the axis to the domain of the\n transform. In the case of Mercator, the bounds should be\n limited to the threshold that was passed in. Unlike the\n autoscaling provided by the tick locators, this range limiting\n will always be adhered to, whether the axis range is set\n manually, determined automatically or changed through panning\n and zooming.\n \"\"\"\n return max(vmin, -self.thresh), min(vmax, self.thresh)\n\n class MercatorLatitudeTransform(mtransforms.Transform):\n # There are two value members that must be defined.\n # ``input_dims`` and ``output_dims`` specify number of input\n # dimensions and output dimensions to the transformation.\n # These are used by the transformation framework to do some\n # error checking and prevent incompatible transformations from\n # being connected together. When defining transforms for a\n # scale, which are, by definition, separable and have only one\n # dimension, these members should always be set to 1.\n input_dims = 1\n output_dims = 1\n is_separable = True\n\n def __init__(self, thresh):\n mtransforms.Transform.__init__(self)\n self.thresh = thresh\n\n def transform_non_affine(self, a):\n \"\"\"\n This transform takes an Nx1 ``numpy`` array and returns a\n transformed copy. Since the range of the Mercator scale\n is limited by the user-specified threshold, the input\n array must be masked to contain only valid values.\n ``matplotlib`` will handle masked arrays and remove the\n out-of-range data from the plot. 
Importantly, the\n ``transform`` method *must* return an array that is the\n same shape as the input array, since these values need to\n remain synchronized with values in the other dimension.\n \"\"\"\n masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)\n if masked.mask.any():\n return ma.log(np.abs(ma.tan(masked) + 1.0 \/ ma.cos(masked)))\n else:\n return np.log(np.abs(np.tan(a) + 1.0 \/ np.cos(a)))\n\n def inverted(self):\n \"\"\"\n Override this method so matplotlib knows how to get the\n inverse transform for this transform.\n \"\"\"\n return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh)\n\n class InvertedMercatorLatitudeTransform(mtransforms.Transform):\n input_dims = 1\n output_dims = 1\n is_separable = True\n\n def __init__(self, thresh):\n mtransforms.Transform.__init__(self)\n self.thresh = thresh\n\n def transform_non_affine(self, a):\n return np.arctan(np.sinh(a))\n\n def inverted(self):\n return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)\n\n# Now that the Scale class has been defined, it must be registered so\n# that ``matplotlib`` can find it.\nmscale.register_scale(MercatorLatitudeScale)\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n t = np.arange(-180.0, 180.0, 0.1)\n s = t \/ 360.0 * np.pi\n\n plt.plot(t, s, '-', lw=2)\n plt.gca().set_yscale('mercator')\n\n plt.xlabel('Longitude')\n plt.ylabel('Latitude')\n plt.title('Mercator: Projection of the Oppressor')\n plt.grid(True)\n\n plt.show()\n\n","license":"apache-2.0"} {"repo_name":"SCP-028\/UGA","path":"protein_pka\/mcce\/mcce.py","copies":"1","size":"17127","content":"#!python3\r\n\"\"\"\r\nPredict protein pKa based on MCCE method.\r\nhttp:\/\/pka.engr.ccny.cuny.edu\/\r\n\r\nRequire MCCE 3.0 to work: https:\/\/anaconda.org\/SalahSalah\/mcce\/files\r\n\"\"\"\r\nimport asyncio\r\nimport glob\r\nimport gzip\r\nimport locale\r\nimport logging\r\nimport math\r\nimport os\r\nimport re\r\nimport shutil\r\nimport subprocess\r\nimport sys\r\nimport time\r\nfrom multiprocessing import Pool\r\nfrom urllib.request import urlopen\r\n\r\nimport aioftp\r\nimport pandas as pd\r\nimport uvloop\r\n\r\n# Sapelo Locale is broken, quick fix\r\nlocale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\r\n# Set working directory\r\nROOTPATH = os.path.dirname(os.path.realpath(sys.argv[0]))\r\nos.chdir(ROOTPATH)\r\n# Log settings\r\nlogger = logging.getLogger(__name__)\r\nlogger.setLevel(logging.INFO)\r\nhandler = logging.FileHandler(f\".\/pKa_calculation_{__file__}.log\")\r\nhandler.setLevel(logging.INFO)\r\nformatter = logging.Formatter(\r\n \"%(asctime)s\\t%(levelname)s\\t\"\r\n \"[%(filename)s:%(lineno)s -%(funcName)12s()]\\t%(message)s\"\r\n)\r\nhandler.setFormatter(formatter)\r\nlogger.addHandler(handler)\r\n\r\n\r\nclass pdb:\r\n def __init__(self):\r\n self.all_ids = []\r\n self.download_ids = [] # Download -> Unzip -> Preprocess -> Calculate\r\n self.unzip_ids = [] # Unzip -> Preprocess -> Calculate\r\n self.preprocess_ids = [] # Preprocess -> Calculate\r\n self.ready_ids = [] # Calculate\r\n self.finished_ids = [] # Successfully calculated IDs\r\n self.error_ids = [] # Error in download, unzip, or calculation\r\n # IDs this script will work on (messy queue implementation)\r\n self.working_ids = []\r\n\r\n def load_id(self):\r\n \"\"\"\r\n First try to get existing pKa values,\r\n then get the list of PDB files to download.\r\n \"\"\"\r\n for folder in [\".\/pdb\", \".\/annotation\", \".\/results\"]:\r\n try:\r\n os.makedirs(folder)\r\n except OSError:\r\n pass\r\n 
self.finished_ids = [id[-8:-4] for id in glob.glob(\".\/results\/*.pka\")]\r\n logger.debug(f\"{len(self.finished_ids)} finished files.\")\r\n # Create file even at first run so that the results folder doesn't get deleted\r\n with open(\".\/results\/finished_ids.list\", \"a\") as f:\r\n f.write(\"\\n\".join(self.finished_ids))\r\n\r\n self.ready_ids = list(set(\r\n [id[-12:-8].upper() for id in glob.glob(\".\/pdb\/*\/*.pdb.bak\")]) - set(self.finished_ids))\r\n logger.debug(f\"{len(self.ready_ids)} files ready to be calculated.\")\r\n\r\n self.preprocess_ids = list(set([id[-8:-4].upper() for id in glob.glob(\r\n \".\/pdb\/*\/*.pdb\") if \"out\" not in id]) - set(self.finished_ids) - set(self.ready_ids))\r\n logger.debug(\r\n f\"{len(self.preprocess_ids)} files ready to be preprocessed.\")\r\n\r\n self.unzip_ids = [id[-11:-7].upper() for id in glob.glob(\".\/*.ent.gz\")]\r\n logger.debug(f\"{len(self.unzip_ids)} files ready to be unzipped.\")\r\n\r\n if not os.path.exists(\".\/annotation\/uniprot_id_mapping.dat\"):\r\n with urlopen(\"ftp:\/\/ftp.uniprot.org\/pub\/databases\/uniprot\/current_release\/knowledgebase\/idmapping\/by_organism\/HUMAN_9606_idmapping.dat.gz\") as remotefile:\r\n logger.debug(\r\n \"Saving UniProt ID mapping data since it doesn't exist...\")\r\n with open(\".\/annotation\/uniprot_id_mapping.dat.gz\", \"wb\") as f:\r\n f.write(remotefile.read())\r\n with gzip.open(\r\n \".\/annotation\/uniprot_id_mapping.dat.gz\", \"rb\") as inFile, open(\r\n \".\/annotation\/uniprot_id_mapping.dat\", \"wb\") as outFile:\r\n shutil.copyfileobj(inFile, outFile)\r\n os.remove(\".\/annotation\/uniprot_id_mapping.dat.gz\")\r\n else:\r\n logger.debug(\"UniProt ID mapping data exists.\")\r\n\r\n logger.debug(\"Reading all possible PDB IDs...\")\r\n annot = pd.read_csv(\".\/annotation\/uniprot_id_mapping.dat\",\r\n sep=\"\\t\", header=None,\r\n names=[\"uniprot\", \"id\", \"value\"])\r\n self.all_ids = annot.loc[annot.id == \"PDB\", \"value\"].tolist()\r\n self.download_ids = list(set(self.all_ids) - set(self.unzip_ids) - set(\r\n self.preprocess_ids) - set(self.ready_ids) - set(self.finished_ids))\r\n logger.info(\r\n f\"{len(self.download_ids)} PDB files need to be downloaded.\")\r\n\r\n def get_link(self, ids):\r\n \"\"\" Get PDB file links from:\r\n ftp:\/\/ftp.wwpdb.org\/pub\/pdb\/data\/structures\/divided\/pdb\/ ,\r\n and create folders to store the files.\r\n\r\n Parameters\r\n ----------\r\n ids: list\r\n The PDB IDs to download.\r\n\r\n Returns\r\n -------\r\n Links to download.\r\n \"\"\"\r\n if isinstance(ids, list):\r\n ids = [id[:4].lower() for id in ids] # pdb file IDs\r\n pdb_names = [f\"{id}.ent.gz\" for id in ids] # pdb filenames\r\n # subdirectory of the pdb files\r\n pdbDirs = [id[1:3].lower() for id in ids]\r\n remoteaddr = [\r\n f\"ftp:\/\/ftp.wwpdb.org\/pub\/pdb\/data\/structures\/divided\/pdb\/{pdbDir}\/pdb{pdb_name}\" for pdbDir, pdb_name in zip(pdbDirs, pdb_names)]\r\n else:\r\n raise TypeError(f\"{id} is not a string or list.\")\r\n return remoteaddr\r\n\r\n def make_dirs(self, ids):\r\n \"\"\"Make sure the download directory exists.\"\"\"\r\n for id in ids:\r\n try:\r\n os.makedirs(os.path.join(ROOTPATH, \"pdb\", id.upper()))\r\n except OSError:\r\n pass\r\n\r\n async def download_worker(self, session, url):\r\n \"\"\"Download the given url to working directory.\"\"\"\r\n url = url[len(\"ftp:\/\/ftp.wwpdb.org\"):]\r\n logger.debug(f\"Downloading {url}\")\r\n try:\r\n await session.download(url)\r\n self.unzip_ids.append(url[-11:-7].upper())\r\n except Exception as 
e:\r\n self.error_ids.append(url[-11:-7].upper())\r\n logger.warning(f\"Error when downloading {url}: {e}\")\r\n\r\n async def download_session(self, sem, work_queue):\r\n \"\"\" Get urls from the queue and pass to worker.\r\n\r\n Parameters\r\n ----------\r\n sem: asyncio.Semaphore object\r\n work_queue: asyncio.Queue object\r\n \"\"\"\r\n while not work_queue.empty():\r\n url = await work_queue.get()\r\n logger.debug(f\"Got url from queue: {url}\")\r\n async with sem:\r\n async with aioftp.ClientSession(\"ftp.wwpdb.org\") as session:\r\n await self.download_worker(session, url)\r\n\r\n def download_queue(self, urls):\r\n \"\"\" Create a queue to download all the given urls.\r\n\r\n Parameters\r\n ----------\r\n urls: list\r\n A list of urls to download.\r\n\r\n Returns\r\n -------\r\n Downloaded \"*.ent.gz\" files in working directory.\r\n \"\"\"\r\n logger.debug(f\"{len(urls)} urls to download.\")\r\n loop = uvloop.new_event_loop()\r\n asyncio.set_event_loop(loop)\r\n q = asyncio.Queue()\r\n sem = asyncio.Semaphore(10)\r\n [q.put_nowait(url) for url in urls]\r\n tasks = [asyncio.ensure_future(self.download_session(sem, q))\r\n for _ in range(len(urls))]\r\n loop.run_until_complete(asyncio.gather(*tasks))\r\n # Zero-sleep to allow underlying connections to close\r\n loop.run_until_complete(asyncio.sleep(0))\r\n loop.close()\r\n\r\n def check_mcce(self):\r\n \"\"\"Check if MCCE 3.0 exists.\"\"\"\r\n if not os.path.exists(os.path.join(ROOTPATH, \"mcce3.0\")):\r\n if not os.path.exists(os.path.join(ROOTPATH, \"mcce3.0.tar.bz2\")):\r\n logger.debug(\"MCCE isn't downloaded yet. Retrieving...\")\r\n with urlopen(\"https:\/\/anaconda.org\/SalahSalah\/mcce\/3.0\/download\/linux-64\/mcce-3.0-0.tar.bz2\") as remotefile:\r\n with open(\".\/mcce-3.0-0.tar.bz2\", 'wb') as f:\r\n f.write(remotefile.read())\r\n subprocess.run([\"tar\", \"-xjf\", \"mcce-3.0-0.tar.bz2\"])\r\n shutil.move(\".\/info\/recipe\/mcce3.0\", \".\/mcce3.0\")\r\n shutil.rmtree(os.path.join(ROOTPATH, \"info\"), ignore_errors=True)\r\n shutil.rmtree(os.path.join(ROOTPATH, \"bin\"), ignore_errors=True)\r\n else:\r\n logger.info(\"MCCE 3.0 exists, proceeding to calculation...\")\r\n\r\n def unzip(self, id):\r\n \"\"\"Unzip downloaded *.ent.gz file.\"\"\"\r\n try:\r\n saved_pdb = os.path.join(ROOTPATH, \"pdb\", id, f\"{id}.pdb\")\r\n with gzip.open(f\"pdb{id.lower()}.ent.gz\", \"rb\") as inFile, open(saved_pdb, \"wb\") as outFile:\r\n shutil.copyfileobj(inFile, outFile)\r\n os.remove(f\"pdb{id.lower()}.ent.gz\")\r\n self.preprocess_ids.append(id)\r\n except Exception as e:\r\n self.error_ids.append(id)\r\n logger.warning(f\"Unzip of {id} unsuccessful: {e}\")\r\n\r\n def preprocess(self, id, backup=True):\r\n \"\"\"\r\n This program will:\r\n 1) strip lines other than ATOM and HETATM records\r\n 2) keep the first model of an NMR structure\r\n 3) delete H and D atoms\r\n 4) MSE to MET residue\r\n 5) keep only one atom alternate position\r\n 6) keep defined chains, if chain ID(s) are given in command\r\n 7) remove some cofactors and salt ions\r\n\r\n Parameters\r\n ----------\r\n id: str\r\n The PDB ID to find the file.\r\n backup: bool, optional\r\n Whether to backup the original file or not. 
Default is True,\r\n and save to \"original.bak\".\r\n\r\n Returns\r\n -------\r\n Nothing, modify the file in place.\r\n \"\"\"\r\n removable_res = [\r\n \" ZN\", \"PCA\", \"XYP\", \" NA\", \" CL\", \" CA\", \" MG\", \" MN\", \"HOH\"\r\n ]\r\n model_start = False\r\n newlines = []\r\n ID = id.upper()\r\n filepath = os.path.join(ROOTPATH, \"pdb\", ID, f\"{ID}.pdb\")\r\n if backup:\r\n shutil.copy2(filepath, f\"{filepath}.bak\")\r\n with open(filepath) as f:\r\n for line in f:\r\n if line[:5] == \"MODEL\":\r\n model_start = True\r\n if model_start and line[:6] == \"ENDMDL\":\r\n break\r\n if line[:6] != \"ATOM \" and line[:6] != \"HETATM\":\r\n continue # discard non ATOM records\r\n if line[13] == \"H\" or line[12] == \"H\":\r\n continue\r\n if line[16] == \"A\":\r\n line = f\"{line[:16]} {line[17:]}\"\r\n elif line[16] != \" \":\r\n continue # delete this line, alternative posion is not A or empty\r\n if line[:6] == \"HETATM\" and line[17:20] == \"MSE\":\r\n if line[12:15] == \"SE \":\r\n line = f\"ATOM {line[6:12]} SD{line[15:17]}MET{line[20:]}\"\r\n else:\r\n line = f\"ATOM {line[6:17]}MET{line[20:]}\"\r\n res = line[17:20]\r\n if res in removable_res:\r\n continue\r\n newlines.append(line.rstrip())\r\n with open(filepath, \"w\") as f:\r\n f.write(\"\\n\".join(newlines))\r\n logger.debug(f\"{ID} preprocessing complete.\")\r\n\r\n def set_params(self, id, quickrun=True):\r\n \"\"\"\r\n Set the parameters for MCCE.\r\n\r\n Parameters\r\n ----------\r\n id: str\r\n The PDB ID of the file.\r\n quickrun: bool, optional\r\n Use \"run.prm.quick\" or \"run.prm.default\".\r\n\r\n Returns\r\n -------\r\n run.prm: a file describing the parameters that points to the PDB file.\r\n \"\"\"\r\n pkgpath = os.path.join(ROOTPATH, \"mcce3.0\")\r\n ID = id.upper()\r\n filepath = os.path.join(ROOTPATH, \"pdb\", ID)\r\n newlines = []\r\n if quickrun:\r\n shutil.copy2(\r\n os.path.join(pkgpath, \"run.prm.quick\"),\r\n os.path.join(filepath, \"run.prm\")\r\n )\r\n else:\r\n shutil.copy2([\r\n os.path.join(pkgpath, \"run.prm.default\"),\r\n os.path.join(filepath, \"run.prm\")\r\n ])\r\n with open(os.path.join(filepath, \"run.prm\")) as f:\r\n for line in f:\r\n line = line.rstrip()\r\n if line.endswith(\"(INPDB)\"):\r\n line = re.sub(r\"^[^\\s]+\", fr\"{id}.pdb\", line)\r\n if line.endswith((\"(DO_PREMCCE)\", \"(DO_ROTAMERS)\",\r\n \"(DO_ENERGY)\", \"(DO_MONTE)\")):\r\n line = re.sub(r\"^f\", r\"t\", line)\r\n if line.endswith(\"(EPSILON_PROT)\"):\r\n line = re.sub(r\"^[\\d\\.]+\", r\"8.0\", line)\r\n if line.startswith(\"\/home\/mcce\/mcce3.0\"):\r\n line = re.sub(r\"^\/.*3\\.0\", pkgpath,\r\n line)\r\n newlines.append(line)\r\n with open(os.path.join(filepath, \"run.prm\"), \"w\") as f:\r\n f.write(\"\\n\".join(newlines))\r\n self.ready_ids.append(ID)\r\n logger.debug(f\"Parameters set for {ID}.\")\r\n\r\n def split_ready_ids(self, num):\r\n \"\"\" A naive queue implementation for multiple scripts.\r\n\r\n Parameters\r\n ----------\r\n num: int\r\n Which part of the IDs to work on.\r\n\r\n Returns\r\n -------\r\n A list of the actual IDs to work on, and save the lists of IDs for\r\n other scripts to work with if this is the first instance.\r\n \"\"\"\r\n if os.path.isfile(os.path.join(ROOTPATH, \"results\", \"working_ids.list\")):\r\n with open(os.path.join(ROOTPATH, \"results\", f\"working_ids.list{num}\"), \"r\") as f:\r\n self.working_ids = [line.strip() for line in f]\r\n else:\r\n n = math.ceil(len(self.ready_ids) \/ 10)\r\n self.working_ids = [self.ready_ids[i:i + n]\r\n for i in range(0, 
len(self.ready_ids), n)]\r\n metafile = []\r\n for i, ids in enumerate(self.working_ids):\r\n metafile.append(os.path.join(\r\n ROOTPATH, \"results\", f\"working_ids.list{i}\"))\r\n with open(os.path.join(ROOTPATH, \"results\", f\"working_ids.list{i}\"), \"w\") as f:\r\n f.write(\"\\n\".join(ids))\r\n logger.debug(\r\n f\"Saved {len(ids)} IDs to file working_ids.list{i} .\")\r\n with open(os.path.join(ROOTPATH, \"results\", \"working_ids.list\"), \"w\") as f:\r\n f.write(\"\\n\".join(metafile))\r\n self.working_ids = self.working_ids[num]\r\n\r\n def calc_pka(self, id, clean=True):\r\n \"\"\" Calculate protein pKa values using MCCE.\r\n https:\/\/sites.google.com\/site\/mccewiki\/home\r\n\r\n Parameters\r\n ----------\r\n id: str\r\n The PDB ID of the protein calculated.\r\n clean: bool, optional\r\n Only keep the PDB file, run log and pKa output.\r\n\r\n Returns\r\n -------\r\n A set of files in a subdirectory named after the ID.\r\n See user manual for detail.\r\n \"\"\"\r\n id = id.upper()\r\n os.chdir(os.path.realpath(os.path.join(ROOTPATH, \"pdb\", id)))\r\n logger.info(f\"{id} calculation started.\")\r\n start = time.time()\r\n with open(f\"{id}.run.log\", \"w\") as f:\r\n subprocess.run(f\"{ROOTPATH}\/mcce3.0\/mcce\", stdout=f)\r\n with open(f\"{id}.run.log\", \"rb\") as f:\r\n last = f.readlines()[-1].decode().lstrip()\r\n if last.startswith((\"Fatal\", \"FATAL\", \"WARNING\", \"STOP\")):\r\n self.error_ids.append(id)\r\n logger.warning(\r\n f\"{id} calculation aborted after {time.time() - start}s, due to {last}\")\r\n else:\r\n self.finished_ids.append(id)\r\n logger.info(\r\n f\"{id} calculation finished, used {time.time() - start}s.\")\r\n shutil.move(\"pK.out\", os.path.join(\r\n ROOTPATH, \"results\", f\"{id}.pka\"))\r\n if clean:\r\n del_list = [i for i in os.listdir() if i not in (\r\n \"pK.out\", f\"{id}.run.log\", f\"{id}.pdb.bak\")]\r\n [os.remove(item) for item in del_list]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n x = pdb()\r\n x.load_id()\r\n urls = x.get_link(x.download_ids)\r\n x.make_dirs(x.all_ids)\r\n x.download_queue(urls)\r\n\r\n x.check_mcce()\r\n for id in x.unzip_ids:\r\n x.unzip(id)\r\n for id in x.preprocess_ids:\r\n try:\r\n x.preprocess(id)\r\n x.set_params(id)\r\n except Exception as e:\r\n x.error_ids.append(id)\r\n logger.warning(f\"Preprocess of {id}: {e}\")\r\n # subprocess.run([\"find\", \".\", \"-type\", \"d\", \"-empty\", \"-delete\"])\r\n\r\n x.split_ready_ids(0) # 0 - 9, run 0 first to generate other lists\r\n\r\n with Pool(os.cpu_count()) as p:\r\n p.map(x.calc_pka, x.working_ids)\r\n\r\n with open(\".\/results\/finished_ids.list\", \"a\") as f:\r\n f.write(\"\\n\".join(x.working_ids))\r\n\r\n with open(\".\/results\/error_ids.list\", \"a\") as f:\r\n f.write(\"\\n\".join(x.error_ids))\r\n","license":"apache-2.0"} {"repo_name":"smsolivier\/VEF","path":"code\/hlimit.py","copies":"1","size":"2247","content":"#!\/usr\/bin\/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport ld as LD \nimport dd as DD \n\nfrom hidespines import * \n\nimport sys \n\n''' compares difference between Sn and moment equations as cell width --> 0 ''' \n\nif (len(sys.argv) > 1):\n\toutfile = sys.argv[1] \nelse:\n\toutfile = None \n\ndef getDiff(sol, tol=1e-6):\n\n\tdiff = np.zeros(len(sol))\n\tfor i in range(len(sol)):\n\n\t\tx, phi, it = sol[i].sourceIteration(tol)\n\n\t\t# diff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)\/np.linalg.norm(sol[i].phi_SN, 2)\n\t\tdiff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)\/np.linalg.norm(sol[i].phi_SN, 
2)\n\n\treturn diff \n\nN = 100 \nn = 8 \nxb = 1\n\nSigmaa = lambda x: .1 \nSigmat = lambda x: 1 \nq = lambda x, mu: 1 \n\ntol = 1e-10\n\nN = np.logspace(1, 3, 5) \n\nN = np.array([int(x) for x in N])\n\ned00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa, \n\tSigmat, q, OPT=0, GAUSS=0) for x in N]\n\ned01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa, \n\tSigmat, q, OPT=0, GAUSS=1) for x in N]\n\ned10 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa, \n\tSigmat, q, OPT=1, GAUSS=0) for x in N]\n\ned11 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa, \n\tSigmat, q, OPT=1, GAUSS=1) for x in N]\n\ned20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa, \n\tSigmat, q, OPT=2, GAUSS=0) for x in N]\n\ned21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa, \n\tSigmat, q, OPT=2, GAUSS=1) for x in N]\n\ndiff00 = getDiff(ed00, tol)\ndiff01 = getDiff(ed01, tol)\ndiff10 = getDiff(ed10, tol)\ndiff11 = getDiff(ed11, tol)\ndiff20 = getDiff(ed20, tol)\ndiff21 = getDiff(ed21, tol)\n\nfontsize=16\nplt.loglog(xb\/N, diff00, '-o', clip_on=False, label='MHFEM Edges, No Gauss')\nplt.loglog(xb\/N, diff01, '-o', clip_on=False, label='Maintain Slopes, No Gauss')\nplt.loglog(xb\/N, diff10, '-o', clip_on=False, label='MHFEM Edges, Gauss')\nplt.loglog(xb\/N, diff11, '-o', clip_on=False, label='Maintain Slopes, Gauss')\nplt.loglog(xb\/N, diff20, '-o', clip_on=False, label='vanLeer, No Gauss')\nplt.loglog(xb\/N, diff21, '-o', clip_on=False, label='vanLeer, Gauss')\nplt.xlabel(r'$h$', fontsize=fontsize)\nplt.ylabel('SN\/MHFEM Convergence', fontsize=fontsize)\nplt.legend(loc='best', frameon=False)\nhidespines(plt.gca())\nif (outfile != None):\n\tplt.savefig(outfile, transparent=True)\nelse:\n\tplt.show()\n\n\n","license":"mit"} {"repo_name":"phobson\/wqio","path":"wqio\/tests\/test_datacollections.py","copies":"2","size":"28761","content":"from distutils.version import LooseVersion\nfrom textwrap import dedent\nfrom io import StringIO\n\nimport numpy\nimport scipy\nfrom scipy import stats\nimport pandas\n\nfrom unittest import mock\nimport pytest\nimport pandas.testing as pdtest\nfrom wqio.tests import helpers\n\nfrom wqio.features import Location, Dataset\nfrom wqio.datacollections import DataCollection, _dist_compare\n\n\nOLD_SCIPY = LooseVersion(scipy.version.version) < LooseVersion(\"0.19\")\n\n\ndef check_stat(expected_csv, result, comp=False):\n index_col = [0]\n if comp:\n index_col += [1]\n\n file_obj = StringIO(dedent(expected_csv))\n expected = pandas.read_csv(file_obj, header=[0, 1], index_col=index_col)\n\n if comp:\n expected = expected.stack(level=-1)\n\n pdtest.assert_frame_equal(\n expected.sort_index(axis=\"columns\"),\n result.sort_index(axis=\"columns\").round(6),\n atol=1e-5,\n )\n\n\ndef remove_g_and_h(group):\n return group.name[1] not in [\"G\", \"H\"]\n\n\n@pytest.fixture\ndef dc():\n df = helpers.make_dc_data_complex()\n dc = DataCollection(\n df,\n rescol=\"res\",\n qualcol=\"qual\",\n stationcol=\"loc\",\n paramcol=\"param\",\n ndval=\"<\",\n othergroups=None,\n pairgroups=[\"state\", \"bmp\"],\n useros=True,\n filterfxn=remove_g_and_h,\n bsiter=10000,\n )\n\n return dc\n\n\n@pytest.fixture\ndef dc_noNDs():\n df = helpers.make_dc_data_complex()\n dc = DataCollection(\n df,\n rescol=\"res\",\n qualcol=\"qual\",\n stationcol=\"loc\",\n paramcol=\"param\",\n ndval=\"junk\",\n othergroups=None,\n pairgroups=[\"state\", \"bmp\"],\n useros=True,\n filterfxn=remove_g_and_h,\n bsiter=10000,\n )\n\n return dc\n\n\ndef test_basic_attr(dc):\n assert dc._raw_rescol == \"res\"\n assert 
isinstance(dc.data, pandas.DataFrame)\n assert dc.roscol == \"ros_res\"\n assert dc.rescol == \"ros_res\"\n assert dc.qualcol == \"qual\"\n assert dc.stationcol == \"loc\"\n assert dc.paramcol == \"param\"\n assert dc.ndval == [\"<\"]\n assert dc.bsiter == 10000\n assert dc.groupcols == [\"loc\", \"param\"]\n assert dc.tidy_columns == [\"loc\", \"param\", \"res\", \"__censorship\"]\n assert hasattr(dc, \"filterfxn\")\n\n\ndef test_data(dc):\n assert isinstance(dc.data, pandas.DataFrame)\n assert dc.data.shape == (519, 8)\n assert \"G\" in dc.data[\"param\"].unique()\n assert \"H\" in dc.data[\"param\"].unique()\n\n\n@pytest.mark.parametrize(\"useros\", [True, False])\ndef test_tidy(dc, useros):\n assert isinstance(dc.tidy, pandas.DataFrame)\n assert dc.tidy.shape == (388, 5)\n assert \"G\" not in dc.tidy[\"param\"].unique()\n assert \"H\" not in dc.tidy[\"param\"].unique()\n collist = [\"loc\", \"param\", \"res\", \"__censorship\", \"ros_res\"]\n assert dc.tidy.columns.tolist() == collist\n\n\ndef test_paired(dc):\n assert isinstance(dc.paired, pandas.DataFrame)\n assert dc.paired.shape == (164, 6)\n assert \"G\" not in dc.paired.index.get_level_values(\"param\").unique()\n assert \"H\" not in dc.paired.index.get_level_values(\"param\").unique()\n dc.paired.columns.tolist() == [\n (\"res\", \"Inflow\"),\n (\"res\", \"Outflow\"),\n (\"res\", \"Reference\"),\n (\"__censorship\", \"Inflow\"),\n (\"__censorship\", \"Outflow\"),\n (\"__censorship\", \"Reference\"),\n ]\n\n\ndef test_count(dc):\n known_csv = \"\"\"\\\n station,Inflow,Outflow,Reference\n result,Count,Count,Count\n param,,,\n A,21,22,20\n B,24,22,19\n C,24,24,25\n D,24,25,21\n E,19,16,20\n F,21,24,17\n \"\"\"\n check_stat(known_csv, dc.count)\n\n\ndef test_n_unique(dc):\n known_csv = \"\"\"\\\n loc,Inflow,Outflow,Reference\n result,bmp,bmp,bmp\n param,,,\n A,7,7,7\n B,7,7,7\n C,7,7,7\n D,7,7,7\n E,7,7,7\n F,7,7,7\n G,7,7,7\n H,7,7,7\n \"\"\"\n check_stat(known_csv, dc.n_unique(\"bmp\"))\n\n\n@helpers.seed\ndef test_median(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference\n result,lower,median,upper,lower,median,upper,lower,median,upper\n param,,,,,,,,,\n A,0.334506,1.197251,2.013994,0.860493,2.231058,2.626023,1.073386,1.639472,1.717293\n B,1.366948,2.773989,3.297147,0.23201,1.546499,2.579206,0.204164,1.565076,2.196367\n C,0.17351,0.525957,0.68024,0.247769,0.396984,0.540742,0.136462,0.412693,0.559458\n D,0.374122,1.201892,2.098846,0.516989,1.362759,1.827087,0.314655,0.882695,1.24545\n E,0.276095,1.070858,1.152887,0.287914,0.516746,1.456859,0.366824,0.80716,2.040739\n F,0.05667,0.832488,1.310575,0.425237,1.510942,2.193997,0.162327,0.745993,1.992513\n \"\"\"\n check_stat(known_csv, dc.median)\n\n\n@helpers.seed\ndef test_mean(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference\n result,lower,mean,upper,lower,mean,upper,lower,mean,upper\n param,,,,,,,,,\n A,1.231607,2.646682,4.204054,1.930601,5.249281,9.081952,1.540167,3.777974,6.389439\n B,2.99031,7.647175,12.810844,1.545539,6.863835,12.705913,1.010374,4.504255,9.592572\n C,0.37496,0.513248,0.65948,0.411501,1.004637,1.706317,0.35779,0.541962,0.734751\n D,1.29141,3.021235,4.987855,1.285899,2.318808,3.451824,1.008364,1.945828,2.924812\n E,0.818641,1.914696,3.049554,0.584826,1.098241,1.640807,1.113589,2.283292,3.581946\n F,0.8379,9.825404,25.289933,1.497825,3.450184,5.61929,0.939917,2.491708,4.094258\n \"\"\"\n check_stat(known_csv, dc.mean)\n\n\n@helpers.seed\ndef 
test_std_dev(dc):\n known_csv = \"\"\"\\\n station,Inflow,Outflow,Reference\n result,std. dev.,std. dev.,std. dev.\n param,,,\n A,3.58649,8.719371,5.527633\n B,12.360099,13.60243,10.759285\n C,0.353755,1.691208,0.493325\n D,4.811938,2.849393,2.248178\n E,2.55038,1.096698,2.789238\n F,34.447565,5.361033,3.398367\n \"\"\"\n check_stat(known_csv, dc.std_dev)\n\n\n@helpers.seed\ndef test_percentile_25(dc):\n known_csv = \"\"\"\\\n station,Inflow,Outflow,Reference\n result,pctl 25,pctl 25,pctl 25\n param,,,\n A,0.522601,0.906029,1.094721\n B,1.472541,0.251126,0.314226\n C,0.164015,0.267521,0.136462\n D,0.35688,0.516989,0.383895\n E,0.364748,0.311508,0.394658\n F,0.120068,0.406132,0.224429\n \"\"\"\n check_stat(known_csv, dc.percentile(25))\n\n\n@helpers.seed\ndef test_percentile_75(dc):\n known_csv = \"\"\"\\\n station,Inflow,Outflow,Reference\n result,pctl 75,pctl 75,pctl 75\n param,,,\n A,2.563541,3.838021,2.650648\n B,4.728871,2.849948,2.261847\n C,0.776388,0.853535,0.792612\n D,3.04268,2.79341,3.611793\n E,1.532775,1.59183,3.201534\n F,1.792985,2.80979,2.742249\n \"\"\"\n check_stat(known_csv, dc.percentile(75))\n\n\n@helpers.seed\ndef test_logmean(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference\n result,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper\n param,,,,,,,,,\n A,0.140559,-0.55112,0.644202,0.733004,0.047053,1.22099,0.545205,-0.057683,1.029948\n B,1.026473,0.368659,1.541241,0.105106,-0.939789,0.860244,0.068638,-0.932357,0.661203\n C,-0.963004,-1.304115,-0.638446,-0.83221,-1.464092,-0.414379,-1.088377,-1.556795,-0.720706\n D,0.062317,-0.663241,0.58349,0.185757,-0.325074,0.598432,-0.063507,-0.670456,0.434214\n E,-0.103655,-0.751075,0.385909,-0.456202,-1.08692,0.029967,-0.068135,-0.787007,0.51226\n F,-0.442721,-1.874677,0.344704,0.211658,-0.504166,0.734283,-0.253352,-1.175917,0.467231\n \"\"\"\n check_stat(known_csv, dc.logmean)\n\n\n@helpers.seed\ndef test_logstd_dev(dc):\n known_csv = \"\"\"\\\n station,Inflow,Outflow,Reference\n result,Log-std. dev.,Log-std. dev.,Log-std. dev.\n param,,,\n A,1.374026,1.343662,1.225352\n B,1.430381,2.07646,1.662001\n C,0.818504,1.263631,1.057177\n D,1.530871,1.187246,1.277927\n E,1.264403,1.121038,1.474431\n F,2.324063,1.516331,1.701596\n \"\"\"\n check_stat(known_csv, dc.logstd_dev)\n\n\n@helpers.seed\ndef test_geomean(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference\n Geo-mean,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper\n param,,,,,,,,,\n A,1.150917,0.576304,1.904467,2.081323,1.048178,3.390543,1.724962,0.943949,2.800919\n B,2.791205,1.445795,4.670381,1.110829,0.39071,2.363737,1.071049,0.393625,1.937121\n C,0.381744,0.271413,0.528113,0.435087,0.231288,0.66075,0.336763,0.210811,0.486409\n D,1.064299,0.515179,1.792283,1.204129,0.722474,1.819264,0.938467,0.511475,1.543749\n E,0.901536,0.471859,1.470951,0.633686,0.337254,1.03042,0.934134,0.455205,1.66906\n F,0.642286,0.153405,1.411572,1.235726,0.604009,2.083988,0.776195,0.308536,1.595571\n \"\"\"\n check_stat(known_csv, dc.geomean)\n\n\n@helpers.seed\ndef test_geostd_dev(dc):\n known_csv = \"\"\"\\\n station,Inflow,Outflow,Reference\n Geo-std. dev.,Log-std. dev.,Log-std. dev.,Log-std. 
dev.\n param,,,\n A,3.951225,3.833055,3.405365\n B,4.180294,7.976181,5.269843\n C,2.267105,3.538244,2.878234\n D,4.622199,3.278041,3.589191\n E,3.540977,3.068036,4.368548\n F,10.217099,4.55548,5.48269\n \"\"\"\n check_stat(known_csv, dc.geostd_dev)\n\n\n@helpers.seed\ndef test_shapiro(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Outflow,Outflow,Reference,Reference\n result,pvalue,statistic,pvalue,statistic,pvalue,statistic\n param,,,,,,\n A,1.8e-05,0.685783,1e-06,0.576069,4e-06,0.61735\n B,1e-06,0.594411,0.0,0.530962,0.0,0.41471\n C,0.028774,0.905906,0.0,0.546626,0.00279,0.860373\n D,1e-06,0.622915,1.5e-05,0.722374,0.000202,0.76518\n E,1.7e-05,0.654137,0.004896,0.818813,0.000165,0.74917\n F,0.0,0.292916,2e-06,0.634671,0.000167,0.713968\n \"\"\"\n check_stat(known_csv, dc.shapiro)\n\n\n@helpers.seed\ndef test_shapiro_log(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Outflow,Outflow,Reference,Reference\n result,statistic,pvalue,statistic,pvalue,statistic,pvalue\n param,,,,,,\n A,0.983521938,0.96662426,0.979861856,0.913820148,0.939460814,0.234214202\n B,0.957531095,0.390856266,0.97048676,0.722278714,0.967978418,0.735424638\n C,0.906479359,0.029602444,0.974698305,0.78197974,0.967106879,0.572929323\n D,0.989704251,0.995502174,0.990663111,0.997093379,0.964812279,0.617747009\n E,0.955088913,0.479993254,0.95211035,0.523841977,0.963425279,0.61430341\n F,0.97542423,0.847370088,0.982230783,0.933124721,0.966197193,0.749036908\n \"\"\"\n check_stat(known_csv, dc.shapiro_log)\n\n\n@helpers.seed\ndef test_lilliefors(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Outflow,Outflow,Reference,Reference\n result,lilliefors,pvalue,lilliefors,pvalue,lilliefors,pvalue\n param,,,,,,\n A,0.308131,1.4e-05,0.340594,0.0,0.364453,0.0\n B,0.36764,0.0,0.420343,0.0,0.417165,0.0\n C,0.166799,0.082737,0.324733,0.0,0.161753,0.090455\n D,0.273012,6.7e-05,0.240311,0.000665,0.296919,3.7e-05\n E,0.341398,3e-06,0.239314,0.014862,0.233773,0.005474\n F,0.419545,0.0,0.331315,0.0,0.284249,0.000741\n \"\"\"\n check_stat(known_csv, dc.lilliefors)\n\n\n@helpers.seed\ndef test_lilliefors_log(dc):\n known_csv = \"\"\"\\\n station,Inflow,Inflow,Outflow,Outflow,Reference,Reference\n result,log-lilliefors,pvalue,log-lilliefors,pvalue,log-lilliefors,pvalue\n param,,,,,,\n A,0.08548109,0.95458004,0.15443943,0.19715747,0.20141389,0.03268737\n B,0.16162839,0.10505016,0.12447902,0.49697902,0.15934334,0.22969362\n C,0.16957278,0.07248915,0.12388174,0.44379732,0.11746642,0.48915671\n D,0.06885549,0.99,0.06067356,0.99,0.13401954,0.41967483\n E,0.13506577,0.47186822,0.14552341,0.47797919,0.09164876,0.92860794\n F,0.14420794,0.30694533,0.08463267,0.92741885,0.08586933,0.9800294\n \"\"\"\n check_stat(known_csv, dc.lilliefors_log)\n\n\n@helpers.seed\ndef test_anderson_darling(dc):\n with helpers.raises(NotImplementedError):\n _ = dc.anderson_darling\n\n\n@helpers.seed\ndef test_anderson_darling_log(dc):\n with helpers.raises(NotImplementedError):\n _ = dc.anderson_darling_log\n\n\n@helpers.seed\ndef test_mann_whitney(dc):\n known_csv = \"\"\"\\\n ,,mann_whitney,mann_whitney,mann_whitney,pvalue,pvalue,pvalue\n loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,180.0,179.0,,0.2198330905,0.4263216587\n A,Outflow,282.0,,248.0,0.2198330905,,0.488580368\n A,Reference,241.0,192.0,,0.4263216587,0.488580368,\n B,Inflow,,345.0,317.0,,0.0766949991,0.0304383994\n B,Outflow,183.0,,216.0,0.0766949991,,0.8650586835\n B,Reference,139.0,202.0,,0.0304383994,0.8650586835,\n 
C,Inflow,,282.0,323.0,,0.9097070273,0.6527104406\n C,Outflow,294.0,,323.0,0.9097070273,,0.6527104406\n C,Reference,277.0,277.0,,0.6527104406,0.6527104406,\n D,Inflow,,285.0,263.0,,0.7718162376,0.8111960975\n D,Outflow,315.0,,293.0,0.7718162376,,0.5082395211\n D,Reference,241.0,232.0,,0.8111960975,0.5082395211,\n E,Inflow,,164.0,188.0,,0.7033493939,0.9663820218\n E,Outflow,140.0,,132.0,0.7033493939,,0.3813114322\n E,Reference,192.0,188.0,,0.9663820218,0.3813114322,\n F,Inflow,,201.0,172.0,,0.2505911218,0.8601783903\n F,Outflow,303.0,,236.0,0.2505911218,,0.4045186043\n F,Reference,185.0,172.0,,0.8601783903,0.4045186043\n \"\"\"\n check_stat(known_csv, dc.mann_whitney, comp=True)\n\n\n@helpers.seed\ndef test_t_test(dc):\n known_csv = \"\"\"\\\n ,,pvalue,pvalue,pvalue,t_test,t_test,t_test\n loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,0.2178424157,0.4563196599,,-1.2604458127,-0.7539785777\n A,Outflow,0.2178424157,,0.5240147979,1.2604458127,,0.643450194\n A,Reference,0.4563196599,0.5240147979,,0.7539785777,-0.643450194,\n B,Inflow,,0.8430007638,0.3898358794,,0.1992705833,0.869235357\n B,Outflow,0.8430007638,,0.5491097882,-0.1992705833,,0.6043850808\n B,Reference,0.3898358794,0.5491097882,,-0.869235357,-0.6043850808,\n C,Inflow,,0.1847386316,0.8191392537,,-1.3639360123,-0.2300373632\n C,Outflow,0.1847386316,,0.2179907667,1.3639360123,,1.2615982727\n C,Reference,0.8191392537,0.2179907667,,0.2300373632,-1.2615982727,\n D,Inflow,,0.5484265023,0.344783812,,0.6056706932,0.9582600001\n D,Outflow,0.5484265023,,0.6299742693,-0.6056706932,,0.4851636024\n D,Reference,0.344783812,0.6299742693,,-0.9582600001,-0.4851636024,\n E,Inflow,,0.2304569921,0.6770414622,,1.2287029977,-0.4198288251\n E,Outflow,0.2304569921,,0.1023435465,-1.2287029977,,-1.6935358498\n E,Reference,0.6770414622,0.1023435465,,0.4198288251,1.6935358498,\n F,Inflow,,0.422008391,0.3549979666,,0.8190789273,0.9463539528\n F,Outflow,0.422008391,,0.4988994144,-0.8190789273,,0.6826435968\n F,Reference,0.3549979666,0.4988994144,,-0.9463539528,-0.6826435968\n \"\"\"\n check_stat(known_csv, dc.t_test, comp=True)\n\n\n@helpers.seed\ndef test_levene(dc):\n known_csv = \"\"\"\\\n ,,levene,levene,levene,pvalue,pvalue,pvalue\n loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,1.176282059,0.293152155,,0.284450688,0.591287419\n A,Outflow,1.176282059,,0.397705309,0.284450688,,0.531863542\n A,Reference,0.293152155,0.397705309,,0.591287419,0.531863542,\n B,Inflow,,0.003559637,0.402002411,,0.952694449,0.529578712\n B,Outflow,0.003559637,,0.408938588,0.952694449,,0.526247443\n B,Reference,0.402002411,0.408938588,,0.529578712,0.526247443,\n C,Inflow,,1.965613561,0.679535532,,0.167626459,0.413910674\n C,Outflow,1.965613561,,1.462364363,0.167626459,,0.232602352\n C,Reference,0.679535532,1.462364363,,0.413910674,0.232602352,\n D,Inflow,,0.643364813,0.983777911,,0.426532092,0.32681669\n D,Outflow,0.643364813,,0.116830634,0.426532092,,0.734124856\n D,Reference,0.983777911,0.116830634,,0.32681669,0.734124856,\n E,Inflow,,0.961616536,0.410491665,,0.333914902,0.525668596\n E,Outflow,0.961616536,,2.726351564,0.333914902,,0.107912818\n E,Reference,0.410491665,2.726351564,,0.525668596,0.107912818,\n F,Inflow,,0.841984453,0.734809611,,0.363948105,0.396999375\n F,Outflow,0.841984453,,0.25881357,0.363948105,,0.613802541\n F,Reference,0.734809611,0.25881357,,0.396999375,0.613802541,\n \"\"\"\n check_stat(known_csv, dc.levene, comp=True)\n\n\n@helpers.seed\ndef test_wilcoxon(dc):\n known_csv = 
\"\"\"\\\n ,,wilcoxon,wilcoxon,wilcoxon,pvalue,pvalue,pvalue\n loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,32.0,59.0,,0.03479,0.430679\n A,Outflow,32.0,,46.0,0.03479,,0.274445\n A,Reference,59.0,46.0,,0.430679,0.274445,\n B,Inflow,,38.0,22.0,,0.600179,0.182338\n B,Outflow,38.0,,31.0,0.600179,,0.858863\n B,Reference,22.0,31.0,,0.182338,0.858863,\n C,Inflow,,75.0,120.0,,0.167807,0.601046\n C,Outflow,75.0,,113.0,0.167807,,0.463381\n C,Reference,120.0,113.0,,0.601046,0.463381,\n D,Inflow,,44.0,31.0,,0.593618,0.530285\n D,Outflow,44.0,,45.0,0.593618,,0.972125\n D,Reference,31.0,45.0,,0.530285,0.972125,\n E,Inflow,,21.0,19.0,,0.910156,0.386271\n E,Outflow,21.0,,16.0,0.910156,,0.077148\n E,Reference,19.0,16.0,,0.386271,0.077148,\n F,Inflow,,62.0,22.0,,0.492459,0.952765\n F,Outflow,62.0,,28.0,0.492459,,0.656642\n F,Reference,22.0,28.0,,0.952765,0.656642,\n \"\"\"\n with pytest.warns(UserWarning):\n check_stat(known_csv, dc.wilcoxon, comp=True)\n\n\n@helpers.seed\ndef test_ranksums(dc):\n known_csv = \"\"\"\\\n ,,pvalue,pvalue,pvalue,rank_sums,rank_sums,rank_sums\n loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,0.2153009,0.4187782,,-1.2391203,-0.8085428\n A,Outflow,0.2153009,,0.4807102,1.2391203,,0.7051607\n A,Reference,0.4187782,0.4807102,,0.8085428,-0.7051607,\n B,Inflow,,0.0748817,0.029513,,1.781188,2.1765661\n B,Outflow,0.0748817,,0.8547898,-1.781188,,0.1830104\n B,Reference,0.029513,0.8547898,,-2.1765661,-0.1830104,\n C,Inflow,,0.9015386,0.6455162,,-0.1237179,0.46\n C,Outflow,0.9015386,,0.6455162,0.1237179,,0.46\n C,Reference,0.6455162,0.6455162,,-0.46,-0.46,\n D,Inflow,,0.7641772,0.8023873,,-0.3,0.2502587\n D,Outflow,0.7641772,,0.5011969,0.3,,0.6726078\n D,Reference,0.8023873,0.5011969,,-0.2502587,-0.6726078,\n E,Inflow,,0.6911022,0.9551863,,0.3973597,-0.0561951\n E,Outflow,0.6911022,,0.3727144,-0.3973597,,-0.8914004\n E,Reference,0.9551863,0.3727144,,0.0561951,0.8914004,\n F,Inflow,,0.2459307,0.8486619,,-1.1602902,-0.190826\n F,Outflow,0.2459307,,0.3971011,1.1602902,,0.8468098\n F,Reference,0.8486619,0.3971011,,0.190826,-0.8468098,\n \"\"\"\n check_stat(known_csv, dc.ranksums, comp=True)\n\n\n@helpers.seed\n@pytest.mark.xfail(OLD_SCIPY, reason=\"Scipy < 0.19\")\ndef test_kendall(dc):\n known_csv = \"\"\"\\\n ,,kendalltau,kendalltau,kendalltau,pvalue,pvalue,pvalue\n loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,-0.051661,-0.00738,,0.772893,0.967114\n A,Outflow,-0.051661,,-0.083333,0.772893,,0.690095\n A,Reference,-0.00738,-0.083333,,0.967114,0.690095,\n B,Inflow,,0.441351,0.298246,,0.015267,0.119265\n B,Outflow,0.441351,,0.559855,0.015267,,0.004202\n B,Reference,0.298246,0.559855,,0.119265,0.004202,\n C,Inflow,,0.280223,0.084006,,0.078682,0.578003\n C,Outflow,0.280223,,-0.1417,0.078682,,0.352394\n C,Reference,0.084006,-0.1417,,0.578003,0.352394,\n D,Inflow,,0.403469,0.095299,,0.020143,0.634826\n D,Outflow,0.403469,,0.318337,0.020143,,0.094723\n D,Reference,0.095299,0.318337,,0.634826,0.094723,\n E,Inflow,,0.114286,0.640703,,0.673337,0.004476\n E,Outflow,0.114286,,0.167944,0.673337,,0.449603\n E,Reference,0.640703,0.167944,,0.004476,0.449603,\n F,Inflow,,0.0,0.07231,,1.0,0.763851\n F,Outflow,0.0,,0.388889,1.0,,0.063\n F,Reference,0.07231,0.388889,,0.763851,0.063,\n \"\"\"\n check_stat(known_csv, dc.kendall, comp=True)\n\n\n@helpers.seed\ndef test_spearman(dc):\n known_csv = \"\"\"\\\n ,,pvalue,pvalue,pvalue,spearmanrho,spearmanrho,spearmanrho\n 
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference\n param,loc_1,,,,,,\n A,Inflow,,0.7574884491,0.9627447553,,-0.0809319588,0.012262418\n A,Outflow,0.7574884491,,0.7617330788,-0.0809319588,,-0.0823529412\n A,Reference,0.9627447553,0.7617330788,,0.012262418,-0.0823529412,\n B,Inflow,,0.0110829791,0.0775159774,,0.5831305575,0.4537313433\n B,Outflow,0.0110829791,,0.0024069317,0.5831305575,,0.6850916941\n B,Reference,0.0775159774,0.0024069317,,0.4537313433,0.6850916941,\n C,Inflow,,0.1330504059,0.6063501968,,0.3387640122,0.1134228342\n C,Outflow,0.1330504059,,0.3431640379,0.3387640122,,-0.2070506455\n C,Reference,0.6063501968,0.3431640379,,0.1134228342,-0.2070506455,\n D,Inflow,,0.0195715066,0.4751861062,,0.4935814032,0.1858231711\n D,Outflow,0.0195715066,,0.1263974782,0.4935814032,,0.363209462\n D,Reference,0.4751861062,0.1263974782,,0.1858231711,0.363209462,\n E,Inflow,,0.9828818202,0.0013596162,,0.0084033613,0.8112988341\n E,Outflow,0.9828818202,,0.3413722947,0.0084033613,,0.3012263814\n E,Reference,0.0013596162,0.3413722947,,0.8112988341,0.3012263814,\n F,Inflow,,0.9645303744,0.6759971848,,-0.0106277141,0.1348767061\n F,Outflow,0.9645303744,,0.0560590794,-0.0106277141,,0.5028571429\n F,Reference,0.6759971848,0.0560590794,,0.1348767061,0.5028571429\n \"\"\"\n check_stat(known_csv, dc.spearman, comp=True)\n\n\n@helpers.seed\ndef test_theilslopes(dc):\n with helpers.raises(NotImplementedError):\n _ = dc.theilslopes\n\n\ndef test_inventory(dc):\n known_csv = StringIO(\n dedent(\n \"\"\"\\\n loc,param,Count,Non-Detect\n Inflow,A,21,3\n Inflow,B,24,6\n Inflow,C,24,0\n Inflow,D,24,11\n Inflow,E,19,4\n Inflow,F,21,8\n Outflow,A,22,1\n Outflow,B,22,9\n Outflow,C,24,4\n Outflow,D,25,12\n Outflow,E,16,2\n Outflow,F,24,8\n Reference,A,20,2\n Reference,B,19,6\n Reference,C,25,4\n Reference,D,21,12\n Reference,E,20,3\n Reference,F,17,7\n \"\"\"\n )\n )\n expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)\n pdtest.assert_frame_equal(expected, dc.inventory.astype(int), check_names=False)\n\n\ndef test_inventory_noNDs(dc_noNDs):\n known_csv = StringIO(\n dedent(\n \"\"\"\\\n loc,param,Count,Non-Detect\n Inflow,A,21,0\n Inflow,B,24,0\n Inflow,C,24,0\n Inflow,D,24,0\n Inflow,E,19,0\n Inflow,F,21,0\n Outflow,A,22,0\n Outflow,B,22,0\n Outflow,C,24,0\n Outflow,D,25,0\n Outflow,E,16,0\n Outflow,F,24,0\n Reference,A,20,0\n Reference,B,19,0\n Reference,C,25,0\n Reference,D,21,0\n Reference,E,20,0\n Reference,F,17,0\n \"\"\"\n )\n )\n expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)\n pdtest.assert_frame_equal(\n expected, dc_noNDs.inventory.astype(int), check_names=False,\n )\n\n\n@helpers.seed\ndef test_stat_summary(dc):\n known_csv = StringIO(\n dedent(\n \"\"\"\\\n ros_res,loc,A,B,C,D,E,F\n Count,Inflow,21,24,24,24,19,21\n Count,Outflow,22,22,24,25,16,24\n Count,Reference,20,19,25,21,20,17\n Non-Detect,Inflow,3.0,6.0,0.0,11.0,4.0,8.0\n Non-Detect,Outflow,1.0,9.0,4.0,12.0,2.0,8.0\n Non-Detect,Reference,2.0,6.0,4.0,12.0,3.0,7.0\n mean,Inflow,2.64668,7.64717,0.51325,3.02124,1.9147,9.8254\n mean,Outflow,5.24928,6.86384,1.00464,2.31881,1.09824,3.45018\n mean,Reference,3.77797,4.50425,0.54196,1.94583,2.28329,2.49171\n std,Inflow,3.67506,12.62594,0.36136,4.91543,2.62027,35.29825\n std,Outflow,8.92456,13.92253,1.72758,2.90815,1.13267,5.47634\n std,Reference,5.67123,11.05411,0.5035,2.3037,2.8617,3.50296\n min,Inflow,0.0756,0.17404,0.10213,0.05365,0.08312,0.00803\n min,Outflow,0.11177,0.02106,0.03578,0.11678,0.07425,0.06377\n 
min,Reference,0.15575,0.04909,0.04046,0.08437,0.05237,0.03445\n 10%,Inflow,0.1772,0.45233,0.13467,0.15495,0.1763,0.03548\n 10%,Outflow,0.44852,0.08297,0.08222,0.26949,0.19903,0.18008\n 10%,Reference,0.38448,0.13467,0.08241,0.19355,0.12777,0.09457\n 25%,Inflow,0.5226,1.47254,0.16401,0.35688,0.36475,0.12007\n 25%,Outflow,0.90603,0.25113,0.26752,0.51699,0.31151,0.40613\n 25%,Reference,1.09472,0.31423,0.13646,0.3839,0.39466,0.22443\n 50%,Inflow,1.19725,2.77399,0.52596,1.20189,1.07086,0.83249\n 50%,Outflow,2.23106,1.5465,0.39698,1.36276,0.51675,1.51094\n 50%,Reference,1.63947,1.56508,0.41269,0.8827,0.80716,0.74599\n 75%,Inflow,2.56354,4.72887,0.77639,3.04268,1.53278,1.79299\n 75%,Outflow,3.83802,2.84995,0.85354,2.79341,1.59183,2.80979\n 75%,Reference,2.65065,2.26185,0.79261,3.61179,3.20153,2.74225\n 90%,Inflow,6.02835,24.40655,0.99293,8.00691,6.28345,8.51706\n 90%,Outflow,12.43052,23.90022,2.43829,5.66731,2.30348,10.32829\n 90%,Reference,12.58278,6.67125,1.2205,4.78255,7.72012,8.57303\n max,Inflow,13.87664,45.97893,1.26657,21.75505,8.88365,163.01001\n max,Outflow,36.58941,47.49381,8.04948,12.39894,4.19118,23.29367\n max,Reference,21.22363,48.23615,1.94442,7.67751,8.75609,10.5095\n \"\"\"\n )\n )\n\n expected = pandas.read_csv(known_csv, index_col=[0, 1]).T\n pdtest.assert_frame_equal(\n expected.round(5),\n dc.stat_summary().round(5),\n check_names=False,\n check_dtype=False,\n rtol=1e-4,\n )\n\n\ndef test_locations(dc):\n for loc in dc.locations:\n assert isinstance(loc, Location)\n assert len(dc.locations) == 18\n assert dc.locations[0].definition == {\"loc\": \"Inflow\", \"param\": \"A\"}\n assert dc.locations[1].definition == {\"loc\": \"Inflow\", \"param\": \"B\"}\n\n\ndef test_datasets(dc):\n _ds = []\n for d in dc.datasets(\"Inflow\", \"Outflow\"):\n assert isinstance(d, Dataset)\n _ds.append(d)\n assert len(_ds) == 6\n assert _ds[0].definition == {\"param\": \"A\"}\n assert _ds[1].definition == {\"param\": \"B\"}\n\n\n# this sufficiently tests dc._filter_collection\ndef test_selectLocations(dc):\n locs = dc.selectLocations(param=\"A\", loc=[\"Inflow\", \"Outflow\"])\n assert len(locs) == 2\n for n, (loc, loctype) in enumerate(zip(locs, [\"Inflow\", \"Outflow\"])):\n assert isinstance(loc, Location)\n assert loc.definition[\"param\"] == \"A\"\n assert loc.definition[\"loc\"] == loctype\n\n\ndef test_selectLocations_squeeze_False(dc):\n locs = dc.selectLocations(param=\"A\", loc=[\"Inflow\"], squeeze=False)\n assert len(locs) == 1\n for n, loc in enumerate(locs):\n assert isinstance(loc, Location)\n assert loc.definition[\"param\"] == \"A\"\n assert loc.definition[\"loc\"] == \"Inflow\"\n\n\ndef test_selectLocations_squeeze_True(dc):\n loc = dc.selectLocations(param=\"A\", loc=[\"Inflow\"], squeeze=True)\n assert isinstance(loc, Location)\n assert loc.definition[\"param\"] == \"A\"\n assert loc.definition[\"loc\"] == \"Inflow\"\n\n\ndef test_selectLocations_squeeze_True_None(dc):\n loc = dc.selectLocations(param=\"A\", loc=[\"Junk\"], squeeze=True)\n assert loc is None\n\n\n# since the test_selectLocations* tests stress _filter_collection\n# enough, we'll mock it out for datasets:\ndef test_selectDatasets(dc):\n with mock.patch.object(dc, \"_filter_collection\") as _fc:\n with mock.patch.object(dc, \"datasets\", return_value=[\"A\", \"B\"]) as _ds:\n dc.selectDatasets(\"Inflow\", \"Reference\", foo=\"A\", bar=\"C\")\n _ds.assert_called_once_with(\"Inflow\", \"Reference\")\n _fc.assert_called_once_with([\"A\", \"B\"], foo=\"A\", bar=\"C\", 
squeeze=False)\n\n\n@pytest.mark.parametrize(\"func\", [stats.mannwhitneyu, stats.wilcoxon])\n@pytest.mark.parametrize(\n (\"x\", \"all_same\"), [([5, 5, 5, 5, 5], True), ([5, 6, 7, 7, 8], False)]\n)\ndef test_dist_compare_wrapper(x, all_same, func):\n y = [5, 5, 5, 5, 5]\n with mock.patch.object(stats, func.__name__) as _test:\n result = _dist_compare(x, y, _test)\n if all_same:\n assert numpy.isnan(result.stat)\n assert numpy.isnan(result.pvalue)\n assert _test.call_count == 0\n else:\n # assert result == (0, 0)\n _test.assert_called_once_with(x, y, alternative=\"two-sided\")\n","license":"bsd-3-clause"} {"repo_name":"legacysurvey\/rapala","path":"ninetyprime\/linearitycheck.py","copies":"2","size":"17953","content":"#!\/usr\/bin\/env python\n\nimport os\nimport glob\nimport numpy as np\nimport fitsio\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom astropy.table import Table\n\nfrom bokpipe import *\nfrom bokpipe.bokoscan import _convertfitsreg\n\ndef init_data_map(datadir,outdir,expTimes=None,files=None):\n\tdataMap = {}\n\tif not os.path.exists(outdir):\n\t\tos.mkdir(outdir)\n\tdataMap['outdir'] = outdir\n\tif files is None:\n\t\tdataMap['files'] = sorted(glob.glob(datadir+'*.fits') + \n\t\t glob.glob(datadir+'*.fits.gz') +\n\t\t glob.glob(datadir+'*.fits.fz'))\n\telse:\n\t\tdataMap['files'] = files\n\tdataMap['rawFiles'] = dataMap['files']\n\tdataMap['oscan'] = bokio.FileNameMap(outdir)\n\tdataMap['proc'] = bokio.FileNameMap(outdir,'_p')\n\tdataMap['files'] = [ dataMap['oscan'](f) for f in dataMap['files'] ]\n\tif expTimes is None:\n\t\tdataMap['expTime'] = np.array([fitsio.read_header(f)['EXPTIME']\n\t\t for f in dataMap['files']])\n\telse:\n\t\tdataMap['expTime'] = expTimes\n\ttry:\n\t\t# assume they are all the same\n\t\tdataMap['dataSec'] = \\\n\t\t _convertfitsreg(fitsio.read_header(\n\t\t dataMap['files'][0],'IM4')['DATASEC'])\n\texcept IOError:\n\t\tpass\n\treturn dataMap\n\ndef process_data(dataMap,redo=True,withvar=True,oscanims=False,bias2d=False):\n\toscanSubtract = BokOverscanSubtract(output_map=dataMap['oscan'],\n\t overwrite=redo,\n\t\t write_overscan_image=oscanims,\n\t\t oscan_cols_file=dataMap['outdir']+'oscan_cols',\n\t\t oscan_rows_file=dataMap['outdir']+'oscan_rows',\n\t\t verbose=10)#method='median_value')\n\toscanSubtract.process_files(dataMap['rawFiles'])\n\tif bias2d:\n\t\tbiasname = 'bias'\n\t\tbiasStack = bokproc.BokBiasStack(#reject=None,\n\t\t overwrite=redo,\n\t\t with_variance=withvar)\n\t\tbias2dFile = os.path.join(dataMap['outdir'],biasname+'.fits')\n\t\tbiasStack.stack(dataMap['biasFiles'],bias2dFile)\n\t\t#imProcess = bokproc.BokCCDProcess(bias2dFile,\n\t\t# output_map=dataMap['proc'])\n\t\t#imProcess.process_files(flatFrames)\n\ndef imstat(dataMap,outfn='stats'):\n\tfrom astropy.stats import sigma_clip\n\tfrom scipy.stats import mode,scoreatpercentile\n\tarray_stats = bokutil.array_stats\n\tfnlen = len(os.path.basename(dataMap['files'][0]))\n\tst = np.zeros(len(dataMap['flatSequence']),\n\t dtype=[('file','S%d'%fnlen),\n\t ('expTime','f4'),\n\t ('median','16f4'),\n\t ('mean','16f4'),\n\t ('mode','16f4'),\n\t ('iqr25','16f4'),\n\t ('iqr75','16f4'),\n\t ('iqr10','16f4'),\n\t ('iqr90','16f4')])\n\tfor _i,i in enumerate(dataMap['flatSequence']):\n\t\texpTime = dataMap['expTime'][i]\n\t\tfn = os.path.basename(dataMap['files'][i])\n\t\tfits = fitsio.FITS(dataMap['files'][i])\n\t\tprint '%s %4.1f ' % (fn,expTime),\n\t\tst['file'][_i] = fn\n\t\tst['expTime'][_i] = expTime\n\t\tfor 
j,extn in enumerate(['IM%d' % n for n in range(1,17)]):\n\t\t\tmodeVal,pix = array_stats(fits[extn].read()[dataMap['statsPix']],\n\t\t\t method='mode',retArray=True)\n\t\t\tst['mode'][_i,j] = modeVal\n\t\t\tst['mean'][_i,j] = pix.mean()\n\t\t\tst['median'][_i,j] = np.ma.median(pix)\n\t\t\tst['iqr25'][_i,j] = scoreatpercentile(pix,25)\n\t\t\tst['iqr75'][_i,j] = scoreatpercentile(pix,75)\n\t\t\tst['iqr10'][_i,j] = scoreatpercentile(pix,10)\n\t\t\tst['iqr90'][_i,j] = scoreatpercentile(pix,90)\n\t\t\tprint '%5d ' % (modeVal),\n\t\tprint\n\tfitsio.write(outfn+'.fits',st,clobber=True)\n\ndef scaled_histograms(dataMap,nims=None,outfn='pixhist'):\n\tpdf = PdfPages(outfn+'.pdf')\n\tfor _i,i in enumerate(dataMap['flatSequence']):\n\t\tif nims is not None and _i==nims:\n\t\t\tbreak\n\t\texpTime = dataMap['expTime'][i]\n\t\texpScale = dataMap['refExpTime'] \/ expTime\n\t\tprint dataMap['files'][i]\n\t\tfn = os.path.basename(dataMap['files'][i])\n\t\tfits = fitsio.FITS(dataMap['files'][i])\n\t\tfig = plt.figure(figsize=(8.0,10))\n\t\tplt.subplots_adjust(0.08,0.08,0.92,0.92,0.3,0.35)\n\t\tfor j,extn in enumerate(['IM%d' % n for n in range(1,17)]):\n\t\t\tax = plt.subplot(8,2,j+1)\n\t\t\tpix = fits[extn].read()[dataMap['statsPix']]\n\t\t\tax.hist(expScale*pix.flatten(),100,(0,40000),edgecolor='none')\n\t\t\tax.text(0.05,0.9,extn,va='top',size=9,transform=ax.transAxes)\n\t\t\tax.set_xlim(0,40000)\n\t\t\tax.xaxis.set_major_locator(ticker.MultipleLocator(10000))\n\t\t\tax.xaxis.set_minor_locator(ticker.MultipleLocator(2000))\n\t\t\tax.yaxis.set_major_locator(ticker.MultipleLocator(50000))\n\t\tplt.figtext(0.5,0.99,fn+' exp=%.1f' % expTime,ha='center',va='top')\n\t\tpdf.savefig(fig)\n\t\tplt.close(fig)\n\tpdf.close()\n\ndef plot_sequence(dataMap,st,imNum,which='median'):\n\texpScale = dataMap['refExpTime']\/st['expTime']\n\tseqno = 1 + np.arange(len(st))\n\tref = np.isclose(expScale,1.0)\n\tj = imNum - 1\n\tplt.figure(figsize=(8,6))\n\tplt.subplots_adjust(0.11,0.08,0.96,0.95)\n\tplt.errorbar(seqno[ref],expScale[ref]*st[which][ref,j],\n\t [expScale[ref]*(st[which]-st['iqr10'])[ref,j],\n\t expScale[ref]*(st['iqr90']-st[which])[ref,j]],\n\t fmt='bs-')\n\tplt.errorbar(seqno[~ref],expScale[~ref]*st[which][~ref,j],\n\t [expScale[~ref]*(st[which]-st['iqr10'])[~ref,j],\n\t expScale[~ref]*(st['iqr90']-st[which])[~ref,j]],\n\t fmt='cs-')\n\t#plt.scatter(seqno,expScale*st['mode'][:,j],marker='+',c='r')\n\t#plt.scatter(seqno,expScale*st['mean'][:,j],marker='x',c='g')\n\tplt.xlabel('sequence number')\n\tplt.ylabel('counts scaled by exp time')\n\tplt.title('IM%d'%imNum)\n\tplt.xlim(0.5,len(st)+0.5)\n\ndef fit_ref_exposures(dataMap,st,imNum,\n which='median',method='spline',doplot=False):\n\tfrom scipy.interpolate import UnivariateSpline\n\tseqno = 1 + np.arange(len(st))\n\tt = st['expTime']\n\tref = np.isclose(t,dataMap['refExpTime'])\n\tj = imNum - 1\n\trefCounts = st[which][ref,j][0]\n\tif method=='linear':\n\t\t_fit = np.polyfit(seqno[ref],refCounts\/st[which][ref,j],1)\n\t\tfit = lambda x: np.polyval(_fit,x)\n\telif method=='spline':\n\t\tfit = UnivariateSpline(seqno[ref],refCounts\/st[which][ref,j],\n\t\t s=1e-5,k=3)\n\telse:\n\t\traise ValueError\n\tif doplot:\n\t\tplt.figure()\n\t\tplt.subplot(211)\n\t\tplt.plot(seqno[ref],st[which][ref,j],'bs-')\n\t\tplt.plot(seqno,refCounts\/fit(seqno),c='r')\n\t\tplt.subplot(212)\n\t\tplt.plot(seqno[ref],(st[which][ref,j]-refCounts\/fit(seqno[ref]))\n\t\t \/st[which][ref,j],'bs-')\n\t\tplt.axhline(0,c='r')\n\treturn fit\n\ndef 
plot_linearity_curves(dataMap,st,which='median',correct=True,isPTC=False,\n refCor=None,fitmethod='spline',outfn='linearity',\n\t onlyim=None):\n\tseqno = 1 + np.arange(len(st))\n\tt = st['expTime']\n\tprint seqno,t\n\trefExpTime = dataMap['refExpTime']\n\tref = np.isclose(t,refExpTime)\n\trefCorFit = None\n\tii = np.arange(len(st))\n\t# only use the increasing sequence, not the reference exposures\n\tii = ii[~ref]\n\tif isPTC:\n\t\t# for PTCs skip every other image since they are done in pairs\n\t\tii = ii[::2]\n\t# only fit to unsaturated frames\n\ttry:\n\t\tfirstsat = np.where(np.any(st[which][ii,:] > 55000,axis=1))[0][0]\n\texcept IndexError:\n\t\tfirstsat = -1\n\tif onlyim is None:\n\t\tpdf = PdfPages(outfn+'.pdf')\n\tfor imNum in range(1,17):\n\t\tif onlyim is not None and imNum != onlyim:\n\t\t\tcontinue\n\t\tj = imNum - 1\n\t\t# correct lamp variation\n\t\tif correct:\n\t\t\tif refCor is None:\n\t\t\t\tfscl_fit = fit_ref_exposures(dataMap,st,imNum,which,\n\t\t\t\t method=fitmethod)\n\t\t\telse:\n\t\t\t\tif refCorFit is None:\n\t\t\t\t\trefCorFit = fit_ref_exposures(dataMap,st,imNum,which)\n\t\t\t\tfscl_fit = refCorFit\n\t\t\tfscl = fscl_fit(seqno)\n\t\telse:\n\t\t\tfscl = np.ones_like(seqno)\n\t\tfit = np.polyfit(t[ii[:firstsat]],\n\t\t fscl[ii[:firstsat]]*st[which][ii[:firstsat],j],1)\n\t\tfitv = np.polyval(fit,t)\n\t\tslope = fit[0] \/ (st[which][ref,j][0]\/refExpTime)\n\t\t#\n\t\tpltindex = imNum % 4\n\t\tif onlyim is None:\n\t\t\tif pltindex == 1:\n\t\t\t\tfig = plt.figure(figsize=(8,10))\n\t\t\t\tplt.subplots_adjust(0.11,0.08,0.96,0.95,0.25,0.2)\n\t\t\tax = plt.subplot(4,2,2*(j%4)+1)\n\t\telse:\n\t\t\tfig = plt.figure(figsize=(6,2.5))\n\t\t\tplt.subplots_adjust(0.11,0.23,0.99,0.98,0.35,0.2)\n\t\t\tax = plt.subplot(1,2,1)\n\t\tplt.plot(t[ii],fscl[ii]*st[which][ii,j],'bs-')\n\t\tplt.xlim(0.9*t.min(),t.max()+0.5)\n\t\tplt.xscale('log')\n\t\tplt.ylim(1e2,9e4)\n\t\tplt.yscale('log')\n\t\tplt.ylabel('counts [%s]' % which)\n\t\ttt = np.logspace(-1,np.log10(1.3*t.max()),100)\n\t\tplt.plot(tt,np.polyval(fit,tt),c='r')\n\t\tplt.text(0.05,0.9,'IM%d'%imNum,va='top',transform=ax.transAxes)\n\t\tplt.text(0.95,0.18,r'y = %.1f $\\times$ t + %.1f' % tuple(fit),\n\t\t ha='right',va='top',size=9,transform=ax.transAxes)\n\t\tplt.text(0.95,0.10,r'y = %.3f $\\times$ counts + %.1f' % (slope,fit[1]),\n\t\t ha='right',va='top',size=9,transform=ax.transAxes)\n\t\tif pltindex==0 or onlyim is not None:\n\t\t\tplt.xlabel('exptime (s)')\n\t\t#\n\t\tif onlyim is None:\n\t\t\tax = plt.subplot(4,2,2*(j%4)+2)\n\t\telse:\n\t\t\tax = plt.subplot(1,2,2)\n\t\tplt.plot(t[ii],100*(fscl[ii]*st[which][ii,j]-fitv[ii])\/fitv[ii],'bs-')\n\t\tplt.axhline(0,c='r')\n\t\t#ax.xaxis.set_major_locator(ticker.MultipleLocator(10))\n\t\t#ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))\n\t\tax.yaxis.set_major_locator(ticker.MultipleLocator(2))\n\t\tax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))\n\t\tplt.ylim(-5,5)\n\t\tplt.xlim(0.9*t.min(),t.max()+0.5)\n\t\tplt.xscale('log')\n\t\tif pltindex==0 or onlyim is not None:\n\t\t\tplt.xlabel('exptime (s)')\n\t\tplt.ylabel('residual \\%')\n\t\tif onlyim is None:\n\t\t\tif pltindex == 0:\n\t\t\t\tpdf.savefig(fig)\n\t\t\t\tplt.close(fig)\n\tif onlyim is None:\n\t\tpdf.close()\n\ndef rel_gain(dataMap,st,which='median',correct=True,fitmethod='spline',\n nskip=0):\n\tseqno = 1 + np.arange(len(st))\n\tt = st['expTime']\n\trefExpTime = dataMap['refExpTime']\n\tref = np.isclose(t,refExpTime)\n\trefCorFit = None\n\tii = np.arange(len(st))\n\tii = ii[~ref]\n\tii = ii[nskip:]\n\tsky4 = 
st[which][ii,3]\n\tfit_ii = ii[np.where((sky4>5000)&(sky4<25000))[0]]\n\tplt.figure()\n\tfor imNum in range(1,17):\n\t\tj = imNum - 1\n\t\t# correct lamp variation\n\t\tif correct:\n\t\t\tif True: #refCor is None:\n\t\t\t\tfscl_fit = fit_ref_exposures(dataMap,st,imNum,which,\n\t\t\t\t method=fitmethod)\n\t\t\telse:\n\t\t\t\tif refCorFit is None:\n\t\t\t\t\trefCorFit = fit_ref_exposures(dataMap,st,imNum,which)\n\t\t\t\tfscl_fit = refCorFit\n\t\t\tfscl = fscl_fit(seqno)\n\t\telse:\n\t\t\tfscl = np.ones_like(seqno)\n\t\tfit = np.polyfit(t[fit_ii],fscl[fit_ii]*st[which][fit_ii,j],1)\n\t\tfitv = np.polyval(fit,t)\n#\t\tslope = fit[0] \/ (st[which][ref,j][0]\/refExpTime)\n\t\txx = np.array(0,1.1*t.max())\n\t\tplt.subplot(4,4,imNum)\n\t\tif False:\n\t\t\tplt.scatter(t[ii],fscl[ii]*st[which][ii,j])\n\t\t\tplt.plot(xx,np.polyval(fit,xx),c='r')\n\t\telse:\n\t\t\tplt.scatter(t[ii],fscl[ii]*st[which][ii,j]\/fitv[ii])\n\t\t\tplt.axhline(1,c='r')\n\t\tplt.ylim(0.7,1.3)\n\t\tif True:\n\t\t\tplt.xscale('log')\n\t\tplt.xlim(0.9*t.min(),1.1*t.max())\n\ndef get_first_saturated_frame(seq):\n\ttry:\n\t\tfirstsat = np.where(seq > 55000)[0][0]\n\texcept IndexError:\n\t\tfirstsat = -1\n\treturn firstsat\n\ndef compare_oscan_levels(dataMap,st):\n\tfiles = [ dataMap['files'][i] for i in dataMap['flatSequence'] ]\n\toscans = np.zeros((len(files),16))\n\tfor j in range(16):\n\t\toscans[:,j] = [ fitsio.read_header(f,'IM%d'%(j+1))['OSCANMED']\n\t for f in files ]\n\tseqno = 1 + np.arange(len(st))\n\tplt.figure()\n\tfor j in range(8,16):\n\t\tax = plt.subplot(8,2,2*(j%8)+1)\n\t\ti1 = get_first_saturated_frame(st['median'][:,j])\n\t\tplt.scatter(st['median'][:i1,j],oscans[:i1,j],c='b')\n\t\tplt.ylabel('IM%d'%(j+1))\n\t\tax = plt.subplot(8,2,2*(j%8)+2)\n\t\tplt.scatter(seqno[:i1],oscans[:i1,j],c='b')\n\ndef init_sep09bss_data_map():\n\tdatadir = os.environ.get('BASSDATA')+'\/20150909\/bss\/20150908\/'\n\texptimes = np.loadtxt(datadir+'..\/bss.20150909.log',usecols=(3,))\n\texptimes = exptimes[50:]\n\tprint exptimes\n\trdxdir = os.environ.get('GSCRATCH','tmp_sep')+'\/bss_sep09\/'\n\tif not os.path.exists(rdxdir):\n\t\tos.makedirs(rdxdir)\n\tdataMap = init_data_map(datadir,rdxdir,\n\t expTimes=exptimes,files=None)\n\tdataMap['rawFiles'] = dataMap['rawFiles'][50:]\n\tdataMap['files'] = dataMap['files'][50:]\n\tdataMap['biasFiles'] = dataMap['files'][-5:]\n\t#dataMap['flatSequence'] = range(50,68)\n\tdataMap['flatSequence'] = range(18)\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\tdataMap['refExpTime'] = 40.0\n\treturn dataMap\n\ndef init_sep29ptc_data_map():\n\tdataMap = init_data_map(\n\t \"\/home\/ian\/dev\/rapala\/bokpipe\/scratch\/sep29ptcs\/ptc\/\",'sep29ptcs\/')\n\tdataMap['biasFiles'] = [dataMap['files'][0],]\n\tdataMap['flatSequence'] = range(1,len(dataMap['files']))\n\tdataMap['statsPix'] = np.s_[20:-20,100:-100]\n\tdataMap['refExpTime'] = 10.0\n\treturn dataMap\n\ndef init_oct02ptc_data_map():\n\tdataMap = init_data_map(os.environ.get('GSCRATCH')+'\/02oct15\/ptc\/',\n\t os.environ.get('GSCRATCH')+'\/02oct15\/ptc_proc\/')\n\tdataMap['biasFiles'] = [dataMap['files'][0],]\n\tdataMap['flatSequence'] = range(1,len(dataMap['files']))\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\tdataMap['refExpTime'] = 10.0\n\treturn dataMap\n\ndef init_oct20_data_map():\n\tdatadir = os.environ.get('BASSDATA')+'\/20151020\/'\n\texptimes = np.loadtxt(datadir+'images.log',usecols=(6,))\n\tnuse = 53\n\texptimes = exptimes[:nuse]\n\tprint exptimes\n\tdataMap = 
init_data_map(datadir,'tmp_oct20',expTimes=exptimes)\n\tdataMap['rawFiles'] = dataMap['rawFiles'][:nuse]\n\tdataMap['files'] = [ dataMap['oscan'](f) \n\t for f in dataMap['files'][:nuse] ]\n\tdataMap['biasFiles'] = dataMap['files'][:20]\n\tdataMap['flatSequence'] = range(20,nuse)\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\tdataMap['refExpTime'] = 3.0\n\treturn dataMap\n\ndef init_nov11g_data_map():\n\tdatadir = os.environ.get('BASSDATA')+'\/Nov2015\/'\n\tlog = Table.read(datadir+'bassLog_Nov2015.fits')\n\texptimes = log['expTime'][111:150]\n\tfiles = [ datadir+f['utDir']+'\/'+f['fileName']+'.fits'\n\t for f in log[111:150] ]\n\tdataMap = init_data_map(datadir,'tmp_nov11g',\n\t expTimes=exptimes,files=files)\n\tdataMap['biasFiles'] = dataMap['files'][-10:]\n\tdataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\tdataMap['refExpTime'] = 3.0\n\treturn dataMap\n\ndef init_nov14_data_map(filt):\n\tdatadir = os.environ.get('BASSDATA')+'\/Nov2015\/'\n\tlog = Table.read(datadir+'bassLog_Nov2015.fits')\n\tif filt=='g':\n\t\tframes = np.r_[np.s_[297:345],np.s_[247:257]]\n\telse:\n\t\tframes = np.r_[np.s_[345:393],np.s_[247:257]]\n\texptimes = log['expTime'][frames]\n\tfiles = [ datadir+f['utDir']+'\/'+f['fileName']+'.fits'\n\t for f in log[frames] ]\n\tdataMap = init_data_map(datadir,'tmp_nov14'+filt,\n\t expTimes=exptimes,files=files)\n\tdataMap['biasFiles'] = dataMap['files'][-10:]\n\tdataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\tdataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]\n\treturn dataMap\n\ndef init_jan3_data_map(filt):\n\tdatadir = os.environ.get('BASSDATA')\n\tlog = Table.read('basslogs\/log_ut20160103.fits')\n\tif filt=='g':\n\t\tframes = np.r_[np.s_[57:105],np.s_[160:170]]\n\telse:\n\t\tframes = np.r_[np.s_[105:160],np.s_[160:170]]\n\texptimes = log['expTime'][frames]\n\tfiles = [ datadir+'\/'+f['utDir'].strip()+'\/'+f['fileName'].strip()+'.fits'\n\t for f in log[frames] ]\n\tdataMap = init_data_map(datadir,'tmp_jan3'+filt,\n\t expTimes=exptimes,files=files)\n\tdataMap['biasFiles'] = dataMap['files'][-10:]\n\tdataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\tdataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]\n\treturn dataMap\n\ndef init_data_map_fromfile(filename,outdir='tmp',nersc=True):\n\tdatadir = os.environ.get('BASSDATA')\n\tif nersc:\n\t\tdatadir = os.path.join(datadir,'BOK_Raw')\n\tlog = np.loadtxt(filename,dtype=[('frameNum','i4'),('utDir','S8'),\n\t ('fileName','S35'),\n\t ('imType','S10'),('filter','S8'),\n\t ('expTime','f4')],skiprows=1)\n\texptimes = log['expTime']\n\tfiles = [ datadir+'\/'+f['utDir'].strip()+'\/'+f['fileName'].strip()+'.fits'\n\t for f in log ]\n\tif nersc:\n\t\tfiles = [ f+'.fz' for f in files ]\n\tdataMap = init_data_map(datadir,outdir,\n\t expTimes=exptimes,files=files)\n\tdataMap['biasFiles'] = np.array(dataMap['files'])[log['imType']=='zero']\n\tdataMap['flatSequence'] = np.where(log['imType']=='flat')[0]\n\tdataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')\n\t# assume it starts with reference\n\tdataMap['refExpTime'] = exptimes[dataMap['flatSequence'][0]]\n\treturn dataMap\n\nif __name__=='__main__':\n\timport sys\n\tdataset = sys.argv[1] \n\tif dataset == 'sep09bss':\n\t\tdataMap = init_sep09bss_data_map()\n\telif dataset 
== 'oct02':\n\t\tdataMap = init_oct02ptc_data_map()\n\telif dataset == 'oct20':\n\t\tdataMap = init_oct20_data_map()\n\telif dataset == 'nov11g':\n\t\tdataMap = init_nov11g_data_map()\n\telif dataset == 'nov14g':\n\t\tdataMap = init_nov14_data_map('g')\n\telif dataset == 'nov14Ha':\n\t\tdataMap = init_nov14_data_map('Ha')\n\telif dataset == 'jan3g':\n\t\tdataMap = init_jan3_data_map('g')\n\telif dataset == 'jan3Ha':\n\t\tdataMap = init_jan3_data_map('Ha')\n\telse:\n\t\tdataMap = init_data_map_fromfile(sys.argv[2],dataset)\n\tprint 'processing ',dataset\n\tif not os.path.exists('stats_'+dataset+'.fits'):\n\t\tprocess_data(dataMap,bias2d=True)\n\t\timstat(dataMap,outfn='stats_'+dataset)\n\tst = fitsio.read('stats_'+dataset+'.fits')\n\tplot_linearity_curves(dataMap,st,outfn='linearity_'+dataset)\n\tif True:\n\t\tplot_linearity_curves(dataMap,st,outfn='linearity_'+dataset,\n\t\t onlyim=4)\n\t\tplt.savefig('linearity_IM4_%s.png'%dataset)\n\t\tplot_sequence(dataMap,st,4)\n\t\tplt.savefig('linsequence_IM4_%s.png'%dataset)\n\n","license":"bsd-3-clause"} {"repo_name":"Jailander\/COSMOS","path":"kriging_exploration\/scripts\/explorator.py","copies":"1","size":"34183","content":"#!\/usr\/bin\/env python\n\n\nimport cv2\nimport sys\nimport yaml\n\nimport signal\nimport numpy as np\n#import utm\n\n\nimport matplotlib as mpl\nimport matplotlib.cm as cm\n\nimport rospy\n\nimport argparse\n\nimport actionlib\n\n\nfrom cosmos_msgs.msg import KrigInfo\nfrom cosmos_msgs.srv import CompareModels\nimport kriging_exploration.map_coords\nimport std_msgs.msg\n\nimport open_nav.msg\n\nfrom kriging_exploration.data_grid import DataGrid\nfrom kriging_exploration.map_coords import MapCoords\nfrom kriging_exploration.visualiser import KrigingVisualiser\nfrom kriging_exploration.canvas import ViewerCanvas\nfrom kriging_exploration.topological_map import TopoMap\nfrom kriging_exploration.exploration import ExplorationPlan\n\nfrom sensor_msgs.msg import NavSatFix\n\n\ndef overlay_image_alpha(img, img_overlay):\n \"\"\"Overlay img_overlay on top of img at the position specified by\n pos and blend using alpha_mask.\n \"\"\"\n show_image = img.copy()\n alpha = img_overlay[:, :, 3] \/ 255.0 # Alpha mask must contain values \n # within the range [0, 1] \n # and be the same size as img_overlay.\n # Image ranges\n y1, y2 = 0, img.shape[0]\n x1, x2 = 0, img.shape[1]\n\n channels = img.shape[2]\n\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n show_image[y1:y2, x1:x2, c] = (alpha * img_overlay[y1:y2, x1:x2, c] + alpha_inv * img[y1:y2, x1:x2, c])\n return show_image\n\n\nclass Explorator(KrigingVisualiser):\n\n #_w_shape=[(0, 16), (1, 17), (3, 17), (5, 16), (8, 15), (10, 15), (12, 14), (14, 13), (12, 12), (10, 11), (8, 11), (5, 10), (8, 9), (10, 9), (12, 8), (14, 7), (12, 6), (10, 5), (8, 5), (6, 4), (4, 3), (3, 2), (4, 1), (5, 0), (7, 0)]\n #_w_shape=[(17, 0), (17, 1), (17, 3), (16, 5), (15, 8), (15, 10), (14, 12), (13, 14), (12, 12), (11, 10), (11, 8), (10, 5), (9, 8), (9, 10), (8, 12), (7, 14), (6, 12), (5, 10), (5, 8), (4, 6), (3, 4), (2, 3), (1, 4), (0, 5), (0, 7)]\n #_w_shape=[(17, 0), (17,1), (17, 2), (17, 4), (16, 4), (16, 6), (16, 8), (15, 8), (15, 10), (14, 10), (14, 12), (13, 12), (13, 14), (12, 14), (12, 12), (11, 12), (11, 10), (10, 10), (10, 8), (10, 6), (10, 4), (9, 4), (9, 6), (9, 8), (9, 10), (8, 10), (8, 12), (7, 12), (7, 14), (6, 14), (6, 12), (5, 12), (5, 10), (4, 10), (4, 8), (4, 6), (4, 4), (3, 4), (3, 3), (2, 3), (2, 4), (1,4), (1, 6), (0,6), (1, 8), (0,8), (1, 10), (0, 10), (0, 12), (0, 14)]\n 
_w_shape=[(17, 0), (16, 1), (14, 6), (12, 11), (10, 14), (8, 9), (5, 14), (3, 11), (2, 6), (0, 3)]\n def __init__(self, lat_deg, lon_deg, zoom, size, args):\n self.targets = []\n self.results =[]\n self.result_counter=0\n self.explodist=0\n self.running = True\n self.last_coord=None\n signal.signal(signal.SIGINT, self.signal_handler)\n self.expid=args.experiment_name\n print \"Creating visualiser object\"\n super(Explorator, self).__init__(lat_deg, lon_deg, zoom, size)\n\n cv2.namedWindow('explorator')\n cv2.setMouseCallback('explorator', self.click_callback)\n\n self.current_model=-1\n self.draw_mode = 'none'\n self.grid = DataGrid(args.limits_file, args.cell_size)\n self.topo_map= TopoMap(self.grid)\n self.visited_wp=[]\n\n explo_type = args.area_coverage_type\n self.define_exploration_type(explo_type)\n \n \n self.navigating = False\n self.pause_exp = False\n self.exploring = 0\n self.n_inputs = 0\n \n print \"NUMBER OF TARGETS:\"\n print len(self.explo_plan.targets) \n \n self.limits_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.grid_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.exploration_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.gps_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n\n self.limits_canvas.draw_polygon(self.grid.limits, (0,0,255,128), thickness=1)\n self.grid_canvas.draw_grid(self.grid.cells, args.cell_size, (128,128,128,2), thickness=1)\n \n self.redraw()\n\n self.redraw_kriged=True\n self.redraw_var=True\n self.redraw_devi=True\n \n self.model_canvas=[]\n self.model_legend=[]\n self.kriging_canvas=[]\n self.klegend_canvas=[]\n self.klegend2_canvas=[]\n self.klegend3_canvas=[]\n self.sigma_canvas=[]\n self.sigma2_canvas=[]\n self.model_canvas_names=[]\n \n self.mean_out_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.mean_out_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.mean_var_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.mean_var_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.mean_dev_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)\n self.mean_dev_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res) \n\n rospy.loginfo(\"Subscribing to Krig Info\")\n rospy.Subscriber(\"\/kriging_data\", KrigInfo, self.data_callback)\n rospy.Subscriber(\"\/fix\", NavSatFix, self.gps_callback)\n rospy.Subscriber('\/penetrometer_scan', std_msgs.msg.String, self.scan_callback)\n self.req_data_pub = rospy.Publisher('\/request_scan', std_msgs.msg.String, latch=False, queue_size=1)\n\n rospy.loginfo(\" ... Connecting to Open_nav\")\n \n self.open_nav_client = actionlib.SimpleActionClient('\/open_nav', open_nav.msg.OpenNavAction)\n self.open_nav_client.wait_for_server()\n\n rospy.loginfo(\" ... 
done\")\n\n\n tim1 = rospy.Timer(rospy.Duration(0.2), self.drawing_timer_callback)\n tim2 = rospy.Timer(rospy.Duration(0.1), self.control_timer_callback)\n self.refresh()\n\n while(self.running):\n cv2.imshow('explorator', self.show_image)\n k = cv2.waitKey(20) & 0xFF\n self._change_mode(k)\n\n tim1.shutdown()\n tim2.shutdown()\n cv2.destroyAllWindows() \n sys.exit(0)\n\n\n # EXPLORATION PARAMS HERE!!!!\n def define_exploration_type(self, explo_type):\n self.exploration_strategy=explo_type \n self.n_goals=10 \n \n if explo_type=='area_split':\n self.grid._split_area(3,3)\n sb=[]\n for i in self.grid.area_splits_coords:\n (y, x) = self.grid.get_cell_inds_from_coords(i)\n sb.append((x,y))\n self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=sb)\n elif explo_type=='random':\n self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent)\n elif explo_type=='w_shape':\n self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=self._w_shape)\n else: #greedy\n self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, exploration_type='greedy', ac_model=explo_type)\n\n\n\n\n def drawing_timer_callback(self, event):\n self.refresh()\n\n def control_timer_callback(self, event):\n if self.navigating:\n if self.open_nav_client.simple_state ==2:\n print \"DONE NAVIGATING\"\n self.navigating = False\n if self.exploring==1:\n self.exploring=2\n \n elif self.exploring==2:\n if not self.pause_exp:\n self.explo_plan.explored_wp.append(self.explo_plan.route.pop(0))\n info_str='Do_reading'\n self.req_data_pub.publish(info_str)\n self.exploring=3\n \n elif self.exploring==4:\n if not self.pause_exp:\n if len(self.explo_plan.route) >0:\n gg=self.explo_plan.route[0]\n self.open_nav_client.cancel_goal()\n targ = open_nav.msg.OpenNavActionGoal()\n \n targ.goal.coords.header.stamp=rospy.Time.now()\n targ.goal.coords.latitude=gg.coord.lat\n targ.goal.coords.longitude=gg.coord.lon\n \n print \"Going TO: \", gg\n self.exploring=1\n self.navigating=True\n self.open_nav_client.send_goal(targ.goal)\n else:\n print \"Done Exploring\"\n self.exploring = 0\n# else:\n# if self.exploring:\n# print \"waiting for new goal\"\n \n def gps_callback(self, data):\n if not np.isnan(data.latitude):\n self.gps_canvas.clear_image()\n gps_coord = MapCoords(data.latitude,data.longitude) \n self.gps_canvas.draw_coordinate(gps_coord,'black',size=2, thickness=2, alpha=255)\n if self.last_coord:\n dist = gps_coord - self.last_coord\n self.explodist+= dist[0]\n self.last_coord=gps_coord\n\n\n\n def data_callback(self, msg):\n point_coord = kriging_exploration.map_coords.coord_from_satnav_fix(msg.coordinates)\n for i in msg.data:\n self.grid.add_data_point(i.model_name, point_coord, i.measurement)\n\n self.vmin, self.vmax = self.grid.get_max_min_vals()\n self.n_models=len(self.grid.models)\n \n for i in self.grid.models:\n if i.name not in self.model_canvas_names: \n print i.name\n self.model_canvas_names.append(i.name)\n self.model_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))\n self.model_legend.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))\n self.kriging_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))\n self.klegend_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, 
self.satellite.res))\n self.klegend2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))\n self.klegend3_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)) \n self.sigma_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))\n self.sigma2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))\n self.draw_inputs(self.model_canvas_names.index(i.name))\n\n self.n_inputs+=1\n if self.exploring==3:\n if self.n_inputs>3:\n self.krieg_all_mmodels()\n rospy.sleep(0.1)\n self.grid.calculate_mean_grid()\n rospy.sleep(0.1)\n self.draw_means()\n self.draw_mode=\"means\"\n \n resp = self.get_errors()\n self.result_counter+=1\n d={}\n d['step']=self.result_counter\n d['id']=self.expid\n d['ns']=len(self.explo_plan.targets)\n d['coord']={}\n d['coord']['lat']=self.last_coord.lat\n d['coord']['lon']=self.last_coord.lon\n d['dist']=float(self.explodist)\n d['results']={}\n d['results']['groundtruth']=resp\n d['results']['var']={}\n d['results']['var']['mean']={}\n d['results']['var']['mean']['mean']= float(np.mean(self.grid.mean_variance))\n d['results']['var']['mean']['max']= float(np.max(self.grid.mean_variance))\n d['results']['var']['mean']['min']= float(np.min(self.grid.mean_variance))\n \n# d['results']['var']['std']['mean']= np.mean(self.grid.mean_deviation)\n# d['results']['var']['std']['max']= np.max(self.grid.mean_deviation)\n# d['results']['var']['std']['min']= np.min(self.grid.mean_deviation)\n\n means=[]\n maxs=[]\n mins=[]\n for i in range(self.n_models):\n means.append(float(np.mean(self.grid.models[i].variance)))\n maxs.append(float(np.max(self.grid.models[i].variance)))\n mins.append(float(np.min(self.grid.models[i].variance)))\n \n d['results']['models']={}\n d['results']['models']['means']=means\n d['results']['models']['maxs']=maxs\n d['results']['models']['mins']=mins\n\n\n rospy.sleep(0.1)\n self.results.append(d)\n if self.exploration_strategy == 'greedy':\n nwp = len(self.explo_plan.route) + len(self.explo_plan.explored_wp)\n print nwp, \" nodes in plan\"\n if nwp <= self.n_goals:\n #THIS IS the ONE\n #self.explo_plan.add_limited_greedy_goal(self.grid.mean_variance, self.last_coord) \n \n self.explo_plan.add_greedy_goal(self.grid.mean_variance)\n \n #self.explo_plan.add_montecarlo_goal(self.grid.mean_variance, self.last_coord)\n \n \n #self.draw_mode=\"deviation\"\n# self.current_model=0\n# if self.redraw_devi:\n# self.draw_all_devs()\n self.redraw()\n rospy.sleep(0.1)\n self.exploring=4\n\n def scan_callback(self, msg):\n if msg.data == 'Reading':\n print \"GOT READING!!!\"\n cx, cy = self.grid.get_cell_inds_from_coords(self.last_coord)\n if cx <0 or cy<0:\n print \"Reading outside the grid\"\n else:\n print 'Reading at: ', cx, cy\n for i in self.topo_map.waypoints:\n if (cy,cx) == i.ind:\n print 'Setting: ', i.name, i.coord, \"as Visited\"\n i.visited= True\n self.visited_wp.append(i)\n self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)\n self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)\n self.redraw()\n\n\n def refresh(self):\n #self.show_image = self.image.copy()\n #self.show_image = cv2.addWeighted(self.gps_canvas.image, 0.7, self.image, 1.0, 0)\n #self.show_image = transparentOverlay(self.image, self.gps_canvas.image)\n self.show_image = overlay_image_alpha(self.image,self.gps_canvas.image)\n\n def redraw(self):\n self.image = 
cv2.addWeighted(self.grid_canvas.image, 0.5, self.base_image, 1.0, 0)\n self.image = cv2.addWeighted(self.limits_canvas.image, 0.75, self.image, 1.0, 0)\n self.image = cv2.addWeighted(self.exploration_canvas.image, 0.75, self.image, 1.0, 0)\n if self.draw_mode == \"inputs\" and self.current_model>=0 :\n self.image = cv2.addWeighted(self.model_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)\n self.image = overlay_image_alpha(self.image, self.model_legend[self.current_model].image)\n\n if self.draw_mode == \"kriging\":# and self.current_model>=0 :\n self.image = cv2.addWeighted(self.kriging_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)\n #self.image = cv2.addWeighted(self.klegend_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)\n self.image = overlay_image_alpha(self.image, self.klegend_canvas[self.current_model].image)\n\n if self.draw_mode == \"deviation\":# and self.current_model>=0 :\n self.image = cv2.addWeighted(self.sigma_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)\n #self.image = cv2.addWeighted(self.klegend3_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)\n self.image = overlay_image_alpha(self.image, self.klegend3_canvas[self.current_model].image)\n \n if self.draw_mode == \"variance\":# and self.current_model>=0 : \n self.image = cv2.addWeighted(self.sigma2_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)\n #self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)\n self.image = overlay_image_alpha(self.image, self.klegend2_canvas[self.current_model].image)\n \n if self.draw_mode == \"means\":\n self.image = cv2.addWeighted(self.mean_dev_canvas.image, 0.75, self.image, 1.0, 0)\n #self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)\n self.image = overlay_image_alpha(self.image, self.mean_dev_legend_canvas.image)\n \n \n self.show_image = self.image.copy()\n\n \n\n def click_callback(self, event, x, y, flags, param):\n \n if event == cv2.EVENT_RBUTTONDOWN:\n click_coord = self.satellite._pix2coord(x,y)\n cx, cy = self.grid.get_cell_inds_from_coords(click_coord)\n\n if cx <0 or cy<0:\n print \"click outside the grid\"\n else:\n print cx, cy\n \n for i in self.topo_map.waypoints:\n if (cy,cx) == i.ind:\n print i.name, i.coord.easting, i.coord.northing\n i.visited= True\n self.visited_wp.append(i)\n self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)\n self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)\n self.redraw()\n\n if event == cv2.EVENT_LBUTTONDOWN:\n click_coord = self.satellite._pix2coord(x,y)\n cx, cy = self.grid.get_cell_inds_from_coords(click_coord)\n\n if cx <0 or cy<0:\n print \"click outside the grid\"\n else:\n print cx, cy\n\n for i in self.topo_map.waypoints:\n if (cy,cx) == i.ind:\n self.open_nav_client.cancel_goal()\n targ = open_nav.msg.OpenNavActionGoal()\n\n #goal.goal.goal.header.\n targ.goal.coords.header.stamp=rospy.Time.now()\n targ.goal.coords.latitude=i.coord.lat\n targ.goal.coords.longitude=i.coord.lon\n\n print targ\n self.navigating=True\n self.open_nav_client.send_goal(targ.goal)\n #self.client.wait_for_result()\n # Prints out the result of executing the action\n #ps = self.client.get_result()\n #print ps\n \n\n\n def draw_inputs(self, nm):\n \n minv = self.grid.models[nm].lims[0]\n maxv = self.grid.models[nm].lims[1]\n\n if (maxv-minv) <=1:\n maxv = maxv + 50\n minv = minv - 50\n \n norm = mpl.colors.Normalize(vmin=minv, 
vmax=maxv)\n cmap = cm.jet\n colmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n\n self.model_canvas[nm].clear_image()\n self.model_legend[nm].clear_image()\n \n for i in self.grid.models[nm].orig_data:\n cell = self.grid.cells[i.y][i.x]\n a= colmap.to_rgba(int(i.value)) \n b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))\n self.model_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)\n self.model_canvas[nm].put_text(self.grid.models[nm].name)\n \n self.model_legend[nm].put_text(self.grid.models[nm].name)\n self.model_legend[nm].draw_legend(minv, maxv, colmap, title=\"Kriging\")\n \n\n\n\n def draw_krigged(self, nm):\n print \"drawing kriging\" + str(nm)\n\n minv = self.grid.models[nm].min_val\n maxv = self.grid.models[nm].max_val\n\n if (maxv-minv) <=1:\n maxv = maxv + 50\n minv = minv - 50\n\n norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)\n cmap = cm.jet\n colmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n\n\n self.kriging_canvas[nm].clear_image()\n self.klegend_canvas[nm].clear_image()\n for i in range(self.grid.models[nm].shape[0]):\n for j in range(self.grid.models[nm].shape[1]):\n cell = self.grid.cells[i][j]\n a= colmap.to_rgba(int(self.grid.models[nm].output[i][j]))\n b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50)) \n self.kriging_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)\n \n self.klegend_canvas[nm].put_text(self.grid.models[nm].name)\n self.klegend_canvas[nm].draw_legend(minv, maxv, colmap, title=\"Kriging\")\n \n self.redraw()\n\n\n def draw_variance(self, nm):\n print \"drawing variance\" + str(nm)\n \n minv = self.grid.models[nm].min_var\n maxv = self.grid.models[nm].max_var\n \n if (maxv-minv) <=1:\n maxv = maxv + 50\n minv = minv - 50\n \n norm = mpl.colors.Normalize(vmin=minv, vmax= maxv)\n cmap = cm.jet\n colmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n\n self.sigma_canvas[nm].clear_image()\n self.klegend2_canvas[nm].clear_image()\n \n for i in range(self.grid.models[nm].shape[0]):\n for j in range(self.grid.models[nm].shape[1]):\n cell = self.grid.cells[i][j]\n a= colmap.to_rgba(int(self.grid.models[nm].variance[i][j]))\n b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))\n self.sigma2_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)\n\n\n self.klegend2_canvas[nm].put_text(self.grid.models[nm].name)\n self.klegend2_canvas[nm].draw_legend(minv, maxv, colmap, title=\"Variance\")\n self.redraw()\n\n\n\n\n def draw_means(self):\n print \"drawing mean deviation ...\"\n \n minv = self.grid.min_mean_deviation\n maxv = self.grid.max_mean_deviation\n\n if (maxv-minv) <=1:\n maxv = maxv + 50\n minv = minv - 50 \n \n \n norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)\n cmap = cm.jet\n colmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n\n self.mean_dev_canvas.clear_image()\n self.mean_dev_legend_canvas.clear_image()\n \n for i in range(self.grid.shape[0]):\n for j in range(self.grid.shape[1]):\n cell = self.grid.cells[i][j]\n a= colmap.to_rgba(int(self.grid.mean_deviation[i][j]))\n b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))\n self.mean_dev_canvas.draw_cell(cell, self.grid.cell_size, b, thickness=-1)\n\n\n #self.mean_dev_legend_canvas.put_text(self.grid.models[nm].name)\n self.mean_dev_legend_canvas.draw_legend(minv, maxv, colmap, title=\"Mean Deviation\")\n \n #self.draw_mode=\"means\"\n self.redraw()\n\n\n\n def draw_deviation(self, nm):\n print \"drawing deviation\" + str(nm)\n \n minv = self.grid.models[nm].min_dev\n maxv = self.grid.models[nm].max_dev\n\n if 
(maxv-minv) <=1:\n maxv = maxv + 50\n minv = minv - 50 \n \n \n \n norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)\n cmap = cm.jet\n colmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n\n self.sigma_canvas[nm].clear_image()\n self.klegend3_canvas[nm].clear_image()\n \n for i in range(self.grid.models[nm].shape[0]):\n for j in range(self.grid.models[nm].shape[1]):\n cell = self.grid.cells[i][j]\n a= colmap.to_rgba(int(self.grid.models[nm].deviation[i][j]))\n b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))\n self.sigma_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)\n\n\n self.klegend3_canvas[nm].put_text(self.grid.models[nm].name)\n self.klegend3_canvas[nm].draw_legend(minv, maxv, colmap, title=\"Deviation\")\n self.redraw()\n\n\n def krieg_all_mmodels(self):\n for i in self.grid.models:\n i.do_krigging()\n self.redraw_kriged=True\n self.redraw_var=True\n self.redraw_devi=True\n\n def draw_all_outputs(self):\n for i in self.grid.models:\n self.draw_krigged(self.model_canvas_names.index(i.name))\n self.redraw_kriged=False\n\n\n def draw_all_vars(self):\n for i in self.grid.models:\n self.draw_variance(self.model_canvas_names.index(i.name))\n self.redraw_var=False\n \n def draw_all_devs(self):\n for i in self.grid.models:\n self.draw_deviation(self.model_canvas_names.index(i.name))\n self.redraw_devi=False\n \n \n def _change_mode(self, k):\n if k == 27:\n self.running = False\n elif k == ord('q'):\n self.running = False\n elif k == ord('n'):\n print len(self.grid.models)\n elif k == ord('i'):\n if self.n_models > 0:\n self.draw_mode=\"inputs\"\n self.current_model=0\n self.redraw()\n elif k == ord('d'):\n if self.n_models > 0:\n self.draw_mode=\"deviation\"\n self.current_model=0\n if self.redraw_devi:\n self.draw_all_devs()\n self.redraw() \n elif k == ord('v'):\n if self.n_models > 0:\n self.draw_mode=\"variance\"\n self.current_model=0\n if self.redraw_var:\n self.draw_all_vars()\n self.redraw()\n elif k == ord('t'):\n self.krieg_all_mmodels()\n self.grid.calculate_mean_grid()\n if self.n_models > 0:\n self.draw_all_outputs()\n self.draw_mode=\"kriging\"\n self.current_model=0\n self.redraw()\n\n elif k == ord('k'):\n if self.n_models > 0:\n self.draw_mode=\"kriging\"\n self.current_model=0\n if self.redraw_kriged:\n self.draw_all_outputs()\n self.redraw()\n\n elif k == ord('>'):\n self.current_model+=1\n if self.current_model >= self.n_models:\n self.current_model=0\n self.redraw()\n elif k == ord('<'):\n self.current_model-=1\n if self.current_model < 0:\n self.current_model=self.n_models-1\n self.redraw()\n elif k == ord('w'):\n self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)\n self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)\n self.redraw()\n elif k == ord('e'):\n self.exploration_canvas.draw_waypoints(self.explo_plan.targets, (255,200,128,255), thickness=3)\n self.exploration_canvas.draw_plan(self.explo_plan.route, 'cyan', thickness=1)\n self.redraw()\n #xnames = [x.name for x in self.explo_plan.route]\n #print xnames\n elif k == ord('g'):\n if len(self.explo_plan.route) >0:\n gg=self.explo_plan.route[0]\n self.open_nav_client.cancel_goal()\n targ = open_nav.msg.OpenNavActionGoal()\n \n targ.goal.coords.header.stamp=rospy.Time.now()\n targ.goal.coords.latitude=gg.coord.lat\n targ.goal.coords.longitude=gg.coord.lon\n \n print \"Going TO: \", gg\n self.exploring=1\n self.navigating=True\n self.open_nav_client.send_goal(targ.goal)\n self.result_counter=0\n self.explodist=0\n else:\n print \"Done 
Exploring\"\n self.exploring = 0\n elif k == ord('y'):\n vwp = []\n for i in self.visited_wp:\n vwp.append(i.name)\n yml = yaml.safe_dump(vwp, default_flow_style=False)\n fh = open(\"visited.yaml\", \"w\")\n s_output = str(yml)\n fh.write(s_output)\n fh.close \n elif k == ord('l'):\n print \"loading visited\"\n \n with open(\"visited.yaml\", 'r') as f:\n visited = yaml.load(f)\n for i in visited:\n for l in self.topo_map.waypoints:\n if i == l.name:\n self.visited_wp.append(l)\n break\n\n elif k == ord('a'):\n self.grid.calculate_mean_grid()\n self.draw_means()\n self.draw_mode=\"means\"\n\n elif k == ord('p'): \n self.pause_exp= not self.pause_exp\n \n elif k == ord('c'):\n print self.grid.limits\n print \"Area: \", self.grid.calculate_area(self.grid.limits)\n print \"Area of Area: \", self.grid.area.area_size\n colours=['magenta','cyan', 'grey','white','red','yellow','green','blue']\n \n nc=0\n for j in self.grid.area_splits:\n print j.area_size\n #self.limits_canvas.draw_coordinate(j.centre, 'crimson', size=3, thickness=2)\n for i in j.limit_lines:\n #self.limits_canvas.draw_line(i, colours[nc], thickness=1)\n self.limits_canvas.draw_line(i, 'white', thickness=1)\n if nc < len(colours)-1:\n nc+=1\n else:\n nc=0\n\n self.redraw()\n \n elif k== ord('r'):\n #diff = (self.grid.models[1].output - self.grid.models[0].output)\n #print np.mean(diff), np.std(diff), diff.dtype\n print self.get_errors() \n\n elif k== ord('o'):\n print self.results\n outfile = self.expid + '.yaml'\n #print self.data_out\n yml = yaml.safe_dump(self.results, default_flow_style=False)\n fh = open(outfile, \"w\")\n s_output = str(yml)\n #print s_output\n fh.write(s_output)\n fh.close\n\n \n \n\n def get_errors(self):\n error_chain=[]\n shapeo = self.grid.models[0].output.shape\n \n #print vals\n print \"Waiting for Service\"\n rospy.wait_for_service('\/compare_model')\n compare_serv = rospy.ServiceProxy('\/compare_model', CompareModels)\n \n for i in range(self.n_models):\n try:\n d={}\n print \"going for it \", i\n vals = np.reshape(self.grid.models[i].output, -1)\n resp1 = compare_serv('kriging', i, shapeo[0], shapeo[1], vals.tolist())\n d['name']= self.grid.models[i].name\n d['type']= 'kriging'\n d['errors']={}\n d['errors']['error']=resp1.error\n d['errors']['mse']=resp1.mse\n d['errors']['std']=resp1.std\n d['errors']['var']=resp1.var\n #print resp1\n error_chain.append(d)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e \n \n try:\n d={}\n print \"Mean \"\n vals = np.reshape(self.grid.mean_output, -1)\n resp1 = compare_serv('mean', 0, shapeo[0], shapeo[1], vals.tolist())\n #print self.grid.mean_output\n d['name']= 'mean'\n d['type']= 'mean'\n d['errors']={}\n d['errors']['error']=resp1.error\n d['errors']['mse']=resp1.mse\n d['errors']['std']=resp1.std\n d['errors']['var']=resp1.var\n\n #print resp1\n error_chain.append(d)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e \n \n \n return error_chain\n \n def signal_handler(self, signal, frame):\n self.running = False\n print('You pressed Ctrl+C!')\n\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--cell_size\", type=int, default=10,\n help=\"cell size in meters\")\n parser.add_argument(\"--initial_percent\", type=float, default=0.05,\n help=\"Percentage of cells to be explored on the initial plan\") \n parser.add_argument(\"--limits_file\", type=str, default='limits.coords',\n help=\"Percentage of cells to be explored on the initial plan\")\n 
parser.add_argument(\"--initial_waypoint\", type=str, default='WayPoint498',\n help=\"Percentage of cells to be explored on the initial plan\")\n parser.add_argument(\"--area_coverage_type\", type=str, default='area_split',\n help=\"Type of area coverage, random or area_split\")\n parser.add_argument(\"--experiment_name\", type=str, default='exp1',\n help=\"Experiment ID\")\n args = parser.parse_args()\n \n rospy.init_node('kriging_exploration')\n #Explorator(53.261685, -0.527158, 16, 640, args.cell_size)\n \n #Explorator(53.267213, -0.533420, 17, 640, args) #Football Field\n Explorator(53.261576, -0.526648, 17, 640, args) #Half cosmos field\n #Explorator(53.261685, -0.525158, 17, 640, args) #COSMOS Field\n\n \n","license":"mit"} {"repo_name":"rabipanda\/tensorflow","path":"tensorflow\/contrib\/metrics\/python\/kernel_tests\/histogram_ops_test.py","copies":"130","size":"9577","content":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for histogram_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.metrics.python.ops import histogram_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass Strict1dCumsumTest(test.TestCase):\n \"\"\"Test this private function.\"\"\"\n\n def test_empty_tensor_returns_empty(self):\n with self.test_session():\n tensor = constant_op.constant([])\n result = histogram_ops._strict_1d_cumsum(tensor, 0)\n expected = constant_op.constant([])\n np.testing.assert_array_equal(expected.eval(), result.eval())\n\n def test_length_1_tensor_works(self):\n with self.test_session():\n tensor = constant_op.constant([3], dtype=dtypes.float32)\n result = histogram_ops._strict_1d_cumsum(tensor, 1)\n expected = constant_op.constant([3], dtype=dtypes.float32)\n np.testing.assert_array_equal(expected.eval(), result.eval())\n\n def test_length_3_tensor_works(self):\n with self.test_session():\n tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)\n result = histogram_ops._strict_1d_cumsum(tensor, 3)\n expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)\n np.testing.assert_array_equal(expected.eval(), result.eval())\n\n\nclass AUCUsingHistogramTest(test.TestCase):\n\n def setUp(self):\n self.rng = np.random.RandomState(0)\n\n def test_empty_labels_and_scores_gives_nan_auc(self):\n with self.test_session():\n labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)\n scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)\n score_range = [0, 1.]\n auc, update_op = histogram_ops.auc_using_histogram(labels, scores,\n score_range)\n variables.local_variables_initializer().run()\n 
update_op.run()\n self.assertTrue(np.isnan(auc.eval()))\n\n def test_perfect_scores_gives_auc_1(self):\n self._check_auc(\n nbins=100,\n desired_auc=1.0,\n score_range=[0, 1.],\n num_records=50,\n frac_true=0.5,\n atol=0.05,\n num_updates=1)\n\n def test_terrible_scores_gives_auc_0(self):\n self._check_auc(\n nbins=100,\n desired_auc=0.0,\n score_range=[0, 1.],\n num_records=50,\n frac_true=0.5,\n atol=0.05,\n num_updates=1)\n\n def test_many_common_conditions(self):\n for nbins in [50]:\n for desired_auc in [0.3, 0.5, 0.8]:\n for score_range in [[-1, 1], [-10, 0]]:\n for frac_true in [0.3, 0.8]:\n # Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.\n self._check_auc(\n nbins=nbins,\n desired_auc=desired_auc,\n score_range=score_range,\n num_records=100,\n frac_true=frac_true,\n atol=0.05,\n num_updates=50)\n\n def test_large_class_imbalance_still_ok(self):\n # With probability frac_true ** num_records, each batch contains only True\n # records. In this case, ~ 95%.\n # Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.\n self._check_auc(\n nbins=100,\n desired_auc=0.8,\n score_range=[-1, 1.],\n num_records=10,\n frac_true=0.995,\n atol=0.05,\n num_updates=1000)\n\n def test_super_accuracy_with_many_bins_and_records(self):\n # Test passes with atol = 0.0005. Increased atol to avoid flakes.\n self._check_auc(\n nbins=1000,\n desired_auc=0.75,\n score_range=[0, 1.],\n num_records=1000,\n frac_true=0.5,\n atol=0.005,\n num_updates=100)\n\n def _check_auc(self,\n nbins=100,\n desired_auc=0.75,\n score_range=None,\n num_records=50,\n frac_true=0.5,\n atol=0.05,\n num_updates=10):\n \"\"\"Check auc accuracy against synthetic data.\n\n Args:\n nbins: nbins arg from contrib.metrics.auc_using_histogram.\n desired_auc: Number in [0, 1]. The desired auc for synthetic data.\n score_range: 2-tuple, (low, high), giving the range of the resultant\n scores. Defaults to [0, 1.].\n num_records: Positive integer. The number of records to return.\n frac_true: Number in (0, 1). Expected fraction of resultant labels that\n will be True. This is just in expectation...more or less may actually\n be True.\n atol: Absolute tolerance for final AUC estimate.\n num_updates: Update internal histograms this many times, each with a new\n batch of synthetic data, before computing final AUC.\n\n Raises:\n AssertionError: If resultant AUC is not within atol of theoretical AUC\n from synthetic data.\n \"\"\"\n score_range = [0, 1.] 
or score_range\n with self.test_session():\n labels = array_ops.placeholder(dtypes.bool, shape=[num_records])\n scores = array_ops.placeholder(dtypes.float32, shape=[num_records])\n auc, update_op = histogram_ops.auc_using_histogram(\n labels, scores, score_range, nbins=nbins)\n variables.local_variables_initializer().run()\n # Updates, then extract auc.\n for _ in range(num_updates):\n labels_a, scores_a = synthetic_data(desired_auc, score_range,\n num_records, self.rng, frac_true)\n update_op.run(feed_dict={labels: labels_a, scores: scores_a})\n labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,\n self.rng, frac_true)\n # Fetch current auc, and verify that fetching again doesn't change it.\n auc_eval = auc.eval()\n self.assertAlmostEqual(auc_eval, auc.eval(), places=5)\n\n msg = ('nbins: %s, desired_auc: %s, score_range: %s, '\n 'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,\n desired_auc,\n score_range,\n num_records,\n frac_true,\n num_updates)\n np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)\n\n\ndef synthetic_data(desired_auc, score_range, num_records, rng, frac_true):\n \"\"\"Create synthetic boolean_labels and scores with adjustable auc.\n\n Args:\n desired_auc: Number in [0, 1], the theoretical AUC of resultant data.\n score_range: 2-tuple, (low, high), giving the range of the resultant scores\n num_records: Positive integer. The number of records to return.\n rng: Initialized np.random.RandomState random number generator\n frac_true: Number in (0, 1). Expected fraction of resultant labels that\n will be True. This is just in expectation...more or less may actually be\n True.\n\n Returns:\n boolean_labels: np.array, dtype=bool.\n scores: np.array, dtype=np.float32\n \"\"\"\n # We prove here why the method (below) for computing AUC works. 
Of course we\n # also checked this against sklearn.metrics.roc_auc_curve.\n #\n # First do this for score_range = [0, 1], then rescale.\n # WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap\n # the labels.\n # So for AUC in [0, 1] we create False and True labels\n # and corresponding scores drawn from:\n # F ~ U[0, 1], T ~ U[x, 1]\n # We have,\n # AUC\n # = P[T > F]\n # = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]\n # = (1 * x) + (0.5 * (1 - x)).\n # Inverting, we have:\n # x = 2 * AUC - 1, when AUC >= 0.5.\n assert 0 <= desired_auc <= 1\n assert 0 < frac_true < 1\n\n if desired_auc < 0.5:\n flip_labels = True\n desired_auc = 1 - desired_auc\n frac_true = 1 - frac_true\n else:\n flip_labels = False\n x = 2 * desired_auc - 1\n\n labels = rng.binomial(1, frac_true, size=num_records).astype(bool)\n num_true = labels.sum()\n num_false = num_records - labels.sum()\n\n # Draw F ~ U[0, 1], and T ~ U[x, 1]\n false_scores = rng.rand(num_false)\n true_scores = x + rng.rand(num_true) * (1 - x)\n\n # Reshape [0, 1] to score_range.\n def reshape(scores):\n return score_range[0] + scores * (score_range[1] - score_range[0])\n\n false_scores = reshape(false_scores)\n true_scores = reshape(true_scores)\n\n # Place into one array corresponding with the labels.\n scores = np.nan * np.ones(num_records, dtype=np.float32)\n scores[labels] = true_scores\n scores[~labels] = false_scores\n\n if flip_labels:\n labels = ~labels\n\n return labels, scores\n\n\nif __name__ == '__main__':\n test.main()\n","license":"apache-2.0"} {"repo_name":"sharthee\/ProgrammingAssignment2","path":"labs\/lab2\/cs109style.py","copies":"38","size":"1293","content":"from __future__ import print_function\n\nfrom IPython.core.display import HTML\nfrom matplotlib import rcParams\n\n#colorbrewer2 Dark2 qualitative color table\ndark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),\n (0.8509803921568627, 0.37254901960784315, 0.00784313725490196),\n (0.4588235294117647, 0.4392156862745098, 0.7019607843137254),\n (0.9058823529411765, 0.1607843137254902, 0.5411764705882353),\n (0.4, 0.6509803921568628, 0.11764705882352941),\n (0.9019607843137255, 0.6705882352941176, 0.00784313725490196),\n (0.6509803921568628, 0.4627450980392157, 0.11372549019607843),\n (0.4, 0.4, 0.4)]\n\ndef customize_mpl():\n \"\"\"Tweak matplotlib visual style\"\"\"\n print(\"Setting custom matplotlib visual style\")\n\n rcParams['figure.figsize'] = (10, 6)\n rcParams['figure.dpi'] = 150\n rcParams['axes.color_cycle'] = dark2_colors\n rcParams['lines.linewidth'] = 2\n rcParams['axes.grid'] = True\n rcParams['axes.facecolor'] = '#eeeeee'\n rcParams['font.size'] = 14\n rcParams['patch.edgecolor'] = 'none'\n\n\ndef customize_css():\n print(\"Setting custom CSS for the IPython Notebook\")\n styles = open('custom.css', 'r').read()\n return HTML(styles)\n","license":"mit"} {"repo_name":"rabernat\/xrft","path":"setup.py","copies":"1","size":"1391","content":"import os\nimport versioneer\nfrom setuptools import setup, find_packages\nPACKAGES = find_packages()\n\nDISTNAME = 'xrft'\nLICENSE = 'MIT'\nAUTHOR = 'xrft Developers'\nAUTHOR_EMAIL = 'takaya@ldeo.columbia.edu'\nURL = 'https:\/\/github.com\/xgcm\/xrft'\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science\/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 
'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific\/Engineering',\n]\n\nINSTALL_REQUIRES = ['xarray', 'dask', 'numpy', 'pandas', 'scipy']\nEXTRAS_REQUIRE = ['cftime']\nSETUP_REQUIRES = ['pytest-runner']\nTESTS_REQUIRE = ['pytest >= 2.8', 'coverage']\n\nDESCRIPTION = \"Discrete Fourier Transform with xarray\"\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nsetup(name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n description=DESCRIPTION,\n long_description=readme(),\n install_requires=INSTALL_REQUIRES,\n setup_requires=SETUP_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n packages=find_packages())\n","license":"mit"} {"repo_name":"enigmampc\/catalyst","path":"catalyst\/data\/continuous_future_reader.py","copies":"1","size":"12198","content":"import numpy as np\nimport pandas as pd\nfrom catalyst.data.session_bars import SessionBarReader\n\n\nclass ContinuousFutureSessionBarReader(SessionBarReader):\n\n def __init__(self, bar_reader, roll_finders):\n self._bar_reader = bar_reader\n self._roll_finders = roll_finders\n\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n \"\"\"\n Parameters\n ----------\n fields : list of str\n 'sid'\n start_dt: Timestamp\n Beginning of the window range.\n end_dt: Timestamp\n End of the window range.\n sids : list of int\n The asset identifiers in the window.\n\n Returns\n -------\n list of np.ndarray\n A list with an entry per field of ndarrays with shape\n (minutes in range, sids) with a dtype of float64, containing the\n values for the respective field over start and end dt range.\n \"\"\"\n rolls_by_asset = {}\n for asset in assets:\n rf = self._roll_finders[asset.roll_style]\n rolls_by_asset[asset] = rf.get_rolls(\n asset.root_symbol, start_date, end_date, asset.offset)\n num_sessions = len(\n self.trading_calendar.sessions_in_range(start_date, end_date))\n shape = num_sessions, len(assets)\n\n results = []\n\n tc = self._bar_reader.trading_calendar\n sessions = tc.sessions_in_range(start_date, end_date)\n\n # Get partitions\n partitions_by_asset = {}\n for asset in assets:\n partitions = []\n partitions_by_asset[asset] = partitions\n rolls = rolls_by_asset[asset]\n start = start_date\n for roll in rolls:\n sid, roll_date = roll\n start_loc = sessions.get_loc(start)\n if roll_date is not None:\n end = roll_date - sessions.freq\n end_loc = sessions.get_loc(end)\n else:\n end = end_date\n end_loc = len(sessions) - 1\n partitions.append((sid, start, end, start_loc, end_loc))\n if roll[-1] is not None:\n start = sessions[end_loc + 1]\n\n for column in columns:\n if column != 'volume' and column != 'sid':\n out = np.full(shape, np.nan)\n else:\n out = np.zeros(shape, dtype=np.int64)\n for i, asset in enumerate(assets):\n partitions = partitions_by_asset[asset]\n for sid, start, end, start_loc, end_loc in partitions:\n if column != 'sid':\n result = self._bar_reader.load_raw_arrays(\n [column], start, end, [sid])[0][:, 0]\n else:\n result = int(sid)\n out[start_loc:end_loc + 1, i] = result\n results.append(out)\n return results\n\n @property\n def last_available_dt(self):\n \"\"\"\n Returns\n -------\n dt : pd.Timestamp\n The last session for which the reader can provide data.\n \"\"\"\n return self._bar_reader.last_available_dt\n\n @property\n def trading_calendar(self):\n \"\"\"\n Returns the catalyst.utils.calendar.trading_calendar used to read\n the data. 
Can be None (if the writer didn't specify it).\n \"\"\"\n return self._bar_reader.trading_calendar\n\n @property\n def first_trading_day(self):\n \"\"\"\n Returns\n -------\n dt : pd.Timestamp\n The first trading day (session) for which the reader can provide\n data.\n \"\"\"\n return self._bar_reader.first_trading_day\n\n def get_value(self, continuous_future, dt, field):\n \"\"\"\n Retrieve the value at the given coordinates.\n\n Parameters\n ----------\n sid : int\n The asset identifier.\n dt : pd.Timestamp\n The timestamp for the desired data point.\n field : string\n The OHLVC name for the desired data point.\n\n Returns\n -------\n value : float|int\n The value at the given coordinates, ``float`` for OHLC, ``int``\n for 'volume'.\n\n Raises\n ------\n NoDataOnDate\n If the given dt is not a valid market minute (in minute mode) or\n session (in daily mode) according to this reader's tradingcalendar.\n \"\"\"\n rf = self._roll_finders[continuous_future.roll_style]\n sid = (rf.get_contract_center(continuous_future.root_symbol,\n dt,\n continuous_future.offset))\n return self._bar_reader.get_value(sid, dt, field)\n\n def get_last_traded_dt(self, asset, dt):\n \"\"\"\n Get the latest minute on or before ``dt`` in which ``asset`` traded.\n\n If there are no trades on or before ``dt``, returns ``pd.NaT``.\n\n Parameters\n ----------\n asset : catalyst.asset.Asset\n The asset for which to get the last traded minute.\n dt : pd.Timestamp\n The minute at which to start searching for the last traded minute.\n\n Returns\n -------\n last_traded : pd.Timestamp\n The dt of the last trade for the given asset, using the input\n dt as a vantage point.\n \"\"\"\n rf = self._roll_finders[asset.roll_style]\n sid = (rf.get_contract_center(asset.root_symbol,\n dt,\n asset.offset))\n if sid is None:\n return pd.NaT\n contract = rf.asset_finder.retrieve_asset(sid)\n return self._bar_reader.get_last_traded_dt(contract, dt)\n\n @property\n def sessions(self):\n \"\"\"\n Returns\n -------\n sessions : DatetimeIndex\n All session labels (unionining the range for all assets) which the\n reader can provide.\n \"\"\"\n return self._bar_reader.sessions\n\n\nclass ContinuousFutureMinuteBarReader(SessionBarReader):\n\n def __init__(self, bar_reader, roll_finders):\n self._bar_reader = bar_reader\n self._roll_finders = roll_finders\n\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n \"\"\"\n Parameters\n ----------\n fields : list of str\n 'open', 'high', 'low', 'close', or 'volume'\n start_dt: Timestamp\n Beginning of the window range.\n end_dt: Timestamp\n End of the window range.\n sids : list of int\n The asset identifiers in the window.\n\n Returns\n -------\n list of np.ndarray\n A list with an entry per field of ndarrays with shape\n (minutes in range, sids) with a dtype of float64, containing the\n values for the respective field over start and end dt range.\n \"\"\"\n rolls_by_asset = {}\n\n tc = self.trading_calendar\n start_session = tc.minute_to_session_label(start_date)\n end_session = tc.minute_to_session_label(end_date)\n\n for asset in assets:\n rf = self._roll_finders[asset.roll_style]\n rolls_by_asset[asset] = rf.get_rolls(\n asset.root_symbol,\n start_session,\n end_session, asset.offset)\n\n sessions = tc.sessions_in_range(start_date, end_date)\n\n minutes = tc.minutes_in_range(start_date, end_date)\n num_minutes = len(minutes)\n shape = num_minutes, len(assets)\n\n results = []\n\n # Get partitions\n partitions_by_asset = {}\n for asset in assets:\n partitions = []\n 
partitions_by_asset[asset] = partitions\n rolls = rolls_by_asset[asset]\n start = start_date\n for roll in rolls:\n sid, roll_date = roll\n start_loc = minutes.searchsorted(start)\n if roll_date is not None:\n _, end = tc.open_and_close_for_session(\n roll_date - sessions.freq)\n end_loc = minutes.searchsorted(end)\n else:\n end = end_date\n end_loc = len(minutes) - 1\n partitions.append((sid, start, end, start_loc, end_loc))\n if roll[-1] is not None:\n start, _ = tc.open_and_close_for_session(\n tc.minute_to_session_label(minutes[end_loc + 1]))\n\n for column in columns:\n if column != 'volume':\n out = np.full(shape, np.nan)\n else:\n out = np.zeros(shape, dtype=np.uint32)\n for i, asset in enumerate(assets):\n partitions = partitions_by_asset[asset]\n for sid, start, end, start_loc, end_loc in partitions:\n if column != 'sid':\n result = self._bar_reader.load_raw_arrays(\n [column], start, end, [sid])[0][:, 0]\n else:\n result = int(sid)\n out[start_loc:end_loc + 1, i] = result\n results.append(out)\n return results\n\n @property\n def last_available_dt(self):\n \"\"\"\n Returns\n -------\n dt : pd.Timestamp\n The last session for which the reader can provide data.\n \"\"\"\n return self._bar_reader.last_available_dt\n\n @property\n def trading_calendar(self):\n \"\"\"\n Returns the catalyst.utils.calendar.trading_calendar used to read\n the data. Can be None (if the writer didn't specify it).\n \"\"\"\n return self._bar_reader.trading_calendar\n\n @property\n def first_trading_day(self):\n \"\"\"\n Returns\n -------\n dt : pd.Timestamp\n The first trading day (session) for which the reader can provide\n data.\n \"\"\"\n return self._bar_reader.first_trading_day\n\n def get_value(self, continuous_future, dt, field):\n \"\"\"\n Retrieve the value at the given coordinates.\n\n Parameters\n ----------\n sid : int\n The asset identifier.\n dt : pd.Timestamp\n The timestamp for the desired data point.\n field : string\n The OHLVC name for the desired data point.\n\n Returns\n -------\n value : float|int\n The value at the given coordinates, ``float`` for OHLC, ``int``\n for 'volume'.\n\n Raises\n ------\n NoDataOnDate\n If the given dt is not a valid market minute (in minute mode) or\n session (in daily mode) according to this reader's tradingcalendar.\n \"\"\"\n rf = self._roll_finders[continuous_future.roll_style]\n sid = (rf.get_contract_center(continuous_future.root_symbol,\n dt,\n continuous_future.offset))\n return self._bar_reader.get_value(sid, dt, field)\n\n def get_last_traded_dt(self, asset, dt):\n \"\"\"\n Get the latest minute on or before ``dt`` in which ``asset`` traded.\n\n If there are no trades on or before ``dt``, returns ``pd.NaT``.\n\n Parameters\n ----------\n asset : catalyst.asset.Asset\n The asset for which to get the last traded minute.\n dt : pd.Timestamp\n The minute at which to start searching for the last traded minute.\n\n Returns\n -------\n last_traded : pd.Timestamp\n The dt of the last trade for the given asset, using the input\n dt as a vantage point.\n \"\"\"\n rf = self._roll_finders[asset.roll_style]\n sid = (rf.get_contract_center(asset.root_symbol,\n dt,\n asset.offset))\n if sid is None:\n return pd.NaT\n contract = rf.asset_finder.retrieve_asset(sid)\n return self._bar_reader.get_last_traded_dt(contract, dt)\n\n @property\n def sessions(self):\n return self._bar_reader.sessions\n","license":"apache-2.0"} 
{"repo_name":"CIFASIS\/pylearn2","path":"pylearn2\/packaged_dependencies\/theano_linear\/unshared_conv\/localdot.py","copies":"39","size":"5044","content":"\"\"\"\nWRITEME\n\"\"\"\n\nimport logging\nfrom ..linear import LinearTransform\nfrom .unshared_conv import FilterActs, ImgActs\nfrom theano.compat.six.moves import xrange\nfrom theano.sandbox import cuda\nif cuda.cuda_available:\n import gpu_unshared_conv # register optimizations\n\nimport numpy as np\nimport warnings\n\ntry:\n import matplotlib.pyplot as plt\nexcept (RuntimeError, ImportError, TypeError) as matplotlib_exception:\n warnings.warn(\"Unable to import matplotlib. Some features unavailable. \"\n \"Original exception: \" + str(matplotlib_exception))\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalDot(LinearTransform):\n \"\"\"\n LocalDot is an linear operation computationally similar to\n convolution in the spatial domain, except that whereas convolution\n applying a single filter or set of filters across an image, the\n LocalDot has different filterbanks for different points in the image.\n\n Mathematically, this is a general linear transform except for a\n restriction that filters are 0 outside of a spatially localized patch\n within the image.\n\n Image shape is 5-tuple:\n color_groups\n colors_per_group\n rows\n cols\n images\n\n Filterbank shape is 7-tuple (!)\n 0 row_positions\n 1 col_positions\n 2 colors_per_group\n 3 height\n 4 width\n 5 color_groups\n 6 filters_per_group\n\n The result of left-multiplication a 5-tuple with shape:\n filter_groups\n filters_per_group\n row_positions\n col_positions\n images\n\n Parameters\n ----------\n filters : WRITEME\n irows : WRITEME\n Image rows\n icols : WRITEME\n Image columns\n subsample : WRITEME\n padding_start : WRITEME\n filters_shape : WRITEME\n message : WRITEME\n \"\"\"\n\n def __init__(self, filters, irows, icols=None,\n subsample=(1, 1),\n padding_start=None,\n filters_shape=None,\n message=\"\"):\n LinearTransform.__init__(self, [filters])\n self._filters = filters\n if filters_shape is None:\n self._filters_shape = tuple(filters.get_value(borrow=True).shape)\n else:\n self._filters_shape = tuple(filters_shape)\n self._irows = irows\n if icols is None:\n self._icols = irows\n else:\n self._icols = icols\n if self._icols != self._irows:\n raise NotImplementedError('GPU code at least needs square imgs')\n self._subsample = tuple(subsample)\n self._padding_start = padding_start\n\n if len(self._filters_shape) != 7:\n raise TypeError('need 7-tuple filter shape', self._filters_shape)\n if self._subsample[0] != self._subsample[1]:\n raise ValueError('subsampling must be same in rows and cols')\n\n self._filter_acts = FilterActs(self._subsample[0])\n self._img_acts = ImgActs(module_stride=self._subsample[0])\n\n if message:\n self._message = message\n else:\n self._message = filters.name\n\n def rmul(self, x):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n assert x.ndim == 5\n return self._filter_acts(x, self._filters)\n\n def rmul_T(self, x):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n return self._img_acts(self._filters, x, self._irows, self._icols)\n\n def col_shape(self):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n ishape = self.row_shape() + (-99,)\n fshape = self._filters_shape\n hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))\n assert hshape[-1] == -99\n return hshape[:-1]\n\n def row_shape(self):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n fshape = self._filters_shape\n fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]\n fgroups, filters_per_group = fshape[-2:]\n\n return fgroups, fcolors, self._irows, self._icols\n\n\n def print_status(self):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n raise NotImplementedError(\"TODO: fix dependence on non-existent \"\n \"ndarray_status function\")\n \"\"\"print ndarray_status(\n self._filters.get_value(borrow=True),\n msg='%s{%s}'% (self.__class__.__name__,\n self._message))\n \"\"\"\n\n def imshow_gray(self):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n filters = self._filters.get_value()\n modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape\n logger.info(filters.shape)\n\n rval = np.zeros((\n modR * (rows + 1) - 1,\n modC * (cols + 1) - 1,\n ))\n\n for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):\n for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):\n rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]\n\n plt.imshow(rval, cmap='gray')\n return rval\n","license":"bsd-3-clause"} {"repo_name":"dingocuster\/scikit-learn","path":"examples\/semi_supervised\/plot_label_propagation_structure.py","copies":"247","size":"2432","content":"\"\"\"\n==============================================\nLabel Propagation learning a complex structure\n==============================================\n\nExample of LabelPropagation learning a complex internal structure\nto demonstrate \"manifold learning\". The outer circle should be\nlabeled \"red\" and the inner circle \"blue\". Because both label groups\nlie inside their own distinct shape, we can see that the labels\npropagate correctly around the circle.\n\"\"\"\nprint(__doc__)\n\n# Authors: Clay Woolam \n# Andreas Mueller \n# Licence: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.semi_supervised import label_propagation\nfrom sklearn.datasets import make_circles\n\n# generate ring with inner box\nn_samples = 200\nX, y = make_circles(n_samples=n_samples, shuffle=False)\nouter, inner = 0, 1\nlabels = -np.ones(n_samples)\nlabels[0] = outer\nlabels[-1] = inner\n\n###############################################################################\n# Learn with LabelSpreading\nlabel_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)\nlabel_spread.fit(X, labels)\n\n###############################################################################\n# Plot output labels\noutput_labels = label_spread.transduction_\nplt.figure(figsize=(8.5, 4))\nplt.subplot(1, 2, 1)\nplot_outer_labeled, = plt.plot(X[labels == outer, 0],\n X[labels == outer, 1], 'rs')\nplot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')\nplot_inner_labeled, = plt.plot(X[labels == inner, 0],\n X[labels == inner, 1], 'bs')\nplt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),\n ('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',\n numpoints=1, shadow=False)\nplt.title(\"Raw data (2 classes=red and blue)\")\n\nplt.subplot(1, 2, 2)\noutput_label_array = np.asarray(output_labels)\nouter_numbers = np.where(output_label_array == outer)[0]\ninner_numbers = np.where(output_label_array == inner)[0]\nplot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')\nplot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')\nplt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),\n 'upper left', numpoints=1, shadow=False)\nplt.title(\"Labels learned with Label Spreading 
(KNN)\")\n\nplt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"nhuntwalker\/astroML","path":"book_figures\/chapter5\/fig_likelihood_cauchy.py","copies":"3","size":"3219","content":"\"\"\"\nLog-likelihood for Cauchy Distribution\n--------------------------------------\nFigure 5.10\n\nAn illustration of the logarithm of posterior probability distribution for\n:math:`\\mu` and :math:`\\gamma`, :math:`L(\\mu,\\gamma)` (see eq. 5.75) for\nN = 10 (the sample is generated using the Cauchy distribution with\n:math:`\\mu = 0` and :math:`\\gamma = 2`). The maximum of L is renormalized\nto 0, and color coded as shown in the legend. The contours enclose the regions\nthat contain 0.683, 0.955 and 0.997 of the cumulative (integrated) posterior\nprobability.\n\"\"\"\n# Author: Jake VanderPlas\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http:\/\/astroML.github.com\n# To report a bug or issue, use the following forum:\n# https:\/\/groups.google.com\/forum\/#!forum\/astroml-general\nfrom __future__ import print_function, division\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import cauchy\nfrom astroML.plotting.mcmc import convert_to_stdev\nfrom astroML.stats import median_sigmaG\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. In that case,\n# you can set usetex to False.\nfrom astroML.plotting import setup_text_plots\nsetup_text_plots(fontsize=8, usetex=True)\n\n\ndef cauchy_logL(xi, gamma, mu):\n \"\"\"Equation 5.74: cauchy likelihood\"\"\"\n xi = np.asarray(xi)\n n = xi.size\n shape = np.broadcast(gamma, mu).shape\n\n xi = xi.reshape(xi.shape + tuple([1 for s in shape]))\n\n return ((n - 1) * np.log(gamma)\n - np.sum(np.log(gamma ** 2 + (xi - mu) ** 2), 0))\n\n\n#------------------------------------------------------------\n# Define the grid and compute logL\ngamma = np.linspace(0.1, 5, 70)\nmu = np.linspace(-5, 5, 70)\n\nnp.random.seed(44)\nmu0 = 0\ngamma0 = 2\nxi = cauchy(mu0, gamma0).rvs(10)\n\nlogL = cauchy_logL(xi, gamma[:, np.newaxis], mu)\nlogL -= logL.max()\n\n#------------------------------------------------------------\n# Find the max and print some information\ni, j = np.where(logL >= np.max(logL))\n\nprint(\"mu from likelihood:\", mu[j])\nprint(\"gamma from likelihood:\", gamma[i])\nprint()\n\nmed, sigG = median_sigmaG(xi)\nprint(\"mu from median\", med)\nprint(\"gamma from quartiles:\", sigG \/ 1.483) # Equation 3.54\nprint()\n\n#------------------------------------------------------------\n# Plot the results\nfig = plt.figure(figsize=(5, 3.75))\nplt.imshow(logL, origin='lower', cmap=plt.cm.binary,\n extent=(mu[0], mu[-1], gamma[0], gamma[-1]),\n aspect='auto')\nplt.colorbar().set_label(r'$\\log(L)$')\nplt.clim(-5, 0)\n\nplt.contour(mu, gamma, convert_to_stdev(logL),\n levels=(0.683, 0.955, 0.997),\n colors='k')\n\nplt.text(0.5, 0.93,\n r'$L(\\mu,\\gamma)\\ \\mathrm{for}\\ \\bar{x}=0,\\ \\gamma=2,\\ n=10$',\n bbox=dict(ec='k', fc='w', alpha=0.9),\n ha='center', va='center', transform=plt.gca().transAxes)\n\nplt.xlabel(r'$\\mu$')\nplt.ylabel(r'$\\gamma$')\n\nplt.show()\n","license":"bsd-2-clause"} 
{"repo_name":"AllenDowney\/SoftwareSystems","path":"hw04\/wave3\/thinkdsp.py","copies":"23","size":"31996","content":"\"\"\"This file contains code used in \"Think DSP\",\nby Allen B. Downey, available from greenteapress.com\n\nCopyright 2013 Allen B. Downey\nLicense: GNU GPLv3 http:\/\/www.gnu.org\/licenses\/gpl.html\n\"\"\"\n\nimport array\nimport math\nimport numpy\nimport random\nimport scipy\nimport scipy.stats\nimport struct\nimport subprocess\nimport thinkplot\n\nfrom fractions import gcd\nfrom wave import open as open_wave\n\nimport matplotlib.pyplot as pyplot\n\nPI2 = math.pi * 2\n\n\ndef random_seed(x):\n \"\"\"Initialize the random and numpy.random generators.\n\n x: int seed\n \"\"\"\n random.seed(x)\n numpy.random.seed(x)\n\n\nclass UnimplementedMethodException(Exception):\n \"\"\"Exception if someone calls a method that should be overridden.\"\"\"\n\n\nclass WavFileWriter(object):\n \"\"\"Writes wav files.\"\"\"\n\n def __init__(self, filename='sound.wav', framerate=11025):\n \"\"\"Opens the file and sets parameters.\n\n filename: string\n framerate: samples per second\n \"\"\"\n self.filename = filename\n self.framerate = framerate\n self.nchannels = 1\n self.sampwidth = 2\n self.bits = self.sampwidth * 8\n self.bound = 2**(self.bits-1) - 1\n\n self.fmt = 'h'\n self.dtype = numpy.int16\n\n self.fp = open_wave(self.filename, 'w')\n self.fp.setnchannels(self.nchannels)\n self.fp.setsampwidth(self.sampwidth)\n self.fp.setframerate(self.framerate)\n \n def write(self, wave):\n \"\"\"Writes a wave.\n\n wave: Wave\n \"\"\"\n zs = wave.quantize(self.bound, self.dtype)\n self.fp.writeframes(zs.tostring())\n\n def close(self, duration=0):\n \"\"\"Closes the file.\n\n duration: how many seconds of silence to append\n \"\"\"\n if duration:\n self.write(rest(duration))\n\n self.fp.close()\n\n\ndef read_wave(filename='sound.wav'):\n \"\"\"Reads a wave file.\n\n filename: string\n\n returns: Wave\n \"\"\"\n fp = open_wave(filename, 'r')\n\n nchannels = fp.getnchannels()\n nframes = fp.getnframes()\n sampwidth = fp.getsampwidth()\n framerate = fp.getframerate()\n \n z_str = fp.readframes(nframes)\n \n fp.close()\n\n dtype_map = {1:numpy.int8, 2:numpy.int16}\n assert sampwidth in dtype_map\n \n ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])\n wave = Wave(ys, framerate)\n return wave\n\n\ndef play_wave(filename='sound.wav', player='aplay'):\n \"\"\"Plays a wave file.\n\n filename: string\n player: string name of executable that plays wav files\n \"\"\"\n cmd = '%s %s' % (player, filename)\n popen = subprocess.Popen(cmd, shell=True)\n popen.communicate()\n\n\nclass _SpectrumParent(object):\n \"\"\"Contains code common to Spectrum and DCT.\n \"\"\"\n\n @property\n def max_freq(self):\n return self.framerate \/ 2.0\n \n @property\n def freq_res(self):\n return self.max_freq \/ (len(self.fs) - 1)\n\n def plot(self, low=0, high=None, **options):\n \"\"\"Plots amplitude vs frequency.\n\n low: int index to start at \n high: int index to end at\n \"\"\"\n thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)\n\n def plot_power(self, low=0, high=None, **options):\n \"\"\"Plots power vs frequency.\n\n low: int index to start at \n high: int index to end at\n \"\"\"\n thinkplot.plot(self.fs[low:high], self.power[low:high], **options)\n\n def estimate_slope(self):\n \"\"\"Runs linear regression on log power vs log frequency.\n\n returns: slope, inter, r2, p, stderr\n \"\"\"\n x = numpy.log(self.fs[1:])\n y = numpy.log(self.power[1:])\n t = scipy.stats.linregress(x,y)\n return t\n\n 
def peaks(self):\n \"\"\"Finds the highest peaks and their frequencies.\n\n returns: sorted list of (amplitude, frequency) pairs\n \"\"\"\n t = zip(self.amps, self.fs)\n t.sort(reverse=True)\n return t\n\n\nclass Spectrum(_SpectrumParent):\n \"\"\"Represents the spectrum of a signal.\"\"\"\n\n def __init__(self, hs, framerate):\n self.hs = hs\n self.framerate = framerate\n\n n = len(hs)\n self.fs = numpy.linspace(0, self.max_freq, n)\n\n def __add__(self, other):\n if other == 0:\n return self\n\n assert self.framerate == other.framerate\n hs = self.hs + other.hs\n return Spectrum(hs, self.framerate)\n\n __radd__ = __add__\n \n\n @property\n def real(self):\n \"\"\"Returns the real part of the hs (read-only property).\"\"\"\n return numpy.real(self.hs)\n\n @property\n def imag(self):\n \"\"\"Returns the imaginary part of the hs (read-only property).\"\"\"\n return numpy.imag(self.hs)\n\n @property\n def amps(self):\n \"\"\"Returns a sequence of amplitudes (read-only property).\"\"\"\n return numpy.absolute(self.hs)\n\n @property\n def power(self):\n \"\"\"Returns a sequence of powers (read-only property).\"\"\"\n return self.amps ** 2\n\n def low_pass(self, cutoff, factor=0):\n \"\"\"Attenuate frequencies above the cutoff.\n\n cutoff: frequency in Hz\n factor: what to multiply the magnitude by\n \"\"\"\n for i in xrange(len(self.hs)):\n if self.fs[i] > cutoff:\n self.hs[i] *= factor\n\n def high_pass(self, cutoff, factor=0):\n \"\"\"Attenuate frequencies below the cutoff.\n\n cutoff: frequency in Hz\n factor: what to multiply the magnitude by\n \"\"\"\n for i in xrange(len(self.hs)):\n if self.fs[i] < cutoff:\n self.hs[i] *= factor\n\n def band_stop(self, low_cutoff, high_cutoff, factor=0):\n \"\"\"Attenuate frequencies between the cutoffs.\n\n low_cutoff: frequency in Hz\n high_cutoff: frequency in Hz\n factor: what to multiply the magnitude by\n \"\"\"\n for i in xrange(len(self.hs)):\n if low_cutoff < self.fs[i] < high_cutoff:\n self.hs[i] = 0\n\n def pink_filter(self, beta=1):\n \"\"\"Apply a filter that would make white noise pink.\n\n beta: exponent of the pink noise\n \"\"\"\n denom = self.fs ** (beta\/2.0)\n denom[0] = 1\n self.hs \/= denom\n\n def angles(self, i):\n \"\"\"Computes phase angles in radians.\n\n returns: list of phase angles\n \"\"\"\n return numpy.angle(self.hs)\n\n def make_integrated_spectrum(self):\n \"\"\"Makes an integrated spectrum.\n \"\"\"\n cs = numpy.cumsum(self.power)\n cs \/= cs[-1]\n return IntegratedSpectrum(cs, self.fs)\n\n def make_wave(self):\n \"\"\"Transforms to the time domain.\n\n returns: Wave\n \"\"\"\n ys = numpy.fft.irfft(self.hs)\n return Wave(ys, self.framerate)\n\n\nclass IntegratedSpectrum(object):\n \"\"\"Represents the integral of a spectrum.\"\"\"\n \n def __init__(self, cs, fs):\n \"\"\"Initializes an integrated spectrum:\n\n cs: sequence of cumulative amplitudes\n fs: sequence of frequences\n \"\"\"\n self.cs = cs\n self.fs = fs\n\n def plot_power(self, low=0, high=None, expo=False, **options):\n \"\"\"Plots the integrated spectrum.\n\n low: int index to start at \n high: int index to end at\n \"\"\"\n cs = self.cs[low:high]\n fs = self.fs[low:high]\n\n if expo:\n cs = numpy.exp(cs)\n\n thinkplot.Plot(fs, cs, **options)\n\n def estimate_slope(self, low=1, high=-12000):\n \"\"\"Runs linear regression on log cumulative power vs log frequency.\n\n returns: slope, inter, r2, p, stderr\n \"\"\"\n #print self.fs[low:high]\n #print self.cs[low:high]\n x = numpy.log(self.fs[low:high])\n y = numpy.log(self.cs[low:high])\n t = 
scipy.stats.linregress(x,y)\n return t\n\n\nclass Dct(_SpectrumParent):\n \"\"\"Represents the spectrum of a signal.\"\"\"\n\n def __init__(self, amps, framerate):\n self.amps = amps\n self.framerate = framerate\n n = len(amps)\n self.fs = numpy.arange(n) \/ float(n) * self.max_freq\n\n def make_wave(self):\n \"\"\"Transforms to the time domain.\n\n returns: Wave\n \"\"\"\n ys = scipy.fftpack.dct(self.amps, type=3) \/ 2\n return Wave(ys, self.framerate)\n\n\nclass Spectrogram(object):\n \"\"\"Represents the spectrum of a signal.\"\"\"\n\n def __init__(self, spec_map, seg_length, window_func=None):\n \"\"\"Initialize the spectrogram.\n\n spec_map: map from float time to Spectrum\n seg_length: number of samples in each segment\n window_func: function that computes the window\n \"\"\"\n self.spec_map = spec_map\n self.seg_length = seg_length\n self.window_func = window_func\n\n def any_spectrum(self):\n \"\"\"Returns an arbitrary spectrum from the spectrogram.\"\"\"\n return self.spec_map.itervalues().next()\n\n @property\n def time_res(self):\n \"\"\"Time resolution in seconds.\"\"\"\n spectrum = self.any_spectrum()\n return float(self.seg_length) \/ spectrum.framerate\n\n @property\n def freq_res(self):\n \"\"\"Frequency resolution in Hz.\"\"\"\n return self.any_spectrum().freq_res\n\n def times(self):\n \"\"\"Sorted sequence of times.\n\n returns: sequence of float times in seconds\n \"\"\"\n ts = sorted(self.spec_map.iterkeys())\n return ts\n\n def frequencies(self):\n \"\"\"Sequence of frequencies.\n\n returns: sequence of float freqencies in Hz.\n \"\"\"\n fs = self.any_spectrum().fs\n return fs\n\n def plot(self, low=0, high=None, **options):\n \"\"\"Make a pseudocolor plot.\n\n low: index of the lowest frequency component to plot\n high: index of the highest frequency component to plot\n \"\"\"\n ts = self.times()\n fs = self.frequencies()[low:high]\n\n # make the array\n size = len(fs), len(ts)\n array = numpy.zeros(size, dtype=numpy.float)\n\n # copy amplitude from each spectrum into a column of the array\n for i, t in enumerate(ts):\n spectrum = self.spec_map[t]\n array[:,i] = spectrum.amps[low:high]\n\n thinkplot.pcolor(ts, fs, array, **options)\n\n def make_wave(self):\n \"\"\"Inverts the spectrogram and returns a Wave.\n\n returns: Wave\n \"\"\"\n res = []\n for t, spectrum in sorted(self.spec_map.iteritems()):\n wave = spectrum.make_wave()\n n = len(wave)\n \n if self.window_func:\n window = 1 \/ self.window_func(n)\n wave.window(window)\n\n i = int(round(t * wave.framerate))\n start = i - n \/ 2\n end = start + n\n res.append((start, end, wave))\n\n starts, ends, waves = zip(*res)\n low = min(starts)\n high = max(ends)\n\n ys = numpy.zeros(high-low, numpy.float)\n for start, end, wave in res:\n ys[start:end] = wave.ys\n\n return Wave(ys, wave.framerate)\n\n\nclass Wave(object):\n \"\"\"Represents a discrete-time waveform.\n\n Note: the ys attribute is a \"wave array\" which is a numpy\n array of floats.\n \"\"\"\n\n def __init__(self, ys, framerate, start=0):\n \"\"\"Initializes the wave.\n\n ys: wave array\n framerate: samples per second\n \"\"\"\n self.ys = ys\n self.framerate = framerate\n self.start = start\n\n def __len__(self):\n return len(self.ys)\n\n @property\n def duration(self):\n \"\"\"Duration (property).\n\n returns: float duration in seconds\n \"\"\"\n return len(self.ys) \/ float(self.framerate)\n\n def __or__(self, other):\n \"\"\"Concatenates two waves.\n\n other: Wave\n \n returns: Wave\n \"\"\"\n if self.framerate != other.framerate:\n raise 
ValueError('Wave.__or__: framerates do not agree')\n\n ys = numpy.concatenate((self.ys, other.ys))\n return Wave(ys, self.framerate)\n\n def quantize(self, bound, dtype):\n \"\"\"Maps the waveform to quanta.\n\n bound: maximum amplitude\n dtype: numpy data type or string\n\n returns: quantized signal\n \"\"\"\n return quantize(self.ys, bound, dtype)\n\n def apodize(self, denom=20, duration=0.1):\n \"\"\"Tapers the amplitude at the beginning and end of the signal.\n\n Tapers either the given duration of time or the given\n fraction of the total duration, whichever is less.\n\n denom: float fraction of the segment to taper\n duration: float duration of the taper in seconds\n \"\"\"\n self.ys = apodize(self.ys, self.framerate, denom, duration)\n\n def hamming(self):\n \"\"\"Apply a Hamming window to the wave.\n \"\"\"\n self.ys *= numpy.hamming(len(self.ys))\n\n def window(self, window):\n \"\"\"Apply a window to the wave.\n\n window: sequence of multipliers, same length as self.ys\n \"\"\"\n self.ys *= window\n\n def normalize(self, amp=1.0):\n \"\"\"Normalizes the signal to the given amplitude.\n\n amp: float amplitude\n \"\"\"\n self.ys = normalize(self.ys, amp=amp)\n\n def unbias(self):\n \"\"\"Unbiases the signal.\n \"\"\"\n self.ys = unbias(self.ys)\n\n def segment(self, start=0, duration=None):\n \"\"\"Extracts a segment.\n\n start: float start time in seconds\n duration: float duration in seconds\n\n returns: Wave\n \"\"\"\n i = start * self.framerate\n\n if duration is None:\n j = None\n else:\n j = i + duration * self.framerate\n\n ys = self.ys[i:j]\n return Wave(ys, self.framerate)\n\n def make_spectrum(self):\n \"\"\"Computes the spectrum using FFT.\n\n returns: Spectrum\n \"\"\"\n hs = numpy.fft.rfft(self.ys)\n return Spectrum(hs, self.framerate)\n\n def make_dct(self):\n amps = scipy.fftpack.dct(self.ys, type=2)\n return Dct(amps, self.framerate)\n\n def make_spectrogram(self, seg_length, window_func=numpy.hamming):\n \"\"\"Computes the spectrogram of the wave.\n\n seg_length: number of samples in each segment\n window_func: function used to compute the window\n\n returns: Spectrogram\n \"\"\"\n n = len(self.ys)\n window = window_func(seg_length)\n\n start, end, step = 0, seg_length, seg_length \/ 2\n spec_map = {}\n\n while end < n:\n ys = self.ys[start:end] * window\n hs = numpy.fft.rfft(ys)\n\n t = (start + end) \/ 2.0 \/ self.framerate\n spec_map[t] = Spectrum(hs, self.framerate)\n\n start += step\n end += step\n\n return Spectrogram(spec_map, seg_length, window_func)\n\n def plot(self, **options):\n \"\"\"Plots the wave.\n\n \"\"\"\n n = len(self.ys)\n ts = numpy.linspace(0, self.duration, n)\n thinkplot.plot(ts, self.ys, **options)\n\n def corr(self, other):\n \"\"\"Correlation coefficient two waves.\n\n other: Wave\n\n returns: 2x2 covariance matrix\n \"\"\"\n mat = self.cov_mat(other)\n corr = mat[0][1] \/ math.sqrt(mat[0][0] * mat[1][1])\n return corr\n \n def cov_mat(self, other):\n \"\"\"Covariance matrix of two waves.\n\n other: Wave\n\n returns: 2x2 covariance matrix\n \"\"\"\n return numpy.cov(self.ys, other.ys)\n\n def cov(self, other):\n \"\"\"Covariance of two unbiased waves.\n\n other: Wave\n\n returns: float\n \"\"\"\n total = sum(self.ys * other.ys) \/ len(self.ys)\n return total\n\n def cos_cov(self, k):\n \"\"\"Covariance with a cosine signal.\n\n freq: freq of the cosine signal in Hz\n\n returns: float covariance\n \"\"\"\n n = len(self.ys)\n factor = math.pi * k \/ n\n ys = [math.cos(factor * (i+0.5)) for i in range(n)]\n total = 2 * sum(self.ys * ys)\n 
return total\n\n def cos_transform(self):\n \"\"\"Discrete cosine transform.\n\n returns: list of frequency, cov pairs\n \"\"\"\n n = len(self.ys)\n res = []\n for k in range(n):\n cov = self.cos_cov(k)\n res.append((k, cov))\n\n return res\n\n def write(self, filename='sound.wav'):\n \"\"\"Write a wave file.\n\n filename: string\n \"\"\"\n print 'Writing', filename\n wfile = WavFileWriter(filename, self.framerate)\n wfile.write(self)\n wfile.close()\n\n def play(self, filename='sound.wav'):\n \"\"\"Plays a wave file.\n\n filename: string\n \"\"\"\n self.write(filename)\n play_wave(filename)\n\n\ndef unbias(ys):\n \"\"\"Shifts a wave array so it has mean 0.\n\n ys: wave array\n\n returns: wave array\n \"\"\"\n return ys - ys.mean()\n\n\ndef normalize(ys, amp=1.0):\n \"\"\"Normalizes a wave array so the maximum amplitude is +amp or -amp.\n\n ys: wave array\n amp: max amplitude (pos or neg) in result\n\n returns: wave array\n \"\"\"\n high, low = abs(max(ys)), abs(min(ys))\n return amp * ys \/ max(high, low)\n\n\ndef quantize(ys, bound, dtype):\n \"\"\"Maps the waveform to quanta.\n\n ys: wave array\n bound: maximum amplitude\n dtype: numpy data type of the result\n\n returns: quantized signal\n \"\"\"\n if max(ys) > 1 or min(ys) < -1:\n print 'Warning: normalizing before quantizing.'\n ys = normalize(ys)\n \n zs = (ys * bound).astype(dtype)\n return zs\n\n\ndef apodize(ys, framerate, denom=20, duration=0.1):\n \"\"\"Tapers the amplitude at the beginning and end of the signal.\n\n Tapers either the given duration of time or the given\n fraction of the total duration, whichever is less.\n\n ys: wave array\n framerate: int frames per second\n denom: float fraction of the segment to taper\n duration: float duration of the taper in seconds\n\n returns: wave array\n \"\"\"\n # a fixed fraction of the segment\n n = len(ys)\n k1 = n \/ denom\n\n # a fixed duration of time\n k2 = int(duration * framerate)\n\n k = min(k1, k2)\n\n w1 = numpy.linspace(0, 1, k)\n w2 = numpy.ones(n - 2*k)\n w3 = numpy.linspace(1, 0, k)\n\n window = numpy.concatenate((w1, w2, w3))\n return ys * window\n\n\nclass Signal(object):\n \"\"\"Represents a time-varying signal.\"\"\"\n\n def __add__(self, other):\n \"\"\"Adds two signals.\n\n other: Signal\n\n returns: Signal\n \"\"\"\n if other == 0:\n return self\n return SumSignal(self, other)\n\n __radd__ = __add__\n\n @property\n def period(self):\n \"\"\"Period of the signal in seconds (property).\n\n For non-periodic signals, use the default, 0.1 seconds\n\n returns: float seconds\n \"\"\"\n return 0.1\n\n def plot(self, framerate=11025):\n \"\"\"Plots the signal.\n\n framerate: samples per second\n \"\"\"\n duration = self.period * 3\n wave = self.make_wave(duration, start=0, framerate=framerate)\n wave.plot()\n \n def make_wave(self, duration=1, start=0, framerate=11025):\n \"\"\"Makes a Wave object.\n\n duration: float seconds\n start: float seconds\n framerate: int frames per second\n\n returns: Wave\n \"\"\"\n dt = 1.0 \/ framerate\n ts = numpy.arange(start, duration, dt)\n ys = self.evaluate(ts)\n return Wave(ys, framerate=framerate, start=start)\n\n\ndef infer_framerate(ts):\n \"\"\"Given ts, find the framerate.\n\n Assumes that the ts are equally spaced.\n\n ts: sequence of times in seconds\n\n returns: frames per second\n \"\"\"\n dt = ts[1] - ts[0]\n framerate = 1.0 \/ dt\n return framerate\n\n\nclass SumSignal(Signal):\n \"\"\"Represents the sum of signals.\"\"\"\n \n def __init__(self, *args):\n \"\"\"Initializes the sum.\n\n args: tuple of signals\n \"\"\"\n 
self.signals = args\n\n @property\n def period(self):\n \"\"\"Period of the signal in seconds.\n\n Note: this is not correct; it's mostly a placekeeper.\n\n But it is correct for a harmonic sequence where all\n component frequencies are multiples of the fundamental.\n\n returns: float seconds\n \"\"\"\n return max(sig.period for sig in self.signals)\n\n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n return sum(sig.evaluate(ts) for sig in self.signals)\n\n\nclass Sinusoid(Signal):\n \"\"\"Represents a sinusoidal signal.\"\"\"\n \n def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):\n \"\"\"Initializes a sinusoidal signal.\n\n freq: float frequency in Hz\n amp: float amplitude, 1.0 is nominal max\n offset: float phase offset in radians\n func: function that maps phase to amplitude\n \"\"\"\n self.freq = freq\n self.amp = amp\n self.offset = offset\n self.func = func\n\n @property\n def period(self):\n \"\"\"Period of the signal in seconds.\n\n returns: float seconds\n \"\"\"\n return 1.0 \/ self.freq\n\n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n phases = PI2 * self.freq * ts + self.offset\n ys = self.amp * self.func(phases)\n return ys\n\n\ndef CosSignal(freq=440, amp=1.0, offset=0):\n \"\"\"Makes a consine Sinusoid.\n\n freq: float frequency in Hz\n amp: float amplitude, 1.0 is nominal max\n offset: float phase offset in radians\n \n returns: Sinusoid object\n \"\"\"\n return Sinusoid(freq, amp, offset, func=numpy.cos)\n\n\ndef SinSignal(freq=440, amp=1.0, offset=0):\n \"\"\"Makes a sine Sinusoid.\n\n freq: float frequency in Hz\n amp: float amplitude, 1.0 is nominal max\n offset: float phase offset in radians\n \n returns: Sinusoid object\n \"\"\"\n return Sinusoid(freq, amp, offset, func=numpy.sin)\n\n\nclass SquareSignal(Sinusoid):\n \"\"\"Represents a square signal.\"\"\"\n \n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n cycles = self.freq * ts + self.offset \/ PI2\n frac, _ = numpy.modf(cycles)\n ys = self.amp * numpy.sign(unbias(frac))\n return ys\n\n\nclass SawtoothSignal(Sinusoid):\n \"\"\"Represents a sawtooth signal.\"\"\"\n \n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n cycles = self.freq * ts + self.offset \/ PI2\n frac, _ = numpy.modf(cycles)\n ys = normalize(unbias(frac), self.amp)\n return ys\n\n\nclass ParabolicSignal(Sinusoid):\n \"\"\"Represents a parabolic signal.\"\"\"\n \n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n cycles = self.freq * ts + self.offset \/ PI2\n frac, _ = numpy.modf(cycles)\n ys = frac**2\n ys = normalize(unbias(ys), self.amp)\n return ys\n\n\nclass GlottalSignal(Sinusoid):\n \"\"\"Represents a periodic signal that resembles a glottal signal.\"\"\"\n \n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n cycles = self.freq * ts + self.offset \/ PI2\n frac, _ = numpy.modf(cycles)\n ys = frac**4 * (1-frac)\n ys = normalize(unbias(ys), self.amp)\n return ys\n\n\nclass TriangleSignal(Sinusoid):\n \"\"\"Represents a triangle signal.\"\"\"\n \n def evaluate(self, 
ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n cycles = self.freq * ts + self.offset \/ PI2\n frac, _ = numpy.modf(cycles)\n ys = numpy.abs(frac - 0.5)\n ys = normalize(unbias(ys), self.amp)\n return ys\n\n\nclass Chirp(Signal):\n \"\"\"Represents a signal with variable frequency.\"\"\"\n \n def __init__(self, start=440, end=880, amp=1.0):\n \"\"\"Initializes a linear chirp.\n\n start: float frequency in Hz\n end: float frequency in Hz\n amp: float amplitude, 1.0 is nominal max\n \"\"\"\n self.start = start\n self.end = end\n self.amp = amp\n\n @property\n def period(self):\n \"\"\"Period of the signal in seconds.\n\n returns: float seconds\n \"\"\"\n return ValueError('Non-periodic signal.')\n\n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n freqs = numpy.linspace(self.start, self.end, len(ts)-1)\n return self._evaluate(ts, freqs)\n\n def _evaluate(self, ts, freqs):\n \"\"\"Helper function that evaluates the signal.\n\n ts: float array of times\n freqs: float array of frequencies during each interval\n \"\"\"\n #n = len(freqs)\n #print freqs[::n\/2]\n dts = numpy.diff(ts)\n dps = PI2 * freqs * dts\n phases = numpy.cumsum(dps)\n phases = numpy.insert(phases, 0, 0)\n ys = self.amp * numpy.cos(phases)\n return ys\n\n\nclass ExpoChirp(Chirp):\n \"\"\"Represents a signal with varying frequency.\"\"\"\n \n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n start, end = math.log10(self.start), math.log10(self.end)\n freqs = numpy.logspace(start, end, len(ts)-1)\n return self._evaluate(ts, freqs)\n\n\nclass SilentSignal(Signal):\n \"\"\"Represents silence.\"\"\"\n \n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n return numpy.zeros(len(ts))\n\n\nclass _Noise(Signal):\n \"\"\"Represents a noise signal (abstract parent class).\"\"\"\n \n def __init__(self, amp=1.0):\n \"\"\"Initializes a white noise signal.\n\n amp: float amplitude, 1.0 is nominal max\n \"\"\"\n self.amp = amp\n\n @property\n def period(self):\n \"\"\"Period of the signal in seconds.\n\n returns: float seconds\n \"\"\"\n return ValueError('Non-periodic signal.')\n\n\nclass UncorrelatedUniformNoise(_Noise):\n \"\"\"Represents uncorrelated uniform noise.\"\"\"\n\n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n ys = numpy.random.uniform(-self.amp, self.amp, len(ts))\n return ys\n\n\nclass UncorrelatedGaussianNoise(_Noise):\n \"\"\"Represents uncorrelated gaussian noise.\"\"\"\n\n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n ys = numpy.random.normal(0, 1, len(ts))\n ys = normalize(ys, self.amp)\n return ys\n\n\nclass BrownianNoise(_Noise):\n \"\"\"Represents Brownian noise, aka red noise.\"\"\"\n\n def evaluate(self, ts):\n \"\"\"Evaluates the signal at the given times.\n\n Computes Brownian noise by taking the cumulative sum of\n a uniform random series.\n\n ts: float array of times\n \n returns: float wave array\n \"\"\"\n #dys = numpy.random.normal(0, 1, len(ts))\n dys = numpy.random.uniform(-1, 1, len(ts))\n #ys = numpy.cumsum(dys)\n ys = scipy.integrate.cumtrapz(dys, ts)\n ys = 
normalize(unbias(ys), self.amp)\n return ys\n\n\nclass PinkNoise(_Noise):\n \"\"\"Represents Brownian noise, aka red noise.\"\"\"\n\n def __init__(self, amp=1.0, beta=1.0):\n \"\"\"Initializes a pink noise signal.\n\n amp: float amplitude, 1.0 is nominal max\n \"\"\"\n self.amp = amp\n self.beta = beta\n\n def make_wave(self, duration=1, start=0, framerate=11025):\n \"\"\"Makes a Wave object.\n\n duration: float seconds\n start: float seconds\n framerate: int frames per second\n\n returns: Wave\n \"\"\"\n signal = UncorrelatedUniformNoise()\n wave = signal.make_wave(duration, start, framerate)\n spectrum = wave.make_spectrum()\n\n spectrum.pink_filter(beta=self.beta)\n\n wave2 = spectrum.make_wave()\n wave2.unbias()\n wave2.normalize(self.amp)\n return wave2\n\n\ndef rest(duration):\n \"\"\"Makes a rest of the given duration.\n\n duration: float seconds\n\n returns: Wave\n \"\"\"\n signal = SilentSignal()\n wave = signal.make_wave(duration)\n return wave\n\n\ndef make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):\n \"\"\"Make a MIDI note with the given duration.\n\n midi_num: int MIDI note number\n duration: float seconds\n sig_cons: Signal constructor function\n framerate: int frames per second\n\n returns: Wave\n \"\"\"\n freq = midi_to_freq(midi_num)\n signal = sig_cons(freq)\n wave = signal.make_wave(duration, framerate=framerate)\n wave.apodize()\n return wave\n\n\ndef make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):\n \"\"\"Make a chord with the given duration.\n\n midi_nums: sequence of int MIDI note numbers\n duration: float seconds\n sig_cons: Signal constructor function\n framerate: int frames per second\n\n returns: Wave\n \"\"\"\n freqs = [midi_to_freq(num) for num in midi_nums]\n signal = sum(sig_cons(freq) for freq in freqs)\n wave = signal.make_wave(duration, framerate=framerate)\n wave.apodize()\n return wave\n\n\ndef midi_to_freq(midi_num):\n \"\"\"Converts MIDI note number to frequency.\n\n midi_num: int MIDI note number\n \n returns: float frequency in Hz\n \"\"\"\n x = (midi_num - 69) \/ 12.0\n freq = 440.0 * 2**x\n return freq\n\n\ndef sin_wave(freq, duration=1, offset=0):\n \"\"\"Makes a sine wave with the given parameters.\n\n freq: float cycles per second\n duration: float seconds\n offset: float radians\n\n returns: Wave\n \"\"\"\n signal = SinSignal(freq, offset=offset)\n wave = signal.make_wave(duration)\n return wave\n\n\ndef cos_wave(freq, duration=1, offset=0):\n \"\"\"Makes a cosine wave with the given parameters.\n\n freq: float cycles per second\n duration: float seconds\n offset: float radians\n\n returns: Wave\n \"\"\"\n signal = CosSignal(freq, offset=offset)\n wave = signal.make_wave(duration)\n return wave\n\n\ndef mag(a):\n \"\"\"Computes the magnitude of a numpy array.\n\n a: numpy array\n\n returns: float\n \"\"\"\n return numpy.sqrt(numpy.dot(a, a))\n\n\ndef main():\n\n cos_basis = cos_wave(440)\n sin_basis = sin_wave(440)\n\n wave = cos_wave(440, offset=math.pi\/2)\n cos_cov = cos_basis.cov(wave)\n sin_cov = sin_basis.cov(wave)\n print cos_cov, sin_cov, mag((cos_cov, sin_cov))\n return\n\n wfile = WavFileWriter()\n for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal, \n GlottalSignal, ParabolicSignal, SquareSignal]:\n print sig_cons\n sig = sig_cons(440)\n wave = sig.make_wave(1)\n wave.apodize()\n wfile.write(wave)\n wfile.close()\n return\n\n signal = GlottalSignal(440)\n signal.plot()\n pyplot.show()\n return\n\n wfile = WavFileWriter()\n for m in range(60, 0, -1):\n wfile.write(make_note(m, 0.25))\n 
wfile.close()\n return\n\n wave1 = make_note(69, 1)\n wave2 = make_chord([69, 72, 76], 1)\n wave = wave1 | wave2\n\n wfile = WavFileWriter()\n wfile.write(wave)\n wfile.close()\n return\n\n sig1 = CosSignal(freq=440)\n sig2 = CosSignal(freq=523.25)\n sig3 = CosSignal(freq=660)\n sig4 = CosSignal(freq=880)\n sig5 = CosSignal(freq=987)\n sig = sig1 + sig2 + sig3 + sig4\n\n #wave = Wave(sig, duration=0.02)\n #wave.plot()\n\n wave = sig.make_wave(duration=1)\n #wave.normalize()\n\n wfile = WavFileWriter(wave)\n wfile.write()\n wfile.close()\n\n\nif __name__ == '__main__':\n main()\n","license":"gpl-3.0"} {"repo_name":"dusenberrymw\/systemml","path":"src\/main\/python\/systemml\/converters.py","copies":"8","size":"12296","content":"#-------------------------------------------------------------\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#-------------------------------------------------------------\n\n__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr', 'getDatasetMean']\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport math\n\nfrom pyspark.context import SparkContext\nfrom scipy.sparse import coo_matrix, spmatrix, csr_matrix\nfrom .classloader import *\n\nSUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)\n\nDATASET_MEAN = {'VGG_ILSVRC_19_2014':[103.939, 116.779, 123.68]}\n\ndef getNumCols(numPyArr):\n if numPyArr.ndim == 1:\n return 1\n else:\n return numPyArr.shape[1]\n\ndef get_pretty_str(key, value):\n return '\\t\"' + key + '\": ' + str(value) + ',\\n'\n\ndef save_tensor_csv(tensor, file_path, shouldTranspose):\n w = w.reshape(w.shape[0], -1)\n if shouldTranspose:\n w = w.T\n np.savetxt(file_path, w, delimiter=',')\n with open(file_path + '.mtd', 'w') as file:\n file.write('{\\n\\t\"data_type\": \"matrix\",\\n\\t\"value_type\": \"double\",\\n')\n file.write(get_pretty_str('rows', w.shape[0]))\n file.write(get_pretty_str('cols', w.shape[1]))\n file.write(get_pretty_str('nnz', np.count_nonzero(w)))\n file.write('\\t\"format\": \"csv\",\\n\\t\"description\": {\\n\\t\\t\"author\": \"SystemML\"\\n\\t}\\n}\\n')\n\ndef convert_caffemodel(sc, deploy_file, caffemodel_file, output_dir, format=\"binary\", is_caffe_installed=False):\n \"\"\"\n Saves the weights and bias in the caffemodel file to output_dir in the specified format.\n This method does not requires caffe to be installed.\n\n Parameters\n ----------\n sc: SparkContext\n SparkContext\n\n deploy_file: string\n Path to the input network file\n\n caffemodel_file: string\n Path to the input caffemodel file\n\n output_dir: string\n Path to the output directory\n\n format: string\n Format of the weights and bias (can be binary, 
csv or text)\n\n is_caffe_installed: bool\n True if caffe is installed\n \"\"\"\n if is_caffe_installed:\n if format != 'csv':\n raise ValueError('The format ' + str(format) + ' is not supported when caffe is installed. Hint: Please specify format=csv')\n import caffe\n net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)\n for layerName in net.params.keys():\n num_parameters = len(net.params[layerName])\n if num_parameters == 0:\n continue\n elif num_parameters == 2:\n # Weights and Biases\n layerType = net.layers[list(net._layer_names).index(layerName)].type\n shouldTranspose = True if layerType == 'InnerProduct' else False\n save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)\n save_tensor_csv(net.params[layerName][1].data, os.path.join(output_dir, layerName + '_bias.mtx'), shouldTranspose)\n elif num_parameters == 1:\n # Only Weight\n layerType = net.layers[list(net._layer_names).index(layerName)].type\n shouldTranspose = True if layerType == 'InnerProduct' else False\n save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)\n else:\n raise ValueError('Unsupported number of parameters:' + str(num_parameters))\n else:\n createJavaObject(sc, 'dummy')\n utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()\n utilObj.saveCaffeModelFile(sc._jsc, deploy_file, caffemodel_file, output_dir, format)\n\n\ndef convert_lmdb_to_jpeg(lmdb_img_file, output_dir):\n \"\"\"\n Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.\n To install cv2 package, do `pip install opencv-python`.\n\n Parameters\n ----------\n lmdb_img_file: string\n Path to the input lmdb file\n\n output_dir: string\n Output directory for images (local filesystem)\n \"\"\"\n import lmdb, caffe, cv2\n lmdb_cursor = lmdb.open(lmdb_file, readonly=True).begin().cursor()\n datum = caffe.proto.caffe_pb2.Datum()\n i = 1\n for _, value in lmdb_cursor:\n datum.ParseFromString(value)\n data = caffe.io.datum_to_array(datum)\n output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')\n image = np.transpose(data, (1,2,0)) # CxHxW to HxWxC in cv2\n cv2.imwrite(output_file_path, image)\n i = i + 1\n\n\ndef convertToLabeledDF(sparkSession, X, y=None):\n from pyspark.ml.feature import VectorAssembler\n if y is not None:\n pd1 = pd.DataFrame(X)\n pd2 = pd.DataFrame(y, columns=['label'])\n pdf = pd.concat([pd1, pd2], axis=1)\n inputColumns = ['C' + str(i) for i in pd1.columns]\n outputColumns = inputColumns + ['label']\n else:\n pdf = pd.DataFrame(X)\n inputColumns = ['C' + str(i) for i in pdf.columns]\n outputColumns = inputColumns\n assembler = VectorAssembler(inputCols=inputColumns, outputCol='features')\n out = assembler.transform(sparkSession.createDataFrame(pdf, outputColumns))\n if y is not None:\n return out.select('features', 'label')\n else:\n return out.select('features')\n\ndef _convertSPMatrixToMB(sc, src):\n src = coo_matrix(src, dtype=np.float64)\n numRows = src.shape[0]\n numCols = src.shape[1]\n data = src.data\n row = src.row.astype(np.int32)\n col = src.col.astype(np.int32)\n nnz = len(src.col)\n buf1 = bytearray(data.tostring())\n buf2 = bytearray(row.tostring())\n buf3 = bytearray(col.tostring())\n createJavaObject(sc, 'dummy')\n return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(buf1, buf2, buf3, numRows, numCols, nnz)\n\ndef _convertDenseMatrixToMB(sc, 
src):\n numCols = getNumCols(src)\n numRows = src.shape[0]\n arr = src.ravel().astype(np.float64)\n buf = bytearray(arr.tostring())\n createJavaObject(sc, 'dummy')\n return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(buf, numRows, numCols)\n\ndef _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):\n rowIndex = int(i \/ numRowsPerBlock)\n tmp = src[i:min(i+numRowsPerBlock, rlen),]\n mb = _convertSPMatrixToMB(sc, tmp) if isinstance(src, spmatrix) else _convertDenseMatrixToMB(sc, tmp)\n sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(mb, rowIndex, ret, numRowsPerBlock, rlen, clen)\n return i\n\ndef convertToMatrixBlock(sc, src, maxSizeBlockInMB=8):\n if not isinstance(sc, SparkContext):\n raise TypeError('sc needs to be of type SparkContext')\n isSparse = True if isinstance(src, spmatrix) else False\n src = np.asarray(src, dtype=np.float64) if not isSparse else src\n if len(src.shape) != 2:\n src_type = str(type(src).__name__)\n raise TypeError('Expected 2-dimensional ' + src_type + ', instead passed ' + str(len(src.shape)) + '-dimensional ' + src_type)\n # Ignoring sparsity for computing numRowsPerBlock for now\n numRowsPerBlock = int(math.ceil((maxSizeBlockInMB*1000000) \/ (src.shape[1]*8)))\n multiBlockTransfer = False if numRowsPerBlock >= src.shape[0] else True\n if not multiBlockTransfer:\n return _convertSPMatrixToMB(sc, src) if isSparse else _convertDenseMatrixToMB(sc, src)\n else:\n # Since coo_matrix does not have range indexing\n src = csr_matrix(src) if isSparse else src\n rlen = int(src.shape[0])\n clen = int(src.shape[1])\n ret = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.allocateDenseOrSparse(rlen, clen, isSparse)\n [ _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen) for i in range(0, src.shape[0], numRowsPerBlock) ]\n sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.postProcessAfterCopying(ret)\n return ret\n\ndef convertToNumPyArr(sc, mb):\n if isinstance(sc, SparkContext):\n numRows = mb.getNumRows()\n numCols = mb.getNumColumns()\n createJavaObject(sc, 'dummy')\n buf = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertMBtoPy4JDenseArr(mb)\n return np.frombuffer(buf, count=numRows*numCols, dtype=np.float64).reshape((numRows, numCols))\n else:\n raise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway ourselves\n\n# Returns the mean of a model if defined otherwise None\ndef getDatasetMean(dataset_name):\n \"\"\"\n Parameters\n ----------\n dataset_name: Name of the dataset used to train model. 
This name is artificial name based on dataset used to train the model.\n\n Returns\n -------\n mean: Mean value of model if its defined in the list DATASET_MEAN else None.\n\n \"\"\"\n\n try:\n mean = DATASET_MEAN[dataset_name.upper()]\n except:\n mean = None\n return mean\n\n\n# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)\n# The above call returns a numpy array of shape (6, 50176) in NCHW format\ndef convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,\n color_mode = 'RGB', mean=None):\n\n ## Input Parameters\n\n # color_mode: In case of VGG models which expect image data in BGR format instead of RGB for other most models,\n # color_mode parameter is used to process image data in BGR format.\n\n # mean: mean value is used to subtract from input data from every pixel value. By default value is None, so mean value not subtracted.\n\n if img_shape is not None:\n num_channels = img_shape[0]\n size = (img_shape[1], img_shape[2])\n else:\n num_channels = 1 if im.mode == 'L' else 3\n size = None\n if num_channels != 1 and num_channels != 3:\n raise ValueError('Expected the number of channels to be either 1 or 3')\n\n from PIL import Image\n\n if size is not None:\n im = im.resize(size, Image.LANCZOS)\n expected_mode = 'L' if num_channels == 1 else 'RGB'\n if expected_mode is not im.mode:\n im = im.convert(expected_mode)\n\n def _im2NumPy(im):\n if expected_mode == 'L':\n return np.asarray(im.getdata()).reshape((1, -1))\n else:\n im = (np.array(im).astype(np.float))\n\n # (H,W,C) -> (C,H,W)\n im = im.transpose(2, 0, 1)\n\n # RGB -> BGR\n if color_mode == 'BGR':\n im = im[...,::-1]\n\n # Subtract Mean\n if mean is not None:\n for c in range(3):\n im[:, :, c] = im[:, :, c] - mean[c]\n\n # (C,H,W) --> (1, C*H*W)\n return im.reshape((1, -1))\n\n ret = _im2NumPy(im)\n\n if add_rotated_images:\n ret = np.vstack((ret, _im2NumPy(im.rotate(90)), _im2NumPy(im.rotate(180)), _im2NumPy(im.rotate(270)) ))\n if add_mirrored_images:\n ret = np.vstack((ret, _im2NumPy(im.transpose(Image.FLIP_LEFT_RIGHT)), _im2NumPy(im.transpose(Image.FLIP_TOP_BOTTOM))))\n return ret\n\n\ndef convertToPandasDF(X):\n if not isinstance(X, pd.DataFrame):\n return pd.DataFrame(X, columns=['C' + str(i) for i in range(getNumCols(X))])\n return X\n","license":"apache-2.0"} {"repo_name":"thilbern\/scikit-learn","path":"sklearn\/neighbors\/base.py","copies":"7","size":"25049","content":"\"\"\"Base and mixin classes for nearest neighbors\"\"\"\n# Authors: Jake Vanderplas \n# Fabian Pedregosa \n# Alexandre Gramfort \n# Sparseness support by Lars Buitinck \n# Multi-output support by Arnaud Joly \n#\n# License: BSD 3 clause (C) INRIA, University of Amsterdam\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, issparse\n\nfrom .ball_tree import BallTree\nfrom .kd_tree import KDTree\nfrom ..base import BaseEstimator\nfrom ..metrics import pairwise_distances\nfrom ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS\nfrom ..utils import check_X_y, check_array\nfrom ..utils.fixes import argpartition\nfrom ..utils.validation import DataConversionWarning\nfrom ..externals import six\n\n\nVALID_METRICS = dict(ball_tree=BallTree.valid_metrics,\n kd_tree=KDTree.valid_metrics,\n # The following list comes from the\n # sklearn.metrics.pairwise doc string\n brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +\n ['braycurtis', 'canberra', 'chebyshev',\n 'correlation', 'cosine', 
'dice', 'hamming',\n 'jaccard', 'kulsinski', 'mahalanobis',\n 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener',\n 'sokalsneath', 'sqeuclidean',\n 'yule', 'wminkowski']))\n\n\nVALID_METRICS_SPARSE = dict(ball_tree=[],\n kd_tree=[],\n brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())\n\n\nclass NeighborsWarning(UserWarning):\n pass\n\n\n# Make sure that NeighborsWarning are displayed more than once\nwarnings.simplefilter(\"always\", NeighborsWarning)\n\n\ndef _check_weights(weights):\n \"\"\"Check to make sure weights are valid\"\"\"\n if weights in (None, 'uniform', 'distance'):\n return weights\n elif callable(weights):\n return weights\n else:\n raise ValueError(\"weights not recognized: should be 'uniform', \"\n \"'distance', or a callable function\")\n\n\ndef _get_weights(dist, weights):\n \"\"\"Get the weights from an array of distances and a parameter ``weights``\n\n Parameters\n ===========\n dist: ndarray\n The input distances\n weights: {'uniform', 'distance' or a callable}\n The kind of weighting used\n\n Returns\n ========\n weights_arr: array of the same shape as ``dist``\n if ``weights == 'uniform'``, then returns None\n \"\"\"\n if weights in (None, 'uniform'):\n return None\n elif weights == 'distance':\n with np.errstate(divide='ignore'):\n dist = 1. \/ dist\n return dist\n elif callable(weights):\n return weights(dist)\n else:\n raise ValueError(\"weights not recognized: should be 'uniform', \"\n \"'distance', or a callable function\")\n\n\nclass NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):\n \"\"\"Base class for nearest neighbors estimators.\"\"\"\n\n @abstractmethod\n def __init__(self):\n pass\n\n def _init_params(self, n_neighbors=None, radius=None,\n algorithm='auto', leaf_size=30, metric='minkowski',\n p=2, metric_params=None, **kwargs):\n if kwargs:\n warnings.warn(\"Passing additional arguments to the metric \"\n \"function as **kwargs is deprecated \"\n \"and will no longer be supported in 0.18. \"\n \"Use metric_params instead.\",\n DeprecationWarning, stacklevel=3)\n if metric_params is None:\n metric_params = {}\n metric_params.update(kwargs)\n\n self.n_neighbors = n_neighbors\n self.radius = radius\n self.algorithm = algorithm\n self.leaf_size = leaf_size\n self.metric = metric\n self.metric_params = metric_params\n self.p = p\n\n if algorithm not in ['auto', 'brute',\n 'kd_tree', 'ball_tree']:\n raise ValueError(\"unrecognized algorithm: '%s'\" % algorithm)\n\n if algorithm == 'auto':\n alg_check = 'ball_tree'\n else:\n alg_check = algorithm\n\n if callable(metric):\n if algorithm == 'kd_tree':\n # callable metric is only valid for brute force and ball_tree\n raise ValueError(\n \"kd_tree algorithm does not support callable metric '%s'\"\n % metric)\n elif metric not in VALID_METRICS[alg_check]:\n raise ValueError(\"Metric '%s' not valid for algorithm '%s'\"\n % (metric, algorithm))\n\n if self.metric_params is not None and 'p' in self.metric_params:\n warnings.warn(\"Parameter p is found in metric_params. 
\"\n \"The corresponding parameter from __init__ \"\n \"is ignored.\", SyntaxWarning, stacklevel=3)\n effective_p = metric_params['p']\n else:\n effective_p = self.p\n\n if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:\n raise ValueError(\"p must be greater than one for minkowski metric\")\n\n self._fit_X = None\n self._tree = None\n self._fit_method = None\n\n def _fit(self, X):\n if self.metric_params is None:\n self.effective_metric_params_ = {}\n else:\n self.effective_metric_params_ = self.metric_params.copy()\n\n effective_p = self.effective_metric_params_.get('p', self.p)\n if self.metric in ['wminkowski', 'minkowski']:\n self.effective_metric_params_['p'] = effective_p\n\n self.effective_metric_ = self.metric\n # For minkowski distance, use more efficient methods where available\n if self.metric == 'minkowski':\n p = self.effective_metric_params_.pop('p', 2)\n if p < 1:\n raise ValueError(\"p must be greater than one \"\n \"for minkowski metric\")\n elif p == 1:\n self.effective_metric_ = 'manhattan'\n elif p == 2:\n self.effective_metric_ = 'euclidean'\n elif p == np.inf:\n self.effective_metric_ = 'chebyshev'\n else:\n self.effective_metric_params_['p'] = p\n\n if isinstance(X, NeighborsBase):\n self._fit_X = X._fit_X\n self._tree = X._tree\n self._fit_method = X._fit_method\n return self\n\n elif isinstance(X, BallTree):\n self._fit_X = X.data\n self._tree = X\n self._fit_method = 'ball_tree'\n return self\n\n elif isinstance(X, KDTree):\n self._fit_X = X.data\n self._tree = X\n self._fit_method = 'kd_tree'\n return self\n\n X = check_array(X, accept_sparse='csr')\n\n n_samples = X.shape[0]\n if n_samples == 0:\n raise ValueError(\"n_samples must be greater than 0\")\n\n if issparse(X):\n if self.algorithm not in ('auto', 'brute'):\n warnings.warn(\"cannot use tree with sparse input: \"\n \"using brute force\")\n if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:\n raise ValueError(\"metric '%s' not valid for sparse input\"\n % self.effective_metric_)\n self._fit_X = X.copy()\n self._tree = None\n self._fit_method = 'brute'\n return self\n\n self._fit_method = self.algorithm\n self._fit_X = X\n\n if self._fit_method == 'auto':\n # A tree approach is better for small number of neighbors,\n # and KDTree is generally faster when available\n if (self.n_neighbors is None\n or self.n_neighbors < self._fit_X.shape[0] \/\/ 2):\n if self.effective_metric_ in VALID_METRICS['kd_tree']:\n self._fit_method = 'kd_tree'\n else:\n self._fit_method = 'ball_tree'\n else:\n self._fit_method = 'brute'\n\n if self._fit_method == 'ball_tree':\n self._tree = BallTree(X, self.leaf_size,\n metric=self.effective_metric_,\n **self.effective_metric_params_)\n elif self._fit_method == 'kd_tree':\n self._tree = KDTree(X, self.leaf_size,\n metric=self.effective_metric_,\n **self.effective_metric_params_)\n elif self._fit_method == 'brute':\n self._tree = None\n else:\n raise ValueError(\"algorithm = '%s' not recognized\"\n % self.algorithm)\n return self\n\n\nclass KNeighborsMixin(object):\n \"\"\"Mixin for k-neighbors searches\"\"\"\n\n def kneighbors(self, X, n_neighbors=None, return_distance=True):\n \"\"\"Finds the K-neighbors of a point.\n\n Returns distance\n\n Parameters\n ----------\n X : array-like, last dimension same as that of fit data\n The new point.\n\n n_neighbors : int\n Number of neighbors to get (default is the value\n passed to the constructor).\n\n return_distance : boolean, optional. 
Defaults to True.\n If False, distances will not be returned\n\n Returns\n -------\n dist : array\n Array representing the lengths to point, only present if\n return_distance=True\n\n ind : array\n Indices of the nearest points in the population matrix.\n\n Examples\n --------\n In the following example, we construct a NeighborsClassifier\n class from an array representing our data set and ask who's\n the closest point to [1,1,1]\n\n >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(n_neighbors=1)\n >>> neigh.fit(samples) # doctest: +ELLIPSIS\n NearestNeighbors(algorithm='auto', leaf_size=30, ...)\n >>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS\n (array([[ 0.5]]), array([[2]]...))\n\n As you can see, it returns [[0.5]], and [[2]], which means that the\n element is at distance 0.5 and is the third element of samples\n (indexes start at 0). You can also query for multiple points:\n\n >>> X = [[0., 1., 0.], [1., 0., 1.]]\n >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS\n array([[1],\n [2]]...)\n\n \"\"\"\n if self._fit_method is None:\n raise ValueError(\"must fit neighbors before querying\")\n\n X = check_array(X, accept_sparse='csr')\n\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n\n if self._fit_method == 'brute':\n # for efficiency, use squared euclidean distances\n if self.effective_metric_ == 'euclidean':\n dist = pairwise_distances(X, self._fit_X, 'euclidean',\n squared=True)\n else:\n dist = pairwise_distances(X, self._fit_X,\n self.effective_metric_,\n **self.effective_metric_params_)\n\n neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)\n neigh_ind = neigh_ind[:, :n_neighbors]\n # argpartition doesn't guarantee sorted order, so we sort again\n j = np.arange(neigh_ind.shape[0])[:, None]\n neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]\n if return_distance:\n if self.effective_metric_ == 'euclidean':\n return np.sqrt(dist[j, neigh_ind]), neigh_ind\n else:\n return dist[j, neigh_ind], neigh_ind\n else:\n return neigh_ind\n elif self._fit_method in ['ball_tree', 'kd_tree']:\n if issparse(X):\n raise ValueError(\n \"%s does not work with sparse matrices. 
Densify the data, \"\n \"or set algorithm='brute'\" % self._fit_method)\n result = self._tree.query(X, n_neighbors,\n return_distance=return_distance)\n return result\n else:\n raise ValueError(\"internal: _fit_method not recognized\")\n\n def kneighbors_graph(self, X, n_neighbors=None,\n mode='connectivity'):\n \"\"\"Computes the (weighted) graph of k-Neighbors for points in X\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Sample data\n\n n_neighbors : int\n Number of neighbors for each sample.\n (default is value passed to the constructor).\n\n mode : {'connectivity', 'distance'}, optional\n Type of returned matrix: 'connectivity' will return the\n connectivity matrix with ones and zeros, in 'distance' the\n edges are Euclidean distance between points.\n\n Returns\n -------\n A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]\n n_samples_fit is the number of samples in the fitted data\n A[i, j] is assigned the weight of edge that connects i to j.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(n_neighbors=2)\n >>> neigh.fit(X) # doctest: +ELLIPSIS\n NearestNeighbors(algorithm='auto', leaf_size=30, ...)\n >>> A = neigh.kneighbors_graph(X)\n >>> A.toarray()\n array([[ 1., 0., 1.],\n [ 0., 1., 1.],\n [ 1., 0., 1.]])\n\n See also\n --------\n NearestNeighbors.radius_neighbors_graph\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n\n n_samples1 = X.shape[0]\n n_samples2 = self._fit_X.shape[0]\n n_nonzero = n_samples1 * n_neighbors\n A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)\n\n # construct CSR matrix representation of the k-NN graph\n if mode == 'connectivity':\n A_data = np.ones((n_samples1, n_neighbors))\n A_ind = self.kneighbors(X, n_neighbors, return_distance=False)\n\n elif mode == 'distance':\n data, ind = self.kneighbors(X, n_neighbors + 1,\n return_distance=True)\n A_data, A_ind = data[:, 1:], ind[:, 1:]\n\n else:\n raise ValueError(\n 'Unsupported mode, must be one of \"connectivity\" '\n 'or \"distance\" but got \"%s\" instead' % mode)\n\n return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),\n shape=(n_samples1, n_samples2))\n\n\nclass RadiusNeighborsMixin(object):\n \"\"\"Mixin for radius-based neighbors searches\"\"\"\n\n def radius_neighbors(self, X, radius=None, return_distance=True):\n \"\"\"Finds the neighbors within a given radius of a point or points.\n\n Returns indices of and distances to the neighbors of each point.\n\n Parameters\n ----------\n X : array-like, last dimension same as that of fit data\n The new point or points\n\n radius : float\n Limiting distance of neighbors to return.\n (default is the value passed to the constructor).\n\n return_distance : boolean, optional. 
Defaults to True.\n If False, distances will not be returned\n\n Returns\n -------\n dist : array\n Array representing the euclidean distances to each point,\n only present if return_distance=True.\n\n ind : array\n Indices of the nearest points in the population matrix.\n\n Examples\n --------\n In the following example, we construct a NeighborsClassifier\n class from an array representing our data set and ask who's\n the closest point to [1,1,1]\n\n >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(radius=1.6)\n >>> neigh.fit(samples) # doctest: +ELLIPSIS\n NearestNeighbors(algorithm='auto', leaf_size=30, ...)\n >>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS\n (array([[ 1.5, 0.5]]...), array([[1, 2]]...)\n\n The first array returned contains the distances to all points which\n are closer than 1.6, while the second array returned contains their\n indices. In general, multiple points can be queried at the same time.\n\n Notes\n -----\n Because the number of neighbors of each point is not necessarily\n equal, the results for multiple query points cannot be fit in a\n standard data array.\n For efficiency, `radius_neighbors` returns arrays of objects, where\n each object is a 1D array of indices or distances.\n \"\"\"\n\n if self._fit_method is None:\n raise ValueError(\"must fit neighbors before querying\")\n\n X = check_array(X, accept_sparse='csr')\n\n if radius is None:\n radius = self.radius\n\n if self._fit_method == 'brute':\n # for efficiency, use squared euclidean distances\n if self.effective_metric_ == 'euclidean':\n dist = pairwise_distances(X, self._fit_X, 'euclidean',\n squared=True)\n radius *= radius\n else:\n dist = pairwise_distances(X, self._fit_X,\n self.effective_metric_,\n **self.effective_metric_params_)\n neigh_ind = [np.where(d < radius)[0] for d in dist]\n\n # if there are the same number of neighbors for each point,\n # we can do a normal array. Otherwise, we return an object\n # array with elements that are numpy arrays\n try:\n neigh_ind = np.asarray(neigh_ind, dtype=int)\n dtype_F = float\n except ValueError:\n neigh_ind = np.asarray(neigh_ind, dtype='object')\n dtype_F = object\n\n if return_distance:\n if self.effective_metric_ == 'euclidean':\n dist = np.array([np.sqrt(d[neigh_ind[i]])\n for i, d in enumerate(dist)],\n dtype=dtype_F)\n else:\n dist = np.array([d[neigh_ind[i]]\n for i, d in enumerate(dist)],\n dtype=dtype_F)\n return dist, neigh_ind\n else:\n return neigh_ind\n elif self._fit_method in ['ball_tree', 'kd_tree']:\n if issparse(X):\n raise ValueError(\n \"%s does not work with sparse matrices. 
Densify the data, \"\n \"or set algorithm='brute'\" % self._fit_method)\n results = self._tree.query_radius(X, radius,\n return_distance=return_distance)\n if return_distance:\n ind, dist = results\n return dist, ind\n else:\n return results\n else:\n raise ValueError(\"internal: _fit_method not recognized\")\n\n def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):\n \"\"\"Computes the (weighted) graph of Neighbors for points in X\n\n Neighborhoods are restricted the points at a distance lower than\n radius.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Sample data\n\n radius : float\n Radius of neighborhoods.\n (default is the value passed to the constructor).\n\n mode : {'connectivity', 'distance'}, optional\n Type of returned matrix: 'connectivity' will return the\n connectivity matrix with ones and zeros, in 'distance' the\n edges are Euclidean distance between points.\n\n Returns\n -------\n A : sparse matrix in CSR format, shape = [n_samples, n_samples]\n A[i, j] is assigned the weight of edge that connects i to j.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(radius=1.5)\n >>> neigh.fit(X) # doctest: +ELLIPSIS\n NearestNeighbors(algorithm='auto', leaf_size=30, ...)\n >>> A = neigh.radius_neighbors_graph(X)\n >>> A.toarray()\n array([[ 1., 0., 1.],\n [ 0., 1., 0.],\n [ 1., 0., 1.]])\n\n See also\n --------\n kneighbors_graph\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n\n if radius is None:\n radius = self.radius\n\n n_samples1 = X.shape[0]\n n_samples2 = self._fit_X.shape[0]\n\n # construct CSR matrix representation of the NN graph\n if mode == 'connectivity':\n A_ind = self.radius_neighbors(X, radius,\n return_distance=False)\n A_data = None\n elif mode == 'distance':\n dist, A_ind = self.radius_neighbors(X, radius,\n return_distance=True)\n A_data = np.concatenate(list(dist))\n else:\n raise ValueError(\n 'Unsupported mode, must be one of \"connectivity\", '\n 'or \"distance\" but got %s instead' % mode)\n\n n_neighbors = np.array([len(a) for a in A_ind])\n n_nonzero = np.sum(n_neighbors)\n if A_data is None:\n A_data = np.ones(n_nonzero)\n A_ind = np.concatenate(list(A_ind))\n A_indptr = np.concatenate((np.zeros(1, dtype=int),\n np.cumsum(n_neighbors)))\n\n return csr_matrix((A_data, A_ind, A_indptr),\n shape=(n_samples1, n_samples2))\n\n\nclass SupervisedFloatMixin(object):\n def fit(self, X, y):\n \"\"\"Fit the model using X as training data and y as target values\n\n Parameters\n ----------\n X : {array-like, sparse matrix, BallTree, KDTree}\n Training data. If array or matrix, shape = [n_samples, n_features]\n\n y : {array-like, sparse matrix}\n Target values, array of float values, shape = [n_samples]\n or [n_samples, n_outputs]\n \"\"\"\n if not isinstance(X, (KDTree, BallTree)):\n X, y = check_X_y(X, y, \"csr\", multi_output=True)\n self._y = y\n return self._fit(X)\n\n\nclass SupervisedIntegerMixin(object):\n def fit(self, X, y):\n \"\"\"Fit the model using X as training data and y as target values\n\n Parameters\n ----------\n X : {array-like, sparse matrix, BallTree, KDTree}\n Training data. 
If array or matrix, shape = [n_samples, n_features]\n\n y : {array-like, sparse matrix}\n Target values of shape = [n_samples] or [n_samples, n_outputs]\n\n \"\"\"\n if not isinstance(X, (KDTree, BallTree)):\n X, y = check_X_y(X, y, \"csr\", multi_output=True)\n\n if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:\n if y.ndim != 1:\n warnings.warn(\"A column-vector y was passed when a 1d array \"\n \"was expected. Please change the shape of y to \"\n \"(n_samples, ), for example using ravel().\",\n DataConversionWarning, stacklevel=2)\n\n self.outputs_2d_ = False\n y = y.reshape((-1, 1))\n else:\n self.outputs_2d_ = True\n\n self.classes_ = []\n self._y = np.empty(y.shape, dtype=np.int)\n for k in range(self._y.shape[1]):\n classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)\n self.classes_.append(classes)\n\n if not self.outputs_2d_:\n self.classes_ = self.classes_[0]\n self._y = self._y.ravel()\n\n return self._fit(X)\n\n\nclass UnsupervisedMixin(object):\n def fit(self, X, y=None):\n \"\"\"Fit the model using X as training data\n\n Parameters\n ----------\n X : {array-like, sparse matrix, BallTree, KDTree}\n Training data. If array or matrix, shape = [n_samples, n_features]\n \"\"\"\n return self._fit(X)\n","license":"bsd-3-clause"} {"repo_name":"mjgrav2001\/scikit-learn","path":"sklearn\/decomposition\/tests\/test_sparse_pca.py","copies":"142","size":"5990","content":"# Author: Vlad Niculae\n# License: BSD 3 clause\n\nimport sys\n\nimport numpy as np\n\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import SkipTest\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_false\nfrom sklearn.utils.testing import if_not_mac_os\n\nfrom sklearn.decomposition import SparsePCA, MiniBatchSparsePCA\nfrom sklearn.utils import check_random_state\n\n\ndef generate_toy_data(n_components, n_samples, image_size, random_state=None):\n n_features = image_size[0] * image_size[1]\n\n rng = check_random_state(random_state)\n U = rng.randn(n_samples, n_components)\n V = rng.randn(n_components, n_features)\n\n centers = [(3, 3), (6, 7), (8, 1)]\n sz = [1, 2, 1]\n for k in range(n_components):\n img = np.zeros(image_size)\n xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]\n ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]\n img[xmin:xmax][:, ymin:ymax] = 1.0\n V[k, :] = img.ravel()\n\n # Y is defined by : Y = UV + noise\n Y = np.dot(U, V)\n Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise\n return Y, U, V\n\n# SparsePCA can be a bit slow. 
To avoid having test times go up, we\n# test different aspects of the code in the same test\n\n\ndef test_correct_shapes():\n rng = np.random.RandomState(0)\n X = rng.randn(12, 10)\n spca = SparsePCA(n_components=8, random_state=rng)\n U = spca.fit_transform(X)\n assert_equal(spca.components_.shape, (8, 10))\n assert_equal(U.shape, (12, 8))\n # test overcomplete decomposition\n spca = SparsePCA(n_components=13, random_state=rng)\n U = spca.fit_transform(X)\n assert_equal(spca.components_.shape, (13, 10))\n assert_equal(U.shape, (12, 13))\n\n\ndef test_fit_transform():\n alpha = 1\n rng = np.random.RandomState(0)\n Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array\n spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,\n random_state=0)\n spca_lars.fit(Y)\n\n # Test that CD gives similar results\n spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,\n alpha=alpha)\n spca_lasso.fit(Y)\n assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)\n\n\n@if_not_mac_os()\ndef test_fit_transform_parallel():\n alpha = 1\n rng = np.random.RandomState(0)\n Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array\n spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,\n random_state=0)\n spca_lars.fit(Y)\n U1 = spca_lars.transform(Y)\n # Test multiple CPUs\n spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,\n random_state=0).fit(Y)\n U2 = spca.transform(Y)\n assert_true(not np.all(spca_lars.components_ == 0))\n assert_array_almost_equal(U1, U2)\n\n\ndef test_transform_nan():\n # Test that SparsePCA won't return NaN when there is 0 feature in all\n # samples.\n rng = np.random.RandomState(0)\n Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array\n Y[:, 0] = 0\n estimator = SparsePCA(n_components=8)\n assert_false(np.any(np.isnan(estimator.fit_transform(Y))))\n\n\ndef test_fit_transform_tall():\n rng = np.random.RandomState(0)\n Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array\n spca_lars = SparsePCA(n_components=3, method='lars',\n random_state=rng)\n U1 = spca_lars.fit_transform(Y)\n spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)\n U2 = spca_lasso.fit(Y).transform(Y)\n assert_array_almost_equal(U1, U2)\n\n\ndef test_initialization():\n rng = np.random.RandomState(0)\n U_init = rng.randn(5, 3)\n V_init = rng.randn(3, 4)\n model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,\n random_state=rng)\n model.fit(rng.randn(5, 4))\n assert_array_equal(model.components_, V_init)\n\n\ndef test_mini_batch_correct_shapes():\n rng = np.random.RandomState(0)\n X = rng.randn(12, 10)\n pca = MiniBatchSparsePCA(n_components=8, random_state=rng)\n U = pca.fit_transform(X)\n assert_equal(pca.components_.shape, (8, 10))\n assert_equal(U.shape, (12, 8))\n # test overcomplete decomposition\n pca = MiniBatchSparsePCA(n_components=13, random_state=rng)\n U = pca.fit_transform(X)\n assert_equal(pca.components_.shape, (13, 10))\n assert_equal(U.shape, (12, 13))\n\n\ndef test_mini_batch_fit_transform():\n raise SkipTest(\"skipping mini_batch_fit_transform.\")\n alpha = 1\n rng = np.random.RandomState(0)\n Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array\n spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,\n alpha=alpha).fit(Y)\n U1 = spca_lars.transform(Y)\n # Test multiple CPUs\n if sys.platform == 'win32': # fake parallelism for win32\n import sklearn.externals.joblib.parallel as 
joblib_par\n _mp = joblib_par.multiprocessing\n joblib_par.multiprocessing = None\n try:\n U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,\n random_state=0).fit(Y).transform(Y)\n finally:\n joblib_par.multiprocessing = _mp\n else: # we can efficiently use parallelism\n U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,\n random_state=0).fit(Y).transform(Y)\n assert_true(not np.all(spca_lars.components_ == 0))\n assert_array_almost_equal(U1, U2)\n # Test that CD gives similar results\n spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,\n random_state=0).fit(Y)\n assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)\n","license":"bsd-3-clause"} {"repo_name":"Rickyfox\/MLMA2","path":"core\/DataHandler.py","copies":"1","size":"1770","content":"'''\nCreated on Dec 17, 2014\n\n@author: Dominik Lang\n'''\n\nimport csv\nimport os.path\nfrom random import shuffle\nimport collections\nimport numpy\nfrom sklearn.preprocessing import Imputer\n\n\nclass DataHandler(object):\n\n def __init__(self):\n pass\n \n '''\n @summary: A method to handle reading the data in from the csv file\n @return: List containing the rows of the dataset as seperate lists\n '''\n def readData(self):\n # We get the path to the current file, then go one directory up to find the data file\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(os.path.join(basepath, \"..\",\"data.csv\"))\n \n output=[]\n \n with open(filepath, 'rb') as csvfile:\n i=0\n linereader = csv.reader(csvfile, delimiter=',')\n for row in linereader:\n if i==0:\n i+=1\n continue\n \n output.append(row)\n \n return output\n\n '''\n @summary: A method that splits the dataset into a training and a test set\n '''\n def splitData(self,dataset):\n sets = collections.namedtuple('Sets', ['train', 'test'])\n third=len(dataset)\/3\n shuffle(dataset)\n testset=dataset[0:third]\n trainset=dataset[third:-1]\n s=sets(trainset,testset)\n \n return s\n \n def vectorizeData(self,dataset):\n vectors = collections.namedtuple('vectors', ['X', 'Y'])\n x=[]\n y=[]\n \n for i in dataset:\n atts=i[0:-2]\n c=i[-1]\n x.append(atts)\n y.append(c)\n \n x=numpy.asarray(x)\n y=numpy.asarray(y)\n output=vectors(x,y)\n return output\n \n","license":"gpl-2.0"} {"repo_name":"siutanwong\/scikit-learn","path":"sklearn\/neighbors\/tests\/test_approximate.py","copies":"142","size":"18692","content":"\"\"\"\nTesting for the approximate neighbor search using\nLocality Sensitive Hashing Forest module\n(sklearn.neighbors.LSHForest).\n\"\"\"\n\n# Author: Maheshakya Wijewardena, Joel Nothman\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_array_less\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_not_equal\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.utils.testing import ignore_warnings\n\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.neighbors import LSHForest\nfrom sklearn.neighbors import NearestNeighbors\n\n\ndef test_neighbors_accuracy_with_n_candidates():\n # Checks whether accuracy increases as `n_candidates` increases.\n n_candidates_values = np.array([.1, 50, 500])\n 
n_samples = 100\n n_features = 10\n n_iter = 10\n n_points = 5\n rng = np.random.RandomState(42)\n accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)\n X = rng.rand(n_samples, n_features)\n\n for i, n_candidates in enumerate(n_candidates_values):\n lshf = LSHForest(n_candidates=n_candidates)\n lshf.fit(X)\n for j in range(n_iter):\n query = X[rng.randint(0, n_samples)]\n neighbors = lshf.kneighbors(query, n_neighbors=n_points,\n return_distance=False)\n distances = pairwise_distances(query, X, metric='cosine')\n ranks = np.argsort(distances)[0, :n_points]\n\n intersection = np.intersect1d(ranks, neighbors).shape[0]\n ratio = intersection \/ float(n_points)\n accuracies[i] = accuracies[i] + ratio\n\n accuracies[i] = accuracies[i] \/ float(n_iter)\n # Sorted accuracies should be equal to original accuracies\n assert_true(np.all(np.diff(accuracies) >= 0),\n msg=\"Accuracies are not non-decreasing.\")\n # Highest accuracy should be strictly greater than the lowest\n assert_true(np.ptp(accuracies) > 0,\n msg=\"Highest accuracy is not strictly greater than lowest.\")\n\n\ndef test_neighbors_accuracy_with_n_estimators():\n # Checks whether accuracy increases as `n_estimators` increases.\n n_estimators = np.array([1, 10, 100])\n n_samples = 100\n n_features = 10\n n_iter = 10\n n_points = 5\n rng = np.random.RandomState(42)\n accuracies = np.zeros(n_estimators.shape[0], dtype=float)\n X = rng.rand(n_samples, n_features)\n\n for i, t in enumerate(n_estimators):\n lshf = LSHForest(n_candidates=500, n_estimators=t)\n lshf.fit(X)\n for j in range(n_iter):\n query = X[rng.randint(0, n_samples)]\n neighbors = lshf.kneighbors(query, n_neighbors=n_points,\n return_distance=False)\n distances = pairwise_distances(query, X, metric='cosine')\n ranks = np.argsort(distances)[0, :n_points]\n\n intersection = np.intersect1d(ranks, neighbors).shape[0]\n ratio = intersection \/ float(n_points)\n accuracies[i] = accuracies[i] + ratio\n\n accuracies[i] = accuracies[i] \/ float(n_iter)\n # Sorted accuracies should be equal to original accuracies\n assert_true(np.all(np.diff(accuracies) >= 0),\n msg=\"Accuracies are not non-decreasing.\")\n # Highest accuracy should be strictly greater than the lowest\n assert_true(np.ptp(accuracies) > 0,\n msg=\"Highest accuracy is not strictly greater than lowest.\")\n\n\n@ignore_warnings\ndef test_kneighbors():\n # Checks whether desired number of neighbors are returned.\n # It is guaranteed to return the requested number of neighbors\n # if `min_hash_match` is set to 0. 
Returned distances should be\n # in ascending order.\n n_samples = 12\n n_features = 2\n n_iter = 10\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest(min_hash_match=0)\n # Test unfitted estimator\n assert_raises(ValueError, lshf.kneighbors, X[0])\n\n lshf.fit(X)\n\n for i in range(n_iter):\n n_neighbors = rng.randint(0, n_samples)\n query = X[rng.randint(0, n_samples)]\n neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,\n return_distance=False)\n # Desired number of neighbors should be returned.\n assert_equal(neighbors.shape[1], n_neighbors)\n\n # Multiple points\n n_queries = 5\n queries = X[rng.randint(0, n_samples, n_queries)]\n distances, neighbors = lshf.kneighbors(queries,\n n_neighbors=1,\n return_distance=True)\n assert_equal(neighbors.shape[0], n_queries)\n assert_equal(distances.shape[0], n_queries)\n # Test only neighbors\n neighbors = lshf.kneighbors(queries, n_neighbors=1,\n return_distance=False)\n assert_equal(neighbors.shape[0], n_queries)\n # Test random point(not in the data set)\n query = rng.randn(n_features)\n lshf.kneighbors(query, n_neighbors=1,\n return_distance=False)\n # Test n_neighbors at initialization\n neighbors = lshf.kneighbors(query, return_distance=False)\n assert_equal(neighbors.shape[1], 5)\n # Test `neighbors` has an integer dtype\n assert_true(neighbors.dtype.kind == 'i',\n msg=\"neighbors are not in integer dtype.\")\n\n\ndef test_radius_neighbors():\n # Checks whether Returned distances are less than `radius`\n # At least one point should be returned when the `radius` is set\n # to mean distance from the considering point to other points in\n # the database.\n # Moreover, this test compares the radius neighbors of LSHForest\n # with the `sklearn.neighbors.NearestNeighbors`.\n n_samples = 12\n n_features = 2\n n_iter = 10\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest()\n # Test unfitted estimator\n assert_raises(ValueError, lshf.radius_neighbors, X[0])\n\n lshf.fit(X)\n\n for i in range(n_iter):\n # Select a random point in the dataset as the query\n query = X[rng.randint(0, n_samples)]\n\n # At least one neighbor should be returned when the radius is the\n # mean distance from the query to the points of the dataset.\n mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))\n neighbors = lshf.radius_neighbors(query, radius=mean_dist,\n return_distance=False)\n\n assert_equal(neighbors.shape, (1,))\n assert_equal(neighbors.dtype, object)\n assert_greater(neighbors[0].shape[0], 0)\n # All distances to points in the results of the radius query should\n # be less than mean_dist\n distances, neighbors = lshf.radius_neighbors(query,\n radius=mean_dist,\n return_distance=True)\n assert_array_less(distances[0], mean_dist)\n\n # Multiple points\n n_queries = 5\n queries = X[rng.randint(0, n_samples, n_queries)]\n distances, neighbors = lshf.radius_neighbors(queries,\n return_distance=True)\n\n # dists and inds should not be 1D arrays or arrays of variable lengths\n # hence the use of the object dtype.\n assert_equal(distances.shape, (n_queries,))\n assert_equal(distances.dtype, object)\n assert_equal(neighbors.shape, (n_queries,))\n assert_equal(neighbors.dtype, object)\n\n # Compare with exact neighbor search\n query = X[rng.randint(0, n_samples)]\n mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))\n nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)\n\n distances_exact, _ = nbrs.radius_neighbors(query, 
radius=mean_dist)\n distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)\n\n # Radius-based queries do not sort the result points and the order\n # depends on the method, the random_state and the dataset order. Therefore\n # we need to sort the results ourselves before performing any comparison.\n sorted_dists_exact = np.sort(distances_exact[0])\n sorted_dists_approx = np.sort(distances_approx[0])\n\n # Distances to exact neighbors are less than or equal to approximate\n # counterparts as the approximate radius query might have missed some\n # closer neighbors.\n assert_true(np.all(np.less_equal(sorted_dists_exact,\n sorted_dists_approx)))\n\n\ndef test_radius_neighbors_boundary_handling():\n X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]\n n_points = len(X)\n\n # Build an exact nearest neighbors model as reference model to ensure\n # consistency between exact and approximate methods\n nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)\n\n # Build a LSHForest model with hyperparameter values that always guarantee\n # exact results on this toy dataset.\n lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)\n\n # define a query aligned with the first axis\n query = [1., 0.]\n\n # Compute the exact cosine distances of the query to the four points of\n # the dataset\n dists = pairwise_distances(query, X, metric='cosine').ravel()\n\n # The first point is almost aligned with the query (very small angle),\n # the cosine distance should therefore be almost null:\n assert_almost_equal(dists[0], 0, decimal=5)\n\n # The second point form an angle of 45 degrees to the query vector\n assert_almost_equal(dists[1], 1 - np.cos(np.pi \/ 4))\n\n # The third point is orthogonal from the query vector hence at a distance\n # exactly one:\n assert_almost_equal(dists[2], 1)\n\n # The last point is almost colinear but with opposite sign to the query\n # therefore it has a cosine 'distance' very close to the maximum possible\n # value of 2.\n assert_almost_equal(dists[3], 2, decimal=5)\n\n # If we query with a radius of one, all the samples except the last sample\n # should be included in the results. 
This means that the third sample\n # is lying on the boundary of the radius query:\n exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)\n approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)\n\n assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])\n assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])\n assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])\n assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])\n\n # If we perform the same query with a slighltly lower radius, the third\n # point of the dataset that lay on the boundary of the previous query\n # is now rejected:\n eps = np.finfo(np.float64).eps\n exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)\n approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)\n\n assert_array_equal(np.sort(exact_idx[0]), [0, 1])\n assert_array_equal(np.sort(approx_idx[0]), [0, 1])\n assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])\n assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])\n\n\ndef test_distances():\n # Checks whether returned neighbors are from closest to farthest.\n n_samples = 12\n n_features = 2\n n_iter = 10\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest()\n lshf.fit(X)\n\n for i in range(n_iter):\n n_neighbors = rng.randint(0, n_samples)\n query = X[rng.randint(0, n_samples)]\n distances, neighbors = lshf.kneighbors(query,\n n_neighbors=n_neighbors,\n return_distance=True)\n\n # Returned neighbors should be from closest to farthest, that is\n # increasing distance values.\n assert_true(np.all(np.diff(distances[0]) >= 0))\n\n # Note: the radius_neighbors method does not guarantee the order of\n # the results.\n\n\ndef test_fit():\n # Checks whether `fit` method sets all attribute values correctly.\n n_samples = 12\n n_features = 2\n n_estimators = 5\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest(n_estimators=n_estimators)\n lshf.fit(X)\n\n # _input_array = X\n assert_array_equal(X, lshf._fit_X)\n # A hash function g(p) for each tree\n assert_equal(n_estimators, len(lshf.hash_functions_))\n # Hash length = 32\n assert_equal(32, lshf.hash_functions_[0].components_.shape[0])\n # Number of trees_ in the forest\n assert_equal(n_estimators, len(lshf.trees_))\n # Each tree has entries for every data point\n assert_equal(n_samples, len(lshf.trees_[0]))\n # Original indices after sorting the hashes\n assert_equal(n_estimators, len(lshf.original_indices_))\n # Each set of original indices in a tree has entries for every data point\n assert_equal(n_samples, len(lshf.original_indices_[0]))\n\n\ndef test_partial_fit():\n # Checks whether inserting array is consitent with fitted data.\n # `partial_fit` method should set all attribute values correctly.\n n_samples = 12\n n_samples_partial_fit = 3\n n_features = 2\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n X_partial_fit = rng.rand(n_samples_partial_fit, n_features)\n\n lshf = LSHForest()\n\n # Test unfitted estimator\n lshf.partial_fit(X)\n assert_array_equal(X, lshf._fit_X)\n\n lshf.fit(X)\n\n # Insert wrong dimension\n assert_raises(ValueError, lshf.partial_fit,\n np.random.randn(n_samples_partial_fit, n_features - 1))\n\n lshf.partial_fit(X_partial_fit)\n\n # size of _input_array = samples + 1 after insertion\n assert_equal(lshf._fit_X.shape[0],\n n_samples + n_samples_partial_fit)\n # size of original_indices_[1] = samples + 1\n 
assert_equal(len(lshf.original_indices_[0]),\n n_samples + n_samples_partial_fit)\n # size of trees_[1] = samples + 1\n assert_equal(len(lshf.trees_[1]),\n n_samples + n_samples_partial_fit)\n\n\ndef test_hash_functions():\n # Checks randomness of hash functions.\n # Variance and mean of each hash function (projection vector)\n # should be different from flattened array of hash functions.\n # If hash functions are not randomly built (seeded with\n # same value), variances and means of all functions are equal.\n n_samples = 12\n n_features = 2\n n_estimators = 5\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest(n_estimators=n_estimators,\n random_state=rng.randint(0, np.iinfo(np.int32).max))\n lshf.fit(X)\n\n hash_functions = []\n for i in range(n_estimators):\n hash_functions.append(lshf.hash_functions_[i].components_)\n\n for i in range(n_estimators):\n assert_not_equal(np.var(hash_functions),\n np.var(lshf.hash_functions_[i].components_))\n\n for i in range(n_estimators):\n assert_not_equal(np.mean(hash_functions),\n np.mean(lshf.hash_functions_[i].components_))\n\n\ndef test_candidates():\n # Checks whether candidates are sufficient.\n # This should handle the cases when number of candidates is 0.\n # User should be warned when number of candidates is less than\n # requested number of neighbors.\n X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],\n [6, 10, 2]], dtype=np.float32)\n X_test = np.array([7, 10, 3], dtype=np.float32)\n\n # For zero candidates\n lshf = LSHForest(min_hash_match=32)\n lshf.fit(X_train)\n\n message = (\"Number of candidates is not sufficient to retrieve\"\n \" %i neighbors with\"\n \" min_hash_match = %i. Candidates are filled up\"\n \" uniformly from unselected\"\n \" indices.\" % (3, 32))\n assert_warns_message(UserWarning, message, lshf.kneighbors,\n X_test, n_neighbors=3)\n distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)\n assert_equal(distances.shape[1], 3)\n\n # For candidates less than n_neighbors\n lshf = LSHForest(min_hash_match=31)\n lshf.fit(X_train)\n\n message = (\"Number of candidates is not sufficient to retrieve\"\n \" %i neighbors with\"\n \" min_hash_match = %i. 
Candidates are filled up\"\n \" uniformly from unselected\"\n \" indices.\" % (5, 31))\n assert_warns_message(UserWarning, message, lshf.kneighbors,\n X_test, n_neighbors=5)\n distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)\n assert_equal(distances.shape[1], 5)\n\n\ndef test_graphs():\n # Smoke tests for graph methods.\n n_samples_sizes = [5, 10, 20]\n n_features = 3\n rng = np.random.RandomState(42)\n\n for n_samples in n_samples_sizes:\n X = rng.rand(n_samples, n_features)\n lshf = LSHForest(min_hash_match=0)\n lshf.fit(X)\n\n kneighbors_graph = lshf.kneighbors_graph(X)\n radius_neighbors_graph = lshf.radius_neighbors_graph(X)\n\n assert_equal(kneighbors_graph.shape[0], n_samples)\n assert_equal(kneighbors_graph.shape[1], n_samples)\n assert_equal(radius_neighbors_graph.shape[0], n_samples)\n assert_equal(radius_neighbors_graph.shape[1], n_samples)\n\n\ndef test_sparse_input():\n # note: Fixed random state in sp.rand is not supported in older scipy.\n # The test should succeed regardless.\n X1 = sp.rand(50, 100)\n X2 = sp.rand(10, 100)\n forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)\n forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)\n\n d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)\n d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)\n\n assert_almost_equal(d_sparse, d_dense)\n assert_almost_equal(i_sparse, i_dense)\n\n d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,\n return_distance=True)\n d_dense, i_dense = forest_dense.radius_neighbors(X2.A,\n return_distance=True)\n assert_equal(d_sparse.shape, d_dense.shape)\n for a, b in zip(d_sparse, d_dense):\n assert_almost_equal(a, b)\n for a, b in zip(i_sparse, i_dense):\n assert_almost_equal(a, b)\n","license":"bsd-3-clause"} {"repo_name":"jjx02230808\/project0223","path":"examples\/decomposition\/plot_kernel_pca.py","copies":"353","size":"2011","content":"\"\"\"\n==========\nKernel PCA\n==========\n\nThis example shows that Kernel PCA is able to find a projection of the data\nthat makes data linearly separable.\n\"\"\"\nprint(__doc__)\n\n# Authors: Mathieu Blondel\n# Andreas Mueller\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.decomposition import PCA, KernelPCA\nfrom sklearn.datasets import make_circles\n\nnp.random.seed(0)\n\nX, y = make_circles(n_samples=400, factor=.3, noise=.05)\n\nkpca = KernelPCA(kernel=\"rbf\", fit_inverse_transform=True, gamma=10)\nX_kpca = kpca.fit_transform(X)\nX_back = kpca.inverse_transform(X_kpca)\npca = PCA()\nX_pca = pca.fit_transform(X)\n\n# Plot results\n\nplt.figure()\nplt.subplot(2, 2, 1, aspect='equal')\nplt.title(\"Original space\")\nreds = y == 0\nblues = y == 1\n\nplt.plot(X[reds, 0], X[reds, 1], \"ro\")\nplt.plot(X[blues, 0], X[blues, 1], \"bo\")\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\n\nX1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))\nX_grid = np.array([np.ravel(X1), np.ravel(X2)]).T\n# projection on the first principal component (in the phi space)\nZ_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)\nplt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')\n\nplt.subplot(2, 2, 2, aspect='equal')\nplt.plot(X_pca[reds, 0], X_pca[reds, 1], \"ro\")\nplt.plot(X_pca[blues, 0], X_pca[blues, 1], \"bo\")\nplt.title(\"Projection by PCA\")\nplt.xlabel(\"1st principal component\")\nplt.ylabel(\"2nd component\")\n\nplt.subplot(2, 2, 3, aspect='equal')\nplt.plot(X_kpca[reds, 0], X_kpca[reds, 1], 
\"ro\")\nplt.plot(X_kpca[blues, 0], X_kpca[blues, 1], \"bo\")\nplt.title(\"Projection by KPCA\")\nplt.xlabel(\"1st principal component in space induced by $\\phi$\")\nplt.ylabel(\"2nd component\")\n\nplt.subplot(2, 2, 4, aspect='equal')\nplt.plot(X_back[reds, 0], X_back[reds, 1], \"ro\")\nplt.plot(X_back[blues, 0], X_back[blues, 1], \"bo\")\nplt.title(\"Original space after inverse transform\")\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\n\nplt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"igabriel85\/dmon-adp","path":"adpformater\/adpformater.py","copies":"1","size":"1615","content":"import pandas as pd\n\n\nclass DataFormatter():\n def __init__(self, dataloc):\n self.dataloc = dataloc\n\n def aggJsonToCsv(self):\n return \"CSV file\"\n\n def expTimestamp(self):\n return \"Expand metric timestamp\"\n\n def window(self):\n return \"Window metrics\"\n\n def pivot(self):\n return \"Pivot values\"\n\n def addID(self):\n return \"Add new ID as index\"\n\n def removeID(self):\n return \"Remove selected column as index\"\n\n def renameHeader(self):\n return \"Rename headers\"\n\n def normalize(self):\n return \"Normalize data\"\n\n def denormalize(self):\n return \"Denormalize data\"\n\ninput_table = pd.read_csv(\"metrics.csv\")\n\n\nfor index, row in input_table.iterrows():\n input_table = input_table.append([row]*9)\n\ninput_table = input_table.sort_values(['row ID'])\ninput_table = input_table.reset_index(drop=True)\n\nfor index, rows in input_table.iterrows():\n\n if int(index) > 59:\n print \"Index to big!\"\n time = rows[0].split(\", \", 1) #In Knime row for timestamp is row(55) last one\n timeHour = time[1].split(\":\", 2)\n timeHourSeconds = timeHour[2].split(\".\", 1)\n timeHourSecondsDecimal = timeHour[2].split(\".\", 1)\n timeHourSecondsDecimal[0] = str(index)\n if len(timeHourSecondsDecimal[0]) == 1:\n timeHourSecondsDecimal[0] = '0%s' %timeHourSecondsDecimal[0]\n decimal = '.'.join(timeHourSecondsDecimal)\n timeHour[2] = decimal\n timenew = ':'.join(timeHour)\n time[1] = timenew\n finalString = ', '.join(time)\n input_table.set_value(index, 'row ID', finalString)\n\ninput_table.to_csv('out.csv')\n\n\n","license":"apache-2.0"} {"repo_name":"idlead\/scikit-learn","path":"examples\/linear_model\/plot_sgd_comparison.py","copies":"112","size":"1819","content":"\"\"\"\n==================================\nComparing various online solvers\n==================================\n\nAn example showing how different online solvers perform\non the hand-written digits dataset.\n\n\"\"\"\n# Author: Rob Zinkov \n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import SGDClassifier, Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nheldout = [0.95, 0.90, 0.75, 0.50, 0.01]\nrounds = 20\ndigits = datasets.load_digits()\nX, y = digits.data, digits.target\n\nclassifiers = [\n (\"SGD\", SGDClassifier()),\n (\"ASGD\", SGDClassifier(average=True)),\n (\"Perceptron\", Perceptron()),\n (\"Passive-Aggressive I\", PassiveAggressiveClassifier(loss='hinge',\n C=1.0)),\n (\"Passive-Aggressive II\", PassiveAggressiveClassifier(loss='squared_hinge',\n C=1.0)),\n (\"SAG\", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 \/ X.shape[0]))\n]\n\nxx = 1. 
- np.array(heldout)\n\nfor name, clf in classifiers:\n print(\"training %s\" % name)\n rng = np.random.RandomState(42)\n yy = []\n for i in heldout:\n yy_ = []\n for r in range(rounds):\n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=i, random_state=rng)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n yy_.append(1 - np.mean(y_pred == y_test))\n yy.append(np.mean(yy_))\n plt.plot(xx, yy, label=name)\n\nplt.legend(loc=\"upper right\")\nplt.xlabel(\"Proportion train\")\nplt.ylabel(\"Test Error Rate\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"junbochen\/pylearn2","path":"pylearn2\/scripts\/papers\/jia_huang_wkshp_11\/evaluate.py","copies":"44","size":"3208","content":"from __future__ import print_function\n\nfrom optparse import OptionParser\nimport warnings\ntry:\n from sklearn.metrics import classification_report\nexcept ImportError:\n classification_report = None\n warnings.warn(\"couldn't find sklearn.metrics.classification_report\")\ntry:\n from sklearn.metrics import confusion_matrix\nexcept ImportError:\n confusion_matrix = None\n warnings.warn(\"couldn't find sklearn.metrics.metrics.confusion_matrix\")\nfrom galatea.s3c.feature_loading import get_features\nfrom pylearn2.utils import serial\nfrom pylearn2.datasets.cifar10 import CIFAR10\nfrom pylearn2.datasets.cifar100 import CIFAR100\nimport numpy as np\n\ndef test(model, X, y):\n print(\"Evaluating svm\")\n y_pred = model.predict(X)\n #try:\n if True:\n acc = (y == y_pred).mean()\n print(\"Accuracy \",acc)\n \"\"\"except:\n print(\"something went wrong\")\n print('y:')\n print(y)\n print('y_pred:')\n print(y_pred)\n print('extra info')\n print(type(y))\n print(type(y_pred))\n print(y.dtype)\n print(y_pred.dtype)\n print(y.shape)\n print(y_pred.shape)\n raise\n\"\"\"\n#\n\n\ndef get_test_labels(cifar10, cifar100, stl10):\n assert cifar10 + cifar100 + stl10 == 1\n\n if stl10:\n print('loading entire stl-10 test set just to get the labels')\n stl10 = serial.load(\"${PYLEARN2_DATA_PATH}\/stl10\/stl10_32x32\/test.pkl\")\n return stl10.y\n if cifar10:\n print('loading entire cifar10 test set just to get the labels')\n cifar10 = CIFAR10(which_set = 'test')\n return np.asarray(cifar10.y)\n if cifar100:\n print('loading entire cifar100 test set just to get the fine labels')\n cifar100 = CIFAR100(which_set = 'test')\n return np.asarray(cifar100.y_fine)\n assert False\n\n\ndef main(model_path,\n test_path,\n dataset,\n **kwargs):\n\n model = serial.load(model_path)\n\n cifar100 = dataset == 'cifar100'\n cifar10 = dataset == 'cifar10'\n stl10 = dataset == 'stl10'\n assert cifar10 + cifar100 + stl10 == 1\n\n y = get_test_labels(cifar10, cifar100, stl10)\n X = get_features(test_path, False, False)\n if stl10:\n num_examples = 8000\n if cifar10 or cifar100:\n num_examples = 10000\n if not X.shape[0] == num_examples:\n raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))\n assert y.shape[0] == num_examples\n\n test(model,X,y)\n\n\nif __name__ == '__main__':\n \"\"\"\n Useful for quick tests.\n Usage: python train_bilinear.py\n \"\"\"\n\n parser = OptionParser()\n parser.add_option(\"-m\", \"--model\",\n action=\"store\", type=\"string\", dest=\"model_path\")\n parser.add_option(\"-t\", \"--test\",\n action=\"store\", type=\"string\", dest=\"test\")\n parser.add_option(\"-o\", action=\"store\", dest=\"output\", default = None, help=\"path to write the report to\")\n parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)\n\n 
#(options, args) = parser.parse_args()\n\n #assert options.output\n\n main(model_path='final_model.pkl',\n test_path='test_features.npy',\n dataset = 'cifar100',\n )\n","license":"bsd-3-clause"} {"repo_name":"fboers\/jumeg","path":"examples\/do_MLICA.py","copies":"1","size":"5891","content":"\"\"\"\nCompute ICA object based on filtered and downsampled data.\nIdentify ECG and EOG artifacts using MLICA and compare\nresults to correlation & ctps analysis.\n\nApply ICA object to filtered and unfiltered data.\n\nAhmad Hasasneh, Nikolas Kampel, Praveen Sripad, N. Jon Shah, and Juergen Dammers\n\"Deep Learning Approach for Automatic Classification of Ocular and Cardiac\nArtifacts in MEG Data\"\nJournal of Engineering, vol. 2018, Article ID 1350692,10 pages, 2018.\nhttps:\/\/doi.org\/10.1155\/2018\/1350692\n\"\"\"\n\nimport os.path as op\nimport matplotlib.pylab as plt\nplt.ion()\nimport numpy as np\nimport mne\nfrom jumeg.decompose.ica_replace_mean_std import ICA, ica_update_mean_std\nfrom keras.models import load_model\nfrom jumeg.jumeg_noise_reducer import noise_reducer\nfrom jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular\nfrom jumeg.jumeg_plot import plot_performance_artifact_rejection\nfrom jumeg.jumeg_utils import get_jumeg_path\n\n# config\nMLICA_threshold = 0.8\nn_components = 60\nnjobs = 4 # for downsampling\ntmin = 0\ntmax = tmin + 15000\nflow_ecg, fhigh_ecg = 8, 20\nflow_eog, fhigh_eog = 1, 20\necg_thresh, eog_thresh = 0.3, 0.3\necg_ch = 'ECG 001'\neog1_ch = 'EOG 001'\neog2_ch = 'EOG 002'\nreject = {'mag': 5e-12}\nrefnotch = [50., 100., 150., 200., 250., 300., 350., 400.]\n\ndata_path = op.join(get_jumeg_path(), 'data')\nprint(data_path)\n\n# example filname\nraw_fname = \"\/Volumes\/megraid21\/sripad\/cau_fif_data\/jumeg_test_data\/\" \\\n \"109925_CAU01A_100715_0842_2_c,rfDC-raw.fif\"\n\n# load the model for artifact rejection\n# the details of the model is provided in the x_validation_shuffle_v4_split_23.txt\nmodel_name = op.join(data_path, \"dcnn_model.hdf5\")\n\nmodel = load_model(model_name)\n\n# noise reducer\nraw_nr = noise_reducer(raw_fname, reflp=5., return_raw=True)\n\nraw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=0.1, noiseref=['RFG ...'],\n return_raw=True)\n\n# 50HZ and 60HZ notch filter to remove noise\nraw = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch, return_raw=True)\n\npicks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,\n stim=False, exclude='bads')\n\nraw_filtered = raw.copy().filter(0., 45., picks=picks, filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto',\n n_jobs=njobs, method='fir', phase='zero',\n fir_window='hamming')\n\n# downsample the data to 250 Hz, necessary for the model\nraw_ds = raw_filtered.copy().resample(250, npad='auto', window='boxcar', stim_picks=None,\n n_jobs=njobs, events=None)\nraw_ds_chop = raw_ds.copy().crop(tmin=tmin*4.\/1000, tmax=tmax*4.\/1000) # downsampled raw\nraw_filtered_chop = raw_filtered.copy().crop(tmin=tmin*4.\/1000, tmax=tmax*4.\/1000)\nraw_chop = raw.copy().crop(tmin=tmin*4.\/1000, tmax=tmax*4.\/1000)\n\nica = ICA(method='fastica', n_components=n_components, random_state=42,\n max_pca_components=None, max_iter=5000, verbose=None)\n\n# do the ICA decomposition on downsampled raw\nica.fit(raw_ds_chop, picks=picks, reject=reject, verbose=None)\n\nsources = ica.get_sources(raw_ds_chop)._data\n\n# extract temporal and spatial components\nmm = np.float32(np.dot(ica.mixing_matrix_[:, :].T,\n ica.pca_components_[:ica.n_components_]))\n\n# use [:, :15000] to make 
sure it's 15000 data points\nchop = sources[:, :15000]\nchop_reshaped = np.reshape(chop, (len(chop), len(chop[0]), 1))\n\nmodel_scores = model.predict([mm, chop_reshaped], verbose=1)\n\nbads_MLICA = []\n\n# print model_scores\n\nfor idx in range(0, len(model_scores)):\n if model_scores[idx][0] > MLICA_threshold:\n bads_MLICA.append(idx)\n\n# visualisation\n# ica.exclude = bads_MLICA\n# ica.plot_sources(raw_ds_chop, block=True)\n\n# compare MLICA to results from correlation and ctps analysis\nica.exclude = []\n\nprint('Identifying components..')\n# get ECG\/EOG related components using JuMEG\nic_ecg = get_ics_cardiac(raw_filtered_chop, ica, flow=flow_ecg, fhigh=fhigh_ecg,\n thresh=ecg_thresh, tmin=-0.5, tmax=0.5,\n name_ecg=ecg_ch, use_CTPS=True)[0] # returns both ICs and scores (take only ICs)\nic_eog = get_ics_ocular(raw_filtered_chop, ica, flow=flow_eog, fhigh=fhigh_eog,\n thresh=eog_thresh, name_eog_hor=eog1_ch,\n name_eog_ver=eog2_ch, score_func='pearsonr')\n\nbads_corr_ctps = list(ic_ecg) + list(ic_eog)\nbads_corr_ctps = list(set(bads_corr_ctps)) # remove potential duplicates\nbads_corr_ctps.sort()\n\n# visualisation\n# ica.exclude = bads_corr_ctps\n# ica.plot_sources(raw_chop, block=True)\n\nprint('Bad components from MLICA:', bads_MLICA)\nprint('Bad components from correlation & ctps:', bads_corr_ctps)\n\n# apply MLICA result to filtered and unfiltered data\n# exclude bad components identified by MLICA\nica.exclude = bads_MLICA\n\nfnout_fig = '109925_CAU01A_100715_0842_2_c,rfDC,0-45hz,ar-perf'\nica_filtered_chop = ica_update_mean_std(raw_filtered_chop, ica, picks=picks, reject=reject)\nraw_filtered_chop_clean = ica_filtered_chop.apply(raw_filtered_chop, exclude=ica.exclude,\n n_pca_components=None)\n\nica_unfiltered_chop = ica_update_mean_std(raw_chop, ica, picks=picks, reject=reject)\nraw_unfiltered_chop_clean = ica_unfiltered_chop.apply(raw_chop, exclude=ica.exclude, n_pca_components=None)\n\n# create copy of original data since apply_ica_replace_mean_std changes the input data in place (raw and ica)\nraw_copy = raw.copy().crop(tmin=tmin*4.\/1000, tmax=tmax*4.\/1000)\n\nplot_performance_artifact_rejection(raw_copy, ica_unfiltered_chop, fnout_fig,\n meg_clean=raw_unfiltered_chop_clean,\n show=False, verbose=False,\n name_ecg=ecg_ch,\n name_eog=eog2_ch)\n","license":"bsd-3-clause"} {"repo_name":"wzbozon\/scikit-learn","path":"sklearn\/tests\/test_learning_curve.py","copies":"225","size":"10791","content":"# Author: Alexander Fabisch \n#\n# License: BSD 3 clause\n\nimport sys\nfrom sklearn.externals.six.moves import cStringIO as StringIO\nimport numpy as np\nimport warnings\nfrom sklearn.base import BaseEstimator\nfrom sklearn.learning_curve import learning_curve, validation_curve\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.datasets import make_classification\nfrom sklearn.cross_validation import KFold\nfrom sklearn.linear_model import PassiveAggressiveClassifier\n\n\nclass MockImprovingEstimator(BaseEstimator):\n \"\"\"Dummy classifier to test the learning curve\"\"\"\n def __init__(self, n_max_train_sizes):\n self.n_max_train_sizes = n_max_train_sizes\n self.train_sizes = 0\n self.X_subset = None\n\n def fit(self, X_subset, y_subset=None):\n self.X_subset = X_subset\n self.train_sizes = X_subset.shape[0]\n return self\n\n def predict(self, X):\n raise 
NotImplementedError\n\n def score(self, X=None, Y=None):\n # training score becomes worse (2 -> 1), test error better (0 -> 1)\n if self._is_training_data(X):\n return 2. - float(self.train_sizes) \/ self.n_max_train_sizes\n else:\n return float(self.train_sizes) \/ self.n_max_train_sizes\n\n def _is_training_data(self, X):\n return X is self.X_subset\n\n\nclass MockIncrementalImprovingEstimator(MockImprovingEstimator):\n \"\"\"Dummy classifier that provides partial_fit\"\"\"\n def __init__(self, n_max_train_sizes):\n super(MockIncrementalImprovingEstimator,\n self).__init__(n_max_train_sizes)\n self.x = None\n\n def _is_training_data(self, X):\n return self.x in X\n\n def partial_fit(self, X, y=None, **params):\n self.train_sizes += X.shape[0]\n self.x = X[0]\n\n\nclass MockEstimatorWithParameter(BaseEstimator):\n \"\"\"Dummy classifier to test the validation curve\"\"\"\n def __init__(self, param=0.5):\n self.X_subset = None\n self.param = param\n\n def fit(self, X_subset, y_subset):\n self.X_subset = X_subset\n self.train_sizes = X_subset.shape[0]\n return self\n\n def predict(self, X):\n raise NotImplementedError\n\n def score(self, X=None, y=None):\n return self.param if self._is_training_data(X) else 1 - self.param\n\n def _is_training_data(self, X):\n return X is self.X_subset\n\n\ndef test_learning_curve():\n X, y = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockImprovingEstimator(20)\n with warnings.catch_warnings(record=True) as w:\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))\n if len(w) > 0:\n raise RuntimeError(\"Unexpected warning: %r\" % w[0].message)\n assert_equal(train_scores.shape, (10, 3))\n assert_equal(test_scores.shape, (10, 3))\n assert_array_equal(train_sizes, np.linspace(2, 20, 10))\n assert_array_almost_equal(train_scores.mean(axis=1),\n np.linspace(1.9, 1.0, 10))\n assert_array_almost_equal(test_scores.mean(axis=1),\n np.linspace(0.1, 1.0, 10))\n\n\ndef test_learning_curve_unsupervised():\n X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockImprovingEstimator(20)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))\n assert_array_equal(train_sizes, np.linspace(2, 20, 10))\n assert_array_almost_equal(train_scores.mean(axis=1),\n np.linspace(1.9, 1.0, 10))\n assert_array_almost_equal(test_scores.mean(axis=1),\n np.linspace(0.1, 1.0, 10))\n\n\ndef test_learning_curve_verbose():\n X, y = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockImprovingEstimator(20)\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n train_sizes, train_scores, test_scores = \\\n learning_curve(estimator, X, y, cv=3, verbose=1)\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n\n assert(\"[learning_curve]\" in out)\n\n\ndef test_learning_curve_incremental_learning_not_possible():\n X, y = make_classification(n_samples=2, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n # The mockup does not have partial_fit()\n estimator = MockImprovingEstimator(1)\n assert_raises(ValueError, learning_curve, estimator, X, y,\n 
exploit_incremental_learning=True)\n\n\ndef test_learning_curve_incremental_learning():\n X, y = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockIncrementalImprovingEstimator(20)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=3, exploit_incremental_learning=True,\n train_sizes=np.linspace(0.1, 1.0, 10))\n assert_array_equal(train_sizes, np.linspace(2, 20, 10))\n assert_array_almost_equal(train_scores.mean(axis=1),\n np.linspace(1.9, 1.0, 10))\n assert_array_almost_equal(test_scores.mean(axis=1),\n np.linspace(0.1, 1.0, 10))\n\n\ndef test_learning_curve_incremental_learning_unsupervised():\n X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockIncrementalImprovingEstimator(20)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y=None, cv=3, exploit_incremental_learning=True,\n train_sizes=np.linspace(0.1, 1.0, 10))\n assert_array_equal(train_sizes, np.linspace(2, 20, 10))\n assert_array_almost_equal(train_scores.mean(axis=1),\n np.linspace(1.9, 1.0, 10))\n assert_array_almost_equal(test_scores.mean(axis=1),\n np.linspace(0.1, 1.0, 10))\n\n\ndef test_learning_curve_batch_and_incremental_learning_are_equal():\n X, y = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n train_sizes = np.linspace(0.2, 1.0, 5)\n estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)\n\n train_sizes_inc, train_scores_inc, test_scores_inc = \\\n learning_curve(\n estimator, X, y, train_sizes=train_sizes,\n cv=3, exploit_incremental_learning=True)\n train_sizes_batch, train_scores_batch, test_scores_batch = \\\n learning_curve(\n estimator, X, y, cv=3, train_sizes=train_sizes,\n exploit_incremental_learning=False)\n\n assert_array_equal(train_sizes_inc, train_sizes_batch)\n assert_array_almost_equal(train_scores_inc.mean(axis=1),\n train_scores_batch.mean(axis=1))\n assert_array_almost_equal(test_scores_inc.mean(axis=1),\n test_scores_batch.mean(axis=1))\n\n\ndef test_learning_curve_n_sample_range_out_of_bounds():\n X, y = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockImprovingEstimator(20)\n assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,\n train_sizes=[0, 1])\n assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,\n train_sizes=[0.0, 1.0])\n assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,\n train_sizes=[0.1, 1.1])\n assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,\n train_sizes=[0, 20])\n assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,\n train_sizes=[1, 21])\n\n\ndef test_learning_curve_remove_duplicate_sample_sizes():\n X, y = make_classification(n_samples=3, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n estimator = MockImprovingEstimator(2)\n train_sizes, _, _ = assert_warns(\n RuntimeWarning, learning_curve, estimator, X, y, cv=3,\n train_sizes=np.linspace(0.33, 1.0, 3))\n assert_array_equal(train_sizes, [1, 2])\n\n\ndef test_learning_curve_with_boolean_indices():\n X, y = make_classification(n_samples=30, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, 
random_state=0)\n estimator = MockImprovingEstimator(20)\n cv = KFold(n=30, n_folds=3)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))\n assert_array_equal(train_sizes, np.linspace(2, 20, 10))\n assert_array_almost_equal(train_scores.mean(axis=1),\n np.linspace(1.9, 1.0, 10))\n assert_array_almost_equal(test_scores.mean(axis=1),\n np.linspace(0.1, 1.0, 10))\n\n\ndef test_validation_curve():\n X, y = make_classification(n_samples=2, n_features=1, n_informative=1,\n n_redundant=0, n_classes=2,\n n_clusters_per_class=1, random_state=0)\n param_range = np.linspace(0, 1, 10)\n with warnings.catch_warnings(record=True) as w:\n train_scores, test_scores = validation_curve(\n MockEstimatorWithParameter(), X, y, param_name=\"param\",\n param_range=param_range, cv=2\n )\n if len(w) > 0:\n raise RuntimeError(\"Unexpected warning: %r\" % w[0].message)\n\n assert_array_almost_equal(train_scores.mean(axis=1), param_range)\n assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)\n","license":"bsd-3-clause"} {"repo_name":"irblsensitivity\/irblsensitivity","path":"scripts\/analysis\/MWU_Project_EMSE.py","copies":"1","size":"9231","content":"#-*- coding: utf-8 -*-\r\n'''\r\nCreated on 2017. 02. 12\r\nUpdated on 2017. 02. 12\r\n\r\n'''\r\nfrom __future__ import print_function\r\nimport os\r\nimport re\r\nimport matplotlib\r\n# Force matplotlib to not use any Xwindows backend.\r\nmatplotlib.use('Agg')\r\n\r\nfrom scipy.stats import mannwhitneyu, pearsonr\r\nfrom ExpBase import ExpBase\r\nimport numpy as np\r\nfrom commons import Subjects\r\n\r\n\r\nclass MWUTest(ExpBase):\r\n\ttechniques = ['BugLocator', 'BRTracer', 'BLUiR', 'AmaLgam', 'BLIA', 'Locus']\r\n\r\n\tvalidDigits = {\r\n\t\t'AvgLOC': 2, 'InvNSrc': 4, 'AvgCC': 4, 'SrcAvgDistTk': 2, 'SrcAvgNTk': 2, 'SrcRatioDict': 4, 'NSrc': 2, 'SrcNumCmt': 4, 'SrcNDistTk': 0, 'SrcLocalDistTk': 3, 'SrcRatioCmt': 4, 'SrcNumMhd': 4, \t'RatioEnum': 4,\r\n\t\t'RepAvgTk': 2, 'NReport': 0, 'RepNDistTk': 0, 'RepAvgDistTk': 3, 'RepAvgLocalTk':4, 'RepAvgCE': 4, 'RatioCode': 4, 'RatioSTrace': 4, \t'|STinterRT|': 0,\r\n\t\t'AvgMinIRf': 4, 'AvgMaxIRf': 4, 'AvgMeanIRf': 4, 'KSDist': 4, 'AvgUIRf': 4, 'AvgProdIRf': 4, \t'hasCE': 4,\r\n\t\t'hasSTrace': 4, 'hasCR': 4, 'hasEnum': 4,\r\n\t\t'NTk':2, 'NDistTk':3, 'NLocalTk':4, 'NDistCE':3\r\n\t}\r\n\r\n\tfeatureorders = {\r\n\t\t'01': ['AvgLOC', 'AvgCC', 'SrcAvgNTk', 'SrcAvgDistTk', 'SrcLocalDistTk', 'SrcNDistTk', 'NSrc', 'InvNSrc',\r\n\t\t\t 'SrcNumMhd',\r\n\t\t\t 'SrcNumCmt', 'SrcRatioCmt', 'SrcRatioDict'],\r\n\t\t'02': ['RatioEnum', 'RatioSTrace', 'RatioCode', 'RepNDistTk', 'RepAvgTk', 'RepAvgDistTk', 'RepAvgLocalTk', 'RepAvgCE',\r\n\t\t\t 'NReport'],\r\n\t\t'03': ['|STinterRT|', 'KSDist', 'AvgProdIRf', 'AvgMinIRf', 'AvgMaxIRf', 'AvgMeanIRf', 'AvgUIRf'],\r\n\t\t'04': ['hasEnum', 'hasSTrace', 'hasCR', 'hasCE'],\r\n\t\t'05': ['NTk', 'NDistTk', 'NLocalTk', 'NDistCE']\r\n\t}\r\n\r\n\tdef MWUtest(self, _dataA, _dataB, _bugsA=None, _bugsB=None):\r\n\t\t'''\r\n\t\tMann-Whitney U Test between IRBL technique results\r\n\t\t:param _nameA: The results of Type A\r\n\t\t:param _nameB: The results of Type B\r\n\t\t:param _bugsA: the count of bugs for each techniques\r\n\t\t:param _bugsB: the count of bugs for each techniques\r\n\t\t:return: {technique : pvalue, techinique: pvalue, ...}\r\n\t\t'''\r\n\r\n\t\tresults = {}\r\n\r\n\t\tfor idx in range(len(self.techniques)):\r\n\t\t\tfilteredDataA = [items[idx] for items in _dataA.values()]\r\n\t\t\tfilteredDataB = 
[items[idx] for items in _dataB.values()]\r\n\t\t\t#filteredDataA, labels = self.get_array_items(_dataA, idx)\r\n\t\t\t#filteredDataB, labels = self.get_array_items(_dataB, idx)\r\n\r\n\t\t\tif _bugsA is not None:\r\n\t\t\t\tif isinstance(_bugsA, dict) is True:\r\n\t\t\t\t\tfilteredDataA += ([0] * (_bugsA[self.techniques[idx]] - len(filteredDataA)))\r\n\t\t\t\telse:\r\n\t\t\t\t\tfilteredDataA += ([0] * (_bugsA - len(filteredDataA)))\r\n\t\t\tif _bugsB is not None:\r\n\t\t\t\tif isinstance(_bugsB, dict) is True:\r\n\t\t\t\t\tfilteredDataB += ([0] * (_bugsB[self.techniques[idx]] - len(filteredDataB)))\r\n\t\t\t\telse:\r\n\t\t\t\t\tfilteredDataB += ([0] * (_bugsB - len(filteredDataB)))\r\n\r\n\r\n\t\t\t#slope, intercept, r_value, p_value, stderr = stats.linregress(dataMAP, dataFeature)\r\n\t\t\tt_statistic, t_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided')\r\n\t\t\tl_statistic, l_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='less')\r\n\t\t\tg_statistic, g_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='greater')\r\n\r\n\t\t\tpvalue = min(t_pvalue , l_pvalue, g_pvalue)\r\n\t\t\t#statistic, pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided') # 'less', 'two-sided', 'greater'\r\n\r\n\t\t\tresults[self.techniques[idx]] = pvalue\r\n\r\n\t\treturn results\r\n\r\n\tdef get_technique_averages(self, _source, _counts):\r\n\t\t'''\r\n\r\n\t\t:param _source: project's bug results dict\r\n\t\t:param _count: original bug counts for each technique\r\n\t\t:return:\r\n\t\t'''\r\n\t\tresults = {}\r\n\t\tfor idx in range(len(self.techniques)):\r\n\t\t\tsumValue = 0\r\n\t\t\tfor itemID, item in _source.iteritems():\r\n\t\t\t\tsumValue += item[idx]\r\n\t\t\tresults[self.techniques[idx]] = sumValue \/ float(_counts[self.techniques[idx]])\r\n\t\treturn results\r\n\r\n\tdef compare_single_results(self, _basepath):\r\n\t\t'''\r\n\t\tfor Table 7 : single results\r\n\t\t:param _basepath:\r\n\t\t:return:\r\n\t\t'''\r\n\t\ttechinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)\r\n\r\n\t\tdef get_averages(_itemType):\r\n\t\t\tresults = {}\r\n\t\t\tfor tData in ['Old', 'New_Single']:\r\n\t\t\t\tfilepath = os.path.join(_basepath, u'%s_%s.txt' % (tData, _itemType))\r\n\t\t\t\ttitles, data = self.load_results_items(filepath, ['str'] * 3 + ['float'] * 6)\r\n\t\t\t\tfor group in data:\r\n\t\t\t\t\tif group not in results: results[group] = {}\r\n\t\t\t\t\tfor project in data[group]:\r\n\t\t\t\t\t\tCNTs = dict(zip(titles, CNTdata[group][project]))\r\n\t\t\t\t\t\tresults[group][project] = self.get_technique_averages(data[group][project], CNTs)\r\n\t\t\treturn results\r\n\r\n\t\tAPresults = get_averages('AP')\r\n\t\tTPresults = get_averages('TP')\r\n\t\tfeatures = self.extract_features(_basepath)\r\n\r\n\t\tprint(u'Technique Mann-Whitney U Test p-values')\r\n\t\tprint(u'\\t' + u'\\t\\t'.join(self.techniques))\r\n\t\tprint(u'Subject\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR')\r\n\t\tS = Subjects()\r\n\t\tS.groups.append(u'Previous')\r\n\t\tS.projects[u'Previous'] = [u'AspectJ', u'ZXing', u'PDE', u'JDT', u'SWT']\r\n\r\n\t\tfor group in S.groups:\r\n\t\t\tfor project in S.projects[group]:\r\n\t\t\t\ttext = u'%s' % project\r\n\t\t\t\tAPmax = self.techniques[0]\r\n\t\t\t\tTPmax = self.techniques[0]\r\n\t\t\t\tfor tech in self.techniques:\r\n\t\t\t\t\tif APresults[group][project][APmax] 
< APresults[group][project][tech]:\r\n\t\t\t\t\t\tAPmax = tech\r\n\t\t\t\t\tif TPresults[group][project][TPmax] < TPresults[group][project][tech]:\r\n\t\t\t\t\t\tTPmax = tech\r\n\r\n\t\t\t\tfor tech in self.techniques:\r\n\t\t\t\t\tif APmax != tech: text += u' & %.4f' % APresults[group][project][tech]\r\n\t\t\t\t\telse: text += u' & \\\\cellcolor{blue!25}\\\\textbf{%.4f}' % APresults[group][project][tech]\r\n\r\n\t\t\t\t\tif TPmax != tech: text += u' & %.4f' % TPresults[group][project][tech]\r\n\t\t\t\t\telse: text += u' & \\\\cellcolor{green!25}\\\\textbf{%.4f}' % TPresults[group][project][tech]\r\n\r\n\t\t\t\t# if group in features:\r\n\t\t\t\t# \tfor fid in [u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk']:\r\n\t\t\t\t# \t\ttext += u' & %.4f' % features[group][project][fid]\r\n\t\t\t\t# \ttext += u' \\\\\\\\'\r\n\t\t\t\t# else:\r\n\t\t\t\t# \ttext += u' & & & & \\\\\\\\'\r\n\t\t\t\ttext += u' \\\\\\\\'\r\n\t\t\t\tprint(text)\r\n\t\tpass\r\n\r\n\tdef compare_multi_results(self, _basepath):\r\n\t\t'''\r\n\t\tfor Table 7 : single results\r\n\t\t:param _basepath:\r\n\t\t:return:\r\n\t\t'''\r\n\t\ttechinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)\r\n\r\n\t\tdef get_average_mwu(_itemType):\r\n\t\t\tresults = {}\r\n\t\t\tmulti = os.path.join(_basepath, u'New_Multiple_%s.txt' % _itemType)\r\n\t\t\ttitles, dataM = self.load_results_items(multi, ['str'] * 3 + ['float'] * 6)\r\n\t\t\t# MWUresults = {}\r\n\t\t\t# single = os.path.join(_basepath, u'New_Single_%s.txt' % _itemType)\r\n\t\t\t# titles, dataS = self.load_results_items(single, ['str'] * 3 + ['float'] * 6)\r\n\t\t\tfor group in dataM:\r\n\t\t\t\tif group not in results: results[group] = {}\r\n\t\t\t\t#if group not in MWUresults: MWUresults[group] = {}\r\n\t\t\t\tfor project in dataM[group]:\r\n\t\t\t\t\tCNTs = dict(zip(titles, CNTdata[group][project]))\r\n\t\t\t\t\tresults[group][project] = self.get_technique_averages(dataM[group][project], CNTs)\r\n\t\t\t\t\t#MWUresults[group][project] = self.MWUtest(dataS[group][project], dataM[group][project], CNTs, CNTs)\r\n\r\n\t\t\treturn results #, MWUresults\r\n\r\n\t\tAPresults = get_average_mwu('AP')\r\n\t\tTPresults = get_average_mwu('TP')\r\n\r\n\t\tprint(u'')\r\n\t\tprint(u'\\t' + u'\\t\\t'.join(self.techniques))\r\n\t\tprint(u'Subject\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR\\tMAP\\tMRR')\r\n\t\tS = Subjects()\r\n\t\tfor group in S.groups:\r\n\t\t\tfor project in S.projects[group]:\r\n\t\t\t\ttext = u'%s' % project\r\n\t\t\t\tAPmax = self.techniques[0]\r\n\t\t\t\tTPmax = self.techniques[0]\r\n\t\t\t\tfor tech in self.techniques:\r\n\t\t\t\t\tif APresults[group][project][APmax] < APresults[group][project][tech]:\r\n\t\t\t\t\t\tAPmax = tech\r\n\t\t\t\t\tif TPresults[group][project][TPmax] < TPresults[group][project][tech]:\r\n\t\t\t\t\t\tTPmax = tech\r\n\r\n\t\t\t\tfor tech in self.techniques:\r\n\t\t\t\t\tif APmax != tech: text += u' & %.4f' % APresults[group][project][tech]\r\n\t\t\t\t\telse: text += u' & \\\\cellcolor{blue!25}\\\\textbf{%.4f}' % APresults[group][project][tech]\r\n\r\n\t\t\t\t\tif TPmax != tech:\ttext += u' & %.4f ' % TPresults[group][project][tech]\r\n\t\t\t\t\telse:\ttext += u' & \\\\cellcolor{green!25}\\\\textbf{%.4f} ' % TPresults[group][project][tech]\r\n\r\n\t\t\t\tprint(text, end=u'')\r\n\t\t\t\tprint(u' \\\\\\\\')\r\n\t\tpass\r\n\r\n\tdef extract_features(self, _basepath):\r\n\t\ttitles, data = self.load_results(os.path.join(_basepath, u'02_PW_Bug_Features.txt'), ['str'] * 2 + ['int'] 
+ ['float'] * 3 + ['int', 'float'] )\r\n\r\n\t\tfor group in data:\r\n\t\t\tfor project in data[group]:\r\n\t\t\t\titem = data[group][project]\r\n\t\t\t\tdata[group][project] = dict(zip([u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk'], [item[1], item[2], item[3], item[5]]))\r\n\t\treturn data\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################################################\r\n###############################################################################################################\r\nif __name__ == \"__main__\":\r\n\tbasepath = u'\/mnt\/exp\/Bug\/analysis\/'\r\n\tobj = MWUTest()\r\n\tobj.compare_multi_results(basepath)\r\n\tobj.compare_single_results(basepath)\r\n\t# obj.compare_test(basepath)\r\n\t#obj.calc_pearson(basepath)\r\n\t#obj.compare_dup_results(basepath)\r\n\r\n","license":"apache-2.0"} {"repo_name":"fmfn\/UnbalancedDataset","path":"examples\/under-sampling\/plot_illustration_tomek_links.py","copies":"2","size":"3180","content":"\"\"\"\n==============================================\nIllustration of the definition of a Tomek link\n==============================================\n\nThis example illustrates what is a Tomek link.\n\"\"\"\n\n# Authors: Guillaume Lemaitre \n# License: MIT\n\n# %%\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_context(\"poster\")\n\n# %% [markdown]\n# This function allows to make nice plotting\n\n# %%\n\n\ndef make_plot_despine(ax):\n sns.despine(ax=ax, offset=10)\n ax.set_xlim([0, 3])\n ax.set_ylim([0, 3])\n ax.set_xlabel(r\"$X_1$\")\n ax.set_ylabel(r\"$X_2$\")\n ax.legend(loc=\"lower right\")\n\n\n# %% [markdown]\n# We will generate some toy data that illustrates how\n# :class:`~imblearn.under_sampling.TomekLinks` is used to clean a dataset.\n\n# %%\nimport numpy as np\n\nrng = np.random.RandomState(18)\n\nX_minority = np.transpose(\n [[1.1, 1.3, 1.15, 0.8, 0.55, 2.1], [1.0, 1.5, 1.7, 2.5, 0.55, 1.9]]\n)\nX_majority = np.transpose(\n [\n [2.1, 2.12, 2.13, 2.14, 2.2, 2.3, 2.5, 2.45],\n [1.5, 2.1, 2.7, 0.9, 1.0, 1.4, 2.4, 2.9],\n ]\n)\n\n# %% [markdown]\n# In the figure above, the samples highlighted in green form a Tomek link since\n# they are of different classes and are nearest neighbors of each other.\n\nfig, ax = plt.subplots(figsize=(8, 8))\nax.scatter(\n X_minority[:, 0],\n X_minority[:, 1],\n label=\"Minority class\",\n s=200,\n marker=\"_\",\n)\nax.scatter(\n X_majority[:, 0],\n X_majority[:, 1],\n label=\"Majority class\",\n s=200,\n marker=\"+\",\n)\n\n# highlight the samples of interest\nax.scatter(\n [X_minority[-1, 0], X_majority[1, 0]],\n [X_minority[-1, 1], X_majority[1, 1]],\n label=\"Tomek link\",\n s=200,\n alpha=0.3,\n)\nmake_plot_despine(ax)\nfig.suptitle(\"Illustration of a Tomek link\")\nfig.tight_layout()\n\n# %% [markdown]\n# We can run the :class:`~imblearn.under_sampling.TomekLinks` sampling to\n# remove the corresponding samples. If `sampling_strategy='auto'` only the\n# sample from the majority class will be removed. 
If `sampling_strategy='all'`\n# both samples will be removed.\n\n# %%\nfrom imblearn.under_sampling import TomekLinks\n\nfig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))\n\nsamplers = {\n \"Removing only majority samples\": TomekLinks(sampling_strategy=\"auto\"),\n \"Removing all samples\": TomekLinks(sampling_strategy=\"all\"),\n}\n\nfor ax, (title, sampler) in zip(axs, samplers.items()):\n X_res, y_res = sampler.fit_resample(\n np.vstack((X_minority, X_majority)),\n np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0]),\n )\n ax.scatter(\n X_res[y_res == 0][:, 0],\n X_res[y_res == 0][:, 1],\n label=\"Minority class\",\n s=200,\n marker=\"_\",\n )\n ax.scatter(\n X_res[y_res == 1][:, 0],\n X_res[y_res == 1][:, 1],\n label=\"Majority class\",\n s=200,\n marker=\"+\",\n )\n\n # highlight the samples of interest\n ax.scatter(\n [X_minority[-1, 0], X_majority[1, 0]],\n [X_minority[-1, 1], X_majority[1, 1]],\n label=\"Tomek link\",\n s=200,\n alpha=0.3,\n )\n\n ax.set_title(title)\n make_plot_despine(ax)\nfig.tight_layout()\n\nplt.show()\n","license":"mit"} {"repo_name":"waylonflinn\/bquery","path":"bquery\/benchmarks\/bench_groupby.py","copies":"2","size":"2465","content":"from __future__ import print_function\n# bench related imports\nimport numpy as np\nimport shutil\nimport bquery\nimport pandas as pd\nimport itertools as itt\nimport cytoolz\nimport cytoolz.dicttoolz\nfrom toolz import valmap, compose\nfrom cytoolz.curried import pluck\nimport blaze as blz\n# other imports\nimport contextlib\nimport os\nimport time\n\ntry:\n # Python 2\n from itertools import izip\nexcept ImportError:\n # Python 3\n izip = zip\n\nt_elapsed = 0.0\n\n\n@contextlib.contextmanager\ndef ctime(message=None):\n \"Counts the time spent in some context\"\n global t_elapsed\n t_elapsed = 0.0\n print('\\n')\n t = time.time()\n yield\n if message:\n print(message + \": \", end='')\n t_elapsed = time.time() - t\n print(round(t_elapsed, 4), \"sec\")\n\n\nga = itt.cycle(['ES', 'NL'])\ngb = itt.cycle(['b1', 'b2', 'b3', 'b4', 'b5'])\ngx = itt.cycle([1, 2])\ngy = itt.cycle([-1, -2])\nrootdir = 'bench-data.bcolz'\nif os.path.exists(rootdir):\n shutil.rmtree(rootdir)\n\nn_rows = 1000000\nprint('Rows: ', n_rows)\n\n# -- data\nz = np.fromiter(((a, b, x, y) for a, b, x, y in izip(ga, gb, gx, gy)),\n dtype='S2,S2,i8,i8', count=n_rows)\n\nct = bquery.ctable(z, rootdir=rootdir, )\nprint(ct)\n\n# -- pandas --\ndf = pd.DataFrame(z)\nwith ctime(message='pandas'):\n result = df.groupby(['f0'])['f2'].sum()\nprint(result)\nt_pandas = t_elapsed\n\n# -- cytoolz --\nwith ctime(message='cytoolz over bcolz'):\n # In Memory Split-Apply-Combine\n # http:\/\/toolz.readthedocs.org\/en\/latest\/streaming-analytics.html?highlight=reduce#split-apply-combine-with-groupby-and-reduceby\n r = cytoolz.groupby(lambda row: row.f0, ct)\n result = valmap(compose(sum, pluck(2)), r)\nprint('x{0} slower than pandas'.format(round(t_elapsed \/ t_pandas, 2)))\nprint(result)\n\n# -- blaze + bcolz --\nblaze_data = blz.Data(ct.rootdir)\nexpr = blz.by(blaze_data.f0, sum_f2=blaze_data.f2.sum())\nwith ctime(message='blaze over bcolz'):\n result = blz.compute(expr)\nprint('x{0} slower than pandas'.format(round(t_elapsed \/ t_pandas, 2)))\nprint(result)\n\n# -- bquery --\nwith ctime(message='bquery over bcolz'):\n result = ct.groupby(['f0'], ['f2'])\nprint('x{0} slower than pandas'.format(round(t_elapsed \/ t_pandas, 2)))\nprint(result)\n\nct.cache_factor(['f0'], refresh=True)\nwith ctime(message='bquery over bcolz (factorization cached)'):\n result = 
ct.groupby(['f0'], ['f2'])\nprint('x{0} slower than pandas'.format(round(t_elapsed \/ t_pandas, 2)))\nprint(result)\n\nshutil.rmtree(rootdir)\n","license":"bsd-3-clause"} {"repo_name":"HiSPARC\/sapphire","path":"scripts\/simulations\/analyze_shower_front.py","copies":"1","size":"5153","content":"import numpy as np\nimport tables\n\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import scoreatpercentile\n\nfrom artist import GraphArtist\nfrom pylab import *\n\nimport matplotlib.pyplot as plt\nimport utils\n\nUSE_TEX = False\n\n# For matplotlib plots\nif USE_TEX:\n rcParams['font.serif'] = 'Computer Modern'\n rcParams['font.sans-serif'] = 'Computer Modern'\n rcParams['font.family'] = 'sans-serif'\n rcParams['figure.figsize'] = [4 * x for x in (1, 2. \/ 3)]\n rcParams['figure.subplot.left'] = 0.175\n rcParams['figure.subplot.bottom'] = 0.175\n rcParams['font.size'] = 10\n rcParams['legend.fontsize'] = 'small'\n rcParams['text.usetex'] = True\n\n\ndef main():\n global data\n data = tables.open_file('master-ch4v2.h5', 'r')\n #utils.set_suffix('E_1PeV')\n\n #scatterplot_core_distance_vs_time()\n #median_core_distance_vs_time()\n boxplot_core_distance_vs_time()\n #hists_core_distance_vs_time()\n plot_front_passage()\n\n\ndef scatterplot_core_distance_vs_time():\n plt.figure()\n\n sim = data.root.showers.E_1PeV.zenith_0\n electrons = sim.electrons\n\n plt.loglog(electrons[:]['core_distance'], electrons[:]['arrival_time'], ',')\n plt.xlim(1e0, 1e2)\n plt.ylim(1e-3, 1e3)\n\n plt.xlabel(\"Core distance [m]\")\n plt.ylabel(\"Arrival time [ns]\")\n\n utils.title(\"Shower front timing structure\")\n utils.saveplot()\n\n\ndef median_core_distance_vs_time():\n plt.figure()\n plot_and_fit_statistic(lambda a: scoreatpercentile(a, 25))\n plot_and_fit_statistic(lambda a: scoreatpercentile(a, 75))\n\n utils.title(\"Shower front timing structure (25, 75 %)\")\n utils.saveplot()\n plt.xlabel(\"Core distance [m]\")\n plt.ylabel(\"Median arrival time [ns]\")\n legend(loc='lower right')\n\n\ndef plot_and_fit_statistic(func):\n sim = data.root.showers.E_1PeV.zenith_0\n electrons = sim.electrons\n\n bins = np.logspace(0, 2, 25)\n x, y = [], []\n for low, high in zip(bins[:-1], bins[1:]):\n sel = electrons.read_where('(low < core_distance) & (core_distance <= high)')\n statistic = func(sel[:]['arrival_time'])\n x.append(np.mean([low, high]))\n y.append(statistic)\n\n plt.loglog(x, y)\n\n logx = log10(x)\n logy = log10(y)\n logf = lambda x, a, b: a * x + b\n g = lambda x, a, b: 10 ** logf(log10(x), a, b)\n popt, pcov = curve_fit(logf, logx, logy)\n plot(x, g(x, *popt), label=\"f(x) = %.2e * x ^ %.2e\" % (10 ** popt[1],\n popt[0]))\n\n\ndef boxplot_core_distance_vs_time():\n plt.figure()\n\n sim = data.root.showers.E_1PeV.zenith_0.shower_0\n leptons = sim.leptons\n\n #bins = np.logspace(0, 2, 25)\n bins = np.linspace(0, 100, 15)\n x, arrival_time, widths = [], [], []\n t25, t50, t75 = [], [], []\n for low, high in zip(bins[:-1], bins[1:]):\n sel = leptons.read_where('(low < core_distance) & (core_distance <= high)')\n x.append(np.mean([low, high]))\n arrival_time.append(sel[:]['arrival_time'])\n widths.append((high - low) \/ 2)\n ts = sel[:]['arrival_time']\n t25.append(scoreatpercentile(ts, 25))\n t50.append(scoreatpercentile(ts, 50))\n t75.append(scoreatpercentile(ts, 75))\n\n fill_between(x, t25, t75, color='0.75')\n plot(x, t50, 'o-', color='black')\n\n plt.xlabel(\"Core distance [m]\")\n plt.ylabel(\"Arrival time [ns]\")\n\n #utils.title(\"Shower front timing structure\")\n utils.saveplot()\n\n graph = 
GraphArtist()\n graph.plot(x, t50, linestyle=None)\n graph.shade_region(x, t25, t75)\n graph.set_xlabel(r\"Core distance [\\si{\\meter}]\")\n graph.set_ylabel(r\"Arrival time [\\si{\\nano\\second}]\")\n graph.set_ylimits(0, 30)\n graph.set_xlimits(0, 100)\n graph.save('plots\/front-passage-vs-R')\n\n\ndef hists_core_distance_vs_time():\n plt.figure()\n\n sim = data.root.showers.E_1PeV.zenith_0\n electrons = sim.electrons\n\n bins = np.logspace(0, 2, 5)\n for low, high in zip(bins[:-1], bins[1:]):\n sel = electrons.read_where('(low < core_distance) & (core_distance <= high)')\n arrival_time = sel[:]['arrival_time']\n plt.hist(arrival_time, bins=np.logspace(-2, 3, 50), histtype='step',\n label=\"%.2f <= log10(R) < %.2f\" % (np.log10(low),\n np.log10(high)))\n\n plt.xscale('log')\n\n plt.xlabel(\"Arrival Time [ns]\")\n plt.ylabel(\"Count\")\n plt.legend(loc='upper left')\n\n utils.title(\"Shower front timing structure\")\n utils.saveplot()\n\n\ndef plot_front_passage():\n sim = data.root.showers.E_1PeV.zenith_0.shower_0\n leptons = sim.leptons\n R = 40\n dR = 2\n low = R - dR\n high = R + dR\n global t\n t = leptons.read_where('(low < core_distance) & (core_distance <= high)',\n field='arrival_time')\n\n n, bins, patches = hist(t, bins=linspace(0, 30, 31), histtype='step')\n\n graph = GraphArtist()\n graph.histogram(n, bins)\n graph.set_xlabel(r\"Arrival time [\\si{\\nano\\second}]\")\n graph.set_ylabel(\"Number of leptons\")\n graph.set_ylimits(min=0)\n graph.set_xlimits(0, 30)\n graph.save('plots\/front-passage')\n\n\nif __name__ == '__main__':\n main()\n","license":"gpl-3.0"} {"repo_name":"jmbeuken\/abinit","path":"scripts\/post_processing\/abinit_eignc_to_bandstructure.py","copies":"3","size":"47417","content":"#!\/usr\/bin\/python\n#=================================================================#\n# Script to plot the bandstructure from an abinit bandstructure #\n# _EIG.nc netcdf file or from a wannier bandstructure, or from #\n# an _EIG.nc file+GW file+ bandstructure _EIG.nc file #\n#=================================================================#\n\n#########\n#IMPORTS#\n#########\n\nimport numpy as N\nimport matplotlib.pyplot as P\nimport netCDF4 as nc\nimport sys\nimport os\nimport argparse\nimport time\n\n#############\n##VARIABLES##\n#############\n\nclass VariableContainer:pass\n\n#Constants\ncsts = VariableContainer()\n\ncsts.hartree2ev = N.float(27.211396132)\ncsts.ev2hartree = N.float(1\/csts.hartree2ev)\ncsts.sqrtpi = N.float(N.sqrt(N.pi))\ncsts.invsqrtpi = N.float(1\/csts.sqrtpi)\ncsts.TOLKPTS = N.float(0.00001)\n\n###########\n##CLASSES##\n###########\n\nclass PolynomialFit(object):\n def __init__(self):\n self.degree = 2\n\nclass EigenvalueContainer(object):\n nsppol = None\n nkpt = None\n mband = None\n eigenvalues = None\n units = None\n wtk = None\n filename = None\n filefullpath = None\n bd_indices = None\n eigenvalue_type = None\n kpoints = None\n #kpoint_sampling_type: can be Monkhorst-Pack or Bandstructure\n KPT_W90_TOL = N.float(1.0e-6)\n KPT_DFT_TOL = N.float(1.0e-8)\n kpoint_sampling_type = 'Monkhorst-Pack'\n inputgvectors = None\n gvectors = None\n special_kpoints = None\n special_kpoints_names = None\n special_kpoints_indices = None\n kpoint_path_values = None\n kpoint_reduced_path_values = None\n kpoint_path_length = None\n #reduced_norm = None\n norm_paths = None\n norm_reduced_paths = None\n def __init__(self,directory=None,filename=None):\n if filename == None:return\n if directory == None:directory='.'\n self.filename = filename\n self.filefullpath = 
'%s\/%s' %(directory,filename)\n self.file_open(self.filefullpath)\n def set_kpoint_sampling_type(self,kpoint_sampling_type):\n if kpoint_sampling_type != 'Monkhorst-Pack' and kpoint_sampling_type != 'Bandstructure':\n print 'ERROR: kpoint_sampling_type \"%s\" does not exists' %kpoint_sampling_type\n print ' it should be \"Monkhorst-Pack\" or \"Bandstructure\" ... exit'\n sys.exit()\n self.kpoint_sampling_type = kpoint_sampling_type\n def correct_kpt(self,kpoint,tolerance=N.float(1.0e-6)):\n kpt_correct = N.array(kpoint,N.float)\n changed = False\n for ii in range(3):\n if N.allclose(kpoint[ii],N.float(1.0\/3.0),atol=tolerance):\n kpt_correct[ii] = N.float(1.0\/3.0)\n changed = True\n elif N.allclose(kpoint[ii],N.float(1.0\/6.0),atol=tolerance):\n kpt_correct[ii] = N.float(1.0\/6.0)\n changed = True\n elif N.allclose(kpoint[ii],N.float(-1.0\/6.0),atol=tolerance):\n kpt_correct[ii] = N.float(-1.0\/6.0)\n changed = True\n elif N.allclose(kpoint[ii],N.float(-1.0\/3.0),atol=tolerance):\n kpt_correct[ii] = N.float(-1.0\/3.0)\n changed = True\n if changed:\n print 'COMMENT: kpoint %15.12f %15.12f %15.12f has been changed to %15.12f %15.12f %15.12f' %(kpoint[0],kpoint[1],kpoint[2],kpt_correct[0],kpt_correct[1],kpt_correct[2])\n return kpt_correct\n def find_special_kpoints(self,gvectors=None):\n if self.kpoint_sampling_type != 'Bandstructure':\n print 'ERROR: special kpoints are usefull only for bandstructures ... returning find_special_kpoints'\n return\n if self.eigenvalue_type == 'W90':\n correct_kpt_tolerance = N.float(1.0e-4)\n KPT_TOL = self.KPT_W90_TOL\n elif self.eigenvalue_type == 'DFT':\n correct_kpt_tolerance = N.float(1.0e-6)\n KPT_TOL = self.KPT_DFT_TOL\n else:\n print 'ERROR: eigenvalue_type is \"%s\" while it should be \"W90\" or \"DFT\" ... returning find_special_kpoints' %self.eigenvalue_type\n return\n if gvectors == None:\n self.inputgvectors = False\n self.gvectors = N.identity(3,N.float)\n else:\n if N.shape(gvectors) != (3, 3):\n print 'ERROR: wrong gvectors ... 
exiting now'\n sys.exit()\n self.inputgvectors = True\n self.gvectors = gvectors\n full_kpoints = N.zeros((self.nkpt,3),N.float)\n for ikpt in range(self.nkpt):\n full_kpoints[ikpt,:] = self.kpoints[ikpt,0]*self.gvectors[0,:]+self.kpoints[ikpt,1]*self.gvectors[1,:]+self.kpoints[ikpt,2]*self.gvectors[2,:]\n delta_kpt = full_kpoints[1,:]-full_kpoints[0,:]\n self.special_kpoints_indices = list()\n self.special_kpoints = list()\n self.special_kpoints_indices.append(0)\n self.special_kpoints.append(self.correct_kpt(self.kpoints[0,:],tolerance=correct_kpt_tolerance))\n for ikpt in range(1,self.nkpt-1):\n thisdelta = full_kpoints[ikpt+1,:]-full_kpoints[ikpt,:]\n if not N.allclose(thisdelta,delta_kpt,atol=KPT_TOL):\n delta_kpt = thisdelta\n self.special_kpoints_indices.append(ikpt)\n self.special_kpoints.append(self.correct_kpt(self.kpoints[ikpt,:],tolerance=correct_kpt_tolerance))\n self.special_kpoints_indices.append(N.shape(self.kpoints)[0]-1)\n self.special_kpoints.append(self.correct_kpt(self.kpoints[-1,:],tolerance=correct_kpt_tolerance))\n print 'Special Kpoints : '\n print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(1,self.kpoints[0,:])\n self.norm_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)\n self.norm_reduced_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)\n for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):\n self.norm_paths[ispkpt-1] = N.linalg.norm(full_kpoints[self.special_kpoints_indices[ispkpt]]-full_kpoints[self.special_kpoints_indices[ispkpt-1]])\n self.norm_reduced_paths[ispkpt-1] = N.linalg.norm(self.special_kpoints[ispkpt]-self.special_kpoints[ispkpt-1])\n print ' {2:d}-{3:d} path length : {0: 8.8f} | reduced path length : {1: 8.8f}'.\\\n format(self.norm_paths[ispkpt-1],self.norm_reduced_paths[ispkpt-1],ispkpt,ispkpt+1)\n print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(ispkpt+1,self.kpoints[self.special_kpoints_indices[ispkpt],:])\n self.kpoint_path_length = N.sum(self.norm_paths)\n self.kpoint_reduced_path_length = N.sum(self.norm_reduced_paths)\n self.normalized_kpoint_path_norm = self.norm_paths\/self.kpoint_path_length\n self.normalized_kpoint_reduced_path_norm = self.norm_reduced_paths\/self.kpoint_reduced_path_length\n \n kptredpathval = list()\n kptpathval = list()\n kptredpathval.append(N.float(0.0))\n kptpathval.append(N.float(0.0))\n curlen = N.float(0.0)\n redcurlen = N.float(0.0)\n for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):\n kptredpathval.extend(N.linspace(redcurlen,redcurlen+self.norm_reduced_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])\n kptpathval.extend(N.linspace(curlen,curlen+self.norm_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])\n redcurlen = redcurlen + self.norm_reduced_paths[ispkpt-1]\n curlen = curlen + self.norm_paths[ispkpt-1]\n self.kpoint_path_values = N.array(kptpathval,N.float)\n self.kpoint_reduced_path_values = N.array(kptredpathval,N.float)\n self.normalized_kpoint_path_values = self.kpoint_path_values\/self.kpoint_path_length\n self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values\/self.kpoint_reduced_path_length\n self.special_kpoints = N.array(self.special_kpoints,N.float)\n def file_open(self,filefullpath):\n if filefullpath[-3:] == '_GW':\n self.gw_file_open(filefullpath)\n elif filefullpath[-7:] == '_EIG.nc':\n self.nc_eig_open(filefullpath)\n elif filefullpath[-4:] == '.dat':\n 
self.wannier_bs_file_open(filefullpath)\n def has_eigenvalue(self,nsppol,isppol,kpoint,iband):\n if self.nsppol != nsppol:\n return False\n for ikpt in range(self.nkpt):\n if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \\\n N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \\\n N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:\n if iband >= self.bd_indices[isppol,ikpt,0]-1 and iband < self.bd_indices[isppol,ikpt,1]:\n return True\n return False\n return False\n def get_eigenvalue(self,nsppol,isppol,kpoint,iband):\n for ikpt in range(self.nkpt):\n if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \\\n N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \\\n N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:\n return self.eigenvalues[isppol,ikpt,iband]\n def wannier_bs_file_open(self,filefullpath):\n if not (os.path.isfile(filefullpath)):\n print 'ERROR : file \"%s\" does not exists' %filefullpath\n print '... exiting now ...'\n sys.exit()\n print 'WARNING: no spin polarization reading yet for Wannier90 bandstructure files!'\n self.eigenvalue_type = 'W90'\n self.nsppol = None\n self.nkpt = None\n self.mband = None\n self.eigenvalues = None\n self.units = None\n self.filefullpath = filefullpath\n reader = open(self.filefullpath,'r')\n filedata = reader.readlines()\n reader.close()\n for iline in range(len(filedata)):\n if filedata[iline].strip() == '':\n self.nkpt = iline\n break\n self.mband = N.int(len(filedata)\/self.nkpt)\n self.nsppol = 1\n self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)\n self.kpoints = N.zeros([self.nkpt,3],N.float)\n iline = 0\n kpt_file = '%s.kpt' %filefullpath[:-4]\n if os.path.isfile(kpt_file):\n reader = open(kpt_file,'r')\n kptdata = reader.readlines()\n reader.close()\n if N.int(kptdata[0]) != self.nkpt:\n print 'ERROR : the number of kpoints in file \"%s\" is not the same as in \"%s\" ... exit' %(self.filefullpath,kpt_file)\n sys.exit()\n for ikpt in range(self.nkpt):\n linesplit = kptdata[ikpt+1].split()\n self.kpoints[ikpt,0] = N.float(linesplit[0])\n self.kpoints[ikpt,1] = N.float(linesplit[1])\n self.kpoints[ikpt,2] = N.float(linesplit[2])\n else:\n for ikpt in range(self.nkpt):\n self.kpoints[ikpt,0] = N.float(filedata[ikpt].split()[0])\n for iband in range(self.mband):\n for ikpt in range(self.nkpt):\n self.eigenvalues[0,ikpt,iband] = N.float(filedata[iline].split()[1])\n iline = iline+1\n iline = iline+1\n self.eigenvalues = self.eigenvalues*csts.ev2hartree\n self.units = 'Hartree'\n def gw_file_open(self,filefullpath):\n if not (os.path.isfile(filefullpath)):\n print 'ERROR : file \"%s\" does not exists' %filefullpath\n print '... 
exiting now ...'\n sys.exit()\n self.eigenvalue_type = 'GW'\n self.nsppol = None\n self.nkpt = None\n self.mband = None\n self.eigenvalues = None\n self.units = None\n self.filefullpath = filefullpath\n reader = open(self.filefullpath,'r')\n filedata = reader.readlines()\n reader.close()\n self.nkpt = N.int(filedata[0].split()[0])\n self.kpoints = N.ones([self.nkpt,3],N.float)\n self.nsppol = N.int(filedata[0].split()[1])\n self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)\n icur = 1\n nbd_kpt = N.zeros([self.nsppol,self.nkpt],N.int)\n for isppol in range(self.nsppol):\n for ikpt in range(self.nkpt):\n self.kpoints[ikpt,:] = N.array(filedata[icur].split()[:],N.float)\n icur = icur + 1\n nbd_kpt[isppol,ikpt] = N.int(filedata[icur])\n self.bd_indices[isppol,ikpt,0] = N.int(filedata[icur+1].split()[0])\n self.bd_indices[isppol,ikpt,1] = N.int(filedata[icur+nbd_kpt[isppol,ikpt]].split()[0])\n icur = icur + nbd_kpt[isppol,ikpt] + 1\n self.mband = N.max(self.bd_indices[:,:,1])\n self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)\n self.eigenvalues[:,:,:] = N.nan\n ii = 3\n for isppol in range(self.nsppol):\n for ikpt in range(self.nkpt):\n for iband in range(self.bd_indices[isppol,ikpt,0]-1,self.bd_indices[isppol,ikpt,1]):\n self.eigenvalues[isppol,ikpt,iband] = N.float(filedata[ii].split()[1])\n ii = ii + 1\n ii = ii + 2\n self.eigenvalues = csts.ev2hartree*self.eigenvalues\n self.units = 'Hartree'\n def pfit_gw_file_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):\n if filename == None:return\n if directory == None:directory='.'\n filefullpath = '%s\/%s' %(directory,filename)\n if (os.path.isfile(filefullpath)):\n user_input = raw_input('WARNING : file \"%s\" exists, do you want to overwrite it ? 
(y\/n)' %filefullpath)\n if not (user_input == 'y' or user_input == 'Y'):\n return\n writer = open(filefullpath,'w')\n writer.write('%12s%12s\\n' %(self.nkpt,self.nsppol))\n if gwec == None:\n for ikpt in range(self.nkpt):\n for isppol in range(self.nsppol):\n writer.write('%10.6f%10.6f%10.6f\\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))\n writer.write('%4i\\n' %(bdgw[1]-bdgw[0]+1))\n for iband in range(bdgw[0]-1,bdgw[1]):\n delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])\n for ipivot in range(len(energy_pivots)):\n if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:\n delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])\n break\n writer.write('%6i%9.4f%9.4f%9.4f\\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))\n else:\n for ikpt in range(self.nkpt):\n for isppol in range(self.nsppol):\n writer.write('%10.6f%10.6f%10.6f\\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))\n writer.write('%4i\\n' %(bdgw[1]-bdgw[0]+1))\n for iband in range(bdgw[0]-1,bdgw[1]):\n if gwec.has_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband):\n gw_eig = gwec.get_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband)\n writer.write('%6i%9.4f%9.4f%9.4f\\n' %(iband+1,csts.hartree2ev*gw_eig,csts.hartree2ev*(gw_eig-self.eigenvalues[isppol,ikpt,iband]),0.0))\n else:\n delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])\n for ipivot in range(len(energy_pivots)):\n if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:\n delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])\n break\n writer.write('%6i%9.4f%9.4f%9.4f\\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))\n writer.close()\n def pfit_dft_to_gw_bs_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):\n if filename == None:return\n if directory == None:directory='.'\n filefullpath = '%s\/%s' %(directory,filename)\n if (os.path.isfile(filefullpath)):\n user_input = raw_input('WARNING : file \"%s\" exists, do you want to overwrite it ? (y\/n)' %filefullpath)\n if not (user_input == 'y' or user_input == 'Y'):\n return\n writer = open(filefullpath,'w')\n if gwec == None:\n for ikpt in range(self.nkpt):\n writer.write('%s' %ikpt)\n for isppol in range(self.nsppol):\n for iband in range(bdgw[0]-1,bdgw[1]):\n delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])\n for ipivot in range(len(energy_pivots)):\n if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:\n delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])\n break\n writer.write(' %s' %(csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta))\n writer.write('\\n')\n else:\n print 'NOT SUPPORTED YET'\n sys.exit()\n writer.close()\n def nc_eig_open(self,filefullpath):\n if not (os.path.isfile(filefullpath)):\n print 'ERROR : file \"%s\" does not exists' %filefullpath\n print '... 
exiting now ...'\n sys.exit()\n ncdata = nc.Dataset(filefullpath)\n self.eigenvalue_type = 'DFT'\n self.nsppol = None\n self.nkpt = None\n self.mband = None\n self.eigenvalues = None\n self.units = None\n self.filefullpath = filefullpath\n for dimname,dimobj in ncdata.dimensions.iteritems():\n if dimname == 'nsppol':self.nsppol = N.int(len(dimobj))\n if dimname == 'nkpt':self.nkpt = N.int(len(dimobj))\n if dimname == 'mband':self.mband = N.int(len(dimobj))\n for varname in ncdata.variables:\n if varname == 'Eigenvalues':\n varobj = ncdata.variables[varname]\n varshape = N.shape(varobj[:])\n self.units = None\n for attrname in varobj.ncattrs():\n if attrname == 'units':\n self.units = varobj.getncattr(attrname)\n if self.units == None:\n print 'WARNING : units are not specified'\n print '... assuming \"Hartree\" units ...'\n self.units = 'Hartree'\n elif self.units != 'Hartree':\n print 'ERROR : units are unknown : \"%s\"' %self.units\n print '... exiting now ...'\n sys.exit()\n self.eigenvalues = N.reshape(N.array(varobj,N.float),varshape)\n self.nsppol = varshape[0]\n self.nkpt = varshape[1]\n self.kpoints = -1*N.ones((self.nkpt,3),N.float)\n self.mband = varshape[2]\n self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)\n self.bd_indices[:,:,0] = 1\n self.bd_indices[:,:,1] = self.mband\n break\n for varname in ncdata.variables:\n if varname == 'Kptns':\n varobj = ncdata.variables[varname]\n varshape = N.shape(varobj[:])\n self.kpoints = N.reshape(N.array(varobj,N.float),varshape)\n def write_bandstructure_to_file(self,filename,option_kpts='bohrm1_units'):\n #if option_kpts is set to 'normalized', the path of the bandstructure will be normalized to 1 (and special k-points correctly chosen)\n if self.kpoint_sampling_type != 'Bandstructure':\n print 'ERROR: kpoint_sampling_type is not \"Bandstructure\" ... returning from write_bandstructure_to_file'\n return\n if self.nsppol > 1:\n print 'ERROR: number of spins is more than 1, this is not fully tested ... 
use with care !'\n writer = open(filename,'w')\n writer.write('# BANDSTRUCTURE FILE FROM DAVID\\'S SCRIPT\\n')\n writer.write('# nsppol = %s\\n' %self.nsppol)\n writer.write('# nband = %s\\n' %self.mband)\n writer.write('# eigenvalue_type = %s\\n' %self.eigenvalue_type)\n if self.inputgvectors:\n writer.write('# inputgvectors = 1 (%s)\\n' %self.inputgvectors)\n else:\n writer.write('# inputgvectors = 0 (%s)\\n' %self.inputgvectors)\n writer.write('# gvectors(1) = %20.17f %20.17f %20.17f \\n' %(self.gvectors[0,0],self.gvectors[0,1],self.gvectors[0,2]))\n writer.write('# gvectors(2) = %20.17f %20.17f %20.17f \\n' %(self.gvectors[1,0],self.gvectors[1,1],self.gvectors[1,2]))\n writer.write('# gvectors(3) = %20.17f %20.17f %20.17f \\n' %(self.gvectors[2,0],self.gvectors[2,1],self.gvectors[2,2]))\n writer.write('# special_kpoints_number = %s\\n' %(len(self.special_kpoints_indices)))\n writer.write('# list of special kpoints : (given in reduced coordinates, value_path is in Bohr^-1, value_red_path has its total path normalized to 1)\\n')\n for ii in range(len(self.special_kpoints_indices)):\n ispkpt = self.special_kpoints_indices[ii]\n spkpt = self.special_kpoints[ii]\n writer.write('# special_kpt_index %5s : %20.17f %20.17f %20.17f (value_path = %20.17f | value_red_path = %20.17f)\\n' %(ispkpt,spkpt[0],spkpt[1],spkpt[2],self.kpoint_path_values[ispkpt],self.kpoint_reduced_path_values[ispkpt]))\n writer.write('# special_kpoints_names :\\n')\n for ii in range(len(self.special_kpoints_indices)):\n ispkpt = self.special_kpoints_indices[ii]\n spkpt = self.special_kpoints[ii]\n writer.write('# special_kpt_name %3s : \"%s\" : %20.17f %20.17f %20.17f\\n' %(ii+1,self.special_kpoints_names[ii],spkpt[0],spkpt[1],spkpt[2]))\n writer.write('# kpoint_path_length = %20.17f \\n' %(self.kpoint_path_length))\n writer.write('# kpoint_path_number = %s \\n' %(self.nkpt))\n if self.inputgvectors:\n writer.write('# kpoint_path_units = %s\\n' %(option_kpts))\n else:\n writer.write('# kpoint_path_units = %s (!!! CONSIDERING UNITARY GVECTORS MATRIX !!!)\\n' %(option_kpts))\n writer.write('#BEGIN\\n')\n if option_kpts == 'bohrm1_units':\n values_path = self.kpoint_path_values\n elif option_kpts == 'reduced':\n values_path = self.kpoint_reduced_path_values\n elif option_kpts == 'bohrm1_units_normalized':\n values_path = self.normalized_kpoint_path_values\n elif option_kpts == 'reduced_normalized':\n values_path = self.normalized_kpoint_reduced_path_values\n else:\n print 'ERROR: wrong option_kpts ... exit'\n writer.write('... 
CANCELLED (wrong option_kpts)')\n writer.close()\n sys.exit()\n for isppol in range(self.nsppol):\n writer.write('#isppol %s\\n' %isppol)\n for iband in range(self.mband):\n writer.write('#iband %5s (band number %s)\\n' %(iband,iband+1))\n for ikpt in range(self.nkpt):\n writer.write('%20.17f %20.17f\\n' %(values_path[ikpt],self.eigenvalues[isppol,ikpt,iband]))\n writer.write('\\n')\n writer.write('#END\\n')\n writer.write('\\n#KPT_LIST\\n')\n for ikpt in range(self.nkpt):\n writer.write('# %6d : %20.17f %20.17f %20.17f\\n' %(ikpt,self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))\n writer.close()\n def read_bandstructure_from_file(self,filename):\n reader = open(filename,'r')\n bs_data = reader.readlines()\n reader.close()\n self.gvectors = N.identity(3,N.float)\n self.kpoint_sampling_type = 'Bandstructure'\n self.special_kpoints_indices = list()\n self.special_kpoints = list()\n for ii in range(len(bs_data)):\n if bs_data[ii] == '#BEGIN\\n':\n ibegin = ii\n break\n elif bs_data[ii][:10] == '# nsppol =':\n self.nsppol = N.int(bs_data[ii][10:])\n elif bs_data[ii][:9] == '# nband =':\n self.mband = N.int(bs_data[ii][9:])\n elif bs_data[ii][:19] == '# eigenvalue_type =':\n self.eigenvalue_type = bs_data[ii][19:].strip()\n elif bs_data[ii][:17] == '# inputgvectors =':\n tt = N.int(bs_data[ii][18])\n if tt == 1:\n self.inputgvectors = True\n elif tt == 0:\n self.inputgvectors = False\n else:\n print 'ERROR: reading inputgvectors ... exit'\n sys.exit()\n elif bs_data[ii][:15] == '# gvectors(1) =':\n sp = bs_data[ii][15:].split()\n self.gvectors[0,0] = N.float(sp[0])\n self.gvectors[0,1] = N.float(sp[1])\n self.gvectors[0,2] = N.float(sp[2])\n elif bs_data[ii][:15] == '# gvectors(2) =':\n sp = bs_data[ii][15:].split()\n self.gvectors[1,0] = N.float(sp[0])\n self.gvectors[1,1] = N.float(sp[1])\n self.gvectors[1,2] = N.float(sp[2])\n elif bs_data[ii][:15] == '# gvectors(3) =':\n sp = bs_data[ii][15:].split()\n self.gvectors[2,0] = N.float(sp[0])\n self.gvectors[2,1] = N.float(sp[1])\n self.gvectors[2,2] = N.float(sp[2])\n elif bs_data[ii][:26] == '# special_kpoints_number =':\n special_kpoints_number = N.int(bs_data[ii][26:])\n self.special_kpoints_names = ['']*special_kpoints_number\n elif bs_data[ii][:22] == '# special_kpt_index':\n sp = bs_data[ii][22:].split()\n self.special_kpoints_indices.append(N.int(sp[0]))\n self.special_kpoints.append(N.array([sp[2],sp[3],sp[4]]))\n elif bs_data[ii][:21] == '# special_kpt_name':\n sp = bs_data[ii][21:].split()\n ispkpt = N.int(sp[0])-1\n self.special_kpoints_names[ispkpt] = sp[2][1:-1]\n elif bs_data[ii][:22] == '# kpoint_path_length =':\n self.kpoint_path_length = N.float(bs_data[ii][22:])\n elif bs_data[ii][:22] == '# kpoint_path_number =':\n self.nkpt = N.int(bs_data[ii][22:])\n elif bs_data[ii][:21] == '# kpoint_path_units =':\n kpoint_path_units = bs_data[ii][21:].strip()\n self.special_kpoints_indices = N.array(self.special_kpoints_indices,N.int)\n self.special_kpoints = N.array(self.special_kpoints,N.float)\n if len(self.special_kpoints_indices) != special_kpoints_number or len(self.special_kpoints) != special_kpoints_number:\n print 'ERROR: reading the special kpoints ... 
exit'\n sys.exit()\n self.kpoint_path_values = N.zeros([self.nkpt],N.float)\n self.kpoint_reduced_path_values = N.zeros([self.nkpt],N.float)\n if kpoint_path_units == 'bohrm1_units':\n jj = 0\n for ii in range(ibegin+1,len(bs_data)):\n if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue\n if bs_data[ii] == '\\n':\n break\n self.kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])\n jj = jj + 1\n if jj != self.nkpt:\n print 'ERROR: reading bandstructure file ... exit'\n sys.exit()\n self.normalized_kpoint_path_values = self.kpoint_path_values\/self.kpoint_path_length\n if kpoint_path_units == 'bohrm1_units_normalized':\n jj = 0\n for ii in range(ibegin+1,len(bs_data)):\n if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue\n if bs_data[ii] == '\\n':\n break\n self.normalized_kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])\n jj = jj + 1\n if jj != self.nkpt:\n print 'ERROR: reading bandstructure file ... exit'\n sys.exit()\n self.kpoint_path_values = self.normalized_kpoint_path_values*self.kpoint_path_length\n elif kpoint_path_units == 'reduced_normalized':\n jj = 0\n for ii in range(ibegin+1,len(bs_data)):\n if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue\n if bs_data[ii] == '\\n':\n break\n self.normalized_kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])\n jj = jj + 1\n if jj != self.nkpt:\n print 'ERROR: reading bandstructure file ... exit'\n sys.exit()\n self.kpoint_reduced_path_values = self.normalized_kpoint_reduced_path_values\/self.kpoint_reduced_path_length\n elif kpoint_path_units == 'reduced':\n jj = 0\n for ii in range(ibegin+1,len(bs_data)):\n if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue\n if bs_data[ii] == '\\n':\n break\n self.kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])\n jj = jj + 1\n if jj != self.nkpt:\n print 'ERROR: reading bandstructure file ... exit'\n sys.exit()\n self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values\/self.kpoint_reduced_path_length\n self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)\n check_nband = 0\n for ii in range(ibegin+1,len(bs_data)):\n if bs_data[ii][:7] == '#isppol':\n isppol = N.int(bs_data[ii][7:])\n elif bs_data[ii][:6] == '#iband':\n iband = N.int(bs_data[ii][6:].split()[0])\n ikpt = 0\n elif bs_data[ii][:4] == '#END':\n break\n elif bs_data[ii] == '\\n':\n check_nband = check_nband + 1\n else:\n self.eigenvalues[isppol,ikpt,iband] = N.float(bs_data[ii].split()[1])\n ikpt = ikpt + 1\n\ndef check_gw_vs_dft_parameters(dftec,gwec):\n if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':\n print 'ERROR: eigenvalue files do not contain GW and DFT eigenvalues ... exiting now'\n sys.exit()\n if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:\n print 'ERROR: the number of spins\/kpoints is not the same in the GW and DFT files used to make the interpolation ... exiting now'\n sys.exit()\n for ikpt in range(dftec.nkpt):\n if N.absolute(dftec.kpoints[ikpt,0]-gwec.kpoints[ikpt,0]) > csts.TOLKPTS or \\\n N.absolute(dftec.kpoints[ikpt,1]-gwec.kpoints[ikpt,1]) > csts.TOLKPTS or \\\n N.absolute(dftec.kpoints[ikpt,2]-gwec.kpoints[ikpt,2]) > csts.TOLKPTS:\n print 'ERROR: the kpoints are not the same in the GW and DFT files used to make the interpolation ... 
exiting now'\n sys.exit()\n\ndef plot_gw_vs_dft_eig(dftec,gwec,vbm_index,energy_pivots=None,polyfit_degrees=None):\n if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':\n print 'ERROR: eigenvalue containers do not contain GW and DFT eigenvalues ... exiting now'\n sys.exit()\n if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:\n print 'ERROR: the number of spins\/kpoints is not the same in the GW and DFT containers ... exiting now'\n sys.exit()\n valdftarray = N.array([],N.float)\n conddftarray = N.array([],N.float)\n valgwarray = N.array([],N.float)\n condgwarray = N.array([],N.float)\n for isppol in range(dftec.nsppol):\n for ikpt in range(dftec.nkpt):\n ibdmin = N.max([dftec.bd_indices[isppol,ikpt,0],gwec.bd_indices[isppol,ikpt,0]])-1\n ibdmax = N.min([dftec.bd_indices[isppol,ikpt,1],gwec.bd_indices[isppol,ikpt,1]])-1\n valdftarray = N.append(valdftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])\n valgwarray = N.append(valgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])\n conddftarray = N.append(conddftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])\n condgwarray = N.append(condgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])\n if energy_pivots == None:\n if plot_figures == 1:\n P.figure(1)\n P.hold(True)\n P.grid(True)\n P.plot(valdftarray,valgwarray,'bx')\n P.plot(conddftarray,condgwarray,'rx')\n P.xlabel('DFT eigenvalues (in eV)')\n P.ylabel('GW eigenvalues (in eV)')\n P.figure(2)\n P.hold(True)\n P.grid(True)\n P.plot(valdftarray,valgwarray-valdftarray,'bx')\n P.plot(conddftarray,condgwarray-conddftarray,'rx')\n P.xlabel('DFT eigenvalues (in eV)')\n P.ylabel('GW correction to the DFT eigenvalues (in eV)')\n P.show()\n return\n polyfitlist = list()\n if len(polyfit_degrees) == 1:\n print 'ERROR: making a fit with only one interval is not allowed ... 
exiting now'\n sys.exit()\n dftarray = N.append(valdftarray,conddftarray)\n gwarray = N.append(valgwarray,condgwarray)\n dftarray_list = list()\n gwarray_list = list()\n for iinterval in range(len(polyfit_degrees)):\n tmpdftarray = N.array([],N.float)\n tmpgwarray = N.array([],N.float)\n if iinterval == 0:\n emin = None\n emax = energy_pivots[0]\n for ii in range(len(dftarray)):\n if dftarray[ii] <= emax:\n tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])\n tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])\n elif iinterval == len(polyfit_degrees)-1:\n emin = energy_pivots[-1]\n emax = None\n for ii in range(len(dftarray)):\n if dftarray[ii] >= emin:\n tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])\n tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])\n else:\n emin = energy_pivots[iinterval-1]\n emax = energy_pivots[iinterval]\n for ii in range(len(dftarray)):\n if dftarray[ii] >= emin and dftarray[ii] <= emax:\n tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])\n tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])\n dftarray_list.append(tmpdftarray)\n gwarray_list.append(tmpgwarray)\n pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees[iinterval])\n polyfitlist.append(pfit)\n if plot_figures == 1:\n linspace_npoints = 200\n valpoly_x = N.linspace(N.min(valdftarray),N.max(valdftarray),linspace_npoints)\n condpoly_x = N.linspace(N.min(conddftarray),N.max(conddftarray),linspace_npoints)\n P.figure(3)\n P.hold(True)\n P.grid(True)\n P.plot(valdftarray,valgwarray-valdftarray,'bx')\n P.plot(conddftarray,condgwarray-conddftarray,'rx')\n [x_min,x_max] = P.xlim()\n for iinterval in range(len(polyfit_degrees)):\n if iinterval == 0:\n tmppoly_x = N.linspace(x_min,energy_pivots[iinterval],linspace_npoints)\n elif iinterval == len(polyfit_degrees)-1:\n tmppoly_x = N.linspace(energy_pivots[iinterval-1],x_max,linspace_npoints)\n else:\n tmppoly_x = N.linspace(energy_pivots[iinterval-1],energy_pivots[iinterval],linspace_npoints)\n P.plot(tmppoly_x,N.polyval(polyfitlist[iinterval],tmppoly_x),'k')\n for ipivot in range(len(energy_pivots)):\n en = energy_pivots[ipivot]\n P.plot([en,en],[N.polyval(polyfitlist[ipivot],en),N.polyval(polyfitlist[ipivot+1],en)],'k-.')\n P.xlabel('DFT eigenvalues (in eV)')\n P.ylabel('GW correction to the DFT eigenvalues (in eV)')\n P.figure(4)\n P.hold(True)\n P.grid(True)\n for iinterval in range(len(polyfit_degrees)):\n P.plot(dftarray_list[iinterval],gwarray_list[iinterval]-dftarray_list[iinterval]-N.polyval(polyfitlist[iinterval],dftarray_list[iinterval]),'bx')\n [x_min,x_max] = P.xlim()\n P.plot([x_min,x_max],[0,0],'k-')\n P.xlabel('DFT eigenvalues (in eV)')\n P.ylabel('Error in the fit (in eV)')\n P.show()\n return polyfitlist\n\ndef compare_bandstructures(ec_ref,ec_test):\n nspkpt_ref = len(ec_ref.special_kpoints)\n nspkpt_test = len(ec_test.special_kpoints)\n if nspkpt_ref != nspkpt_test:\n print 'ERROR: The number of special kpoints is different in the two files ... 
exit'\n sys.exit()\n eig_type_ref = ec_ref.eigenvalue_type\n eig_type_test = ec_test.eigenvalue_type\n print eig_type_ref,eig_type_test\n if eig_type_ref == 'DFT' and eig_type_test == 'W90':\n TOL_KPTS = N.float(1.0e-4)\n else:\n TOL_KPTS = N.float(1.0e-6)\n print TOL_KPTS\n for ispkpt in range(nspkpt_ref):\n print 'difference between the two :',ec_ref.special_kpoints[ispkpt,:]-ec_test.special_kpoints[ispkpt,:]\n if not N.allclose(ec_ref.special_kpoints[ispkpt,:],ec_test.special_kpoints[ispkpt,:],atol=TOL_KPTS):\n print 'ERROR: The kpoints are not the same :'\n print ' Kpt #%s ' %ispkpt\n print ' Reference => %20.17f %20.17f %20.17f' %(ec_ref.special_kpoints[ispkpt,0],ec_ref.special_kpoints[ispkpt,1],ec_ref.special_kpoints[ispkpt,2])\n print ' Compared => %20.17f %20.17f %20.17f' %(ec_test.special_kpoints[ispkpt,0],ec_test.special_kpoints[ispkpt,1],ec_test.special_kpoints[ispkpt,2])\n print ' ... exit'\n sys.exit()\n mband_comparison = N.min([ec_ref.mband,ec_test.mband])\n if mband_comparison < ec_ref.mband:\n print 'Number of bands in the test bandstructure is lower than the number of bands in the reference (%s)' %ec_ref.mband\n print ' => Comparison will proceed with %s bands' %ec_test.mband\n elif mband_comparison < ec_test.mband:\n print 'Number of bands in the reference bandstructure is lower than the number of bands in the test bandstructure (%s)' %ec_test.mband\n print ' => Comparison will only proceed with %s bands of the test bandstructure' %ec_ref.mband\n else:\n print 'Number of bands in the reference and test bandstructure is the same'\n print ' => Comparison will proceed with %s bands' %mband_comparison\n# eig_test_ref_path = ec_ref.eigenvalues[:,:,:mband_comparison]\n rmsd_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)\n nrmsd_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)\n mae_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)\n for isppol in range(ec_ref.nsppol):\n for iband in range(mband_comparison):\n interp = N.interp(ec_ref.normalized_kpoint_path_values,ec_test.normalized_kpoint_path_values,ec_test.eigenvalues[isppol,:,iband])\n rmsd_per_band[isppol,iband] = N.sqrt(N.sum((csts.hartree2ev*interp-csts.hartree2ev*ec_ref.eigenvalues[isppol,:,iband])**2)\/ec_ref.nkpt)\n mae_per_band[isppol,iband] = N.sum(N.abs(csts.hartree2ev*interp-csts.hartree2ev*ec_ref.eigenvalues[isppol,:,iband]))\/ec_ref.nkpt\n P.figure(1)\n P.plot(mae_per_band[0,:])\n P.figure(2)\n P.plot(rmsd_per_band[0,:])\n P.show()\n\ndef get_gvectors():\n if os.path.isfile('.gvectors.bsinfo'):\n print 'File \".gvectors.bsinfo found with the following gvectors information :\"'\n try:\n gvectors_reader = open('.gvectors.bsinfo','r')\n gvectors_data = gvectors_reader.readlines()\n gvectors_reader.close()\n trial_gvectors = N.identity(3,N.float)\n trial_gvectors[0,0] = N.float(gvectors_data[0].split()[0])\n trial_gvectors[0,1] = N.float(gvectors_data[0].split()[1])\n trial_gvectors[0,2] = N.float(gvectors_data[0].split()[2])\n trial_gvectors[1,0] = N.float(gvectors_data[1].split()[0])\n trial_gvectors[1,1] = N.float(gvectors_data[1].split()[1])\n trial_gvectors[1,2] = N.float(gvectors_data[1].split()[2])\n trial_gvectors[2,0] = N.float(gvectors_data[2].split()[0])\n trial_gvectors[2,1] = N.float(gvectors_data[2].split()[1])\n trial_gvectors[2,2] = N.float(gvectors_data[2].split()[2])\n print ' gvectors(1) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[0,0],trial_gvectors[0,1],trial_gvectors[0,2])\n print ' gvectors(2) = [ %20.17f %20.17f %20.17f ]' 
%(trial_gvectors[1,0],trial_gvectors[1,1],trial_gvectors[1,2])\n print ' gvectors(3) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[2,0],trial_gvectors[2,1],trial_gvectors[2,2])\n except:\n print 'ERROR: file \".gvectors.bsinfo\" might be corrupted (empty or not formatted correctly ...)'\n print ' you should remove the file and start again or check the file ... exit'\n sys.exit()\n test = raw_input('Press to use these gvectors (any other character to enter manually other gvectors)\\n')\n if test == '':\n gvectors = trial_gvectors\n else:\n gvectors = N.identity(3,N.float)\n test = raw_input('Enter G1 (example : \"0.153 0 0\") : \\n')\n gvectors[0,0] = N.float(test.split()[0])\n gvectors[0,1] = N.float(test.split()[1])\n gvectors[0,2] = N.float(test.split()[2])\n test = raw_input('Enter G2 (example : \"0.042 1.023 0\") : \\n')\n gvectors[1,0] = N.float(test.split()[0])\n gvectors[1,1] = N.float(test.split()[1])\n gvectors[1,2] = N.float(test.split()[2])\n test = raw_input('Enter G3 (example : \"0 0 1.432\") : \\n')\n gvectors[2,0] = N.float(test.split()[0])\n gvectors[2,1] = N.float(test.split()[1])\n gvectors[2,2] = N.float(test.split()[2])\n test = raw_input('Do you want to overwrite the gvectors contained in the file \".gvectors.bsinfo\" ? ( for yes, anything else for no)\\n')\n if test == '':\n print 'Writing gvectors to file \".gvectors.bsinfo\" ...'\n gvectors_writer = open('.gvectors.bsinfo','w')\n gvectors_writer.write('%20.17f %20.17f %20.17f\\n' %(trial_gvectors[0,0],trial_gvectors[0,1],trial_gvectors[0,2]))\n gvectors_writer.write('%20.17f %20.17f %20.17f\\n' %(trial_gvectors[1,0],trial_gvectors[1,1],trial_gvectors[1,2]))\n gvectors_writer.write('%20.17f %20.17f %20.17f\\n' %(trial_gvectors[2,0],trial_gvectors[2,1],trial_gvectors[2,2]))\n gvectors_writer.close()\n print '... done'\n else: \n test = raw_input('Do you want to enter the the reciprocal space primitive vectors (y\/n)\\n')\n if test == 'y':\n gvectors = N.identity(3,N.float)\n test = raw_input('Enter G1 (example : \"0.153 0 0\") : ')\n gvectors[0,0] = N.float(test.split()[0])\n gvectors[0,1] = N.float(test.split()[1])\n gvectors[0,2] = N.float(test.split()[2])\n test = raw_input('Enter G2 (example : \"0.042 1.023 0\") : ')\n gvectors[1,0] = N.float(test.split()[0])\n gvectors[1,1] = N.float(test.split()[1])\n gvectors[1,2] = N.float(test.split()[2])\n test = raw_input('Enter G3 (example : \"0 0 1.432\") : ')\n gvectors[2,0] = N.float(test.split()[0])\n gvectors[2,1] = N.float(test.split()[1])\n gvectors[2,2] = N.float(test.split()[2])\n test = raw_input('Do you want to write the gvectors to file \".gvectors.bsinfo\" ? ( for yes, anything else for no)\\n')\n if test == '':\n print 'Writing gvectors to file \".gvectors.bsinfo\" ...'\n gvectors_writer = open('.gvectors.bsinfo','w')\n gvectors_writer.write('%20.17f %20.17f %20.17f\\n' %(gvectors[0,0],gvectors[0,1],gvectors[0,2]))\n gvectors_writer.write('%20.17f %20.17f %20.17f\\n' %(gvectors[1,0],gvectors[1,1],gvectors[1,2]))\n gvectors_writer.write('%20.17f %20.17f %20.17f\\n' %(gvectors[2,0],gvectors[2,1],gvectors[2,2]))\n gvectors_writer.close()\n print '... done'\n else:\n gvectors = None\n return gvectors\n\n# Parse the command line options\n\nparser = argparse.ArgumentParser(description='Tool for plotting dft bandstructures')\nparser.add_argument('files',help='files to be opened',nargs=1)\nargs = parser.parse_args()\nargs_dict = vars(args)\n\nif args_dict['files']:\n print 'will open the file'\nelse:\n print 'ERROR: you should provide some bandstructure file ! 
exiting now ...'\n sys.exit()\ndft_file = args_dict['files'][0]\n\ngvectors = get_gvectors()\n\nec_dft = EigenvalueContainer(directory='.',filename=dft_file)\nec_dft.set_kpoint_sampling_type('Bandstructure')\nec_dft.find_special_kpoints(gvectors)\n\nprint 'Number of bands in the file : %s' %(N.shape(ec_dft.eigenvalues)[2])\ntest = raw_input('Enter the number of bands to be plotted ( : %s) : \\n' %(N.shape(ec_dft.eigenvalues)[2]))\nif test == '':\n nbd_plot = N.shape(ec_dft.eigenvalues)[2]\nelse:\n nbd_plot = N.int(test)\nif nbd_plot > N.shape(ec_dft.eigenvalues)[2]:\n print 'ERROR: the number of bands to be plotted is larger than the number available ... exit'\n sys.exit()\n\nec_dft.special_kpoints_names = ['']*len(ec_dft.special_kpoints_indices)\nfor ii in range(len(ec_dft.special_kpoints_indices)):\n ec_dft.special_kpoints_names[ii] = 'k%s' %(ii+1)\nprint 'List of special kpoints :'\nfor ii in range(len(ec_dft.special_kpoints_indices)):\n spkpt = ec_dft.kpoints[ec_dft.special_kpoints_indices[ii]]\n print ' Kpoint %s : %s %s %s' %(ii+1,spkpt[0],spkpt[1],spkpt[2])\nprint 'Enter the name of the %s special k-points :' %(len(ec_dft.special_kpoints_indices))\ntest = raw_input('')\nif len(test.split()) == len(ec_dft.special_kpoints_indices):\n for ii in range(len(ec_dft.special_kpoints_indices)):\n ec_dft.special_kpoints_names[ii] = test.split()[ii]\n\ntest = raw_input('Enter base name for bandstructure file : \\n')\nec_dft.write_bandstructure_to_file('%s.bandstructure' %test)\n\n\nP.figure(1,figsize=(3.464,5))\nP.hold('on')\nP.grid('on')\nP.xticks(N.take(ec_dft.kpoint_reduced_path_values,N.array(ec_dft.special_kpoints_indices,N.int)),ec_dft.special_kpoints_names)\nif ec_dft.nsppol == 1:\n for iband in range(nbd_plot):\n P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)\nelif ec_dft.nsppol == 2:\n for iband in range(nbd_plot):\n P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)\n P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[1,:,iband]*csts.hartree2ev,'r-',linewidth=2)\nP.show()\n","license":"gpl-3.0"} {"repo_name":"robios\/PyTES","path":"pytes\/Util.py","copies":"1","size":"32573","content":"import warnings\nimport numpy as np\nimport time\nfrom struct import unpack\nfrom scipy.stats import norm\nfrom scipy.signal import tukey\nfrom Filter import median_filter\nimport Analysis, Filter, Constants\n\ndef savefits(data, filename, vmax=1.0, sps=1e6, bits=14, noise=False, clobber=True):\n \"\"\"\n Save pulse\/noise to FITS file\n \"\"\"\n \n import pyfits as pf\n \n # Prepare data\n data = (np.asarray(data)\/vmax*2**(bits-1)).round()\n \n # Column Name\n if noise:\n \tcolname = 'NoiseRec'\n else:\n \tcolname = 'PulseRec'\n \n # Columns\n col_t = pf.Column(name='TIME', format='1D', unit='s', array=np.zeros(data.shape[0], dtype=int))\n col_data = pf.Column(name=colname, format='%dI' % data.shape[1], unit='V', array=data)\n \n cols = pf.ColDefs([col_t, col_data])\n tbhdu = pf.BinTableHDU.from_columns(cols)\n \n # Name of extension\n exthdr = tbhdu.header\n exthdr['EXTNAME'] = ('Record', 'name of this binary table extension')\n exthdr['EXTVER'] = (1, 'extension version number')\n \n # Add more attributes\n exthdr['TSCAL2'] = (vmax\/2**(bits-1), '[V\/ch]')\n exthdr['TZERO2'] = (0., '[V]')\n exthdr['THSCL2'] = (sps**-1, '[s\/bin] horizontal resolution of record')\n exthdr['THZER2'] = (0, '[s] horizontal offset of record')\n exthdr['THSAM2'] = (data.shape[1], 'sample number of 
record')\n exthdr['THUNI2'] = ('s', 'physical unit of sampling step of record')\n exthdr['TRMIN2'] = (-2**(bits-1)+1, '[channel] minimum number of each sample')\n exthdr['TRMAX2'] = (2**(bits-1)-1, '[channel] maximum number of each sample')\n exthdr['TRBIN2'] = (1, '[channel] default bin number of each sample')\n \n # More attributes\n exthdr['TSTART'] = (0, 'start time of experiment in total second')\n exthdr['TSTOP'] = (0, 'end time of experiment in total second')\n exthdr['TEND'] = (0, 'end time of experiment (obsolete)')\n exthdr['DATE'] = (time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime()), 'file creation date (UT)')\n \n # We anyway need Primary HDU\n hdu = pf.PrimaryHDU()\n \n # Write to FITS\n thdulist = pf.HDUList([hdu, tbhdu])\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n thdulist.writeto(filename, clobber=clobber)\n\ndef fopen(filename):\n \"\"\"\n Read FITS file\n \n Parameters\n ==========\n filename: file number to read\n \n Returns\n =======\n t: time array\n wave: waveform array\n \"\"\"\n \n import pyfits as pf\n\n # Open fits file and get pulse\/noise data\n header = pf.open(filename)\n wave = header[1].data.field(1).copy()\n dt = header[1].header['THSCL2']\n t = np.arange(wave.shape[-1]) * dt\n header.close()\n \n return t, wave\n\ndef yopen(filenumber, summary=False, nf=None, tmin=None, tmax=None, raw=False):\n \"\"\"\n Read Yokogawa WVF file\n \n Parameters\n ==========\n filenumber: file number to read\n summary: to summary waves (default: False)\n nf: sigmas for valid data using median noise filter, None to disable noise filter (default: None)\n tmin: lower boundary of time for partial extraction, scaler or list (Default: None)\n tmax: upper boundary of time for partial extraction, scaler or list (Default: None)\n raw: returns raw data without scaling\/offsetting if True (Default: False)\n \n Returns\n =======\n if summary is False:\n [ t1, d1, t2, d2, t3, d3, ... ]\n \n if summary is True:\n [ t1, d1, err1, t2, d2, err2, ... 
]\n \n if raw is True:\n t1 is a tuple of (hres1, hofs1, vres1, vofs1)\n \n where t1 is timing for 1st ch, d1 is data for 1st ch, err1 is error (1sigma) for 1st ch, and so on.\n \"\"\"\n \n # Read header (HDR)\n h = open(str(filenumber) + \".HDR\")\n lines = h.readlines()\n h.close()\n \n # Parse $PublicInfo\n for line in lines:\n token = line.split()\n \n if len(token) > 0:\n # Check endian\n if token[0] == \"Endian\":\n endian = '>' if token[1] == \"Big\" else '<'\n \n # Check data format\n if token[0] == \"DataFormat\":\n format = token[1]\n assert format == \"Block\"\n \n # Check # of groups\n if token[0] == \"GroupNumber\":\n groups = int(token[1])\n \n # Check # of total traces\n if token[0] == \"TraceTotalNumber\":\n ttraces = int(token[1])\n \n # Check data offset\n if token[0] == \"DataOffset\":\n offset = int(token[1])\n \n # Initialize containers\n traces = [None] * groups # Number of traces for each group\n blocks = [None] * ttraces # Number of blocks for each trace\n bsizes = [None] * ttraces # Block size for each trace\n vres = [None] * ttraces # VResolution for each trace\n voffset = [None] * ttraces # VOffset for each trace\n hres = [None] * ttraces # HResolution for each trace\n hoffset = [None] * ttraces # HOffset for each trace\n \n # Parse $Group\n for line in lines:\n token = line.split()\n\n if len(token) > 0:\n # Read current group number\n if token[0][:6] == \"$Group\":\n cgn = int(token[0][6:]) - 1 # Current group number (minus 1)\n \n # Check # of traces in this group\n if token[0] == \"TraceNumber\":\n traces[cgn] = int(token[1])\n traceofs = np.sum(traces[:cgn], dtype=int)\n \n # Check # of Blocks\n if token[0] == \"BlockNumber\":\n blocks[traceofs:traceofs+traces[cgn]] = [ int(token[1]) ] * traces[cgn]\n \n # Check Block Size\n if token[0] == \"BlockSize\":\n bsizes[traceofs:traceofs+traces[cgn]] = [ int(s) for s in token[1:] ]\n \n # Check VResolusion\n if token[0] == \"VResolution\":\n vres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]\n \n # Check VOffset\n if token[0] == \"VOffset\":\n voffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]\n \n # Check VDataType\n if token[0] == \"VDataType\":\n assert token[1] == \"IS2\"\n \n # Check HResolution\n if token[0] == \"HResolution\":\n hres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]\n \n # Check HOffset\n if token[0] == \"HOffset\":\n hoffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]\n \n # Data Initialization\n time = [ np.array(range(bsizes[t])) * hres[t] + hoffset[t] for t in range(ttraces) ]\n data = [ [None] * blocks[t] for t in range(ttraces) ]\n \n # Open WVF\n f = open(str(filenumber) + \".WVF\", 'rb')\n f.seek(offset)\n \n # Read WVF\n if format == \"Block\":\n # Block format (assuming block size is the same for all the traces in Block format)\n for b in range(blocks[0]):\n for t in range(ttraces):\n if raw:\n data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2)), dtype='int64')\n else:\n data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]\n else:\n # Trace format\n for t in range(ttraces):\n for b in range(blocks[t]):\n if raw:\n data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2)), dtype='int64')\n else:\n data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]\n\n # Array conversion\n for t in range(ttraces):\n if raw:\n data[t] = np.array(data[t], dtype='int64')\n else:\n 
data[t] = np.array(data[t])\n \n \n # Tmin\/Tmax filtering\n for t in range(ttraces):\n if type(tmin) == list or type(tmax) == list:\n if not (type(tmin) == list and type(tmax) == list and len(tmin) == len(tmax)):\n raise ValueError(\"tmin and tmax both have to be list and have to have the same length.\")\n mask = np.add.reduce([ (time[t] >= _tmin) & (time[t] < _tmax) for (_tmax, _tmin) in zip(tmax, tmin)], dtype=bool)\n else:\n _tmin = np.min(time[t]) if tmin is None else tmin\n _tmax = np.max(time[t]) + 1 if tmax is None else tmax\n mask = (time[t] >= _tmin) & (time[t] < _tmax)\n \n data[t] = data[t][:, mask]\n time[t] = time[t][mask]\n \n f.close()\n \n if summary is False:\n # Return wave data as is\n if raw:\n return [ [ (hres[t], hoffset[t], vres[t], voffset[t]), data[t] ] for t in range(ttraces) ]\n else:\n return [ [ time[t], data[t] ] for t in range(ttraces) ]\n else:\n if nf is None:\n # Noise filter is off\n if raw:\n return [ [ (hres[t], hoffset[t], vres[t], voffset[t]), np.mean(data[t].astype(dtype='float64'), axis=0), np.std(data[t].astype(dtype='float64'), axis=0, ddof=1) ]\n for t in range(ttraces) ]\n else:\n return [ [ time[t], np.mean(data[t], axis=0), np.std(data[t], axis=0, ddof=1) ]\n for t in range(ttraces) ]\n else:\n # Noise filter is on\n if raw:\n return [ [ (hres[t], hoffset[t], vres[t], voffset[t]),\n np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0, data[t].astype(dtype='float64')),\n np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t].astype(dtype='float64')) ]\n for t in range(ttraces) ]\n else:\n return [ [ time[t],\n np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0, data[t]),\n np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t]) ]\n for t in range(ttraces) ]\n\ndef popen(filename, ch=None, raw=False):\n \"\"\"\n Read pls file\n \n Parameters\n ==========\n filename: file name to read\n ch: returns data only for the given channel if given (Default: None)\n raw: returns raw data without scaling\/offsetting if True (Default: False)\n \n Returns\n =======\n if raw is True:\n [ header, vres, vofs, hres, hofs, tick, num, data, edata ]\n else:\n [ header, t, tick, num, data, edata ]\n \"\"\"\n \n # Initialize\n header = {'COMMENT': []}\n vres = {}\n vofs = {}\n hres = {}\n hofs = {}\n tick = {}\n num = {}\n data = {}\n edata = {}\n \n # Parser\n def parser():\n \"\"\"\n PLS Data Parser (generator)\n \"\"\"\n \n # Initialization\n samples = -1\n extra = 0\n chunk = ''\n isHeader = True\n \n while True:\n while len(chunk) < 2:\n chunk += yield\n \n # Get the magic character\n magic = chunk[0]\n\n if isHeader and magic == 'C':\n # Comment\n while len(chunk) < 80:\n chunk += yield\n header['COMMENT'].append(chunk[2:80])\n chunk = chunk[80:]\n \n elif isHeader and magic == 'V':\n # Version\n while len(chunk) < 80:\n chunk += yield\n header['VERSION'] = chunk[2:80]\n chunk = chunk[80:]\n \n elif isHeader and magic == 'O':\n # Date\n while len(chunk) < 10:\n chunk += yield\n _m, _d, _y = map(int, chunk[2:10].split())\n header['DATE'] = \"%d\/%d\/%d\" % (_y, _m, _d)\n chunk = chunk[10:]\n\n elif isHeader and magic == 'S':\n # Number of Samples\n while len(chunk) < 7:\n chunk += yield\n header['SAMPLES'] = samples = int(chunk[2:7])\n chunk = chunk[7:]\n\n elif isHeader and magic == 'E':\n # Extra Bytes\n while len(chunk) < 7:\n chunk += yield\n header['EXTRA'] = extra = int(chunk[2:7])\n chunk = chunk[7:]\n \n elif isHeader and magic == 'P':\n # Discriminator\n while len(chunk) 
< 78:\n chunk += yield\n _dis = chunk[2:78].split()\n if _dis[0] == '01':\n header['ULD'] = eval(_dis[1])\n elif _dis[0] == '02':\n header['LLD'] = eval(_dis[1])\n chunk = chunk[78:]\n \n elif isHeader and magic == 'N':\n # Normalization\n while len(chunk) < 47:\n chunk += yield\n _ch, _hofs, _hres, _vofs, _vres = chunk[2:47].split()\n _ch = int(_ch)\n vres[_ch] = eval(_vres)\n vofs[_ch] = eval(_vofs)\n hres[_ch] = eval(_hres)\n hofs[_ch] = eval(_hofs)\n chunk = chunk[47:]\n \n elif magic == 'D':\n # Data\n isHeader = False\n \n if samples < 0:\n raise ValueError(\"Invalid number of samples.\")\n while len(chunk) < (11 + samples*2):\n chunk += yield\n _ch, _tick, _num = unpack(' 1:\n p = p[:,:p.shape[-1]\/dsr*dsr].reshape(p.shape[0], -1, dsr).mean(axis=-1)\n n = n[:,:n.shape[-1]\/dsr*dsr].reshape(n.shape[0], -1, dsr).mean(axis=-1)\n dt *= dsr\n t = t[::dsr]\n\n # Pulse centering (for filtering)\n if center:\n # Roll pulse to the center\n r = p.shape[-1] \/ 2 - np.median(abs(p - Filter.offset(p)[:, np.newaxis]).argmax(axis=-1))\n p = np.hstack((p[...,-r:], p[...,:-r]))\n\n # Calculate offset (needs to be done before applying filter)\n if p.size > 0:\n offset = Filter.offset(p)\n \n # Generate Filter\n if filt is None:\n pass\n else:\n if filt.lower() == \"hanning\":\n f = np.hanning(p.shape[-1])\n elif filt.lower() == \"hamming\":\n f = np.hamming(p.shape[-1])\n elif filt.lower() == \"blackman\":\n f = np.blackman(p.shape[-1])\n elif filt.lower() == \"tukey\":\n f = tukey(p.shape[-1])\n else:\n raise ValueError('Unsupported filter: %s' % filt.lower())\n \n print \"Window filter function: %s\" % filt.lower()\n \n # Amplitude correction\n cf = f.sum() \/ len(f)\n p *= (f \/ cf)\n n *= (f \/ cf)\n\n # Equivalent noise bandwidth correction\n enb = len(f)*(f**2).sum()\/f.sum()**2\n df *= enb\n \n if p.size > 0:\n # Calculate averaged pulse\n avgp = Filter.average_pulse(p, max_shift=max_shift)\n\n if savedat:\n np.savetxt('%s-averagepulse.dat' % session, np.vstack((t, avgp)).T,\n header='Time (s), Averaged Pulse (%s)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\\t')\n\n if plotting:\n figure()\n plot(t, avgp)\n xlabel('Time$\\quad$(s)')\n ylabel('Averaged Pulse$\\quad$(%s)' % ('R' if Rspace else ('A' if gain else 'V')))\n tight_layout()\n savefig('%s-averagepulse.pdf' % session)\n\n # Calculate averaged pulse spectrum\n avgps = np.sqrt(Filter.power(avgp)) \/ df\n\n if savedat:\n np.savetxt('%s-avgpulse-power.dat' % session, np.vstack((np.arange(len(avgps))*df, avgps)).T,\n header='Frequency (Hz), Average Pulse Power (%s\/srHz)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\\t')\n \n if plotting:\n avgps[0] = 0 # for better plot\n figure()\n plot(np.arange(len(avgps))*df, avgps)\n loglog()\n xlabel('Frequency$\\quad$(Hz)')\n ylabel('Average Pulse Power$\\quad$(%s\/Hz)' % ('R' if Rspace else ('A' if gain else 'V')))\n tight_layout()\n savefig('%s-avgpulse-power.pdf' % session)\n\n if n.size > 0:\n # Plot noise spectrum\n avgns = np.sqrt(Filter.average_noise(n) \/ df)\n\n if savedat:\n np.savetxt('%s-noise.dat' % session, np.vstack((np.arange(len(avgns))*df, avgns)).T,\n header='Frequency (Hz), Noise (%s\/srHz)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\\t')\n \n if plotting:\n avgns[0] = 0 # for better plot\n figure()\n plot(np.arange(len(avgns))*df, avgns)\n loglog()\n xlabel('Frequency$\\quad$(Hz)')\n ylabel('Noise$\\quad$(%s\/$\\sqrt{\\mathrm{Hz}}$)' % ('R' if Rspace else ('A' if gain else 'V')))\n tight_layout()\n savefig('%s-noise.pdf' % 
session)\n\n if p.size > 0 and n.size > 0:\n # Generate template\n tmpl, sn = Filter.generate_template(p, n, lpfc=lpfc, hpfc=hpfc, nulldc=nulldc, max_shift=max_shift)\n\n if savedat:\n np.savetxt('%s-template.dat' % session, np.vstack((t, tmpl)).T,\n header='Time (s), Template (A.U.)', delimiter='\\t')\n np.savetxt('%s-sn.dat' % session, np.vstack((np.arange(len(sn))*df, sn\/np.sqrt(df))).T,\n header='Frequency (Hz), S\/N (\/srHz)', delimiter='\\t')\n \n if plotting:\n # Plot template\n figure()\n plot(t, tmpl)\n xlabel('Time$\\quad$(s)')\n ylabel('Template$\\quad$(A.U.)')\n tight_layout()\n savefig('%s-template.pdf' % session)\n \n # Plot SNR\n figure()\n plot(np.arange(len(sn))*df, sn\/np.sqrt(df))\n loglog()\n xlabel('Frequency$\\quad$(Hz)')\n ylabel('S\/N$\\quad$(\/$\\sqrt{\\mathrm{Hz}}$)')\n tight_layout()\n savefig('%s-sn.pdf' % session)\n \n # Calculate baseline resolution\n print \"Resolving power: %.2f (%.2f eV @ 5.9 keV)\" % (np.sqrt((sn**2).sum()*2), Analysis.baseline(sn))\n \n # Perform optimal filtering\n pha_p = Filter.optimal_filter(p, tmpl, max_shift=max_shift)\n pha_n = Filter.optimal_filter(n, tmpl, max_shift=0)\n \n # Offset correction\n (a, b), coef = Analysis.fit_offset(pha_p, offset, sigma=sigma, method=ocmethod, flip=flip)\n \n if coef > thre:\n oc_pha_p = Analysis.offset_correction(pha_p, offset, b)\n oc_pha_n = Analysis.offset_correction(pha_n, offset, b)\n print \"Offset correction with: PHA = %f * (1 + %f * Offset)\" % (a, b)\n \n if plotting:\n figure()\n ka = Analysis.ka(np.vstack((pha_p, offset)).T, sigma=sigma)\n plot(ka.T[1], ka.T[0], '.', c='k')\n x_min, x_max = xlim()\n ofs = np.linspace(x_min, x_max)\n label = '$\\mathrm{PHA}=%.2f\\\\times(1+%.2f\\\\times\\mathrm{Offset})$' % (a, b)\n plot(ofs, a*(1+b*ofs), 'r-', label=label)\n xlabel('Offset$\\quad$(V)')\n ylabel('PHA$\\quad$(V)')\n legend(frameon=False)\n tight_layout()\n savefig('%s-offset.pdf' % session)\n else:\n oc_pha_p = pha_p\n oc_pha_n = pha_n\n print \"Skipped offset correction: correlation coefficient (%f) is too small\" % coef\n\n # Check line database\n if \"%sKa\" % atom not in Constants.LE.keys() or \"%sKb\" % atom not in Constants.LE.keys():\n raise ValueError('Unsupported atom: %s' % atom)\n\n # Linearity correction\n pha_line_center = np.asarray([ np.median(Analysis.ka(oc_pha_p, sigma=sigma)), np.median(Analysis.kb(oc_pha_p, sigma=sigma)) ])\n line_energy = np.asarray([ Constants.LE['%sKa' % atom], Constants.LE['%sKb' % atom] ])\n\n if ignorekb:\n a, b = Analysis.fit_linearity([pha_line_center[0]], [line_energy[0]], deg=1)\n print \"Linearity correction with: PHA = %e * E\" % (b)\n else:\n a, b = Analysis.fit_linearity(pha_line_center, line_energy, deg=2)\n print \"Linearity correction with: PHA = %e * E^2 + %e * E\" % (a, b)\n print \"MnKb saturation ratio: %.2f %%\" % ((pha_line_center[1]\/pha_line_center[0])\/(line_energy[1]\/line_energy[0])*100)\n\n lc_pha_p = Analysis.linearity_correction(oc_pha_p, a, b)\n lc_pha_n = Analysis.linearity_correction(oc_pha_n, a, b)\n\n if savedat:\n np.savetxt('%s-linearity.dat' % session, array([pha_line_center[0]]) if ignorekb else pha_line_center[np.newaxis,:],\n header='%sKa PHA' % atom if ignorekb else '%sKa PHA, %sKb PHA' % (atom, atom), delimiter='\\t')\n \n if plotting:\n figure()\n x = np.linspace(0, 7e3)\n if ignorekb:\n plot(line_energy[0]\/1e3, pha_line_center[0], '+', color='b')\n plot(x\/1e3, x*b, 'r--')\n else:\n plot(line_energy\/1e3, pha_line_center, '+', color='b')\n plot(x\/1e3, x**2*a+x*b, 'r--')\n xlim((0, 7))\n 
xlabel('Energy$\\quad$(keV)')\n ylabel('PHA$\\quad$(a.u.)')\n tight_layout()\n savefig('%s-linearity.pdf' % session)\n \n # Energy Spectrum\n if plotting:\n figure()\n hcount, hbin, hpatch = hist(lc_pha_p[lc_pha_p==lc_pha_p]\/1e3, bins=7000\/binsize, histtype='stepfilled', color='y')\n xlim(0, 7)\n xlabel('Energy$\\quad$(keV)')\n ylabel('Count')\n tight_layout()\n savefig('%s-spec.pdf' % session)\n\n if savedat:\n hcount, hbin = np.histogram(lc_pha_p[lc_pha_p==lc_pha_p]\/1e3, bins=7000\/binsize)\n np.savetxt('%s-spec.dat' % session, np.vstack(((hbin[1:]+hbin[:-1])\/2, hcount)).T,\n header='Energy (keV), Count', delimiter='\\t')\n \n # Line fitting\n def _line_fit(data, min, line):\n\n # Fit\n (dE, width), (dE_error, width_error), e = Analysis.fit(data, binsize=binsize, min=min, line=line, shift=shift, method=method)\n\n if method == \"cs\":\n chi_squared, dof = e\n\n if method in (\"mle\", \"ls\"):\n print \"%s: %.2f +\/- %.2f eV @ Ec%+.2f eV\" \\\n % (line, width, width_error, dE)\n elif method == \"cs\":\n print \"%s: %.2f +\/- %.2f eV @ Ec%+.2f eV (Red. chi^2 = %.1f\/%d = %.2f)\" \\\n % (line, width, width_error, dE, chi_squared, dof, chi_squared\/dof)\n\n return dE, width, width_error\n \n def _line_spectrum(data, min, line, dE, width, width_error):\n\n # Draw histogram\n n, bins = Analysis.histogram(data, binsize=binsize)\n\n if method in (\"cs\"):\n gn, gbins = Analysis.group_bin(n, bins, min=min)\n else:\n # No grouping in mle and ls\n gn, gbins = n, bins\n\n ngn = gn\/(np.diff(gbins))\n ngn_sigma = np.sqrt(gn)\/(np.diff(gbins))\n cbins = (gbins[1:]+gbins[:-1])\/2\n\n if plotting:\n figure()\n\n if width_error is not None:\n label = 'FWHM$=%.2f\\pm %.2f$ eV' % (width, width_error)\n else:\n label = 'FWHM$=%.2f$ eV (Fixed)' % width\n\n if method == \"cs\":\n errorbar(cbins, ngn, yerr=ngn_sigma, xerr=np.diff(gbins)\/2, capsize=0, ecolor='k', fmt=None, label=label)\n else:\n hist(data, bins=gbins, weights=np.ones(len(data))\/binsize, histtype='step', ec='k', label=label)\n\n E = np.linspace(bins.min(), bins.max(), 1000)\n\n model = Analysis.normalization(ngn, gbins, dE, width, line=line, shift=shift) \\\n * Analysis.line_model(E, dE, width, line=line, shift=shift, full=True)\n\n # Plot theoretical model\n plot(E, model[0], 'r-')\n\n # Plot fine structures\n for m in model[1:]:\n plot(E, m, 'b--')\n\n xlabel('Energy$\\quad$(eV)')\n ylabel('Normalized Count$\\quad$(count\/eV)')\n legend(frameon=False)\n\n ymin, ymax = ylim()\n ylim(ymin, ymax*1.1)\n tight_layout()\n\n savefig(\"%s-%s.pdf\" % (session, line))\n\n if savedat:\n np.savetxt('%s-%s.dat' % (session, line), np.vstack((cbins, gn)).T,\n header='Energy (keV), Count', delimiter='\\t')\n \n ## Ka\n ka = Analysis.ka(lc_pha_p, sigma=sigma)\n dE, width, width_error = _line_fit(ka, ka_min, \"%sKa\" % atom)\n _line_spectrum(ka, ka_min, \"%sKa\" % atom, dE, width, width_error)\n\n ## Kb\n kb = Analysis.kb(lc_pha_p, sigma=sigma)\n if kbfit:\n dE, width, width_error = _line_fit(kb, kb_min, \"%sKb\" % atom)\n else:\n width_error = None\n _line_spectrum(kb, kb_min, \"%sKb\" % atom, dE, width, width_error)\n\n ## Baseline\n f_pha_n = lc_pha_n[Filter.median_filter(lc_pha_n, sigma=sigma)]\n baseline = Analysis.sigma2fwhm(np.std(f_pha_n))\n print \"Baseline resolution: %.2f eV\" % baseline\n\n n, bins = Analysis.histogram(f_pha_n, binsize=binsize)\n \n if savedat:\n np.savetxt('%s-baseline.dat' % session, np.vstack(((bins[1:]+bins[:-1])\/2, n)).T,\n header='Energy (keV), Count', delimiter='\\t')\n\n if plotting:\n figure()\n label = 'FWHM$=%.2f$ 
eV' % baseline\n hist(f_pha_n, bins=bins, weights=np.ones(len(f_pha_n))\/binsize, histtype='step', ec='k', label=label)\n mu, sigma = norm.fit(f_pha_n)\n E = np.linspace(bins.min(), bins.max(), 1000)\n plot(E, norm.pdf(E, loc=mu, scale=sigma)*len(f_pha_n), 'r-')\n \n xlabel('Energy$\\quad$(eV)')\n ylabel('Normalized Count$\\quad$(count\/eV)')\n \n legend(frameon=False)\n \n tight_layout()\n \n savefig('%s-baseline.pdf' % session)","license":"mit"} {"repo_name":"mwv\/scikit-learn","path":"examples\/linear_model\/plot_sgd_loss_functions.py","copies":"249","size":"1095","content":"\"\"\"\n==========================\nSGD: convex loss functions\n==========================\n\nA plot that compares the various convex loss functions supported by\n:class:`sklearn.linear_model.SGDClassifier` .\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef modified_huber_loss(y_true, y_pred):\n z = y_pred * y_true\n loss = -4 * z\n loss[z >= -1] = (1 - z[z >= -1]) ** 2\n loss[z >= 1.] = 0\n return loss\n\n\nxmin, xmax = -4, 4\nxx = np.linspace(xmin, xmax, 100)\nplt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',\n label=\"Zero-one loss\")\nplt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',\n label=\"Hinge loss\")\nplt.plot(xx, -np.minimum(xx, 0), 'm-',\n label=\"Perceptron loss\")\nplt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',\n label=\"Log loss\")\nplt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',\n label=\"Squared hinge loss\")\nplt.plot(xx, modified_huber_loss(xx, 1), 'y--',\n label=\"Modified Huber loss\")\nplt.ylim((0, 8))\nplt.legend(loc=\"upper right\")\nplt.xlabel(r\"Decision function $f(x)$\")\nplt.ylabel(\"$L(y, f(x))$\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"openfisca\/openfisca-france-indirect-taxation","path":"openfisca_france_indirect_taxation\/examples\/transports\/plot_legislation\/plot_ticpe_taux_implicite.py","copies":"4","size":"2264","content":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 17 18:06:45 2015\n\n@author: thomas.douenne\n\nTICPE: Taxe int\u00e9rieure sur la consommation des produits \u00e9nerg\u00e9tiques\n\"\"\"\n\n# L'objectif de ce script est d'illustrer graphiquement l'\u00e9volution du taux implicite de la TICPE depuis 1993.\n# On \u00e9tudie ce taux pour le diesel, et pour les carburants sans plombs.\n\n# Import de modules g\u00e9n\u00e9raux\nfrom pandas import concat\n\n# Import de modules sp\u00e9cifiques \u00e0 Openfisca\nfrom openfisca_france_indirect_taxation.examples.utils_example import graph_builder_bar_list\nfrom openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_accises import get_accises_carburants\nfrom openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_tva import get_tva_taux_plein\nfrom openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_prix_carburants import \\\n get_prix_carburants\n\n# Appel des param\u00e8tres de la l\u00e9gislation et des prix\nticpe = ['ticpe_gazole', 'ticpe_super9598']\naccise_diesel = get_accises_carburants(ticpe)\nprix_ttc = ['diesel_ttc', 'super_95_ttc']\nprix_carburants = get_prix_carburants(prix_ttc)\ntva_taux_plein = get_tva_taux_plein()\n\n# Cr\u00e9ation d'une dataframe contenant ces param\u00e8tres\ndf_taux_implicite = concat([accise_diesel, prix_carburants, tva_taux_plein], axis = 1)\ndf_taux_implicite.rename(columns = {'value': 'taux plein tva'}, inplace = True)\n\n# A partir des param\u00e8tres, calcul des taux de taxation implicites\ndf_taux_implicite['taux_implicite_diesel'] = (\n 
df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva']) \/\n (df_taux_implicite['prix diesel ttc'] -\n (df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva'])))\n )\n\ndf_taux_implicite['taux_implicite_sp95'] = (\n df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva']) \/\n (df_taux_implicite['prix super 95 ttc'] -\n (df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva'])))\n )\n\ndf_taux_implicite = df_taux_implicite.dropna()\n\n# R\u00e9alisation des graphiques\ngraph_builder_bar_list(df_taux_implicite['taux_implicite_diesel'], 1, 1)\ngraph_builder_bar_list(df_taux_implicite['taux_implicite_sp95'], 1, 1)\n","license":"agpl-3.0"} {"repo_name":"jmontgom10\/Mimir_pyPol","path":"oldCode\/04b_avgBAABditherHWPimages.py","copies":"1","size":"17054","content":"# -*- coding: utf-8 -*-\n\"\"\"\nCombines all the images for a given (TARGET, FILTER, HWP) combination to\nproduce a single, average image.\n\nEstimates the sky background level of the on-target position at the time of the\non-target observation using a bracketing pair of off-target observations through\nthe same HWP polaroid rotation value. Subtracts this background level from\neach on-target image to produce background free images. Applies an airmass\ncorrection to each image, and combines these final image to produce a background\nfree, airmass corrected, average image.\n\"\"\"\n\n# Core imports\nimport os\nimport sys\nimport copy\nimport warnings\n\n# Import scipy\/numpy packages\nimport numpy as np\nfrom scipy import ndimage\n\n# Import astropy packages\nfrom astropy.table import Table\nimport astropy.units as u\nfrom astropy.convolution import Gaussian2DKernel\nfrom astropy.modeling import models, fitting\nfrom astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats\nfrom photutils import (make_source_mask,\n MedianBackground, SigmaClip, Background2D)\n\n# Import plotting utilities\nfrom matplotlib import pyplot as plt\n\n# Add the AstroImage class\nimport astroimage as ai\n\n# Add the header handler to the BaseImage class\nfrom Mimir_header_handler import Mimir_header_handler\nai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)\nai.set_instrument('mimir')\n\n#==============================================================================\n# *********************** CUSTOM USER CODE ************************************\n# this is where the user specifies where the raw data is stored\n# and some of the subdirectory structure to find the actual .FITS images\n#==============================================================================\n\n# This is a list of targets for which to process each subgroup (observational\n# group... never spanning multiple nights, etc...) instead of combining into a\n# single \"metagroup\" for all observations of that target. The default behavior\n# is to go ahead and combine everything into a single, large \"metagroup\". 
The\n# calibration data should probably not be processed as a metagroup though.\nprocessSubGroupList = []\nprocessSubGroupList = [t.upper() for t in processSubGroupList]\n\n# Define the location of the PPOL reduced data to be read and worked on\nPPOL_data = 'C:\\\\Users\\\\Jordan\\\\FITS_data\\\\Mimir_data\\\\PPOL_Reduced\\\\201611\\\\'\nS3_dir = os.path.join(PPOL_data, 'S3_Astrometry')\n\n# This is the location where all pyPol data will be saved\npyPol_data = 'C:\\\\Users\\\\Jordan\\\\FITS_data\\\\Mimir_data\\\\pyPol_Reduced\\\\201611'\n\n# This is the location of the previously generated masks (step 4)\nmaskDir = os.path.join(pyPol_data, 'Masks')\n\n# Setup new directory for polarimetry data\npolarimetryDir = os.path.join(pyPol_data, 'Polarimetry')\nif (not os.path.isdir(polarimetryDir)):\n os.mkdir(polarimetryDir, 0o755)\n\nHWPDir = os.path.join(polarimetryDir, 'HWPImgs')\nif (not os.path.isdir(HWPDir)):\n os.mkdir(HWPDir, 0o755)\n\nbkgPlotDir = os.path.join(HWPDir, 'bkgPlots')\nif (not os.path.isdir(bkgPlotDir)):\n os.mkdir(bkgPlotDir, 0o755)\n\n# # Setup PRISM detector properties\n# read_noise = 13.0 # electrons\n# effective_gain = 3.3 # electrons\/ADU\n\n#########\n### Establish the atmospheric extinction (magnitudes\/airmass)\n#########\n# Following table from Hu (2011)\n# Data from Gaomeigu Observational Station\n# Passband | K'(lambda) [mag\/airmass] | K'' [mag\/(color*airmass)]\n# U\t 0.560 +\/- 0.023 0.061 +\/- 0.004\n# B 0.336 +\/- 0.021 0.012 +\/- 0.003\n# V 0.198 +\/- 0.024 -0.015 +\/- 0.004\n# R 0.142 +\/- 0.021 -0.067 +\/- 0.005\n# I 0.093 +\/- 0.020 0.023 +\/- 0.006\n\n\n# Following table from Schmude (1994)\n# Data from Texas A & M University Observatory\n# Passband | K(lambda) [mag\/airmass] | dispersion on K(lambda)\n# U\t 0.60 +\/- 0.05 0.120\n# B 0.40 +\/- 0.06 0.165\n# V 0.26 +\/- 0.03 0.084\n# R 0.19 +\/- 0.03 0.068\n# I 0.16 +\/- 0.02 0.055\n\n# TODO: Ask Dan about atmospheric extinction from airmass at NIR\nkappa = dict(zip(['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K' ],\n [0.60, 0.40, 0.26, 0.19, 0.16, 0.05, 0.01, 0.005]))\n\n\n# Read in the indexFile data and select the filenames\nprint('\\nReading file index from disk')\nindexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')\nfileIndex = Table.read(indexFile, format='ascii.csv')\n\n# Determine which parts of the fileIndex pertain to HEX dither science images\nuseFiles = np.logical_and(\n fileIndex['USE'] == 1,\n fileIndex['DITHER_TYPE'] == 'ABBA'\n)\nuseFileRows = np.where(useFiles)\n\n# Cull the file index to only include files selected for use\nfileIndex = fileIndex[useFileRows]\n\n# Define an approximate pixel scale\npixScale = 0.5789*(u.arcsec\/u.pixel)\n\n# TODO: implement a FWHM seeing cut... not yet working because PSF getter seems\n# to be malfunctioning in step 2\n#\n#\n# # Loop through each unique GROUP_ID and test for bad seeing conditions.\n# groupByID = fileIndex.group_by(['GROUP_ID'])\n# for subGroup in groupByID.groups:\n# # Grab the FWHM values for this subGroup\n# thisFWHMs = subGroup['FWHM']*u.pixel\n#\n# # Grab the median and standard deviation of the seeing for this subgroup\n# medianSeeing = np.median(thisFWHMs)\n# stdSeeing = np.std(thisFWHMs)\n#\n# # Find bad FWHM values\n# badFWHMs = np.logical_not(np.isfinite(subGroup['FWHM']))\n# badFWHMs = np.logical_or(\n# badFWHMs,\n# thisFWHMs <= 0\n# )\n# badFWHM = np.logical_and(\n# badFWHM,\n# thisFWHMs > 2.0*u.arcsec\n# )\n# import pdb; pdb.set_trace()\n\n# Group the fileIndex by...\n# 1. Target\n# 2. 
Waveband\nfileIndexByTarget = fileIndex.group_by(['TARGET', 'FILTER'])\n\n# Loop through each group\nfor group in fileIndexByTarget.groups:\n # Grab the current group information\n thisTarget = str(np.unique(group['TARGET'].data)[0])\n thisFilter = str(np.unique(group['FILTER'].data)[0])\n\n # # Skip the Merope nebula for now... not of primary scientific importance\n # if thisTarget == 'MEROPE': continue\n\n # Update the user on processing status\n print('\\nProcessing images for')\n print('Target : {0}'.format(thisTarget))\n print('Filter : {0}'.format(thisFilter))\n\n # Grab the atmospheric extinction coefficient for this wavelength\n thisKappa = kappa[thisFilter]\n\n # Further divide this group by its constituent HWP values\n indexByPolAng = group.group_by(['IPPA'])\n\n # Loop over each of the HWP values, as these are independent from\n # eachother and should be treated entirely separately from eachother.\n for IPPAgroup in indexByPolAng.groups:\n # Grab the current HWP information\n thisIPPA = np.unique(IPPAgroup['IPPA'].data)[0]\n\n # Update the user on processing status\n print('\\tIPPA : {0}'.format(thisIPPA))\n\n # For ABBA dithers, we need to compute the background levels on a\n # sub-group basis. If this target has not been selected for subGroup\n # averaging, then simply append the background subtracted images to a\n # cumulative list of images to align and average.\n\n # Initalize an image list to store all the images for this\n # (target, filter, pol-ang) combination\n imgList = []\n\n indexByGroupID = IPPAgroup.group_by(['GROUP_ID'])\n for subGroup in indexByGroupID.groups:\n # Grab the numae of this subGroup\n thisSubGroup = str(np.unique(subGroup['OBJECT'])[0])\n\n # if (thisSubGroup != 'NGC2023_R1') and (thisSubGroup != 'NGC2023_R2'): continue\n\n # Construct the output file name and test if it alread exsits.\n if thisTarget in processSubGroupList:\n outFile = '_'.join([thisTarget, thisSubGroup, str(thisIPPA)])\n outFile = os.path.join(HWPDir, outFile) + '.fits'\n elif thisTarget not in processSubGroupList:\n outFile = '_'.join([thisTarget, thisFilter, str(thisIPPA)])\n outFile = os.path.join(HWPDir, outFile) + '.fits'\n\n # Test if this file has already been constructed and either skip\n # this subgroup or break out of the subgroup loop.\n if os.path.isfile(outFile):\n print('\\t\\tFile {0} already exists...'.format(os.path.basename(outFile)))\n if thisTarget in processSubGroupList:\n continue\n elif thisTarget not in processSubGroupList:\n break\n\n # Update the user on the current execution status\n print('\\t\\tProcessing images for subgroup {0}'.format(thisSubGroup))\n\n # Initalize lists to store the A and B images.\n AimgList = []\n BimgList = []\n\n # Initalize a list to store the off-target sky background levels\n BbkgList = []\n\n # Initilaze lists to store the times of observation\n AdatetimeList = []\n BdatetimeList = []\n\n # Read in all the images for this subgroup\n progressString = '\\t\\tNumber of Images : {0}'\n for iFile, filename in enumerate(subGroup['FILENAME']):\n # Update the user on processing status\n print(progressString.format(iFile+1), end='\\r')\n\n # Read in a temporary compy of this image\n PPOL_file = os.path.join(S3_dir, filename)\n tmpImg = ai.reduced.ReducedScience.read(PPOL_file)\n\n # Crop the edges of this image\n ny, nx = tmpImg.shape\n binningArray = np.array(tmpImg.binning)\n\n # Compute the amount to crop to get a 1000 x 1000 image\n cy, cx = (ny - 1000, nx - 1000)\n\n # Compute the crop boundaries and apply them\n lf = 
np.int(np.round(0.5*cx))\n rt = lf + 1000\n bt = np.int(np.round(0.5*cy))\n tp = bt + 1000\n\n tmpImg = tmpImg[bt:tp, lf:rt]\n\n # Grab the on-off target value for this image\n thisAB = subGroup['AB'][iFile]\n\n # Place the image in a list and store required background values\n if thisAB == 'B':\n # Place B images in the BimgList\n BimgList.append(tmpImg)\n\n # Place the median value of this off-target image in list\n mask = make_source_mask(\n tmpImg.data, snr=2, npixels=5, dilate_size=11\n )\n mean, median, std = sigma_clipped_stats(\n tmpImg.data, sigma=3.0, mask=mask\n )\n BbkgList.append(median)\n\n # Place the time of this image in a list of time values\n BdatetimeList.append(tmpImg.julianDate)\n\n if thisAB == 'A':\n # Read in any associated masks and store them.\n maskFile = os.path.join(maskDir, os.path.basename(filename))\n\n # If there is a mask for this file, then apply it!\n if os.path.isfile(maskFile):\n # Read in the mask file\n tmpMask = ai.reduced.ReducedScience.read(maskFile)\n\n # Crop the mask to match the shape of the original image\n tmpMask = tmpMask[cy:ny-cy, cx:nx-cx]\n\n # Grab the data to be masked\n tmpData = tmpImg.data\n\n # Mask the data and put it back into the tmpImg\n maskInds = np.where(tmpMask.data)\n tmpData[maskInds] = np.NaN\n tmpImg.data = tmpData\n\n # Place B images in the BimgList\n AimgList.append(tmpImg)\n\n # Place the time of this image in a list of time values\n AdatetimeList.append(tmpImg.julianDate)\n\n # Create a new line for shell output\n print('')\n\n # Construct an image stack of the off-target images\n BimageStack = ai.utilitywrappers.ImageStack(BimgList)\n\n # Build a supersky image from these off-target images\n superskyImage = BimageStack.produce_supersky()\n\n import pdb; pdb.set_trace()\n\n # Locate regions outside of a 5% deviation\n tmpSuperskyData = superskyImage.data\n maskedPix = np.abs(tmpSuperskyData - 1.0) > 0.05\n\n # Get rid of the small stuff and expand the big stuff\n maskedPix = ndimage.binary_opening(maskedPix, iterations=2)\n maskedPix = ndimage.binary_closing(maskedPix, iterations=2)\n maskedPix = ndimage.binary_dilation(maskedPix, iterations=4)\n\n # TODO: Make the box_size and filter_size sensitive to binning.\n binningArray = np.array(superskyImage.binning)\n box_size = tuple((100\/binningArray).astype(int))\n filter_size = tuple((10\/binningArray).astype(int))\n\n # Setup the sigma clipping and median background estimators\n sigma_clip = SigmaClip(sigma=3., iters=10)\n bkg_estimator = MedianBackground()\n\n # Compute a smoothed background image\n bkgData = Background2D(superskyImage.data,\n box_size=box_size, filter_size=filter_size, mask=maskedPix,\n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n\n # Construct a smoothed supersky image object\n smoothedSuperskyImage = ai.reduced.ReducedScience(\n bkgData.background\/bkgData.background_median,\n uncertainty = bkgData.background_rms,\n properties={'unit':u.dimensionless_unscaled}\n )\n\n # Interpolate background values to A times\n AbkgList = np.interp(\n AdatetimeList,\n BdatetimeList,\n BbkgList,\n left=-1e6,\n right=-1e6\n )\n\n # Cut out any extrapolated data (and corresponding images)\n goodInds = np.where(AbkgList > -1e5)\n AimgList = np.array(AimgList)[goodInds]\n AdatetimeList = np.array(AdatetimeList)[goodInds]\n AbkgList = AbkgList[goodInds]\n\n AsubtractedList = []\n # Loop through the on-target images and subtract background values\n for Aimg, Abkg in zip(AimgList, AbkgList):\n # Subtract the interpolated background values from the A images\n 
tmpImg = Aimg - smoothedSuperskyImage*(Abkg*Aimg.unit)\n\n # Apply an airmass correction\n tmpImg = tmpImg.correct_airmass(thisKappa)\n\n # Append the subtracted and masked image to the list.\n AsubtractedList.append(tmpImg)\n\n # Now that the images have been fully processed, pause to generate\n # a plot to store in the \"background plots\" folder. These plots\n # constitute a good sanity check on background subtraction.\n plt.plot(BdatetimeList, BbkgList, '-ob')\n plt.scatter(AdatetimeList, AbkgList, marker='o', facecolor='r')\n plt.xlabel('Julian Date')\n plt.ylabel('Background Value [ADU]')\n figName = '_'.join([thisTarget, thisSubGroup, str(thisIPPA)])\n figName = os.path.join(bkgPlotDir, figName) + '.png'\n plt.savefig(figName, dpi=300)\n plt.close('all')\n\n # Here is where I need to decide if each subgroup image should be\n # computed or if I should just continue with the loop.\n if thisTarget.upper() in processSubGroupList:\n # Construct an image combiner for the A images\n AimgStack = ai.utilitywrappers.ImageStack(AsubtractedList)\n\n # Align the images\n AimgStack.align_images_with_wcs(\n subPixel=False,\n padding=np.NaN\n )\n\n # Combine the images\n AoutImg = imgStack.combine_images()\n\n # Save the image\n AoutImg.write(outFile, dtype=np.float64)\n else:\n # Extend the imgList variable with background corrected images\n imgList.extend(AsubtractedList)\n\n\n if len(imgList) > 0:\n # At the exit of the loop, process ALL the files from ALL the groups\n # Construct an image combiner for the A images\n imgStack = ai.utilitywrappers.ImageStack(imgList)\n\n # Align the images\n imgStack.align_images_with_wcs(\n subPixel=False,\n padding=np.NaN\n )\n\n # Combine the images\n outImg = imgStack.combine_images()\n\n # Save the image\n outImg.write(outFile, dtype=np.float64)\n\nprint('\\nDone computing average images!')\n","license":"mit"} {"repo_name":"jldbc\/pybaseball","path":"pybaseball\/standings.py","copies":"1","size":"3820","content":"from typing import List, Optional\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup, Comment, PageElement, ResultSet\n\nfrom . 
import cache\nfrom .utils import most_recent_season\n\n\ndef get_soup(year: int) -> BeautifulSoup:\n url = f'http:\/\/www.baseball-reference.com\/leagues\/MLB\/{year}-standings.shtml'\n s = requests.get(url).content\n return BeautifulSoup(s, \"lxml\")\n\ndef get_tables(soup: BeautifulSoup, season: int) -> List[pd.DataFrame]:\n datasets = []\n if season >= 1969:\n tables: List[PageElement] = soup.find_all('table')\n if season == 1981:\n # For some reason BRef has 1981 broken down by halves and overall\n # https:\/\/www.baseball-reference.com\/leagues\/MLB\/1981-standings.shtml\n tables = [x for x in tables if 'overall' in x.get('id', '')]\n for table in tables:\n data = []\n headings: List[PageElement] = [th.get_text() for th in table.find(\"tr\").find_all(\"th\")]\n data.append(headings)\n table_body: PageElement = table.find('tbody')\n rows: List[PageElement] = table_body.find_all('tr')\n for row in rows:\n cols: List[PageElement] = row.find_all('td')\n cols_text: List[str] = [ele.text.strip() for ele in cols]\n cols_text.insert(0, row.find_all('a')[0].text.strip()) # team name\n data.append([ele for ele in cols_text if ele])\n datasets.append(data)\n else:\n data = []\n table = soup.find('table')\n headings = [th.get_text() for th in table.find(\"tr\").find_all(\"th\")]\n headings[0] = \"Name\"\n if season >= 1930:\n for _ in range(15):\n headings.pop()\n elif season >= 1876:\n for _ in range(14):\n headings.pop()\n else:\n for _ in range(16):\n headings.pop()\n data.append(headings)\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n for row in rows:\n if row.find_all('a') == []:\n continue\n cols = row.find_all('td')\n if season >= 1930:\n for _ in range(15):\n cols.pop()\n elif season >= 1876:\n for _ in range(14):\n cols.pop()\n else:\n for _ in range(16):\n cols.pop()\n cols = [ele.text.strip() for ele in cols]\n cols.insert(0,row.find_all('a')[0].text.strip()) # team name\n data.append([ele for ele in cols if ele])\n datasets.append(data)\n #convert list-of-lists to dataframes\n for idx in range(len(datasets)):\n datasets[idx] = pd.DataFrame(datasets[idx])\n return datasets #returns a list of dataframes\n\n\n@cache.df_cache()\ndef standings(season:Optional[int] = None) -> pd.DataFrame:\n # get most recent standings if date not specified\n if season is None:\n season = most_recent_season()\n if season < 1876:\n raise ValueError(\n \"This query currently only returns standings until the 1876 season. 
\"\n \"Try looking at years from 1876 to present.\"\n )\n\n # retrieve html from baseball reference\n soup = get_soup(season)\n if season >= 1969:\n raw_tables = get_tables(soup, season)\n else:\n t = [x for x in soup.find_all(string=lambda text:isinstance(text,Comment)) if 'expanded_standings_overall' in x]\n code = BeautifulSoup(t[0], \"lxml\")\n raw_tables = get_tables(code, season)\n tables = [pd.DataFrame(table) for table in raw_tables]\n for idx in range(len(tables)):\n tables[idx] = tables[idx].rename(columns=tables[idx].iloc[0])\n tables[idx] = tables[idx].reindex(tables[idx].index.drop(0))\n return tables\n","license":"mit"} {"repo_name":"BrainTech\/openbci","path":"obci\/analysis\/csp\/MLogit.py","copies":"1","size":"11792","content":"#!\/usr\/bin\/env python\n#-*- coding:utf-8 -*-\n\"\"\"This is a class for Multinomial Logit Regression\n\nClass uses scipy.optimize package for minimalization of a cost function.\nThe gradient of the cost function is passed to the minimizer.\n\nPiotr Milanowski, November 2011, Warsaw\n\"\"\"\n\nfrom scipy.optimize import fmin_ncg, fmin_bfgs, fmin\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef mix(x1, x2, deg=6):\n out = np.zeros([len(x1), sum(range(deg+2))])\n k = 0\n for i in xrange(deg+1):\n for j in range(i+1):\n out[:,k] = x1**(i-j)*x2**(j)\n k += 1\n\n return out\n\nclass logit(object):\n \"\"\"This is a class for a normal two-class logistic regression\n\n The hypothesis of this regression is a sigmoid (logistic, logit) function.\n It returns the probability of the data belonging to the first class.\n\n The minimalization of a cost function is based on NCG algorithm from scipy.optimize package.\n\n The regression can account the regularization factors.\n \"\"\"\n def __init__(self, data, classes, labels=None):\n \"\"\"Initialization of data\n \n A column of ones is added to the data array.\n\n Parameters:\n ===========\n data : 2darray\n NxM array. Rows of this array represent data points, columns represent features.\n classes : 1darray\n a N dimensional vector of classes. Each class is represented by either 0 or 1.\n class_dict [= None] : dictionary\n a 2 element dictionary that maps classses to their names.\n\n Example:\n =========\n >>>X = np.random.rand(20, 4) #data\n >>>Y = np.random.randint(0,2,20) #classes\n >>>labels = ['class 1','class 2']\n >>>MLogit.logit(X, Y, labels)\n \"\"\"\n self.dataNo, self.featureNo = data.shape\n if len(classes) != self.dataNo:\n raise ValueError, 'Not every data point has its target lable!'\n #Adding a columns of 1s and normalizing data - NO NORMALIZATION NEEDED\n self.X = np.concatenate((np.ones([self.dataNo, 1]), data), axis = 1)\n self.Y = classes\n\n def _sigmoid(self, z):\n \"\"\"This returns the value of a sigmoid function.\n\n Sigmoid\/Logistic\/Logit finction looks like this:\n f(z) = over{1}{1 + exp(-z)}\n\n Parameters:\n ===========\n z : ndarray\n the parameter of the function\n Returns:\n sig : ndarray\n values of sigmoid function at z\n \"\"\"\n return 1\/(1 + np.exp(-z))\n\n def cost_function(self, theta, reg = 0):\n \"\"\"The cost function of logit regression model\n\n It looks like this:\n J(theta) = -((1\/M)*sum_{i=1}^{M}(y_i*log(h(theta;x_i))+(1-y_i)*log(1-h(theta;x_i)))) +\n + (reg\/2*m)sum_{i=1}^{N}(theta_i)^2\n Parameters:\n ===========\n theta : 1darray\n the array of parameters. It's a (N+1) dimensional vector\n reg [= 0] : float\n the regularization parameter. 
This parameter penalizes theta being too big (overfitting)\n Returns:\n ========\n J : float\n the value of cost function for given theta\n \"\"\"\n\n z = self._sigmoid(np.dot(self.X, theta))\n regular = (reg\/(2.0*self.dataNo))*sum(theta[1:]*theta[1:])\n J = self.Y * np.log(z) + (1 - self.Y)*np.log(1 - z)\n J = -(1.0 \/ self.dataNo) * sum(J)\n return regular + J\n\n def gradient_function(self, theta, reg = 0):\n \"\"\"The gradient of cost function\n\n The gradient looks like this:\n g[0] = 1\/N * sum_{i=1}^{N}(h(theta;x_i) - y_i)*x_i^0 \n g[j] = 1\/N * sum_{i=1}^{N}(h(theta;x_i) - y_i)*x_i^j - theta[j]*reg\/N\n Parameters:\n ===========\n theta : 1darray\n the vector of parameters\n reg : float\n the regularization parameter\n Returns:\n ========\n fprime : 1darray\n the gradient of cost function.\n \"\"\"\n gradient = np.zeros(self.featureNo + 1)\n N = 1.0 \/ self.dataNo\n z = np.dot(self.X, theta)\n cost = self._sigmoid(z) - self.Y\n# gradient[0] = N * sum(cost * self.X[:, 0])\n# for j in xrange(self.featureNo):\n# gradient[j] = N * sum(cost * self.X[:, j]) - reg * N * theta[j]\n gradient = N * np.dot(cost, self.X)\n gradient[1:] += reg * N * theta[1:]\n return gradient\n\n def fit(self, maxiter, reg = 0, initial_gues = None):\n \"\"\"Minimizing function\n\n Based on NCG function from scipy.optimize package\n\n Parameters:\n ===========\n maxiter : int\n maximal number of iterations\n reg [= 0] : float\n regularization parameter\n initial_gueas [= None] : 1darray\n a vector of #features + 1 size. If None zeros will be asumed.\n Returns:\n ========\n theta : 1darray\n optimal model parameters\n \"\"\"\n if initial_gues is None:\n initial_gues = np.zeros(self.featureNo + 1)\n\n out = fmin_bfgs(self.cost_function, initial_gues, \\\n self.gradient_function, args = ([reg]))\n self.theta = out\n return out\n\n def predict(self, x, val=0.9):\n \"\"\"For prediction of x\n\n Returns predicted probability of x being in class 1\n \"\"\"\n x = np.insert(x, 0, 1) #inserting one at the beginning\n z = np.dot(x, self.theta)\n #if self._sigmoid(z) >=val:\n #return 1\n #else:\n #return 0\n return self._sigmoid(z)\n \n def plot_features(self, show=True):\n y = self.Y\n idx = np.argsort(y)\n x = self.X[idx, :]\n y = y[idx]\n N, feats = x.shape\n if feats == 3:\n idx1 = np.where(y==1)[0][0]\n x1 = x[:idx1, :]\n x2 = x[idx1:, :]\n plt.plot(x1[:,1],x1[:,2],'ro',x2[:,1],x2[:,2],'go')\n for x in np.arange(-5, 5, 0.5):\n for y in np.arange(-3, 3, 0.5):\n if self.predict(np.array([x,y])) <=0.5:\n plt.plot(x,y,'r+')\n else:\n plt.plot(x,y,'g+')\n plt.legend(('Class 0','Class 1'))\n if show:\n plt.show()\n elif feats == 2:\n idx1 = np.where(y==1)[0][0]\n x1 = x[:idx1, :]\n x2 = x[idx1:, :]\n for x in np.arange(x1.min(), x1.max(), 0.1):\n for y in np.arange(x2.min(), x2.max(), 0.1):\n if self.predict(np.array([x,y])) <=0.01:\n plt.plot(x,y,'r+')\n else:\n plt.plot(x,y,'g+')\n plt.plot(x1[:,1],'ro',x2[:,1],'go')\n if show:\n plt.show()\n else:\n print \"More than 2 dimmensions\",x.shape \n\n# def plot_fitted(self):\n# N, feats = self.X.shape\n# if feats == 3:\n# x1 = se\n\n def __normalization(self, data):\n \"\"\"Function normalizes the data\n\n Normalization is done by subtracting the mean of each column from each column member\n and dividing by the column variance.\n\n Parameters:\n ===========\n data : 2darray\n the data array\n Returns:\n ========\n norms : 2darray\n normalized values\n \"\"\"\n mean = data.mean(axis = 0)\n variance = data.std(axis = 0)\n return (data - mean) \/ variance\n\nclass 
mlogit(logit):\n \"\"\"This is a multivariate variation of logit model\n\n \"\"\"\n def __init__(self, data, classes, labels=None):\n \"\"\"See logit description\"\"\"\n super(mlogit, self).__init__(data, classes, labels)\n self.classesNo, classesIdx = np.unique(classes, return_inverse = True)\n self.count_table = np.zeros([len(classes), len(self.classesNo)])\n self.count_table[range(len(classes)), classesIdx] = 1.0\n\n def fit(self, maxiter, reg = 0, initial_gues = None):\n \"\"\"Fitting logit model for multiclass case\"\"\"\n theta = np.zeros([self.featureNo + 1, len(self.classesNo)])\n for i in range(len(self.classesNo)):\n self.Y = self.count_table[:,i]\n theta[:, i] = super(mlogit, self).fit(maxiter, reg = reg, initial_gues = initial_gues)\n self.theta = theta\n return theta\n\n def predict(self, x, val=0.9):\n \"\"\"Class prediction\"\"\"\n x = np.insert(x, 0, 1)\n z = np.dot(x, self.theta)\n probs = super(mlogit, self)._sigmoid(z)\n idx = np.argmax(probs)\n if probs[idx] >= val:\n return self.classesNo[idx]\n else:\n return None\n\n def plot_features(self):\n cn = len(self.classesNo)\n idx = np.argsort(self.Y)\n y = self.Y[idx]\n x = self.X[idx,:]\n classes = []\n if x.shape[1] == 3:\n for i in range(cn):\n beg, end = np.where(y==i)[0][[0,-1]]\n plt.plot(x[beg:end+1, 1], x[beg:end +1, 2],'o')\n classes.append('Class'+str(i))\n plt.legend(classes)\n plt.show()\n else:\n print \"More than 2 dimmesions\"\n \n#class diagnostics(object):\n\n# def __init__(self, classifier_obj, division=[0.6, 0.2, 0.2]):\n# self.obj = classifier_obj\n# self.div = division\n# self.N, self.ft = self.obj.dataNo, self.obj.featureNo\n# self.cvNo = self.N * division[1]\n# self.testNo = self.N * division[2]\n# self.trainNo = self.N * division[0]\n\n# def diagnose(self, iters, reg, odrer=1, val=0.9):\n# idx = np.linspace(0, self.N-1, self.N)\n# TP, FP, TN, FN\n# train_ok = {'tp':0,'fp':0,'fn':0,'fp':0}\n# cv_ok = {'tp':0,'fp':0,'fn':0,'fp':0}\n# test_ok = {'tp':0,'fp':0,'fn':0,'fp':0}\n# X = self.obj.X\n# Y = self.obj.Y\n# for i in xrange(iters):\n# np.random.shuffle(idx)\n# train_set = X[idx[:self.trainNo], :]\n# cv_set = X[idx[self.trainNo:self.trainNo+self.cvNo], :]\n# test_set = X[idx[self.trainNo+self.cvNo:], :]\n# classes_train = Y[idx[:self.trainNo], :]\n# classes_cv = Y[idx[self.trainNo:self.trainNo+self.cvNo], :]\n# classes_test = Y[idx[self.trainNo+self.cvNo:], :]\n# Training\n# self.obj.X = train_set\n# self.obj.Y = classes_train\n# self.obj.fit(100)\n# for j, row in enumerate(train_set):\n# cl = self.obj.predict(row, val)\n# if cl == classes_train[j]:\n# train_ok['tp'] += 1\n# elif cl is None:\n# train_ok['fn'] += 1\n# else:\n# train_ok['fp'] += 1\n# Crossvalidation\n# for j, row in enumerate(cv_set):\n# cl = self.obj.predict(row, val)\n# if cl == classes_cv[j]:\n# cv_ok['tp'] += 1\n# elif cl in None:\n# cv_ok['fn'] += 1\n# else:\n# cv_ok['fp'] += 1\n# Test set\n# for j, row in enumerate(test_set):\n# cl = self.obj.predict(row, val)\n# if cl == classes_test[j]:\n# test_ok['tp'] += 1\n# elif cl is None:\n# test_ok['fn'] += 1\n# else:\n# test_ok['fp'] += 1\n\n# def power_set(self, lst, l):\n# \"\"\"Create a powerset of a list for given length\"\"\"\n# r = [[]]\n# for e in lst:\n# r.extend([s + [e] for s in r])\n# return set([j for j in r if len(j) <= l])\n\n# def next_order(self, kernel, next_o):\n\n# def make_order(self, p):\n# init_featsNo = self.featNo\n\n","license":"gpl-3.0"} {"repo_name":"vortex-exoplanet\/VIP","path":"vip_hci\/negfc\/utils_negfc.py","copies":"2","size":"8821","content":"#! 
\/usr\/bin\/env python\n\n\"\"\"\nModule with post-processing related functions called from within the NFC\nalgorithm.\n\"\"\"\n\n__author__ = 'Carlos Alberto Gomez Gonzalez'\n__all__ = ['cube_planet_free']\n\nimport numpy as np\nfrom ..metrics import cube_inject_companions\nimport math\nfrom matplotlib.pyplot import plot, xlim, ylim, axes, gca, show\n\n\ndef cube_planet_free(planet_parameter, cube, angs, psfn, plsc, imlib='opencv',\n interpolation='lanczos4',transmission=None):\n \"\"\"\n Return a cube in which we have injected negative fake companion at the\n position\/flux given by planet_parameter.\n\n Parameters\n ----------\n planet_parameter: numpy.array or list\n The (r, theta, flux) for all known companions. For a 4d cube r, \n theta and flux must all be 1d arrays with length equal to cube.shape[0];\n i.e. planet_parameter should have shape: (n_pl,3,n_ch).\n cube: numpy.array\n The cube of fits images expressed as a numpy.array.\n angs: numpy.array\n The parallactic angle fits image expressed as a numpy.array.\n psfn: numpy.array\n The scaled psf expressed as a numpy.array.\n plsc: float\n The platescale, in arcsec per pixel.\n imlib : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n interpolation : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n\n Returns\n -------\n cpf : numpy.array\n The cube with negative companions injected at the position given in\n planet_parameter.\n\n \"\"\"\n cpf = np.zeros_like(cube)\n\n planet_parameter = np.array(planet_parameter)\n\n if cube.ndim == 4:\n if planet_parameter.shape[3] != cube.shape[0]:\n raise TypeError(\"Input planet parameter with wrong dimensions.\")\n \n for i in range(planet_parameter.shape[0]):\n if i == 0:\n cube_temp = cube\n else:\n cube_temp = cpf\n\n if cube.ndim == 4:\n for j in cube.shape[0]:\n cpf[j] = cube_inject_companions(cube_temp[j], psfn[j], angs,\n flevel=-planet_parameter[i, 2, j], \n plsc=plsc,\n rad_dists=[planet_parameter[i, 0, j]],\n n_branches=1, \n theta=planet_parameter[i, 1, j],\n imlib=imlib, \n interpolation=interpolation,\n verbose=False,\n transmission=transmission)\n else:\n cpf = cube_inject_companions(cube_temp, psfn, angs,\n flevel=-planet_parameter[i, 2], plsc=plsc,\n rad_dists=[planet_parameter[i, 0]],\n n_branches=1, theta=planet_parameter[i, 1],\n imlib=imlib, interpolation=interpolation,\n verbose=False, transmission=transmission)\n return cpf\n\n\ndef radial_to_eq(r=1, t=0, rError=0, tError=0, display=False):\n \"\"\" \n Convert the position given in (r,t) into \\delta RA and \\delta DEC, as \n well as the corresponding uncertainties. \n t = 0 deg (resp. 90 deg) points toward North (resp. East). 
\n\n Parameters\n ----------\n r: float\n The radial coordinate.\n t: float\n The angular coordinate.\n rError: float\n The error bar related to r.\n tError: float\n The error bar related to t.\n display: boolean, optional\n If True, a figure illustrating the error ellipse is displayed.\n \n Returns\n -------\n out : tuple\n ((RA, RA error), (DEC, DEC error))\n \n \"\"\" \n ra = (r * np.sin(math.radians(t)))\n dec = (r * np.cos(math.radians(t))) \n u, v = (ra, dec)\n \n nu = np.mod(np.pi\/2-math.radians(t), 2*np.pi)\n a, b = (rError,r*np.sin(math.radians(tError)))\n\n beta = np.linspace(0, 2*np.pi, 5000)\n x, y = (u + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu)),\n v + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu)))\n \n raErrorInf = u - np.amin(x)\n raErrorSup = np.amax(x) - u\n decErrorInf = v - np.amin(y)\n decErrorSup = np.amax(y) - v \n\n if display: \n plot(u,v,'ks',x,y,'r')\n plot((r+rError) * np.cos(nu), (r+rError) * np.sin(nu),'ob',\n (r-rError) * np.cos(nu), (r-rError) * np.sin(nu),'ob')\n plot(r * np.cos(nu+math.radians(tError)), \n r*np.sin(nu+math.radians(tError)),'ok')\n plot(r * np.cos(nu-math.radians(tError)), \n r*np.sin(nu-math.radians(tError)),'ok')\n plot(0,0,'og',np.cos(np.linspace(0,2*np.pi,10000)) * r, \n np.sin(np.linspace(0,2*np.pi,10000)) * r,'y')\n plot([0,r*np.cos(nu+math.radians(tError*0))],\n [0,r*np.sin(nu+math.radians(tError*0))],'k')\n axes().set_aspect('equal')\n lim = np.amax([a,b]) * 2.\n xlim([ra-lim,ra+lim])\n ylim([dec-lim,dec+lim])\n gca().invert_xaxis()\n show()\n \n return ((ra,np.mean([raErrorInf,raErrorSup])),\n (dec,np.mean([decErrorInf,decErrorSup]))) \n \n \ndef cart_to_polar(y, x, ceny=0, cenx=0):\n \"\"\"\n Convert cartesian into polar coordinates (r,theta) with \n respect to a given center (cenx,ceny).\n \n Parameters\n ----------\n x,y: float\n The cartesian coordinates.\n \n Returns\n -------\n \n out : tuple\n The polar coordinates (r,theta) with respect to the (cenx,ceny). \n Note that theta is given in degrees.\n \n \"\"\"\n r = np.sqrt((y-ceny)**2 + (x-cenx)**2)\n theta = np.degrees(np.arctan2(y-ceny, x-cenx)) \n \n return r, np.mod(theta,360)\n\n\ndef polar_to_cart(r, theta, ceny=0, cenx=0):\n \"\"\"\n Convert polar coordinates with respect to the center (cenx,ceny) into \n cartesian coordinates (x,y) with respect to the bottom left corner of the \n image..\n \n Parameters\n ----------\n r,theta: float\n The polar coordinates.\n \n Returns\n -------\n \n out : tuple\n The cartesian coordinates (x,y) with respect to the bottom left corner \n of the image..\n \n \"\"\"\n x = r*np.cos(np.deg2rad(theta)) + cenx\n y = r*np.sin(np.deg2rad(theta)) + ceny \n \n return x,y\n\n\ndef ds9index_to_polar(y, x, ceny=0, cenx=0):\n \"\"\"\n Convert pixel index read on image displayed with DS9 into polar coordinates \n (r,theta) with respect to a given center (cenx,ceny).\n \n Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an \n image M is displayed with DS9, the coordinates of the center of the pixel \n associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the\n center of the coordinate system. As a conclusion, when you read\n (x_ds9, y_ds9) on a image displayed with DS9, the corresponding position is\n (y-0.5, x-0.5) and the associated pixel value is\n M(np.floor(y)-1,np.floor(x)-1).\n \n Parameters\n ----------\n x,y: float\n The pixel index in DS9\n \n Returns\n -------\n \n out : tuple\n The polar coordinates (r,theta) with respect to the (cenx,ceny). 
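A quick self-contained consistency check for the two converters defined above; the underscored copies below simply mirror cart_to_polar and polar_to_cart so the snippet runs on NumPy alone:

import numpy as np

def _cart_to_polar(y, x, ceny=0, cenx=0):
    r = np.sqrt((y - ceny) ** 2 + (x - cenx) ** 2)
    return r, np.mod(np.degrees(np.arctan2(y - ceny, x - cenx)), 360)

def _polar_to_cart(r, theta, ceny=0, cenx=0):
    return (r * np.cos(np.deg2rad(theta)) + cenx,
            r * np.sin(np.deg2rad(theta)) + ceny)

# Cartesian -> polar -> cartesian should recover the original point.
r, t = _cart_to_polar(12.0, -5.0, ceny=50, cenx=50)
x, y = _polar_to_cart(r, t, ceny=50, cenx=50)
assert np.allclose([x, y], [-5.0, 12.0])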
\n Note that theta is given in degrees.\n \n \"\"\"\n r = np.sqrt((y-0.5-ceny)**2 + (x-0.5-cenx)**2)\n theta = np.degrees(np.arctan2(y-0.5-ceny, x-0.5-cenx))\n \n return r, np.mod(theta,360) \n \n \ndef polar_to_ds9index(r, theta, ceny=0, cenx=0):\n \"\"\"\n Convert position (r,theta) in an image with respect to a given center \n (cenx,ceny) into position in the image displayed with DS9.\n \n Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an \n image M is displayed with DS9, the coordinates of the center of the pixel \n associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the\n center of the coordinate system. As a conclusion, when you read\n (x_ds9, y_ds9) on a image displayed with DS9, the corresponding position is\n (y-0.5, x-0.5) and the associated pixel value is\n M(np.floor(y)-1,np.floor(x)-1).\n \n Parameters\n ----------\n x,y: float\n The pixel index in DS9\n \n Returns\n -------\n \n out : tuple\n The polar coordinates (r,theta) with respect to the (cenx,ceny). \n Note that theta is given in degrees.\n \n \"\"\"\n x_ds9 = r*np.cos(np.deg2rad(theta)) + 0.5 + cenx\n y_ds9 = r*np.sin(np.deg2rad(theta)) + 0.5 + ceny \n \n return x_ds9, y_ds9","license":"mit"} {"repo_name":"chaluemwut\/smcdemo","path":"demo_filter.py","copies":"1","size":"2602","content":"import pickle\nfrom feature_process import FeatureMapping\nimport feature_process\nfrom text_processing import TextProcessing\nfrom sklearn.cross_validation import train_test_split\n\nis_not_important = {9:0,\n13:0,\n18:0,\n19:0,\n23:0,\n28:0,\n29:0,\n33:0,\n34:0,\n37:0,\n40:0,\n44:0,\n46:0,\n50:0,\n55:0,\n59:0,\n61:0,\n62:0,\n63:0,\n72:0,\n73:0,\n78:0,\n84:0,\n86:0,\n88:0,\n97:0,\n98:0,\n103:0 \n}\n\ndef create_training_data():\n data_lst = pickle.load(open('data\/harvest.data', 'rb'))\n feature_process.feature_map['source'] = {'Google':1, 'Twitter for iPad':2, 'Echofon':3,\n 'Bitly':4, 'twitterfeed':5, 'Twitter for iPhone':6,\n 'Foursquare':7, 'Facebook':8, 'Twitter for Android':9,\n 'TweetDeck':10, 'Twitter Web Client':11}\n feature_process.feature_map['geo'] = ['None']\n feature_process.feature_map['place'] = ['None']\n feature_process.feature_map['verified'] = ['False']\n feature_process.feature_map['geo_enabled'] = ['False']\n y = []\n x = []\n for i in range(0, len(data_lst)): \n try:\n label = is_not_important[i]\n except Exception as e:\n label = 1\n \n data = data_lst[i]\n text = TextProcessing.process(data[0])\n source = FeatureMapping.mapping('source', data[1])\n re_tweet = data[2]\n geo = FeatureMapping.mapping_other('geo', data[3])\n place = FeatureMapping.mapping_other('place', data[4])\n hash_tag = data[5]\n media = data[6]\n verified = FeatureMapping.mapping_other('verified', data[7])\n follower = data[8]\n statues = data[9]\n desc = TextProcessing.process(data[10])\n friend = data[11]\n location = TextProcessing.process(data[12])\n geo_enabled = FeatureMapping.mapping_other('geo_enabled', data[13])\n \n y.append(label)\n x.append([text, source, re_tweet, geo, place, hash_tag, media, verified, follower, statues, desc, friend, location, geo_enabled])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.metrics import f1_score, accuracy_score\n clf = RandomForestClassifier()\n clf.fit(x_train, y_train)\n y_pred = clf.predict(x_test)\n fsc = f1_score(y_test, y_pred)\n acc = accuracy_score(y_test, y_pred)\n print 'f1-score : ',fsc\n print 'accuracy : ',acc\n print 
y_pred\n print y_test\n \nif __name__ == '__main__':\n create_training_data()\n \n ","license":"apache-2.0"} {"repo_name":"ChanChiChoi\/scikit-learn","path":"examples\/exercises\/plot_iris_exercise.py","copies":"323","size":"1602","content":"\"\"\"\n================================\nSVM Exercise\n================================\n\nA tutorial exercise for using different SVM kernels.\n\nThis exercise is used in the :ref:`using_kernels_tut` part of the\n:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.\n\"\"\"\nprint(__doc__)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 0, :2]\ny = y[y != 0]\n\nn_sample = len(X)\n\nnp.random.seed(0)\norder = np.random.permutation(n_sample)\nX = X[order]\ny = y[order].astype(np.float)\n\nX_train = X[:.9 * n_sample]\ny_train = y[:.9 * n_sample]\nX_test = X[.9 * n_sample:]\ny_test = y[.9 * n_sample:]\n\n# fit the model\nfor fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):\n clf = svm.SVC(kernel=kernel, gamma=10)\n clf.fit(X_train, y_train)\n\n plt.figure(fig_num)\n plt.clf()\n plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)\n\n # Circle out the test data\n plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)\n\n plt.axis('tight')\n x_min = X[:, 0].min()\n x_max = X[:, 0].max()\n y_min = X[:, 1].min()\n y_max = X[:, 1].max()\n\n XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\n Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(XX.shape)\n plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],\n levels=[-.5, 0, .5])\n\n plt.title(kernel)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"ianatpn\/nupictest","path":"external\/linux32\/lib\/python2.6\/site-packages\/matplotlib\/pylab.py","copies":"70","size":"10245","content":"\"\"\"\nThis is a procedural interface to the matplotlib object-oriented\nplotting library.\n\nThe following plotting commands are provided; the majority have\nMatlab(TM) analogs and similar argument.\n\n_Plotting commands\n acorr - plot the autocorrelation function\n annotate - annotate something in the figure\n arrow - add an arrow to the axes\n axes - Create a new axes\n axhline - draw a horizontal line across axes\n axvline - draw a vertical line across axes\n axhspan - draw a horizontal bar across axes\n axvspan - draw a vertical bar across axes\n axis - Set or return the current axis limits\n bar - make a bar chart\n barh - a horizontal bar chart\n broken_barh - a set of horizontal bars with gaps\n box - set the axes frame on\/off state\n boxplot - make a box and whisker plot\n cla - clear current axes\n clabel - label a contour plot\n clf - clear a figure window\n clim - adjust the color limits of the current image\n close - close a figure window\n colorbar - add a colorbar to the current figure\n cohere - make a plot of coherence\n contour - make a contour plot\n contourf - make a filled contour plot\n csd - make a plot of cross spectral density\n delaxes - delete an axes from the current figure\n draw - Force a redraw of the current figure\n errorbar - make an errorbar graph\n figlegend - make legend on the figure rather than the axes\n figimage - make a figure image\n figtext - add text in figure coords\n figure - create or change active figure\n fill - make filled polygons\n findobj - 
recursively find all objects matching some criteria\n gca - return the current axes\n gcf - return the current figure\n gci - get the current image, or None\n getp - get a handle graphics property\n grid - set whether gridding is on\n hist - make a histogram\n hold - set the axes hold state\n ioff - turn interaction mode off\n ion - turn interaction mode on\n isinteractive - return True if interaction mode is on\n imread - load image file into array\n imshow - plot image data\n ishold - return the hold state of the current axes\n legend - make an axes legend\n loglog - a log log plot\n matshow - display a matrix in a new figure preserving aspect\n pcolor - make a pseudocolor plot\n pcolormesh - make a pseudocolor plot using a quadrilateral mesh\n pie - make a pie chart\n plot - make a line plot\n plot_date - plot dates\n plotfile - plot column data from an ASCII tab\/space\/comma delimited file\n pie - pie charts\n polar - make a polar plot on a PolarAxes\n psd - make a plot of power spectral density\n quiver - make a direction field (arrows) plot\n rc - control the default params\n rgrids - customize the radial grids and labels for polar\n savefig - save the current figure\n scatter - make a scatter plot\n setp - set a handle graphics property\n semilogx - log x axis\n semilogy - log y axis\n show - show the figures\n specgram - a spectrogram plot\n spy - plot sparsity pattern using markers or image\n stem - make a stem plot\n subplot - make a subplot (numrows, numcols, axesnum)\n subplots_adjust - change the params controlling the subplot positions of current figure\n subplot_tool - launch the subplot configuration tool\n suptitle - add a figure title\n table - add a table to the plot\n text - add some text at location x,y to the current axes\n thetagrids - customize the radial theta grids and labels for polar\n title - add a title to the current axes\n xcorr - plot the autocorrelation function of x and y\n xlim - set\/get the xlimits\n ylim - set\/get the ylimits\n xticks - set\/get the xticks\n yticks - set\/get the yticks\n xlabel - add an xlabel to the current axes\n ylabel - add a ylabel to the current axes\n\n autumn - set the default colormap to autumn\n bone - set the default colormap to bone\n cool - set the default colormap to cool\n copper - set the default colormap to copper\n flag - set the default colormap to flag\n gray - set the default colormap to gray\n hot - set the default colormap to hot\n hsv - set the default colormap to hsv\n jet - set the default colormap to jet\n pink - set the default colormap to pink\n prism - set the default colormap to prism\n spring - set the default colormap to spring\n summer - set the default colormap to summer\n winter - set the default colormap to winter\n spectral - set the default colormap to spectral\n\n_Event handling\n\n connect - register an event handler\n disconnect - remove a connected event handler\n\n_Matrix commands\n\n cumprod - the cumulative product along a dimension\n cumsum - the cumulative sum along a dimension\n detrend - remove the mean or besdt fit line from an array\n diag - the k-th diagonal of matrix\n diff - the n-th differnce of an array\n eig - the eigenvalues and eigen vectors of v\n eye - a matrix where the k-th diagonal is ones, else zero\n find - return the indices where a condition is nonzero\n fliplr - flip the rows of a matrix up\/down\n flipud - flip the columns of a matrix left\/right\n linspace - a linear spaced vector of N values from min to max inclusive\n logspace - a log spaced vector of N 
values from min to max inclusive\n meshgrid - repeat x and y to make regular matrices\n ones - an array of ones\n rand - an array from the uniform distribution [0,1]\n randn - an array from the normal distribution\n rot90 - rotate matrix k*90 degress counterclockwise\n squeeze - squeeze an array removing any dimensions of length 1\n tri - a triangular matrix\n tril - a lower triangular matrix\n triu - an upper triangular matrix\n vander - the Vandermonde matrix of vector x\n svd - singular value decomposition\n zeros - a matrix of zeros\n\n_Probability\n\n levypdf - The levy probability density function from the char. func.\n normpdf - The Gaussian probability density function\n rand - random numbers from the uniform distribution\n randn - random numbers from the normal distribution\n\n_Statistics\n\n corrcoef - correlation coefficient\n cov - covariance matrix\n amax - the maximum along dimension m\n mean - the mean along dimension m\n median - the median along dimension m\n amin - the minimum along dimension m\n norm - the norm of vector x\n prod - the product along dimension m\n ptp - the max-min along dimension m\n std - the standard deviation along dimension m\n asum - the sum along dimension m\n\n_Time series analysis\n\n bartlett - M-point Bartlett window\n blackman - M-point Blackman window\n cohere - the coherence using average periodiogram\n csd - the cross spectral density using average periodiogram\n fft - the fast Fourier transform of vector x\n hamming - M-point Hamming window\n hanning - M-point Hanning window\n hist - compute the histogram of x\n kaiser - M length Kaiser window\n psd - the power spectral density using average periodiogram\n sinc - the sinc function of array x\n\n_Dates\n\n date2num - convert python datetimes to numeric representation\n drange - create an array of numbers for date plots\n num2date - convert numeric type (float days since 0001) to datetime\n\n_Other\n\n angle - the angle of a complex array\n griddata - interpolate irregularly distributed data to a regular grid\n load - load ASCII data into array\n polyfit - fit x, y to an n-th order polynomial\n polyval - evaluate an n-th order polynomial\n roots - the roots of the polynomial coefficients in p\n save - save an array to an ASCII file\n trapz - trapezoidal integration\n\n__end\n\n\"\"\"\nimport sys, warnings\n\nfrom cbook import flatten, is_string_like, exception_to_str, popd, \\\n silent_list, iterable, dedent\n\nimport numpy as np\nfrom numpy import ma\n\nfrom matplotlib import mpl # pulls in most modules\n\nfrom matplotlib.dates import date2num, num2date,\\\n datestr2num, strpdate2num, drange,\\\n epoch2num, num2epoch, mx2num,\\\n DateFormatter, IndexDateFormatter, DateLocator,\\\n RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\\\n DayLocator, HourLocator, MinuteLocator, SecondLocator,\\\n rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\\\n WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta\nimport matplotlib.dates\n\n# bring all the symbols in so folks can import them from\n# pylab in one fell swoop\n\nfrom matplotlib.mlab import window_hanning, window_none,\\\n conv, detrend, detrend_mean, detrend_none, detrend_linear,\\\n polyfit, polyval, entropy, normpdf, griddata,\\\n levypdf, find, trapz, prepca, rem, norm, orth, rank,\\\n sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\\\n sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\\\n diagonal_matrix, base_repr, binary_repr, log2, ispower2,\\\n bivariate_normal, load, save\n\nfrom matplotlib.mlab import 
stineman_interp, slopes, \\\n stineman_interp, inside_poly, poly_below, poly_between, \\\n is_closed_polygon, path_length, distances_along_curve, vector_lengths\n\nfrom numpy import *\nfrom numpy.fft import *\nfrom numpy.random import *\nfrom numpy.linalg import *\n\nfrom matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \\\n detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \\\n find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \\\n center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \\\n dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \\\n save, load, exp_safe, \\\n amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \\\n base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\\\n mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector\n\n\n\n\n\nfrom matplotlib.pyplot import *\n\n# provide the recommended module abbrevs in the pylab namespace\nimport matplotlib.pyplot as plt\nimport numpy as np\n","license":"gpl-3.0"} {"repo_name":"ShujiaHuang\/AsmVar","path":"src\/AsmvarGenotype\/GMM\/GMM2D.py","copies":"2","size":"18363","content":"\"\"\"\n================================================\nMy own Gaussion Mixture Model for SV genotyping.\nLearn form scikit-learn\n================================================\n\nAuthor : Shujia Huang\nDate : 2014-01-06 14:33:45\n\n\"\"\"\nimport sys\nimport numpy as np\nfrom scipy import linalg\nfrom sklearn import cluster\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils.extmath import logsumexp\n\nEPS = np.finfo(float).eps\n\nclass GMM ( BaseEstimator ) :\n\n \"\"\"\n Copy from scikit-learn\n \"\"\"\n\n def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=1e-2, min_covar=1e-3,\n n_iter=100, n_init=10, params='wmc', init_params='wmc'):\n self.n_components = n_components\n self.covariance_type = covariance_type\n self.thresh = thresh\n self.min_covar = min_covar\n self.random_state = random_state\n self.n_iter = n_iter\n self.n_init = n_init\n self.params = params\n self.init_params = init_params\n\n self.init_means = []\n self.init_covars = []\n self.category = [] # For genotype\n\n if not covariance_type in ['spherical', 'tied', 'diag', 'full']:\n raise ValueError( 'Invalid value for covariance_type: %s' % covariance_type )\n\n if n_init < 1: raise ValueError('GMM estimation requires at least one run')\n\n self.weights_ = np.ones(self.n_components) \/ self.n_components\n\n # flag to indicate exit status of fit() method: converged (True) or\n # n_iter reached (False)\n def score_samples(self, X):\n \"\"\"Return the per-sample likelihood of the data under the model.\n\n Compute the log probability of X under the model and\n return the posterior distribution (responsibilities) of each\n mixture component for each element of X.\n\n Parameters\n ----------\n X: array_like, shape (n_samples, n_features)\n List of n_features-dimensional data points. 
Each row\n corresponds to a single data point.\n\n Returns\n -------\n logprob : array_like, shape (n_samples,)\n Log probabilities of each data point in X.\n\n responsibilities : array_like, shape (n_samples, n_components)\n Posterior probabilities of each mixture component for each\n observation\n \"\"\"\n X = np.asarray(X)\n if X.ndim == 1:\n X = X[:, np.newaxis]\n if X.size == 0:\n return np.array([]), np.empty((0, self.n_components))\n if X.shape[1] != self.means_.shape[1]:\n raise ValueError('The shape of X is not compatible with self')\n\n lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,self.covariance_type)\n + np.log(self.weights_))\n\n logprob = logsumexp(lpr, axis=1)\n responsibilities = np.exp(lpr - logprob[:, np.newaxis])\n return logprob, responsibilities\n\n def predict(self, X):\n \"\"\"\n Predict label for data.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n C : array, shape = (n_samples,)\n \"\"\"\n logprob, responsibilities = self.score_samples(X)\n return responsibilities.argmax(axis=1)\n\n def predict_proba(self, X):\n \"\"\"\n Predict posterior probability of data under each Gaussian\n in the model.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n responsibilities : array-like, shape = (n_samples, n_components)\n Returns the probability of the sample for each Gaussian\n (state) in the model.\n \"\"\"\n logprob, responsibilities = self.score_samples(X)\n return responsibilities\n\n def fit(self, X):\n \"\"\"\n Copy form scikit-learn: gmm.py\n Estimate model parameters with the expectation-maximization\n algorithm.\n\n A initialization step is performed before entering the em\n algorithm. If you want to avoid this step, set the keyword\n argument init_params to the empty string '' when creating the\n GMM object. Likewise, if you would like just to do an\n initialization, set n_iter=0.\n\n Parameters\n ----------\n X : array_like, shape (n, n_features)\n List of n_features-dimensional data points. 
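score_samples above adds the log mixture weights to the per-component log densities and normalises with a log-sum-exp; the same step written with NumPy only (the log-density values are random stand-ins, not real data):

import numpy as np

rng = np.random.RandomState(0)
log_density = rng.randn(5, 3)                  # stand-in for log N(x_i | mu_k, Sigma_k)
log_weights = np.log(np.full(3, 1.0 / 3))      # equal mixture weights

lpr = log_density + log_weights                # log of the joint p(x_i, component k)
logprob = np.logaddexp.reduce(lpr, axis=1)     # log p(x_i), computed stably
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
assert np.allclose(responsibilities.sum(axis=1), 1.0)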
Each row\n corresponds to a single data point.\n \"\"\"\n\n X = np.asarray(X, dtype=np.float)\n if X.ndim == 1:\n X = X[:, np.newaxis]\n if X.shape[0] < self.n_components:\n raise ValueError(\n 'GMM estimation with %s components, but got only %s samples' %\n (self.n_components, X.shape[0]))\n\n lowest_bias = np.infty\n\n c1,c2,c3 = '1\/1', '0\/1', '0\/0'\n m1,m2,m3 = 0.001 , 0.5 , 1.0\n v1,v2,v3 = 0.002, 0.002, 0.002\n category = np.array([ [c1,c2,c3],\n [c1,c2], [c1,c3], [c2,c3] ,\n [c1] , [c2] , [c3] ])\n init_means = np.array([ [[ m1],[ m2] , [ m3]], \n [[ m1],[ m2]], [[m1],[m3]], [[m2],[m3]], \n [[m1]] , [[m2]] , [[m3]] ])\n init_covars = np.array([ [[[ v1]],[[ v2]],[[ v3]]], \n [[[ v1]],[[ v2]]], [[[ v1]],[[ v3]]], [[[ v2]],[[ v3]]], \n [[[ v1]]] , [[[ v2]]] , [[[ v3]]] ])\n\n bestCovars, bestMeans, bestWeights, bestConverged, bestCategory = [], [], [], [], []\n for i, (m,v,c) in enumerate( zip(init_means, init_covars, category) ) : \n\n if i == 0 and self.n_components != 3 : continue\n if i < 4 and self.n_components == 1 : continue\n self.init_means = np.array(m)\n self.init_covars = np.array(v)\n self.category = np.array(c)\n best_params,bias = self.training(X)\n\n if lowest_bias > bias :\n lowest_bias = bias\n bestCovars = best_params['covars']\n bestMeans = best_params['means']\n bestWeights = best_params['weights']\n bestConverged = best_params['converged']\n bestCategory = best_params['category']\n\n if self.n_components == 3 : break\n if self.n_components == 2 and i == 3 : break\n\n bestWeights = np.tile(1.0 \/ self.n_components, self.n_components)\n\n self.covars_ = bestCovars\n self.means_ = bestMeans\n self.weights_ = bestWeights\n self.converged_ = bestConverged\n self.category = bestCategory\n\n return self\n\n ####\n def training(self, X):\n\n max_log_prob = -np.infty\n lowest_bias = np.infty\n\n wmin, wmax = 0.8, 1.2 # Factor intervel [wmin, wmax]\n for w in np.linspace(wmin, wmax, self.n_init):\n if 'm' in self.init_params or not hasattr(self, 'means_'):\n #self.means_ = cluster.KMeans(n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_\n self.means_ = w * self.init_means\n\n if 'w' in self.init_params or not hasattr(self, 'weights_'):\n self.weights_= np.tile(1.0 \/ self.n_components, self.n_components)\n\n if 'c' in self.init_params or not hasattr(self, 'covars_'):\n \"\"\"\n cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])\n if not cv.shape :\n cv.shape = (1, 1)\n self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv, self.covariance_type, self.n_components)\n \"\"\"\n self.covars_ = self.init_covars\n\n # EM algorithms\n log_likelihood = []\n # reset self.converged_ to False\n self.converged_= False\n for i in range(self.n_iter):\n # Expectation step\n curr_log_likelihood, responsibilities = self.score_samples(X)\n log_likelihood.append(curr_log_likelihood.sum())\n\n # Check for convergence.\n if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < self.thresh:\n self.converged_ = True\n break\n #Maximization step\n self._do_mstep(X, responsibilities, self.params, self.min_covar)\n\n if self.n_components == 3:\n curr_bias =(self.means_[0][0]-self.init_means[0][0])+np.abs(self.means_[1][0]-self.init_means[1][0])+(self.init_means[2][0]-self.means_[2][0])\n elif self.n_components == 2:\n curr_bias =np.abs(self.means_[0][0] - self.init_means[0][0]) + np.abs(self.init_means[1][0] - self.means_[1][0])\n elif self.n_components == 1:\n curr_bias =np.abs (self.means_[0][0] - self.init_means[0][0])\n else :\n print >> 
sys.stderr, '[ERROR] The companent could only between [1,3]. But yours is ', self.n_components\n sys.exit(1)\n \n self.Label2Genotype()\n if w == wmin:\n max_log_prob = log_likelihood[-1]\n best_params = {'weights':self.weights_, \n 'means':self.means_, \n 'covars':self.covars_, \n 'converged':self.converged_, \n 'category':self.category}\n if self.converged_:\n lowest_bias = curr_bias\n\n if self.converged_ and lowest_bias > curr_bias:\n max_log_prob = log_likelihood[-1]\n lowest_bias = curr_bias\n best_params = {'weights': self.weights_, \n 'means': self.means_, \n 'covars': self.covars_,\n 'converged': self.converged_, \n 'category':self.category}\n\n # check the existence of an init param that was not subject to\n # likelihood computation issue.\n if np.isneginf(max_log_prob) and self.n_iter:\n raise RuntimeError(\n \"EM algorithm was never able to compute a valid likelihood \" +\n \"given initial parameters. Try different init parameters \" +\n \"(or increasing n_init) or check for degenerate data.\" )\n\n # if neendshift :\n # self.covars_ = tmp_params['covars']\n # self.means_ = tmp_params['means']\n # self.weights_ = tmp_params['weights']\n # self.converged_ = tmp_params['converged']\n # self.category = tmp_params['category']\n\n return best_params, lowest_bias\n\n def _do_mstep(self, X, responsibilities, params, min_covar=0):\n \"\"\" \n Perform the Mstep of the EM algorithm and return the class weihgts.\n \"\"\"\n weights = responsibilities.sum(axis=0)\n weighted_X_sum = np.dot(responsibilities.T, X)\n inverse_weights = 1.0 \/ (weights[:, np.newaxis] + 10 * EPS)\n\n if 'w' in params:\n self.weights_ = (weights \/ (weights.sum() + 10 * EPS) + EPS)\n if 'm' in params:\n self.means_ = weighted_X_sum * inverse_weights\n if 'c' in params:\n covar_mstep_func = _covar_mstep_funcs[self.covariance_type]\n self.covars_ = covar_mstep_func(self, X, responsibilities, weighted_X_sum, inverse_weights,min_covar)\n\n return weights\n\n \"\"\"\n Here is just for genotyping process\n \"\"\"\n # Decide the different guassion mu(mean) to seperate the genotype\n def Label2Genotype(self):\n\n label2genotype = {}\n if self.converged_:\n\n if len(self.means_) > 3 :\n print >> sys.stderr, 'Do not allow more than 3 components. But you set', len(self.means_)\n sys.exit(1)\n\n for label,mu in enumerate(self.means_[:,0]):\n\n best_distance, bestIndx = np.infty, 0\n for i,m in enumerate(self.init_means[:,0]):\n distance = np.abs(mu - m)\n if distance < best_distance:\n bestIndx = i\n best_distance = distance\n\n label2genotype[label] = self.category[bestIndx] \n\n # Put False if there are more than one 'label' points to the same 'genotype'\n g2c = {v:k for k,v in label2genotype.items()}\n if len(label2genotype) != len(g2c): self.converged_ = False \n else :\n label2genotype = { label: '.\/.' for label in range( self.n_components ) }\n\n return label2genotype\n\n def Mendel(self, genotype, sample2col, family):\n\n ngIndx = []\n\n m,n,num = 0.0,0.0,0 # m is match; n is not match\n for k,v in family.items():\n\n #if v[0] not in sample2col or v[1] not in sample2col : continue\n if k not in sample2col or v[0] not in sample2col or v[1] not in sample2col: continue\n if k not in sample2col :\n print >> sys.stderr, 'The sample name is not in vcf file! ', k\n sys.exit(1)\n\n # c1 is son; c2 and c3 are the parents\n c1,c2,c3 = genotype[ sample2col[k] ], genotype[ sample2col[v[0]] ], genotype[ sample2col[v[1]] ]\n\n if c1 == '.\/.' or c2 == '.\/.' 
or c3 == '.\/.': continue\n num += 1;\n ng = False\n if c2 == c3 :\n if c2 == '0\/0' or c2 == '1\/1' :\n if c1 == c2 : m += 1\n else : \n n += 1\n ng = True\n else : # c2 == '0\/1' and c3 == '0\/1'\n m += 1\n elif c2 == '0\/1' and c3 == '1\/1' :\n\n if c1 == '0\/0' : \n n += 1\n ng = True\n else : m += 1\n elif c2 == '0\/1' and c3 == '0\/0' :\n\n if c1 == '1\/1' : \n n += 1\n ng = True\n else : m += 1\n elif c2 == '1\/1' and c3 == '0\/1' :\n\n if c1 == '0\/0' : \n n += 1\n ng = True\n else : m += 1\n elif c2 == '1\/1' and c3 == '0\/0' :\n\n if c1 == '1\/1' or c1 == '0\/0': \n n += 1\n ng = True\n else : m += 1\n elif c2 == '0\/0' and c3 == '0\/1' :\n\n if c1 == '1\/1' : \n n += 1\n ng = True\n else : m += 1\n elif c2 == '0\/0' and c3 == '1\/1' :\n\n if c1 == '0\/0' or c1 == '1\/1' : \n n += 1\n ng = True\n else : m += 1\n\n if ng : \n ngIndx.append(sample2col[k])\n ngIndx.append(sample2col[v[0]])\n ngIndx.append(sample2col[v[1]])\n\n return m,n,num,set(ngIndx)\n###\ndef log_multivariate_normal_density(X, means, covars, covariance_type='full'):\n \"\"\"\n Log probability for full covariance matrices.\n \"\"\"\n X = np.asarray(X)\n if X.ndim == 1:\n X = X[:, np.newaxis]\n if X.size == 0:\n return np.array([])\n if X.shape[1] != means.shape[1]:\n raise ValueError('The shape of X is not compatible with self')\n\n log_multivariate_normal_density_dict = {\n 'full' : _log_multivariate_normal_density_full\n }\n\n return log_multivariate_normal_density_dict[covariance_type]( X, means, covars )\n\ndef _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):\n \"\"\"\n Log probability for full covariance matrices.\n \"\"\"\n if hasattr(linalg, 'solve_triangular'):\n # only in scipy since 0.9\n solve_triangular = linalg.solve_triangular\n else:\n # slower, but works\n solve_triangular = linalg.solve\n n_samples, n_dim = X.shape\n nmix = len(means)\n log_prob = np.empty((n_samples, nmix))\n for c, (mu, cv) in enumerate(zip(means, covars)):\n try:\n cv_chol = linalg.cholesky(cv, lower=True)\n except linalg.LinAlgError:\n # The model is most probabily stuck in a component with too\n # few observations, we need to reinitialize this components\n cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),\n lower=True)\n \n cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))\n cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T\n log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +\n n_dim * np.log(2 * np.pi) + cv_log_det)\n\n return log_prob\n\n\ndef distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components) :\n \"\"\"\n Create all the covariance matrices from a given template\n \"\"\"\n if covariance_type == 'spherical':\n cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),\n (n_components, 1))\n elif covariance_type == 'tied':\n cv = tied_cv\n elif covariance_type == 'diag':\n cv = np.tile(np.diag(tied_cv), (n_components, 1))\n elif covariance_type == 'full':\n cv = np.tile(tied_cv, (n_components, 1, 1))\n else:\n raise ValueError(\"covariance_type must be one of \" +\n \"'spherical', 'tied', 'diag', 'full'\")\n return cv\n\ndef _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):\n \"\"\"Performing the covariance M step for full cases\"\"\"\n # Eq. 12 from K. 
Murphy, \"Fitting a Conditional Linear Gaussian\n # Distribution\"\n n_features = X.shape[1]\n cv = np.empty((gmm.n_components, n_features, n_features))\n for c in range(gmm.n_components):\n post = responsibilities[:, c]\n # Underflow Errors in doing post * X.T are not important\n np.seterr(under='ignore')\n avg_cv = np.dot(post * X.T, X) \/ (post.sum() + 10 * EPS)\n mu = gmm.means_[c][np.newaxis]\n cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))\n return cv\n\n_covar_mstep_funcs = { 'full': _covar_mstep_full }\n\n\n","license":"mit"} {"repo_name":"kcyu1993\/ML_course_kyu","path":"projects\/project1\/scripts\/model.py","copies":"1","size":"19450","content":"from __future__ import absolute_import\n\nfrom abc import ABCMeta, abstractmethod\nimport copy\nfrom data_utils import build_k_indices\nfrom learning_model import *\nfrom regularizer import *\nfrom helpers import save_numpy_array\nimport numpy as np\n\nclass Model(object):\n \"\"\"\n Author: Kaicheng Yu\n\n Machine learning model engine\n Implement the optimizers\n sgd\n normal equations\n cross-validation of given parameters\n Abstract method:\n __call__ produce the raw prediction, use the latest weight obtained by training\n predict produce prediction values, could take weight as input\n get_gradient define gradient here, including the gradient for regularizer\n normalequ define normal equations\n\n Support:\n L1, L2 normalization\n\n Due to the distribution of work, only LogisticRegression is fully tested for\n fitting data, and cross-validation.\n LinearRegression model should also work but not fully tested.\n\n The goal of this class is not only specific to this learning project, but also for reusable and scalable\n to other problems, models.\n\n \"\"\"\n def __init__(self, train_data, validation=None, initial_weight=None,\n loss_function_name='mse', cal_weight='gradient',\n regularizer=None, regularizer_p=None):\n \"\"\"\n Initializer of all learning models.\n :param train_data: training data.\n :param validation_data:\n \"\"\"\n self.train_x = train_data[1]\n self.train_y = train_data[0]\n\n self.set_valid(validation)\n\n ''' Define the progress of history here '''\n self.losses = []\n self.iterations = 0\n self.weights = []\n self.misclass_rate = []\n\n ''' Define loss, weight calculation, regularizer '''\n self.loss_function = get_loss_function(loss_function_name)\n self.loss_function_name = loss_function_name\n self.calculate_weight = cal_weight\n self.regularizer = Regularizer.get_regularizer(regularizer, regularizer_p)\n self.regularizer_p = regularizer_p\n\n # Asserting degree\n if len(self.train_x.shape) > 1:\n degree = self.train_x.shape[1]\n else:\n degree = 1\n\n # Initialize the weight for linear model.\n if initial_weight is not None:\n self.weights.append(initial_weight)\n else:\n self.weights.append(np.random.rand(degree))\n\n def set_valid(self, validation):\n # Set validation here.\n self.validation = False\n self.valid_x = None\n self.valid_y = None\n self.valid_losses = None\n self.valid_misclass_rate = None\n if validation is not None:\n (valid_y, valid_x) = validation\n self.valid_x = valid_x\n self.valid_y = valid_y\n self.validation = True\n self.valid_losses = []\n self.valid_misclass_rate = []\n\n @abstractmethod\n def __call__(self, **kwargs):\n \"\"\"Define the fit function and get prediction\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_gradient(self, y, x, weight):\n raise NotImplementedError\n\n @abstractmethod\n def predict(self, x, weight):\n raise 
NotImplementedError\n\n @abstractmethod\n def normalequ(self, **kwargs):\n ''' define normal equation method to calculate optimal weights'''\n raise NotImplementedError\n\n def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):\n \"\"\" Return weight under given parameter \"\"\"\n model = copy.copy(self)\n model.__setattr__('train_y', y)\n model.__setattr__('train_x', x)\n if test_x is not None and test_y is not None:\n model.set_valid((test_y, test_x))\n _kwargs = []\n for name, value in kwargs.items():\n # Recognize parameter \"\n if name is \"regularizer_p\":\n model.__setattr__(name, value)\n model.regularizer.set_parameter(value)\n else:\n _kwargs.append((name, value))\n _kwargs = dict(_kwargs)\n if model.calculate_weight is 'gradient':\n return model.sgd(**_kwargs)\n # elif model.calculate_weight is 'newton':\n # return model.newton(**_kwargs)\n elif model.calculate_weight is 'normalequ':\n return model.normalequ(**_kwargs)\n\n def get_history(self):\n \"\"\"\n Get the training history of current model\n :return: list as [iterations, [losses], [weights], [mis_class]]\n \"\"\"\n if self.validation:\n return self.iterations, (self.losses, self.valid_losses), \\\n (self.weights), (self.misclass_rate, self.valid_misclass_rate)\n return self.iterations, self.losses, self.weights, self.misclass_rate\n\n def train(self, optimizer='sgd', loss_function='mse', **kwargs):\n \"\"\"\n Train function to perform one time training\n Will based optimizer to select.\n TODO: Would add 'newton' in the future\n This\n :param optimizer: only support 'sgd'\n :param loss_function: loss_function name {mse, mae, logistic}\n :param kwargs: passed into sgd\n :return: best weight\n \"\"\"\n self.loss_function = get_loss_function(loss_function)\n self.loss_function_name = loss_function\n\n if optimizer is 'sgd':\n self.sgd(**kwargs)\n\n return self.weights[-1]\n\n \"\"\"====================================\"\"\"\n \"\"\" Beginning of the optimize Routines \"\"\"\n \"\"\"====================================\"\"\"\n\n def sgd(self, lr=0.01, decay=0.5, max_iters=1000,\n batch_size=128, early_stop=150, decay_intval=50, decay_lim=9):\n \"\"\"\n Define the SGD algorithm here\n Implementing weight decay, early stop.\n\n :param lr: learning rate\n :param decay: weight decay after fix iterations\n :param max_iters: maximum iterations\n :param batch_size: batch_size\n :param early_stop: early_stop after no improvement\n :return: final weight vector\n \"\"\"\n np.set_printoptions(precision=4)\n w = self.weights[0]\n loss = self.compute_loss(self.train_y, self.train_x, w)\n best_loss = loss\n best_counter = 0\n decay_counter = 0\n # print(\"initial loss is {} \".format(loss))\n for epoch in range(max_iters):\n\n for batch_y, batch_x in batch_iter(self.train_y, self.train_x, batch_size):\n grad = self.get_gradient(batch_y, batch_x, w)\n w = w - lr * grad\n loss = self.compute_loss(self.train_y, self.train_x, w)\n mis_class = self.compute_metrics(self.train_y, self.train_x, w)\n\n self.weights.append(w)\n self.losses.append(loss)\n self.misclass_rate.append(mis_class)\n if self.validation is True:\n valid_loss = self.compute_loss(self.valid_y, self.valid_x, w)\n valid_mis_class = self.compute_metrics(self.valid_y, self.valid_x, w)\n self.valid_losses.append(valid_loss)\n self.valid_misclass_rate.append(valid_mis_class)\n # Display every 25 epoch\n if (epoch + 1) % 25 == 0:\n print('Epoch {e} in {m}'.format(e=epoch + 1, m=max_iters), end=\"\\t\")\n if self.validation is True:\n # print('\\tTrain Loss {0:0.4f}, 
\\tTrain mis-class {0:0.4f}, '\n # '\\tvalid loss {0:0.4f}, \\tvalid mis-class {0:0.4f}'.\n # format(loss, mis_class, valid_loss, valid_mis_class))\n print('\\tTrain Loss {}, \\tTrain mis-class {}, '\n '\\tvalid loss {}, \\tvalid mis-class {}'.\n format(loss, mis_class, valid_loss, valid_mis_class))\n else:\n print('\\tTrain Loss {}, \\tTrain mis-class {}'.\n format(loss, mis_class))\n # judge the performance\n if best_loss - loss > 0.000001:\n best_loss = loss\n best_counter = 0\n else:\n best_counter += 1\n if best_counter > early_stop:\n print(\"Learning early stop since loss not improving for {} epoch.\".format(best_counter))\n break\n if best_counter % decay_intval == 0:\n print(\"weight decay by {}\".format(decay))\n lr *= decay\n decay_counter += 1\n if decay_counter > decay_lim:\n print(\"decay {} times, stop\".format(decay_lim))\n break\n return self.weights[-1]\n\n def newton(self, lr=0.01, max_iters=100):\n # TODO: implement newton method later\n raise NotImplementedError\n\n def cross_validation(self, cv, lambdas, lambda_name, seed=1, skip=False, plot=False, **kwargs):\n \"\"\"\n Cross validation method to acquire the best prediction parameters.\n It will use the train_x y as data and do K-fold cross validation.\n\n :param cv: cross validation times\n :param lambdas: array of lambdas to be validated\n :param lambda_name: the lambda name tag\n :param seed: random seed\n :param skip: skip the cross validation, only valid 1 time\n :param plot plot cross-validation plot, if machine does not\n support matplotlib.pyplot, set to false.\n :param kwargs: other parameters could pass into compute_weight\n :return: best weights, best_lambda, (training error, valid error)\n \"\"\"\n np.set_printoptions(precision=4)\n k_indices = build_k_indices(self.train_y, cv, seed)\n # define lists to store the loss of training data and test data\n err_tr = []\n err_te = []\n weights = []\n print(\"K-fold ({}) cross validation to examine [{}]\".\n format(cv, lambdas))\n for lamb in lambdas:\n print(\"For lambda: {}\".format(lamb))\n _mse_tr = []\n _mse_te = []\n _weight = []\n for k in range(cv):\n print('Cross valid iteration {}'.format(k))\n weight, loss_tr, loss_te = self._loop_cross_validation(self.train_y, self.train_x,\n k_indices, k,\n lamb, lambda_name, **kwargs)\n _mse_tr += [loss_tr]\n _mse_te += [loss_te]\n _weight.append(weight)\n if skip:\n break\n avg_tr = np.average(_mse_tr)\n avg_te = np.average(_mse_te)\n err_tr += [avg_tr]\n err_te += [avg_te]\n weights.append(_weight)\n print(\"\\t train error {}, \\t valid error {}\".\n format(avg_tr, avg_te))\n # Select the best parameter during the cross validations.\n print('K-fold cross validation result: \\n {} \\n {}'.\n format(err_tr, err_te))\n # Select the best based on least err_te\n min_err_te = np.argmin(err_te)\n print('Best err_te result {}, lambda {}'.\n format(err_te[min_err_te], lambdas[min_err_te]))\n if plot:\n from plots import cross_validation_visualization\n cross_validation_visualization(lambdas, err_tr, err_te, title=lambda_name,\n error_name=self.loss_function_name)\n else:\n save_numpy_array(lambdas, err_tr, err_te, names=['lambda', 'err_tr', 'err_te'], title=self.regularizer.name)\n\n return weights[min_err_te], lambdas[min_err_te], (err_tr, err_te)\n\n def _loop_cross_validation(self, y, x, k_indices, k, lamb, lambda_name, **kwargs):\n \"\"\"\n Single loop of cross validation\n :param y: train labels\n :param x: train data\n :param k_indices: indices array\n :param k: number of cross validations\n :param lamb: lambda to 
use\n :param lambda_name: lambda_name to pass into compute weight\n :return: weight, mis_tr, mis_te\n \"\"\"\n train_ind = np.concatenate((k_indices[:k], k_indices[k + 1:]), axis=0)\n train_ind = np.reshape(train_ind, (train_ind.size,))\n test_ind = k_indices[k]\n\n # Note: different from np.ndarray, tuple is name[index,]\n # ndarray is name[index,:]\n train_x = x[train_ind,]\n train_y = y[train_ind,]\n test_x = x[test_ind,]\n test_y = y[test_ind,]\n # Insert one more kwargs item\n kwargs[lambda_name] = lamb\n\n weight = self.compute_weight(train_y, train_x, test_x, test_y, **kwargs)\n\n # Compute the metrics and return\n loss_tr = self.compute_metrics(train_y, train_x, weight)\n loss_te = self.compute_metrics(test_y, test_x, weight)\n\n return weight, loss_tr, loss_te\n\n def compute_metrics(self, target, data, weight):\n \"\"\"\n Compute the following metrics\n Misclassification rate\n \"\"\"\n pred = self.predict(data, weight)\n assert len(pred) == len(target)\n # Calculate the mis-classification rate:\n N = len(pred)\n pred = np.reshape(pred, (N,))\n target = np.reshape(target, (N,))\n nb_misclass = np.count_nonzero(target - pred)\n return nb_misclass \/ N\n\n def compute_loss(self, y, x, weight):\n return self.loss_function(y, x, weight)\n\n\nclass LogisticRegression(Model):\n \"\"\" Logistic regression \"\"\"\n\n def __init__(self, train, validation=None, initial_weight=None,\n loss_function_name='logistic',\n calculate_weight='gradient',\n regularizer=None, regularizer_p=None):\n \"\"\"\n Constructor of Logistic Regression model\n :param train: tuple (y, x)\n :param validation: tuple (y, x)\n :param initial_weight: weight vector, dim align x\n :param loss_function: f(x, y, weight)\n :param regularizer: \"Ridge\" || \"Lasso\"\n :param regularizer_p: parameter\n \"\"\"\n # Initialize the super class with given data.\n # Transform the y into {0,1}\n y, tx = train\n y[np.where(y < 0)] = 0\n train = (y, tx)\n if validation:\n val_y, val_tx = validation\n val_y[np.where(val_y < 0)] = 0\n validation = (val_y, val_tx)\n super(LogisticRegression, self).__init__(train, validation,\n initial_weight=initial_weight,\n loss_function_name=loss_function_name,\n cal_weight=calculate_weight,\n regularizer=regularizer,\n regularizer_p=regularizer_p)\n # Set predicted label\n self.pred_label = [-1, 1]\n\n def __call__(self, x, weight=None):\n \"\"\"\n Define the fit function and get prediction,\n generate probability of occurrence\n \"\"\"\n if weight is None:\n weight = self.weights[-1]\n return sigmoid(np.dot(x, weight))\n\n def get_gradient(self, y, x, weight):\n \"\"\" calculate gradient given data and weight \"\"\"\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \\\n + self.regularizer.get_gradient(weight)\n\n def get_hessian(self, y, x, weight):\n # TODO: implement hessian for newton method\n raise NotImplementedError\n\n def predict(self, x, weight=None, cutting=0.5):\n \"\"\" Prediction of event {0,1} \"\"\"\n if weight is None: weight = self.weights[-1]\n pred = sigmoid(np.dot(x, weight))\n pred[np.where(pred <= cutting)] = 0\n pred[np.where(pred > cutting)] = 1\n return pred\n\n def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n \"\"\" Prediction result with labels \"\"\"\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred\n\n 
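get_gradient above uses the standard logistic-regression gradient X^T (sigmoid(Xw) - y). A small stand-alone check of that formula against finite differences of the negative log-likelihood (random toy data, NumPy only, independent of the project's classes):

import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def _nll(y, X, w):
    p = _sigmoid(np.dot(X, w))
    return -np.sum(y * np.log(p) + (1 - y) * np.log(1 - p))

rng = np.random.RandomState(1)
X = rng.randn(20, 3)
y = (rng.rand(20) > 0.5).astype(float)
w = rng.randn(3)

grad = np.dot(X.T, _sigmoid(np.dot(X, w)) - y)           # analytic gradient
eps = 1e-6
numeric = np.array([(_nll(y, X, w + eps * e) - _nll(y, X, w - eps * e)) / (2 * eps)
                    for e in np.eye(3)])
assert np.allclose(grad, numeric, atol=1e-4)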
def train(self, loss_function='logistic',\n lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs):\n \"\"\" Make the default loss logistic, set default parameters \"\"\"\n return super(LogisticRegression, self).train('sgd', loss_function,\n lr=lr,\n decay=decay, max_iters=max_iters,\n batch_size=batch_size, **kwargs)\n\n def normalequ(self, **kwargs):\n \"\"\" Should never call \"\"\"\n raise NotImplementedError\n\n\nclass LinearRegression(Model):\n \"\"\" Linear regression model\n This is not fully tested, especially the cross-validation, please refers\n to the implemenations.py for linear model.\n \"\"\"\n\n def __init__(self, train, validation=None, initial_weight=None,\n regularizer=None, regularizer_p=None,\n loss_function_name='mse', calculate_weight='normalequ'):\n # Initialize the super class with given data.\n super(LinearRegression, self).__init__(train, validation,\n initial_weight=initial_weight,\n loss_function_name=loss_function_name,\n cal_weight=calculate_weight,\n regularizer=regularizer,\n regularizer_p=regularizer_p)\n\n def __call__(self, x):\n \"\"\" calulate prediction based on latest result \"\"\"\n return np.dot(x, self.weights[-1])\n\n def get_gradient(self, batch_y, batch_x, weight):\n \"\"\" return gradient of linear model, including the regularizer \"\"\"\n N = batch_y.shape[0]\n grad = np.empty(len(weight))\n for index in range(N):\n _y = batch_y[index]\n _x = batch_x[index]\n grad = grad + gradient_least_square(_y, _x, weight, self.loss_function_name)\n grad \/= N\n grad += self.regularizer.get_gradient(weight)\n return grad\n\n def predict(self, x, weight):\n \"\"\" Prediction function, predicting final result \"\"\"\n pred = np.dot(x, weight)\n pred[np.where(pred <= 0)] = -1\n pred[np.where(pred > 0)] = 1\n return pred\n\n def normalequ(self):\n \"\"\" Normal equation to get parameters \"\"\"\n tx = self.train_x\n y = self.train_y\n if self.regularizer is None:\n return np.linalg.solve(np.dot(tx.T, tx), np.dot(tx.T, y))\n elif self.regularizer.name is 'Ridge':\n G = np.eye(tx.shape[1])\n G[0, 0] = 0\n hes = np.dot(tx.T, tx) + self.regularizer_p * G\n return np.linalg.solve(hes, np.dot(tx.T, y))\n else:\n raise NotImplementedError\n","license":"mit"} {"repo_name":"jmetzen\/scikit-learn","path":"sklearn\/base.py","copies":"22","size":"18131","content":"\"\"\"Base classes for all estimators.\"\"\"\n# Author: Gael Varoquaux \n# License: BSD 3 clause\n\nimport copy\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom .externals import six\nfrom .utils.fixes import signature\nfrom .utils.deprecation import deprecated\nfrom .exceptions import ChangedBehaviorWarning as ChangedBehaviorWarning_\n\n\nclass ChangedBehaviorWarning(ChangedBehaviorWarning_):\n pass\n\nChangedBehaviorWarning = deprecated(\"ChangedBehaviorWarning has been moved \"\n \"into the sklearn.exceptions module. \"\n \"It will not be available here from \"\n \"version 0.19\")(ChangedBehaviorWarning)\n\n\n##############################################################################\ndef clone(estimator, safe=True):\n \"\"\"Constructs a new estimator with the same parameters.\n\n Clone does a deep copy of the model in an estimator\n without actually copying attached data. 
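normalequ above solves the ridge normal equations with the intercept left unpenalised (G[0, 0] = 0). A compact NumPy check that weights obtained this way zero out the gradient of the penalised least-squares objective (toy data, not from the project):

import numpy as np

rng = np.random.RandomState(2)
X = np.hstack([np.ones((50, 1)), rng.randn(50, 3)])      # first column is the intercept
y = rng.randn(50)
lam = 0.1

G = np.eye(X.shape[1])
G[0, 0] = 0.0                                            # do not penalise the intercept
w = np.linalg.solve(np.dot(X.T, X) + lam * G, np.dot(X.T, y))

# Gradient of 0.5*||Xw - y||^2 + 0.5*lam*w'Gw must vanish at the solution.
grad = np.dot(X.T, np.dot(X, w) - y) + lam * np.dot(G, w)
assert np.allclose(grad, 0.0, atol=1e-8)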
It yields a new estimator\n with the same parameters that has not been fit on any data.\n\n Parameters\n ----------\n estimator: estimator object, or list, tuple or set of objects\n The estimator or group of estimators to be cloned\n\n safe: boolean, optional\n If safe is false, clone will fall back to a deepcopy on objects\n that are not estimators.\n\n \"\"\"\n estimator_type = type(estimator)\n # XXX: not handling dictionaries\n if estimator_type in (list, tuple, set, frozenset):\n return estimator_type([clone(e, safe=safe) for e in estimator])\n elif not hasattr(estimator, 'get_params'):\n if not safe:\n return copy.deepcopy(estimator)\n else:\n raise TypeError(\"Cannot clone object '%s' (type %s): \"\n \"it does not seem to be a scikit-learn estimator \"\n \"as it does not implement a 'get_params' methods.\"\n % (repr(estimator), type(estimator)))\n klass = estimator.__class__\n new_object_params = estimator.get_params(deep=False)\n for name, param in six.iteritems(new_object_params):\n new_object_params[name] = clone(param, safe=False)\n new_object = klass(**new_object_params)\n params_set = new_object.get_params(deep=False)\n\n # quick sanity check of the parameters of the clone\n for name in new_object_params:\n param1 = new_object_params[name]\n param2 = params_set[name]\n if isinstance(param1, np.ndarray):\n # For most ndarrays, we do not test for complete equality\n if not isinstance(param2, type(param1)):\n equality_test = False\n elif (param1.ndim > 0\n and param1.shape[0] > 0\n and isinstance(param2, np.ndarray)\n and param2.ndim > 0\n and param2.shape[0] > 0):\n equality_test = (\n param1.shape == param2.shape\n and param1.dtype == param2.dtype\n # We have to use '.flat' for 2D arrays\n and param1.flat[0] == param2.flat[0]\n and param1.flat[-1] == param2.flat[-1]\n )\n else:\n equality_test = np.all(param1 == param2)\n elif sparse.issparse(param1):\n # For sparse matrices equality doesn't work\n if not sparse.issparse(param2):\n equality_test = False\n elif param1.size == 0 or param2.size == 0:\n equality_test = (\n param1.__class__ == param2.__class__\n and param1.size == 0\n and param2.size == 0\n )\n else:\n equality_test = (\n param1.__class__ == param2.__class__\n and param1.data[0] == param2.data[0]\n and param1.data[-1] == param2.data[-1]\n and param1.nnz == param2.nnz\n and param1.shape == param2.shape\n )\n else:\n new_obj_val = new_object_params[name]\n params_set_val = params_set[name]\n # The following construct is required to check equality on special\n # singletons such as np.nan that are not equal to them-selves:\n equality_test = (new_obj_val == params_set_val or\n new_obj_val is params_set_val)\n if not equality_test:\n raise RuntimeError('Cannot clone object %s, as the constructor '\n 'does not seem to set parameter %s' %\n (estimator, name))\n\n return new_object\n\n\n###############################################################################\ndef _pprint(params, offset=0, printer=repr):\n \"\"\"Pretty print the dictionary 'params'\n\n Parameters\n ----------\n params: dict\n The dictionary to pretty print\n\n offset: int\n The offset in characters to add at the begin of each line.\n\n printer:\n The function to convert entries to strings, typically\n the builtin str or repr\n\n \"\"\"\n # Do a multi-line justified repr:\n options = np.get_printoptions()\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset \/\/ 2) * ' '\n for i, (k, v) in 
enumerate(sorted(six.iteritems(params))):\n if type(v) is float:\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines\n\n\n###############################################################################\nclass BaseEstimator(object):\n \"\"\"Base class for all estimators in scikit-learn\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\"\n % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n\n Parameters\n ----------\n deep: boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n # We need deprecation warnings to always be on in order to\n # catch deprecated param values.\n # This is set in utils\/__init__.py but it gets overwritten\n # when running under python3 somehow.\n warnings.simplefilter(\"always\", DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n # if the parameter is deprecated, don't show it\n continue\n finally:\n warnings.filters.pop(0)\n\n # XXX: should we rather test if instance of estimator?\n if deep and hasattr(value, 'get_params'):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as 
on nested objects\n (such as pipelines). The former have parameters of the form\n ``__`` so that it's possible to update each\n component of a nested object.\n\n Returns\n -------\n self\n \"\"\"\n if not params:\n # Simple optimisation to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n for key, value in six.iteritems(params):\n split = key.split('__', 1)\n if len(split) > 1:\n # nested objects case\n name, sub_name = split\n if name not in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (name, self))\n sub_object = valid_params[name]\n sub_object.set_params(**{sub_name: value})\n else:\n # simple objects case\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (key, self.__class__.__name__))\n setattr(self, key, value)\n return self\n\n def __repr__(self):\n class_name = self.__class__.__name__\n return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),\n offset=len(class_name),),)\n\n\n###############################################################################\nclass ClassifierMixin(object):\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\n _estimator_type = \"classifier\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Returns the mean accuracy on the given test data and labels.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test samples.\n\n y : array-like, shape = (n_samples) or (n_samples, n_outputs)\n True labels for X.\n\n sample_weight : array-like, shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of self.predict(X) wrt. y.\n\n \"\"\"\n from .metrics import accuracy_score\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\n\n\n###############################################################################\nclass RegressorMixin(object):\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\n _estimator_type = \"regressor\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Returns the coefficient of determination R^2 of the prediction.\n\n The coefficient R^2 is defined as (1 - u\/v), where u is the regression\n sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual\n sum of squares ((y_true - y_true.mean()) ** 2).sum().\n Best possible score is 1.0 and it can be negative (because the\n model can be arbitrarily worse). A constant model that always\n predicts the expected value of y, disregarding the input features,\n would get a R^2 score of 0.0.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test samples.\n\n y : array-like, shape = (n_samples) or (n_samples, n_outputs)\n True values for X.\n\n sample_weight : array-like, shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n score : float\n R^2 of self.predict(X) wrt. 
y.\n \"\"\"\n\n from .metrics import r2_score\n return r2_score(y, self.predict(X), sample_weight=sample_weight,\n multioutput='variance_weighted')\n\n\n###############################################################################\nclass ClusterMixin(object):\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\n _estimator_type = \"clusterer\"\n\n def fit_predict(self, X, y=None):\n \"\"\"Performs clustering on X and returns cluster labels.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n y : ndarray, shape (n_samples,)\n cluster labels\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n self.fit(X)\n return self.labels_\n\n\nclass BiclusterMixin(object):\n \"\"\"Mixin class for all bicluster estimators in scikit-learn\"\"\"\n\n @property\n def biclusters_(self):\n \"\"\"Convenient way to get row and column indicators together.\n\n Returns the ``rows_`` and ``columns_`` members.\n \"\"\"\n return self.rows_, self.columns_\n\n def get_indices(self, i):\n \"\"\"Row and column indices of the i'th bicluster.\n\n Only works if ``rows_`` and ``columns_`` attributes exist.\n\n Returns\n -------\n row_ind : np.array, dtype=np.intp\n Indices of rows in the dataset that belong to the bicluster.\n col_ind : np.array, dtype=np.intp\n Indices of columns in the dataset that belong to the bicluster.\n\n \"\"\"\n rows = self.rows_[i]\n columns = self.columns_[i]\n return np.nonzero(rows)[0], np.nonzero(columns)[0]\n\n def get_shape(self, i):\n \"\"\"Shape of the i'th bicluster.\n\n Returns\n -------\n shape : (int, int)\n Number of rows and columns (resp.) in the bicluster.\n \"\"\"\n indices = self.get_indices(i)\n return tuple(len(i) for i in indices)\n\n def get_submatrix(self, i, data):\n \"\"\"Returns the submatrix corresponding to bicluster `i`.\n\n Works with sparse matrices. 
Only works if ``rows_`` and\n ``columns_`` attributes exist.\n\n \"\"\"\n from .utils.validation import check_array\n data = check_array(data, accept_sparse='csr')\n row_ind, col_ind = self.get_indices(i)\n return data[row_ind[:, np.newaxis], col_ind]\n\n\n###############################################################################\nclass TransformerMixin(object):\n \"\"\"Mixin class for all transformers in scikit-learn.\"\"\"\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Fit to data, then transform it.\n\n Fits transformer to X and y with optional parameters fit_params\n and returns a transformed version of X.\n\n Parameters\n ----------\n X : numpy array of shape [n_samples, n_features]\n Training set.\n\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n X_new : numpy array of shape [n_samples, n_features_new]\n Transformed array.\n\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n return self.fit(X, **fit_params).transform(X)\n else:\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X)\n\n\n###############################################################################\nclass MetaEstimatorMixin(object):\n \"\"\"Mixin class for all meta estimators in scikit-learn.\"\"\"\n # this is just a tag for the moment\n\n\n###############################################################################\n\ndef is_classifier(estimator):\n \"\"\"Returns True if the given estimator is (probably) a classifier.\"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"classifier\"\n\n\ndef is_regressor(estimator):\n \"\"\"Returns True if the given estimator is (probably) a regressor.\"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"regressor\"\n","license":"bsd-3-clause"} {"repo_name":"htygithub\/bokeh","path":"bokeh\/sampledata\/gapminder.py","copies":"41","size":"2655","content":"from __future__ import absolute_import\nimport pandas as pd\nfrom os.path import join\nimport sys\nfrom . import _data_dir\n\n\n'''\nThis module provides a pandas DataFrame instance of four\nof the datasets from gapminder.org.\n\nThese are read in from csvs that have been downloaded from Bokeh's\nsample data on S3. But the original code that generated the csvs from the\nraw gapminder data is available at the bottom of this file.\n'''\n\ndata_dir = _data_dir()\n\ndatasets = [\n 'fertility',\n 'life_expectancy',\n 'population',\n 'regions',\n]\n\nfor dataset in datasets:\n filename = join(data_dir, 'gapminder_%s.csv' % dataset)\n try:\n setattr(\n sys.modules[__name__],\n dataset,\n pd.read_csv(filename, index_col='Country')\n )\n except (IOError, OSError):\n raise RuntimeError('Could not load gapminder data file \"%s\". 
Please execute bokeh.sampledata.download()' % filename)\n\n__all__ = datasets\n\n\n# ====================================================\n\n# Original data is from Gapminder - www.gapminder.org.\n# The google docs links are maintained by gapminder\n\n# The following script was used to get the data from gapminder\n# and process it into the csvs stored in bokeh's sampledata.\n\n\"\"\"\npopulation_url = \"http:\/\/spreadsheets.google.com\/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls\"\nfertility_url = \"http:\/\/spreadsheets.google.com\/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls\"\nlife_expectancy_url = \"http:\/\/spreadsheets.google.com\/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls\"\nregions_url = \"https:\/\/docs.google.com\/spreadsheets\/d\/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os\/pub?gid=1&output=xls\"\n\ndef _get_data(url):\n # Get the data from the url and return only 1962 - 2013\n df = pd.read_excel(url, index_col=0)\n df = df.unstack().unstack()\n df = df[(df.index >= 1964) & (df.index <= 2013)]\n df = df.unstack().unstack()\n return df\n\nfertility_df = _get_data(fertility_url)\nlife_expectancy_df = _get_data(life_expectancy_url)\npopulation_df = _get_data(population_url)\nregions_df = pd.read_excel(regions_url, index_col=0)\n\n# have common countries across all data\nfertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))\npopulation_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))\nregions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))\n\nfertility_df.to_csv('gapminder_fertility.csv')\npopulation_df.to_csv('gapminder_population.csv')\nlife_expectancy_df.to_csv('gapminder_life_expectancy.csv')\nregions_df.to_csv('gapminder_regions.csv')\n\"\"\"\n\n# ======================================================\n","license":"bsd-3-clause"} {"repo_name":"huongttlan\/bokeh","path":"bokeh\/compat\/mplexporter\/renderers\/base.py","copies":"44","size":"14355","content":"import warnings\nimport itertools\nfrom contextlib import contextmanager\n\nimport numpy as np\nfrom matplotlib import transforms\n\nfrom .. import utils\nfrom .. 
import _py3k_compat as py3k\n\n\nclass Renderer(object):\n @staticmethod\n def ax_zoomable(ax):\n return bool(ax and ax.get_navigate())\n\n @staticmethod\n def ax_has_xgrid(ax):\n return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines())\n\n @staticmethod\n def ax_has_ygrid(ax):\n return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())\n\n @property\n def current_ax_zoomable(self):\n return self.ax_zoomable(self._current_ax)\n\n @property\n def current_ax_has_xgrid(self):\n return self.ax_has_xgrid(self._current_ax)\n\n @property\n def current_ax_has_ygrid(self):\n return self.ax_has_ygrid(self._current_ax)\n\n @contextmanager\n def draw_figure(self, fig, props):\n if hasattr(self, \"_current_fig\") and self._current_fig is not None:\n warnings.warn(\"figure embedded in figure: something is wrong\")\n self._current_fig = fig\n self._fig_props = props\n self.open_figure(fig=fig, props=props)\n yield\n self.close_figure(fig=fig)\n self._current_fig = None\n self._fig_props = {}\n\n @contextmanager\n def draw_axes(self, ax, props):\n if hasattr(self, \"_current_ax\") and self._current_ax is not None:\n warnings.warn(\"axes embedded in axes: something is wrong\")\n self._current_ax = ax\n self._ax_props = props\n self.open_axes(ax=ax, props=props)\n yield\n self.close_axes(ax=ax)\n self._current_ax = None\n self._ax_props = {}\n\n @contextmanager\n def draw_legend(self, legend, props):\n self._current_legend = legend\n self._legend_props = props\n self.open_legend(legend=legend, props=props)\n yield\n self.close_legend(legend=legend)\n self._current_legend = None\n self._legend_props = {}\n\n # Following are the functions which should be overloaded in subclasses\n\n def open_figure(self, fig, props):\n \"\"\"\n Begin commands for a particular figure.\n\n Parameters\n ----------\n fig : matplotlib.Figure\n The Figure which will contain the ensuing axes and elements\n props : dictionary\n The dictionary of figure properties\n \"\"\"\n pass\n\n def close_figure(self, fig):\n \"\"\"\n Finish commands for a particular figure.\n\n Parameters\n ----------\n fig : matplotlib.Figure\n The figure which is finished being drawn.\n \"\"\"\n pass\n\n def open_axes(self, ax, props):\n \"\"\"\n Begin commands for a particular axes.\n\n Parameters\n ----------\n ax : matplotlib.Axes\n The Axes which will contain the ensuing axes and elements\n props : dictionary\n The dictionary of axes properties\n \"\"\"\n pass\n\n def close_axes(self, ax):\n \"\"\"\n Finish commands for a particular axes.\n\n Parameters\n ----------\n ax : matplotlib.Axes\n The Axes which is finished being drawn.\n \"\"\"\n pass\n\n def open_legend(self, legend, props):\n \"\"\"\n Beging commands for a particular legend.\n\n Parameters\n ----------\n legend : matplotlib.legend.Legend\n The Legend that will contain the ensuing elements\n props : dictionary\n The dictionary of legend properties\n \"\"\"\n pass\n\n def close_legend(self, legend):\n \"\"\"\n Finish commands for a particular legend.\n\n Parameters\n ----------\n legend : matplotlib.legend.Legend\n The Legend which is finished being drawn\n \"\"\"\n pass\n\n def draw_marked_line(self, data, coordinates, linestyle, markerstyle,\n label, mplobj=None):\n \"\"\"Draw a line that also has markers.\n\n If this isn't reimplemented by a renderer object, by default, it will\n make a call to BOTH draw_line and draw_markers when both markerstyle\n and linestyle are not None in the same Line2D object.\n\n \"\"\"\n if linestyle is not None:\n self.draw_line(data, 
coordinates, linestyle, label, mplobj)\n if markerstyle is not None:\n self.draw_markers(data, coordinates, markerstyle, label, mplobj)\n\n def draw_line(self, data, coordinates, style, label, mplobj=None):\n \"\"\"\n Draw a line. By default, draw the line via the draw_path() command.\n Some renderers might wish to override this and provide more\n fine-grained behavior.\n\n In matplotlib, lines are generally created via the plt.plot() command,\n though this command also can create marker collections.\n\n Parameters\n ----------\n data : array_like\n A shape (N, 2) array of datapoints.\n coordinates : string\n A string code, which should be either 'data' for data coordinates,\n or 'figure' for figure (pixel) coordinates.\n style : dictionary\n a dictionary specifying the appearance of the line.\n mplobj : matplotlib object\n the matplotlib plot element which generated this line\n \"\"\"\n pathcodes = ['M'] + (data.shape[0] - 1) * ['L']\n pathstyle = dict(facecolor='none', **style)\n pathstyle['edgecolor'] = pathstyle.pop('color')\n pathstyle['edgewidth'] = pathstyle.pop('linewidth')\n self.draw_path(data=data, coordinates=coordinates,\n pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)\n\n @staticmethod\n def _iter_path_collection(paths, path_transforms, offsets, styles):\n \"\"\"Build an iterator over the elements of the path collection\"\"\"\n N = max(len(paths), len(offsets))\n\n if not path_transforms:\n path_transforms = [np.eye(3)]\n\n edgecolor = styles['edgecolor']\n if np.size(edgecolor) == 0:\n edgecolor = ['none']\n facecolor = styles['facecolor']\n if np.size(facecolor) == 0:\n facecolor = ['none']\n\n elements = [paths, path_transforms, offsets,\n edgecolor, styles['linewidth'], facecolor]\n\n it = itertools\n return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)\n\n def draw_path_collection(self, paths, path_coordinates, path_transforms,\n offsets, offset_coordinates, offset_order,\n styles, mplobj=None):\n \"\"\"\n Draw a collection of paths. The paths, offsets, and styles are all\n iterables, and the number of paths is max(len(paths), len(offsets)).\n\n By default, this is implemented via multiple calls to the draw_path()\n function. For efficiency, Renderers may choose to customize this\n implementation.\n\n Examples of path collections created by matplotlib are scatter plots,\n histograms, contour plots, and many others.\n\n Parameters\n ----------\n paths : list\n list of tuples, where each tuple has two elements:\n (data, pathcodes). See draw_path() for a description of these.\n path_coordinates: string\n the coordinates code for the paths, which should be either\n 'data' for data coordinates, or 'figure' for figure (pixel)\n coordinates.\n path_transforms: array_like\n an array of shape (*, 3, 3), giving a series of 2D Affine\n transforms for the paths. These encode translations, rotations,\n and scalings in the standard way.\n offsets: array_like\n An array of offsets of shape (N, 2)\n offset_coordinates : string\n the coordinates code for the offsets, which should be either\n 'data' for data coordinates, or 'figure' for figure (pixel)\n coordinates.\n offset_order : string\n either \"before\" or \"after\". This specifies whether the offset\n is applied before the path transform, or after. 
The matplotlib\n backend equivalent is \"before\"->\"data\", \"after\"->\"screen\".\n styles: dictionary\n A dictionary in which each value is a list of length N, containing\n the style(s) for the paths.\n mplobj : matplotlib object\n the matplotlib plot element which generated this collection\n \"\"\"\n if offset_order == \"before\":\n raise NotImplementedError(\"offset before transform\")\n\n for tup in self._iter_path_collection(paths, path_transforms,\n offsets, styles):\n (path, path_transform, offset, ec, lw, fc) = tup\n vertices, pathcodes = path\n path_transform = transforms.Affine2D(path_transform)\n vertices = path_transform.transform(vertices)\n # This is a hack:\n if path_coordinates == \"figure\":\n path_coordinates = \"points\"\n style = {\"edgecolor\": utils.color_to_hex(ec),\n \"facecolor\": utils.color_to_hex(fc),\n \"edgewidth\": lw,\n \"dasharray\": \"10,0\",\n \"alpha\": styles['alpha'],\n \"zorder\": styles['zorder']}\n self.draw_path(data=vertices, coordinates=path_coordinates,\n pathcodes=pathcodes, style=style, offset=offset,\n offset_coordinates=offset_coordinates,\n mplobj=mplobj)\n\n def draw_markers(self, data, coordinates, style, label, mplobj=None):\n \"\"\"\n Draw a set of markers. By default, this is done by repeatedly\n calling draw_path(), but renderers should generally overload\n this method to provide a more efficient implementation.\n\n In matplotlib, markers are created using the plt.plot() command.\n\n Parameters\n ----------\n data : array_like\n A shape (N, 2) array of datapoints.\n coordinates : string\n A string code, which should be either 'data' for data coordinates,\n or 'figure' for figure (pixel) coordinates.\n style : dictionary\n a dictionary specifying the appearance of the markers.\n mplobj : matplotlib object\n the matplotlib plot element which generated this marker collection\n \"\"\"\n vertices, pathcodes = style['markerpath']\n pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',\n 'facecolor', 'zorder',\n 'edgewidth'])\n pathstyle['dasharray'] = \"10,0\"\n for vertex in data:\n self.draw_path(data=vertices, coordinates=\"points\",\n pathcodes=pathcodes, style=pathstyle,\n offset=vertex, offset_coordinates=coordinates,\n mplobj=mplobj)\n\n def draw_text(self, text, position, coordinates, style,\n text_type=None, mplobj=None):\n \"\"\"\n Draw text on the image.\n\n Parameters\n ----------\n text : string\n The text to draw\n position : tuple\n The (x, y) position of the text\n coordinates : string\n A string code, which should be either 'data' for data coordinates,\n or 'figure' for figure (pixel) coordinates.\n style : dictionary\n a dictionary specifying the appearance of the text.\n text_type : string or None\n if specified, a type of text such as \"xlabel\", \"ylabel\", \"title\"\n mplobj : matplotlib object\n the matplotlib plot element which generated this text\n \"\"\"\n raise NotImplementedError()\n\n def draw_path(self, data, coordinates, pathcodes, style,\n offset=None, offset_coordinates=\"data\", mplobj=None):\n \"\"\"\n Draw a path.\n\n In matplotlib, paths are created by filled regions, histograms,\n contour plots, patches, etc.\n\n Parameters\n ----------\n data : array_like\n A shape (N, 2) array of datapoints.\n coordinates : string\n A string code, which should be either 'data' for data coordinates,\n 'figure' for figure (pixel) coordinates, or \"points\" for raw\n point coordinates (useful in conjunction with offsets, below).\n pathcodes : list\n A list of single-character SVG pathcodes associated 
with the data.\n Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',\n 'S', 's', 'C', 'c', 'Z', 'z']\n See the SVG specification for details. Note that some path codes\n consume more than one datapoint (while 'Z' consumes none), so\n in general, the length of the pathcodes list will not be the same\n as that of the data array.\n style : dictionary\n a dictionary specifying the appearance of the line.\n offset : list (optional)\n the (x, y) offset of the path. If not given, no offset will\n be used.\n offset_coordinates : string (optional)\n A string code, which should be either 'data' for data coordinates,\n or 'figure' for figure (pixel) coordinates.\n mplobj : matplotlib object\n the matplotlib plot element which generated this path\n \"\"\"\n raise NotImplementedError()\n\n def draw_image(self, imdata, extent, coordinates, style, mplobj=None):\n \"\"\"\n Draw an image.\n\n Parameters\n ----------\n imdata : string\n base64 encoded png representation of the image\n extent : list\n the axes extent of the image: [xmin, xmax, ymin, ymax]\n coordinates: string\n A string code, which should be either 'data' for data coordinates,\n or 'figure' for figure (pixel) coordinates.\n style : dictionary\n a dictionary specifying the appearance of the image\n mplobj : matplotlib object\n the matplotlib plot object which generated this image\n \"\"\"\n raise NotImplementedError()\n","license":"bsd-3-clause"} {"repo_name":"joelfrederico\/SciSalt","path":"scisalt\/qt\/mplwidget.py","copies":"1","size":"13557","content":"from PyQt4 import QtGui\nfrom PyQt4 import QtCore\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas\nfrom matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar\nimport matplotlib as _mpl\nimport numpy as _np\n\nfrom .Rectangle import Rectangle\n\nimport pdb\nimport traceback\n\nimport logging\nloggerlevel = logging.DEBUG\nlogger = logging.getLogger(__name__)\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Slider_and_Text(QtGui.QWidget):\n valueChanged = QtCore.pyqtSignal(int)\n sliderReleased = QtCore.pyqtSignal(int)\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self)\n self.setMaximumHeight(40)\n # Enable tracking by default\n self._tracking = True\n self.hLayout = QtGui.QHBoxLayout()\n self.slider = QtGui.QSlider()\n\n self.leftbutton = QtGui.QPushButton()\n self.leftbutton.setText(\"<\")\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth())\n # self.leftbutton.setSizePolicy(sizePolicy)\n self.leftbutton.clicked.connect(self._subone)\n\n self.rightbutton = QtGui.QPushButton()\n self.rightbutton.setText(\">\")\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth())\n # self.rightbutton.setSizePolicy(sizePolicy)\n self.rightbutton.clicked.connect(self._addone)\n\n self.v = 
QtGui.QIntValidator()\n self.box = QtGui.QLineEdit()\n self.box.setValidator(self.v)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth())\n # self.box.setSizePolicy(sizePolicy)\n\n self.hLayout.addWidget(self.leftbutton)\n self.hLayout.addWidget(self.slider)\n self.hLayout.addWidget(self.box)\n self.hLayout.addWidget(self.rightbutton)\n self.setLayout(self.hLayout)\n \n self.slider.valueChanged.connect(self._sliderChanged)\n self.box.editingFinished.connect(self._textChanged)\n self.setOrientation(QtCore.Qt.Horizontal)\n\n # Connect release so tracking works as expected\n self.slider.sliderReleased.connect(self._sliderReleased)\n\n def _addone(self):\n self.value = self.value + 1\n self.valueChanged.emit(self.value)\n\n def _subone(self):\n self.value = self.value - 1\n self.valueChanged.emit(self.value)\n\n def _sliderReleased(self):\n print('Released')\n self.sliderReleased.emit(self.slider.value)\n\n def setTracking(self, val):\n print('Tracking set to {}'.format(val))\n self._tracking = val\n\n def setMaximum(self, val):\n self.slider.setMaximum(val)\n self.v.setRange(self.slider.minimum(), self.slider.maximum())\n self.box.setValidator(self.v)\n\n def setMinimum(self, val):\n self.slider.setMinimum(val)\n self.v.setRange(self.slider.minimum(), self.slider.maximum())\n self.box.setValidator(self.v)\n\n def _sliderChanged(self, val):\n self.box.setText(str(val))\n if self._tracking:\n try:\n self.slider.sliderReleased.disconnect()\n except:\n pass\n self.valueChanged.emit(val)\n else:\n try:\n self.slider.sliderReleased.disconnect()\n except:\n pass\n self.slider.sliderReleased.connect(self._sliderChanged_notracking)\n\n def _sliderChanged_notracking(self):\n val = self.slider.value()\n # print('Value to be emitted is {}'.format(val))\n self.valueChanged.emit(val)\n\n def _textChanged(self):\n val = self.box.text()\n self.slider.setValue(int(val))\n self._sliderChanged_notracking()\n\n def setOrientation(self, *args, **kwargs):\n self.slider.setOrientation(*args, **kwargs)\n\n def _getValue(self):\n return self.slider.value()\n\n def _setValue(self, val):\n self.slider.setValue(val)\n self.box.setText(str(val))\n value = property(_getValue, _setValue)\n\n def setValue(self, val):\n self.slider.setValue(val)\n self.box.setText(str(val))\n # self.valueChanged.emit(val)\n\n\nclass Mpl_Plot(_FigureCanvas):\n def __init__(self, parent=None):\n # Initialize things\n self.fig = _mpl.figure.Figure()\n _FigureCanvas.__init__(self, self.fig)\n _FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n _FigureCanvas.updateGeometry(self)\n\n # Create axes\n self.ax = self.fig.add_subplot(111)\n\n def plot(self, *args, **kwargs):\n self.ax.clear()\n self.ax.plot(*args, **kwargs)\n self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')\n self.ax.figure.canvas.draw()\n\n\nclass Mpl_Image(QtGui.QWidget):\n # Signal for when the rectangle is changed\n rectChanged = QtCore.pyqtSignal(Rectangle)\n\n def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None):\n # Initialize things\n QtGui.QWidget.__init__(self)\n self.rectbool = rectbool\n self._clim_min = 0\n self._clim_max = 3600\n\n self._pressed = False\n\n # Add a vertical layout\n self.vLayout = QtGui.QVBoxLayout()\n\n # Add a figure\n self.fig = _mpl.figure.Figure()\n\n # Add a canvas containing the fig\n 
self.canvas = _FigureCanvas(self.fig)\n _FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n _FigureCanvas.updateGeometry(self.canvas)\n\n # Setup the layout\n if toolbarbool:\n self.toolbar = _NavigationToolbar(self.canvas, self)\n self.toolbar.setMaximumHeight(20)\n self.vLayout.addWidget(self.toolbar)\n self.vLayout.addWidget(self.canvas)\n self.setLayout(self.vLayout)\n\n # Create axes\n self.ax = self.fig.add_subplot(111)\n\n # Include rectangle functionality\n if rectbool:\n self.fig.canvas.mpl_connect('button_press_event', self.on_press)\n self.fig.canvas.mpl_connect('button_release_event', self.on_release)\n self.Rectangle = Rectangle(\n x = -10 ,\n y = 0 ,\n width = 0 ,\n height = 3 ,\n axes = self.ax\n )\n\n # Add image\n self.image = image\n\n def _get_img(self):\n return self._image\n\n def _set_img(self, image):\n self.ax.clear()\n self._image = image\n if image is not None:\n self._imgplot = self.ax.imshow(image, interpolation='none')\n if self.rectbool:\n self.ax.add_patch(self.Rectangle.get_rect())\n # imagemax = _np.max(_np.max(image))\n self.set_clim(self._clim_min, self._clim_max)\n image = property(_get_img, _set_img)\n\n def set_clim(self, clim_min, clim_max):\n if self.image is not None:\n self._clim_min = clim_min\n self._clim_max = clim_max\n self._imgplot.set_clim(clim_min, clim_max)\n self.ax.figure.canvas.draw()\n\n def on_press(self, event):\n if self.toolbar._active is None:\n self._pressed = True\n self.x0 = event.xdata\n self.y0 = event.ydata\n logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0))\n\n def on_release(self, event):\n if self._pressed:\n self._pressed = False\n print('release')\n self.x1 = event.xdata\n self.y1 = event.ydata\n width = self.x1 - self.x0\n height = self.y1 - self.y0\n\n logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format(\n self.x0 ,\n self.y0 ,\n self.x1 ,\n self.y1 ,\n width ,\n height\n )\n )\n\n self.Rectangle.set_xy((self.x0, self.y0))\n self.Rectangle.set_width(width)\n self.Rectangle.set_height(height)\n self.ax.figure.canvas.draw()\n\n self.rectChanged.emit(self.Rectangle)\n # print(self.rect)\n\n def zoom_rect(self, border=None, border_px=None):\n # ======================================\n # Get x coordinates\n # ======================================\n x0 = self.Rectangle.get_x()\n width = self.Rectangle.get_width()\n x1 = x0+width\n\n # ======================================\n # Get y coordinates\n # ======================================\n y0 = self.Rectangle.get_y()\n height = self.Rectangle.get_height()\n y1 = y0+height\n\n # ======================================\n # Validate borders\n # ======================================\n if (border_px is None) and (border is not None):\n xborder = border[0]*width\n yborder = border[1]*height\n elif (border_px is not None) and (border is None):\n xborder = border_px[0]\n yborder = border_px[1]\n elif (border_px is None) and (border is None):\n raise IOError('No border info specified!')\n elif (border_px is not None) and (border is not None):\n raise IOError('Too much border info specified, both border_px and border!')\n else:\n raise IOError('End of the line!')\n\n # ======================================\n # Add borders\n # ======================================\n x0 = x0 - xborder\n x1 = x1 + xborder\n y0 = y0 - yborder\n y1 = y1 + yborder\n\n # ======================================\n # Validate coordinates to prevent\n # unPythonic crash\n 
# ======================================\n if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])):\n print('X issue')\n print('Requested: x=({}, {})'.format(x0, x1))\n x0 = 0\n x1 = self.image.shape[1]\n if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])):\n print('y issue')\n print('Requested: y=({}, {})'.format(y0, y1))\n y0 = 0\n y1 = self.image.shape[0]\n\n # ======================================\n # Set viewable area\n # ======================================\n self.ax.set_xlim(x0, x1)\n self.ax.set_ylim(y0, y1)\n\n # ======================================\n # Redraw canvas to show updates\n # ======================================\n self.ax.figure.canvas.draw()\n\n\nclass Mpl_Image_Plus_Slider(QtGui.QWidget):\n # def __init__(self, parent=None, **kwargs):\n def __init__(self, parent=None, **kwargs):\n # Initialize self as a widget\n QtGui.QWidget.__init__(self, parent)\n\n # Add a vertical layout with parent self\n self.vLayout = QtGui.QVBoxLayout(self)\n self.vLayout.setObjectName(_fromUtf8(\"vLayout\"))\n\n # Add an Mpl_Image widget to vLayout,\n # save it to self._img\n # Pass arguments through to Mpl_Image.\n self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs)\n self._img.setObjectName(_fromUtf8(\"_img\"))\n self.vLayout.addWidget(self._img)\n\n # Add a slider to vLayout,\n # save it to self.max_slider\n # self.max_slider = QtGui.QSlider(self)\n self.max_slider = Slider_and_Text(self)\n self.max_slider.setObjectName(_fromUtf8(\"max_slider\"))\n self.max_slider.setOrientation(QtCore.Qt.Horizontal)\n self.vLayout.addWidget(self.max_slider)\n\n # Setup slider to work with _img's clims\n self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val))\n\n def _get_image(self):\n return self._img.image\n\n def _set_image(self, image):\n self._img.image = image\n maximage = _np.max(_np.max(image))\n self.max_slider.setMaximum(maximage)\n image = property(_get_image, _set_image)\n\n def _get_ax(self):\n return self._img.ax\n ax = property(_get_ax)\n\n def _get_Rectangle(self):\n return self._img.Rectangle\n # def _set_rect(self, rect):\n # self._img.rect(rect)\n Rectangle = property(_get_Rectangle)\n\n def zoom_rect(self, border=None, border_px=None):\n self._img.zoom_rect(border, border_px)\n\n def set_clim(self, *args, **kwargs):\n self._img.set_clim(*args, **kwargs)\n\n def setSliderValue(self, val):\n self.max_slider.setValue(val)\n","license":"mit"} {"repo_name":"mathhun\/scipy_2015_sklearn_tutorial","path":"notebooks\/figures\/plot_kneighbors_regularization.py","copies":"25","size":"1363","content":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.neighbors import KNeighborsRegressor\n\n\ndef make_dataset(n_samples=100):\n rnd = np.random.RandomState(42)\n x = np.linspace(-3, 3, n_samples)\n y_no_noise = np.sin(4 * x) + x\n y = y_no_noise + rnd.normal(size=len(x))\n return x, y\n\n\ndef plot_regression_datasets():\n fig, axes = plt.subplots(1, 3, figsize=(15, 5))\n for n_samples, ax in zip([10, 100, 1000], axes):\n x, y = make_dataset(n_samples)\n ax.plot(x, y, 'o', alpha=.6)\n\n\ndef plot_kneighbors_regularization():\n rnd = np.random.RandomState(42)\n x = np.linspace(-3, 3, 100)\n y_no_noise = np.sin(4 * x) + x\n y = y_no_noise + rnd.normal(size=len(x))\n X = x[:, np.newaxis]\n fig, axes = plt.subplots(1, 3, figsize=(15, 5))\n\n x_test = np.linspace(-3, 3, 1000)\n\n for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):\n kneighbor_regression = 
KNeighborsRegressor(n_neighbors=n_neighbors)\n kneighbor_regression.fit(X, y)\n ax.plot(x, y_no_noise, label=\"true function\")\n ax.plot(x, y, \"o\", label=\"data\")\n ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),\n label=\"prediction\")\n ax.legend()\n ax.set_title(\"n_neighbors = %d\" % n_neighbors)\n\nif __name__ == \"__main__\":\n plot_kneighbors_regularization()\n plt.show()\n","license":"cc0-1.0"} {"repo_name":"qifeigit\/scikit-learn","path":"examples\/decomposition\/plot_pca_3d.py","copies":"354","size":"2432","content":"#!\/usr\/bin\/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n=========================================================\nPrincipal components analysis (PCA)\n=========================================================\n\nThese figures aid in illustrating how a point cloud\ncan be very flat in one direction--which is where PCA\ncomes in to choose a direction that is not flat.\n\n\"\"\"\nprint(__doc__)\n\n# Authors: Gael Varoquaux\n# Jaques Grobler\n# Kevin Hughes\n# License: BSD 3 clause\n\nfrom sklearn.decomposition import PCA\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n\n###############################################################################\n# Create the data\n\ne = np.exp(1)\nnp.random.seed(4)\n\n\ndef pdf(x):\n return 0.5 * (stats.norm(scale=0.25 \/ e).pdf(x)\n + stats.norm(scale=4 \/ e).pdf(x))\n\ny = np.random.normal(scale=0.5, size=(30000))\nx = np.random.normal(scale=0.5, size=(30000))\nz = np.random.normal(scale=0.1, size=len(x))\n\ndensity = pdf(x) * pdf(y)\npdf_z = pdf(5 * z)\n\ndensity *= pdf_z\n\na = x + y\nb = 2 * y\nc = a - b + z\n\nnorm = np.sqrt(a.var() + b.var())\na \/= norm\nb \/= norm\n\n\n###############################################################################\n# Plot the figures\ndef plot_figs(fig_num, elev, azim):\n fig = plt.figure(fig_num, figsize=(4, 3))\n plt.clf()\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)\n\n ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)\n Y = np.c_[a, b, c]\n\n # Using SciPy's SVD, this would be:\n # _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)\n\n pca = PCA(n_components=3)\n pca.fit(Y)\n pca_score = pca.explained_variance_ratio_\n V = pca.components_\n\n x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score \/ pca_score.min()\n\n x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T\n x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]\n y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]\n z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]\n x_pca_plane.shape = (2, 2)\n y_pca_plane.shape = (2, 2)\n z_pca_plane.shape = (2, 2)\n ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)\n ax.w_xaxis.set_ticklabels([])\n ax.w_yaxis.set_ticklabels([])\n ax.w_zaxis.set_ticklabels([])\n\n\nelev = -40\nazim = -80\nplot_figs(1, elev, azim)\n\nelev = 30\nazim = 20\nplot_figs(2, elev, azim)\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"sonyahanson\/assaytools","path":"examples\/ipynbs\/data-analysis\/spectra\/2015-12-18\/xml2png4scans-spectra.py","copies":"8","size":"5636","content":"# This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader\n# and makes quick and dirty images of the raw data.\n\n# But with scans and not just singlet reads.\n# This script specifically combines four spectrum scripts (AB, CD, EF, GH) into a single dataframe and plot.\n\n# The same procedure can be used to make matrices suitable for 
analysis using\n# matrix = dataframe.values\n\n# Made by Sonya Hanson, with some help from things that worked in xml2png.py and xml2png4scans.py\n# Friday, November 18,2015\n\n# Usage: python xml2png4scans-spectra.py *.xml\n\n############ For future to combine with xml2png.py\n#\n# for i, sect in enumerate(Sections):\n# reads = sect.xpath(\"*\/Well\")\n# parameters = root.xpath(path)[0]\n# if reads[0].attrib['Type'] == \"Scan\":\n#\n##############\n\nimport matplotlib.pyplot as plt\nfrom lxml import etree\nimport pandas as pd\nimport matplotlib.cm as cm\nimport seaborn\nimport sys\nimport os\n\n### Define xml files.\n\nxml_files = sys.argv[1:]\n\nso_many = len(xml_files)\nprint \"****This script is about to make png files for %s xml files. ****\" % so_many\n\n### Define extract function that extracts parameters\n\ndef extract(taglist):\n result = []\n for p in taglist:\n print \"Attempting to extract tag '%s'...\" % p\n try:\n param = parameters.xpath(\"*[@Name='\" + p + \"']\")[0]\n result.append( p + '=' + param.attrib['Value'])\n except:\n ### tag not found\n result.append(None)\n\n return result\n\n### Define an initial set of dataframes, one per each section\n\nlarge_dataframe0 = pd.DataFrame()\nlarge_dataframe1 = pd.DataFrame()\nlarge_dataframe2 = pd.DataFrame()\n\nfor file in xml_files:\n\n ### Parse XML file.\n\n root = etree.parse(file)\n\n ### Remove extension from xml filename.\n\n file_name = os.path.splitext(file)[0]\n\n ### Extract plate type and barcode.\n\n plate = root.xpath(\"\/*\/Header\/Parameters\/Parameter[@Name='Plate']\")[0]\n plate_type = plate.attrib['Value']\n\n try:\n bar = root.xpath(\"\/*\/Plate\/BC\")[0]\n barcode = bar.text\n except:\n barcode = 'no barcode'\n\n ### Define Sections.\n\n Sections = root.xpath(\"\/*\/Section\")\n much = len(Sections)\n print \"****The xml file \" + file + \" has %s data sections:****\" % much\n for sect in Sections:\n print sect.attrib['Name']\n\n for i, sect in enumerate(Sections):\n\n ### Extract Parameters for this section.\n\n path = \"\/*\/Section[@Name='\" + sect.attrib['Name'] + \"']\/Parameters\"\n parameters = root.xpath(path)[0]\n\n ### Parameters are extracted slightly differently depending on Absorbance or Fluorescence read.\n # Attach these to title1, title2, or title3, depending on section which will be the same for all 4 files.\n\n if parameters[0].attrib['Value'] == \"Absorbance\":\n result = extract([\"Mode\", \"Wavelength Start\", \"Wavelength End\", \"Wavelength Step Size\"])\n globals()[\"title\"+str(i)] = '%s, %s, %s, %s' % tuple(result)\n\n else:\n result = extract([\"Gain\", \"Excitation Wavelength\", \"Emission Wavelength\", \"Part of Plate\", \"Mode\"])\n globals()[\"title\"+str(i)] = '%s, %s, %s, \\n %s, %s' % tuple(result)\n\n print \"****The %sth section has the parameters:****\" %i\n print globals()[\"title\"+str(i)]\n\n ### Extract Reads for this section.\n\n Sections = root.xpath(\"\/*\/Section\")\n\n reads = root.xpath(\"\/*\/Section[@Name='\" + sect.attrib['Name'] + \"']\/*\/Well\")\n\n wellIDs = [read.attrib['Pos'] for read in reads]\n\n data = [(s.text, float(s.attrib['WL']), r.attrib['Pos'])\n for r in reads\n for s in r]\n\n dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well'])\n \n ### dataframe_rep replaces 'OVER' (when fluorescence signal maxes out) with '3289277', an arbitrarily high number\n\n dataframe_rep = dataframe.replace({'OVER':'3289277'})\n\n dataframe_rep[['fluorescence']] = dataframe_rep[['fluorescence']].astype('float')\n\n ### Create 
large_dataframe1, large_dataframe2, and large_dataframe3 that collect data for each section\n ### as we run through cycle through sections and files.\n\n globals()[\"dataframe_pivot\"+str(i)] = pd.pivot_table(dataframe_rep, index = 'wavelength (nm)', columns= ['Well'])\n \n print 'The max fluorescence value in this dataframe is %s'% globals()[\"dataframe_pivot\"+str(i)].values.max()\n\n globals()[\"large_dataframe\"+str(i)] = pd.concat([globals()[\"large_dataframe\"+str(i)],globals()[\"dataframe_pivot\"+str(i)]])\n\n### Plot, making a separate png for each section.\n\nfor i, sect in enumerate(Sections):\n\n section_name = sect.attrib['Name']\n \n path = \"\/*\/Section[@Name='\" + sect.attrib['Name'] + \"']\/Parameters\"\n parameters = root.xpath(path)[0]\n \n if parameters[0].attrib['Value'] == \"Absorbance\":\n section_ylim = [0,0.2]\n else:\n section_ylim = [0,40000]\n\n Alphabet = ['A','B','C','D','E','F','G','H']\n\n fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))\n for j,A in enumerate(Alphabet):\n for k in range(1,12):\n try:\n globals()[\"large_dataframe\"+str(i)].fluorescence.get(A + str(k)).plot(ax=axes[(j\/3)%3,j%3], title=A, c=cm.hsv(k*15), ylim=section_ylim, xlim=[240,800])\n except:\n print \"****No row %s.****\" %A\n\n fig.suptitle('%s \\n %s \\n Barcode = %s' % (globals()[\"title\"+str(i)], plate_type, barcode), fontsize=14)\n fig.subplots_adjust(hspace=0.3)\n plt.savefig('%s_%s.png' % (file_name, section_name))\n","license":"lgpl-2.1"} {"repo_name":"nikitasingh981\/scikit-learn","path":"examples\/text\/hashing_vs_dict_vectorizer.py","copies":"93","size":"3243","content":"\"\"\"\n===========================================\nFeatureHasher and DictVectorizer Comparison\n===========================================\n\nCompares FeatureHasher and DictVectorizer by using both to vectorize\ntext documents.\n\nThe example demonstrates syntax and speed only; it doesn't actually do\nanything useful with the extracted vectors. See the example scripts\n{document_classification_20newsgroups,clustering}.py for actual learning\non text documents.\n\nA discrepancy between the number of terms reported for DictVectorizer and\nfor FeatureHasher is to be expected due to hash collisions.\n\"\"\"\n\n# Author: Lars Buitinck\n# License: BSD 3 clause\n\nfrom __future__ import print_function\nfrom collections import defaultdict\nimport re\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction import DictVectorizer, FeatureHasher\n\n\ndef n_nonzero_columns(X):\n \"\"\"Returns the number of non-zero columns in a CSR matrix X.\"\"\"\n return len(np.unique(X.nonzero()[1]))\n\n\ndef tokens(doc):\n \"\"\"Extract tokens from doc.\n\n This uses a simple regex to break strings into tokens. 
For a more\n principled approach, see CountVectorizer or TfidfVectorizer.\n \"\"\"\n return (tok.lower() for tok in re.findall(r\"\\w+\", doc))\n\n\ndef token_freqs(doc):\n \"\"\"Extract a dict mapping tokens from doc to their frequencies.\"\"\"\n freq = defaultdict(int)\n for tok in tokens(doc):\n freq[tok] += 1\n return freq\n\n\ncategories = [\n 'alt.atheism',\n 'comp.graphics',\n 'comp.sys.ibm.pc.hardware',\n 'misc.forsale',\n 'rec.autos',\n 'sci.space',\n 'talk.religion.misc',\n]\n# Uncomment the following line to use a larger set (11k+ documents)\n#categories = None\n\nprint(__doc__)\nprint(\"Usage: %s [n_features_for_hashing]\" % sys.argv[0])\nprint(\" The default number of features is 2**18.\")\nprint()\n\ntry:\n n_features = int(sys.argv[1])\nexcept IndexError:\n n_features = 2 ** 18\nexcept ValueError:\n print(\"not a valid number of features: %r\" % sys.argv[1])\n sys.exit(1)\n\n\nprint(\"Loading 20 newsgroups training data\")\nraw_data = fetch_20newsgroups(subset='train', categories=categories).data\ndata_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) \/ 1e6\nprint(\"%d documents - %0.3fMB\" % (len(raw_data), data_size_mb))\nprint()\n\nprint(\"DictVectorizer\")\nt0 = time()\nvectorizer = DictVectorizer()\nvectorizer.fit_transform(token_freqs(d) for d in raw_data)\nduration = time() - t0\nprint(\"done in %fs at %0.3fMB\/s\" % (duration, data_size_mb \/ duration))\nprint(\"Found %d unique terms\" % len(vectorizer.get_feature_names()))\nprint()\n\nprint(\"FeatureHasher on frequency dicts\")\nt0 = time()\nhasher = FeatureHasher(n_features=n_features)\nX = hasher.transform(token_freqs(d) for d in raw_data)\nduration = time() - t0\nprint(\"done in %fs at %0.3fMB\/s\" % (duration, data_size_mb \/ duration))\nprint(\"Found %d unique terms\" % n_nonzero_columns(X))\nprint()\n\nprint(\"FeatureHasher on raw tokens\")\nt0 = time()\nhasher = FeatureHasher(n_features=n_features, input_type=\"string\")\nX = hasher.transform(tokens(d) for d in raw_data)\nduration = time() - t0\nprint(\"done in %fs at %0.3fMB\/s\" % (duration, data_size_mb \/ duration))\nprint(\"Found %d unique terms\" % n_nonzero_columns(X))\n","license":"bsd-3-clause"} {"repo_name":"RomainBrault\/scikit-learn","path":"sklearn\/neighbors\/graph.py","copies":"36","size":"6650","content":"\"\"\"Nearest Neighbors graph functions\"\"\"\n\n# Author: Jake Vanderplas \n#\n# License: BSD 3 clause (C) INRIA, University of Amsterdam\n\nfrom .base import KNeighborsMixin, RadiusNeighborsMixin\nfrom .unsupervised import NearestNeighbors\n\n\ndef _check_params(X, metric, p, metric_params):\n \"\"\"Check the validity of the input parameters\"\"\"\n params = zip(['metric', 'p', 'metric_params'],\n [metric, p, metric_params])\n est_params = X.get_params()\n for param_name, func_param in params:\n if func_param != est_params[param_name]:\n raise ValueError(\n \"Got %s for %s, while the estimator has %s for \"\n \"the same parameter.\" % (\n func_param, param_name, est_params[param_name]))\n\n\ndef _query_include_self(X, include_self):\n \"\"\"Return the query based on include_self param\"\"\"\n if include_self:\n query = X._fit_X\n else:\n query = None\n\n return query\n\n\ndef kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',\n p=2, metric_params=None, include_self=False, n_jobs=1):\n \"\"\"Computes the (weighted) graph of k-Neighbors for points in X\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like or BallTree, shape = [n_samples, n_features]\n Sample data, in the form of a 
numpy array or a precomputed\n :class:`BallTree`.\n\n n_neighbors : int\n Number of neighbors for each sample.\n\n mode : {'connectivity', 'distance'}, optional\n Type of returned matrix: 'connectivity' will return the connectivity\n matrix with ones and zeros, and 'distance' will return the distances\n between neighbors according to the given metric.\n\n metric : string, default 'minkowski'\n The distance metric used to calculate the k-Neighbors for each sample\n point. The DistanceMetric class gives a list of available metrics.\n The default distance is 'euclidean' ('minkowski' metric with the p\n param equal to 2.)\n\n include_self : bool, default=False.\n Whether or not to mark each sample as the first nearest neighbor to\n itself. If `None`, then True is used for mode='connectivity' and False\n for mode='distance' as this will preserve backwards compatibilty.\n\n p : int, default 2\n Power parameter for the Minkowski metric. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, optional\n additional keyword arguments for the metric function.\n\n n_jobs : int, optional (default = 1)\n The number of parallel jobs to run for neighbors search.\n If ``-1``, then the number of jobs is set to the number of CPU cores.\n\n Returns\n -------\n A : sparse matrix in CSR format, shape = [n_samples, n_samples]\n A[i, j] is assigned the weight of edge that connects i to j.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from sklearn.neighbors import kneighbors_graph\n >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)\n >>> A.toarray()\n array([[ 1., 0., 1.],\n [ 0., 1., 1.],\n [ 1., 0., 1.]])\n\n See also\n --------\n radius_neighbors_graph\n \"\"\"\n if not isinstance(X, KNeighborsMixin):\n X = NearestNeighbors(n_neighbors, metric=metric, p=p,\n metric_params=metric_params, n_jobs=n_jobs).fit(X)\n else:\n _check_params(X, metric, p, metric_params)\n\n query = _query_include_self(X, include_self)\n return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)\n\n\ndef radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',\n p=2, metric_params=None, include_self=False, n_jobs=1):\n \"\"\"Computes the (weighted) graph of Neighbors for points in X\n\n Neighborhoods are restricted the points at a distance lower than\n radius.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like or BallTree, shape = [n_samples, n_features]\n Sample data, in the form of a numpy array or a precomputed\n :class:`BallTree`.\n\n radius : float\n Radius of neighborhoods.\n\n mode : {'connectivity', 'distance'}, optional\n Type of returned matrix: 'connectivity' will return the connectivity\n matrix with ones and zeros, and 'distance' will return the distances\n between neighbors according to the given metric.\n\n metric : string, default 'minkowski'\n The distance metric used to calculate the neighbors within a\n given radius for each sample point. The DistanceMetric class\n gives a list of available metrics. The default distance is\n 'euclidean' ('minkowski' metric with the param equal to 2.)\n\n include_self : bool, default=False\n Whether or not to mark each sample as the first nearest neighbor to\n itself. If `None`, then True is used for mode='connectivity' and False\n for mode='distance' as this will preserve backwards compatibilty.\n\n p : int, default 2\n Power parameter for the Minkowski metric. 
When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n metric_params : dict, optional\n additional keyword arguments for the metric function.\n\n n_jobs : int, optional (default = 1)\n The number of parallel jobs to run for neighbors search.\n If ``-1``, then the number of jobs is set to the number of CPU cores.\n\n Returns\n -------\n A : sparse matrix in CSR format, shape = [n_samples, n_samples]\n A[i, j] is assigned the weight of edge that connects i to j.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from sklearn.neighbors import radius_neighbors_graph\n >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)\n >>> A.toarray()\n array([[ 1., 0., 1.],\n [ 0., 1., 0.],\n [ 1., 0., 1.]])\n\n See also\n --------\n kneighbors_graph\n \"\"\"\n if not isinstance(X, RadiusNeighborsMixin):\n X = NearestNeighbors(radius=radius, metric=metric, p=p,\n metric_params=metric_params, n_jobs=n_jobs).fit(X)\n else:\n _check_params(X, metric, p, metric_params)\n\n query = _query_include_self(X, include_self)\n return X.radius_neighbors_graph(query, radius, mode)\n","license":"bsd-3-clause"} {"repo_name":"micahcochran\/geopandas","path":"geopandas\/_version.py","copies":"3","size":"16750","content":"\n# This file helps to compute a version number in source trees obtained from\n# git-archive tarball (such as those provided by githubs download-from-tag\n# feature). Distribution tarballs (built by setup.py sdist) and build\n# directories (produced by setup.py build) will contain a much shorter file\n# that just contains the computed version number.\n\n# This file is released into the public domain. Generated by\n# versioneer-0.16 (https:\/\/github.com\/warner\/python-versioneer)\n\n\"\"\"Git implementation of _version.py.\"\"\"\n\nimport errno\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py\/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. 
_version.py will just call\n # get_keywords().\n git_refnames = \"$Format:%d$\"\n git_full = \"$Format:%H$\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full}\n return keywords\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"pep440\"\n cfg.tag_prefix = \"v\"\n cfg.parentdir_prefix = \"geopandas-\"\n cfg.versionfile_source = \"geopandas\/_version.py\"\n cfg.verbose = False\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n return decorate\n\n\ndef run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n return None\n return stdout\n\n\ndef versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.\n \"\"\"\n dirname = os.path.basename(root)\n if not dirname.startswith(parentdir_prefix):\n if verbose:\n print(\"guessing rootdir is '%s', but '%s' doesn't start with \"\n \"prefix '%s'\" % (root, dirname, parentdir_prefix))\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n return {\"version\": dirname[len(parentdir_prefix):],\n \"full-revisionid\": None,\n \"dirty\": False, \"error\": None}\n\n\n@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n f.close()\n except EnvironmentError:\n pass\n return keywords\n\n\n@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = set([r.strip() for r in refnames.strip(\"()\").split(\",\")])\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs\/heads\/ and refs\/tags\/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = set([r for r in refs if re.search(r'\\d', r)])\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs-tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix):]\n if verbose:\n print(\"picking %s\" % r)\n return {\"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": None\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": \"no suitable tags\"}\n\n\n@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n if not os.path.exists(os.path.join(root, \".git\")):\n if verbose:\n print(\"no .git in %s\" % root)\n raise NotThisMethod(\"no .git directory\")\n\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out = run_command(GITS, [\"describe\", \"--tags\", \"--dirty\",\n \"--always\", \"--long\",\n \"--match\", \"%s*\" % tag_prefix],\n cwd=root)\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[:git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r'^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = (\"unable to parse git-describe output: '%s'\"\n % describe_out)\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = (\"tag '%s' doesn't start with prefix '%s'\"\n % (full_tag, tag_prefix))\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix):]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"],\n cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n return pieces\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"],\n pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered\n\n\ndef render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered\n\n\ndef render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance\/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"]}\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\"version\": rendered, \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"], \"error\": None}\n\n\ndef get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT\/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe\/bbfreeze\/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,\n verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. 
Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split('\/'):\n root = os.path.dirname(root)\n except NameError:\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\"}\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\"}\n","license":"bsd-3-clause"} {"repo_name":"zycdragonball\/tensorflow","path":"tensorflow\/contrib\/learn\/python\/learn\/estimators\/linear_test.py","copies":"58","size":"71789","content":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for estimators.linear.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport json\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\nfrom tensorflow.contrib.learn.python.learn import experiment\nfrom tensorflow.contrib.learn.python.learn.datasets import base\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils\nfrom tensorflow.contrib.learn.python.learn.estimators import head as head_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import linear\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\nfrom tensorflow.contrib.learn.python.learn.estimators import test_data\nfrom tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec\nfrom tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib\nfrom tensorflow.contrib.metrics.python.ops import metric_ops\nfrom tensorflow.python.feature_column import feature_column as fc_core\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import ftrl\nfrom tensorflow.python.training import input as input_lib\nfrom tensorflow.python.training import server_lib\n\n\ndef _prepare_iris_data_for_logistic_regression():\n # Converts iris data to a logistic regression problem.\n iris = base.load_iris()\n ids = np.where((iris.target == 0) | (iris.target == 1))\n iris = base.Dataset(data=iris.data[ids], 
target=iris.target[ids])\n return iris\n\n\nclass LinearClassifierTest(test.TestCase):\n\n def testExperimentIntegration(self):\n cont_features = [\n feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n ]\n\n exp = experiment.Experiment(\n estimator=linear.LinearClassifier(\n n_classes=3, feature_columns=cont_features),\n train_input_fn=test_data.iris_input_multiclass_fn,\n eval_input_fn=test_data.iris_input_multiclass_fn)\n exp.test()\n\n def testEstimatorContract(self):\n estimator_test_utils.assert_estimator_contract(self,\n linear.LinearClassifier)\n\n def testTrain(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearClassifier(feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=100)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=200)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.01)\n\n def testJointTrain(self):\n \"\"\"Tests that loss goes down with training with joint weights.\"\"\"\n\n def input_fn():\n return {\n 'age':\n sparse_tensor.SparseTensor(\n values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)\n\n classifier = linear.LinearClassifier(\n _joint_weight=True, feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=100)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=200)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.01)\n\n def testMultiClass_MatrixData(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(\n n_classes=3, feature_columns=[feature_column])\n\n classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)\n scores = classifier.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=100)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testMultiClass_MatrixData_Labels1D(self):\n \"\"\"Same as the last test, but labels shape is [150] instead of [150, 1].\"\"\"\n\n def _input_fn():\n iris = base.load_iris()\n return {\n 'feature': constant_op.constant(\n iris.data, dtype=dtypes.float32)\n }, constant_op.constant(\n iris.target, shape=[150], dtype=dtypes.int32)\n\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(\n n_classes=3, feature_columns=[feature_column])\n\n classifier.fit(input_fn=_input_fn, steps=100)\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testMultiClass_NpMatrixData(self):\n \"\"\"Tests multi-class classification using numpy matrix data as 
input.\"\"\"\n iris = base.load_iris()\n train_x = iris.data\n train_y = iris.target\n feature_column = feature_column_lib.real_valued_column('', dimension=4)\n classifier = linear.LinearClassifier(\n n_classes=3, feature_columns=[feature_column])\n\n classifier.fit(x=train_x, y=train_y, steps=100)\n scores = classifier.evaluate(x=train_x, y=train_y, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testMultiClassLabelKeys(self):\n \"\"\"Tests n_classes > 2 with label_keys vocabulary for labels.\"\"\"\n # Byte literals needed for python3 test to pass.\n label_keys = [b'label0', b'label1', b'label2']\n\n def _input_fn(num_epochs=None):\n features = {\n 'language':\n sparse_tensor.SparseTensor(\n values=input_lib.limit_epochs(\n ['en', 'fr', 'zh'], num_epochs=num_epochs),\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n labels = constant_op.constant(\n [[label_keys[1]], [label_keys[0]], [label_keys[0]]],\n dtype=dtypes.string)\n return features, labels\n\n language_column = feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20)\n\n classifier = linear.LinearClassifier(\n n_classes=3,\n feature_columns=[language_column],\n label_keys=label_keys)\n\n classifier.fit(input_fn=_input_fn, steps=50)\n\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n self.assertIn('loss', scores)\n predict_input_fn = functools.partial(_input_fn, num_epochs=1)\n predicted_classes = list(\n classifier.predict_classes(\n input_fn=predict_input_fn, as_iterable=True))\n self.assertEqual(3, len(predicted_classes))\n for pred in predicted_classes:\n self.assertIn(pred, label_keys)\n predictions = list(\n classifier.predict(input_fn=predict_input_fn, as_iterable=True))\n self.assertAllEqual(predicted_classes, predictions)\n\n def testLogisticRegression_MatrixData(self):\n \"\"\"Tests binary classification using matrix data as input.\"\"\"\n\n def _input_fn():\n iris = _prepare_iris_data_for_logistic_regression()\n return {\n 'feature': constant_op.constant(\n iris.data, dtype=dtypes.float32)\n }, constant_op.constant(\n iris.target, shape=[100, 1], dtype=dtypes.int32)\n\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(feature_columns=[feature_column])\n\n classifier.fit(input_fn=_input_fn, steps=100)\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testEstimatorWithCoreFeatureColumns(self):\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[.8], [0.2], [.1]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=input_lib.limit_epochs(\n ['en', 'fr', 'zh'], num_epochs=num_epochs),\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)\n\n language_column = fc_core.categorical_column_with_hash_bucket(\n 'language', hash_bucket_size=20)\n feature_columns = [language_column, fc_core.numeric_column('age')]\n\n classifier = linear.LinearClassifier(feature_columns=feature_columns)\n classifier.fit(input_fn=_input_fn, steps=100)\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testLogisticRegression_MatrixData_Labels1D(self):\n \"\"\"Same as the last test, but labels shape is [100] instead of [100, 1].\"\"\"\n\n def _input_fn():\n iris = 
_prepare_iris_data_for_logistic_regression()\n return {\n 'feature': constant_op.constant(\n iris.data, dtype=dtypes.float32)\n }, constant_op.constant(\n iris.target, shape=[100], dtype=dtypes.int32)\n\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(feature_columns=[feature_column])\n\n classifier.fit(input_fn=_input_fn, steps=100)\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testLogisticRegression_NpMatrixData(self):\n \"\"\"Tests binary classification using numpy matrix data as input.\"\"\"\n iris = _prepare_iris_data_for_logistic_regression()\n train_x = iris.data\n train_y = iris.target\n feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]\n classifier = linear.LinearClassifier(feature_columns=feature_columns)\n\n classifier.fit(x=train_x, y=train_y, steps=100)\n scores = classifier.evaluate(x=train_x, y=train_y, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testWeightAndBiasNames(self):\n \"\"\"Tests that weight and bias names haven't changed.\"\"\"\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(\n n_classes=3, feature_columns=[feature_column])\n\n classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)\n\n variable_names = classifier.get_variable_names()\n self.assertIn('linear\/feature\/weight', variable_names)\n self.assertIn('linear\/bias_weight', variable_names)\n self.assertEqual(\n 4, len(classifier.get_variable_value('linear\/feature\/weight')))\n self.assertEqual(\n 3, len(classifier.get_variable_value('linear\/bias_weight')))\n\n def testCustomOptimizerByObject(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(\n n_classes=3,\n optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),\n feature_columns=[feature_column])\n\n classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)\n scores = classifier.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=100)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testCustomOptimizerByString(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n def _optimizer():\n return ftrl.FtrlOptimizer(learning_rate=0.1)\n\n classifier = linear.LinearClassifier(\n n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])\n\n classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)\n scores = classifier.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=100)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testCustomOptimizerByFunction(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n feature_column = feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n\n classifier = linear.LinearClassifier(\n n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])\n\n classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)\n scores = classifier.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=100)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testCustomMetrics(self):\n \"\"\"Tests custom evaluation metrics.\"\"\"\n\n def _input_fn(num_epochs=None):\n # Create 4 
rows, one of them (y = x), three of them (y=Not(x))\n labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)\n features = {\n 'x':\n input_lib.limit_epochs(\n array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n num_epochs=num_epochs)\n }\n return features, labels\n\n def _my_metric_op(predictions, labels):\n # For the case of binary classification, the 2nd column of \"predictions\"\n # denotes the model predictions.\n predictions = array_ops.strided_slice(\n predictions, [0, 1], [-1, 2], end_mask=1)\n return math_ops.reduce_sum(math_ops.multiply(predictions, labels))\n\n classifier = linear.LinearClassifier(\n feature_columns=[feature_column_lib.real_valued_column('x')])\n\n classifier.fit(input_fn=_input_fn, steps=100)\n scores = classifier.evaluate(\n input_fn=_input_fn,\n steps=100,\n metrics={\n 'my_accuracy':\n MetricSpec(\n metric_fn=metric_ops.streaming_accuracy,\n prediction_key='classes'),\n 'my_precision':\n MetricSpec(\n metric_fn=metric_ops.streaming_precision,\n prediction_key='classes'),\n 'my_metric':\n MetricSpec(\n metric_fn=_my_metric_op, prediction_key='probabilities')\n })\n self.assertTrue(\n set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(\n set(scores.keys())))\n predict_input_fn = functools.partial(_input_fn, num_epochs=1)\n predictions = np.array(list(classifier.predict_classes(\n input_fn=predict_input_fn)))\n self.assertEqual(\n _sklearn.accuracy_score([1, 0, 0, 0], predictions),\n scores['my_accuracy'])\n\n # Tests the case where the prediction_key is neither \"classes\" nor\n # \"probabilities\".\n with self.assertRaisesRegexp(KeyError, 'bad_type'):\n classifier.evaluate(\n input_fn=_input_fn,\n steps=100,\n metrics={\n 'bad_name':\n MetricSpec(\n metric_fn=metric_ops.streaming_auc,\n prediction_key='bad_type')\n })\n\n # Tests the case where the 2nd element of the key is neither \"classes\" nor\n # \"probabilities\".\n with self.assertRaises(KeyError):\n classifier.evaluate(\n input_fn=_input_fn,\n steps=100,\n metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})\n\n # Tests the case where the tuple of the key doesn't have 2 elements.\n with self.assertRaises(ValueError):\n classifier.evaluate(\n input_fn=_input_fn,\n steps=100,\n metrics={\n ('bad_length_name', 'classes', 'bad_length'):\n metric_ops.streaming_accuracy\n })\n\n def testLogisticFractionalLabels(self):\n \"\"\"Tests logistic training with fractional labels.\"\"\"\n\n def input_fn(num_epochs=None):\n return {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[1], [2]]), num_epochs=num_epochs),\n }, constant_op.constant(\n [[.7], [0]], dtype=dtypes.float32)\n\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearClassifier(\n feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))\n classifier.fit(input_fn=input_fn, steps=500)\n\n predict_input_fn = functools.partial(input_fn, num_epochs=1)\n predictions_proba = list(\n classifier.predict_proba(input_fn=predict_input_fn))\n # Prediction probabilities mirror the labels column, which proves that the\n # classifier learns from float input.\n self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)\n\n def testTrainWithPartitionedVariables(self):\n \"\"\"Tests training with partitioned variables.\"\"\"\n\n def _input_fn():\n features = {\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n labels = constant_op.constant([[1], [0], [0]])\n return features, 
labels\n\n sparse_features = [\n # The given hash_bucket_size results in variables larger than the\n # default min_slice_size attribute, so the variables are partitioned.\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=2e7)\n ]\n\n tf_config = {\n 'cluster': {\n run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']\n }\n }\n with test.mock.patch.dict('os.environ',\n {'TF_CONFIG': json.dumps(tf_config)}):\n config = run_config.RunConfig()\n # Because we did not start a distributed cluster, we need to pass an\n # empty ClusterSpec, otherwise the device_setter will look for\n # distributed jobs, such as \"\/job:ps\" which are not present.\n config._cluster_spec = server_lib.ClusterSpec({})\n\n classifier = linear.LinearClassifier(\n feature_columns=sparse_features, config=config)\n classifier.fit(input_fn=_input_fn, steps=200)\n loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']\n self.assertLess(loss, 0.07)\n\n def testTrainSaveLoad(self):\n \"\"\"Tests that insures you can save and reload a trained model.\"\"\"\n\n def input_fn(num_epochs=None):\n return {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([1]), num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n model_dir = tempfile.mkdtemp()\n classifier = linear.LinearClassifier(\n model_dir=model_dir, feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=30)\n predict_input_fn = functools.partial(input_fn, num_epochs=1)\n out1_class = list(\n classifier.predict_classes(\n input_fn=predict_input_fn, as_iterable=True))\n out1_proba = list(\n classifier.predict_proba(\n input_fn=predict_input_fn, as_iterable=True))\n del classifier\n\n classifier2 = linear.LinearClassifier(\n model_dir=model_dir, feature_columns=[age, language])\n out2_class = list(\n classifier2.predict_classes(\n input_fn=predict_input_fn, as_iterable=True))\n out2_proba = list(\n classifier2.predict_proba(\n input_fn=predict_input_fn, as_iterable=True))\n self.assertTrue(np.array_equal(out1_class, out2_class))\n self.assertTrue(np.array_equal(out1_proba, out2_proba))\n\n def testWeightColumn(self):\n \"\"\"Tests training with given weight column.\"\"\"\n\n def _input_fn_train():\n # Create 4 rows, one of them (y = x), three of them (y=Not(x))\n # First row has more weight than others. 
Model should fit (y=x) better\n # than (y=Not(x)) due to the relative higher weight of the first row.\n labels = constant_op.constant([[1], [0], [0], [0]])\n features = {\n 'x': array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n 'w': constant_op.constant([[100.], [3.], [2.], [2.]])\n }\n return features, labels\n\n def _input_fn_eval():\n # Create 4 rows (y = x)\n labels = constant_op.constant([[1], [1], [1], [1]])\n features = {\n 'x': array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n 'w': constant_op.constant([[1.], [1.], [1.], [1.]])\n }\n return features, labels\n\n classifier = linear.LinearClassifier(\n weight_column_name='w',\n feature_columns=[feature_column_lib.real_valued_column('x')],\n config=run_config.RunConfig(tf_random_seed=3))\n\n classifier.fit(input_fn=_input_fn_train, steps=100)\n scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)\n # All examples in eval data set are y=x.\n self.assertGreater(scores['labels\/actual_label_mean'], 0.9)\n # If there were no weight column, model would learn y=Not(x). Because of\n # weights, it learns y=x.\n self.assertGreater(scores['labels\/prediction_mean'], 0.9)\n # All examples in eval data set are y=x. So if weight column were ignored,\n # then accuracy would be zero. Because of weights, accuracy should be close\n # to 1.0.\n self.assertGreater(scores['accuracy'], 0.9)\n\n scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)\n # Considering weights, the mean label should be close to 1.0.\n # If weights were ignored, it would be 0.25.\n self.assertGreater(scores_train_set['labels\/actual_label_mean'], 0.9)\n # The classifier has learned y=x. If weight column were ignored in\n # evaluation, then accuracy for the train set would be 0.25.\n # Because weight is not ignored, accuracy is greater than 0.6.\n self.assertGreater(scores_train_set['accuracy'], 0.6)\n\n def testWeightColumnLoss(self):\n \"\"\"Test ensures that you can specify per-example weights for loss.\"\"\"\n\n def _input_fn():\n features = {\n 'age': constant_op.constant([[20], [20], [20]]),\n 'weights': constant_op.constant([[100], [1], [1]]),\n }\n labels = constant_op.constant([[1], [0], [0]])\n return features, labels\n\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearClassifier(feature_columns=[age])\n classifier.fit(input_fn=_input_fn, steps=100)\n loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']\n\n classifier = linear.LinearClassifier(\n feature_columns=[age], weight_column_name='weights')\n classifier.fit(input_fn=_input_fn, steps=100)\n loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']\n\n self.assertLess(loss_weighted, loss_unweighted)\n\n def testExport(self):\n \"\"\"Tests that export model for servo works.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearClassifier(feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=100)\n\n export_dir = tempfile.mkdtemp()\n classifier.export(export_dir)\n\n def testDisableCenteredBias(self):\n \"\"\"Tests that we can disable centered bias.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n 
sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearClassifier(\n feature_columns=[age, language], enable_centered_bias=False)\n classifier.fit(input_fn=input_fn, steps=100)\n self.assertNotIn('centered_bias_weight', classifier.get_variable_names())\n\n def testEnableCenteredBias(self):\n \"\"\"Tests that we can enable centered bias.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearClassifier(\n feature_columns=[age, language], enable_centered_bias=True)\n classifier.fit(input_fn=input_fn, steps=100)\n self.assertIn('linear\/binary_logistic_head\/centered_bias_weight',\n classifier.get_variable_names())\n\n def testTrainOptimizerWithL1Reg(self):\n \"\"\"Tests l1 regularized model has higher loss.\"\"\"\n\n def input_fn():\n return {\n 'language':\n sparse_tensor.SparseTensor(\n values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n classifier_no_reg = linear.LinearClassifier(feature_columns=[language])\n classifier_with_reg = linear.LinearClassifier(\n feature_columns=[language],\n optimizer=ftrl.FtrlOptimizer(\n learning_rate=1.0, l1_regularization_strength=100.))\n loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(\n input_fn=input_fn, steps=1)['loss']\n loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,\n steps=100).evaluate(\n input_fn=input_fn,\n steps=1)['loss']\n self.assertLess(loss_no_reg, loss_with_reg)\n\n def testTrainWithMissingFeature(self):\n \"\"\"Tests that training works with missing features.\"\"\"\n\n def input_fn():\n return {\n 'language':\n sparse_tensor.SparseTensor(\n values=['Swahili', 'turkish'],\n indices=[[0, 0], [2, 0]],\n dense_shape=[3, 1])\n }, constant_op.constant([[1], [1], [1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n classifier = linear.LinearClassifier(feature_columns=[language])\n classifier.fit(input_fn=input_fn, steps=100)\n loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.07)\n\n def testSdcaOptimizerRealValuedFeatures(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and real valued features.\"\"\"\n\n def input_fn():\n return {\n 'example_id': constant_op.constant(['1', '2']),\n 'maintenance_cost': constant_op.constant([[500.0], [200.0]]),\n 'sq_footage': constant_op.constant([[800.0], [600.0]]),\n 'weights': constant_op.constant([[1.0], [1.0]])\n }, constant_op.constant([[0], [1]])\n\n maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')\n sq_footage = feature_column_lib.real_valued_column('sq_footage')\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n classifier = linear.LinearClassifier(\n feature_columns=[maintenance_cost, sq_footage],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=100)\n loss = 
classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.05)\n\n def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):\n \"\"\"Tests SDCAOptimizer with real valued features of higher dimension.\"\"\"\n\n # input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures\n # where 2 1-dimensional dense features have been replaced by 1 2-dimensional\n # feature.\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2']),\n 'dense_feature':\n constant_op.constant([[500.0, 800.0], [200.0, 600.0]])\n }, constant_op.constant([[0], [1]])\n\n dense_feature = feature_column_lib.real_valued_column(\n 'dense_feature', dimension=2)\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n classifier = linear.LinearClassifier(\n feature_columns=[dense_feature], optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=100)\n loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.05)\n\n def testSdcaOptimizerBucketizedFeatures(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and bucketized features.\"\"\"\n\n def input_fn():\n return {\n 'example_id': constant_op.constant(['1', '2', '3']),\n 'price': constant_op.constant([[600.0], [1000.0], [400.0]]),\n 'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),\n 'weights': constant_op.constant([[1.0], [1.0], [1.0]])\n }, constant_op.constant([[1], [0], [1]])\n\n price_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('price'),\n boundaries=[500.0, 700.0])\n sq_footage_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id', symmetric_l2_regularization=1.0)\n classifier = linear.LinearClassifier(\n feature_columns=[price_bucket, sq_footage_bucket],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=50)\n scores = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testSdcaOptimizerSparseFeatures(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and sparse features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([0.4, 0.6, 0.3]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[1.0], [1.0], [1.0]])\n }, constant_op.constant([[1], [0], [1]])\n\n price = feature_column_lib.real_valued_column('price')\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n classifier = linear.LinearClassifier(\n feature_columns=[price, country],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=50)\n scores = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testSdcaOptimizerWeightedSparseFeatures(self):\n \"\"\"LinearClassifier with SDCAOptimizer and weighted sparse features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n sparse_tensor.SparseTensor(\n values=[2., 3., 1.],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 5]),\n 'country':\n 
sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 5])\n }, constant_op.constant([[1], [0], [1]])\n\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n country_weighted_by_price = feature_column_lib.weighted_sparse_column(\n country, 'price')\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n classifier = linear.LinearClassifier(\n feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=50)\n scores = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testSdcaOptimizerCrossedFeatures(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and crossed features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english', 'italian', 'spanish'],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 1]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['US', 'IT', 'MX'],\n indices=[[0, 0], [1, 0], [2, 0]],\n dense_shape=[3, 1])\n }, constant_op.constant([[0], [0], [1]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=5)\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n country_language = feature_column_lib.crossed_column(\n [language, country], hash_bucket_size=10)\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n classifier = linear.LinearClassifier(\n feature_columns=[country_language], optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=10)\n scores = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testSdcaOptimizerMixedFeatures(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and a mix of features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([[0.6], [0.8], [0.3]]),\n 'sq_footage':\n constant_op.constant([[900.0], [700.0], [600.0]]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[3.0], [1.0], [1.0]])\n }, constant_op.constant([[1], [0], [1]])\n\n price = feature_column_lib.real_valued_column('price')\n sq_footage_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('sq_footage'),\n boundaries=[650.0, 800.0])\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n sq_footage_country = feature_column_lib.crossed_column(\n [sq_footage_bucket, country], hash_bucket_size=10)\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n classifier = linear.LinearClassifier(\n feature_columns=[price, sq_footage_bucket, country, sq_footage_country],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n classifier.fit(input_fn=input_fn, steps=50)\n scores = classifier.evaluate(input_fn=input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n\n def testEval(self):\n \"\"\"Tests that eval produces correct metrics.\n \"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([[1], [2]]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['greek', 'chinese'],\n indices=[[0, 0], [1, 0]],\n dense_shape=[2, 1]),\n 
}, constant_op.constant([[1], [0]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n classifier = linear.LinearClassifier(feature_columns=[age, language])\n\n # Evaluate on trained model\n classifier.fit(input_fn=input_fn, steps=100)\n classifier.evaluate(input_fn=input_fn, steps=1)\n\n # TODO(ispir): Enable accuracy check after resolving the randomness issue.\n # self.assertLess(evaluated_values['loss\/mean'], 0.3)\n # self.assertGreater(evaluated_values['accuracy\/mean'], .95)\n\n\nclass LinearRegressorTest(test.TestCase):\n\n def testExperimentIntegration(self):\n cont_features = [\n feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n ]\n\n exp = experiment.Experiment(\n estimator=linear.LinearRegressor(feature_columns=cont_features),\n train_input_fn=test_data.iris_input_logistic_fn,\n eval_input_fn=test_data.iris_input_logistic_fn)\n exp.test()\n\n def testEstimatorContract(self):\n estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)\n\n def testRegression(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[10.]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n classifier = linear.LinearRegressor(feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=100)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=200)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.5)\n\n def testRegression_MatrixData(self):\n \"\"\"Tests regression using matrix data as input.\"\"\"\n cont_features = [\n feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n ]\n\n regressor = linear.LinearRegressor(\n feature_columns=cont_features,\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)\n scores = regressor.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=1)\n self.assertLess(scores['loss'], 0.2)\n\n def testRegression_TensorData(self):\n \"\"\"Tests regression using tensor data as input.\"\"\"\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[0.8], [0.15], [0.]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant(\n [1.0, 0., 0.2], dtype=dtypes.float32)\n\n feature_columns = [\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20),\n feature_column_lib.real_valued_column('age')\n ]\n\n regressor = linear.LinearRegressor(\n feature_columns=feature_columns,\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn, steps=100)\n\n scores = regressor.evaluate(input_fn=_input_fn, steps=1)\n self.assertLess(scores['loss'], 0.2)\n\n def testLoss(self):\n \"\"\"Tests loss calculation.\"\"\"\n\n def _input_fn_train():\n # Create 4 rows, one of them (y = x), three of them (y=Not(x))\n # The algorithm should learn (y = 0.25).\n labels = constant_op.constant([[1.], [0.], [0.], 
[0.]])\n features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}\n return features, labels\n\n regressor = linear.LinearRegressor(\n feature_columns=[feature_column_lib.real_valued_column('x')],\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn_train, steps=100)\n scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)\n # Average square loss = (0.75^2 + 3*0.25^2) \/ 4 = 0.1875\n self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)\n\n def testLossWithWeights(self):\n \"\"\"Tests loss calculation with weights.\"\"\"\n\n def _input_fn_train():\n # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))\n # The algorithm should learn (y = 0.25).\n labels = constant_op.constant([[1.], [0.], [0.], [0.]])\n features = {\n 'x': array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n 'w': constant_op.constant([[1.], [1.], [1.], [1.]])\n }\n return features, labels\n\n def _input_fn_eval():\n # 4 rows, with different weights.\n labels = constant_op.constant([[1.], [0.], [0.], [0.]])\n features = {\n 'x': array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n 'w': constant_op.constant([[7.], [1.], [1.], [1.]])\n }\n return features, labels\n\n regressor = linear.LinearRegressor(\n weight_column_name='w',\n feature_columns=[feature_column_lib.real_valued_column('x')],\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn_train, steps=100)\n scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)\n # Weighted average square loss = (7*0.75^2 + 3*0.25^2) \/ 10 = 0.4125\n self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)\n\n def testTrainWithWeights(self):\n \"\"\"Tests training with given weight column.\"\"\"\n\n def _input_fn_train():\n # Create 4 rows, one of them (y = x), three of them (y=Not(x))\n # First row has more weight than others. 
Model should fit (y=x) better\n # than (y=Not(x)) due to the relative higher weight of the first row.\n labels = constant_op.constant([[1.], [0.], [0.], [0.]])\n features = {\n 'x': array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n 'w': constant_op.constant([[100.], [3.], [2.], [2.]])\n }\n return features, labels\n\n def _input_fn_eval():\n # Create 4 rows (y = x)\n labels = constant_op.constant([[1.], [1.], [1.], [1.]])\n features = {\n 'x': array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n 'w': constant_op.constant([[1.], [1.], [1.], [1.]])\n }\n return features, labels\n\n regressor = linear.LinearRegressor(\n weight_column_name='w',\n feature_columns=[feature_column_lib.real_valued_column('x')],\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn_train, steps=100)\n scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)\n # The model should learn (y = x) because of the weights, so the loss should\n # be close to zero.\n self.assertLess(scores['loss'], 0.1)\n\n def testPredict_AsIterableFalse(self):\n \"\"\"Tests predict method with as_iterable=False.\"\"\"\n labels = [1.0, 0., 0.2]\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[0.8], [0.15], [0.]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant(labels, dtype=dtypes.float32)\n\n feature_columns = [\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20),\n feature_column_lib.real_valued_column('age')\n ]\n\n regressor = linear.LinearRegressor(\n feature_columns=feature_columns,\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn, steps=100)\n\n scores = regressor.evaluate(input_fn=_input_fn, steps=1)\n self.assertLess(scores['loss'], 0.1)\n predicted_scores = regressor.predict_scores(\n input_fn=_input_fn, as_iterable=False)\n self.assertAllClose(labels, predicted_scores, atol=0.1)\n predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)\n self.assertAllClose(predicted_scores, predictions)\n\n def testPredict_AsIterable(self):\n \"\"\"Tests predict method with as_iterable=True.\"\"\"\n labels = [1.0, 0., 0.2]\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[0.8], [0.15], [0.]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant(labels, dtype=dtypes.float32)\n\n feature_columns = [\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20),\n feature_column_lib.real_valued_column('age')\n ]\n\n regressor = linear.LinearRegressor(\n feature_columns=feature_columns,\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn, steps=100)\n\n scores = regressor.evaluate(input_fn=_input_fn, steps=1)\n self.assertLess(scores['loss'], 0.1)\n predict_input_fn = functools.partial(_input_fn, num_epochs=1)\n predicted_scores = list(\n regressor.predict_scores(\n input_fn=predict_input_fn, as_iterable=True))\n self.assertAllClose(labels, predicted_scores, atol=0.1)\n predictions = list(\n regressor.predict(\n input_fn=predict_input_fn, as_iterable=True))\n self.assertAllClose(predicted_scores, predictions)\n\n def 
testCustomMetrics(self):\n \"\"\"Tests custom evaluation metrics.\"\"\"\n\n def _input_fn(num_epochs=None):\n # Create 4 rows, one of them (y = x), three of them (y=Not(x))\n labels = constant_op.constant([[1.], [0.], [0.], [0.]])\n features = {\n 'x':\n input_lib.limit_epochs(\n array_ops.ones(\n shape=[4, 1], dtype=dtypes.float32),\n num_epochs=num_epochs)\n }\n return features, labels\n\n def _my_metric_op(predictions, labels):\n return math_ops.reduce_sum(math_ops.multiply(predictions, labels))\n\n regressor = linear.LinearRegressor(\n feature_columns=[feature_column_lib.real_valued_column('x')],\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn, steps=100)\n scores = regressor.evaluate(\n input_fn=_input_fn,\n steps=1,\n metrics={\n 'my_error':\n MetricSpec(\n metric_fn=metric_ops.streaming_mean_squared_error,\n prediction_key='scores'),\n 'my_metric':\n MetricSpec(\n metric_fn=_my_metric_op, prediction_key='scores')\n })\n self.assertIn('loss', set(scores.keys()))\n self.assertIn('my_error', set(scores.keys()))\n self.assertIn('my_metric', set(scores.keys()))\n predict_input_fn = functools.partial(_input_fn, num_epochs=1)\n predictions = np.array(list(\n regressor.predict_scores(input_fn=predict_input_fn)))\n self.assertAlmostEqual(\n _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),\n scores['my_error'])\n\n # Tests the case where the prediction_key is not \"scores\".\n with self.assertRaisesRegexp(KeyError, 'bad_type'):\n regressor.evaluate(\n input_fn=_input_fn,\n steps=1,\n metrics={\n 'bad_name':\n MetricSpec(\n metric_fn=metric_ops.streaming_auc,\n prediction_key='bad_type')\n })\n\n # Tests the case where the 2nd element of the key is not \"scores\".\n with self.assertRaises(KeyError):\n regressor.evaluate(\n input_fn=_input_fn,\n steps=1,\n metrics={\n ('my_error', 'predictions'):\n metric_ops.streaming_mean_squared_error\n })\n\n # Tests the case where the tuple of the key doesn't have 2 elements.\n with self.assertRaises(ValueError):\n regressor.evaluate(\n input_fn=_input_fn,\n steps=1,\n metrics={\n ('bad_length_name', 'scores', 'bad_length'):\n metric_ops.streaming_mean_squared_error\n })\n\n def testTrainSaveLoad(self):\n \"\"\"Tests that insures you can save and reload a trained model.\"\"\"\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[0.8], [0.15], [0.]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant(\n [1.0, 0., 0.2], dtype=dtypes.float32)\n\n feature_columns = [\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20),\n feature_column_lib.real_valued_column('age')\n ]\n\n model_dir = tempfile.mkdtemp()\n regressor = linear.LinearRegressor(\n model_dir=model_dir,\n feature_columns=feature_columns,\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn, steps=100)\n predict_input_fn = functools.partial(_input_fn, num_epochs=1)\n predictions = list(regressor.predict_scores(input_fn=predict_input_fn))\n del regressor\n\n regressor2 = linear.LinearRegressor(\n model_dir=model_dir, feature_columns=feature_columns)\n predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))\n self.assertAllClose(predictions, predictions2)\n\n def testTrainWithPartitionedVariables(self):\n \"\"\"Tests training with partitioned 
variables.\"\"\"\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[0.8], [0.15], [0.]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant(\n [1.0, 0., 0.2], dtype=dtypes.float32)\n\n feature_columns = [\n # The given hash_bucket_size results in variables larger than the\n # default min_slice_size attribute, so the variables are partitioned.\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=2e7),\n feature_column_lib.real_valued_column('age')\n ]\n\n tf_config = {\n 'cluster': {\n run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']\n }\n }\n with test.mock.patch.dict('os.environ',\n {'TF_CONFIG': json.dumps(tf_config)}):\n config = run_config.RunConfig(tf_random_seed=1)\n # Because we did not start a distributed cluster, we need to pass an\n # empty ClusterSpec, otherwise the device_setter will look for\n # distributed jobs, such as \"\/job:ps\" which are not present.\n config._cluster_spec = server_lib.ClusterSpec({})\n\n regressor = linear.LinearRegressor(\n feature_columns=feature_columns, config=config)\n\n regressor.fit(input_fn=_input_fn, steps=100)\n\n scores = regressor.evaluate(input_fn=_input_fn, steps=1)\n self.assertLess(scores['loss'], 0.1)\n\n def testDisableCenteredBias(self):\n \"\"\"Tests that we can disable centered bias.\"\"\"\n\n def _input_fn(num_epochs=None):\n features = {\n 'age':\n input_lib.limit_epochs(\n constant_op.constant([[0.8], [0.15], [0.]]),\n num_epochs=num_epochs),\n 'language':\n sparse_tensor.SparseTensor(\n values=['en', 'fr', 'zh'],\n indices=[[0, 0], [0, 1], [2, 0]],\n dense_shape=[3, 2])\n }\n return features, constant_op.constant(\n [1.0, 0., 0.2], dtype=dtypes.float32)\n\n feature_columns = [\n feature_column_lib.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20),\n feature_column_lib.real_valued_column('age')\n ]\n\n regressor = linear.LinearRegressor(\n feature_columns=feature_columns,\n enable_centered_bias=False,\n config=run_config.RunConfig(tf_random_seed=1))\n\n regressor.fit(input_fn=_input_fn, steps=100)\n\n scores = regressor.evaluate(input_fn=_input_fn, steps=1)\n self.assertLess(scores['loss'], 0.1)\n\n def testRecoverWeights(self):\n rng = np.random.RandomState(67)\n n = 1000\n n_weights = 10\n bias = 2\n x = rng.uniform(-1, 1, (n, n_weights))\n weights = 10 * rng.randn(n_weights)\n y = np.dot(x, weights)\n y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)\n feature_columns = estimator.infer_real_valued_columns_from_input(x)\n regressor = linear.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))\n regressor.fit(x, y, batch_size=64, steps=2000)\n self.assertIn('linear\/\/weight', regressor.get_variable_names())\n regressor_weights = regressor.get_variable_value('linear\/\/weight')\n # Have to flatten weights since they come in (x, 1) shape.\n self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)\n # TODO(ispir): Disable centered_bias.\n # assert abs(bias - regressor.bias_) < 0.1\n\n def testSdcaOptimizerRealValuedLinearFeatures(self):\n \"\"\"Tests LinearRegressor with SDCAOptimizer and real valued features.\"\"\"\n x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]\n weights = [[3.0], [-1.2], [0.5]]\n y = np.dot(x, weights)\n\n def input_fn():\n return {\n 'example_id': constant_op.constant(['1', 
'2', '3']),\n 'x': constant_op.constant(x),\n 'weights': constant_op.constant([[10.0], [10.0], [10.0]])\n }, constant_op.constant(y)\n\n x_column = feature_column_lib.real_valued_column('x', dimension=3)\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n regressor = linear.LinearRegressor(\n feature_columns=[x_column],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n regressor.fit(input_fn=input_fn, steps=20)\n loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.01)\n self.assertIn('linear\/x\/weight', regressor.get_variable_names())\n regressor_weights = regressor.get_variable_value('linear\/x\/weight')\n self.assertAllClose(\n [w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)\n\n def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):\n \"\"\"Tests LinearRegressor with SDCAOptimizer and a mix of features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([0.6, 0.8, 0.3]),\n 'sq_footage':\n constant_op.constant([[900.0], [700.0], [600.0]]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[3.0], [5.0], [7.0]])\n }, constant_op.constant([[1.55], [-1.25], [-3.0]])\n\n price = feature_column_lib.real_valued_column('price')\n sq_footage_bucket = feature_column_lib.bucketized_column(\n feature_column_lib.real_valued_column('sq_footage'),\n boundaries=[650.0, 800.0])\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n sq_footage_country = feature_column_lib.crossed_column(\n [sq_footage_bucket, country], hash_bucket_size=10)\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id', symmetric_l2_regularization=1.0)\n regressor = linear.LinearRegressor(\n feature_columns=[price, sq_footage_bucket, country, sq_footage_country],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n regressor.fit(input_fn=input_fn, steps=20)\n loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss, 0.05)\n\n def testSdcaOptimizerSparseFeaturesWithL1Reg(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and sparse features.\"\"\"\n\n def input_fn():\n return {\n 'example_id':\n constant_op.constant(['1', '2', '3']),\n 'price':\n constant_op.constant([[0.4], [0.6], [0.3]]),\n 'country':\n sparse_tensor.SparseTensor(\n values=['IT', 'US', 'GB'],\n indices=[[0, 0], [1, 3], [2, 1]],\n dense_shape=[3, 5]),\n 'weights':\n constant_op.constant([[10.0], [10.0], [10.0]])\n }, constant_op.constant([[1.4], [-0.8], [2.6]])\n\n price = feature_column_lib.real_valued_column('price')\n country = feature_column_lib.sparse_column_with_hash_bucket(\n 'country', hash_bucket_size=5)\n # Regressor with no L1 regularization.\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n regressor = linear.LinearRegressor(\n feature_columns=[price, country],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n regressor.fit(input_fn=input_fn, steps=20)\n no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n variable_names = regressor.get_variable_names()\n self.assertIn('linear\/price\/weight', variable_names)\n self.assertIn('linear\/country\/weights', variable_names)\n no_l1_reg_weights = {\n 'linear\/price\/weight': regressor.get_variable_value(\n 
'linear\/price\/weight'),\n 'linear\/country\/weights': regressor.get_variable_value(\n 'linear\/country\/weights'),\n }\n\n # Regressor with L1 regularization.\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id', symmetric_l1_regularization=1.0)\n regressor = linear.LinearRegressor(\n feature_columns=[price, country],\n weight_column_name='weights',\n optimizer=sdca_optimizer)\n regressor.fit(input_fn=input_fn, steps=20)\n l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']\n l1_reg_weights = {\n 'linear\/price\/weight': regressor.get_variable_value(\n 'linear\/price\/weight'),\n 'linear\/country\/weights': regressor.get_variable_value(\n 'linear\/country\/weights'),\n }\n\n # Unregularized loss is lower when there is no L1 regularization.\n self.assertLess(no_l1_reg_loss, l1_reg_loss)\n self.assertLess(no_l1_reg_loss, 0.05)\n\n # But weights returned by the regressor with L1 regularization have smaller\n # L1 norm.\n l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0\n for var_name in sorted(l1_reg_weights):\n l1_reg_weights_norm += sum(\n np.absolute(l1_reg_weights[var_name].flatten()))\n no_l1_reg_weights_norm += sum(\n np.absolute(no_l1_reg_weights[var_name].flatten()))\n print('Var name: %s, value: %s' %\n (var_name, no_l1_reg_weights[var_name].flatten()))\n self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)\n\n def testSdcaOptimizerBiasOnly(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and validates bias weight.\"\"\"\n\n def input_fn():\n \"\"\"Testing the bias weight when it's the only feature present.\n\n All of the instances in this input only have the bias feature, and a\n 1\/4 of the labels are positive. This means that the expected weight for\n the bias should be close to the average prediction, i.e 0.25.\n Returns:\n Training data for the test.\n \"\"\"\n num_examples = 40\n return {\n 'example_id':\n constant_op.constant([str(x + 1) for x in range(num_examples)]),\n # place_holder is an empty column which is always 0 (absent), because\n # LinearClassifier requires at least one column.\n 'place_holder':\n constant_op.constant([[0.0]] * num_examples),\n }, constant_op.constant(\n [[1 if i % 4 is 0 else 0] for i in range(num_examples)])\n\n place_holder = feature_column_lib.real_valued_column('place_holder')\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n regressor = linear.LinearRegressor(\n feature_columns=[place_holder], optimizer=sdca_optimizer)\n regressor.fit(input_fn=input_fn, steps=100)\n\n self.assertNear(\n regressor.get_variable_value('linear\/bias_weight')[0], 0.25, err=0.1)\n\n def testSdcaOptimizerBiasAndOtherColumns(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and validates bias weight.\"\"\"\n\n def input_fn():\n \"\"\"Testing the bias weight when there are other features present.\n\n 1\/2 of the instances in this input have feature 'a', the rest have\n feature 'b', and we expect the bias to be added to each instance as well.\n 0.4 of all instances that have feature 'a' are positive, and 0.2 of all\n instances that have feature 'b' are positive. 
The labels in the dataset\n are ordered to appear shuffled since SDCA expects shuffled data, and\n converges faster with this pseudo-random ordering.\n If the bias was centered we would expect the weights to be:\n bias: 0.3\n a: 0.1\n b: -0.1\n Until b\/29339026 is resolved, the bias gets regularized with the same\n global value for the other columns, and so the expected weights get\n shifted and are:\n bias: 0.2\n a: 0.2\n b: 0.0\n Returns:\n The test dataset.\n \"\"\"\n num_examples = 200\n half = int(num_examples \/ 2)\n return {\n 'example_id':\n constant_op.constant([str(x + 1) for x in range(num_examples)]),\n 'a':\n constant_op.constant([[1]] * int(half) + [[0]] * int(half)),\n 'b':\n constant_op.constant([[0]] * int(half) + [[1]] * int(half)),\n }, constant_op.constant(\n [[x]\n for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half \/ 10) +\n [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half \/ 10)])\n\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n regressor = linear.LinearRegressor(\n feature_columns=[\n feature_column_lib.real_valued_column('a'),\n feature_column_lib.real_valued_column('b')\n ],\n optimizer=sdca_optimizer)\n\n regressor.fit(input_fn=input_fn, steps=200)\n\n variable_names = regressor.get_variable_names()\n self.assertIn('linear\/bias_weight', variable_names)\n self.assertIn('linear\/a\/weight', variable_names)\n self.assertIn('linear\/b\/weight', variable_names)\n # TODO(b\/29339026): Change the expected results to expect a centered bias.\n self.assertNear(\n regressor.get_variable_value('linear\/bias_weight')[0], 0.2, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear\/a\/weight')[0], 0.2, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear\/b\/weight')[0], 0.0, err=0.05)\n\n def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):\n \"\"\"Tests LinearClassifier with SDCAOptimizer and validates bias weight.\"\"\"\n\n def input_fn():\n \"\"\"Testing the bias weight when there are other features present.\n\n 1\/2 of the instances in this input have feature 'a', the rest have\n feature 'b', and we expect the bias to be added to each instance as well.\n 0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of\n all instances that have feature 'b' have a label of -1.\n We can expect the weights to be:\n bias: 0.0\n a: 0.1\n b: -0.1\n Returns:\n The test dataset.\n \"\"\"\n num_examples = 200\n half = int(num_examples \/ 2)\n return {\n 'example_id':\n constant_op.constant([str(x + 1) for x in range(num_examples)]),\n 'a':\n constant_op.constant([[1]] * int(half) + [[0]] * int(half)),\n 'b':\n constant_op.constant([[0]] * int(half) + [[1]] * int(half)),\n }, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +\n [[-1 if x % 10 == 0 else 0] for x in range(half)])\n\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n regressor = linear.LinearRegressor(\n feature_columns=[\n feature_column_lib.real_valued_column('a'),\n feature_column_lib.real_valued_column('b')\n ],\n optimizer=sdca_optimizer)\n\n regressor.fit(input_fn=input_fn, steps=100)\n\n variable_names = regressor.get_variable_names()\n self.assertIn('linear\/bias_weight', variable_names)\n self.assertIn('linear\/a\/weight', variable_names)\n self.assertIn('linear\/b\/weight', variable_names)\n self.assertNear(\n regressor.get_variable_value('linear\/bias_weight')[0], 0.0, err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear\/a\/weight')[0], 0.1, 
err=0.05)\n self.assertNear(\n regressor.get_variable_value('linear\/b\/weight')[0], -0.1, err=0.05)\n\n\nclass LinearEstimatorTest(test.TestCase):\n\n def testExperimentIntegration(self):\n cont_features = [\n feature_column_lib.real_valued_column(\n 'feature', dimension=4)\n ]\n exp = experiment.Experiment(\n estimator=linear.LinearEstimator(feature_columns=cont_features,\n head=head_lib.regression_head()),\n train_input_fn=test_data.iris_input_logistic_fn,\n eval_input_fn=test_data.iris_input_logistic_fn)\n exp.test()\n\n def testEstimatorContract(self):\n estimator_test_utils.assert_estimator_contract(self,\n linear.LinearEstimator)\n\n def testLinearRegression(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[10.]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n linear_estimator = linear.LinearEstimator(feature_columns=[age, language],\n head=head_lib.regression_head())\n linear_estimator.fit(input_fn=input_fn, steps=100)\n loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']\n linear_estimator.fit(input_fn=input_fn, steps=400)\n loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']\n\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.5)\n\n def testPoissonRegression(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n\n def input_fn():\n return {\n 'age':\n constant_op.constant([1]),\n 'language':\n sparse_tensor.SparseTensor(\n values=['english'], indices=[[0, 0]], dense_shape=[1, 1])\n }, constant_op.constant([[10.]])\n\n language = feature_column_lib.sparse_column_with_hash_bucket('language',\n 100)\n age = feature_column_lib.real_valued_column('age')\n\n linear_estimator = linear.LinearEstimator(\n feature_columns=[age, language],\n head=head_lib.poisson_regression_head())\n linear_estimator.fit(input_fn=input_fn, steps=10)\n loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']\n linear_estimator.fit(input_fn=input_fn, steps=100)\n loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']\n\n self.assertLess(loss2, loss1)\n # Here loss of 2.1 implies a prediction of ~9.9998\n self.assertLess(loss2, 2.1)\n\n def testSDCANotSupported(self):\n \"\"\"Tests that we detect error for SDCA.\"\"\"\n maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')\n sq_footage = feature_column_lib.real_valued_column('sq_footage')\n sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(\n example_id_column='example_id')\n with self.assertRaises(ValueError):\n linear.LinearEstimator(\n head=head_lib.regression_head(label_dimension=1),\n feature_columns=[maintenance_cost, sq_footage],\n optimizer=sdca_optimizer,\n _joint_weights=True)\n\n\ndef boston_input_fn():\n boston = base.load_boston()\n features = math_ops.cast(\n array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),\n dtypes.float32)\n labels = math_ops.cast(\n array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),\n dtypes.float32)\n return features, labels\n\n\nclass FeatureColumnTest(test.TestCase):\n\n def testTrain(self):\n feature_columns = estimator.infer_real_valued_columns_from_input_fn(\n boston_input_fn)\n est = linear.LinearRegressor(feature_columns=feature_columns)\n est.fit(input_fn=boston_input_fn, steps=1)\n _ 
= est.evaluate(input_fn=boston_input_fn, steps=1)\n\n\nif __name__ == '__main__':\n test.main()\n","license":"apache-2.0"} {"repo_name":"yqzhang\/OpenANN","path":"benchmarks\/iris\/benchmark.py","copies":"5","size":"3308","content":"## \\page IrisBenchmark Iris Flower Dataset\n#\n# The iris dataset is a standard machine learning dataset.\n# See e.g. the Wikipedia article<\/a> for more details.\n#\n# You can start the benchmark with the script:\n# \\verbatim\n# python benchmark.py [run]\n# \\endverbatim\n# Note that you need Scikit Learn to load the dataset.\n#\n# The result will look like\n# \\verbatim\n# Iris data set has 4 inputs, 3 classes and 150 examples\n# The data has been split up input training and validation set.\n# Correct predictions on training set: 120\/120\n# Confusion matrix:\n# [[ 40. 0. 0.]\n# [ 0. 40. 0.]\n# [ 0. 0. 40.]]\n# Correct predictions on test set: 30\/30\n# Confusion matrix:\n# [[ 10. 0. 0.]\n# [ 0. 10. 0.]\n# [ 0. 0. 10.]]\n# \\endverbatim\n\nimport sys\ntry:\n from sklearn import datasets\nexcept:\n print(\"scikit-learn is required to run this example.\")\n exit(1)\ntry:\n from openann import *\nexcept:\n print(\"OpenANN Python bindings are not installed!\")\n exit(1)\n\n\ndef print_usage():\n print(\"Usage:\")\n print(\" python benchmark [run]\")\n\n\ndef run_iris():\n # Load IRIS dataset\n iris = datasets.load_iris()\n X = iris.data\n Y = iris.target\n D = X.shape[1]\n F = len(numpy.unique(Y))\n N = len(X)\n\n # Preprocess data (normalization and 1-of-c encoding)\n X = (X - X.mean(axis=0)) \/ X.std(axis=0)\n T = numpy.zeros((N, F))\n T[(range(N), Y)] = 1.0\n\n # Setup network\n net = Net()\n net.set_regularization(0.0, 0.01, 0.0)\n net.input_layer(D)\n net.fully_connected_layer(100, Activation.RECTIFIER)\n net.fully_connected_layer(100, Activation.RECTIFIER)\n net.output_layer(F, Activation.SOFTMAX)\n net.set_error_function(Error.CE)\n\n # Split dataset into training set and validation set and make sure that\n # each class is equally distributed in the datasets\n X1 = numpy.vstack((X[0:40], X[50:90], X[100:140]))\n T1 = numpy.vstack((T[0:40], T[50:90], T[100:140]))\n training_set = DataSet(X1, T1)\n X2 = numpy.vstack((X[40:50], X[90:100], X[140:150]))\n T2 = numpy.vstack((T[40:50], T[90:100], T[140:150]))\n validation_set = DataSet(X2, T2)\n\n # Train for 500 episodes (with tuned parameters for MBSGD)\n optimizer = MBSGD({\"maximal_iterations\": 500}, learning_rate=0.7,\n learning_rate_decay=0.999, min_learning_rate=0.001, momentum=0.5,\n batch_size=16)\n Log.set_info() # Deactivate debug output\n optimizer.optimize(net, training_set)\n\n print(\"Iris data set has %d inputs, %d classes and %d examples\" % (D, F, N))\n print(\"The data has been split up input training and validation set.\")\n print(\"Correct predictions on training set: %d\/%d\"\n % (classification_hits(net, training_set), len(X1)))\n print(\"Confusion matrix:\")\n print(confusion_matrix(net, training_set))\n print(\"Correct predictions on test set: %d\/%d\"\n % (classification_hits(net, validation_set), len(X2)))\n print(\"Confusion matrix:\")\n print(confusion_matrix(net, validation_set))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print_usage()\n\n for command in sys.argv[1:]:\n if command == \"run\":\n run_iris()\n else:\n print_usage()\n exit(1)\n","license":"gpl-3.0"} {"repo_name":"alexei-matveev\/ase-local","path":"doc\/exercises\/siesta1\/answer1.py","copies":"3","size":"1197","content":"# -*- coding: utf-8 -*-\n# creates: ener.png distance.png angle.png\nimport 
os\nimport matplotlib\nmatplotlib.use('Agg')\nimport pylab as plt\n\n\ne_s = [0.01,0.1,0.2,0.3,0.4,0.5]\nE = [-463.2160, -462.9633, -462.4891, -462.0551,\n -461.5426, -461.1714]\nd = [1.1131, 1.1046, 1.0960, 1.0901,\n 1.0857, 1.0810]\nalpha = [100.832453365, 99.568214268, 99.1486065462,\n 98.873671379, 98.1726341945, 98.0535643778]\n\nfig=plt.figure(figsize=(3, 2.5))\nfig.subplots_adjust(left=.29, right=.96, top=.9, bottom=0.16)\nplt.plot(e_s, E, 'o-')\nplt.xlabel(u'Energy shift [eV]')\nplt.ylabel(u'Energy [eV]')\nplt.title('Total Energy vs Eshift')\nplt.savefig('ener.png')\n\nfig=plt.figure(figsize=(3, 2.5))\nfig.subplots_adjust(left=.24, right=.96, top=.9, bottom=0.16)\nplt.plot(e_s, d, 'o-')\nplt.xlabel(u'Energy shift [eV]')\nplt.ylabel(u'O-H distance [\u00c5]')\nlimits = plt.axis('tight')\nplt.title('O-H distance vs Eshift')\nplt.savefig('distance.png')\n\nfig=plt.figure(figsize=(3, 2.5))\nfig.subplots_adjust(left=.26, right=.96, top=.9, bottom=0.16)\nplt.plot(e_s, alpha, 'o-')\nplt.xlabel(u'Energy shift [eV]')\nplt.ylabel(u'H20 angle')\nlimits = plt.axis('tight')\nplt.title('O-H distance vs Eshift')\nplt.savefig('angle.png')\n","license":"gpl-2.0"} {"repo_name":"mugwizaleon\/PCRasterMapstacks","path":"pcrastermapstackvisualisation.py","copies":"1","size":"17920","content":"# -*- coding: utf-8 -*-\n\"\"\"\n\/***************************************************************************\n PcrasterMapstackVisualisation\n A QGIS plugin\n PCRaster Mapstack visualisation\n -------------------\n begin : 2014-06-28\n copyright : (C) 2014 by Leon\n email : mugwizal@gmail.com\n ***************************************************************************\/\n\n\/***************************************************************************\n * *\n * This program is free software; you can redistribute it and\/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
*\n * *\n ***************************************************************************\/\n\"\"\"\n# Import the PyQt and QGIS libraries\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.gui import *\nimport qgis.utils\n# Initialize Qt resources from file resources.py\nimport resources_rc\n# Import the code for the dialog\nfrom pcrastermapstackvisualisationdialog import PcrasterMapstackVisualisationDialog\nfrom Animationdialog import AnimationDialog\nfrom TSSvisualizationdialog import TSSVisualizationDialog\n# Import modules\nimport os.path\nimport os, glob\nimport time\nimport sys\nimport string\n\nclass PcrasterMapstackVisualisation:\n\n def __init__(self, iface):\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value(\"locale\/userLocale\")[0:2]\n localePath = os.path.join(self.plugin_dir, 'i18n', 'pcrastermapstackvisualisation_{}.qm'.format(locale))\n if os.path.exists(localePath):\n self.translator = QTranslator()\n self.translator.load(localePath)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n # Create the dialog (after translation) and keep reference\n self.dlg = PcrasterMapstackVisualisationDialog()\n self.dlg2 = AnimationDialog()\n self.dlg3 = TSSVisualizationDialog()\n\n # Mapstack series visualization\n QObject.connect( self.dlg.ui.pushButton_7, SIGNAL( \"clicked()\" ), self.DisplayTSSnames)\n QObject.connect( self.dlg.ui.pushButton_6, SIGNAL( \"clicked()\" ), self.TSSgraphs)\n \n QObject.connect( self.dlg.ui.btnBaseDir_3, SIGNAL( \"clicked()\" ), self.selectDir ) #link the button to the function of selecting the directory\n QObject.connect( self.dlg.ui.btnBaseDir_3, SIGNAL( \"clicked()\" ), self.loadMapStackCoreName ) #link the button to the function of selecting the directory\n QObject.connect( self.dlg.ui.pushButton_5, SIGNAL( \"clicked()\" ), self.actionStart)\n QObject.connect( self.dlg2.ui.pushButton_2, SIGNAL( \"clicked()\" ), self.ActionAnim)\n QObject.connect( self.dlg2.ui.pushButton_3, SIGNAL( \"clicked()\" ), self.actionNext)\n QObject.connect( self.dlg2.ui.pushButton, SIGNAL( \"clicked()\" ), self.actionPrevious)\n \n QObject.connect( self.dlg2.ui.pushButton_4, SIGNAL( \"clicked()\" ), self.actionStart)\n QObject.connect( self.dlg2.ui.pushButton_5, SIGNAL( \"clicked()\" ), self.actionLast)\n QObject.connect(self.dlg.ui.comboBox, SIGNAL(\"currentIndexChanged (const QString&)\"), self.changelist) #Change the list of mapstacks\n \n #Close dialogs widgets\n QObject.connect( self.dlg.ui.pushButton, SIGNAL( \"clicked()\" ), self.close1)\n QObject.connect( self.dlg3.ui.pushButton, SIGNAL( \"clicked()\" ), self.close2)\n QObject.connect( self.dlg2.ui.pushButton_6, SIGNAL( \"clicked()\" ), self.close3)\n \n def initGui(self):\n # Create action that will start plugin configuration\n self.action = QAction(\n QIcon(\":\/plugins\/pcrastermapstackvisualisation\/Myicon.png\"),\n u\"Mapstacks_visualisation\", self.iface.mainWindow())\n # connect the action to the run method\n self.action.triggered.connect(self.run)\n\n # Add toolbar button and menu item\n self.iface.addToolBarIcon(self.action)\n self.iface.addPluginToMenu(u\"&PCRaster Mapstacks Viewer\", self.action)\n self.iface.addPluginToRasterMenu(u\"&PCRaster Mapstacks Viewer\", self.action)\n \n def unload(self):\n # Remove the plugin menu item and icon\n self.iface.removePluginMenu(u\"&PCRaster Time series Viewer\", 
self.action)\n self.iface.removeToolBarIcon(self.action)\n\n # run method that performs all the real work\n def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n \n def close1(self):\n self.dlg.close()\n\n def TSSview(self):\n self.dlg3.move(10, 300)\n self.dlg3.show()# show the dialog \n \n def close2(self):\n self.dlg3.close()\n self.dlg.show()\n \n def AnimationDlg (self):\n self.dlg2.move(200, 200)\n self.dlg2.show()# show the dialog\n \n def close3(self):\n self.dlg2.close()\n self.dlg.show()\n \n # Selecting the directory containg files \n def selectDir( self ):\n self.dlg.hide()\n settings = QSettings()\n path = QFileDialog.getExistingDirectory( self.iface.mainWindow(), \"Select a directory\")\n if path: self.dlg.ui.txtBaseDir2_5.setText( path )\n self.dlg.show()\n \n def actionRemove(self):\n layers = self.iface.legendInterface().layers()\n layer = qgis.utils.iface.activeLayer()\n self.PrincipalLayer = layer.name()\n for layer in layers :\n if layer.name() == self.PrincipalLayer : pass\n else : self.iface.legendInterface().moveLayer( layer, 0 )\n self.iface.legendInterface().removeGroup(0)\n \n def AddLayer(self, input): \n layerPath = os.path.join(self.dataDir, input)\n fileInfo = QFileInfo(layerPath)\n baseName = fileInfo.baseName()\n layer = QgsRasterLayer(layerPath, baseName)\n uri = os.path.join(self.dataDir, 'MyFile.qml')\n layer.loadNamedStyle(uri)\n QgsMapLayerRegistry.instance().addMapLayer(layer)\n \n def loadFiles(self, filename):\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n os.chdir(self.dataDir )\n file_list = glob.glob(filename)\n for index in file_list:\n list = index.split(\".\")\n if (len(list) < 2) :\n file_list.remove(index)\n for index in file_list:\n if index.endswith(\".tss\"):\n file_list.remove(index)\n for index in file_list:\n if index.endswith(\".xml\") or index.endswith(\".aux.xml\") :\n file_list.remove(index)\n for index in file_list:\n if index.endswith(\".tss\"):\n file_list.remove(index)\n file_list.sort()\n return file_list\n \n def loadMapStackCoreName(self):\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n files= os.listdir(self.dataDir)\n self.dlg.ui.comboBox.clear()\n self.dlg.ui.comboBox_2.clear()\n MyList=[]\n MyList2 =[]\n MyList3 = []\n for index in files:\n list = index.split(\".\")\n if (len(list)==2) and (len(list[0])== 8) and (len(list[1])== 3) and (list[1].isdigit()):\n MyList.append(index)\n if index.endswith(\".tss\"):\n MyList3.append(index)\n for index in MyList:\n list = index.split(\".\")\n words = list[0].replace(\"0\", \"\")\n MyList2.append(words)\n FinalList = []\n for i in MyList2:\n if i not in FinalList:\n FinalList.append(i)\n self.dlg.ui.comboBox.addItems(FinalList)\n self.dlg.ui.comboBox_2.addItems(MyList3)\n\n def DisplayTSSnames(self):\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n if not self.dataDir : pass\n else:\n os.chdir(self.dataDir )\n if not self.dlg.ui.comboBox.currentText(): pass\n else:\n filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'\n file_list = self.loadFiles(filename) \n self.dlg.ui.listWidget.clear()\n for index, file in enumerate(file_list):\n self.dlg.ui.listWidget.addItem(file)\n \n def changelist(self):\n self.dlg.ui.listWidget.clear()\n \n def ActionAnim(self):\n self.actionRemove()\n Group = self.iface.legendInterface().addGroup(\"group_foo\") \n import numpy\n numpy.seterr(divide='ignore', invalid='ignore', over='ignore')\n self.dataDir = 
str(self.dlg.ui.txtBaseDir2_5.text())\n os.chdir(self.dataDir )\n filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'\n file_list = self.loadFiles(filename) \n legend = self.iface.legendInterface() \n self.dlg2.ui.pushButton_6.setEnabled(False)\n for index, file in enumerate(file_list):\n canvas = qgis.utils.iface.mapCanvas()\n import Styling\n Styling.style1(file_list[index], 'value', self.dataDir, file_list )\n uri = os.path.join(self.dataDir, 'MyFile.qml')\n self.iface.addRasterLayer(file, os.path.basename(str(file))).loadNamedStyle(uri)\n canvas.refresh()\n canvas.zoomToFullExtent() \n rlayer = qgis.utils.iface.activeLayer()\n legend.moveLayer( rlayer, 0 )\n time.sleep(float(self.dlg2.ui.txtBaseDir2_5.text()))\n self.dlg2.ui.pushButton_6.setEnabled(True)\n\n def actionStart(self):\n import Styling\n self.dlg.hide() \n self.iface.messageBar().clearWidgets ()\n layers = self.iface.legendInterface().layers()\n for layer in layers : \n if self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().setLayerVisible(layer, False)\n import numpy\n numpy.seterr(divide='ignore', invalid='ignore', over='ignore')\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n if not self.dataDir : \n QMessageBox.information( self.iface.mainWindow(),\"Info\", \"Please select a directory first\") \n self.dlg.show()\n else : \n os.chdir(self.dataDir )\n filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'\n file_list = self.loadFiles(filename)\n if not self.dlg.ui.comboBox.currentText():\n QMessageBox.information( self.iface.mainWindow(),\"Info\", \"The are no PCRaster mapstacks in this directory\")\n self.dlg.show()\n# return\n else:\n self.AnimationDlg()\n Styling.style1(filename, 'value', self.dataDir, file_list )\n s = QSettings()\n oldValidation = s.value( \"\/Projections\/defaultBehaviour\", \"useGlobal\" )\n s.setValue( \"\/Projections\/defaultBehaviour\", \"useGlobal\" )\n self.AddLayer(str(file_list[0]))\n s.setValue( \"\/Projections\/defaultBehaviour\", oldValidation )\n layer = qgis.utils.iface.activeLayer()\n# self.PrincipalLayer = layer.name()\n# print self.PrincipalLayer\n self.iface.legendInterface().setLayerExpanded(layer, True)\n \n def actionLast(self):\n self.actionRemove()\n self.dlg.hide()\n self.AnimationDlg()\n self.iface.messageBar().clearWidgets ()\n layers = self.iface.legendInterface().layers()\n for layer in layers : \n if self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().setLayerVisible(layer, False)\n import numpy\n numpy.seterr(divide='ignore', invalid='ignore', over='ignore')\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n os.chdir(self.dataDir )\n filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'\n file_list = self.loadFiles(filename)\n index = len(file_list) - 1\n canvas = qgis.utils.iface.mapCanvas() \n import Styling\n Styling.style1(file_list[index], 'value', self.dataDir, file_list )\n uri = os.path.join(self.dataDir, 'MyFile.qml')\n self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)\n canvas.refresh()\n canvas.zoomToFullExtent()\n\n def actionNext(self):\n self.actionRemove()\n self.iface.messageBar().clearWidgets ()\n import numpy\n numpy.seterr(divide='ignore', invalid='ignore', over='ignore')\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n os.chdir(self.dataDir )\n filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'\n file_list = self.loadFiles(filename)\n layer = qgis.utils.iface.activeLayer()\n 
self.PrincipalLayer = layer.name()\n if layer is None :\n index = 0\n elif layer.name() not in file_list:\n index = 0\n else :\n counter = file_list.index(layer.name())\n index = counter + 1\n if counter == len(file_list) - 1 :\n layers = self.iface.legendInterface().layers()\n self.iface.legendInterface().addGroup(\"group_foo\")\n for layer in layers : \n if layer.name() == self.PrincipalLayer : pass\n elif self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().moveLayer( layer, 0 )\n index = 0 \n canvas = qgis.utils.iface.mapCanvas() \n import Styling\n Styling.style1(file_list[index], 'value', self.dataDir, file_list )\n uri = os.path.join(self.dataDir, 'MyFile.qml')\n self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)\n canvas.refresh()\n canvas.zoomToFullExtent() \n \n def actionPrevious(self):\n self.actionRemove()\n self.iface.messageBar().clearWidgets ()\n import numpy\n numpy.seterr(divide='ignore', invalid='ignore', over='ignore')\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n os.chdir(self.dataDir )\n filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'\n file_list = self.loadFiles(filename)\n layer = qgis.utils.iface.activeLayer()\n self.PrincipalLayer = layer.name()\n if layer is None :\n index = len(file_list) - 1\n elif layer.name() not in file_list:\n index = len(file_list) - 1\n else :\n counter = file_list.index(layer.name())\n index = counter - 1\n if counter == 0 :\n layers = self.iface.legendInterface().layers()\n self.iface.legendInterface().addGroup(\"group_foo\")\n for layer in layers : \n if layer.name() == self.PrincipalLayer : pass\n elif self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().moveLayer( layer, 0 )\n index = len(file_list) - 1\n canvas = qgis.utils.iface.mapCanvas() \n import Styling\n Styling.style1(file_list[index], 'value', self.dataDir, file_list )\n uri = os.path.join(self.dataDir, 'MyFile.qml')\n self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)\n canvas.refresh()\n canvas.zoomToFullExtent() \n\n def TSSgraphs(self):# wtih matplotlib\n self.dlg.hide()\n filename = str(self.dlg.ui.comboBox_2.currentText()) \n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n file = os.path.join (self.dataDir, filename)\n if os.path.isfile(file):\n self.TSSview()\n self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())\n os.chdir(self.dataDir )\n stripped = []\n stripper = open(filename, 'r')\n st_lines = stripper.readlines()[4:]\n stripper.close()\n for lines in st_lines:\n stripped_line = \" \".join(lines.split())\n stripped.append(stripped_line)\n data = \"\\n\".join(stripped)\n data = data.split('\\n')\n values = []\n dates = []\n years = 0\n yl = []\n for row in data:\n x, y = row.split()\n values.append(float(y))\n year = (int(x.translate(string.maketrans(\"\\n\\t\\r\", \" \")).strip()))\n dates.append(year)\n years = years +1\n yl.append(years)\n xlabels = yl\n self.dlg3.ui.widget.canvas.ax.clear()\n self.dlg3.ui.widget.canvas.ax.set_position([0.155,0.15,0.82,0.75])\n self.dlg3.ui.widget.canvas.ax.set_title(filename) \n self.dlg3.ui.widget.canvas.ax.set_xlabel ('Time step')\n self.dlg3.ui.widget.canvas.ax.set_ylabel ('Values')\n self.dlg3.ui.widget.canvas.ax.plot(dates, values)\n self.dlg3.ui.widget.canvas.ax.set_xticks(dates) \n self.dlg3.ui.widget.canvas.ax.set_xticklabels(xlabels, rotation=30, fontsize=10) \n self.dlg3.ui.widget.canvas.draw()\n else: \n 
QMessageBox.information( self.iface.mainWindow(),\"Info\", \"The are no PCRaster timeseries this directory\") \n self.dlg.show()\n\n\n","license":"apache-2.0"} {"repo_name":"jreback\/pandas","path":"pandas\/io\/formats\/latex.py","copies":"2","size":"25201","content":"\"\"\"\nModule for formatting output data in Latex.\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom typing import Iterator, List, Optional, Sequence, Tuple, Type, Union\n\nimport numpy as np\n\nfrom pandas.core.dtypes.generic import ABCMultiIndex\n\nfrom pandas.io.formats.format import DataFrameFormatter\n\n\ndef _split_into_full_short_caption(\n caption: Optional[Union[str, Tuple[str, str]]]\n) -> Tuple[str, str]:\n \"\"\"Extract full and short captions from caption string\/tuple.\n\n Parameters\n ----------\n caption : str or tuple, optional\n Either table caption string or tuple (full_caption, short_caption).\n If string is provided, then it is treated as table full caption,\n while short_caption is considered an empty string.\n\n Returns\n -------\n full_caption, short_caption : tuple\n Tuple of full_caption, short_caption strings.\n \"\"\"\n if caption:\n if isinstance(caption, str):\n full_caption = caption\n short_caption = \"\"\n else:\n try:\n full_caption, short_caption = caption\n except ValueError as err:\n msg = \"caption must be either a string or a tuple of two strings\"\n raise ValueError(msg) from err\n else:\n full_caption = \"\"\n short_caption = \"\"\n return full_caption, short_caption\n\n\nclass RowStringConverter(ABC):\n r\"\"\"Converter for dataframe rows into LaTeX strings.\n\n Parameters\n ----------\n formatter : `DataFrameFormatter`\n Instance of `DataFrameFormatter`.\n multicolumn: bool, optional\n Whether to use \\multicolumn macro.\n multicolumn_format: str, optional\n Multicolumn format.\n multirow: bool, optional\n Whether to use \\multirow macro.\n\n \"\"\"\n\n def __init__(\n self,\n formatter: DataFrameFormatter,\n multicolumn: bool = False,\n multicolumn_format: Optional[str] = None,\n multirow: bool = False,\n ):\n self.fmt = formatter\n self.frame = self.fmt.frame\n self.multicolumn = multicolumn\n self.multicolumn_format = multicolumn_format\n self.multirow = multirow\n self.clinebuf: List[List[int]] = []\n self.strcols = self._get_strcols()\n self.strrows = list(zip(*self.strcols))\n\n def get_strrow(self, row_num: int) -> str:\n \"\"\"Get string representation of the row.\"\"\"\n row = self.strrows[row_num]\n\n is_multicol = (\n row_num < self.column_levels and self.fmt.header and self.multicolumn\n )\n\n is_multirow = (\n row_num >= self.header_levels\n and self.fmt.index\n and self.multirow\n and self.index_levels > 1\n )\n\n is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1\n\n crow = self._preprocess_row(row)\n\n if is_multicol:\n crow = self._format_multicolumn(crow)\n if is_multirow:\n crow = self._format_multirow(crow, row_num)\n\n lst = []\n lst.append(\" & \".join(crow))\n lst.append(\" \\\\\\\\\")\n if is_cline_maybe_required:\n cline = self._compose_cline(row_num, len(self.strcols))\n lst.append(cline)\n return \"\".join(lst)\n\n @property\n def _header_row_num(self) -> int:\n \"\"\"Number of rows in header.\"\"\"\n return self.header_levels if self.fmt.header else 0\n\n @property\n def index_levels(self) -> int:\n \"\"\"Integer number of levels in index.\"\"\"\n return self.frame.index.nlevels\n\n @property\n def column_levels(self) -> int:\n return self.frame.columns.nlevels\n\n @property\n def header_levels(self) -> int:\n nlevels = 
self.column_levels\n if self.fmt.has_index_names and self.fmt.show_index_names:\n nlevels += 1\n return nlevels\n\n def _get_strcols(self) -> List[List[str]]:\n \"\"\"String representation of the columns.\"\"\"\n if self.fmt.frame.empty:\n strcols = [[self._empty_info_line]]\n else:\n strcols = self.fmt.get_strcols()\n\n # reestablish the MultiIndex that has been joined by get_strcols()\n if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):\n out = self.frame.index.format(\n adjoin=False,\n sparsify=self.fmt.sparsify,\n names=self.fmt.has_index_names,\n na_rep=self.fmt.na_rep,\n )\n\n # index.format will sparsify repeated entries with empty strings\n # so pad these with some empty space\n def pad_empties(x):\n for pad in reversed(x):\n if pad:\n break\n return [x[0]] + [i if i else \" \" * len(pad) for i in x[1:]]\n\n gen = (pad_empties(i) for i in out)\n\n # Add empty spaces for each column level\n clevels = self.frame.columns.nlevels\n out = [[\" \" * len(i[-1])] * clevels + i for i in gen]\n\n # Add the column names to the last index column\n cnames = self.frame.columns.names\n if any(cnames):\n new_names = [i if i else \"{}\" for i in cnames]\n out[self.frame.index.nlevels - 1][:clevels] = new_names\n\n # Get rid of old multiindex column and add new ones\n strcols = out + strcols[1:]\n return strcols\n\n @property\n def _empty_info_line(self):\n return (\n f\"Empty {type(self.frame).__name__}\\n\"\n f\"Columns: {self.frame.columns}\\n\"\n f\"Index: {self.frame.index}\"\n )\n\n def _preprocess_row(self, row: Sequence[str]) -> List[str]:\n \"\"\"Preprocess elements of the row.\"\"\"\n if self.fmt.escape:\n crow = _escape_symbols(row)\n else:\n crow = [x if x else \"{}\" for x in row]\n if self.fmt.bold_rows and self.fmt.index:\n crow = _convert_to_bold(crow, self.index_levels)\n return crow\n\n def _format_multicolumn(self, row: List[str]) -> List[str]:\n r\"\"\"\n Combine columns belonging to a group to a single multicolumn entry\n according to self.multicolumn_format\n\n e.g.:\n a & & & b & c &\n will become\n \\multicolumn{3}{l}{a} & b & \\multicolumn{2}{l}{c}\n \"\"\"\n row2 = row[: self.index_levels]\n ncol = 1\n coltext = \"\"\n\n def append_col():\n # write multicolumn if needed\n if ncol > 1:\n row2.append(\n f\"\\\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}\"\n f\"{{{coltext.strip()}}}\"\n )\n # don't modify where not needed\n else:\n row2.append(coltext)\n\n for c in row[self.index_levels :]:\n # if next col has text, write the previous\n if c.strip():\n if coltext:\n append_col()\n coltext = c\n ncol = 1\n # if not, add it to the previous multicolumn\n else:\n ncol += 1\n # write last column name\n if coltext:\n append_col()\n return row2\n\n def _format_multirow(self, row: List[str], i: int) -> List[str]:\n r\"\"\"\n Check following rows, whether row should be a multirow\n\n e.g.: becomes:\n a & 0 & \\multirow{2}{*}{a} & 0 &\n & 1 & & 1 &\n b & 0 & \\cline{1-2}\n b & 0 &\n \"\"\"\n for j in range(self.index_levels):\n if row[j].strip():\n nrow = 1\n for r in self.strrows[i + 1 :]:\n if not r[j].strip():\n nrow += 1\n else:\n break\n if nrow > 1:\n # overwrite non-multirow entry\n row[j] = f\"\\\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}\"\n # save when to end the current block with \\cline\n self.clinebuf.append([i + nrow - 1, j + 1])\n return row\n\n def _compose_cline(self, i: int, icol: int) -> str:\n \"\"\"\n Create clines after multirow-blocks are finished.\n \"\"\"\n lst = []\n for cl in self.clinebuf:\n if cl[0] == i:\n 
lst.append(f\"\\n\\\\cline{{{cl[1]:d}-{icol:d}}}\")\n # remove entries that have been written to buffer\n self.clinebuf = [x for x in self.clinebuf if x[0] != i]\n return \"\".join(lst)\n\n\nclass RowStringIterator(RowStringConverter):\n \"\"\"Iterator over rows of the header or the body of the table.\"\"\"\n\n @abstractmethod\n def __iter__(self) -> Iterator[str]:\n \"\"\"Iterate over LaTeX string representations of rows.\"\"\"\n\n\nclass RowHeaderIterator(RowStringIterator):\n \"\"\"Iterator for the table header rows.\"\"\"\n\n def __iter__(self) -> Iterator[str]:\n for row_num in range(len(self.strrows)):\n if row_num < self._header_row_num:\n yield self.get_strrow(row_num)\n\n\nclass RowBodyIterator(RowStringIterator):\n \"\"\"Iterator for the table body rows.\"\"\"\n\n def __iter__(self) -> Iterator[str]:\n for row_num in range(len(self.strrows)):\n if row_num >= self._header_row_num:\n yield self.get_strrow(row_num)\n\n\nclass TableBuilderAbstract(ABC):\n \"\"\"\n Abstract table builder producing string representation of LaTeX table.\n\n Parameters\n ----------\n formatter : `DataFrameFormatter`\n Instance of `DataFrameFormatter`.\n column_format: str, optional\n Column format, for example, 'rcl' for three columns.\n multicolumn: bool, optional\n Use multicolumn to enhance MultiIndex columns.\n multicolumn_format: str, optional\n The alignment for multicolumns, similar to column_format.\n multirow: bool, optional\n Use multirow to enhance MultiIndex rows.\n caption: str, optional\n Table caption.\n short_caption: str, optional\n Table short caption.\n label: str, optional\n LaTeX label.\n position: str, optional\n Float placement specifier, for example, 'htb'.\n \"\"\"\n\n def __init__(\n self,\n formatter: DataFrameFormatter,\n column_format: Optional[str] = None,\n multicolumn: bool = False,\n multicolumn_format: Optional[str] = None,\n multirow: bool = False,\n caption: Optional[str] = None,\n short_caption: Optional[str] = None,\n label: Optional[str] = None,\n position: Optional[str] = None,\n ):\n self.fmt = formatter\n self.column_format = column_format\n self.multicolumn = multicolumn\n self.multicolumn_format = multicolumn_format\n self.multirow = multirow\n self.caption = caption\n self.short_caption = short_caption\n self.label = label\n self.position = position\n\n def get_result(self) -> str:\n \"\"\"String representation of LaTeX table.\"\"\"\n elements = [\n self.env_begin,\n self.top_separator,\n self.header,\n self.middle_separator,\n self.env_body,\n self.bottom_separator,\n self.env_end,\n ]\n result = \"\\n\".join([item for item in elements if item])\n trailing_newline = \"\\n\"\n result += trailing_newline\n return result\n\n @property\n @abstractmethod\n def env_begin(self) -> str:\n \"\"\"Beginning of the environment.\"\"\"\n\n @property\n @abstractmethod\n def top_separator(self) -> str:\n \"\"\"Top level separator.\"\"\"\n\n @property\n @abstractmethod\n def header(self) -> str:\n \"\"\"Header lines.\"\"\"\n\n @property\n @abstractmethod\n def middle_separator(self) -> str:\n \"\"\"Middle level separator.\"\"\"\n\n @property\n @abstractmethod\n def env_body(self) -> str:\n \"\"\"Environment body.\"\"\"\n\n @property\n @abstractmethod\n def bottom_separator(self) -> str:\n \"\"\"Bottom level separator.\"\"\"\n\n @property\n @abstractmethod\n def env_end(self) -> str:\n \"\"\"End of the environment.\"\"\"\n\n\nclass GenericTableBuilder(TableBuilderAbstract):\n \"\"\"Table builder producing string representation of LaTeX table.\"\"\"\n\n @property\n def 
header(self) -> str:\n iterator = self._create_row_iterator(over=\"header\")\n return \"\\n\".join(list(iterator))\n\n @property\n def top_separator(self) -> str:\n return \"\\\\toprule\"\n\n @property\n def middle_separator(self) -> str:\n return \"\\\\midrule\" if self._is_separator_required() else \"\"\n\n @property\n def env_body(self) -> str:\n iterator = self._create_row_iterator(over=\"body\")\n return \"\\n\".join(list(iterator))\n\n def _is_separator_required(self) -> bool:\n return bool(self.header and self.env_body)\n\n @property\n def _position_macro(self) -> str:\n r\"\"\"Position macro, extracted from self.position, like [h].\"\"\"\n return f\"[{self.position}]\" if self.position else \"\"\n\n @property\n def _caption_macro(self) -> str:\n r\"\"\"Caption macro, extracted from self.caption.\n\n With short caption:\n \\caption[short_caption]{caption_string}.\n\n Without short caption:\n \\caption{caption_string}.\n \"\"\"\n if self.caption:\n return \"\".join(\n [\n r\"\\caption\",\n f\"[{self.short_caption}]\" if self.short_caption else \"\",\n f\"{{{self.caption}}}\",\n ]\n )\n return \"\"\n\n @property\n def _label_macro(self) -> str:\n r\"\"\"Label macro, extracted from self.label, like \\label{ref}.\"\"\"\n return f\"\\\\label{{{self.label}}}\" if self.label else \"\"\n\n def _create_row_iterator(self, over: str) -> RowStringIterator:\n \"\"\"Create iterator over header or body of the table.\n\n Parameters\n ----------\n over : {'body', 'header'}\n Over what to iterate.\n\n Returns\n -------\n RowStringIterator\n Iterator over body or header.\n \"\"\"\n iterator_kind = self._select_iterator(over)\n return iterator_kind(\n formatter=self.fmt,\n multicolumn=self.multicolumn,\n multicolumn_format=self.multicolumn_format,\n multirow=self.multirow,\n )\n\n def _select_iterator(self, over: str) -> Type[RowStringIterator]:\n \"\"\"Select proper iterator over table rows.\"\"\"\n if over == \"header\":\n return RowHeaderIterator\n elif over == \"body\":\n return RowBodyIterator\n else:\n msg = f\"'over' must be either 'header' or 'body', but {over} was provided\"\n raise ValueError(msg)\n\n\nclass LongTableBuilder(GenericTableBuilder):\n \"\"\"Concrete table builder for longtable.\n\n >>> from pandas import DataFrame\n >>> from pandas.io.formats import format as fmt\n >>> df = DataFrame({\"a\": [1, 2], \"b\": [\"b1\", \"b2\"]})\n >>> formatter = fmt.DataFrameFormatter(df)\n >>> builder = LongTableBuilder(formatter, caption='a long table',\n ... 
label='tab:long', column_format='lrl')\n >>> table = builder.get_result()\n >>> print(table)\n \\\\begin{longtable}{lrl}\n \\\\caption{a long table}\n \\\\label{tab:long}\\\\\\\\\n \\\\toprule\n {} & a & b \\\\\\\\\n \\\\midrule\n \\\\endfirsthead\n \\\\caption[]{a long table} \\\\\\\\\n \\\\toprule\n {} & a & b \\\\\\\\\n \\\\midrule\n \\\\endhead\n \\\\midrule\n \\\\multicolumn{3}{r}{{Continued on next page}} \\\\\\\\\n \\\\midrule\n \\\\endfoot\n \n \\\\bottomrule\n \\\\endlastfoot\n 0 & 1 & b1 \\\\\\\\\n 1 & 2 & b2 \\\\\\\\\n \\\\end{longtable}\n \n \"\"\"\n\n @property\n def env_begin(self) -> str:\n first_row = (\n f\"\\\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}\"\n )\n elements = [first_row, f\"{self._caption_and_label()}\"]\n return \"\\n\".join([item for item in elements if item])\n\n def _caption_and_label(self) -> str:\n if self.caption or self.label:\n double_backslash = \"\\\\\\\\\"\n elements = [f\"{self._caption_macro}\", f\"{self._label_macro}\"]\n caption_and_label = \"\\n\".join([item for item in elements if item])\n caption_and_label += double_backslash\n return caption_and_label\n else:\n return \"\"\n\n @property\n def middle_separator(self) -> str:\n iterator = self._create_row_iterator(over=\"header\")\n\n # the content between \\endfirsthead and \\endhead commands\n # mitigates repeated List of Tables entries in the final LaTeX\n # document when dealing with longtable environments; GH #34360\n elements = [\n \"\\\\midrule\",\n \"\\\\endfirsthead\",\n f\"\\\\caption[]{{{self.caption}}} \\\\\\\\\" if self.caption else \"\",\n self.top_separator,\n self.header,\n \"\\\\midrule\",\n \"\\\\endhead\",\n \"\\\\midrule\",\n f\"\\\\multicolumn{{{len(iterator.strcols)}}}{{r}}\"\n \"{{Continued on next page}} \\\\\\\\\",\n \"\\\\midrule\",\n \"\\\\endfoot\\n\",\n \"\\\\bottomrule\",\n \"\\\\endlastfoot\",\n ]\n if self._is_separator_required():\n return \"\\n\".join(elements)\n return \"\"\n\n @property\n def bottom_separator(self) -> str:\n return \"\"\n\n @property\n def env_end(self) -> str:\n return \"\\\\end{longtable}\"\n\n\nclass RegularTableBuilder(GenericTableBuilder):\n \"\"\"Concrete table builder for regular table.\n\n >>> from pandas import DataFrame\n >>> from pandas.io.formats import format as fmt\n >>> df = DataFrame({\"a\": [1, 2], \"b\": [\"b1\", \"b2\"]})\n >>> formatter = fmt.DataFrameFormatter(df)\n >>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',\n ... 
column_format='lrc')\n >>> table = builder.get_result()\n >>> print(table)\n \\\\begin{table}\n \\\\centering\n \\\\caption{caption}\n \\\\label{lab}\n \\\\begin{tabular}{lrc}\n \\\\toprule\n {} & a & b \\\\\\\\\n \\\\midrule\n 0 & 1 & b1 \\\\\\\\\n 1 & 2 & b2 \\\\\\\\\n \\\\bottomrule\n \\\\end{tabular}\n \\\\end{table}\n \n \"\"\"\n\n @property\n def env_begin(self) -> str:\n elements = [\n f\"\\\\begin{{table}}{self._position_macro}\",\n \"\\\\centering\",\n f\"{self._caption_macro}\",\n f\"{self._label_macro}\",\n f\"\\\\begin{{tabular}}{{{self.column_format}}}\",\n ]\n return \"\\n\".join([item for item in elements if item])\n\n @property\n def bottom_separator(self) -> str:\n return \"\\\\bottomrule\"\n\n @property\n def env_end(self) -> str:\n return \"\\n\".join([\"\\\\end{tabular}\", \"\\\\end{table}\"])\n\n\nclass TabularBuilder(GenericTableBuilder):\n \"\"\"Concrete table builder for tabular environment.\n\n >>> from pandas import DataFrame\n >>> from pandas.io.formats import format as fmt\n >>> df = DataFrame({\"a\": [1, 2], \"b\": [\"b1\", \"b2\"]})\n >>> formatter = fmt.DataFrameFormatter(df)\n >>> builder = TabularBuilder(formatter, column_format='lrc')\n >>> table = builder.get_result()\n >>> print(table)\n \\\\begin{tabular}{lrc}\n \\\\toprule\n {} & a & b \\\\\\\\\n \\\\midrule\n 0 & 1 & b1 \\\\\\\\\n 1 & 2 & b2 \\\\\\\\\n \\\\bottomrule\n \\\\end{tabular}\n \n \"\"\"\n\n @property\n def env_begin(self) -> str:\n return f\"\\\\begin{{tabular}}{{{self.column_format}}}\"\n\n @property\n def bottom_separator(self) -> str:\n return \"\\\\bottomrule\"\n\n @property\n def env_end(self) -> str:\n return \"\\\\end{tabular}\"\n\n\nclass LatexFormatter:\n r\"\"\"\n Used to render a DataFrame to a LaTeX tabular\/longtable environment output.\n\n Parameters\n ----------\n formatter : `DataFrameFormatter`\n longtable : bool, default False\n Use longtable environment.\n column_format : str, default None\n The columns format as specified in `LaTeX table format\n `__ e.g 'rcl' for 3 columns\n multicolumn : bool, default False\n Use \\multicolumn to enhance MultiIndex columns.\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows.\n caption : str or tuple, optional\n Tuple (full_caption, short_caption),\n which results in \\caption[short_caption]{full_caption};\n if a single string is passed, no short caption will be set.\n label : str, optional\n The LaTeX label to be placed inside ``\\label{}`` in the output.\n position : str, optional\n The LaTeX positional argument for tables, to be placed after\n ``\\begin{}`` in the output.\n\n See Also\n --------\n HTMLFormatter\n \"\"\"\n\n def __init__(\n self,\n formatter: DataFrameFormatter,\n longtable: bool = False,\n column_format: Optional[str] = None,\n multicolumn: bool = False,\n multicolumn_format: Optional[str] = None,\n multirow: bool = False,\n caption: Optional[Union[str, Tuple[str, str]]] = None,\n label: Optional[str] = None,\n position: Optional[str] = None,\n ):\n self.fmt = formatter\n self.frame = self.fmt.frame\n self.longtable = longtable\n self.column_format = column_format\n self.multicolumn = multicolumn\n self.multicolumn_format = multicolumn_format\n self.multirow = multirow\n self.caption, self.short_caption = _split_into_full_short_caption(caption)\n self.label = label\n self.position = position\n\n def to_string(self) -> str:\n \"\"\"\n Render a DataFrame to a LaTeX tabular, longtable, or 
table\/tabular\n environment output.\n \"\"\"\n return self.builder.get_result()\n\n @property\n def builder(self) -> TableBuilderAbstract:\n \"\"\"Concrete table builder.\n\n Returns\n -------\n TableBuilder\n \"\"\"\n builder = self._select_builder()\n return builder(\n formatter=self.fmt,\n column_format=self.column_format,\n multicolumn=self.multicolumn,\n multicolumn_format=self.multicolumn_format,\n multirow=self.multirow,\n caption=self.caption,\n short_caption=self.short_caption,\n label=self.label,\n position=self.position,\n )\n\n def _select_builder(self) -> Type[TableBuilderAbstract]:\n \"\"\"Select proper table builder.\"\"\"\n if self.longtable:\n return LongTableBuilder\n if any([self.caption, self.label, self.position]):\n return RegularTableBuilder\n return TabularBuilder\n\n @property\n def column_format(self) -> Optional[str]:\n \"\"\"Column format.\"\"\"\n return self._column_format\n\n @column_format.setter\n def column_format(self, input_column_format: Optional[str]) -> None:\n \"\"\"Setter for column format.\"\"\"\n if input_column_format is None:\n self._column_format = (\n self._get_index_format() + self._get_column_format_based_on_dtypes()\n )\n elif not isinstance(input_column_format, str):\n raise ValueError(\n f\"column_format must be str or unicode, \"\n f\"not {type(input_column_format)}\"\n )\n else:\n self._column_format = input_column_format\n\n def _get_column_format_based_on_dtypes(self) -> str:\n \"\"\"Get column format based on data type.\n\n Right alignment for numbers and left - for strings.\n \"\"\"\n\n def get_col_type(dtype):\n if issubclass(dtype.type, np.number):\n return \"r\"\n return \"l\"\n\n dtypes = self.frame.dtypes._values\n return \"\".join(map(get_col_type, dtypes))\n\n def _get_index_format(self) -> str:\n \"\"\"Get index column format.\"\"\"\n return \"l\" * self.frame.index.nlevels if self.fmt.index else \"\"\n\n\ndef _escape_symbols(row: Sequence[str]) -> List[str]:\n \"\"\"Carry out string replacements for special symbols.\n\n Parameters\n ----------\n row : list\n List of string, that may contain special symbols.\n\n Returns\n -------\n list\n list of strings with the special symbols replaced.\n \"\"\"\n return [\n (\n x.replace(\"\\\\\", \"\\\\textbackslash \")\n .replace(\"_\", \"\\\\_\")\n .replace(\"%\", \"\\\\%\")\n .replace(\"$\", \"\\\\$\")\n .replace(\"#\", \"\\\\#\")\n .replace(\"{\", \"\\\\{\")\n .replace(\"}\", \"\\\\}\")\n .replace(\"~\", \"\\\\textasciitilde \")\n .replace(\"^\", \"\\\\textasciicircum \")\n .replace(\"&\", \"\\\\&\")\n if (x and x != \"{}\")\n else \"{}\"\n )\n for x in row\n ]\n\n\ndef _convert_to_bold(crow: Sequence[str], ilevels: int) -> List[str]:\n \"\"\"Convert elements in ``crow`` to bold.\"\"\"\n return [\n f\"\\\\textbf{{{x}}}\" if j < ilevels and x.strip() not in [\"\", \"{}\"] else x\n for j, x in enumerate(crow)\n ]\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","license":"bsd-3-clause"} {"repo_name":"trungnt13\/scikit-learn","path":"examples\/feature_selection\/plot_rfe_with_cross_validation.py","copies":"226","size":"1384","content":"\"\"\"\n===================================================\nRecursive feature elimination with cross-validation\n===================================================\n\nA recursive feature elimination example with automatic tuning of the\nnumber of features selected with cross-validation.\n\"\"\"\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import 
StratifiedKFold\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.datasets import make_classification\n\n# Build a classification task using 3 informative features\nX, y = make_classification(n_samples=1000, n_features=25, n_informative=3,\n n_redundant=2, n_repeated=0, n_classes=8,\n n_clusters_per_class=1, random_state=0)\n\n# Create the RFE object and compute a cross-validated score.\nsvc = SVC(kernel=\"linear\")\n# The \"accuracy\" scoring is proportional to the number of correct\n# classifications\nrfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),\n scoring='accuracy')\nrfecv.fit(X, y)\n\nprint(\"Optimal number of features : %d\" % rfecv.n_features_)\n\n# Plot number of features VS. cross-validation scores\nplt.figure()\nplt.xlabel(\"Number of features selected\")\nplt.ylabel(\"Cross validation score (nb of correct classifications)\")\nplt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"michelp\/pywt","path":"util\/refguide_check.py","copies":"2","size":"27051","content":"#!\/usr\/bin\/env python\n\"\"\"\nrefguide_check.py [OPTIONS] [-- ARGS]\n\nCheck for a PyWavelets submodule whether the objects in its __all__ dict\ncorrespond to the objects included in the reference guide.\n\nExample of usage::\n\n $ python refguide_check.py optimize\n\nNote that this is a helper script to be able to check if things are missing;\nthe output of this script does need to be checked manually. In some cases\nobjects are left out of the refguide for a good reason (it's an alias of\nanother function, or deprecated, or ...)\n\nAnother use of this helper script is to check validity of code samples\nin docstrings. This is different from doctesting [we do not aim to have\nscipy docstrings doctestable!], this is just to make sure that code in\ndocstrings is valid python::\n\n $ python refguide_check.py --check_docs optimize\n\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport re\nimport copy\nimport inspect\nimport warnings\nimport doctest\nimport tempfile\nimport io\nimport docutils.core\nfrom docutils.parsers.rst import directives\nimport shutil\nimport glob\nfrom doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL\nfrom argparse import ArgumentParser\nimport numpy as np\n\n# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc',\n# 'sphinxext'))\nfrom numpydoc.docscrape_sphinx import get_doc_object\n# Remove sphinx directives that don't run without Sphinx environment\ndirectives._directives.pop('versionadded', None)\ndirectives._directives.pop('versionchanged', None)\ndirectives._directives.pop('moduleauthor', None)\ndirectives._directives.pop('sectionauthor', None)\ndirectives._directives.pop('codeauthor', None)\ndirectives._directives.pop('toctree', None)\n\n\nBASE_MODULE = \"pywt\"\n\nPUBLIC_SUBMODULES = []\n\n# Docs for these modules are included in the parent module\nOTHER_MODULE_DOCS = {}\n\n# these names are known to fail doctesting and we like to keep it that way\n# e.g. 
sometimes pseudocode is acceptable etc\nDOCTEST_SKIPLIST = set([])\n\n# these names are not required to be present in ALL despite being in\n# autosummary:: listing\nREFGUIDE_ALL_SKIPLIST = []\n\nHAVE_MATPLOTLIB = False\n\n\ndef short_path(path, cwd=None):\n \"\"\"\n Return relative or absolute path name, whichever is shortest.\n \"\"\"\n if not isinstance(path, str):\n return path\n if cwd is None:\n cwd = os.getcwd()\n abspath = os.path.abspath(path)\n relpath = os.path.relpath(path, cwd)\n if len(abspath) <= len(relpath):\n return abspath\n return relpath\n\n\ndef find_names(module, names_dict):\n # Refguide entries:\n #\n # - 3 spaces followed by function name, and maybe some spaces, some\n # dashes, and an explanation; only function names listed in\n # refguide are formatted like this (mostly, there may be some false\n # positives)\n #\n # - special directives, such as data and function\n #\n # - (scipy.constants only): quoted list\n #\n patterns = [\n r\"^\\s\\s\\s([a-z_0-9A-Z]+)(\\s+-+.*)?$\",\n r\"^\\.\\. (?:data|function)::\\s*([a-z_0-9A-Z]+)\\s*$\"\n ]\n\n if module.__name__ == 'scipy.constants':\n patterns += [\"^``([a-z_0-9A-Z]+)``\"]\n\n patterns = [re.compile(pattern) for pattern in patterns]\n module_name = module.__name__\n\n for line in module.__doc__.splitlines():\n res = re.search(r\"^\\s*\\.\\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\\s*$\", line)\n if res:\n module_name = res.group(1)\n continue\n\n for pattern in patterns:\n res = re.match(pattern, line)\n if res is not None:\n name = res.group(1)\n entry = '.'.join([module_name, name])\n names_dict.setdefault(module_name, set()).add(name)\n break\n\n\ndef get_all_dict(module):\n \"\"\"Return a copy of the __all__ dict with irrelevant items removed.\"\"\"\n if hasattr(module, \"__all__\"):\n all_dict = copy.deepcopy(module.__all__)\n else:\n all_dict = copy.deepcopy(dir(module))\n all_dict = [name for name in all_dict\n if not name.startswith(\"_\")]\n for name in ['absolute_import', 'division', 'print_function']:\n try:\n all_dict.remove(name)\n except ValueError:\n pass\n\n # Modules are almost always private; real submodules need a separate\n # run of refguide_check.\n all_dict = [name for name in all_dict\n if not inspect.ismodule(getattr(module, name, None))]\n\n deprecated = []\n not_deprecated = []\n for name in all_dict:\n f = getattr(module, name, None)\n if callable(f) and is_deprecated(f):\n deprecated.append(name)\n else:\n not_deprecated.append(name)\n\n others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))\n\n return not_deprecated, deprecated, others\n\n\ndef compare(all_dict, others, names, module_name):\n \"\"\"Return sets of objects only in __all__, refguide, or completely missing.\"\"\"\n only_all = set()\n for name in all_dict:\n if name not in names:\n only_all.add(name)\n\n only_ref = set()\n missing = set()\n for name in names:\n if name not in all_dict:\n for pat in REFGUIDE_ALL_SKIPLIST:\n if re.match(pat, module_name + '.' 
+ name):\n if name not in others:\n missing.add(name)\n break\n else:\n only_ref.add(name)\n\n return only_all, only_ref, missing\n\n\ndef is_deprecated(f):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"error\")\n try:\n f(**{\"not a kwarg\": None})\n except DeprecationWarning:\n return True\n except:\n pass\n return False\n\n\ndef check_items(all_dict, names, deprecated, others, module_name, dots=True):\n num_all = len(all_dict)\n num_ref = len(names)\n\n output = \"\"\n\n output += \"Non-deprecated objects in __all__: %i\\n\" % num_all\n output += \"Objects in refguide: %i\\n\\n\" % num_ref\n\n only_all, only_ref, missing = compare(all_dict, others, names, module_name)\n dep_in_ref = set(only_ref).intersection(deprecated)\n only_ref = set(only_ref).difference(deprecated)\n\n if len(dep_in_ref) > 0:\n output += \"Deprecated objects in refguide::\\n\\n\"\n for name in sorted(deprecated):\n output += \" \" + name + \"\\n\"\n\n if len(only_all) == len(only_ref) == len(missing) == 0:\n if dots:\n output_dot('.')\n return [(None, True, output)]\n else:\n if len(only_all) > 0:\n output += \"ERROR: objects in %s.__all__ but not in refguide::\\n\\n\" % module_name\n for name in sorted(only_all):\n output += \" \" + name + \"\\n\"\n\n if len(only_ref) > 0:\n output += \"ERROR: objects in refguide but not in %s.__all__::\\n\\n\" % module_name\n for name in sorted(only_ref):\n output += \" \" + name + \"\\n\"\n\n if len(missing) > 0:\n output += \"ERROR: missing objects::\\n\\n\"\n for name in sorted(missing):\n output += \" \" + name + \"\\n\"\n\n if dots:\n output_dot('F')\n return [(None, False, output)]\n\n\ndef validate_rst_syntax(text, name, dots=True):\n if text is None:\n if dots:\n output_dot('E')\n return False, \"ERROR: %s: no documentation\" % (name,)\n\n ok_unknown_items = set([\n 'mod', 'currentmodule', 'autosummary', 'data',\n 'obj', 'versionadded', 'versionchanged', 'module', 'class',\n 'ref', 'func', 'toctree', 'moduleauthor',\n 'sectionauthor', 'codeauthor', 'eq',\n ])\n\n # Run through docutils\n error_stream = io.StringIO()\n\n def resolve(name, is_label=False):\n return (\"http:\/\/foo\", name)\n\n token = ''\n\n docutils.core.publish_doctree(\n text, token,\n settings_overrides = dict(halt_level=5,\n traceback=True,\n default_reference_context='title-reference',\n default_role='emphasis',\n link_base='',\n resolve_name=resolve,\n stylesheet_path='',\n raw_enabled=0,\n file_insertion_enabled=0,\n warning_stream=error_stream))\n\n # Print errors, disregarding unimportant ones\n error_msg = error_stream.getvalue()\n errors = error_msg.split(token)\n success = True\n output = \"\"\n\n for error in errors:\n lines = error.splitlines()\n if not lines:\n continue\n\n m = re.match(r'.*Unknown (?:interpreted text role|directive type) \"(.*)\".*$', lines[0])\n if m:\n if m.group(1) in ok_unknown_items:\n continue\n\n m = re.match(r'.*Error in \"math\" directive:.*unknown option: \"label\"', \" \".join(lines), re.S)\n if m:\n continue\n\n output += name + lines[0] + \"::\\n \" + \"\\n \".join(lines[1:]).rstrip() + \"\\n\"\n success = False\n\n if not success:\n output += \" \" + \"-\"*72 + \"\\n\"\n for lineno, line in enumerate(text.splitlines()):\n output += \" %-4d %s\\n\" % (lineno+1, line)\n output += \" \" + \"-\"*72 + \"\\n\\n\"\n\n if dots:\n output_dot('.' 
if success else 'F')\n return success, output\n\n\ndef output_dot(msg='.', stream=sys.stderr):\n stream.write(msg)\n stream.flush()\n\n\ndef check_rest(module, names, dots=True):\n \"\"\"\n Check reStructuredText formatting of docstrings\n\n Returns: [(name, success_flag, output), ...]\n \"\"\"\n\n try:\n skip_types = (dict, str, unicode, float, int)\n except NameError:\n # python 3\n skip_types = (dict, str, float, int)\n\n results = []\n\n if module.__name__[6:] not in OTHER_MODULE_DOCS:\n results += [(module.__name__,) +\n validate_rst_syntax(inspect.getdoc(module),\n module.__name__, dots=dots)]\n\n for name in names:\n full_name = module.__name__ + '.' + name\n obj = getattr(module, name, None)\n\n if obj is None:\n results.append((full_name, False, \"%s has no docstring\" % (full_name,)))\n continue\n elif isinstance(obj, skip_types):\n continue\n\n if inspect.ismodule(obj):\n text = inspect.getdoc(obj)\n else:\n try:\n text = str(get_doc_object(obj))\n except:\n import traceback\n results.append((full_name, False,\n \"Error in docstring format!\\n\" +\n traceback.format_exc()))\n continue\n\n m = re.search(\"([\\x00-\\x09\\x0b-\\x1f])\", text)\n if m:\n msg = (\"Docstring contains a non-printable character %r! \"\n \"Maybe forgot r\\\"\\\"\\\"?\" % (m.group(1),))\n results.append((full_name, False, msg))\n continue\n\n try:\n src_file = short_path(inspect.getsourcefile(obj))\n except TypeError:\n src_file = None\n\n if src_file:\n file_full_name = src_file + ':' + full_name\n else:\n file_full_name = full_name\n\n results.append((full_name,) +\n validate_rst_syntax(text, file_full_name, dots=dots))\n\n return results\n\n\n### Doctest helpers ####\n\n# the namespace to run examples in\nDEFAULT_NAMESPACE = {'np': np}\n\n# the namespace to do checks in\nCHECK_NAMESPACE = {\n 'np': np,\n 'assert_allclose': np.testing.assert_allclose,\n 'assert_equal': np.testing.assert_equal,\n # recognize numpy repr's\n 'array': np.array,\n 'matrix': np.matrix,\n 'int64': np.int64,\n 'uint64': np.uint64,\n 'int8': np.int8,\n 'int32': np.int32,\n 'float64': np.float64,\n 'dtype': np.dtype,\n 'nan': np.nan,\n 'NaN': np.nan,\n 'inf': np.inf,\n 'Inf': np.inf, }\n\n\nclass DTRunner(doctest.DocTestRunner):\n DIVIDER = \"\\n\"\n\n def __init__(self, item_name, checker=None, verbose=None, optionflags=0):\n self._item_name = item_name\n doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,\n optionflags=optionflags)\n\n def _report_item_name(self, out, new_line=False):\n if self._item_name is not None:\n if new_line:\n out(\"\\n\")\n self._item_name = None\n\n def report_start(self, out, test, example):\n self._checker._source = example.source\n return doctest.DocTestRunner.report_start(self, out, test, example)\n\n def report_success(self, out, test, example, got):\n if self._verbose:\n self._report_item_name(out, new_line=True)\n return doctest.DocTestRunner.report_success(\n self, out, test, example, got)\n\n def report_unexpected_exception(self, out, test, example, exc_info):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_unexpected_exception(\n self, out, test, example, exc_info)\n\n def report_failure(self, out, test, example, got):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_failure(self, out, test,\n example, got)\n\nclass Checker(doctest.OutputChecker):\n obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')\n vanilla = doctest.OutputChecker()\n rndm_markers = {'# random', '# Random', '#random', '#Random', \"# may vary\"}\n stopwords = {'plt.', 
'.hist', '.show', '.ylim', '.subplot(',\n 'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',\n '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',\n 'set_xlim', '# reformatted'}\n\n def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):\n self.parse_namedtuples = parse_namedtuples\n self.atol, self.rtol = atol, rtol\n if ns is None:\n self.ns = dict(CHECK_NAMESPACE)\n else:\n self.ns = ns\n\n def check_output(self, want, got, optionflags):\n # cut it short if they are equal\n if want == got:\n return True\n\n # skip stopwords in source\n if any(word in self._source for word in self.stopwords):\n return True\n\n # skip random stuff\n if any(word in want for word in self.rndm_markers):\n return True\n\n # skip function\/object addresses\n if self.obj_pattern.search(got):\n return True\n\n # ignore comments (e.g. signal.freqresp)\n if want.lstrip().startswith(\"#\"):\n return True\n\n # try the standard doctest\n try:\n if self.vanilla.check_output(want, got, optionflags):\n return True\n except Exception:\n pass\n\n # OK then, convert strings to objects\n try:\n a_want = eval(want, dict(self.ns))\n a_got = eval(got, dict(self.ns))\n except:\n if not self.parse_namedtuples:\n return False\n # suppose that \"want\" is a tuple, and \"got\" is smth like\n # MoodResult(statistic=10, pvalue=0.1).\n # Then convert the latter to the tuple (10, 0.1),\n # and then compare the tuples.\n try:\n num = len(a_want)\n regex = ('[\\w\\d_]+\\(' +\n ', '.join(['[\\w\\d_]+=(.+)']*num) +\n '\\)')\n grp = re.findall(regex, got.replace('\\n', ' '))\n if len(grp) > 1: # no more than one for now\n return False\n # fold it back to a tuple\n got_again = '(' + ', '.join(grp[0]) + ')'\n return self.check_output(want, got_again, optionflags)\n except Exception:\n return False\n\n # ... 
and defer to numpy\n try:\n return self._do_check(a_want, a_got)\n except Exception:\n # heterog tuple, eg (1, np.array([1., 2.]))\n try:\n return all(self._do_check(w, g) for w, g in zip(a_want, a_got))\n except (TypeError, ValueError):\n return False\n\n def _do_check(self, want, got):\n # This should be done exactly as written to correctly handle all of\n # numpy-comparable objects, strings, and heterogenous tuples\n try:\n if want == got:\n return True\n except Exception:\n pass\n return np.allclose(want, got, atol=self.atol, rtol=self.rtol)\n\n\ndef _run_doctests(tests, full_name, verbose, doctest_warnings):\n \"\"\"Run modified doctests for the set of `tests`.\n\n Returns: list of [(success_flag, output), ...]\n \"\"\"\n flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL\n runner = DTRunner(full_name, checker=Checker(), optionflags=flags,\n verbose=verbose)\n\n output = []\n success = True\n def out(msg):\n output.append(msg)\n\n class MyStderr(object):\n \"\"\"Redirect stderr to the current stdout\"\"\"\n def write(self, msg):\n if doctest_warnings:\n sys.stdout.write(msg)\n else:\n out(msg)\n\n # Run tests, trying to restore global state afterward\n old_printoptions = np.get_printoptions()\n old_errstate = np.seterr()\n old_stderr = sys.stderr\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n sys.stderr = MyStderr()\n try:\n os.chdir(tmpdir)\n\n # try to ensure random seed is NOT reproducible\n np.random.seed(None)\n\n for t in tests:\n t.filename = short_path(t.filename, cwd)\n fails, successes = runner.run(t, out=out)\n if fails > 0:\n success = False\n finally:\n sys.stderr = old_stderr\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n np.set_printoptions(**old_printoptions)\n np.seterr(**old_errstate)\n\n return success, output\n\n\ndef check_doctests(module, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"Check code in docstrings of the module's public symbols.\n\n Returns: list of [(item_name, success_flag, output), ...]\n \"\"\"\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n # Loop over non-deprecated items\n results = []\n\n for name in get_all_dict(module)[0]:\n full_name = module.__name__ + '.' + name\n\n if full_name in DOCTEST_SKIPLIST:\n continue\n\n try:\n obj = getattr(module, name)\n except AttributeError:\n import traceback\n results.append((full_name, False,\n \"Missing item!\\n\" +\n traceback.format_exc()))\n continue\n\n finder = doctest.DocTestFinder()\n try:\n tests = finder.find(obj, name, globs=dict(ns))\n except:\n import traceback\n results.append((full_name, False,\n \"Failed to get doctests!\\n\" +\n traceback.format_exc()))\n continue\n\n success, output = _run_doctests(tests, full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' 
if success else 'F')\n\n results.append((full_name, success, \"\".join(output)))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef check_doctests_testfile(fname, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"Check code in a text file.\n\n Mimic `check_doctests` above, differing mostly in test discovery.\n (which is borrowed from stdlib's doctest.testfile here,\n https:\/\/github.com\/python-git\/python\/blob\/master\/Lib\/doctest.py)\n\n Returns: list of [(item_name, success_flag, output), ...]\n\n Notes\n -----\n\n We also try to weed out pseudocode:\n * We maintain a list of exceptions which signal pseudocode,\n * We split the text file into \"blocks\" of code separated by empty lines\n and\/or intervening text.\n * If a block contains a marker, the whole block is then assumed to be\n pseudocode. It is then not being doctested.\n\n The rationale is that typically, the text looks like this:\n\n blah\n \n >>> from numpy import some_module # pseudocode!\n >>> func = some_module.some_function\n >>> func(42) # still pseudocode\n 146\n \n blah\n \n >>> 2 + 3 # real code, doctest it\n 5\n\n \"\"\"\n results = []\n\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n _, short_name = os.path.split(fname)\n if short_name in DOCTEST_SKIPLIST:\n return results\n\n full_name = fname\n text = open(fname).read()\n\n PSEUDOCODE = set(['some_function', 'some_module', 'import example',\n 'ctypes.CDLL', # likely need compiling, skip it\n 'integrate.nquad(func,' # ctypes integrate tutotial\n ])\n\n # split the text into \"blocks\" and try to detect and omit pseudocode blocks.\n parser = doctest.DocTestParser()\n good_parts = []\n for part in text.split('\\n\\n'):\n tests = parser.get_doctest(part, ns, fname, fname, 0)\n if any(word in ex.source for word in PSEUDOCODE\n for ex in tests.examples):\n # omit it\n pass\n else:\n # `part` looks like a good code, let's doctest it\n good_parts += [part]\n\n # Reassemble the good bits and doctest them:\n good_text = '\\n\\n'.join(good_parts)\n tests = parser.get_doctest(good_text, ns, fname, fname, 0)\n success, output = _run_doctests([tests], full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' 
if success else 'F')\n\n results.append((full_name, success, \"\".join(output)))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef init_matplotlib():\n global HAVE_MATPLOTLIB\n\n try:\n import matplotlib\n matplotlib.use('Agg')\n HAVE_MATPLOTLIB = True\n except ImportError:\n HAVE_MATPLOTLIB = False\n\n\ndef main(argv):\n parser = ArgumentParser(usage=__doc__.lstrip())\n parser.add_argument(\"module_names\", metavar=\"SUBMODULES\", default=[],\n nargs='*',\n help=\"Submodules to check (default: all public)\")\n parser.add_argument(\"--doctests\", action=\"store_true\",\n help=\"Run also doctests\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n parser.add_argument(\"--doctest-warnings\", action=\"store_true\",\n help=\"Enforce warning checking for doctests\")\n parser.add_argument(\"--skip-examples\", action=\"store_true\",\n help=\"Skip running doctests in the examples.\")\n args = parser.parse_args(argv)\n\n modules = []\n names_dict = {}\n\n if args.module_names:\n args.skip_examples = True\n else:\n args.module_names = list(PUBLIC_SUBMODULES)\n\n os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'\n\n module_names = list(args.module_names)\n for name in list(module_names):\n if name in OTHER_MODULE_DOCS:\n name = OTHER_MODULE_DOCS[name]\n if name not in module_names:\n module_names.append(name)\n\n for submodule_name in module_names:\n module_name = BASE_MODULE + '.' + submodule_name\n __import__(module_name)\n module = sys.modules[module_name]\n\n if submodule_name not in OTHER_MODULE_DOCS:\n find_names(module, names_dict)\n\n if submodule_name in args.module_names:\n modules.append(module)\n\n dots = True\n success = True\n results = []\n\n print(\"Running checks for %d modules:\" % (len(modules),))\n\n if args.doctests or not args.skip_examples:\n init_matplotlib()\n\n for module in modules:\n if dots:\n if module is not modules[0]:\n sys.stderr.write(' ')\n sys.stderr.write(module.__name__ + ' ')\n sys.stderr.flush()\n\n all_dict, deprecated, others = get_all_dict(module)\n names = names_dict.get(module.__name__, set())\n\n mod_results = []\n mod_results += check_items(all_dict, names, deprecated, others, module.__name__)\n mod_results += check_rest(module, set(names).difference(deprecated),\n dots=dots)\n if args.doctests:\n mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,\n doctest_warnings=args.doctest_warnings)\n\n for v in mod_results:\n assert isinstance(v, tuple), v\n\n results.append((module, mod_results))\n\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n if not args.skip_examples:\n examples_path = os.path.join(\n os.getcwd(), 'doc', 'source', 'regression', '*.rst')\n print('\\nChecking examples files at %s:' % examples_path)\n for filename in sorted(glob.glob(examples_path)):\n if dots:\n sys.stderr.write('\\n')\n sys.stderr.write(os.path.split(filename)[1] + ' ')\n sys.stderr.flush()\n\n examples_results = check_doctests_testfile(\n filename, (args.verbose >= 2), dots=dots,\n doctest_warnings=args.doctest_warnings)\n\n def scratch(): pass # stub out a \"module\", see below\n scratch.__name__ = filename\n results.append((scratch, examples_results))\n\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n # Report results\n all_success = True\n\n for module, mod_results in results:\n success = all(x[1] for x in mod_results)\n all_success = all_success and success\n\n if success and args.verbose == 0:\n continue\n\n print(\"\")\n print(\"=\" * 
len(module.__name__))\n print(module.__name__)\n print(\"=\" * len(module.__name__))\n print(\"\")\n\n for name, success, output in mod_results:\n if name is None:\n if not success or args.verbose >= 1:\n print(output.strip())\n print(\"\")\n elif not success or (args.verbose >= 2 and output.strip()):\n print(name)\n print(\"-\"*len(name))\n print(\"\")\n print(output.strip())\n print(\"\")\n\n if all_success:\n print(\"\\nOK: refguide and doctests checks passed!\")\n sys.exit(0)\n else:\n print(\"\\nERROR: refguide or doctests have errors\")\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main(argv=sys.argv[1:])\n","license":"mit"} {"repo_name":"samklr\/spark-timeseries","path":"python\/sparkts\/test\/test_timeseriesrdd.py","copies":"6","size":"5407","content":"from test_utils import PySparkTestCase\nfrom sparkts.timeseriesrdd import *\nfrom sparkts.timeseriesrdd import _TimeSeriesSerializer\nfrom sparkts.datetimeindex import *\nimport pandas as pd\nimport numpy as np\nfrom unittest import TestCase\nfrom io import BytesIO\nfrom pyspark.sql import SQLContext\n\nclass TimeSeriesSerializerTestCase(TestCase):\n def test_times_series_serializer(self):\n serializer = _TimeSeriesSerializer()\n stream = BytesIO()\n series = [('abc', np.array([4.0, 4.0, 5.0])), ('123', np.array([1.0, 2.0, 3.0]))]\n serializer.dump_stream(iter(series), stream)\n stream.seek(0)\n reconstituted = list(serializer.load_stream(stream))\n self.assertEquals(reconstituted[0][0], series[0][0])\n self.assertEquals(reconstituted[1][0], series[1][0])\n self.assertTrue((reconstituted[0][1] == series[0][1]).all())\n self.assertTrue((reconstituted[1][1] == series[1][1]).all())\n\nclass TimeSeriesRDDTestCase(PySparkTestCase):\n def test_time_series_rdd(self):\n freq = DayFrequency(1, self.sc)\n start = '2015-04-09'\n dt_index = uniform(start, periods=10, freq=freq, sc=self.sc)\n vecs = [np.arange(0, 10), np.arange(10, 20), np.arange(20, 30)]\n rdd = self.sc.parallelize(vecs).map(lambda x: (str(x[0]), x))\n tsrdd = TimeSeriesRDD(dt_index, rdd)\n self.assertEquals(tsrdd.count(), 3)\n\n contents = tsrdd.collectAsMap()\n self.assertEquals(len(contents), 3)\n self.assertTrue((contents[\"0\"] == np.arange(0, 10)).all())\n self.assertTrue((contents[\"10\"] == np.arange(10, 20)).all())\n self.assertTrue((contents[\"20\"] == np.arange(20, 30)).all())\n\n subslice = tsrdd['2015-04-10':'2015-04-15']\n self.assertEquals(subslice.index(), uniform('2015-04-10', periods=6, freq=freq, sc=self.sc))\n contents = subslice.collectAsMap()\n self.assertEquals(len(contents), 3)\n self.assertTrue((contents[\"0\"] == np.arange(1, 7)).all())\n self.assertTrue((contents[\"10\"] == np.arange(11, 17)).all())\n self.assertTrue((contents[\"20\"] == np.arange(21, 27)).all())\n\n def test_to_instants(self):\n vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]\n labels = ['a', 'b', 'c', 'd', 'e']\n start = '2015-4-9'\n dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)\n rdd = self.sc.parallelize(zip(labels, vecs), 3)\n tsrdd = TimeSeriesRDD(dt_index, rdd)\n samples = tsrdd.to_instants().collect()\n target_dates = ['2015-4-9', '2015-4-10', '2015-4-11', '2015-4-12']\n self.assertEquals([x[0] for x in samples], [pd.Timestamp(x) for x in target_dates])\n self.assertTrue((samples[0][1] == np.arange(0, 20, 4)).all())\n self.assertTrue((samples[1][1] == np.arange(1, 20, 4)).all())\n self.assertTrue((samples[2][1] == np.arange(2, 20, 4)).all())\n self.assertTrue((samples[3][1] == np.arange(3, 20, 4)).all())\n\n def 
test_to_observations(self):\n sql_ctx = SQLContext(self.sc)\n vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]\n labels = ['a', 'b', 'c', 'd', 'e']\n start = '2015-4-9'\n dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)\n print(dt_index._jdt_index.size())\n rdd = self.sc.parallelize(zip(labels, vecs), 3)\n tsrdd = TimeSeriesRDD(dt_index, rdd)\n\n obsdf = tsrdd.to_observations_dataframe(sql_ctx)\n tsrdd_from_df = time_series_rdd_from_observations( \\\n dt_index, obsdf, 'timestamp', 'key', 'value')\n \n ts1 = tsrdd.collect()\n ts1.sort(key = lambda x: x[0])\n ts2 = tsrdd_from_df.collect()\n ts2.sort(key = lambda x: x[0])\n self.assertTrue(all([pair[0][0] == pair[1][0] and (pair[0][1] == pair[1][1]).all() \\\n for pair in zip(ts1, ts2)]))\n \n df1 = obsdf.collect()\n df1.sort(key = lambda x: x.value)\n df2 = tsrdd_from_df.to_observations_dataframe(sql_ctx).collect()\n df2.sort(key = lambda x: x.value)\n self.assertEquals(df1, df2)\n\n def test_filter(self):\n vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]\n labels = ['a', 'b', 'c', 'd', 'e']\n start = '2015-4-9'\n dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)\n rdd = self.sc.parallelize(zip(labels, vecs), 3)\n tsrdd = TimeSeriesRDD(dt_index, rdd)\n filtered = tsrdd.filter(lambda x: x[0] == 'a' or x[0] == 'b')\n self.assertEquals(filtered.count(), 2)\n # assert it has TimeSeriesRDD functionality:\n filtered['2015-04-10':'2015-04-15'].count()\n\n def test_to_pandas_series_rdd(self):\n vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]\n labels = ['a', 'b', 'c', 'd', 'e']\n start = '2015-4-9'\n dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)\n rdd = self.sc.parallelize(zip(labels, vecs), 3)\n tsrdd = TimeSeriesRDD(dt_index, rdd)\n\n series_arr = tsrdd.to_pandas_series_rdd().collect()\n\n pd_index = dt_index.to_pandas_index()\n self.assertEquals(len(vecs), len(series_arr))\n for i in xrange(len(vecs)):\n self.assertEquals(series_arr[i][0], labels[i])\n self.assertTrue(pd.Series(vecs[i], pd_index).equals(series_arr[i][1]))\n\n","license":"apache-2.0"} {"repo_name":"jblackburne\/scikit-learn","path":"sklearn\/tree\/tests\/test_tree.py","copies":"7","size":"55471","content":"\"\"\"\nTesting for the tree module (sklearn.tree).\n\"\"\"\nimport pickle\nfrom functools import partial\nfrom itertools import product\nimport struct\n\nimport numpy as np\nfrom scipy.sparse import csc_matrix\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import coo_matrix\n\nfrom sklearn.random_projection import sparse_random_matrix\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_in\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_greater_equal\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_less_equal\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import raises\nfrom sklearn.utils.testing import ignore_warnings\n\nfrom sklearn.utils.validation import check_random_state\n\nfrom sklearn.exceptions import NotFittedError\n\nfrom sklearn.tree 
import DecisionTreeClassifier\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import ExtraTreeClassifier\nfrom sklearn.tree import ExtraTreeRegressor\n\nfrom sklearn import tree\nfrom sklearn.tree._tree import TREE_LEAF\nfrom sklearn import datasets\n\nfrom sklearn.utils import compute_sample_weight\n\nCLF_CRITERIONS = (\"gini\", \"entropy\")\nREG_CRITERIONS = (\"mse\", \"mae\")\n\nCLF_TREES = {\n \"DecisionTreeClassifier\": DecisionTreeClassifier,\n \"Presort-DecisionTreeClassifier\": partial(DecisionTreeClassifier,\n presort=True),\n \"ExtraTreeClassifier\": ExtraTreeClassifier,\n}\n\nREG_TREES = {\n \"DecisionTreeRegressor\": DecisionTreeRegressor,\n \"Presort-DecisionTreeRegressor\": partial(DecisionTreeRegressor,\n presort=True),\n \"ExtraTreeRegressor\": ExtraTreeRegressor,\n}\n\nALL_TREES = dict()\nALL_TREES.update(CLF_TREES)\nALL_TREES.update(REG_TREES)\n\nSPARSE_TREES = [\"DecisionTreeClassifier\", \"DecisionTreeRegressor\",\n \"ExtraTreeClassifier\", \"ExtraTreeRegressor\"]\n\n\nX_small = np.array([\n [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],\n [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],\n [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],\n [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],\n [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],\n [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],\n [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],\n [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],\n [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],\n [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],\n [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],\n [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],\n [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],\n [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],\n [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],\n [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],\n [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],\n [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],\n [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],\n [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],\n [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],\n [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],\n [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])\n\ny_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,\n 0, 0]\ny_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,\n 0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]\n\n# toy sample\nX = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\ny = [-1, -1, -1, 1, 1, 1]\nT = [[-1, -1], [2, 2], [3, 2]]\ntrue_result = [-1, 1, 1]\n\n# also load the iris dataset\n# and randomly permute it\niris = datasets.load_iris()\nrng = np.random.RandomState(1)\nperm = rng.permutation(iris.target.size)\niris.data = iris.data[perm]\niris.target = iris.target[perm]\n\n# also load the boston dataset\n# and randomly permute it\nboston = datasets.load_boston()\nperm = rng.permutation(boston.target.size)\nboston.data = boston.data[perm]\nboston.target = boston.target[perm]\n\ndigits = datasets.load_digits()\nperm = rng.permutation(digits.target.size)\ndigits.data = digits.data[perm]\ndigits.target = digits.target[perm]\n\nrandom_state = check_random_state(0)\nX_multilabel, y_multilabel = datasets.make_multilabel_classification(\n random_state=0, n_samples=30, n_features=10)\n\nX_sparse_pos = random_state.uniform(size=(20, 5))\nX_sparse_pos[X_sparse_pos <= 0.8] = 0.\ny_random = random_state.randint(0, 4, 
size=(20, ))\nX_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)\n\n\nDATASETS = {\n \"iris\": {\"X\": iris.data, \"y\": iris.target},\n \"boston\": {\"X\": boston.data, \"y\": boston.target},\n \"digits\": {\"X\": digits.data, \"y\": digits.target},\n \"toy\": {\"X\": X, \"y\": y},\n \"clf_small\": {\"X\": X_small, \"y\": y_small},\n \"reg_small\": {\"X\": X_small, \"y\": y_small_reg},\n \"multilabel\": {\"X\": X_multilabel, \"y\": y_multilabel},\n \"sparse-pos\": {\"X\": X_sparse_pos, \"y\": y_random},\n \"sparse-neg\": {\"X\": - X_sparse_pos, \"y\": y_random},\n \"sparse-mix\": {\"X\": X_sparse_mix, \"y\": y_random},\n \"zeros\": {\"X\": np.zeros((20, 3)), \"y\": y_random}\n}\n\nfor name in DATASETS:\n DATASETS[name][\"X_sparse\"] = csc_matrix(DATASETS[name][\"X\"])\n\n\ndef assert_tree_equal(d, s, message):\n assert_equal(s.node_count, d.node_count,\n \"{0}: inequal number of node ({1} != {2})\"\n \"\".format(message, s.node_count, d.node_count))\n\n assert_array_equal(d.children_right, s.children_right,\n message + \": inequal children_right\")\n assert_array_equal(d.children_left, s.children_left,\n message + \": inequal children_left\")\n\n external = d.children_right == TREE_LEAF\n internal = np.logical_not(external)\n\n assert_array_equal(d.feature[internal], s.feature[internal],\n message + \": inequal features\")\n assert_array_equal(d.threshold[internal], s.threshold[internal],\n message + \": inequal threshold\")\n assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),\n message + \": inequal sum(n_node_samples)\")\n assert_array_equal(d.n_node_samples, s.n_node_samples,\n message + \": inequal n_node_samples\")\n\n assert_almost_equal(d.impurity, s.impurity,\n err_msg=message + \": inequal impurity\")\n\n assert_array_almost_equal(d.value[external], s.value[external],\n err_msg=message + \": inequal value\")\n\n\ndef test_classification_toy():\n # Check classification on a toy dataset.\n for name, Tree in CLF_TREES.items():\n clf = Tree(random_state=0)\n clf.fit(X, y)\n assert_array_equal(clf.predict(T), true_result,\n \"Failed with {0}\".format(name))\n\n clf = Tree(max_features=1, random_state=1)\n clf.fit(X, y)\n assert_array_equal(clf.predict(T), true_result,\n \"Failed with {0}\".format(name))\n\n\ndef test_weighted_classification_toy():\n # Check classification on a weighted toy dataset.\n for name, Tree in CLF_TREES.items():\n clf = Tree(random_state=0)\n\n clf.fit(X, y, sample_weight=np.ones(len(X)))\n assert_array_equal(clf.predict(T), true_result,\n \"Failed with {0}\".format(name))\n\n clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)\n assert_array_equal(clf.predict(T), true_result,\n \"Failed with {0}\".format(name))\n\n\ndef test_regression_toy():\n # Check regression on a toy dataset.\n for name, Tree in REG_TREES.items():\n reg = Tree(random_state=1)\n reg.fit(X, y)\n assert_almost_equal(reg.predict(T), true_result,\n err_msg=\"Failed with {0}\".format(name))\n\n clf = Tree(max_features=1, random_state=1)\n clf.fit(X, y)\n assert_almost_equal(reg.predict(T), true_result,\n err_msg=\"Failed with {0}\".format(name))\n\n\ndef test_xor():\n # Check on a XOR problem\n y = np.zeros((10, 10))\n y[:5, :5] = 1\n y[5:, 5:] = 1\n\n gridx, gridy = np.indices(y.shape)\n\n X = np.vstack([gridx.ravel(), gridy.ravel()]).T\n y = y.ravel()\n\n for name, Tree in CLF_TREES.items():\n clf = Tree(random_state=0)\n clf.fit(X, y)\n assert_equal(clf.score(X, y), 1.0,\n \"Failed with {0}\".format(name))\n\n clf = Tree(random_state=0, 
max_features=1)\n clf.fit(X, y)\n assert_equal(clf.score(X, y), 1.0,\n \"Failed with {0}\".format(name))\n\n\ndef test_iris():\n # Check consistency on dataset iris.\n for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):\n clf = Tree(criterion=criterion, random_state=0)\n clf.fit(iris.data, iris.target)\n score = accuracy_score(clf.predict(iris.data), iris.target)\n assert_greater(score, 0.9,\n \"Failed with {0}, criterion = {1} and score = {2}\"\n \"\".format(name, criterion, score))\n\n clf = Tree(criterion=criterion, max_features=2, random_state=0)\n clf.fit(iris.data, iris.target)\n score = accuracy_score(clf.predict(iris.data), iris.target)\n assert_greater(score, 0.5,\n \"Failed with {0}, criterion = {1} and score = {2}\"\n \"\".format(name, criterion, score))\n\n\ndef test_boston():\n # Check consistency on dataset boston house prices.\n\n for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):\n reg = Tree(criterion=criterion, random_state=0)\n reg.fit(boston.data, boston.target)\n score = mean_squared_error(boston.target, reg.predict(boston.data))\n assert_less(score, 1,\n \"Failed with {0}, criterion = {1} and score = {2}\"\n \"\".format(name, criterion, score))\n\n # using fewer features reduces the learning ability of this tree,\n # but reduces training time.\n reg = Tree(criterion=criterion, max_features=6, random_state=0)\n reg.fit(boston.data, boston.target)\n score = mean_squared_error(boston.target, reg.predict(boston.data))\n assert_less(score, 2,\n \"Failed with {0}, criterion = {1} and score = {2}\"\n \"\".format(name, criterion, score))\n\n\ndef test_probability():\n # Predict probabilities using DecisionTreeClassifier.\n\n for name, Tree in CLF_TREES.items():\n clf = Tree(max_depth=1, max_features=1, random_state=42)\n clf.fit(iris.data, iris.target)\n\n prob_predict = clf.predict_proba(iris.data)\n assert_array_almost_equal(np.sum(prob_predict, 1),\n np.ones(iris.data.shape[0]),\n err_msg=\"Failed with {0}\".format(name))\n assert_array_equal(np.argmax(prob_predict, 1),\n clf.predict(iris.data),\n err_msg=\"Failed with {0}\".format(name))\n assert_almost_equal(clf.predict_proba(iris.data),\n np.exp(clf.predict_log_proba(iris.data)), 8,\n err_msg=\"Failed with {0}\".format(name))\n\n\ndef test_arrayrepr():\n # Check the array representation.\n # Check resize\n X = np.arange(10000)[:, np.newaxis]\n y = np.arange(10000)\n\n for name, Tree in REG_TREES.items():\n reg = Tree(max_depth=None, random_state=0)\n reg.fit(X, y)\n\n\ndef test_pure_set():\n # Check when y is pure.\n X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\n y = [1, 1, 1, 1, 1, 1]\n\n for name, TreeClassifier in CLF_TREES.items():\n clf = TreeClassifier(random_state=0)\n clf.fit(X, y)\n assert_array_equal(clf.predict(X), y,\n err_msg=\"Failed with {0}\".format(name))\n\n for name, TreeRegressor in REG_TREES.items():\n reg = TreeRegressor(random_state=0)\n reg.fit(X, y)\n assert_almost_equal(clf.predict(X), y,\n err_msg=\"Failed with {0}\".format(name))\n\n\ndef test_numerical_stability():\n # Check numerical stability.\n X = np.array([\n [152.08097839, 140.40744019, 129.75102234, 159.90493774],\n [142.50700378, 135.81935120, 117.82884979, 162.75781250],\n [127.28772736, 140.40744019, 129.75102234, 159.90493774],\n [132.37025452, 143.71923828, 138.35694885, 157.84558105],\n [103.10237122, 143.71928406, 138.35696411, 157.84559631],\n [127.71276855, 143.71923828, 138.35694885, 157.84558105],\n [120.91514587, 140.40744019, 129.75102234, 159.90493774]])\n\n y 
= np.array(\n [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])\n\n with np.errstate(all=\"raise\"):\n for name, Tree in REG_TREES.items():\n reg = Tree(random_state=0)\n reg.fit(X, y)\n reg.fit(X, -y)\n reg.fit(-X, y)\n reg.fit(-X, -y)\n\n\ndef test_importances():\n # Check variable importances.\n X, y = datasets.make_classification(n_samples=2000,\n n_features=10,\n n_informative=3,\n n_redundant=0,\n n_repeated=0,\n shuffle=False,\n random_state=0)\n\n for name, Tree in CLF_TREES.items():\n clf = Tree(random_state=0)\n\n clf.fit(X, y)\n importances = clf.feature_importances_\n n_important = np.sum(importances > 0.1)\n\n assert_equal(importances.shape[0], 10, \"Failed with {0}\".format(name))\n assert_equal(n_important, 3, \"Failed with {0}\".format(name))\n\n X_new = assert_warns(\n DeprecationWarning, clf.transform, X, threshold=\"mean\")\n assert_less(0, X_new.shape[1], \"Failed with {0}\".format(name))\n assert_less(X_new.shape[1], X.shape[1], \"Failed with {0}\".format(name))\n\n # Check on iris that importances are the same for all builders\n clf = DecisionTreeClassifier(random_state=0)\n clf.fit(iris.data, iris.target)\n clf2 = DecisionTreeClassifier(random_state=0,\n max_leaf_nodes=len(iris.data))\n clf2.fit(iris.data, iris.target)\n\n assert_array_equal(clf.feature_importances_,\n clf2.feature_importances_)\n\n\n@raises(ValueError)\ndef test_importances_raises():\n # Check if variable importance before fit raises ValueError.\n clf = DecisionTreeClassifier()\n clf.feature_importances_\n\n\ndef test_importances_gini_equal_mse():\n # Check that gini is equivalent to mse for binary output variable\n\n X, y = datasets.make_classification(n_samples=2000,\n n_features=10,\n n_informative=3,\n n_redundant=0,\n n_repeated=0,\n shuffle=False,\n random_state=0)\n\n # The gini index and the mean square error (variance) might differ due\n # to numerical instability. 
Since those instabilities mainly occurs at\n # high tree depth, we restrict this maximal depth.\n clf = DecisionTreeClassifier(criterion=\"gini\", max_depth=5,\n random_state=0).fit(X, y)\n reg = DecisionTreeRegressor(criterion=\"mse\", max_depth=5,\n random_state=0).fit(X, y)\n\n assert_almost_equal(clf.feature_importances_, reg.feature_importances_)\n assert_array_equal(clf.tree_.feature, reg.tree_.feature)\n assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)\n assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)\n assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)\n\n\ndef test_max_features():\n # Check max_features.\n for name, TreeRegressor in REG_TREES.items():\n reg = TreeRegressor(max_features=\"auto\")\n reg.fit(boston.data, boston.target)\n assert_equal(reg.max_features_, boston.data.shape[1])\n\n for name, TreeClassifier in CLF_TREES.items():\n clf = TreeClassifier(max_features=\"auto\")\n clf.fit(iris.data, iris.target)\n assert_equal(clf.max_features_, 2)\n\n for name, TreeEstimator in ALL_TREES.items():\n est = TreeEstimator(max_features=\"sqrt\")\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_,\n int(np.sqrt(iris.data.shape[1])))\n\n est = TreeEstimator(max_features=\"log2\")\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_,\n int(np.log2(iris.data.shape[1])))\n\n est = TreeEstimator(max_features=1)\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_, 1)\n\n est = TreeEstimator(max_features=3)\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_, 3)\n\n est = TreeEstimator(max_features=0.01)\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_, 1)\n\n est = TreeEstimator(max_features=0.5)\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_,\n int(0.5 * iris.data.shape[1]))\n\n est = TreeEstimator(max_features=1.0)\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_, iris.data.shape[1])\n\n est = TreeEstimator(max_features=None)\n est.fit(iris.data, iris.target)\n assert_equal(est.max_features_, iris.data.shape[1])\n\n # use values of max_features that are invalid\n est = TreeEstimator(max_features=10)\n assert_raises(ValueError, est.fit, X, y)\n\n est = TreeEstimator(max_features=-1)\n assert_raises(ValueError, est.fit, X, y)\n\n est = TreeEstimator(max_features=0.0)\n assert_raises(ValueError, est.fit, X, y)\n\n est = TreeEstimator(max_features=1.5)\n assert_raises(ValueError, est.fit, X, y)\n\n est = TreeEstimator(max_features=\"foobar\")\n assert_raises(ValueError, est.fit, X, y)\n\n\ndef test_error():\n # Test that it gives proper exception on deficient input.\n for name, TreeEstimator in CLF_TREES.items():\n # predict before fit\n est = TreeEstimator()\n assert_raises(NotFittedError, est.predict_proba, X)\n\n est.fit(X, y)\n X2 = [[-2, -1, 1]] # wrong feature shape for sample\n assert_raises(ValueError, est.predict_proba, X2)\n\n for name, TreeEstimator in ALL_TREES.items():\n # Invalid values for parameters\n assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)\n assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)\n assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)\n assert_raises(ValueError,\n TreeEstimator(min_weight_fraction_leaf=-1).fit,\n X, y)\n assert_raises(ValueError,\n TreeEstimator(min_weight_fraction_leaf=0.51).fit,\n X, y)\n assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,\n X, y)\n assert_raises(ValueError, 
TreeEstimator(min_samples_split=0.0).fit,\n X, y)\n assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,\n X, y)\n assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)\n assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)\n assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)\n\n # Wrong dimensions\n est = TreeEstimator()\n y2 = y[:-1]\n assert_raises(ValueError, est.fit, X, y2)\n\n # Test with arrays that are non-contiguous.\n Xf = np.asfortranarray(X)\n est = TreeEstimator()\n est.fit(Xf, y)\n assert_almost_equal(est.predict(T), true_result)\n\n # predict before fitting\n est = TreeEstimator()\n assert_raises(NotFittedError, est.predict, T)\n\n # predict on vector with different dims\n est.fit(X, y)\n t = np.asarray(T)\n assert_raises(ValueError, est.predict, t[:, 1:])\n\n # wrong sample shape\n Xt = np.array(X).T\n\n est = TreeEstimator()\n est.fit(np.dot(X, Xt), y)\n assert_raises(ValueError, est.predict, X)\n assert_raises(ValueError, est.apply, X)\n\n clf = TreeEstimator()\n clf.fit(X, y)\n assert_raises(ValueError, clf.predict, Xt)\n assert_raises(ValueError, clf.apply, Xt)\n\n # apply before fitting\n est = TreeEstimator()\n assert_raises(NotFittedError, est.apply, T)\n\n\ndef test_min_samples_split():\n \"\"\"Test min_samples_split parameter\"\"\"\n X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))\n y = iris.target\n\n # test both DepthFirstTreeBuilder and BestFirstTreeBuilder\n # by setting max_leaf_nodes\n for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):\n TreeEstimator = ALL_TREES[name]\n\n # test for integer parameter\n est = TreeEstimator(min_samples_split=10,\n max_leaf_nodes=max_leaf_nodes,\n random_state=0)\n est.fit(X, y)\n # count samples on nodes, -1 means it is a leaf\n node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]\n\n assert_greater(np.min(node_samples), 9,\n \"Failed with {0}\".format(name))\n\n # test for float parameter\n est = TreeEstimator(min_samples_split=0.2,\n max_leaf_nodes=max_leaf_nodes,\n random_state=0)\n est.fit(X, y)\n # count samples on nodes, -1 means it is a leaf\n node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]\n\n assert_greater(np.min(node_samples), 9,\n \"Failed with {0}\".format(name))\n\n\n\ndef test_min_samples_leaf():\n # Test if leaves contain more than leaf_count training examples\n X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))\n y = iris.target\n\n # test both DepthFirstTreeBuilder and BestFirstTreeBuilder\n # by setting max_leaf_nodes\n for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):\n TreeEstimator = ALL_TREES[name]\n\n # test integer parameter\n est = TreeEstimator(min_samples_leaf=5,\n max_leaf_nodes=max_leaf_nodes,\n random_state=0)\n est.fit(X, y)\n out = est.tree_.apply(X)\n node_counts = np.bincount(out)\n # drop inner nodes\n leaf_count = node_counts[node_counts != 0]\n assert_greater(np.min(leaf_count), 4,\n \"Failed with {0}\".format(name))\n\n # test float parameter\n est = TreeEstimator(min_samples_leaf=0.1,\n max_leaf_nodes=max_leaf_nodes,\n random_state=0)\n est.fit(X, y)\n out = est.tree_.apply(X)\n node_counts = np.bincount(out)\n # drop inner nodes\n leaf_count = node_counts[node_counts != 0]\n assert_greater(np.min(leaf_count), 4,\n \"Failed with {0}\".format(name))\n\n\ndef check_min_weight_fraction_leaf(name, datasets, sparse=False):\n \"\"\"Test if leaves contain at least min_weight_fraction_leaf of the\n training set\"\"\"\n if sparse:\n X = 
DATASETS[datasets][\"X_sparse\"].astype(np.float32)\n else:\n X = DATASETS[datasets][\"X\"].astype(np.float32)\n y = DATASETS[datasets][\"y\"]\n\n weights = rng.rand(X.shape[0])\n total_weight = np.sum(weights)\n\n TreeEstimator = ALL_TREES[name]\n\n # test both DepthFirstTreeBuilder and BestFirstTreeBuilder\n # by setting max_leaf_nodes\n for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):\n est = TreeEstimator(min_weight_fraction_leaf=frac,\n max_leaf_nodes=max_leaf_nodes,\n random_state=0)\n est.fit(X, y, sample_weight=weights)\n\n if sparse:\n out = est.tree_.apply(X.tocsr())\n\n else:\n out = est.tree_.apply(X)\n\n node_weights = np.bincount(out, weights=weights)\n # drop inner nodes\n leaf_weights = node_weights[node_weights != 0]\n assert_greater_equal(\n np.min(leaf_weights),\n total_weight * est.min_weight_fraction_leaf,\n \"Failed with {0} \"\n \"min_weight_fraction_leaf={1}\".format(\n name, est.min_weight_fraction_leaf))\n\n\ndef test_min_weight_fraction_leaf():\n # Check on dense input\n for name in ALL_TREES:\n yield check_min_weight_fraction_leaf, name, \"iris\"\n\n # Check on sparse input\n for name in SPARSE_TREES:\n yield check_min_weight_fraction_leaf, name, \"multilabel\", True\n\n\ndef test_min_impurity_split():\n # test if min_impurity_split creates leaves with impurity\n # [0, min_impurity_split) when min_samples_leaf = 1 and\n # min_samples_split = 2.\n X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))\n y = iris.target\n\n # test both DepthFirstTreeBuilder and BestFirstTreeBuilder\n # by setting max_leaf_nodes\n for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):\n TreeEstimator = ALL_TREES[name]\n min_impurity_split = .5\n\n # verify leaf nodes without min_impurity_split less than\n # impurity 1e-7\n est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,\n random_state=0)\n assert_less_equal(est.min_impurity_split, 1e-7,\n \"Failed, min_impurity_split = {0} > 1e-7\".format(\n est.min_impurity_split))\n est.fit(X, y)\n for node in range(est.tree_.node_count):\n if (est.tree_.children_left[node] == TREE_LEAF or\n est.tree_.children_right[node] == TREE_LEAF):\n assert_equal(est.tree_.impurity[node], 0.,\n \"Failed with {0} \"\n \"min_impurity_split={1}\".format(\n est.tree_.impurity[node],\n est.min_impurity_split))\n\n # verify leaf nodes have impurity [0,min_impurity_split] when using min_impurity_split\n est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,\n min_impurity_split=min_impurity_split,\n random_state=0)\n est.fit(X, y)\n for node in range(est.tree_.node_count):\n if (est.tree_.children_left[node] == TREE_LEAF or\n est.tree_.children_right[node] == TREE_LEAF):\n assert_greater_equal(est.tree_.impurity[node], 0,\n \"Failed with {0}, \"\n \"min_impurity_split={1}\".format(\n est.tree_.impurity[node],\n est.min_impurity_split))\n assert_less_equal(est.tree_.impurity[node], min_impurity_split,\n \"Failed with {0}, \"\n \"min_impurity_split={1}\".format(\n est.tree_.impurity[node],\n est.min_impurity_split))\n\n\ndef test_pickle():\n\n for name, TreeEstimator in ALL_TREES.items():\n if \"Classifier\" in name:\n X, y = iris.data, iris.target\n else:\n X, y = boston.data, boston.target\n\n est = TreeEstimator(random_state=0)\n est.fit(X, y)\n score = est.score(X, y)\n fitted_attribute = dict()\n for attribute in [\"max_depth\", \"node_count\", \"capacity\"]:\n fitted_attribute[attribute] = getattr(est.tree_, attribute)\n\n serialized_object = pickle.dumps(est)\n est2 = pickle.loads(serialized_object)\n 
assert_equal(type(est2), est.__class__)\n score2 = est2.score(X, y)\n assert_equal(score, score2,\n \"Failed to generate same score after pickling \"\n \"with {0}\".format(name))\n\n for attribute in fitted_attribute:\n assert_equal(getattr(est2.tree_, attribute),\n fitted_attribute[attribute],\n \"Failed to generate same attribute {0} after \"\n \"pickling with {1}\".format(attribute, name))\n\n\n\ndef test_multioutput():\n # Check estimators on multi-output problems.\n X = [[-2, -1],\n [-1, -1],\n [-1, -2],\n [1, 1],\n [1, 2],\n [2, 1],\n [-2, 1],\n [-1, 1],\n [-1, 2],\n [2, -1],\n [1, -1],\n [1, -2]]\n\n y = [[-1, 0],\n [-1, 0],\n [-1, 0],\n [1, 1],\n [1, 1],\n [1, 1],\n [-1, 2],\n [-1, 2],\n [-1, 2],\n [1, 3],\n [1, 3],\n [1, 3]]\n\n T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]\n y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]\n\n # toy classification problem\n for name, TreeClassifier in CLF_TREES.items():\n clf = TreeClassifier(random_state=0)\n y_hat = clf.fit(X, y).predict(T)\n assert_array_equal(y_hat, y_true)\n assert_equal(y_hat.shape, (4, 2))\n\n proba = clf.predict_proba(T)\n assert_equal(len(proba), 2)\n assert_equal(proba[0].shape, (4, 2))\n assert_equal(proba[1].shape, (4, 4))\n\n log_proba = clf.predict_log_proba(T)\n assert_equal(len(log_proba), 2)\n assert_equal(log_proba[0].shape, (4, 2))\n assert_equal(log_proba[1].shape, (4, 4))\n\n # toy regression problem\n for name, TreeRegressor in REG_TREES.items():\n reg = TreeRegressor(random_state=0)\n y_hat = reg.fit(X, y).predict(T)\n assert_almost_equal(y_hat, y_true)\n assert_equal(y_hat.shape, (4, 2))\n\n\ndef test_classes_shape():\n # Test that n_classes_ and classes_ have proper shape.\n for name, TreeClassifier in CLF_TREES.items():\n # Classification, single output\n clf = TreeClassifier(random_state=0)\n clf.fit(X, y)\n\n assert_equal(clf.n_classes_, 2)\n assert_array_equal(clf.classes_, [-1, 1])\n\n # Classification, multi-output\n _y = np.vstack((y, np.array(y) * 2)).T\n clf = TreeClassifier(random_state=0)\n clf.fit(X, _y)\n assert_equal(len(clf.n_classes_), 2)\n assert_equal(len(clf.classes_), 2)\n assert_array_equal(clf.n_classes_, [2, 2])\n assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])\n\n\ndef test_unbalanced_iris():\n # Check class rebalancing.\n unbalanced_X = iris.data[:125]\n unbalanced_y = iris.target[:125]\n sample_weight = compute_sample_weight(\"balanced\", unbalanced_y)\n\n for name, TreeClassifier in CLF_TREES.items():\n clf = TreeClassifier(random_state=0)\n clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)\n assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)\n\n\ndef test_memory_layout():\n # Check that it works no matter the memory layout\n for (name, TreeEstimator), dtype in product(ALL_TREES.items(),\n [np.float64, np.float32]):\n est = TreeEstimator(random_state=0)\n\n # Nothing\n X = np.asarray(iris.data, dtype=dtype)\n y = iris.target\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n # C-order\n X = np.asarray(iris.data, order=\"C\", dtype=dtype)\n y = iris.target\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n # F-order\n X = np.asarray(iris.data, order=\"F\", dtype=dtype)\n y = iris.target\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n # Contiguous\n X = np.ascontiguousarray(iris.data, dtype=dtype)\n y = iris.target\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n if not est.presort:\n # csr matrix\n X = csr_matrix(iris.data, dtype=dtype)\n y = iris.target\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n # csc_matrix\n X = 
csc_matrix(iris.data, dtype=dtype)\n y = iris.target\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n # Strided\n X = np.asarray(iris.data[::3], dtype=dtype)\n y = iris.target[::3]\n assert_array_equal(est.fit(X, y).predict(X), y)\n\n\ndef test_sample_weight():\n # Check sample weighting.\n # Test that zero-weighted samples are not taken into account\n X = np.arange(100)[:, np.newaxis]\n y = np.ones(100)\n y[:50] = 0.0\n\n sample_weight = np.ones(100)\n sample_weight[y == 0] = 0.0\n\n clf = DecisionTreeClassifier(random_state=0)\n clf.fit(X, y, sample_weight=sample_weight)\n assert_array_equal(clf.predict(X), np.ones(100))\n\n # Test that low weighted samples are not taken into account at low depth\n X = np.arange(200)[:, np.newaxis]\n y = np.zeros(200)\n y[50:100] = 1\n y[100:200] = 2\n X[100:200, 0] = 200\n\n sample_weight = np.ones(200)\n\n sample_weight[y == 2] = .51 # Samples of class '2' are still weightier\n clf = DecisionTreeClassifier(max_depth=1, random_state=0)\n clf.fit(X, y, sample_weight=sample_weight)\n assert_equal(clf.tree_.threshold[0], 149.5)\n\n sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier\n clf = DecisionTreeClassifier(max_depth=1, random_state=0)\n clf.fit(X, y, sample_weight=sample_weight)\n assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved\n\n # Test that sample weighting is the same as having duplicates\n X = iris.data\n y = iris.target\n\n duplicates = rng.randint(0, X.shape[0], 100)\n\n clf = DecisionTreeClassifier(random_state=1)\n clf.fit(X[duplicates], y[duplicates])\n\n sample_weight = np.bincount(duplicates, minlength=X.shape[0])\n clf2 = DecisionTreeClassifier(random_state=1)\n clf2.fit(X, y, sample_weight=sample_weight)\n\n internal = clf.tree_.children_left != tree._tree.TREE_LEAF\n assert_array_almost_equal(clf.tree_.threshold[internal],\n clf2.tree_.threshold[internal])\n\n\ndef test_sample_weight_invalid():\n # Check sample weighting raises errors.\n X = np.arange(100)[:, np.newaxis]\n y = np.ones(100)\n y[:50] = 0.0\n\n clf = DecisionTreeClassifier(random_state=0)\n\n sample_weight = np.random.rand(100, 1)\n assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)\n\n sample_weight = np.array(0)\n assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)\n\n sample_weight = np.ones(101)\n assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)\n\n sample_weight = np.ones(99)\n assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)\n\n\ndef check_class_weights(name):\n \"\"\"Check class_weights resemble sample_weights behavior.\"\"\"\n TreeClassifier = CLF_TREES[name]\n\n # Iris is balanced, so no effect expected for using 'balanced' weights\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target)\n clf2 = TreeClassifier(class_weight='balanced', random_state=0)\n clf2.fit(iris.data, iris.target)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n # Make a multi-output problem with three copies of Iris\n iris_multi = np.vstack((iris.target, iris.target, iris.target)).T\n # Create user-defined weights that should balance over the outputs\n clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},\n {0: 2., 1: 1., 2: 2.},\n {0: 1., 1: 2., 2: 2.}],\n random_state=0)\n clf3.fit(iris.data, iris_multi)\n assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)\n # Check against multi-output \"auto\" which should also have no effect\n clf4 = TreeClassifier(class_weight='balanced', 
random_state=0)\n clf4.fit(iris.data, iris_multi)\n assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)\n\n # Inflate importance of class 1, check against user-defined weights\n sample_weight = np.ones(iris.target.shape)\n sample_weight[iris.target == 1] *= 100\n class_weight = {0: 1., 1: 100., 2: 1.}\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target, sample_weight)\n clf2 = TreeClassifier(class_weight=class_weight, random_state=0)\n clf2.fit(iris.data, iris.target)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n # Check that sample_weight and class_weight are multiplicative\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target, sample_weight ** 2)\n clf2 = TreeClassifier(class_weight=class_weight, random_state=0)\n clf2.fit(iris.data, iris.target, sample_weight)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n\ndef test_class_weights():\n for name in CLF_TREES:\n yield check_class_weights, name\n\n\ndef check_class_weight_errors(name):\n # Test if class_weight raises errors and warnings when expected.\n TreeClassifier = CLF_TREES[name]\n _y = np.vstack((y, np.array(y) * 2)).T\n\n # Invalid preset string\n clf = TreeClassifier(class_weight='the larch', random_state=0)\n assert_raises(ValueError, clf.fit, X, y)\n assert_raises(ValueError, clf.fit, X, _y)\n\n # Not a list or preset for multi-output\n clf = TreeClassifier(class_weight=1, random_state=0)\n assert_raises(ValueError, clf.fit, X, _y)\n\n # Incorrect length list for multi-output\n clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)\n assert_raises(ValueError, clf.fit, X, _y)\n\n\ndef test_class_weight_errors():\n for name in CLF_TREES:\n yield check_class_weight_errors, name\n\n\ndef test_max_leaf_nodes():\n # Test greedy trees with max_depth + 1 leafs.\n from sklearn.tree._tree import TREE_LEAF\n X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)\n k = 4\n for name, TreeEstimator in ALL_TREES.items():\n est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)\n tree = est.tree_\n assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)\n\n # max_leaf_nodes in (0, 1) should raise ValueError\n est = TreeEstimator(max_depth=None, max_leaf_nodes=0)\n assert_raises(ValueError, est.fit, X, y)\n est = TreeEstimator(max_depth=None, max_leaf_nodes=1)\n assert_raises(ValueError, est.fit, X, y)\n est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)\n assert_raises(ValueError, est.fit, X, y)\n\n\ndef test_max_leaf_nodes_max_depth():\n # Test precedence of max_leaf_nodes over max_depth.\n X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)\n k = 4\n for name, TreeEstimator in ALL_TREES.items():\n est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)\n tree = est.tree_\n assert_greater(tree.max_depth, 1)\n\n\ndef test_arrays_persist():\n # Ensure property arrays' memory stays alive when tree disappears\n # non-regression for #2726\n for attr in ['n_classes', 'value', 'children_left', 'children_right',\n 'threshold', 'impurity', 'feature', 'n_node_samples']:\n value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)\n # if pointing to freed memory, contents may be arbitrary\n assert_true(-3 <= value.flat[0] < 3,\n 'Array points to arbitrary memory')\n\n\ndef test_only_constant_features():\n random_state = check_random_state(0)\n X = np.zeros((10, 20))\n y = random_state.randint(0, 2, (10, ))\n for name, TreeEstimator in 
ALL_TREES.items():\n est = TreeEstimator(random_state=0)\n est.fit(X, y)\n assert_equal(est.tree_.max_depth, 0)\n\n\ndef test_with_only_one_non_constant_features():\n X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),\n np.zeros((4, 1000))])\n\n y = np.array([0., 1., 0., 1.0])\n for name, TreeEstimator in CLF_TREES.items():\n est = TreeEstimator(random_state=0, max_features=1)\n est.fit(X, y)\n assert_equal(est.tree_.max_depth, 1)\n assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))\n\n for name, TreeEstimator in REG_TREES.items():\n est = TreeEstimator(random_state=0, max_features=1)\n est.fit(X, y)\n assert_equal(est.tree_.max_depth, 1)\n assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))\n\n\ndef test_big_input():\n # Test if the warning for too large inputs is appropriate.\n X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)\n clf = DecisionTreeClassifier()\n try:\n clf.fit(X, [0, 1, 0, 1])\n except ValueError as e:\n assert_in(\"float32\", str(e))\n\n\ndef test_realloc():\n from sklearn.tree._utils import _realloc_test\n assert_raises(MemoryError, _realloc_test)\n\n\ndef test_huge_allocations():\n n_bits = 8 * struct.calcsize(\"P\")\n\n X = np.random.randn(10, 2)\n y = np.random.randint(0, 2, 10)\n\n # Sanity check: we cannot request more memory than the size of the address\n # space. Currently raises OverflowError.\n huge = 2 ** (n_bits + 1)\n clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)\n assert_raises(Exception, clf.fit, X, y)\n\n # Non-regression test: MemoryError used to be dropped by Cython\n # because of missing \"except *\".\n huge = 2 ** (n_bits - 1) - 1\n clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)\n assert_raises(MemoryError, clf.fit, X, y)\n\n\ndef check_sparse_input(tree, dataset, max_depth=None):\n TreeEstimator = ALL_TREES[tree]\n X = DATASETS[dataset][\"X\"]\n X_sparse = DATASETS[dataset][\"X_sparse\"]\n y = DATASETS[dataset][\"y\"]\n\n # Gain testing time\n if dataset in [\"digits\", \"boston\"]:\n n_samples = X.shape[0] \/\/ 5\n X = X[:n_samples]\n X_sparse = X_sparse[:n_samples]\n y = y[:n_samples]\n\n for sparse_format in (csr_matrix, csc_matrix, coo_matrix):\n X_sparse = sparse_format(X_sparse)\n\n # Check the default (depth first search)\n d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)\n s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)\n\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n\n y_pred = d.predict(X)\n if tree in CLF_TREES:\n y_proba = d.predict_proba(X)\n y_log_proba = d.predict_log_proba(X)\n\n for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):\n X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)\n\n assert_array_almost_equal(s.predict(X_sparse_test), y_pred)\n\n if tree in CLF_TREES:\n assert_array_almost_equal(s.predict_proba(X_sparse_test),\n y_proba)\n assert_array_almost_equal(s.predict_log_proba(X_sparse_test),\n y_log_proba)\n\n\ndef test_sparse_input():\n for tree, dataset in product(SPARSE_TREES,\n (\"clf_small\", \"toy\", \"digits\", \"multilabel\",\n \"sparse-pos\", \"sparse-neg\", \"sparse-mix\",\n \"zeros\")):\n max_depth = 3 if dataset == \"digits\" else None\n yield (check_sparse_input, tree, dataset, max_depth)\n\n # Due to numerical instability of MSE and too strict test, we limit the\n # maximal depth\n for tree, dataset in product(REG_TREES, [\"boston\", \"reg_small\"]):\n if tree in SPARSE_TREES:\n yield (check_sparse_input, tree, 
dataset, 2)\n\n\ndef check_sparse_parameters(tree, dataset):\n TreeEstimator = ALL_TREES[tree]\n X = DATASETS[dataset][\"X\"]\n X_sparse = DATASETS[dataset][\"X_sparse\"]\n y = DATASETS[dataset][\"y\"]\n\n # Check max_features\n d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)\n s = TreeEstimator(random_state=0, max_features=1,\n max_depth=2).fit(X_sparse, y)\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n assert_array_almost_equal(s.predict(X), d.predict(X))\n\n # Check min_samples_split\n d = TreeEstimator(random_state=0, max_features=1,\n min_samples_split=10).fit(X, y)\n s = TreeEstimator(random_state=0, max_features=1,\n min_samples_split=10).fit(X_sparse, y)\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n assert_array_almost_equal(s.predict(X), d.predict(X))\n\n # Check min_samples_leaf\n d = TreeEstimator(random_state=0,\n min_samples_leaf=X_sparse.shape[0] \/\/ 2).fit(X, y)\n s = TreeEstimator(random_state=0,\n min_samples_leaf=X_sparse.shape[0] \/\/ 2).fit(X_sparse, y)\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n assert_array_almost_equal(s.predict(X), d.predict(X))\n\n # Check best-first search\n d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)\n s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n assert_array_almost_equal(s.predict(X), d.predict(X))\n\n\ndef test_sparse_parameters():\n for tree, dataset in product(SPARSE_TREES,\n [\"sparse-pos\", \"sparse-neg\", \"sparse-mix\",\n \"zeros\"]):\n yield (check_sparse_parameters, tree, dataset)\n\n\ndef check_sparse_criterion(tree, dataset):\n TreeEstimator = ALL_TREES[tree]\n X = DATASETS[dataset][\"X\"]\n X_sparse = DATASETS[dataset][\"X_sparse\"]\n y = DATASETS[dataset][\"y\"]\n\n # Check various criterion\n CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS\n for criterion in CRITERIONS:\n d = TreeEstimator(random_state=0, max_depth=3,\n criterion=criterion).fit(X, y)\n s = TreeEstimator(random_state=0, max_depth=3,\n criterion=criterion).fit(X_sparse, y)\n\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n assert_array_almost_equal(s.predict(X), d.predict(X))\n\n\ndef test_sparse_criterion():\n for tree, dataset in product(SPARSE_TREES,\n [\"sparse-pos\", \"sparse-neg\", \"sparse-mix\",\n \"zeros\"]):\n yield (check_sparse_criterion, tree, dataset)\n\n\ndef check_explicit_sparse_zeros(tree, max_depth=3,\n n_features=10):\n TreeEstimator = ALL_TREES[tree]\n\n # n_samples set n_feature to ease construction of a simultaneous\n # construction of a csr and csc matrix\n n_samples = n_features\n samples = np.arange(n_samples)\n\n # Generate X, y\n random_state = check_random_state(0)\n indices = []\n data = []\n offset = 0\n indptr = [offset]\n for i in range(n_features):\n n_nonzero_i = random_state.binomial(n_samples, 0.5)\n indices_i = random_state.permutation(samples)[:n_nonzero_i]\n indices.append(indices_i)\n data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1\n data.append(data_i)\n offset += n_nonzero_i\n indptr.append(offset)\n\n indices = np.concatenate(indices)\n data = np.array(np.concatenate(data), dtype=np.float32)\n X_sparse = 
csc_matrix((data, indices, indptr),\n shape=(n_samples, n_features))\n X = X_sparse.toarray()\n X_sparse_test = csr_matrix((data, indices, indptr),\n shape=(n_samples, n_features))\n X_test = X_sparse_test.toarray()\n y = random_state.randint(0, 3, size=(n_samples, ))\n\n # Ensure that X_sparse_test owns its data, indices and indptr array\n X_sparse_test = X_sparse_test.copy()\n\n # Ensure that we have explicit zeros\n assert_greater((X_sparse.data == 0.).sum(), 0)\n assert_greater((X_sparse_test.data == 0.).sum(), 0)\n\n # Perform the comparison\n d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)\n s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)\n\n assert_tree_equal(d.tree_, s.tree_,\n \"{0} with dense and sparse format gave different \"\n \"trees\".format(tree))\n\n Xs = (X_test, X_sparse_test)\n for X1, X2 in product(Xs, Xs):\n assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))\n assert_array_almost_equal(s.apply(X1), d.apply(X2))\n assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))\n\n assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),\n d.tree_.decision_path(X2).toarray())\n assert_array_almost_equal(s.decision_path(X1).toarray(),\n d.decision_path(X2).toarray())\n assert_array_almost_equal(s.decision_path(X1).toarray(),\n s.tree_.decision_path(X1).toarray())\n\n assert_array_almost_equal(s.predict(X1), d.predict(X2))\n\n if tree in CLF_TREES:\n assert_array_almost_equal(s.predict_proba(X1),\n d.predict_proba(X2))\n\n\ndef test_explicit_sparse_zeros():\n for tree in SPARSE_TREES:\n yield (check_explicit_sparse_zeros, tree)\n\n\n@ignore_warnings\ndef check_raise_error_on_1d_input(name):\n TreeEstimator = ALL_TREES[name]\n\n X = iris.data[:, 0].ravel()\n X_2d = iris.data[:, 0].reshape((-1, 1))\n y = iris.target\n\n assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)\n\n est = TreeEstimator(random_state=0)\n est.fit(X_2d, y)\n assert_raises(ValueError, est.predict, [X])\n\n\n@ignore_warnings\ndef test_1d_input():\n for name in ALL_TREES:\n yield check_raise_error_on_1d_input, name\n\n\ndef _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):\n # Private function to keep pretty printing in nose yielded tests\n est = TreeEstimator(random_state=0)\n est.fit(X, y, sample_weight=sample_weight)\n assert_equal(est.tree_.max_depth, 1)\n\n est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)\n est.fit(X, y, sample_weight=sample_weight)\n assert_equal(est.tree_.max_depth, 0)\n\n\ndef check_min_weight_leaf_split_level(name):\n TreeEstimator = ALL_TREES[name]\n\n X = np.array([[0], [0], [0], [0], [1]])\n y = [0, 0, 0, 0, 1]\n sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]\n _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)\n\n if not TreeEstimator().presort:\n _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,\n sample_weight)\n\n\ndef test_min_weight_leaf_split_level():\n for name in ALL_TREES:\n yield check_min_weight_leaf_split_level, name\n\n\ndef check_public_apply(name):\n X_small32 = X_small.astype(tree._tree.DTYPE)\n\n est = ALL_TREES[name]()\n est.fit(X_small, y_small)\n assert_array_equal(est.apply(X_small),\n est.tree_.apply(X_small32))\n\n\ndef check_public_apply_sparse(name):\n X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))\n\n est = ALL_TREES[name]()\n est.fit(X_small, y_small)\n assert_array_equal(est.apply(X_small),\n est.tree_.apply(X_small32))\n\n\ndef test_public_apply():\n for name in ALL_TREES:\n yield 
(check_public_apply, name)\n\n for name in SPARSE_TREES:\n yield (check_public_apply_sparse, name)\n\n\ndef check_presort_sparse(est, X, y):\n assert_raises(ValueError, est.fit, X, y)\n\n\ndef test_presort_sparse():\n ests = (DecisionTreeClassifier(presort=True),\n DecisionTreeRegressor(presort=True))\n sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)\n\n y, X = datasets.make_multilabel_classification(random_state=0,\n n_samples=50,\n n_features=1,\n n_classes=20)\n y = y[:, 0]\n\n for est, sparse_matrix in product(ests, sparse_matrices):\n yield check_presort_sparse, est, sparse_matrix(X), y\n\n\ndef test_decision_path_hardcoded():\n X = iris.data\n y = iris.target\n est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)\n node_indicator = est.decision_path(X[:2]).toarray()\n assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])\n\n\ndef check_decision_path(name):\n X = iris.data\n y = iris.target\n n_samples = X.shape[0]\n\n TreeEstimator = ALL_TREES[name]\n est = TreeEstimator(random_state=0, max_depth=2)\n est.fit(X, y)\n\n node_indicator_csr = est.decision_path(X)\n node_indicator = node_indicator_csr.toarray()\n assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))\n\n # Assert that leaves index are correct\n leaves = est.apply(X)\n leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]\n assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))\n\n # Ensure only one leave node per sample\n all_leaves = est.tree_.children_left == TREE_LEAF\n assert_array_almost_equal(np.dot(node_indicator, all_leaves),\n np.ones(shape=n_samples))\n\n # Ensure max depth is consistent with sum of indicator\n max_depth = node_indicator.sum(axis=1).max()\n assert_less_equal(est.tree_.max_depth, max_depth)\n\n\ndef test_decision_path():\n for name in ALL_TREES:\n yield (check_decision_path, name)\n\n\ndef check_no_sparse_y_support(name):\n X, y = X_multilabel, csr_matrix(y_multilabel)\n TreeEstimator = ALL_TREES[name]\n assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)\n\n\ndef test_no_sparse_y_support():\n # Currently we don't support sparse y\n for name in ALL_TREES:\n yield (check_no_sparse_y_support, name)\n\ndef test_mae():\n # check MAE criterion produces correct results\n # on small toy dataset\n dt_mae = DecisionTreeRegressor(random_state=0, criterion=\"mae\",\n max_leaf_nodes=2)\n dt_mae.fit([[3],[5],[3],[8],[5]],[6,7,3,4,3])\n assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0\/3.0])\n assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])\n\n dt_mae.fit([[3],[5],[3],[8],[5]],[6,7,3,4,3], [0.6,0.3,0.1,1.0,0.3])\n assert_array_equal(dt_mae.tree_.impurity, [7.0\/2.3, 3.0\/0.7, 4.0\/1.6])\n assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])\n","license":"bsd-3-clause"} {"repo_name":"andaag\/scikit-learn","path":"examples\/semi_supervised\/plot_label_propagation_versus_svm_iris.py","copies":"286","size":"2378","content":"\"\"\"\n=====================================================================\nDecision boundary of label propagation versus SVM on the Iris dataset\n=====================================================================\n\nComparison for decision boundary generated on iris dataset\nbetween Label Propagation and SVM.\n\nThis demonstrates Label Propagation learning a good boundary\neven with a small amount of labeled data.\n\n\"\"\"\nprint(__doc__)\n\n# Authors: Clay Woolam \n# Licence: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom 
sklearn import svm\nfrom sklearn.semi_supervised import label_propagation\n\nrng = np.random.RandomState(0)\n\niris = datasets.load_iris()\n\nX = iris.data[:, :2]\ny = iris.target\n\n# step size in the mesh\nh = .02\n\ny_30 = np.copy(y)\ny_30[rng.rand(len(y)) < 0.3] = -1\ny_50 = np.copy(y)\ny_50[rng.rand(len(y)) < 0.5] = -1\n# we create an instance of SVM and fit out data. We do not scale our\n# data since we want to plot the support vectors\nls30 = (label_propagation.LabelSpreading().fit(X, y_30),\n y_30)\nls50 = (label_propagation.LabelSpreading().fit(X, y_50),\n y_50)\nls100 = (label_propagation.LabelSpreading().fit(X, y), y)\nrbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)\n\n# create a mesh to plot in\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n# title for the plots\ntitles = ['Label Spreading 30% data',\n 'Label Spreading 50% data',\n 'Label Spreading 100% data',\n 'SVC with rbf kernel']\n\ncolor_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}\n\nfor i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n plt.subplot(2, 2, i + 1)\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)\n plt.axis('off')\n\n # Plot also the training points\n colors = [color_map[y] for y in y_train]\n plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)\n\n plt.title(titles[i])\n\nplt.text(.90, 0, \"Unlabeled points are colored white\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"liberatorqjw\/scikit-learn","path":"sklearn\/tree\/export.py","copies":"30","size":"4529","content":"\"\"\"\nThis module defines export functions for decision trees.\n\"\"\"\n\n# Authors: Gilles Louppe \n# Peter Prettenhofer \n# Brian Holt \n# Noel Dawe \n# Satrajit Gosh \n# Licence: BSD 3 clause\n\nfrom ..externals import six\n\nfrom . import _tree\n\n\ndef export_graphviz(decision_tree, out_file=\"tree.dot\", feature_names=None,\n max_depth=None):\n \"\"\"Export a decision tree in DOT format.\n\n This function generates a GraphViz representation of the decision tree,\n which is then written into `out_file`. Once exported, graphical renderings\n can be generated using, for example::\n\n $ dot -Tps tree.dot -o tree.ps (PostScript format)\n $ dot -Tpng tree.dot -o tree.png (PNG format)\n\n The sample counts that are shown are weighted with any sample_weights that\n might be present.\n\n Parameters\n ----------\n decision_tree : decision tree classifier\n The decision tree to be exported to GraphViz.\n\n out_file : file object or string, optional (default=\"tree.dot\")\n Handle or name of the output file.\n\n feature_names : list of strings, optional (default=None)\n Names of each of the features.\n\n max_depth : int, optional (default=None)\n The maximum depth of the representation. If None, the tree is fully\n generated.\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn import tree\n\n >>> clf = tree.DecisionTreeClassifier()\n >>> iris = load_iris()\n\n >>> clf = clf.fit(iris.data, iris.target)\n >>> tree.export_graphviz(clf,\n ... 
out_file='tree.dot') # doctest: +SKIP\n \"\"\"\n def node_to_str(tree, node_id, criterion):\n if not isinstance(criterion, six.string_types):\n criterion = \"impurity\"\n\n value = tree.value[node_id]\n if tree.n_outputs == 1:\n value = value[0, :]\n\n if tree.children_left[node_id] == _tree.TREE_LEAF:\n return \"%s = %.4f\\\\nsamples = %s\\\\nvalue = %s\" \\\n % (criterion,\n tree.impurity[node_id],\n tree.n_node_samples[node_id],\n value)\n else:\n if feature_names is not None:\n feature = feature_names[tree.feature[node_id]]\n else:\n feature = \"X[%s]\" % tree.feature[node_id]\n\n return \"%s <= %.4f\\\\n%s = %s\\\\nsamples = %s\" \\\n % (feature,\n tree.threshold[node_id],\n criterion,\n tree.impurity[node_id],\n tree.n_node_samples[node_id])\n\n def recurse(tree, node_id, criterion, parent=None, depth=0):\n if node_id == _tree.TREE_LEAF:\n raise ValueError(\"Invalid node_id %s\" % _tree.TREE_LEAF)\n\n left_child = tree.children_left[node_id]\n right_child = tree.children_right[node_id]\n\n # Add node with description\n if max_depth is None or depth <= max_depth:\n out_file.write('%d [label=\"%s\", shape=\"box\"] ;\\n' %\n (node_id, node_to_str(tree, node_id, criterion)))\n\n if parent is not None:\n # Add edge to parent\n out_file.write('%d -> %d ;\\n' % (parent, node_id))\n\n if left_child != _tree.TREE_LEAF:\n recurse(tree, left_child, criterion=criterion, parent=node_id,\n depth=depth + 1)\n recurse(tree, right_child, criterion=criterion, parent=node_id,\n depth=depth + 1)\n\n else:\n out_file.write('%d [label=\"(...)\", shape=\"box\"] ;\\n' % node_id)\n\n if parent is not None:\n # Add edge to parent\n out_file.write('%d -> %d ;\\n' % (parent, node_id))\n\n own_file = False\n try:\n if isinstance(out_file, six.string_types):\n if six.PY3:\n out_file = open(out_file, \"w\", encoding=\"utf-8\")\n else:\n out_file = open(out_file, \"wb\")\n own_file = True\n\n out_file.write(\"digraph Tree {\\n\")\n\n if isinstance(decision_tree, _tree.Tree):\n recurse(decision_tree, 0, criterion=\"impurity\")\n else:\n recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)\n out_file.write(\"}\")\n\n finally:\n if own_file:\n out_file.close()\n","license":"bsd-3-clause"} {"repo_name":"crichardson17\/starburst_atlas","path":"Low_resolution_sims\/DustFree_LowRes\/Padova_inst\/padova_inst_6\/Optical1.py","copies":"33","size":"7366","content":"import csv\nimport matplotlib.pyplot as plt\nfrom numpy import *\nimport scipy.interpolate\nimport math \nfrom pylab import *\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nimport matplotlib.patches as patches\nfrom matplotlib.path import Path\nimport os\n\n# ------------------------------------------------------------------------------------------------------\n#inputs\nfor file in os.listdir('.'):\n if file.endswith(\".grd\"):\n \tinputfile = file\n\nfor file in os.listdir('.'):\n if file.endswith(\".txt\"):\n \tinputfile2 = file\n# ------------------------------------------------------------------------------------------------------\n#Patches data\n\n#for the Kewley and Levesque data\nverts = [\n (1., 7.97712125471966000000), # left, bottom\n (1., 9.57712125471966000000), # left, top\n (2., 10.57712125471970000000), # right, top\n (2., 8.97712125471966000000), # right, bottom\n (0., 0.), # ignored\n ]\ncodes = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO,\n Path.CLOSEPOLY,\n ]\n\npath = Path(verts, codes)\n# ------------------------\n#for the Kewley 01 data\nverts2 = [\n (2.4, 9.243038049), # left, bottom\n (2.4, 
11.0211893), # left, top\n (2.6, 11.0211893), # right, top\n (2.6, 9.243038049), # right, bottom\n (0, 0.), # ignored\n ]\npath = Path(verts, codes)\npath2 = Path(verts2, codes)\n# -------------------------\n#for the Moy et al data\nverts3 = [\n (1., 6.86712125471966000000), # left, bottom\n (1., 10.18712125471970000000), # left, top\n (3., 12.18712125471970000000), # right, top\n (3., 8.86712125471966000000), # right, bottom\n (0., 0.), # ignored\n ]\npath = Path(verts, codes)\npath3 = Path(verts3, codes)\n# ------------------------------------------------------------------------------------------------------\n\n#the routine to add patches for others peoples' data onto our plots. \ndef add_patches(ax):\n\tpatch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)\n\tpatch2 = patches.PathPatch(path2, facecolor='green', lw=0)\n\tpatch = patches.PathPatch(path, facecolor='red', lw=0)\n\tax1.add_patch(patch3)\n\tax1.add_patch(patch2)\n\tax1.add_patch(patch)\n# ------------------------------------------------------------------------------------------------------\n\n#the subplot routine\n\ndef add_sub_plot(sub_num):\n\tnumplots = 16\n\n\tplt.subplot(numplots\/4.,4,sub_num)\n\t\n\trbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')\n\tzi = rbf(xi, yi)\n\n\tcontour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')\n\tcontour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)\n\t\n\tplt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')\n\tplt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)\n\tplt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)\n\t\n\tif sub_num == numplots \/ 2.:\n\t\tprint \"half the plots are complete\"\n#axis limits\n\n\tyt_min = 8\n\tyt_max = 23\n\txt_min = 0\n\txt_max = 12\n\tplt.ylim(yt_min,yt_max)\n\tplt.xlim(xt_min,xt_max) \n\tplt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)\n\tplt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)\n\n\tif sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:\n\t\tplt.tick_params(labelleft = 'off')\n\telse:\n\t\tplt.tick_params(labelleft = 'on')\n\t\tplt.ylabel('Log ($ \\phi _{\\mathrm{H}} $)')\n\n\tif sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:\n\t\tplt.tick_params(labelbottom = 'off')\n\telse: \t\t\n\t\tplt.tick_params(labelbottom = 'on')\n\t\tplt.xlabel('Log($n _{\\mathrm{H}} $)')\n\n\tif sub_num == 1:\n\t\tplt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)\n\tif sub_num == 13:\n\t\tplt.yticks(arange(yt_min,yt_max,1),fontsize=10)\n\t\tplt.xticks(arange(xt_min,xt_max,1), fontsize = 10)\n\tif sub_num == 16 :\n\t\tplt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)\n\n# ---------------------------------------------------\n#this is where the grid information (phi and hdens) is read in and saved to grid. 
\ngrid = [];\nwith open(inputfile, 'rb') as f:\n\tcsvReader = csv.reader(f,delimiter='\\t')\n\tfor row in csvReader:\n\t\tgrid.append(row);\n\tgrid = asarray(grid)\n\n#here is where the data for each line is read in and saved to dataEmissionlines\ndataEmissionlines = [];\nwith open(inputfile2, 'rb') as f:\n\tcsvReader = csv.reader(f,delimiter='\\t')\n\theaders = csvReader.next()\n\tfor row in csvReader:\n\t\tdataEmissionlines.append(row);\t\t\n\tdataEmissionlines = asarray(dataEmissionlines)\nprint \"import files complete\"\n# ---------------------------------------------------\n#for grid\nphi_values = grid[1:len(dataEmissionlines)+1,6]\nhdens_values = grid[1:len(dataEmissionlines)+1,7]\n\n#for lines\nheaders = headers[1:]\nEmissionlines = dataEmissionlines[:, 1:]\nconcatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))\nmax_values = zeros((len(Emissionlines[0]),4))\n#select the scaling factor\n\n#for 1215\n#incident = Emissionlines[1:,4] \n\n#for 4860\nincident = Emissionlines[:,57] \n\n#take the ratio of incident and all the lines and put it all in an array concatenated_data\n\nfor i in range(len(Emissionlines)):\n\tfor j in range(len(Emissionlines[0])):\n\t\t\tif math.log(4860.*(float(Emissionlines[i,j])\/float(Emissionlines[i,57])), 10) > 0:\n\t\t\t\tconcatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])\/float(Emissionlines[i,57])), 10)\n\t\t\telse:\n\t\t\t\tconcatenated_data[i,j] == 0\n# for 1215\n#for i in range(len(Emissionlines)):\n#\tfor j in range(len(Emissionlines[0])):\n#\t\t\tif math.log(1215.*(float(Emissionlines[i,j])\/float(Emissionlines[i,4])), 10) > 0:\n#\t\t\t\tconcatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])\/float(Emissionlines[i,4])), 10)\n#\t\t\telse:\n#\t\t\t\tconcatenated_data[i,j] == 0\n\n\n#find the maxima to plot onto the contour plots\nfor j in range(len(concatenated_data[0])):\n\tmax_values[j,0] = max(concatenated_data[:,j])\n\tmax_values[j,1] = argmax(concatenated_data[:,j], axis = 0)\n\tmax_values[j,2] = hdens_values[max_values[j,1]]\n\tmax_values[j,3] = phi_values[max_values[j,1]]\n\n#to round off the maxima \nmax_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]\nprint \"data arranged\"\n\n# ---------------------------------------------------\n\n#Creating the grid to interpolate with for contours. 
\ngridarray = zeros((len(Emissionlines),2))\ngridarray[:,0] = hdens_values\ngridarray[:,1] = phi_values\n\nx = gridarray[:,0]\ny = gridarray[:,1]\n\n#change desired lines here!\n\nline = [36, #NE 3 3343A\n\t\t38, #BA C\n\t\t39, #3646\n\t\t40, #3726\n\t\t41, #3727\n\t\t42, #3729\n\t\t43, #3869\n\t\t44, #3889\n\t\t45, #3933\n\t\t46, #4026\n\t\t47, #4070\n\t\t48, #4074\n\t\t49, #4078\n\t\t50, #4102\n\t\t51, #4340\n\t\t52] #4363\n#create z array for this plot\nz = concatenated_data[:,line[:]]\n\n# ---------------------------------------------------\n# Interpolate\nprint \"starting interpolation\"\nxi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10) \nxi, yi = meshgrid(xi, yi)\n# ---------------------------------------------------\nprint \"interpolatation complete; now plotting\"\n#plot\nplt.subplots_adjust(wspace=0, hspace=0) #remove space between plots\nlevels = arange(10**-1,10, .2)\nlevels2 = arange(10**-2,10**2, 1)\nplt.suptitle(\"Optical Lines\", fontsize=14)\n# ---------------------------------------------------\nfor i in range(16):\n\tadd_sub_plot(i)\nax1 = plt.subplot(4,4,1)\nadd_patches(ax1)\n\nprint \"complete\"\n\nplt.savefig('optical_lines.pdf')\nplt.clf()\n\n\n\n\n","license":"gpl-2.0"} {"repo_name":"rphlypo\/parietalretreat","path":"setup_data_path_salma.py","copies":"1","size":"6001","content":"import glob\nimport os.path\nfrom pandas import DataFrame\nimport pandas\n\n\ndef get_all_paths(data_set=None, root_dir=\"\/\"):\n # TODO\n # if data_set ... collections.Sequence\n # iterate over list\n if data_set is None:\n data_set = {\"hcp\", \"henson2010faces\", \"ds105\", \"ds107\"}\n list_ = list()\n head, tail_ = os.path.split(root_dir)\n counter = 0\n while tail_:\n head, tail_ = os.path.split(head)\n counter += 1\n\n if hasattr(data_set, \"__iter__\"):\n df_ = list()\n for ds in data_set:\n df_.append(get_all_paths(data_set=ds, root_dir=root_dir))\n df = pandas.concat(df_, keys=data_set)\n elif data_set.startswith(\"ds\") or data_set == \"henson2010faces\":\n base_path = os.path.join(root_dir,\n \"storage\/workspace\/brainpedia\/preproc\/\",\n data_set)\n with open(os.path.join(base_path, \"scan_key.txt\")) as file_:\n TR = file_.readline()[3:-1]\n for fun_path in glob.iglob(os.path.join(base_path,\n \"sub*\/model\/model*\/\"\n \"BOLD\/task*\/bold.nii.gz\")):\n head, tail_ = os.path.split(fun_path)\n tail = [tail_]\n while tail_:\n head, tail_ = os.path.split(head)\n tail.append(tail_)\n tail.reverse()\n subj_id = tail[6 + counter][-3:]\n model = tail[8 + counter][-3:]\n task, run = tail[10 + counter].split(\"_\")\n\n tmp_base = os.path.split(os.path.split(fun_path)[0])[0]\n\n anat = os.path.join(tmp_base,\n \"anatomy\",\n \"highres{}.nii.gz\".format(model[-3:]))\n\n onsets = glob.glob(os.path.join(tmp_base, \"onsets\",\n \"task{}_run{}\".format(task, run),\n \"cond*.txt\"))\n\n confds = os.path.join(os.path.split(fun_path)[0], \"motion.txt\")\n list_.append({\"subj_id\": subj_id,\n \"model\": model,\n \"task\": task[-3:],\n \"run\": run[-3:],\n \"func\": fun_path,\n \"anat\": anat,\n \"confds\": confds,\n \"TR\": TR})\n if onsets:\n list_[-1][\"onsets\"] = onsets\n\n df = DataFrame(list_)\n elif data_set == \"hcp\":\n base_path = os.path.join(root_dir, \"storage\/data\/HCP\/Q2\/\")\n for fun_path in glob.iglob(os.path.join(base_path,\n \"*\/MNINonLinear\/Results\/\",\n \"*\/*.nii.gz\")):\n\n head, tail = os.path.split(fun_path)\n if head[-2:] not in [\"LR\", \"RL\"]:\n continue\n tail = [tail]\n while head != \"\/\":\n head, t = os.path.split(head)\n 
tail.append(t)\n if tail[0][:-7] != tail[1]:\n continue\n tail.reverse()\n subj_id = tail[4 + counter]\n task = tail[7 + counter][6:-3]\n if tail[7 + counter].startswith(\"rfMRI\"):\n run = task[-1]\n task = task[:-1]\n mode = tail[7 + counter][-2:]\n\n anat = os.path.join(base_path, subj_id, \"MNINonLinear\/T1w.nii.gz\")\n\n confds = os.path.join(os.path.split(fun_path)[0],\n \"Movement_Regressors.txt\")\n list_.append({\"subj_id\": subj_id,\n \"task\": task,\n \"mode\": mode,\n \"func\": fun_path,\n \"anat\": anat,\n \"confds\": confds,\n \"TR\": 0.72})\n if tail[8 + counter].startswith(\"rfMRI\"):\n list_[-1][\"run\"] = run\n else:\n onsets = [onset\n for onset in glob.glob(os.path.join(\n os.path.split(fun_path)[0], \"EVs\/*.txt\"))\n if os.path.split(onset)[1][0] != \"S\"]\n list_[-1][\"onsets\"] = onsets\n df = DataFrame(list_)\n return df\n\n\nif __name__ == \"__main__\":\n from nilearn.input_data import MultiNiftiMasker, NiftiMapsMasker\n from joblib import Memory, Parallel, delayed\n import joblib\n from sklearn.base import clone\n import nibabel\n\n root_dir = \"\/media\/Elements\/volatile\/new\/salma\"\n\n mem = Memory(cachedir=os.path.join(root_dir,\n (\"storage\/workspace\/brainpedia\"\n \"\/preproc\/henson2010faces\/dump\/\")))\n print \"Loading all paths and variables into memory\"\n df = get_all_paths(root_dir=root_dir,\n data_set=[\"henson2010faces\"])\n target_affine_ = nibabel.load(df[\"func\"][0]).get_affine()\n target_shape_ = nibabel.load(df[\"func\"][0]).shape[:-1]\n print \"preparing and running MultiNiftiMasker\"\n mnm = MultiNiftiMasker(mask_strategy=\"epi\", memory=mem, n_jobs=1,\n verbose=10, target_affine=target_affine_,\n target_shape=target_shape_)\n mask_img = mnm.fit(list(df[\"func\"])).mask_img_\n print \"preparing and running NiftiMapsMasker\"\n nmm = NiftiMapsMasker(\n maps_img=os.path.join(\"\/usr\/share\/fsl\/data\/atlases\/HarvardOxford\/\",\n \"HarvardOxford-cortl-prob-2mm.nii.gz\"),\n mask_img=mask_img, detrend=True, smoothing_fwhm=5, standardize=True,\n low_pass=None, high_pass=None, memory=mem, verbose=10)\n region_ts = [clone(nmm).fit_transform(niimg, n_hv_confounds=5)\n for niimg in list(df[\"func\"])]\n joblib.dump(region_ts, \"\/home\/storage\/workspace\/rphlypo\/retreat\/results\/\")\n region_signals = DataFrame({\"region_signals\": region_ts}, index=df.index)\n df.join(region_signals)\n","license":"bsd-2-clause"} {"repo_name":"debsankha\/bedtime-programming","path":"ls222\/visual-lotka.py","copies":"1","size":"5120","content":"#!\/usr\/bin\/env python\nfrom math import *\nimport thread\nimport random\nimport time\nimport pygtk\npygtk.require(\"2.0\")\nimport gtk\nimport gtk.glade\nimport commands\nimport matplotlib.pyplot\n\nclass rodent:\n\tdef __init__(self):\n\t\tself.time_from_last_childbirth=0\n\nclass felix:\n\tdef __init__(self):\n\t\tself.size=0\n\t\tself.is_virgin=1\n\t\tself.reproduction_gap=0\n\t\tself.time_from_last_childbirth=0\n\t\tself.age=0\n\n\n#\tprint 'painted'\n\n\n\t\n\nclass gui_display:\n\tdef __init__(self):\n\t\tself.gladefile='.\/lvshort.glade'\n\t\tself.wTree = gtk.glade.XML(self.gladefile)\n\t\tdic={\"on_start_clicked\":self.dynamics,\"on_mainwin_destroy\":gtk.main_quit}\n\t\tself.wTree.signal_autoconnect(dic)\n\t\tself.wTree.get_widget(\"mainwin\").show()\n\t\tself.wTree.get_widget(\"image\").set_from_file(\".\/start.png\")\n\n\tdef visualize(self,catn,mousen):\n#\twhile True:\n\t\tnum=40\n\t\tsize=10\n\t\tcatno=catn*num**2\/(catn+mousen)\n\t\tcats=random.sample(range(num**2),catno)\n\t\n\t\tfor i in 
range(num**2):\n\t\t\tif i in cats:\n\t\t\t\tself.dic[i].color=visual.color.red\n\t\t\telse :\n\t\t\t\tself.dic[i].color=visual.color.green\n\t\n\tdef dynamics(self,*args,**kwargs):\n\t\tself.wTree.get_widget(\"image\").set_from_file(\".\/wait.png\")\n\t\tprint 'dynamics started'\n\t\tmouse_size=20\t\t\t\t\t\t\t\t#ind parameter\n\t\tcat_mature_size=60\t\t\t\t\t\t\t#ind parameter\n\t\t\n#\t\tcatch_rate=5*10**-4 \t #parameter\n#\t\tcat_efficiency=0.8 #parameter\n#\t\ta=0.2 #will get from slider\n#\t\tc=0.2 #will get from slider\n\t\t\n\t\tcat_catch_rate=self.wTree.get_widget(\"catchrate\").get_value()*10**-4 #parameter\n\t\tcat_efficiency=self.wTree.get_widget(\"efficiency\").get_value() #parameter\n\t\ta=self.wTree.get_widget(\"a\").get_value() #parameter\n\t\tc=self.wTree.get_widget(\"c\").get_value() #parameter \n\t\t\n\t\tmouse_no=1000\n\t\tcat_no=1000\n\t\tt=0\n\t\ttmax=200\n\t\tdt=1\n\t\t\n\t\ttimeli=[]\n\t\tmiceli=[]\n\t\tcatli=[]\n\t\t\t\n\t\tmice=[rodent() for i in range(mouse_no)]\n\t\tcats=[felix() for i in range(cat_no)]\n\t\t\n\t\tcatn=len(cats)\n\t\tmousen=len(mice)\n\t\t\n\t\tself.dic={}\n\t\tnum=40\n\t\tsize=10\n\t\tcatno=catn*num**2\/(catn+mousen)\n\t\tdisp_cats=random.sample(range(num**2),catno)\n\t\n\t\tif self.wTree.get_widget(\"anim\").get_active()==1:\n\t\t\tprint 'yay!'\n\t\t\tfor i in range(num**2):\n\t\t\t\tcoords=((i%num)*size*2-num*size,(i\/num)*size*2-num*size)\n\t\t\t\tif i in disp_cats:\n\t\t\t\t\tself.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.red)\n\t\t\t\telse :\n\t\t\t\t\tself.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.green)\n\t\tprint self.dic\n\t\tcatn=len(cats)\n\t\tmousen=len(mice)\n\t\tdata=open('tempdata.dat','w')\n\t\ttimestart=time.time()\n\t\twhile (len(mice)>0 or len(cats)>0) and t=1\/a:\n\t\t\t\t\tmouse.time_from_last_childbirth=0\n\t\t\t\t\tmice.append(rodent())\n\t\n\t\t\t\tmouse.time_from_last_childbirth+=dt\n\t\n\t\t\tind=0\n\t\t\twhile indrandom.uniform(0,1):\n\t\t\t\t\tcaught=random.randint(0,len(mice)-1)\n\t\t\t\t\tcat.size+=mouse_size*cat_efficiency\t\t\t#size increases\n\t\t\t\t\tmice.pop(caught)\n\t\t\t\t\n\t\t\t\tif cat.size>cat_mature_size:\n\t\t\t\t\tif cat.is_virgin:\n\t\t\t\t\t\tcat.is_virgin=0\n\t\t\t\t\t\tcat.reproduction_gap=cat.age\n\t\t\t\t\t\tcats.append(felix())\n\t\t\t\t\telse :\n\t\t\t\t\t\tif cat.time_from_last_childbirth>cat.reproduction_gap:\n\t\t\t\t\t\t\tcats.append(felix())\n\t\t\t\t\t\t\tcat.time_from_last_childbirth=0\n\t\n\t\t\t\tif cat.is_virgin==0:\n\t\t\t\t\tcat.time_from_last_childbirth+=dt\n\t\n\t\n\t\t\t\tif len(cats)>0:\n\t\t\t\t\tif c*dt*2*atan(0.05*len(cats))\/pi>random.uniform(0,1):\n\t\t\t\t\t\tcats.pop(ind)\n\t\t\t\t\telse :\n\t\t\t\t\t\tind+=1\n\t\t\t\telse :\n\t\t\t\t\tind+=1\n\t\n\t\n\t\t\ttimeli.append(t)\n\t\t\tmiceli.append(len(mice))\n\t\t\tcatli.append(len(cats))\n\t\t\tprint t,'\\t',len(mice),'\\t',len(cats)\n\t\t\tprint >> data, t,'\\t',len(mice),'\\t',len(cats)\n\t\n\t\t\tt+=dt\n\t\tdata.close()\n\t\t\n\t\tupper_limit=1.2*len(mice)\n\t\tpltfile=open('lv.plt','w')\n\t\tprint >> pltfile,\"\"\"se te png\nse o \"\/tmp\/lv.png\"\nunse ke\n#se yrange [0:%f]\nse xl \"Time\"\nse yl \"Number of Prey\/Predator\"\np 'tempdata.dat' u 1:2 w l,'tempdata.dat' u 1:3 w l\n\"\"\"%upper_limit\n\t\tpltfile.close()\n\t\tcommands.getoutput('gnuplot lv.plt')\n\t\tself.wTree.get_widget(\"image\").set_from_file(\"\/tmp\/lv.png\")\n\t\tprint 'dynamics 
ended'\n\t\treload(matplotlib.pyplot)\n\t\tmatplotlib.pyplot.plot(timeli,catli,'g-')#timeli,catli,'r-')\n\t\tmatplotlib.pyplot.xlabel(\"Time\")\n\t\tmatplotlib.pyplot.ylabel(\"Number of mice and cats\")\n\t\tmatplotlib.pyplot.show()\n\ngui=gui_display()\ngtk.main()\n\n\n#dynamics()\n\n#import matplotlib.pyplot as plt\n#plt.plot(timeli,miceli,'go',timeli,catli,'ro')\n#plt.show()\n\n\n","license":"gpl-3.0"} {"repo_name":"blaze\/distributed","path":"distributed\/protocol\/tests\/test_collection_cuda.py","copies":"1","size":"2448","content":"import pytest\n\nfrom distributed.protocol import serialize, deserialize\nfrom dask.dataframe.utils import assert_eq\nimport pandas as pd\n\n\n@pytest.mark.parametrize(\"collection\", [tuple, dict])\n@pytest.mark.parametrize(\"y,y_serializer\", [(50, \"cuda\"), (None, \"pickle\")])\ndef test_serialize_cupy(collection, y, y_serializer):\n cupy = pytest.importorskip(\"cupy\")\n\n x = cupy.arange(100)\n if y is not None:\n y = cupy.arange(y)\n if issubclass(collection, dict):\n header, frames = serialize(\n {\"x\": x, \"y\": y}, serializers=(\"cuda\", \"dask\", \"pickle\")\n )\n else:\n header, frames = serialize((x, y), serializers=(\"cuda\", \"dask\", \"pickle\"))\n t = deserialize(header, frames, deserializers=(\"cuda\", \"dask\", \"pickle\", \"error\"))\n\n assert header[\"is-collection\"] is True\n sub_headers = header[\"sub-headers\"]\n assert sub_headers[0][\"serializer\"] == \"cuda\"\n assert sub_headers[1][\"serializer\"] == y_serializer\n assert isinstance(t, collection)\n\n assert ((t[\"x\"] if isinstance(t, dict) else t[0]) == x).all()\n if y is None:\n assert (t[\"y\"] if isinstance(t, dict) else t[1]) is None\n else:\n assert ((t[\"y\"] if isinstance(t, dict) else t[1]) == y).all()\n\n\n@pytest.mark.parametrize(\"collection\", [tuple, dict])\n@pytest.mark.parametrize(\n \"df2,df2_serializer\",\n [(pd.DataFrame({\"C\": [3, 4, 5], \"D\": [2.5, 3.5, 4.5]}), \"cuda\"), (None, \"pickle\")],\n)\ndef test_serialize_pandas_pandas(collection, df2, df2_serializer):\n cudf = pytest.importorskip(\"cudf\")\n\n df1 = cudf.DataFrame({\"A\": [1, 2, None], \"B\": [1.0, 2.0, None]})\n if df2 is not None:\n df2 = cudf.from_pandas(df2)\n if issubclass(collection, dict):\n header, frames = serialize(\n {\"df1\": df1, \"df2\": df2}, serializers=(\"cuda\", \"dask\", \"pickle\")\n )\n else:\n header, frames = serialize((df1, df2), serializers=(\"cuda\", \"dask\", \"pickle\"))\n t = deserialize(header, frames, deserializers=(\"cuda\", \"dask\", \"pickle\"))\n\n assert header[\"is-collection\"] is True\n sub_headers = header[\"sub-headers\"]\n assert sub_headers[0][\"serializer\"] == \"cuda\"\n assert sub_headers[1][\"serializer\"] == df2_serializer\n assert isinstance(t, collection)\n\n assert_eq(t[\"df1\"] if isinstance(t, dict) else t[0], df1)\n if df2 is None:\n assert (t[\"df2\"] if isinstance(t, dict) else t[1]) is None\n else:\n assert_eq(t[\"df2\"] if isinstance(t, dict) else t[1], df2)\n","license":"bsd-3-clause"} {"repo_name":"nicholaschris\/landsatpy","path":"utils.py","copies":"1","size":"2693","content":"import operator\nimport pandas as pd\nimport numpy as np\nfrom numpy import ma\nfrom scipy.misc import imresize\nimport scipy.ndimage as ndimage\nfrom skimage.morphology import disk, dilation\n\ndef get_truth(input_one, input_two, comparison): # too much abstraction\n ops = {'>': operator.gt,\n '<': operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=': operator.eq}\n return ops[comparison](input_one, input_two)\n \ndef 
convert_to_celsius(brightness_temp_input):\n return brightness_temp_input - 272.15\n \ndef calculate_percentile(input_masked_array, percentile): \n flat_fill_input = input_masked_array.filled(np.nan).flatten()\n df = pd.DataFrame(flat_fill_input)\n percentile = df.quantile(percentile\/100.0)\n return percentile[0]\n \ndef save_object(obj, filename):\n import pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output)\n\ndef downsample(input_array, factor=4):\n output_array = input_array[::2, ::2] \/ 4 + input_array[1::2, ::2] \/ 4 + input_array[::2, 1::2] \/ 4 + input_array[1::2, 1::2] \/ 4\n return output_array\n\ndef dilate_boolean_array(input_array, disk_size=3):\n selem = disk(disk_size)\n dilated = dilation(input_array, selem)\n return dilated\n\ndef get_resized_array(img, size):\n lena = imresize(img, (size, size))\n return lena\n\ndef interp_and_resize(array, new_length):\n orig_y_length, orig_x_length = array.shape\n\n interp_factor_y = new_length \/ orig_y_length\n interp_factor_x = new_length \/ orig_x_length\n\n\n y = round(interp_factor_y * orig_y_length)\n x = round(interp_factor_x * orig_x_length)\n # http:\/\/docs.scipy.org\/doc\/numpy\/reference\/generated\/numpy.mgrid.html\n new_indicies = np.mgrid[0:orig_y_length:y * 1j, 0:orig_x_length:x * 1j]\n # order=1 indicates bilinear interpolation.\n interp_array = ndimage.map_coordinates(array, new_indicies, \n order=1, output=array.dtype)\n interp_array = interp_array.reshape((y, x))\n return interp_array\n\ndef parse_mtl(in_file):\n awesome = True\n f = open(in_file, 'r')\n print(in_file)\n mtl_dict = {}\n with open(in_file, 'r') as f:\n while awesome:\n line = f.readline()\n if line.strip() == '' or line.strip() == 'END':\n return mtl_dict\n elif 'END_GROUP' in line:\n pass\n elif 'GROUP' in line:\n curr_group = line.split('=')[1].strip()\n mtl_dict[curr_group] = {}\n else:\n attr, value = line.split('=')[0].strip(), line.split('=')[1].strip()\n mtl_dict[curr_group][attr] = value\n ","license":"mit"} {"repo_name":"kwilliams-mo\/iris","path":"lib\/iris\/tests\/test_plot.py","copies":"1","size":"32122","content":"# (C) British Crown Copyright 2010 - 2013, Met Office\n#\n# This file is part of Iris.\n#\n# Iris is free software: you can redistribute it and\/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Iris is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with Iris. 
If not, see .\n\n\n# import iris tests first so that some things can be initialised before\n# importing anything else\nimport iris.tests as tests\n\nfrom functools import wraps\nimport types\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport iris\nimport iris.coords as coords\nimport iris.plot as iplt\nimport iris.quickplot as qplt\nimport iris.symbols\nimport iris.tests.stock\nimport iris.tests.test_mapping as test_mapping\n\n\ndef simple_cube():\n cube = iris.tests.stock.realistic_4d()\n cube = cube[:, 0, 0, :]\n cube.coord('time').guess_bounds()\n return cube\n\n\nclass TestSimple(tests.GraphicsTest):\n def test_points(self):\n cube = simple_cube()\n qplt.contourf(cube)\n self.check_graphic()\n\n def test_bounds(self):\n cube = simple_cube()\n qplt.pcolor(cube)\n self.check_graphic()\n\n\nclass TestMissingCoord(tests.GraphicsTest):\n def _check(self, cube):\n qplt.contourf(cube)\n self.check_graphic()\n\n qplt.pcolor(cube)\n self.check_graphic()\n\n def test_no_u(self):\n cube = simple_cube()\n cube.remove_coord('grid_longitude')\n self._check(cube)\n\n def test_no_v(self):\n cube = simple_cube()\n cube.remove_coord('time')\n self._check(cube)\n\n def test_none(self):\n cube = simple_cube()\n cube.remove_coord('grid_longitude')\n cube.remove_coord('time')\n self._check(cube)\n\n\n@iris.tests.skip_data\nclass TestMissingCS(tests.GraphicsTest):\n @iris.tests.skip_data\n def test_missing_cs(self):\n cube = tests.stock.simple_pp()\n cube.coord(\"latitude\").coord_system = None\n cube.coord(\"longitude\").coord_system = None\n qplt.contourf(cube)\n qplt.plt.gca().coastlines()\n self.check_graphic()\n\n\nclass TestHybridHeight(tests.GraphicsTest):\n def setUp(self):\n self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]\n\n def _check(self, plt_method, test_altitude=True):\n plt_method(self.cube)\n self.check_graphic()\n\n plt_method(self.cube, coords=['level_height', 'grid_longitude'])\n self.check_graphic()\n\n plt_method(self.cube, coords=['grid_longitude', 'level_height'])\n self.check_graphic()\n\n if test_altitude:\n plt_method(self.cube, coords=['grid_longitude', 'altitude'])\n self.check_graphic()\n\n plt_method(self.cube, coords=['altitude', 'grid_longitude'])\n self.check_graphic()\n\n def test_points(self):\n self._check(qplt.contourf)\n\n def test_bounds(self):\n self._check(qplt.pcolor, test_altitude=False)\n\n def test_orography(self):\n qplt.contourf(self.cube)\n iplt.orography_at_points(self.cube)\n iplt.points(self.cube)\n self.check_graphic()\n\n coords = ['altitude', 'grid_longitude']\n qplt.contourf(self.cube, coords=coords)\n iplt.orography_at_points(self.cube, coords=coords)\n iplt.points(self.cube, coords=coords)\n self.check_graphic()\n\n # TODO: Test bounds once they are supported.\n with self.assertRaises(NotImplementedError):\n qplt.pcolor(self.cube)\n iplt.orography_at_bounds(self.cube)\n iplt.outline(self.cube)\n self.check_graphic()\n\n\nclass Test1dPlotMultiArgs(tests.GraphicsTest):\n # tests for iris.plot using multi-argument calling convention\n\n def setUp(self):\n self.cube1d = _load_4d_testcube()[0, :, 0, 0]\n self.draw_method = iplt.plot\n\n def test_cube(self):\n # just plot a cube against its dim coord\n self.draw_method(self.cube1d) # altitude vs temp\n self.check_graphic()\n\n def test_coord(self):\n # plot the altitude coordinate\n self.draw_method(self.cube1d.coord('altitude'))\n self.check_graphic()\n\n def test_coord_cube(self):\n # plot temperature against sigma\n self.draw_method(self.cube1d.coord('sigma'), 
self.cube1d)\n self.check_graphic()\n\n def test_cube_coord(self):\n # plot a vertical profile of temperature\n self.draw_method(self.cube1d, self.cube1d.coord('altitude'))\n self.check_graphic()\n\n def test_coord_coord(self):\n # plot two coordinates that are not mappable\n self.draw_method(self.cube1d.coord('sigma'),\n self.cube1d.coord('altitude'))\n self.check_graphic()\n\n def test_coord_coord_map(self):\n # plot lat-lon aux coordinates of a trajectory, which draws a map\n lon = iris.coords.AuxCoord([0, 5, 10, 15, 20, 25, 30, 35, 40, 45],\n standard_name='longitude',\n units='degrees_north')\n lat = iris.coords.AuxCoord([45, 55, 50, 60, 55, 65, 60, 70, 65, 75],\n standard_name='latitude',\n units='degrees_north')\n self.draw_method(lon, lat)\n plt.gca().coastlines()\n self.check_graphic()\n\n def test_cube_cube(self):\n # plot two phenomena against eachother, in this case just dummy data\n cube1 = self.cube1d.copy()\n cube2 = self.cube1d.copy()\n cube1.rename('some phenomenon')\n cube2.rename('some other phenomenon')\n cube1.units = iris.unit.Unit('no_unit')\n cube2.units = iris.unit.Unit('no_unit')\n cube1.data[:] = np.linspace(0, 1, 7)\n cube2.data[:] = np.exp(cube1.data)\n self.draw_method(cube1, cube2)\n self.check_graphic()\n\n def test_incompatible_objects(self):\n # incompatible objects (not the same length) should raise an error\n with self.assertRaises(ValueError):\n self.draw_method(self.cube1d.coord('time'), (self.cube1d))\n\n def test_multimidmensional(self):\n # multidimensional cubes are not allowed\n cube = _load_4d_testcube()[0, :, :, 0]\n with self.assertRaises(ValueError):\n self.draw_method(cube)\n\n def test_not_cube_or_coord(self):\n # inputs must be cubes or coordinates, otherwise an error should be\n # raised\n xdim = np.arange(self.cube1d.shape[0])\n with self.assertRaises(TypeError):\n self.draw_method(xdim, self.cube1d)\n\n def test_coords_deprecated(self):\n # ensure a warning is raised if the old coords keyword argument is\n # used, and make sure the plot produced is consistent with the old\n # interface\n msg = 'Missing deprecation warning for coords keyword.'\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n self.draw_method(self.cube1d, coords=['sigma'])\n self.assertEqual(len(w), 1, msg)\n self.check_graphic()\n\n def test_coords_deprecation_too_many(self):\n # in deprecation mode, too many coords is an error\n with self.assertRaises(ValueError):\n self.draw_method(self.cube1d, coords=['sigma', 'sigma'])\n\n def test_coords_deprecation_invalid_span(self):\n # in deprecation mode, a coordinate that doesn't span data is an error\n with self.assertRaises(ValueError):\n self.draw_method(self.cube1d, coords=['time'])\n\n\nclass Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):\n # tests for iris.plot using multi-argument calling convention\n\n def setUp(self):\n self.cube1d = _load_4d_testcube()[0, :, 0, 0]\n self.draw_method = qplt.plot\n\n\n@tests.skip_data\nclass Test1dScatter(tests.GraphicsTest):\n\n def setUp(self):\n self.cube = iris.load_cube(\n tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),\n 'Temperature')\n self.draw_method = iplt.scatter\n\n def test_coord_coord(self):\n x = self.cube.coord('longitude')\n y = self.cube.coord('height')\n c = self.cube.data\n self.draw_method(x, y, c=c, edgecolor='none')\n self.check_graphic()\n\n def test_coord_coord_map(self):\n x = self.cube.coord('longitude')\n y = self.cube.coord('latitude')\n c = self.cube.data\n self.draw_method(x, y, c=c, edgecolor='none')\n 
plt.gca().coastlines()\n self.check_graphic()\n\n def test_coord_cube(self):\n x = self.cube.coord('latitude')\n y = self.cube\n c = self.cube.coord('Travel Time').points\n self.draw_method(x, y, c=c, edgecolor='none')\n self.check_graphic()\n\n def test_cube_coord(self):\n x = self.cube\n y = self.cube.coord('height')\n c = self.cube.coord('Travel Time').points\n self.draw_method(x, y, c=c, edgecolor='none')\n self.check_graphic()\n\n def test_cube_cube(self):\n x = iris.load_cube(\n tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),\n 'Rel Humidity')\n y = self.cube\n c = self.cube.coord('Travel Time').points\n self.draw_method(x, y, c=c, edgecolor='none')\n self.check_graphic()\n\n def test_incompatible_objects(self):\n # cubes\/coordinates of different sizes cannot be plotted\n x = self.cube\n y = self.cube.coord('height')[:-1]\n with self.assertRaises(ValueError):\n self.draw_method(x, y)\n\n def test_multidimensional(self):\n # multidimensional cubes\/coordinates are not allowed\n x = _load_4d_testcube()[0, :, :, 0]\n y = x.coord('model_level_number')\n with self.assertRaises(ValueError):\n self.draw_method(x, y)\n\n def test_not_cube_or_coord(self):\n # inputs must be cubes or coordinates\n x = np.arange(self.cube.shape[0])\n y = self.cube\n with self.assertRaises(TypeError):\n self.draw_method(x, y)\n\n\n@tests.skip_data\nclass Test1dQuickplotScatter(Test1dScatter):\n\n def setUp(self):\n self.cube = iris.load_cube(\n tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),\n 'Temperature')\n self.draw_method = qplt.scatter\n\n\n@iris.tests.skip_data\nclass TestAttributePositive(tests.GraphicsTest):\n def test_1d_positive_up(self):\n path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))\n cube = iris.load_cube(path)\n qplt.plot(cube.coord('depth'), cube[0, :, 60, 80])\n self.check_graphic()\n\n def test_1d_positive_down(self):\n path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))\n cube = iris.load_cube(path)\n qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))\n self.check_graphic()\n\n def test_2d_positive_up(self):\n path = tests.get_data_path(('NetCDF', 'testing',\n 'small_theta_colpex.nc'))\n cube = iris.load_cube(path)[0, :, 42, :]\n qplt.pcolormesh(cube)\n self.check_graphic()\n\n def test_2d_positive_down(self):\n path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))\n cube = iris.load_cube(path)[0, :, 42, :]\n qplt.pcolormesh(cube)\n self.check_graphic()\n\n\n# Caches _load_4d_testcube so subsequent calls are faster\ndef cache(fn, cache={}):\n def inner(*args, **kwargs):\n key = fn.__name__\n if key not in cache:\n cache[key] = fn(*args, **kwargs)\n return cache[key]\n return inner\n\n\n@cache\ndef _load_4d_testcube():\n # Load example 4d data (TZYX).\n test_cube = iris.tests.stock.realistic_4d()\n # Replace forecast_period coord with a multi-valued version.\n time_coord = test_cube.coord('time')\n n_times = len(time_coord.points)\n forecast_dims = test_cube.coord_dims(time_coord)\n test_cube.remove_coord('forecast_period')\n # Make up values (including bounds), to roughly match older testdata.\n point_values = np.linspace((1 + 1.0 \/ 6), 2.0, n_times)\n point_uppers = point_values + (point_values[1] - point_values[0])\n bound_values = np.column_stack([point_values, point_uppers])\n # NOTE: this must be a DimCoord\n # - an equivalent AuxCoord produces different plots.\n new_forecast_coord = iris.coords.DimCoord(\n points=point_values,\n bounds=bound_values,\n standard_name='forecast_period',\n units=iris.unit.Unit('hours')\n )\n 
test_cube.add_aux_coord(new_forecast_coord, forecast_dims)\n # Heavily reduce dimensions for faster testing.\n # NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.\n test_cube = test_cube[:, ::10, ::10, ::10]\n return test_cube\n\n\n@cache\ndef _load_wind_no_bounds():\n # Load the COLPEX data => TZYX\n path = tests.get_data_path(('PP', 'COLPEX', 'small_eastward_wind.pp'))\n wind = iris.load_cube(path, 'eastward_wind')\n\n # Remove bounds from all coords that have them.\n wind.coord('grid_latitude').bounds = None\n wind.coord('grid_longitude').bounds = None\n wind.coord('level_height').bounds = None\n wind.coord('sigma').bounds = None\n\n return wind[:, :, :50, :50]\n\n\ndef _time_series(src_cube):\n # Until we have plotting support for multiple axes on the same dimension,\n # remove the time coordinate and its axis.\n cube = src_cube.copy()\n cube.remove_coord('time')\n return cube\n\n\ndef _date_series(src_cube):\n # Until we have plotting support for multiple axes on the same dimension,\n # remove the forecast_period coordinate and its axis.\n cube = src_cube.copy()\n cube.remove_coord('forecast_period')\n return cube\n\n\nclass SliceMixin(object):\n \"\"\"Mixin class providing tests for each 2-dimensional permutation of axes.\n\n Requires self.draw_method to be the relevant plotting function,\n and self.results to be a dictionary containing the desired test results.\"\"\"\n\n def test_yx(self):\n cube = self.wind[0, 0, :, :]\n self.draw_method(cube)\n self.check_graphic()\n\n def test_zx(self):\n cube = self.wind[0, :, 0, :]\n self.draw_method(cube)\n self.check_graphic()\n\n def test_tx(self):\n cube = _time_series(self.wind[:, 0, 0, :])\n self.draw_method(cube)\n self.check_graphic()\n\n def test_zy(self):\n cube = self.wind[0, :, :, 0]\n self.draw_method(cube)\n self.check_graphic()\n\n def test_ty(self):\n cube = _time_series(self.wind[:, 0, :, 0])\n self.draw_method(cube)\n self.check_graphic()\n\n def test_tz(self):\n cube = _time_series(self.wind[:, :, 0, 0])\n self.draw_method(cube)\n self.check_graphic()\n\n\n@iris.tests.skip_data\nclass TestContour(tests.GraphicsTest, SliceMixin):\n \"\"\"Test the iris.plot.contour routine.\"\"\"\n def setUp(self):\n self.wind = _load_4d_testcube()\n self.draw_method = iplt.contour\n\n\n@iris.tests.skip_data\nclass TestContourf(tests.GraphicsTest, SliceMixin):\n \"\"\"Test the iris.plot.contourf routine.\"\"\"\n def setUp(self):\n self.wind = _load_4d_testcube()\n self.draw_method = iplt.contourf\n\n\n@iris.tests.skip_data\nclass TestPcolor(tests.GraphicsTest, SliceMixin):\n \"\"\"Test the iris.plot.pcolor routine.\"\"\"\n def setUp(self):\n self.wind = _load_4d_testcube()\n self.draw_method = iplt.pcolor\n\n\n@iris.tests.skip_data\nclass TestPcolormesh(tests.GraphicsTest, SliceMixin):\n \"\"\"Test the iris.plot.pcolormesh routine.\"\"\"\n def setUp(self):\n self.wind = _load_4d_testcube()\n self.draw_method = iplt.pcolormesh\n\n\ndef check_warnings(method):\n \"\"\"\n Decorator that adds a catch_warnings and filter to assert\n the method being decorated issues a UserWarning.\n\n \"\"\"\n @wraps(method)\n def decorated_method(self, *args, **kwargs):\n # Force reset of iris.coords warnings registry to avoid suppression of\n # repeated warnings. 
warnings.resetwarnings() does not do this.\n if hasattr(coords, '__warningregistry__'):\n coords.__warningregistry__.clear()\n\n # Check that method raises warning.\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n with self.assertRaises(UserWarning):\n return method(self, *args, **kwargs)\n return decorated_method\n\n\ndef ignore_warnings(method):\n \"\"\"\n Decorator that adds a catch_warnings and filter to suppress\n any warnings issues by the method being decorated.\n\n \"\"\"\n @wraps(method)\n def decorated_method(self, *args, **kwargs):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return method(self, *args, **kwargs)\n return decorated_method\n\n\nclass CheckForWarningsMetaclass(type):\n \"\"\"\n Metaclass that adds a further test for each base class test\n that checks that each test raises a UserWarning. Each base\n class test is then overriden to ignore warnings in order to\n check the underlying functionality.\n\n \"\"\"\n def __new__(cls, name, bases, local):\n def add_decorated_methods(attr_dict, target_dict, decorator):\n for key, value in attr_dict.items():\n if (isinstance(value, types.FunctionType) and\n key.startswith('test')):\n new_key = '_'.join((key, decorator.__name__))\n if new_key not in target_dict:\n wrapped = decorator(value)\n wrapped.__name__ = new_key\n target_dict[new_key] = wrapped\n else:\n raise RuntimeError('A attribute called {!r} '\n 'already exists.'.format(new_key))\n\n def override_with_decorated_methods(attr_dict, target_dict,\n decorator):\n for key, value in attr_dict.items():\n if (isinstance(value, types.FunctionType) and\n key.startswith('test')):\n target_dict[key] = decorator(value)\n\n # Add decorated versions of base methods\n # to check for warnings.\n for base in bases:\n add_decorated_methods(base.__dict__, local, check_warnings)\n\n # Override base methods to ignore warnings.\n for base in bases:\n override_with_decorated_methods(base.__dict__, local,\n ignore_warnings)\n\n return type.__new__(cls, name, bases, local)\n\n\n@iris.tests.skip_data\nclass TestPcolorNoBounds(tests.GraphicsTest, SliceMixin):\n \"\"\"\n Test the iris.plot.pcolor routine on a cube with coordinates\n that have no bounds.\n\n \"\"\"\n __metaclass__ = CheckForWarningsMetaclass\n\n def setUp(self):\n self.wind = _load_wind_no_bounds()\n self.draw_method = iplt.pcolor\n\n\n@iris.tests.skip_data\nclass TestPcolormeshNoBounds(tests.GraphicsTest, SliceMixin):\n \"\"\"\n Test the iris.plot.pcolormesh routine on a cube with coordinates\n that have no bounds.\n\n \"\"\"\n __metaclass__ = CheckForWarningsMetaclass\n\n def setUp(self):\n self.wind = _load_wind_no_bounds()\n self.draw_method = iplt.pcolormesh\n\n\nclass Slice1dMixin(object):\n \"\"\"Mixin class providing tests for each 1-dimensional permutation of axes.\n\n Requires self.draw_method to be the relevant plotting function,\n and self.results to be a dictionary containing the desired test results.\"\"\"\n\n def test_x(self):\n cube = self.wind[0, 0, 0, :]\n self.draw_method(cube)\n self.check_graphic()\n\n def test_y(self):\n cube = self.wind[0, 0, :, 0]\n self.draw_method(cube)\n self.check_graphic()\n\n def test_z(self):\n cube = self.wind[0, :, 0, 0]\n self.draw_method(cube)\n self.check_graphic()\n\n def test_t(self):\n cube = _time_series(self.wind[:, 0, 0, 0])\n self.draw_method(cube)\n self.check_graphic()\n\n def test_t_dates(self):\n cube = _date_series(self.wind[:, 0, 0, 0])\n self.draw_method(cube)\n plt.gcf().autofmt_xdate()\n plt.xlabel('Phenomenon 
time')\n\n self.check_graphic()\n\n\n@iris.tests.skip_data\nclass TestPlot(tests.GraphicsTest, Slice1dMixin):\n \"\"\"Test the iris.plot.plot routine.\"\"\"\n def setUp(self):\n self.wind = _load_4d_testcube()\n self.draw_method = iplt.plot\n\n\n@iris.tests.skip_data\nclass TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):\n \"\"\"Test the iris.quickplot.plot routine.\"\"\"\n def setUp(self):\n self.wind = _load_4d_testcube()\n self.draw_method = qplt.plot\n\n\n_load_cube_once_cache = {}\n\n\ndef load_cube_once(filename, constraint):\n \"\"\"Same syntax as load_cube, but will only load a file once,\n\n then cache the answer in a dictionary.\n\n \"\"\"\n global _load_cube_once_cache\n key = (filename, str(constraint))\n cube = _load_cube_once_cache.get(key, None)\n\n if cube is None:\n cube = iris.load_cube(filename, constraint)\n _load_cube_once_cache[key] = cube\n\n return cube\n\n\nclass LambdaStr(object):\n \"\"\"Provides a callable function which has a sensible __repr__.\"\"\"\n def __init__(self, repr, lambda_fn):\n self.repr = repr\n self.lambda_fn = lambda_fn\n\n def __call__(self, *args, **kwargs):\n return self.lambda_fn(*args, **kwargs)\n\n def __repr__(self):\n return self.repr\n\n\n@iris.tests.skip_data\nclass TestPlotCoordinatesGiven(tests.GraphicsTest):\n def setUp(self):\n filename = tests.get_data_path(('PP', 'COLPEX',\n 'theta_and_orog_subset.pp'))\n self.cube = load_cube_once(filename, 'air_potential_temperature')\n\n self.draw_module = iris.plot\n self.contourf = LambdaStr('iris.plot.contourf',\n lambda cube, *args, **kwargs:\n iris.plot.contourf(cube, *args, **kwargs))\n self.contour = LambdaStr('iris.plot.contour',\n lambda cube, *args, **kwargs:\n iris.plot.contour(cube, *args, **kwargs))\n self.points = LambdaStr('iris.plot.points',\n lambda cube, *args, **kwargs:\n iris.plot.points(cube, c=cube.data,\n *args, **kwargs))\n self.plot = LambdaStr('iris.plot.plot',\n lambda cube, *args, **kwargs:\n iris.plot.plot(cube, *args, **kwargs))\n\n self.results = {'yx': ([self.contourf, ['grid_latitude',\n 'grid_longitude']],\n [self.contourf, ['grid_longitude',\n 'grid_latitude']],\n [self.contour, ['grid_latitude',\n 'grid_longitude']],\n [self.contour, ['grid_longitude',\n 'grid_latitude']],\n [self.points, ['grid_latitude',\n 'grid_longitude']],\n [self.points, ['grid_longitude',\n 'grid_latitude']],),\n 'zx': ([self.contourf, ['model_level_number',\n 'grid_longitude']],\n [self.contourf, ['grid_longitude',\n 'model_level_number']],\n [self.contour, ['model_level_number',\n 'grid_longitude']],\n [self.contour, ['grid_longitude',\n 'model_level_number']],\n [self.points, ['model_level_number',\n 'grid_longitude']],\n [self.points, ['grid_longitude',\n 'model_level_number']],),\n 'tx': ([self.contourf, ['time', 'grid_longitude']],\n [self.contourf, ['grid_longitude', 'time']],\n [self.contour, ['time', 'grid_longitude']],\n [self.contour, ['grid_longitude', 'time']],\n [self.points, ['time', 'grid_longitude']],\n [self.points, ['grid_longitude', 'time']],),\n 'x': ([self.plot, ['grid_longitude']],),\n 'y': ([self.plot, ['grid_latitude']],)\n }\n\n def draw(self, draw_method, *args, **kwargs):\n draw_fn = getattr(self.draw_module, draw_method)\n draw_fn(*args, **kwargs)\n self.check_graphic()\n\n def run_tests(self, cube, results):\n for draw_method, coords in results:\n draw_method(cube, coords=coords)\n try:\n self.check_graphic()\n except AssertionError, err:\n self.fail('Draw method %r failed with coords: %r. 
'\n 'Assertion message: %s' % (draw_method, coords, err))\n\n def run_tests_1d(self, cube, results):\n # there is a different calling convention for 1d plots\n for draw_method, coords in results:\n draw_method(cube.coord(coords[0]), cube)\n try:\n self.check_graphic()\n except AssertionError as err:\n msg = 'Draw method {!r} failed with coords: {!r}. ' \\\n 'Assertion message: {!s}'\n self.fail(msg.format(draw_method, coords, err))\n\n def test_yx(self):\n test_cube = self.cube[0, 0, :, :]\n self.run_tests(test_cube, self.results['yx'])\n\n def test_zx(self):\n test_cube = self.cube[0, :15, 0, :]\n self.run_tests(test_cube, self.results['zx'])\n\n def test_tx(self):\n test_cube = self.cube[:, 0, 0, :]\n self.run_tests(test_cube, self.results['tx'])\n\n def test_x(self):\n test_cube = self.cube[0, 0, 0, :]\n self.run_tests_1d(test_cube, self.results['x'])\n\n def test_y(self):\n test_cube = self.cube[0, 0, :, 0]\n self.run_tests_1d(test_cube, self.results['y'])\n\n def test_badcoords(self):\n cube = self.cube[0, 0, :, :]\n draw_fn = getattr(self.draw_module, 'contourf')\n self.assertRaises(ValueError, draw_fn, cube,\n coords=['grid_longitude', 'grid_longitude'])\n self.assertRaises(ValueError, draw_fn, cube,\n coords=['grid_longitude', 'grid_longitude',\n 'grid_latitude'])\n self.assertRaises(iris.exceptions.CoordinateNotFoundError, draw_fn,\n cube, coords=['grid_longitude', 'wibble'])\n self.assertRaises(ValueError, draw_fn, cube, coords=[])\n self.assertRaises(ValueError, draw_fn, cube,\n coords=[cube.coord('grid_longitude'),\n cube.coord('grid_longitude')])\n self.assertRaises(ValueError, draw_fn, cube,\n coords=[cube.coord('grid_longitude'),\n cube.coord('grid_longitude'),\n cube.coord('grid_longitude')])\n\n def test_non_cube_coordinate(self):\n cube = self.cube[0, :, :, 0]\n pts = -100 + np.arange(cube.shape[1]) * 13\n x = coords.DimCoord(pts, standard_name='model_level_number',\n attributes={'positive': 'up'})\n self.draw('contourf', cube, coords=['grid_latitude', x])\n\n\n@iris.tests.skip_data\nclass TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):\n def setUp(self):\n filename = tests.get_data_path(('NetCDF', 'rotated', 'xy',\n 'rotPole_landAreaFraction.nc'))\n self.cube = iris.load_cube(filename)\n\n def test_default(self):\n iplt.contourf(self.cube)\n plt.gca().coastlines()\n self.check_graphic()\n\n def test_coords(self):\n # Pass in dimension coords.\n rlat = self.cube.coord('grid_latitude')\n rlon = self.cube.coord('grid_longitude')\n iplt.contourf(self.cube, coords=[rlon, rlat])\n plt.gca().coastlines()\n self.check_graphic()\n # Pass in auxiliary coords.\n lat = self.cube.coord('latitude')\n lon = self.cube.coord('longitude')\n iplt.contourf(self.cube, coords=[lon, lat])\n plt.gca().coastlines()\n self.check_graphic()\n\n def test_coord_names(self):\n # Pass in names of dimension coords.\n iplt.contourf(self.cube, coords=['grid_longitude', 'grid_latitude'])\n plt.gca().coastlines()\n self.check_graphic()\n # Pass in names of auxiliary coords.\n iplt.contourf(self.cube, coords=['longitude', 'latitude'])\n plt.gca().coastlines()\n self.check_graphic()\n\n def test_yx_order(self):\n # Do not attempt to draw coastlines as it is not a map.\n iplt.contourf(self.cube, coords=['grid_latitude', 'grid_longitude'])\n self.check_graphic()\n iplt.contourf(self.cube, coords=['latitude', 'longitude'])\n self.check_graphic()\n\n\nclass TestSymbols(tests.GraphicsTest):\n def test_cloud_cover(self):\n iplt.symbols(range(10), [0] * 10, [iris.symbols.CLOUD_COVER[i]\n for i in range(10)], 
0.375)\n self.check_graphic()\n\n\nclass TestPlottingExceptions(tests.IrisTest):\n def setUp(self):\n self.bounded_cube = tests.stock.lat_lon_cube()\n self.bounded_cube.coord(\"latitude\").guess_bounds()\n self.bounded_cube.coord(\"longitude\").guess_bounds()\n\n def test_boundmode_multidim(self):\n # Test exception translation.\n # We can't get contiguous bounded grids from multi-d coords.\n cube = self.bounded_cube\n cube.remove_coord(\"latitude\")\n cube.add_aux_coord(coords.AuxCoord(points=cube.data,\n standard_name='latitude',\n units='degrees'), [0, 1])\n with self.assertRaises(ValueError):\n iplt.pcolormesh(cube, coords=['longitude', 'latitude'])\n\n def test_boundmode_4bounds(self):\n # Test exception translation.\n # We can only get contiguous bounded grids with 2 bounds per point.\n cube = self.bounded_cube\n lat = coords.AuxCoord.from_coord(cube.coord(\"latitude\"))\n lat.bounds = np.array([lat.points, lat.points + 1,\n lat.points + 2, lat.points + 3]).transpose()\n cube.remove_coord(\"latitude\")\n cube.add_aux_coord(lat, 0)\n with self.assertRaises(ValueError):\n iplt.pcolormesh(cube, coords=['longitude', 'latitude'])\n\n def test_different_coord_systems(self):\n cube = self.bounded_cube\n lat = cube.coord('latitude')\n lon = cube.coord('longitude')\n lat.coord_system = iris.coord_systems.GeogCS(7000000)\n lon.coord_system = iris.coord_systems.GeogCS(7000001)\n with self.assertRaises(ValueError):\n iplt.pcolormesh(cube, coords=['longitude', 'latitude'])\n\n\n@iris.tests.skip_data\nclass TestPlotOtherCoordSystems(tests.GraphicsTest):\n def test_plot_tmerc(self):\n filename = tests.get_data_path(('NetCDF', 'transverse_mercator',\n 'tmean_1910_1910.nc'))\n self.cube = iris.load_cube(filename)\n iplt.pcolormesh(self.cube[0])\n plt.gca().coastlines()\n self.check_graphic()\n\n\nif __name__ == \"__main__\":\n tests.main()\n","license":"gpl-3.0"} {"repo_name":"kaichogami\/scikit-learn","path":"sklearn\/utils\/multiclass.py","copies":"40","size":"12966","content":"\n# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi\n#\n# License: BSD 3 clause\n\"\"\"\nMulti-class \/ multi-label utility function\n==========================================\n\n\"\"\"\nfrom __future__ import division\nfrom collections import Sequence\nfrom itertools import chain\n\nfrom scipy.sparse import issparse\nfrom scipy.sparse.base import spmatrix\nfrom scipy.sparse import dok_matrix\nfrom scipy.sparse import lil_matrix\n\nimport numpy as np\n\nfrom ..externals.six import string_types\nfrom .validation import check_array\nfrom ..utils.fixes import bincount\nfrom ..utils.fixes import array_equal\n\n\ndef _unique_multiclass(y):\n if hasattr(y, '__array__'):\n return np.unique(np.asarray(y))\n else:\n return set(y)\n\n\ndef _unique_indicator(y):\n return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])\n\n\n_FN_UNIQUE_LABELS = {\n 'binary': _unique_multiclass,\n 'multiclass': _unique_multiclass,\n 'multilabel-indicator': _unique_indicator,\n}\n\n\ndef unique_labels(*ys):\n \"\"\"Extract an ordered array of unique labels\n\n We don't allow:\n - mix of multilabel and multiclass (single label) targets\n - mix of label indicator matrix and anything else,\n because there are no explicit labels)\n - mix of label indicator matrices of different sizes\n - mix of string and integer labels\n\n At the moment, we also don't allow \"multiclass-multioutput\" input type.\n\n Parameters\n ----------\n *ys : array-likes,\n\n Returns\n -------\n out : numpy array of shape [n_unique_labels]\n An ordered array of unique 
labels.\n\n Examples\n --------\n >>> from sklearn.utils.multiclass import unique_labels\n >>> unique_labels([3, 5, 5, 5, 7, 7])\n array([3, 5, 7])\n >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])\n array([1, 2, 3, 4])\n >>> unique_labels([1, 2, 10], [5, 11])\n array([ 1, 2, 5, 10, 11])\n \"\"\"\n if not ys:\n raise ValueError('No argument has been passed.')\n # Check that we don't mix label format\n\n ys_types = set(type_of_target(x) for x in ys)\n if ys_types == set([\"binary\", \"multiclass\"]):\n ys_types = set([\"multiclass\"])\n\n if len(ys_types) > 1:\n raise ValueError(\"Mix type of y not allowed, got types %s\" % ys_types)\n\n label_type = ys_types.pop()\n\n # Check consistency for the indicator format\n if (label_type == \"multilabel-indicator\" and\n len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]\n for y in ys)) > 1):\n raise ValueError(\"Multi-label binary indicator input with \"\n \"different numbers of labels\")\n\n # Get the unique set of labels\n _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)\n if not _unique_labels:\n raise ValueError(\"Unknown label type: %s\" % repr(ys))\n\n ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))\n\n # Check that we don't mix string type with number type\n if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):\n raise ValueError(\"Mix of label input types (string and number)\")\n\n return np.array(sorted(ys_labels))\n\n\ndef _is_integral_float(y):\n return y.dtype.kind == 'f' and np.all(y.astype(int) == y)\n\n\ndef is_multilabel(y):\n \"\"\" Check if ``y`` is in a multilabel format.\n\n Parameters\n ----------\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n out : bool,\n Return ``True``, if ``y`` is in a multilabel format, else ```False``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.utils.multiclass import is_multilabel\n >>> is_multilabel([0, 1, 0, 1])\n False\n >>> is_multilabel([[1], [0, 2], []])\n False\n >>> is_multilabel(np.array([[1, 0], [0, 0]]))\n True\n >>> is_multilabel(np.array([[1], [0], [0]]))\n False\n >>> is_multilabel(np.array([[1, 0, 0]]))\n True\n \"\"\"\n if hasattr(y, '__array__'):\n y = np.asarray(y)\n if not (hasattr(y, \"shape\") and y.ndim == 2 and y.shape[1] > 1):\n return False\n\n if issparse(y):\n if isinstance(y, (dok_matrix, lil_matrix)):\n y = y.tocsr()\n return (len(y.data) == 0 or np.unique(y.data).size == 1 and\n (y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(np.unique(y.data))))\n else:\n labels = np.unique(y)\n\n return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint\n _is_integral_float(labels))\n\ndef check_classification_targets(y):\n \"\"\"Ensure that target y is of a non-regression type.\n\n Only the following target types (as defined in type_of_target) are allowed:\n 'binary', 'multiclass', 'multiclass-multioutput', \n 'multilabel-indicator', 'multilabel-sequences'\n\n Parameters\n ----------\n y : array-like\n \"\"\"\n y_type = type_of_target(y)\n if y_type not in ['binary', 'multiclass', 'multiclass-multioutput', \n 'multilabel-indicator', 'multilabel-sequences']:\n raise ValueError(\"Unknown label type: %r\" % y_type)\n\n\n\ndef type_of_target(y):\n \"\"\"Determine the type of data indicated by target `y`\n\n Parameters\n ----------\n y : array-like\n\n Returns\n -------\n target_type : string\n One of:\n * 'continuous': `y` is an array-like of floats that are not all\n integers, and is 1d or a column vector.\n * 'continuous-multioutput': `y` is a 2d array 
of floats that are\n not all integers, and both dimensions are of size > 1.\n * 'binary': `y` contains <= 2 discrete values and is 1d or a column\n vector.\n * 'multiclass': `y` contains more than two discrete values, is not a\n sequence of sequences, and is 1d or a column vector.\n * 'multiclass-multioutput': `y` is a 2d array that contains more\n than two discrete values, is not a sequence of sequences, and both\n dimensions are of size > 1.\n * 'multilabel-indicator': `y` is a label indicator matrix, an array\n of two dimensions with at least two columns, and at most 2 unique\n values.\n * 'unknown': `y` is array-like but none of the above, such as a 3d\n array, sequence of sequences, or an array of non-sequence objects.\n\n Examples\n --------\n >>> import numpy as np\n >>> type_of_target([0.1, 0.6])\n 'continuous'\n >>> type_of_target([1, -1, -1, 1])\n 'binary'\n >>> type_of_target(['a', 'b', 'a'])\n 'binary'\n >>> type_of_target([1.0, 2.0])\n 'binary'\n >>> type_of_target([1, 0, 2])\n 'multiclass'\n >>> type_of_target([1.0, 0.0, 3.0])\n 'multiclass'\n >>> type_of_target(['a', 'b', 'c'])\n 'multiclass'\n >>> type_of_target(np.array([[1, 2], [3, 1]]))\n 'multiclass-multioutput'\n >>> type_of_target([[1, 2]])\n 'multiclass-multioutput'\n >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))\n 'continuous-multioutput'\n >>> type_of_target(np.array([[0, 1], [1, 1]]))\n 'multilabel-indicator'\n \"\"\"\n valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))\n and not isinstance(y, string_types))\n\n if not valid:\n raise ValueError('Expected array-like (array or non-string sequence), '\n 'got %r' % y)\n\n if is_multilabel(y):\n return 'multilabel-indicator'\n\n try:\n y = np.asarray(y)\n except ValueError:\n # Known to fail in numpy 1.3 for array of arrays\n return 'unknown'\n\n # The old sequence of sequences format\n try:\n if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)\n and not isinstance(y[0], string_types)):\n raise ValueError('You appear to be using a legacy multi-label data'\n ' representation. Sequence of sequences are no'\n ' longer supported; use a binary array or sparse'\n ' matrix instead.')\n except IndexError:\n pass\n\n # Invalid inputs\n if y.ndim > 2 or (y.dtype == object and len(y) and\n not isinstance(y.flat[0], string_types)):\n return 'unknown' # [[[1, 2]]] or [obj_1] and not [\"label_1\"]\n\n if y.ndim == 2 and y.shape[1] == 0:\n return 'unknown' # [[]]\n\n if y.ndim == 2 and y.shape[1] > 1:\n suffix = \"-multioutput\" # [[1, 2], [1, 2]]\n else:\n suffix = \"\" # [1, 2, 3] or [[1], [2], [3]]\n\n # check float and contains non-integer float values\n if y.dtype.kind == 'f' and np.any(y != y.astype(int)):\n # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]\n return 'continuous' + suffix\n\n if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):\n return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]\n else:\n return 'binary' # [1, 2] or [[\"a\"], [\"b\"]]\n\n\ndef _check_partial_fit_first_call(clf, classes=None):\n \"\"\"Private helper function for factorizing common classes param logic\n\n Estimators that implement the ``partial_fit`` API need to be provided with\n the list of possible classes at the first call to partial_fit.\n\n Subsequent calls to partial_fit should check that ``classes`` is still\n consistent with a previous value of ``clf.classes_`` when provided.\n\n This function returns True if it detects that this was the first call to\n ``partial_fit`` on ``clf``. 
In that case the ``classes_`` attribute is also\n set on ``clf``.\n\n \"\"\"\n if getattr(clf, 'classes_', None) is None and classes is None:\n raise ValueError(\"classes must be passed on the first call \"\n \"to partial_fit.\")\n\n elif classes is not None:\n if getattr(clf, 'classes_', None) is not None:\n if not array_equal(clf.classes_, unique_labels(classes)):\n raise ValueError(\n \"`classes=%r` is not the same as on last call \"\n \"to partial_fit, was: %r\" % (classes, clf.classes_))\n\n else:\n # This is the first call to partial_fit\n clf.classes_ = unique_labels(classes)\n return True\n\n # classes is None and clf.classes_ has already previously been set:\n # nothing to do\n return False\n\n\ndef class_distribution(y, sample_weight=None):\n \"\"\"Compute class priors from multioutput-multiclass target data\n\n Parameters\n ----------\n y : array like or sparse matrix of size (n_samples, n_outputs)\n The labels for each example.\n\n sample_weight : array-like of shape = (n_samples,), optional\n Sample weights.\n\n Returns\n -------\n classes : list of size n_outputs of arrays of size (n_classes,)\n List of classes for each column.\n\n n_classes : list of integers of size n_outputs\n Number of classes in each column\n\n class_prior : list of size n_outputs of arrays of size (n_classes,)\n Class distribution of each column.\n\n \"\"\"\n classes = []\n n_classes = []\n class_prior = []\n\n n_samples, n_outputs = y.shape\n\n if issparse(y):\n y = y.tocsc()\n y_nnz = np.diff(y.indptr)\n\n for k in range(n_outputs):\n col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]\n # separate sample weights for zero and non-zero elements\n if sample_weight is not None:\n nz_samp_weight = np.asarray(sample_weight)[col_nonzero]\n zeros_samp_weight_sum = (np.sum(sample_weight) -\n np.sum(nz_samp_weight))\n else:\n nz_samp_weight = None\n zeros_samp_weight_sum = y.shape[0] - y_nnz[k]\n\n classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],\n return_inverse=True)\n class_prior_k = bincount(y_k, weights=nz_samp_weight)\n\n # An explicit zero was found, combine its weight with the weight\n # of the implicit zeros\n if 0 in classes_k:\n class_prior_k[classes_k == 0] += zeros_samp_weight_sum\n\n # If an there is an implicit zero and it is not in classes and\n # class_prior, make an entry for it\n if 0 not in classes_k and y_nnz[k] < y.shape[0]:\n classes_k = np.insert(classes_k, 0, 0)\n class_prior_k = np.insert(class_prior_k, 0,\n zeros_samp_weight_sum)\n\n classes.append(classes_k)\n n_classes.append(classes_k.shape[0])\n class_prior.append(class_prior_k \/ class_prior_k.sum())\n else:\n for k in range(n_outputs):\n classes_k, y_k = np.unique(y[:, k], return_inverse=True)\n classes.append(classes_k)\n n_classes.append(classes_k.shape[0])\n class_prior_k = bincount(y_k, weights=sample_weight)\n class_prior.append(class_prior_k \/ class_prior_k.sum())\n\n return (classes, n_classes, class_prior)\n","license":"bsd-3-clause"} {"repo_name":"ONEcampaign\/humanitarian-data-service","path":"displacement_tracker_data.py","copies":"1","size":"27157","content":"import requests\nimport pandas as pd\nimport os.path\nimport resources.constants\nimport json\nfrom pandas.io.json import json_normalize\nfrom utils.data_utils import get_ordinal_number\n\n\"\"\"\nThis script aggregates data from multiple endpoints and returns a single .json file containing all data\nused in the displacement tracker project.\n\nScheduling this script would mean that the \/displacement_tracker endpoint always returned the latest 
data\ncontained within the Humanitarian Data Service API.\n\"\"\"\n\n# For development\n#ROOT = 'http:\/\/localhost:5000'\n\n# For live\nROOT = 'http:\/\/ec2-34-200-18-111.compute-1.amazonaws.com'\n\n# Set year for country-level funding data\nFUNDING_YEAR = 2016\n\n# Define all endpoints\nURL_POPULATIONS_REFUGEELIKE_ASYLUM = '\/populations\/refugeelike\/asylum\/index'\nURL_POPULATIONS_REFUGEELIKE_ORIGIN = '\/populations\/refugeelike\/origin\/index'\nURL_INDICATORS_GNI = '\/indicators\/gni\/index'\nURL_PLANS_PROGRESS = '\/funding\/plans\/progress\/index'\nURL_POPULATION = '\/populations\/totals\/index'\nURL_FRAGILE_STATE = '\/fragility\/fragile-state-index\/index'\nURL_NEEDS = '\/needs\/plans\/index'\nURL_FUNDING_DEST_COUNTRY = '\/funding\/countries\/destination\/index\/{}'.format(FUNDING_YEAR)\nURL_FUNDING_DEST_DONORS = '\/funding\/countries\/donors\/index'\n\n\n\n# Define path for raw country names data\ncountry_names_path = os.path.join(resources.constants.EXAMPLE_RAW_DATA_PATH, 'UNSD Methodology.csv')\n\n# Define path for relatable geography populations data\nrelatable_population_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, '2017_relatable_population_rankings.csv')\n\n# Define path for stories of displacement\ndisplacement_stories_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'stories_of_displacement_links.csv')\n\n# Create a blank dictionary to store metadata for each field\nmetadata_dict = {}\n\n\ndef merge_data(\n funding_year = FUNDING_YEAR,\n country_names_path=country_names_path,\n relatable_population_path=relatable_population_path,\n displacement_stories_path=displacement_stories_path,\n url_populations_refugeelike_asylum=(ROOT + URL_POPULATIONS_REFUGEELIKE_ASYLUM),\n url_populations_refugeelike_origin=(ROOT + URL_POPULATIONS_REFUGEELIKE_ORIGIN),\n url_indicators_gni=(ROOT + URL_INDICATORS_GNI),\n url_plans_progress=(ROOT + URL_PLANS_PROGRESS),\n url_population=(ROOT + URL_POPULATION),\n url_fragile_state=(ROOT + URL_FRAGILE_STATE),\n url_needs=(ROOT + URL_NEEDS),\n url_funding_dest_country=(ROOT + URL_FUNDING_DEST_COUNTRY),\n url_funding_dest_donors=(ROOT + URL_FUNDING_DEST_DONORS)\n ):\n\n #################### COUNTRY NAMES ####################\n # Get the data from .csv\n df_country_names = pd.read_csv(country_names_path, encoding='utf-8')\n\n # Select relevant fields\n df_country_names = df_country_names[[\n 'Country or Area',\n 'ISO-alpha3 Code'\n ]]\n\n # Add Taiwan\n df_country_names.loc[-1] = [\"Taiwan\", \"TWN\"]\n\n # Drop null values\n df_country_names = df_country_names.dropna()\n\n # Set country code to be the index\n df_country_names = df_country_names.set_index('ISO-alpha3 Code')\n\n # Rename fields\n df_country_names.rename(columns={'Country or Area': 'Country'}, inplace=True)\n\n\n #################### DISPLACEMENT STORIES ####################\n # Get the data from .csv\n df_displacement_stories = pd.read_csv(displacement_stories_path, encoding='utf-8')\n\n # Set country code to be the index\n df_displacement_stories = df_displacement_stories.set_index('countryCode')\n\n # Select relevant fields\n df_displacement_stories = df_displacement_stories[[\n 'storyTitle', 'storySource',\n 'storyTagLine', 'storyURL'\n ]]\n\n # Drop null values\n df_displacement_stories = df_displacement_stories.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_displacement_stories.columns:\n metadata_dict[column] = {}\n\n\n #################### POPULATIONS ####################\n # Get the data from the 
API\n population_data = requests.get(url_population).json()\n\n # Extract metadata\n if 'metadata' in population_data:\n population_metadata = population_data['metadata']\n else:\n population_metadata = {}\n\n # Build dataframe\n df_population = pd.DataFrame(population_data['data']).T\n\n # Select relevant fields\n df_population = df_population[[\n 'PopTotal'\n ]]\n\n # Rename fields\n df_population.rename(columns={'PopTotal': 'Population'}, inplace=True)\n\n # Drop null values\n df_population = df_population.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_population.columns:\n metadata_dict[column] = population_metadata\n\n\n #################### FRAGILE STATE ####################\n # Get the data from the API\n fragile_state_data = requests.get(url_fragile_state).json()\n\n # Extract metadata\n if 'metadata' in fragile_state_data:\n fragile_state_metadata = fragile_state_data['metadata']\n else:\n fragile_state_metadata = {}\n\n # Build a dataframe\n df_fragile_state = pd.DataFrame(fragile_state_data['data']).T\n\n # Select relevant fields\n df_fragile_state = df_fragile_state[[\n 'Total', 'Rank'\n ]]\n\n # Rename fields\n df_fragile_state.rename(columns={'Total': 'Fragile State Index Score',\n 'Rank': 'Fragile State Index Rank'}, inplace=True)\n\n # Drop null values\n df_fragile_state = df_fragile_state.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_fragile_state.columns:\n metadata_dict[column] = fragile_state_metadata\n\n\n #################### POPULATIONS_REFUGEELIKE_ASYLUM ####################\n # Get the data from the API\n populations_refugeelike_asylum_data = requests.get(url_populations_refugeelike_asylum).json()\n\n # Extract metadata\n if 'metadata' in populations_refugeelike_asylum_data:\n populations_refugeelike_asylum_metadata = populations_refugeelike_asylum_data['metadata']\n else:\n populations_refugeelike_asylum_metadata = {}\n\n # Build a dataframe\n df_populations_refugeelike_asylum = pd.DataFrame(populations_refugeelike_asylum_data['data']).T\n\n # Select relevant fields\n df_populations_refugeelike_asylum = df_populations_refugeelike_asylum[[\n 'Total population of concern', 'Total Refugee and people in refugee-like situations',\n 'IDPs protected\/assisted by UNHCR, incl. people in IDP-like situations','Asylum-seekers'\n ]]\n\n # Rename fields\n df_populations_refugeelike_asylum.rename(columns={\n 'IDPs protected\/assisted by UNHCR, incl. 
people in IDP-like situations': 'IDPs protected\/assisted by UNHCR',\n 'Asylum-seekers': 'Asylum-seekers (asylum)'\n }, inplace=True)\n\n\n # Add field to rank total total population of concern\n df_populations_refugeelike_asylum['Rank of total population of concern'] = df_populations_refugeelike_asylum[\n 'Total population of concern'].rank(ascending=False, method='min').astype(int)\n\n # Add field to add refugees and asylum-seekers\n df_populations_refugeelike_asylum['Total refugees and asylum-seekers (asylum)'] = df_populations_refugeelike_asylum[\n 'Total Refugee and people in refugee-like situations'] + df_populations_refugeelike_asylum['Asylum-seekers (asylum)']\n\n # Drop null values\n df_populations_refugeelike_asylum = df_populations_refugeelike_asylum.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_populations_refugeelike_asylum.columns:\n metadata_dict[column] = populations_refugeelike_asylum_metadata\n\n\n #################### POPULATIONS_REFUGEELIKE_ORIGIN ####################\n # Get the data from the API\n populations_refugeelike_origin_data = requests.get(url_populations_refugeelike_origin).json()\n\n # Extract metadata\n if 'metadata' in populations_refugeelike_origin_data:\n populations_refugeelike_origin_metadata = populations_refugeelike_origin_data['metadata']\n else:\n populations_refugeelike_origin_metadata = {}\n\n # Build a dataframe\n df_populations_refugeelike_origin = pd.DataFrame(populations_refugeelike_origin_data['data']).T\n\n # Select relevant fields\n df_populations_refugeelike_origin = df_populations_refugeelike_origin[[\n 'Total Refugee and people in refugee-like situations', 'Asylum-seekers'\n ]]\n\n # Rename fields\n df_populations_refugeelike_origin.rename(columns={\n 'Total Refugee and people in refugee-like situations': 'Total refugees who have fled from country',\n 'Asylum-seekers': 'Asylum-seekers (origin)'\n }, inplace=True)\n\n\n # Add field to add refugees and asylum-seekers\n df_populations_refugeelike_origin['Total refugees and asylum-seekers (origin)'] = df_populations_refugeelike_origin[\n 'Total refugees who have fled from country'] + df_populations_refugeelike_origin['Asylum-seekers (origin)']\n\n # Drop null values\n df_populations_refugeelike_origin = df_populations_refugeelike_origin.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_populations_refugeelike_origin.columns:\n metadata_dict[column] = populations_refugeelike_origin_metadata\n\n\n #################### INDICATORS GNI ####################\n # Get the data from the API\n indicators_gni_data = requests.get(url_indicators_gni).json()\n\n # Extract metadata\n if 'metadata' in indicators_gni_data:\n indicators_gni_metadata = indicators_gni_data['metadata']\n else:\n indicators_gni_metadata = {}\n\n # Build a dataframe\n df_indicators_gni = pd.DataFrame(indicators_gni_data['data']).T\n\n # Select relevant fields\n df_indicators_gni = df_indicators_gni[[\n '2015'\n ]]\n\n # Rename fields\n df_indicators_gni.rename(columns={'2015': 'GDP Per Capita'}, inplace=True)\n\n # Drop null values\n df_indicators_gni = df_indicators_gni.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_indicators_gni.columns:\n metadata_dict[column] = indicators_gni_metadata\n\n\n #################### PLANS PROGRESS ####################\n # Get the data from the API\n plans_progress_data = requests.get(url_plans_progress).json()\n\n # Extract metadata\n if 'metadata' in 
plans_progress_data:\n plans_progress_metadata = plans_progress_data['metadata']\n else:\n plans_progress_metadata = {}\n\n # Build a dataframe\n df_plans_progress = pd.DataFrame(plans_progress_data['data']).T\n\n # Select relevant fields\n df_plans_progress = df_plans_progress[[\n 'appealFunded', 'revisedRequirements', 'neededFunding'\n ]]\n\n # Rename fields\n df_plans_progress.rename(columns={'appealFunded': 'Appeal funds committed to date',\n 'revisedRequirements': 'Appeal funds requested',\n 'neededFunding': 'Appeal funds still needed'}, inplace=True)\n\n df_plans_progress['Appeal percent funded'] = df_plans_progress['Appeal funds committed to date']\/df_plans_progress['Appeal funds requested']\n\n # Drop null values\n df_plans_progress = df_plans_progress.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_plans_progress.columns:\n metadata_dict[column] = plans_progress_metadata\n\n # Add an FTS data as-of date so it can be included in the .csv data dump\n df_plans_progress['FTS funding data as-of date'] = plans_progress_data['metadata']['source_data']\n\n\n ######## FUNDING BY DESTINATION COUNTRY ############\n #Get the data from the API\n funding_dest_country_data = requests.get(url_funding_dest_country).json()\n\n # Extract metadata\n if 'metadata' in funding_dest_country_data:\n funding_dest_country_metadata = funding_dest_country_data['metadata']\n else:\n funding_dest_country_metadata = {}\n\n # Build a dataframe\n df_funding_dest_country = pd.DataFrame(funding_dest_country_data['data']).T\n\n # Select relevant fields\n df_funding_dest_country = df_funding_dest_country[[\n 'totalFunding'\n ]]\n\n # Keep only records where totalFunding > 0\n df_funding_dest_country = df_funding_dest_country[df_funding_dest_country['totalFunding'] > 0]\n\n # Rename fields\n df_funding_dest_country.rename(columns={'totalFunding': 'Humanitarian aid received'},\n inplace=True)\n\n # Add field to rank total total population of concern\n df_funding_dest_country['Rank of humanitarian aid received'] = df_funding_dest_country[\n 'Humanitarian aid received'].rank(ascending=False, method='min').astype(int)\n\n # Drop null values\n df_funding_dest_country = df_funding_dest_country.dropna()\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_funding_dest_country.columns:\n metadata_dict[column] = funding_dest_country_metadata\n\n\n ################## TOP 5 DONORS TO EACH DESTINATION COUNTRY ###################\n #Get the data from the API\n funding_dest_donors_data = requests.get(url_funding_dest_donors).json()\n\n # Extract metadata\n if 'metadata' in funding_dest_donors_data:\n funding_dest_donors_metadata = funding_dest_donors_data['metadata']\n else:\n funding_dest_donors_metadata = {}\n\n # Build a dataframe\n df_funding_dest_donors = json_normalize(funding_dest_donors_data['data']).T\n #df_funding_dest_donors = pd.DataFrame(funding_dest_donors_data['data']).T\n\n df_funding_dest_donors.columns = (['Top 5 Donors'])\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_funding_dest_donors.columns:\n metadata_dict[column] = funding_dest_donors_metadata\n\n\n #################### NEEDS ####################\n # Get the data from the API\n needs_data = requests.get(url_needs).json()\n\n # Extract metadata\n if 'metadata' in needs_data:\n needs_metadata = needs_data['metadata']\n else:\n needs_metadata = {}\n\n # Build a dataframe\n df_needs = pd.DataFrame(needs_data['data']).T\n\n # Exclude rows where 
country code is missing\n df_needs = df_needs.drop('null')\n\n # Select relevant fields\n df_needs = df_needs[[\n 'inNeedTotal', 'inNeedHealth', 'inNeedEducation',\n 'inNeedFoodSecurity', 'inNeedProtection', 'sourceURL',\n 'inNeedShelter-CCCM-NFI', 'inNeedWASH', 'sourceType'\n ]]\n\n # Rename fields\n df_needs.rename(columns={'inNeedTotal': 'Total people in need',\n 'inNeedHealth': 'People in need of health support',\n 'inNeedEducation': 'Children in need of education',\n 'inNeedFoodSecurity': 'People who are food insecure',\n 'inNeedProtection': 'People in need of protection',\n 'inNeedShelter-CCCM-NFI': 'People in need of shelter',\n 'inNeedWASH': 'People in need of water, sanitization & hygiene',\n 'sourceURL': 'Source of needs data',\n 'sourceType': 'Source type of needs data'\n }, inplace=True)\n\n # Add metadata for each field to overall metadata dictionary\n for column in df_needs.columns:\n metadata_dict[column] = needs_metadata\n\n\n ######## FIND PLACES WITH SIMILAR POPULATIONS TO PEOPLE IN NEED ########\n\n # Get the relateable populations data from .csv\n df_relatable_populations = pd.read_csv(relatable_population_path)\n df_relatable_populations['Population'] = df_relatable_populations[[\n 'Population - World Bank (2015)','Population - UNFPA (2016)'\n ]].max(axis=1)\n\n df_relatable_populations = df_relatable_populations[['City, State, Country','Population']].dropna()\n\n def find_nearest_place_population(reference_value):\n\n if reference_value:\n nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]\n nearest_population = nearest_row['Population']\n else:\n nearest_population = 0.00\n\n return nearest_population\n\n def find_nearest_place(reference_value):\n\n if reference_value:\n nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]\n nearest_place = nearest_row['City, State, Country']\n else:\n nearest_place = ''\n\n return nearest_place\n\n df_needs['Place with similar population as people in need'] = df_needs['Total people in need'].apply(\n find_nearest_place)\n # Add metadata\n metadata_dict['Place with similar population as people in need'] = {}\n\n df_needs['Population of place with similar population'] = df_needs['Total people in need'].apply(\n find_nearest_place_population)\n # Add metadata\n metadata_dict['Population of place with similar population'] = {}\n\n #################### SAMPLE CLUSTERS ####################\n\n # Build a dataframe\n # df_clusters = pd.read_json('sample_clusters.json').T\n # df_clusters = df_clusters[['clusters']]\n\n\n ################# COMBINE ALL DATA ##############\n\n # Make a list of all dataframes\n all_dataframes = [\n df_country_names,\n df_populations_refugeelike_asylum,\n df_indicators_gni,\n df_plans_progress,\n df_population,\n df_fragile_state,\n df_needs,\n df_funding_dest_country,\n df_funding_dest_donors,\n df_displacement_stories,\n df_populations_refugeelike_origin\n # df_clusters\n ]\n\n df_final = pd.concat(all_dataframes, axis=1)\n\n # Add calculation for displaced people as a ratio of total population\n df_final['Population of concern per 1000 population'] = (df_final['Total population of concern'] \/ df_final[\n 'Population'])*1000\n # And metadata\n metadata_dict['Population of concern per 1000 population'] = {}\n metadata_dict['Population of concern per 1000 population']['Calculation'] = '(Total population of concern \/ Population) * 1000'\n\n # Add calculation for displaced 
people per million GDP\n df_final['Population of concern per million GDP'] = ((df_final['Total population of concern'] * 1000000) \/ (df_final[\n 'GDP Per Capita'] * df_final['Population']))\n # And metadata\n metadata_dict['Population of concern per million GDP'] = {}\n metadata_dict['Population of concern per million GDP']['Calculation'] = '(Total population of concern] * 1000000) \/ (GDP Per Capita * Population)'\n\n # Add field to specify whether country has current humanitarian appeal in FTS\n df_final['Country has current appeal'] = df_final['Appeal funds requested'].notnull()\n # And metadata\n metadata_dict['Country has current appeal'] = {}\n metadata_dict['Country has current appeal']['Calculation'] = 'Is Appeal funds requested not null'\n\n\n # Make the ranked variables ordinal\n\n def get_ordinal_number(value):\n try:\n value = int(value)\n except ValueError:\n return value\n\n if value % 100 \/\/ 10 != 1:\n if value % 10 == 1:\n ordval = u\"%d%s\" % (value, \"st\")\n elif value % 10 == 2:\n ordval = u\"%d%s\" % (value, \"nd\")\n elif value % 10 == 3:\n ordval = u\"%d%s\" % (value, \"rd\")\n else:\n ordval = u\"%d%s\" % (value, \"th\")\n else:\n ordval = u\"%d%s\" % (value, \"th\")\n\n return ordval\n\n df_final['Rank of total population of concern'] = df_final['Rank of total population of concern'].apply(\n get_ordinal_number)\n\n df_final['Rank of humanitarian aid received'] = df_final['Rank of humanitarian aid received'].apply(\n get_ordinal_number)\n\n\n ################## STRUCTURE DICTIONARY ##################\n\n # Clean up NaN values\n df_final = df_final.fillna('')\n\n # Transform dataframe to dictionary\n df_as_dict = df_final.to_dict(orient='index')\n\n # Define field names for each strand\n strand_01_fields = ['Appeal funds still needed', 'Appeal funds requested', 'Appeal funds committed to date',\n 'Appeal percent funded', 'Source of needs data', 'Source type of needs data',\n 'Total people in need', 'Place with similar population as people in need',\n 'Population of place with similar population']\n strand_02_fields = ['Population of concern per 1000 population', 'Fragile State Index Score',\n 'Total population of concern',\n 'IDPs protected\/assisted by UNHCR',\n 'GDP Per Capita',\n 'Total refugees and asylum-seekers (asylum)',\n 'Total refugees and asylum-seekers (origin)']\n strand_03_fields = ['Humanitarian aid received', 'Appeal funds requested', 'Appeal percent funded',\n 'Rank of total population of concern', 'Rank of humanitarian aid received']\n\n needs_fields = ['People in need of health support','Children in need of education',\n 'People who are food insecure','People in need of protection','People in need of shelter',\n 'People in need of water, sanitization & hygiene']\n\n story_fields = ['storyTitle', 'storySource', 'storyTagLine', 'storyURL']\n\n # For every object, get \/ group the values by strand\n data = {}\n for x in df_as_dict.keys():\n\n # Create an empty dict\n country_dict = {}\n\n # Populate the dict with those value that don't require nesting\n country_dict['Country'] = df_as_dict[x]['Country']\n country_dict['Fragile State Index Rank'] = df_as_dict[x]['Fragile State Index Rank']\n country_dict['Country has current appeal'] = df_as_dict[x]['Country has current appeal']\n\n # Populate the dict with story fields\n story_fields_dict = {}\n if df_as_dict[x]['storyURL']:\n for field in story_fields:\n story_fields_dict[field] = (df_as_dict[x][field])\n country_dict['Displacement_story'] = story_fields_dict\n\n # Populate the dict with strand 1 
data if the country has a current appeal\n strand_01_dict = {}\n if df_as_dict[x]['Country has current appeal']:\n strand_01_dict['Needs_Data'] = {}\n for names_01 in strand_01_fields:\n strand_01_dict[names_01] = (df_as_dict[x][names_01])\n for name in needs_fields:\n if df_as_dict[x][name] != '':\n strand_01_dict['Needs_Data'][name] = (df_as_dict[x][name])\n country_dict['Strand_01_Needs'] = strand_01_dict\n\n # Populate the dict with strand 2 data\n strand_02_dict = {}\n for names_02 in strand_02_fields:\n strand_02_dict[names_02] = (df_as_dict[x][names_02])\n country_dict['Strand_02_People'] = strand_02_dict\n\n # Populate the dict with strand 3 data\n strand_03_dict = {}\n strand_03_dict['Top 5 donors of humanitarian aid'] = []\n for names_03 in strand_03_fields:\n strand_03_dict[names_03] = (df_as_dict[x][names_03])\n if df_as_dict[x]['Top 5 Donors']:\n strand_03_dict['Top 5 donors of humanitarian aid'] = df_as_dict[x]['Top 5 Donors']\n country_dict['Strand_03_Aid'] = strand_03_dict\n\n # Add the country dict to the data dict\n data[x] = country_dict\n\n\n # Add World totals\n # Create an empty dict\n world_dict = {}\n\n # Populate the dict with aggregated strand 1 data\n strand_01_dict = {}\n strand_01_dict['Needs_Data'] = {}\n strand_01_dict['Total people in need'] = df_needs['Total people in need'].sum()\n strand_01_dict['Count of current crises with people in need'] = df_needs['Total people in need'].count()\n strand_01_dict['Place with similar population as people in need'] = find_nearest_place(\n df_needs['Total people in need'].sum()\n )\n strand_01_dict['Population of place with similar population'] = find_nearest_place_population(\n df_needs['Total people in need'].sum()\n )\n for name in needs_fields:\n strand_01_dict['Needs_Data'][name] = df_needs[name].sum()\n world_dict['Strand_01_Needs'] = strand_01_dict\n\n # Add the world dict to the data dict\n data['WORLD'] = world_dict\n\n\n\n # Create the metadata dict\n metadata = {}\n\n # Populate the dict with those value that don't require nesting\n #metadata['Country'] = metadata_dict['Country']\n metadata['Fragile State Index Rank'] = metadata_dict['Fragile State Index Rank']\n metadata['Country has current appeal'] = metadata_dict['Country has current appeal']\n\n # Populate the dict with story fields\n story_fields_dict = {}\n if metadata_dict['storyURL']:\n for field in story_fields:\n story_fields_dict[field] = (metadata_dict[field])\n metadata['Displacement_story'] = story_fields_dict\n\n # Populate the dict with strand 1 data if the country has a current appeal\n strand_01_dict = {}\n strand_01_dict['Needs_Data'] = {}\n for names_01 in strand_01_fields:\n strand_01_dict[names_01] = (metadata_dict[names_01])\n metadata['Strand_01_Needs'] = strand_01_dict\n\n # Populate the dict with strand 2 data\n strand_02_dict = {}\n for names_02 in strand_02_fields:\n strand_02_dict[names_02] = (metadata_dict[names_02])\n metadata['Strand_02_People'] = strand_02_dict\n\n # Populate the dict with strand 3 data\n strand_03_dict = {}\n strand_03_dict['Top 5 donors of humanitarian aid'] = []\n for names_03 in strand_03_fields:\n strand_03_dict[names_03] = (metadata_dict[names_03])\n if metadata_dict['Top 5 Donors']:\n strand_03_dict['Top 5 donors of humanitarian aid'] = metadata_dict['Top 5 Donors']\n metadata['Strand_03_Aid'] = strand_03_dict\n\n\n # At the higher level, structure the json with 'data' and 'metadata'\n final_json = {\n 'data': data,\n 'metadata': metadata\n }\n\n return final_json, metadata, df_final\n\n\ndef run():\n 
print 'Pulling and merging data'\n final_json, metadata, final_csv = merge_data()\n\n print 'Writing Combined JSON file'\n with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.json'), 'w') as outfile:\n json.dump(final_json, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)\n\n print 'Writing Combined JSON metadata file'\n with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker_metadata.json'), 'w') as outfile:\n json.dump(metadata, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)\n\n print 'Writing Combined CSV file'\n final_csv.to_csv(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.csv'), index_label='CountryCode', encoding='utf-8')\n\n\nif __name__ == \"__main__\":\n run()\n","license":"mit"} {"repo_name":"Tong-Chen\/scikit-learn","path":"sklearn\/tests\/test_cross_validation.py","copies":"4","size":"30858","content":"\"\"\"Test the cross_validation module\"\"\"\nfrom __future__ import division\nimport warnings\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix\n\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_not_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import ignore_warnings\n\nfrom sklearn.utils.fixes import unique\n\nfrom sklearn import cross_validation as cval\nfrom sklearn.base import BaseEstimator\nfrom sklearn.datasets import make_regression\nfrom sklearn.datasets import load_digits\nfrom sklearn.datasets import load_iris\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import explained_variance_score\nfrom sklearn.metrics import fbeta_score\nfrom sklearn.metrics import make_scorer\n\nfrom sklearn.externals import six\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVC\n\n\nclass MockListClassifier(BaseEstimator):\n \"\"\"Dummy classifier to test the cross-validation.\n\n Checks that GridSearchCV didn't convert X to array.\n \"\"\"\n def __init__(self, foo_param=0):\n self.foo_param = foo_param\n\n def fit(self, X, Y):\n assert_true(len(X) == len(Y))\n assert_true(isinstance(X, list))\n return self\n\n def predict(self, T):\n return T.shape[0]\n\n def score(self, X=None, Y=None):\n if self.foo_param > 1:\n score = 1.\n else:\n score = 0.\n return score\n\n\nclass MockClassifier(BaseEstimator):\n \"\"\"Dummy classifier to test the cross-validation\"\"\"\n\n def __init__(self, a=0):\n self.a = a\n\n def fit(self, X, Y=None, sample_weight=None, class_prior=None):\n if sample_weight is not None:\n assert_true(sample_weight.shape[0] == X.shape[0],\n 'MockClassifier extra fit_param sample_weight.shape[0]'\n ' is {0}, should be {1}'.format(sample_weight.shape[0],\n X.shape[0]))\n if class_prior is not None:\n assert_true(class_prior.shape[0] == len(np.unique(y)),\n 'MockClassifier extra fit_param class_prior.shape[0]'\n ' is {0}, should be {1}'.format(class_prior.shape[0],\n len(np.unique(y))))\n return self\n\n def predict(self, T):\n return T.shape[0]\n\n def score(self, X=None, Y=None):\n return 1. 
\/ (1 + np.abs(self.a))\n\n\nX = np.ones((10, 2))\nX_sparse = coo_matrix(X)\ny = np.arange(10) \/\/ 2\n\n##############################################################################\n# Tests\n\ndef check_valid_split(train, test, n_samples=None):\n # Use python sets to get more informative assertion failure messages\n train, test = set(train), set(test)\n\n # Train and test split should not overlap\n assert_equal(train.intersection(test), set())\n\n if n_samples is not None:\n # Check that the union of train an test split cover all the indices\n assert_equal(train.union(test), set(range(n_samples)))\n\n\ndef check_cv_coverage(cv, expected_n_iter=None, n_samples=None):\n # Check that a all the samples appear at least once in a test fold\n if expected_n_iter is not None:\n assert_equal(len(cv), expected_n_iter)\n else:\n expected_n_iter = len(cv)\n\n collected_test_samples = set()\n iterations = 0\n for train, test in cv:\n check_valid_split(train, test, n_samples=n_samples)\n iterations += 1\n collected_test_samples.update(test)\n\n # Check that the accumulated test samples cover the whole dataset\n assert_equal(iterations, expected_n_iter)\n if n_samples is not None:\n assert_equal(collected_test_samples, set(range(n_samples)))\n\n\ndef test_kfold_valueerrors():\n # Check that errors are raised if there is not enough samples\n assert_raises(ValueError, cval.KFold, 3, 4)\n\n # Check that a warning is raised if the least populated class has too few\n # members.\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n y = [3, 3, -1, -1, 2]\n cv = cval.StratifiedKFold(y, 3)\n # checking there was only one warning.\n assert_equal(len(w), 1)\n # checking it has the right type\n assert_equal(w[0].category, Warning)\n # checking it's the right warning. 
This might be a bad test since it's\n # a characteristic of the code and not a behavior\n assert_true(\"The least populated class\" in str(w[0]))\n\n # Check that despite the warning the folds are still computed even\n # though all the classes are not necessarily represented at on each\n # side of the split at each split\n check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))\n\n # Error when number of folds is <= 1\n assert_raises(ValueError, cval.KFold, 2, 0)\n assert_raises(ValueError, cval.KFold, 2, 1)\n assert_raises(ValueError, cval.StratifiedKFold, y, 0)\n assert_raises(ValueError, cval.StratifiedKFold, y, 1)\n\n # When n is not integer:\n assert_raises(ValueError, cval.KFold, 2.5, 2)\n\n # When n_folds is not integer:\n assert_raises(ValueError, cval.KFold, 5, 1.5)\n assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)\n\n\ndef test_kfold_indices():\n # Check all indices are returned in the test folds\n kf = cval.KFold(300, 3)\n check_cv_coverage(kf, expected_n_iter=3, n_samples=300)\n\n # Check all indices are returned in the test folds even when equal-sized\n # folds are not possible\n kf = cval.KFold(17, 3)\n check_cv_coverage(kf, expected_n_iter=3, n_samples=17)\n\n\ndef test_kfold_no_shuffle():\n # Manually check that KFold preserves the data ordering on toy datasets\n splits = iter(cval.KFold(4, 2))\n train, test = next(splits)\n assert_array_equal(test, [0, 1])\n assert_array_equal(train, [2, 3])\n\n train, test = next(splits)\n assert_array_equal(test, [2, 3])\n assert_array_equal(train, [0, 1])\n\n splits = iter(cval.KFold(5, 2))\n train, test = next(splits)\n assert_array_equal(test, [0, 1, 2])\n assert_array_equal(train, [3, 4])\n\n train, test = next(splits)\n assert_array_equal(test, [3, 4])\n assert_array_equal(train, [0, 1, 2])\n\n\ndef test_stratified_kfold_no_shuffle():\n # Manually check that StratifiedKFold preserves the data ordering as much\n # as possible on toy datasets in order to avoid hiding sample dependencies\n # when possible\n splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))\n train, test = next(splits)\n assert_array_equal(test, [0, 2])\n assert_array_equal(train, [1, 3])\n\n train, test = next(splits)\n assert_array_equal(test, [1, 3])\n assert_array_equal(train, [0, 2])\n\n splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))\n train, test = next(splits)\n assert_array_equal(test, [0, 1, 3, 4])\n assert_array_equal(train, [2, 5, 6])\n\n train, test = next(splits)\n assert_array_equal(test, [2, 5, 6])\n assert_array_equal(train, [0, 1, 3, 4])\n\n\ndef test_stratified_kfold_ratios():\n # Check that stratified kfold preserves label ratios in individual splits\n n_samples = 1000\n labels = np.array([4] * int(0.10 * n_samples) +\n [0] * int(0.89 * n_samples) +\n [1] * int(0.01 * n_samples))\n\n for train, test in cval.StratifiedKFold(labels, 5):\n assert_almost_equal(np.sum(labels[train] == 4) \/ len(train), 0.10, 2)\n assert_almost_equal(np.sum(labels[train] == 0) \/ len(train), 0.89, 2)\n assert_almost_equal(np.sum(labels[train] == 1) \/ len(train), 0.01, 2)\n assert_almost_equal(np.sum(labels[test] == 4) \/ len(test), 0.10, 2)\n assert_almost_equal(np.sum(labels[test] == 0) \/ len(test), 0.89, 2)\n assert_almost_equal(np.sum(labels[test] == 1) \/ len(test), 0.01, 2)\n\n\ndef test_kfold_balance():\n # Check that KFold returns folds with balanced sizes\n for kf in [cval.KFold(i, 5) for i in range(11, 17)]:\n sizes = []\n for _, test in kf:\n sizes.append(len(test))\n\n assert_true((np.max(sizes) - np.min(sizes)) <= 1)\n 
assert_equal(np.sum(sizes), kf.n)\n\n\ndef test_stratifiedkfold_balance():\n # Check that KFold returns folds with balanced sizes (only when\n # stratification is possible)\n labels = [0] * 3 + [1] * 14\n for skf in [cval.StratifiedKFold(labels[:i], 3) for i in range(11, 17)]:\n sizes = []\n for _, test in skf:\n sizes.append(len(test))\n\n assert_true((np.max(sizes) - np.min(sizes)) <= 1)\n assert_equal(np.sum(sizes), skf.n)\n\n\ndef test_shuffle_kfold():\n # Check the indices are shuffled properly, and that all indices are\n # returned in the different test folds\n kf = cval.KFold(300, 3, shuffle=True, random_state=0)\n ind = np.arange(300)\n\n all_folds = None\n for train, test in kf:\n sorted_array = np.arange(100)\n assert_true(np.any(sorted_array != ind[train]))\n sorted_array = np.arange(101, 200)\n assert_true(np.any(sorted_array != ind[train]))\n sorted_array = np.arange(201, 300)\n assert_true(np.any(sorted_array != ind[train]))\n if all_folds is None:\n all_folds = ind[test].copy()\n else:\n all_folds = np.concatenate((all_folds, ind[test]))\n\n all_folds.sort()\n assert_array_equal(all_folds, ind)\n\n\ndef test_kfold_can_detect_dependent_samples_on_digits(): # see #2372\n # The digits samples are dependent: they are apparently grouped by authors\n # although we don't have any information on the groups segment locations\n # for this data. We can highlight this fact be computing k-fold cross-\n # validation with and without shuffling: we observe that the shuffling case\n # wrongly makes the IID assumption and is therefore too optimistic: it\n # estimates a much higher accuracy (around 0.96) than than the non\n # shuffling variant (around 0.86).\n\n digits = load_digits()\n X, y = digits.data[:800], digits.target[:800]\n model = SVC(C=10, gamma=0.005)\n n = len(y)\n\n cv = cval.KFold(n, 5, shuffle=False)\n mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()\n assert_greater(0.88, mean_score)\n assert_greater(mean_score, 0.85)\n\n # Shuffling the data artificially breaks the dependency and hides the\n # overfitting of the model w.r.t. the writing style of the authors\n # by yielding a seriously overestimated score:\n\n cv = cval.KFold(n, 5, shuffle=True, random_state=0)\n mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()\n assert_greater(mean_score, 0.95)\n\n cv = cval.KFold(n, 5, shuffle=True, random_state=1)\n mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()\n assert_greater(mean_score, 0.95)\n\n # Similarly, StratifiedKFold should try to shuffle the data as little\n # as possible (while respecting the balanced class constraints)\n # and thus be able to detect the dependency by not overestimating\n # the CV score either. 
As the digits dataset is approximately balanced\n # the estimated mean score is close to the score measured with\n # non-shuffled KFold\n\n cv = cval.StratifiedKFold(y, 5)\n mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()\n assert_greater(0.88, mean_score)\n assert_greater(mean_score, 0.85)\n\n\ndef test_shuffle_split():\n ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)\n ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)\n ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)\n for typ in six.integer_types:\n ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)\n for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):\n assert_array_equal(t1[0], t2[0])\n assert_array_equal(t2[0], t3[0])\n assert_array_equal(t3[0], t4[0])\n assert_array_equal(t1[1], t2[1])\n assert_array_equal(t2[1], t3[1])\n assert_array_equal(t3[1], t4[1])\n\n\ndef test_stratified_shuffle_split_init():\n y = np.asarray([0, 1, 1, 1, 2, 2, 2])\n # Check that error is raised if there is a class with only one sample\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)\n\n # Check that error is raised if the test set size is smaller than n_classes\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)\n # Check that error is raised if the train set size is smaller than\n # n_classes\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)\n\n y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])\n # Check that errors are raised if there is not enough samples\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)\n\n # Train size or test size too small\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)\n assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)\n\n\ndef test_stratified_shuffle_split_iter():\n ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),\n np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),\n np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),\n np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),\n np.array([-1] * 800 + [1] * 50)\n ]\n\n for y in ys:\n sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,\n random_state=0)\n for train, test in sss:\n assert_array_equal(unique(y[train]), unique(y[test]))\n # Checks if folds keep classes proportions\n p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) \/\n float(len(y[train])))\n p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) \/\n float(len(y[test])))\n assert_array_almost_equal(p_train, p_test, 1)\n assert_equal(y[train].size + y[test].size, y.size)\n assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])\n\n\n@ignore_warnings\ndef test_stratified_shuffle_split_iter_no_indices():\n y = np.asarray([0, 1, 2] * 10)\n\n sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)\n train_mask, test_mask = next(iter(sss1))\n\n sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)\n train_indices, test_indices = next(iter(sss2))\n\n assert_array_equal(sorted(test_indices), np.where(test_mask)[0])\n\n\ndef test_leave_label_out_changing_labels():\n \"\"\"Check that LeaveOneLabelOut and LeavePLabelOut work normally if\n the labels variable is changed before calling __iter__\"\"\"\n labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])\n labels_changing = np.array(labels, copy=True)\n lolo = cval.LeaveOneLabelOut(labels)\n lolo_changing 
= cval.LeaveOneLabelOut(labels_changing)\n lplo = cval.LeavePLabelOut(labels, p=2)\n lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)\n labels_changing[:] = 0\n for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:\n for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):\n assert_array_equal(train, train_chan)\n assert_array_equal(test, test_chan)\n\n\ndef test_cross_val_score():\n clf = MockClassifier()\n for a in range(-10, 10):\n clf.a = a\n # Smoke test\n scores = cval.cross_val_score(clf, X, y)\n assert_array_equal(scores, clf.score(X, y))\n\n # test with multioutput y\n scores = cval.cross_val_score(clf, X_sparse, X)\n assert_array_equal(scores, clf.score(X_sparse, X))\n\n scores = cval.cross_val_score(clf, X_sparse, y)\n assert_array_equal(scores, clf.score(X_sparse, y))\n\n # test with multioutput y\n scores = cval.cross_val_score(clf, X_sparse, X)\n assert_array_equal(scores, clf.score(X_sparse, X))\n\n # test with X as list\n clf = MockListClassifier()\n scores = cval.cross_val_score(clf, X.tolist(), y)\n\n assert_raises(ValueError, cval.cross_val_score, clf, X, y,\n scoring=\"sklearn\")\n\n\ndef test_cross_val_score_precomputed():\n # test for svm with precomputed kernel\n svm = SVC(kernel=\"precomputed\")\n iris = load_iris()\n X, y = iris.data, iris.target\n linear_kernel = np.dot(X, X.T)\n score_precomputed = cval.cross_val_score(svm, linear_kernel, y)\n svm = SVC(kernel=\"linear\")\n score_linear = cval.cross_val_score(svm, X, y)\n assert_array_equal(score_precomputed, score_linear)\n\n # Error raised for non-square X\n svm = SVC(kernel=\"precomputed\")\n assert_raises(ValueError, cval.cross_val_score, svm, X, y)\n\n # test error is raised when the precomputed kernel is not array-like\n # or sparse\n assert_raises(ValueError, cval.cross_val_score, svm,\n linear_kernel.tolist(), y)\n\n\ndef test_cross_val_score_fit_params():\n clf = MockClassifier()\n n_samples = X.shape[0]\n n_classes = len(np.unique(y))\n fit_params = {'sample_weight': np.ones(n_samples),\n 'class_prior': np.ones(n_classes) \/ n_classes}\n cval.cross_val_score(clf, X, y, fit_params=fit_params)\n\n\ndef test_cross_val_score_score_func():\n clf = MockClassifier()\n _score_func_args = []\n\n def score_func(y_test, y_predict):\n _score_func_args.append((y_test, y_predict))\n return 1.0\n\n with warnings.catch_warnings(record=True):\n score = cval.cross_val_score(clf, X, y, score_func=score_func)\n assert_array_equal(score, [1.0, 1.0, 1.0])\n assert len(_score_func_args) == 3\n\n\ndef test_cross_val_score_errors():\n class BrokenEstimator:\n pass\n\n assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)\n\n\ndef test_train_test_split_errors():\n assert_raises(ValueError, cval.train_test_split)\n assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)\n assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,\n train_size=0.6)\n assert_raises(ValueError, cval.train_test_split, range(3),\n test_size=np.float32(0.6), train_size=np.float32(0.6))\n assert_raises(ValueError, cval.train_test_split, range(3),\n test_size=\"wrong_type\")\n assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,\n train_size=4)\n assert_raises(TypeError, cval.train_test_split, range(3),\n some_argument=1.1)\n assert_raises(ValueError, cval.train_test_split, range(3), range(42))\n\n\ndef test_train_test_split():\n X = np.arange(100).reshape((10, 10))\n X_s = coo_matrix(X)\n y = range(10)\n split = cval.train_test_split(X, X_s, y)\n 
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split\n assert_array_equal(X_train, X_s_train.toarray())\n assert_array_equal(X_test, X_s_test.toarray())\n assert_array_equal(X_train[:, 0], y_train * 10)\n assert_array_equal(X_test[:, 0], y_test * 10)\n split = cval.train_test_split(X, y, test_size=None, train_size=.5)\n X_train, X_test, y_train, y_test = split\n assert_equal(len(y_test), len(y_train))\n\n\ndef test_cross_val_score_with_score_func_classification():\n iris = load_iris()\n clf = SVC(kernel='linear')\n\n # Default score (should be the accuracy score)\n scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)\n assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)\n\n # Correct classification score (aka. zero \/ one score) - should be the\n # same as the default estimator score\n zo_scores = cval.cross_val_score(clf, iris.data, iris.target,\n scoring=\"accuracy\", cv=5)\n assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)\n\n # F1 score (class are balanced so f1_score should be equal to zero\/one\n # score\n f1_scores = cval.cross_val_score(clf, iris.data, iris.target,\n scoring=\"f1\", cv=5)\n assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)\n # also test deprecated old way\n with warnings.catch_warnings(record=True):\n f1_scores = cval.cross_val_score(clf, iris.data, iris.target,\n score_func=f1_score, cv=5)\n assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)\n\n\ndef test_cross_val_score_with_score_func_regression():\n X, y = make_regression(n_samples=30, n_features=20, n_informative=5,\n random_state=0)\n reg = Ridge()\n\n # Default score of the Ridge regression estimator\n scores = cval.cross_val_score(reg, X, y, cv=5)\n assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)\n\n # R2 score (aka. 
determination coefficient) - should be the\n # same as the default estimator score\n r2_scores = cval.cross_val_score(reg, X, y, scoring=\"r2\", cv=5)\n assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)\n\n # Mean squared error; this is a loss function, so \"scores\" are negative\n mse_scores = cval.cross_val_score(reg, X, y, cv=5,\n scoring=\"mean_squared_error\")\n expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])\n assert_array_almost_equal(mse_scores, expected_mse, 2)\n\n # Explained variance\n with warnings.catch_warnings(record=True):\n ev_scores = cval.cross_val_score(reg, X, y, cv=5,\n score_func=explained_variance_score)\n assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)\n\n\ndef test_permutation_score():\n iris = load_iris()\n X = iris.data\n X_sparse = coo_matrix(X)\n y = iris.target\n svm = SVC(kernel='linear')\n cv = cval.StratifiedKFold(y, 2)\n\n score, scores, pvalue = cval.permutation_test_score(\n svm, X, y, cv=cv, scoring=\"accuracy\")\n assert_greater(score, 0.9)\n assert_almost_equal(pvalue, 0.0, 1)\n\n score_label, _, pvalue_label = cval.permutation_test_score(\n svm, X, y, cv=cv, scoring=\"accuracy\", labels=np.ones(y.size),\n random_state=0)\n assert_true(score_label == score)\n assert_true(pvalue_label == pvalue)\n\n # test with custom scoring object\n scorer = make_scorer(fbeta_score, beta=2)\n score_label, _, pvalue_label = cval.permutation_test_score(\n svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),\n random_state=0)\n assert_almost_equal(score_label, .97, 2)\n assert_almost_equal(pvalue_label, 0.01, 3)\n\n # check that we obtain the same results with a sparse representation\n svm_sparse = SVC(kernel='linear')\n cv_sparse = cval.StratifiedKFold(y, 2)\n score_label, _, pvalue_label = cval.permutation_test_score(\n svm_sparse, X_sparse, y, cv=cv_sparse,\n scoring=\"accuracy\", labels=np.ones(y.size), random_state=0)\n\n assert_true(score_label == score)\n assert_true(pvalue_label == pvalue)\n\n # set random y\n y = np.mod(np.arange(len(y)), 3)\n\n score, scores, pvalue = cval.permutation_test_score(svm, X, y, cv=cv,\n scoring=\"accuracy\")\n\n assert_less(score, 0.5)\n assert_greater(pvalue, 0.2)\n\n # test with deprecated interface\n with warnings.catch_warnings(record=True):\n score, scores, pvalue = cval.permutation_test_score(\n svm, X, y, score_func=accuracy_score, cv=cv)\n assert_less(score, 0.5)\n assert_greater(pvalue, 0.2)\n\n\ndef test_cross_val_generator_with_mask():\n X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n y = np.array([1, 1, 2, 2])\n labels = np.array([1, 2, 3, 4])\n loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,\n 4, indices=False)\n lpo = assert_warns(DeprecationWarning, cval.LeavePOut,\n 4, 2, indices=False)\n kf = assert_warns(DeprecationWarning, cval.KFold,\n 4, 2, indices=False)\n skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,\n y, 2, indices=False)\n lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,\n labels, indices=False)\n lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,\n labels, 2, indices=False)\n ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,\n 4, indices=False)\n for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:\n for train, test in cv:\n assert_equal(np.asarray(train).dtype.kind, 'b')\n assert_equal(np.asarray(train).dtype.kind, 'b')\n X_train, X_test = X[train], X[test]\n y_train, y_test = y[train], y[test]\n\n\ndef test_cross_val_generator_with_indices():\n X = np.array([[1, 2], [3, 4], [5, 
6], [7, 8]])\n y = np.array([1, 1, 2, 2])\n labels = np.array([1, 2, 3, 4])\n # explicitly passing indices value is deprecated\n loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,\n 4, indices=True)\n lpo = assert_warns(DeprecationWarning, cval.LeavePOut,\n 4, 2, indices=True)\n kf = assert_warns(DeprecationWarning, cval.KFold,\n 4, 2, indices=True)\n skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,\n y, 2, indices=True)\n lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,\n labels, indices=True)\n lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,\n labels, 2, indices=True)\n b = cval.Bootstrap(2) # only in index mode\n ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,\n 2, indices=True)\n for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:\n for train, test in cv:\n assert_not_equal(np.asarray(train).dtype.kind, 'b')\n assert_not_equal(np.asarray(train).dtype.kind, 'b')\n X_train, X_test = X[train], X[test]\n y_train, y_test = y[train], y[test]\n\n\ndef test_cross_val_generator_with_default_indices():\n X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n y = np.array([1, 1, 2, 2])\n labels = np.array([1, 2, 3, 4])\n loo = cval.LeaveOneOut(4)\n lpo = cval.LeavePOut(4, 2)\n kf = cval.KFold(4, 2)\n skf = cval.StratifiedKFold(y, 2)\n lolo = cval.LeaveOneLabelOut(labels)\n lopo = cval.LeavePLabelOut(labels, 2)\n b = cval.Bootstrap(2) # only in index mode\n ss = cval.ShuffleSplit(2)\n for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:\n for train, test in cv:\n assert_not_equal(np.asarray(train).dtype.kind, 'b')\n assert_not_equal(np.asarray(train).dtype.kind, 'b')\n X_train, X_test = X[train], X[test]\n y_train, y_test = y[train], y[test]\n\n\n@ignore_warnings\ndef test_cross_val_generator_mask_indices_same():\n # Test that the cross validation generators return the same results when\n # indices=True and when indices=False\n y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])\n labels = np.array([1, 1, 2, 3, 3, 3, 4])\n\n loo_mask = cval.LeaveOneOut(5, indices=False)\n loo_ind = cval.LeaveOneOut(5, indices=True)\n lpo_mask = cval.LeavePOut(10, 2, indices=False)\n lpo_ind = cval.LeavePOut(10, 2, indices=True)\n kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)\n kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)\n skf_mask = cval.StratifiedKFold(y, 3, indices=False)\n skf_ind = cval.StratifiedKFold(y, 3, indices=True)\n lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)\n lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)\n lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)\n lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)\n\n for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),\n (kf_mask, kf_ind), (skf_mask, skf_ind),\n (lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:\n for (train_mask, test_mask), (train_ind, test_ind) in \\\n zip(cv_mask, cv_ind):\n assert_array_equal(np.where(train_mask)[0], train_ind)\n assert_array_equal(np.where(test_mask)[0], test_ind)\n\n\ndef test_bootstrap_errors():\n assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)\n assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)\n assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)\n assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)\n\n\ndef test_bootstrap_test_sizes():\n assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)\n assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)\n assert_equal(cval.Bootstrap(10, test_size=None).test_size, 
5)\n\n\ndef test_shufflesplit_errors():\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,\n train_size=0.95)\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)\n assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)\n assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,\n train_size=None)\n\n\ndef test_shufflesplit_reproducible():\n # Check that iterating twice on the ShuffleSplit gives the same\n # sequence of train-test when the random_state is given\n ss = cval.ShuffleSplit(10, random_state=21)\n assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))\n\n\n@ignore_warnings\ndef test_cross_indices_exception():\n X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))\n y = np.array([1, 1, 2, 2])\n labels = np.array([1, 2, 3, 4])\n loo = cval.LeaveOneOut(4, indices=False)\n lpo = cval.LeavePOut(4, 2, indices=False)\n kf = cval.KFold(4, 2, indices=False)\n skf = cval.StratifiedKFold(y, 2, indices=False)\n lolo = cval.LeaveOneLabelOut(labels, indices=False)\n lopo = cval.LeavePLabelOut(labels, 2, indices=False)\n\n assert_raises(ValueError, cval.check_cv, loo, X, y)\n assert_raises(ValueError, cval.check_cv, lpo, X, y)\n assert_raises(ValueError, cval.check_cv, kf, X, y)\n assert_raises(ValueError, cval.check_cv, skf, X, y)\n assert_raises(ValueError, cval.check_cv, lolo, X, y)\n assert_raises(ValueError, cval.check_cv, lopo, X, y)\n","license":"bsd-3-clause"} {"repo_name":"ahoyosid\/scikit-learn","path":"examples\/applications\/plot_stock_market.py","copies":"227","size":"8284","content":"\"\"\"\n=======================================\nVisualizing the stock market structure\n=======================================\n\nThis example employs several unsupervised learning techniques to extract\nthe stock market structure from variations in historical quotes.\n\nThe quantity that we use is the daily variation in quote price: quotes\nthat are linked tend to cofluctuate during a day.\n\n.. _stock_market:\n\nLearning a graph structure\n--------------------------\n\nWe use sparse inverse covariance estimation to find which quotes are\ncorrelated conditionally on the others. Specifically, sparse inverse\ncovariance gives us a graph, that is a list of connection. For each\nsymbol, the symbols that it is connected too are those useful to explain\nits fluctuations.\n\nClustering\n----------\n\nWe use clustering to group together quotes that behave similarly. Here,\namongst the :ref:`various clustering techniques ` available\nin the scikit-learn, we use :ref:`affinity_propagation` as it does\nnot enforce equal-size clusters, and it can choose automatically the\nnumber of clusters from the data.\n\nNote that this gives us a different indication than the graph, as the\ngraph reflects conditional relations between variables, while the\nclustering reflects marginal properties: variables clustered together can\nbe considered as having a similar impact at the level of the full stock\nmarket.\n\nEmbedding in 2D space\n---------------------\n\nFor visualization purposes, we need to lay out the different symbols on a\n2D canvas. 
For this we use :ref:`manifold` techniques to retrieve 2D\nembedding.\n\n\nVisualization\n-------------\n\nThe output of the 3 models are combined in a 2D graph where nodes\nrepresents the stocks and edges the:\n\n- cluster labels are used to define the color of the nodes\n- the sparse covariance model is used to display the strength of the edges\n- the 2D embedding is used to position the nodes in the plan\n\nThis example has a fair amount of visualization-related code, as\nvisualization is crucial here to display the graph. One of the challenge\nis to position the labels minimizing overlap. For this we use an\nheuristic based on the direction of the nearest neighbor along each\naxis.\n\"\"\"\nprint(__doc__)\n\n# Author: Gael Varoquaux gael.varoquaux@normalesup.org\n# License: BSD 3 clause\n\nimport datetime\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import finance\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn import cluster, covariance, manifold\n\n###############################################################################\n# Retrieve the data from Internet\n\n# Choose a time period reasonnably calm (not too long ago so that we get\n# high-tech firms, and before the 2008 crash)\nd1 = datetime.datetime(2003, 1, 1)\nd2 = datetime.datetime(2008, 1, 1)\n\n# kraft symbol has now changed from KFT to MDLZ in yahoo\nsymbol_dict = {\n 'TOT': 'Total',\n 'XOM': 'Exxon',\n 'CVX': 'Chevron',\n 'COP': 'ConocoPhillips',\n 'VLO': 'Valero Energy',\n 'MSFT': 'Microsoft',\n 'IBM': 'IBM',\n 'TWX': 'Time Warner',\n 'CMCSA': 'Comcast',\n 'CVC': 'Cablevision',\n 'YHOO': 'Yahoo',\n 'DELL': 'Dell',\n 'HPQ': 'HP',\n 'AMZN': 'Amazon',\n 'TM': 'Toyota',\n 'CAJ': 'Canon',\n 'MTU': 'Mitsubishi',\n 'SNE': 'Sony',\n 'F': 'Ford',\n 'HMC': 'Honda',\n 'NAV': 'Navistar',\n 'NOC': 'Northrop Grumman',\n 'BA': 'Boeing',\n 'KO': 'Coca Cola',\n 'MMM': '3M',\n 'MCD': 'Mc Donalds',\n 'PEP': 'Pepsi',\n 'MDLZ': 'Kraft Foods',\n 'K': 'Kellogg',\n 'UN': 'Unilever',\n 'MAR': 'Marriott',\n 'PG': 'Procter Gamble',\n 'CL': 'Colgate-Palmolive',\n 'GE': 'General Electrics',\n 'WFC': 'Wells Fargo',\n 'JPM': 'JPMorgan Chase',\n 'AIG': 'AIG',\n 'AXP': 'American express',\n 'BAC': 'Bank of America',\n 'GS': 'Goldman Sachs',\n 'AAPL': 'Apple',\n 'SAP': 'SAP',\n 'CSCO': 'Cisco',\n 'TXN': 'Texas instruments',\n 'XRX': 'Xerox',\n 'LMT': 'Lookheed Martin',\n 'WMT': 'Wal-Mart',\n 'WBA': 'Walgreen',\n 'HD': 'Home Depot',\n 'GSK': 'GlaxoSmithKline',\n 'PFE': 'Pfizer',\n 'SNY': 'Sanofi-Aventis',\n 'NVS': 'Novartis',\n 'KMB': 'Kimberly-Clark',\n 'R': 'Ryder',\n 'GD': 'General Dynamics',\n 'RTN': 'Raytheon',\n 'CVS': 'CVS',\n 'CAT': 'Caterpillar',\n 'DD': 'DuPont de Nemours'}\n\nsymbols, names = np.array(list(symbol_dict.items())).T\n\nquotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)\n for symbol in symbols]\n\nopen = np.array([q.open for q in quotes]).astype(np.float)\nclose = np.array([q.close for q in quotes]).astype(np.float)\n\n# The daily variations of the quotes are what carry most information\nvariation = close - open\n\n###############################################################################\n# Learn a graphical structure from the correlations\nedge_model = covariance.GraphLassoCV()\n\n# standardize the time series: using correlations rather than covariance\n# is more efficient for structure recovery\nX = variation.copy().T\nX \/= X.std(axis=0)\nedge_model.fit(X)\n\n###############################################################################\n# Cluster using 
affinity propagation\n\n_, labels = cluster.affinity_propagation(edge_model.covariance_)\nn_labels = labels.max()\n\nfor i in range(n_labels + 1):\n print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))\n\n###############################################################################\n# Find a low-dimension embedding for visualization: find the best position of\n# the nodes (the stocks) on a 2D plane\n\n# We use a dense eigen_solver to achieve reproducibility (arpack is\n# initiated with random vectors that we don't control). In addition, we\n# use a large number of neighbors to capture the large-scale structure.\nnode_position_model = manifold.LocallyLinearEmbedding(\n n_components=2, eigen_solver='dense', n_neighbors=6)\n\nembedding = node_position_model.fit_transform(X.T).T\n\n###############################################################################\n# Visualization\nplt.figure(1, facecolor='w', figsize=(10, 8))\nplt.clf()\nax = plt.axes([0., 0., 1., 1.])\nplt.axis('off')\n\n# Display a graph of the partial correlations\npartial_correlations = edge_model.precision_.copy()\nd = 1 \/ np.sqrt(np.diag(partial_correlations))\npartial_correlations *= d\npartial_correlations *= d[:, np.newaxis]\nnon_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)\n\n# Plot the nodes using the coordinates of our embedding\nplt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,\n cmap=plt.cm.spectral)\n\n# Plot the edges\nstart_idx, end_idx = np.where(non_zero)\n#a sequence of (*line0*, *line1*, *line2*), where::\n# linen = (x0, y0), (x1, y1), ... (xm, ym)\nsegments = [[embedding[:, start], embedding[:, stop]]\n for start, stop in zip(start_idx, end_idx)]\nvalues = np.abs(partial_correlations[non_zero])\nlc = LineCollection(segments,\n zorder=0, cmap=plt.cm.hot_r,\n norm=plt.Normalize(0, .7 * values.max()))\nlc.set_array(values)\nlc.set_linewidths(15 * values)\nax.add_collection(lc)\n\n# Add a label to each node. 
The challenge here is that we want to\n# position the labels to avoid overlap with other labels\nfor index, (name, label, (x, y)) in enumerate(\n zip(names, labels, embedding.T)):\n\n dx = x - embedding[0]\n dx[index] = 1\n dy = y - embedding[1]\n dy[index] = 1\n this_dx = dx[np.argmin(np.abs(dy))]\n this_dy = dy[np.argmin(np.abs(dx))]\n if this_dx > 0:\n horizontalalignment = 'left'\n x = x + .002\n else:\n horizontalalignment = 'right'\n x = x - .002\n if this_dy > 0:\n verticalalignment = 'bottom'\n y = y + .002\n else:\n verticalalignment = 'top'\n y = y - .002\n plt.text(x, y, name, size=10,\n horizontalalignment=horizontalalignment,\n verticalalignment=verticalalignment,\n bbox=dict(facecolor='w',\n edgecolor=plt.cm.spectral(label \/ float(n_labels)),\n alpha=.6))\n\nplt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),\n embedding[0].max() + .10 * embedding[0].ptp(),)\nplt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),\n embedding[1].max() + .03 * embedding[1].ptp())\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"DESatAPSU\/DAWDs","path":"python\/origBandpass_FITSToCSV.py","copies":"1","size":"1930","content":"# Converts STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits to\n# y3a2_std_passband_extend3000_ugrizYatm.csv\n# \n# To run (bash):\n# python origBandpass_FITSToCSV.py > origBandpass_FITSToCSV.log 2>&1 &\n#\n# To run (tcsh):\n# python origBandpass_FITSToCSV.py >& origBandpass_FITSToCSV.log &\n#\n# DLT, 2017-06-30\n# based in part on scripts by Jack Mueller and Jacob Robertson.\n\n# Initial setup...\nimport numpy as np\nimport pandas as pd\n\nimport os\nimport string\nimport shutil\n\nimport pyfits\n\n# Be sure to edit these next two line2 appropriately...\nbandsDir = '\/Users\/dtucker\/IRAF\/DECam\/StdBands_Y3A2_extend3000'\ninputFile = bandsDir+'\/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits'\n\n# List of filter bands (plus atm)...\nbandList = ['g', 'r', 'i', 'z', 'Y', 'atm']\n\n# Read in inputFile to create a reformatted version in CSV format...\nhdulist = pyfits.open(inputFile)\ntbdata = hdulist[1].data\n\n# Create lists from each column...\nlambdaList = tbdata['LAMBDA'].tolist()\ngList = tbdata['g'].tolist()\nrList = tbdata['r'].tolist()\niList = tbdata['i'].tolist()\nzList = tbdata['z'].tolist()\nYList = tbdata['Y'].tolist()\natmList = tbdata['atm'].tolist()\n\n# Create pandas dataframe from the lists...\ndf = pd.DataFrame(np.column_stack([lambdaList,gList,rList,iList,zList,YList,atmList]),\n columns=['lambda','g','r','i','z','Y','atm'])\n\n# Output the full table as a CSV file\noutputFile = bandsDir+'\/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.csv'\nif os.path.isfile(outputFile):\n shutil.move(outputFile, outputFile+'~')\ndf.to_csv(outputFile,index=False)\n\n# Output individual bands (+atm)...\nfor band in bandList:\n outputFile = bandsDir+'\/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.'+band+'.csv'\n if os.path.isfile(outputFile):\n shutil.move(outputFile, outputFile+'~')\n columnNames = ['lambda',band]\n df.to_csv(outputFile,index=False,columns=columnNames,header=False)\n\n\n# Finis!\nexit()\n\n","license":"mit"} {"repo_name":"nmartensen\/pandas","path":"asv_bench\/benchmarks\/gil.py","copies":"7","size":"11003","content":"from .pandas_vb_common import *\n\nfrom pandas.core.algorithms import take_1d\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\ntry:\n from pandas._libs import algos\nexcept ImportError:\n from pandas import algos\n\ntry:\n from pandas.util.testing import test_parallel\n\n 
have_real_test_parallel = True\nexcept ImportError:\n have_real_test_parallel = False\n\n\n def test_parallel(num_threads=1):\n\n def wrapper(fname):\n return fname\n\n return wrapper\n\n\nclass NoGilGroupby(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 1000000\n self.ngroups = 1000\n np.random.seed(1234)\n self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })\n\n np.random.seed(1234)\n self.size = 2 ** 22\n self.ngroups = 100\n self.data = Series(np.random.randint(0, self.ngroups, size=self.size))\n\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n @test_parallel(num_threads=2)\n def _pg2_count(self):\n self.df.groupby('key')['data'].count()\n\n def time_count_2(self):\n self._pg2_count()\n\n @test_parallel(num_threads=2)\n def _pg2_last(self):\n self.df.groupby('key')['data'].last()\n\n def time_last_2(self):\n self._pg2_last()\n\n @test_parallel(num_threads=2)\n def _pg2_max(self):\n self.df.groupby('key')['data'].max()\n\n def time_max_2(self):\n self._pg2_max()\n\n @test_parallel(num_threads=2)\n def _pg2_mean(self):\n self.df.groupby('key')['data'].mean()\n\n def time_mean_2(self):\n self._pg2_mean()\n\n @test_parallel(num_threads=2)\n def _pg2_min(self):\n self.df.groupby('key')['data'].min()\n\n def time_min_2(self):\n self._pg2_min()\n\n @test_parallel(num_threads=2)\n def _pg2_prod(self):\n self.df.groupby('key')['data'].prod()\n\n def time_prod_2(self):\n self._pg2_prod()\n\n @test_parallel(num_threads=2)\n def _pg2_sum(self):\n self.df.groupby('key')['data'].sum()\n\n def time_sum_2(self):\n self._pg2_sum()\n\n @test_parallel(num_threads=4)\n def _pg4_sum(self):\n self.df.groupby('key')['data'].sum()\n\n def time_sum_4(self):\n self._pg4_sum()\n\n def time_sum_4_notp(self):\n for i in range(4):\n self.df.groupby('key')['data'].sum()\n\n def _f_sum(self):\n self.df.groupby('key')['data'].sum()\n\n @test_parallel(num_threads=8)\n def _pg8_sum(self):\n self._f_sum()\n\n def time_sum_8(self):\n self._pg8_sum()\n\n def time_sum_8_notp(self):\n for i in range(8):\n self._f_sum()\n\n @test_parallel(num_threads=2)\n def _pg2_var(self):\n self.df.groupby('key')['data'].var()\n\n def time_var_2(self):\n self._pg2_var()\n\n # get groups\n\n def _groups(self):\n self.data.groupby(self.data).groups\n\n @test_parallel(num_threads=2)\n def _pg2_groups(self):\n self._groups()\n\n def time_groups_2(self):\n self._pg2_groups()\n\n @test_parallel(num_threads=4)\n def _pg4_groups(self):\n self._groups()\n\n def time_groups_4(self):\n self._pg4_groups()\n\n @test_parallel(num_threads=8)\n def _pg8_groups(self):\n self._groups()\n\n def time_groups_8(self):\n self._pg8_groups()\n\n\n\nclass nogil_take1d_float64(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 1000000\n self.ngroups = 1000\n np.random.seed(1234)\n self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })\n if (not have_real_test_parallel):\n raise NotImplementedError\n self.N = 10000000.0\n self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })\n self.indexer = np.arange(100, (len(self.df) - 100))\n\n def time_nogil_take1d_float64(self):\n self.take_1d_pg2_int64()\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_int64(self):\n take_1d(self.df.int64.values, self.indexer)\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_float64(self):\n take_1d(self.df.float64.values, self.indexer)\n\n\nclass 
nogil_take1d_int64(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 1000000\n self.ngroups = 1000\n np.random.seed(1234)\n self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })\n if (not have_real_test_parallel):\n raise NotImplementedError\n self.N = 10000000.0\n self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })\n self.indexer = np.arange(100, (len(self.df) - 100))\n\n def time_nogil_take1d_int64(self):\n self.take_1d_pg2_float64()\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_int64(self):\n take_1d(self.df.int64.values, self.indexer)\n\n @test_parallel(num_threads=2)\n def take_1d_pg2_float64(self):\n take_1d(self.df.float64.values, self.indexer)\n\n\nclass nogil_kth_smallest(object):\n number = 1\n repeat = 5\n\n def setup(self):\n if (not have_real_test_parallel):\n raise NotImplementedError\n np.random.seed(1234)\n self.N = 10000000\n self.k = 500000\n self.a = np.random.randn(self.N)\n self.b = self.a.copy()\n self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]\n\n def time_nogil_kth_smallest(self):\n @test_parallel(num_threads=2, kwargs_list=self.kwargs_list)\n def run(arr):\n algos.kth_smallest(arr, self.k)\n run()\n\n\nclass nogil_datetime_fields(object):\n goal_time = 0.2\n\n def setup(self):\n self.N = 100000000\n self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T')\n self.period = self.dti.to_period('D')\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n def time_datetime_field_year(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.year\n run(self.dti)\n\n def time_datetime_field_day(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.day\n run(self.dti)\n\n def time_datetime_field_daysinmonth(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.days_in_month\n run(self.dti)\n\n def time_datetime_field_normalize(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.normalize()\n run(self.dti)\n\n def time_datetime_to_period(self):\n @test_parallel(num_threads=2)\n def run(dti):\n dti.to_period('S')\n run(self.dti)\n\n def time_period_to_datetime(self):\n @test_parallel(num_threads=2)\n def run(period):\n period.to_timestamp()\n run(self.period)\n\n\nclass nogil_rolling_algos_slow(object):\n goal_time = 0.2\n\n def setup(self):\n self.win = 100\n np.random.seed(1234)\n self.arr = np.random.rand(100000)\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n def time_nogil_rolling_median(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_median(arr, win)\n run(self.arr, self.win)\n\n\nclass nogil_rolling_algos_fast(object):\n goal_time = 0.2\n\n def setup(self):\n self.win = 100\n np.random.seed(1234)\n self.arr = np.random.rand(1000000)\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n def time_nogil_rolling_mean(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_mean(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_min(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_min(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_max(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_max(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_var(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_var(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_skew(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n 
rolling_skew(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_kurt(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_kurt(arr, win)\n run(self.arr, self.win)\n\n def time_nogil_rolling_std(self):\n @test_parallel(num_threads=2)\n def run(arr, win):\n rolling_std(arr, win)\n run(self.arr, self.win)\n\n\nclass nogil_read_csv(object):\n number = 1\n repeat = 5\n\n def setup(self):\n if (not have_real_test_parallel):\n raise NotImplementedError\n # Using the values\n self.df = DataFrame(np.random.randn(10000, 50))\n self.df.to_csv('__test__.csv')\n\n self.rng = date_range('1\/1\/2000', periods=10000)\n self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)\n self.df_date_time.to_csv('__test_datetime__.csv')\n\n self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))\n self.df_object.to_csv('__test_object__.csv')\n\n def create_cols(self, name):\n return [('%s%03d' % (name, i)) for i in range(5)]\n\n @test_parallel(num_threads=2)\n def pg_read_csv(self):\n read_csv('__test__.csv', sep=',', header=None, float_precision=None)\n\n def time_read_csv(self):\n self.pg_read_csv()\n\n @test_parallel(num_threads=2)\n def pg_read_csv_object(self):\n read_csv('__test_object__.csv', sep=',')\n\n def time_read_csv_object(self):\n self.pg_read_csv_object()\n\n @test_parallel(num_threads=2)\n def pg_read_csv_datetime(self):\n read_csv('__test_datetime__.csv', sep=',', header=None)\n\n def time_read_csv_datetime(self):\n self.pg_read_csv_datetime()\n\n\nclass nogil_factorize(object):\n number = 1\n repeat = 5\n\n def setup(self):\n if (not have_real_test_parallel):\n raise NotImplementedError\n\n np.random.seed(1234)\n self.strings = tm.makeStringIndex(100000)\n\n def factorize_strings(self):\n pd.factorize(self.strings)\n\n @test_parallel(num_threads=4)\n def _pg_factorize_strings_4(self):\n self.factorize_strings()\n\n def time_factorize_strings_4(self):\n for i in range(2):\n self._pg_factorize_strings_4()\n\n @test_parallel(num_threads=2)\n def _pg_factorize_strings_2(self):\n self.factorize_strings()\n\n def time_factorize_strings_2(self):\n for i in range(4):\n self._pg_factorize_strings_2()\n\n def time_factorize_strings(self):\n for i in range(8):\n self.factorize_strings()\n","license":"bsd-3-clause"} {"repo_name":"great-expectations\/great_expectations","path":"tests\/datasource\/test_batch_generators.py","copies":"1","size":"6706","content":"import os\n\nfrom great_expectations.datasource.batch_kwargs_generator import (\n DatabricksTableBatchKwargsGenerator,\n GlobReaderBatchKwargsGenerator,\n SubdirReaderBatchKwargsGenerator,\n)\n\ntry:\n from unittest import mock\nexcept ImportError:\n from unittest import mock\n\n\ndef test_file_kwargs_generator(\n data_context_parameterized_expectation_suite, filesystem_csv\n):\n base_dir = filesystem_csv\n\n datasource = data_context_parameterized_expectation_suite.add_datasource(\n \"default\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(base_dir),\n }\n },\n )\n\n generator = datasource.get_batch_kwargs_generator(\"subdir_reader\")\n known_data_asset_names = datasource.get_available_data_asset_names()\n\n # Use set to avoid order dependency\n assert set(known_data_asset_names[\"subdir_reader\"][\"names\"]) == {\n (\"f1\", \"file\"),\n (\"f2\", \"file\"),\n (\"f3\", \"directory\"),\n }\n\n f1_batches = 
[\n batch_kwargs[\"path\"]\n for batch_kwargs in generator.get_iterator(data_asset_name=\"f1\")\n ]\n assert len(f1_batches) == 1\n expected_batches = [{\"path\": os.path.join(base_dir, \"f1.csv\")}]\n for batch in expected_batches:\n assert batch[\"path\"] in f1_batches\n\n f3_batches = [\n batch_kwargs[\"path\"]\n for batch_kwargs in generator.get_iterator(data_asset_name=\"f3\")\n ]\n assert len(f3_batches) == 2\n expected_batches = [\n {\"path\": os.path.join(base_dir, \"f3\", \"f3_20190101.csv\")},\n {\"path\": os.path.join(base_dir, \"f3\", \"f3_20190102.csv\")},\n ]\n for batch in expected_batches:\n assert batch[\"path\"] in f3_batches\n\n\ndef test_glob_reader_generator(basic_pandas_datasource, tmp_path_factory):\n \"\"\"Provides an example of how glob generator works: we specify our own\n names for data_assets, and an associated glob; the generator\n will take care of providing batches consisting of one file per\n batch corresponding to the glob.\"\"\"\n\n basedir = str(tmp_path_factory.mktemp(\"test_glob_reader_generator\"))\n\n with open(os.path.join(basedir, \"f1.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f2.csv\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f3.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f4.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f5.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f6.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f7.xls\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f8.parquet\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f9.xls\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f0.json\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n\n g2 = GlobReaderBatchKwargsGenerator(\n base_directory=basedir,\n datasource=basic_pandas_datasource,\n asset_globs={\"blargs\": {\"glob\": \"*.blarg\"}, \"fs\": {\"glob\": \"f*\"}},\n )\n\n g2_assets = g2.get_available_data_asset_names()\n # Use set in test to avoid order issues\n assert set(g2_assets[\"names\"]) == {(\"blargs\", \"path\"), (\"fs\", \"path\")}\n\n blargs_kwargs = [x[\"path\"] for x in g2.get_iterator(data_asset_name=\"blargs\")]\n real_blargs = [\n os.path.join(basedir, \"f1.blarg\"),\n os.path.join(basedir, \"f3.blarg\"),\n os.path.join(basedir, \"f4.blarg\"),\n os.path.join(basedir, \"f5.blarg\"),\n os.path.join(basedir, \"f6.blarg\"),\n ]\n for kwargs in real_blargs:\n assert kwargs in blargs_kwargs\n\n assert len(blargs_kwargs) == len(real_blargs)\n\n\ndef test_file_kwargs_generator_extensions(tmp_path_factory):\n \"\"\"csv, xls, parquet, json should be recognized file extensions\"\"\"\n basedir = str(tmp_path_factory.mktemp(\"test_file_kwargs_generator_extensions\"))\n\n # Do not include: invalid extension\n with open(os.path.join(basedir, \"f1.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n # Include\n with open(os.path.join(basedir, \"f2.csv\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n # Do not include: valid subdir, but no valid files in it\n os.mkdir(os.path.join(basedir, \"f3\"))\n with open(os.path.join(basedir, \"f3\", \"f3_1.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f3\", 
\"f3_2.blarg\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n # Include: valid subdir with valid files\n os.mkdir(os.path.join(basedir, \"f4\"))\n with open(os.path.join(basedir, \"f4\", \"f4_1.csv\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f4\", \"f4_2.csv\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n # Do not include: valid extension, but dot prefix\n with open(os.path.join(basedir, \".f5.csv\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n\n # Include: valid extensions\n with open(os.path.join(basedir, \"f6.tsv\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f7.xls\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f8.parquet\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f9.xls\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n with open(os.path.join(basedir, \"f0.json\"), \"w\") as outfile:\n outfile.write(\"\\n\\n\\n\")\n\n g1 = SubdirReaderBatchKwargsGenerator(datasource=\"foo\", base_directory=basedir)\n\n g1_assets = g1.get_available_data_asset_names()\n # Use set in test to avoid order issues\n assert set(g1_assets[\"names\"]) == {\n (\"f7\", \"file\"),\n (\"f4\", \"directory\"),\n (\"f6\", \"file\"),\n (\"f0\", \"file\"),\n (\"f2\", \"file\"),\n (\"f9\", \"file\"),\n (\"f8\", \"file\"),\n }\n\n\ndef test_databricks_generator(basic_sparkdf_datasource):\n generator = DatabricksTableBatchKwargsGenerator(datasource=basic_sparkdf_datasource)\n available_assets = generator.get_available_data_asset_names()\n\n # We have no tables available\n assert available_assets == {\"names\": []}\n\n databricks_kwargs_iterator = generator.get_iterator(data_asset_name=\"foo\")\n kwargs = [batch_kwargs for batch_kwargs in databricks_kwargs_iterator]\n assert \"select * from\" in kwargs[0][\"query\"].lower()\n","license":"apache-2.0"} {"repo_name":"jblackburne\/scikit-learn","path":"sklearn\/gaussian_process\/gpc.py","copies":"42","size":"31571","content":"\"\"\"Gaussian processes classification.\"\"\"\n\n# Authors: Jan Hendrik Metzen \n#\n# License: BSD 3 clause\n\nimport warnings\nfrom operator import itemgetter\n\nimport numpy as np\nfrom scipy.linalg import cholesky, cho_solve, solve\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom scipy.special import erf\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin, clone\nfrom sklearn.gaussian_process.kernels \\\n import RBF, CompoundKernel, ConstantKernel as C\nfrom sklearn.utils.validation import check_X_y, check_is_fitted, check_array\nfrom sklearn.utils import check_random_state\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier\n\n\n# Values required for approximating the logistic sigmoid by\n# error functions. 
coefs are obtained via:\n# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])\n# b = logistic(x)\n# A = (erf(np.dot(x, self.lambdas)) + 1) \/ 2\n# coefs = lstsq(A, b)[0]\nLAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]\nCOEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,\n 128.12323805, -2010.49422654])[:, np.newaxis]\n\n\nclass _BinaryGaussianProcessClassifierLaplace(BaseEstimator):\n \"\"\"Binary Gaussian process classification based on Laplace approximation.\n\n The implementation is based on Algorithm 3.1, 3.2, and 5.1 of\n ``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and\n Williams.\n\n Internally, the Laplace approximation is used for approximating the\n non-Gaussian posterior by a Gaussian.\n\n Currently, the implementation is restricted to using the logistic link\n function.\n\n Parameters\n ----------\n kernel : kernel object\n The kernel specifying the covariance function of the GP. If None is\n passed, the kernel \"1.0 * RBF(1.0)\" is used as default. Note that\n the kernel's hyperparameters are optimized during fitting.\n\n optimizer : string or callable, optional (default: \"fmin_l_bfgs_b\")\n Can either be one of the internally supported optimizers for optimizing\n the kernel's parameters, specified by a string, or an externally\n defined optimizer passed as a callable. If a callable is passed, it\n must have the signature::\n\n def optimizer(obj_func, initial_theta, bounds):\n # * 'obj_func' is the objective function to be maximized, which\n # takes the hyperparameters theta as parameter and an\n # optional flag eval_gradient, which determines if the\n # gradient is returned additionally to the function value\n # * 'initial_theta': the initial value for theta, which can be\n # used by local optimizers\n # * 'bounds': the bounds on the values of theta\n ....\n # Returned are the best found hyperparameters theta and\n # the corresponding value of the target function.\n return theta_opt, func_min\n\n Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize\n is used. If None is passed, the kernel's parameters are kept fixed.\n Available internal optimizers are::\n\n 'fmin_l_bfgs_b'\n\n n_restarts_optimizer: int, optional (default: 0)\n The number of restarts of the optimizer for finding the kernel's\n parameters which maximize the log-marginal likelihood. The first run\n of the optimizer is performed from the kernel's initial parameters,\n the remaining ones (if any) from thetas sampled log-uniform randomly\n from the space of allowed theta-values. If greater than 0, all bounds\n must be finite. Note that n_restarts_optimizer=0 implies that one\n run is performed.\n\n max_iter_predict: int, optional (default: 100)\n The maximum number of iterations in Newton's method for approximating\n the posterior during predict. Smaller values will reduce computation\n time at the cost of worse results.\n\n warm_start : bool, optional (default: False)\n If warm-starts are enabled, the solution of the last Newton iteration\n on the Laplace approximation of the posterior mode is used as\n initialization for the next call of _posterior_mode(). This can speed\n up convergence when _posterior_mode is called several times on similar\n problems as in hyperparameter optimization.\n\n copy_X_train : bool, optional (default: True)\n If True, a persistent copy of the training data is stored in the\n object. 
Otherwise, just a reference to the training data is stored,\n which might cause predictions to change if the data is modified\n externally.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n Attributes\n ----------\n X_train_ : array-like, shape = (n_samples, n_features)\n Feature values in training data (also required for prediction)\n\n y_train_: array-like, shape = (n_samples,)\n Target values in training data (also required for prediction)\n\n classes_ : array-like, shape = (n_classes,)\n Unique class labels.\n\n kernel_: kernel object\n The kernel used for prediction. The structure of the kernel is the\n same as the one passed as parameter but with optimized hyperparameters\n\n L_: array-like, shape = (n_samples, n_samples)\n Lower-triangular Cholesky decomposition of the kernel in X_train_\n\n pi_: array-like, shape = (n_samples,)\n The probabilities of the positive class for the training points\n X_train_\n\n W_sr_: array-like, shape = (n_samples,)\n Square root of W, the Hessian of log-likelihood of the latent function\n values for the observed labels. Since W is diagonal, only the diagonal\n of sqrt(W) is stored.\n\n log_marginal_likelihood_value_: float\n The log-marginal-likelihood of ``self.kernel_.theta``\n \"\"\"\n def __init__(self, kernel=None, optimizer=\"fmin_l_bfgs_b\",\n n_restarts_optimizer=0, max_iter_predict=100,\n warm_start=False, copy_X_train=True, random_state=None):\n self.kernel = kernel\n self.optimizer = optimizer\n self.n_restarts_optimizer = n_restarts_optimizer\n self.max_iter_predict = max_iter_predict\n self.warm_start = warm_start\n self.copy_X_train = copy_X_train\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"Fit Gaussian process classification model\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Training data\n\n y : array-like, shape = (n_samples,)\n Target values, must be binary\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n if self.kernel is None: # Use an RBF kernel as default\n self.kernel_ = C(1.0, constant_value_bounds=\"fixed\") \\\n * RBF(1.0, length_scale_bounds=\"fixed\")\n else:\n self.kernel_ = clone(self.kernel)\n\n self.rng = check_random_state(self.random_state)\n\n self.X_train_ = np.copy(X) if self.copy_X_train else X\n\n # Encode class labels and check that it is a binary classification\n # problem\n label_encoder = LabelEncoder()\n self.y_train_ = label_encoder.fit_transform(y)\n self.classes_ = label_encoder.classes_\n if self.classes_.size > 2:\n raise ValueError(\"%s supports only binary classification. 
\"\n \"y contains classes %s\"\n % (self.__class__.__name__, self.classes_))\n elif self.classes_.size == 1:\n raise ValueError(\"{0:s} requires 2 classes.\".format(\n self.__class__.__name__))\n\n if self.optimizer is not None and self.kernel_.n_dims > 0:\n # Choose hyperparameters based on maximizing the log-marginal\n # likelihood (potentially starting from several initial values)\n def obj_func(theta, eval_gradient=True):\n if eval_gradient:\n lml, grad = self.log_marginal_likelihood(\n theta, eval_gradient=True)\n return -lml, -grad\n else:\n return -self.log_marginal_likelihood(theta)\n\n # First optimize starting from theta specified in kernel\n optima = [self._constrained_optimization(obj_func,\n self.kernel_.theta,\n self.kernel_.bounds)]\n\n # Additional runs are performed from log-uniform chosen initial\n # theta\n if self.n_restarts_optimizer > 0:\n if not np.isfinite(self.kernel_.bounds).all():\n raise ValueError(\n \"Multiple optimizer restarts (n_restarts_optimizer>0) \"\n \"requires that all bounds are finite.\")\n bounds = self.kernel_.bounds\n for iteration in range(self.n_restarts_optimizer):\n theta_initial = np.exp(self.rng.uniform(bounds[:, 0],\n bounds[:, 1]))\n optima.append(\n self._constrained_optimization(obj_func, theta_initial,\n bounds))\n # Select result from run with minimal (negative) log-marginal\n # likelihood\n lml_values = list(map(itemgetter(1), optima))\n self.kernel_.theta = optima[np.argmin(lml_values)][0]\n self.log_marginal_likelihood_value_ = -np.min(lml_values)\n else:\n self.log_marginal_likelihood_value_ = \\\n self.log_marginal_likelihood(self.kernel_.theta)\n\n # Precompute quantities required for predictions which are independent\n # of actual query points\n K = self.kernel_(self.X_train_)\n\n _, (self.pi_, self.W_sr_, self.L_, _, _) = \\\n self._posterior_mode(K, return_temporaries=True)\n\n return self\n\n def predict(self, X):\n \"\"\"Perform classification on an array of test vectors X.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n\n Returns\n -------\n C : array, shape = (n_samples,)\n Predicted target values for X, values are from ``classes_``\n \"\"\"\n check_is_fitted(self, [\"X_train_\", \"y_train_\", \"pi_\", \"W_sr_\", \"L_\"])\n\n # As discussed on Section 3.4.2 of GPML, for making hard binary\n # decisions, it is enough to compute the MAP of the posterior and\n # pass it through the link function\n K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)\n f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4\n\n return np.where(f_star > 0, self.classes_[1], self.classes_[0])\n\n def predict_proba(self, X):\n \"\"\"Return probability estimates for the test vector X.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n\n Returns\n -------\n C : array-like, shape = (n_samples, n_classes)\n Returns the probability of the samples for each class in\n the model. 
The columns correspond to the classes in sorted\n order, as they appear in the attribute ``classes_``.\n \"\"\"\n check_is_fitted(self, [\"X_train_\", \"y_train_\", \"pi_\", \"W_sr_\", \"L_\"])\n\n # Based on Algorithm 3.2 of GPML\n K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)\n f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4\n v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5\n # Line 6 (compute np.diag(v.T.dot(v)) via einsum)\n var_f_star = self.kernel_.diag(X) - np.einsum(\"ij,ij->j\", v, v)\n\n # Line 7:\n # Approximate \\int log(z) * N(z | f_star, var_f_star)\n # Approximation is due to Williams & Barber, \"Bayesian Classification\n # with Gaussian Processes\", Appendix A: Approximate the logistic\n # sigmoid by a linear combination of 5 error functions.\n # For information on how this integral can be computed see\n # blitiri.blogspot.de\/2012\/11\/gaussian-integral-of-error-function.html\n alpha = 1 \/ (2 * var_f_star)\n gamma = LAMBDAS * f_star\n integrals = np.sqrt(np.pi \/ alpha) \\\n * erf(gamma * np.sqrt(alpha \/ (alpha + LAMBDAS**2))) \\\n \/ (2 * np.sqrt(var_f_star * 2 * np.pi))\n pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()\n\n return np.vstack((1 - pi_star, pi_star)).T\n\n def log_marginal_likelihood(self, theta=None, eval_gradient=False):\n \"\"\"Returns log-marginal likelihood of theta for training data.\n\n Parameters\n ----------\n theta : array-like, shape = (n_kernel_params,) or None\n Kernel hyperparameters for which the log-marginal likelihood is\n evaluated. If None, the precomputed log_marginal_likelihood\n of ``self.kernel_.theta`` is returned.\n\n eval_gradient : bool, default: False\n If True, the gradient of the log-marginal likelihood with respect\n to the kernel hyperparameters at position theta is returned\n additionally. 
If True, theta must not be None.\n\n Returns\n -------\n log_likelihood : float\n Log-marginal likelihood of theta for training data.\n\n log_likelihood_gradient : array, shape = (n_kernel_params,), optional\n Gradient of the log-marginal likelihood with respect to the kernel\n hyperparameters at position theta.\n Only returned when eval_gradient is True.\n \"\"\"\n if theta is None:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated for theta!=None\")\n return self.log_marginal_likelihood_value_\n\n kernel = self.kernel_.clone_with_theta(theta)\n\n if eval_gradient:\n K, K_gradient = kernel(self.X_train_, eval_gradient=True)\n else:\n K = kernel(self.X_train_)\n\n # Compute log-marginal-likelihood Z and also store some temporaries\n # which can be reused for computing Z's gradient\n Z, (pi, W_sr, L, b, a) = \\\n self._posterior_mode(K, return_temporaries=True)\n\n if not eval_gradient:\n return Z\n\n # Compute gradient based on Algorithm 5.1 of GPML\n d_Z = np.empty(theta.shape[0])\n # XXX: Get rid of the np.diag() in the next line\n R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7\n C = solve(L, W_sr[:, np.newaxis] * K) # Line 8\n # Line 9: (use einsum to compute np.diag(C.T.dot(C))))\n s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \\\n * (pi * (1 - pi) * (1 - 2 * pi)) # third derivative\n\n for j in range(d_Z.shape[0]):\n C = K_gradient[:, :, j] # Line 11\n # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))\n s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())\n\n b = C.dot(self.y_train_ - pi) # Line 13\n s_3 = b - K.dot(R.dot(b)) # Line 14\n\n d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15\n\n return Z, d_Z\n\n def _posterior_mode(self, K, return_temporaries=False):\n \"\"\"Mode-finding for binary Laplace GPC and fixed kernel.\n\n This approximates the posterior of the latent function values for given\n inputs and target observations with a Gaussian approximation and uses\n Newton's iteration to find the mode of this approximation.\n \"\"\"\n # Based on Algorithm 3.1 of GPML\n\n # If warm_start are enabled, we reuse the last solution for the\n # posterior mode as initialization; otherwise, we initialize with 0\n if self.warm_start and hasattr(self, \"f_cached\") \\\n and self.f_cached.shape == self.y_train_.shape:\n f = self.f_cached\n else:\n f = np.zeros_like(self.y_train_, dtype=np.float64)\n\n # Use Newton's iteration method to find mode of Laplace approximation\n log_marginal_likelihood = -np.inf\n for _ in range(self.max_iter_predict):\n # Line 4\n pi = 1 \/ (1 + np.exp(-f))\n W = pi * (1 - pi)\n # Line 5\n W_sr = np.sqrt(W)\n W_sr_K = W_sr[:, np.newaxis] * K\n B = np.eye(W.shape[0]) + W_sr_K * W_sr\n L = cholesky(B, lower=True)\n # Line 6\n b = W * f + (self.y_train_ - pi)\n # Line 7\n a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))\n # Line 8\n f = K.dot(a)\n\n # Line 10: Compute log marginal likelihood in loop and use as\n # convergence criterion\n lml = -0.5 * a.T.dot(f) \\\n - np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \\\n - np.log(np.diag(L)).sum()\n # Check if we have converged (log marginal likelihood does\n # not decrease)\n # XXX: more complex convergence criterion\n if lml - log_marginal_likelihood < 1e-10:\n break\n log_marginal_likelihood = lml\n\n self.f_cached = f # Remember solution for later warm-starts\n if return_temporaries:\n return log_marginal_likelihood, (pi, W_sr, L, b, a)\n else:\n return log_marginal_likelihood\n\n def _constrained_optimization(self, obj_func, 
initial_theta, bounds):\n if self.optimizer == \"fmin_l_bfgs_b\":\n theta_opt, func_min, convergence_dict = \\\n fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)\n if convergence_dict[\"warnflag\"] != 0:\n warnings.warn(\"fmin_l_bfgs_b terminated abnormally with the \"\n \" state: %s\" % convergence_dict)\n elif callable(self.optimizer):\n theta_opt, func_min = \\\n self.optimizer(obj_func, initial_theta, bounds=bounds)\n else:\n raise ValueError(\"Unknown optimizer %s.\" % self.optimizer)\n\n return theta_opt, func_min\n\n\nclass GaussianProcessClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Gaussian process classification (GPC) based on Laplace approximation.\n\n The implementation is based on Algorithm 3.1, 3.2, and 5.1 of\n Gaussian Processes for Machine Learning (GPML) by Rasmussen and\n Williams.\n\n Internally, the Laplace approximation is used for approximating the\n non-Gaussian posterior by a Gaussian.\n\n Currently, the implementation is restricted to using the logistic link\n function. For multi-class classification, several binary one-versus rest\n classifiers are fitted. Note that this class thus does not implement\n a true multi-class Laplace approximation.\n\n Parameters\n ----------\n kernel : kernel object\n The kernel specifying the covariance function of the GP. If None is\n passed, the kernel \"1.0 * RBF(1.0)\" is used as default. Note that\n the kernel's hyperparameters are optimized during fitting.\n\n optimizer : string or callable, optional (default: \"fmin_l_bfgs_b\")\n Can either be one of the internally supported optimizers for optimizing\n the kernel's parameters, specified by a string, or an externally\n defined optimizer passed as a callable. If a callable is passed, it\n must have the signature::\n\n def optimizer(obj_func, initial_theta, bounds):\n # * 'obj_func' is the objective function to be maximized, which\n # takes the hyperparameters theta as parameter and an\n # optional flag eval_gradient, which determines if the\n # gradient is returned additionally to the function value\n # * 'initial_theta': the initial value for theta, which can be\n # used by local optimizers\n # * 'bounds': the bounds on the values of theta\n ....\n # Returned are the best found hyperparameters theta and\n # the corresponding value of the target function.\n return theta_opt, func_min\n\n Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize\n is used. If None is passed, the kernel's parameters are kept fixed.\n Available internal optimizers are::\n\n 'fmin_l_bfgs_b'\n\n n_restarts_optimizer: int, optional (default: 0)\n The number of restarts of the optimizer for finding the kernel's\n parameters which maximize the log-marginal likelihood. The first run\n of the optimizer is performed from the kernel's initial parameters,\n the remaining ones (if any) from thetas sampled log-uniform randomly\n from the space of allowed theta-values. If greater than 0, all bounds\n must be finite. Note that n_restarts_optimizer=0 implies that one\n run is performed.\n\n max_iter_predict: int, optional (default: 100)\n The maximum number of iterations in Newton's method for approximating\n the posterior during predict. Smaller values will reduce computation\n time at the cost of worse results.\n\n warm_start : bool, optional (default: False)\n If warm-starts are enabled, the solution of the last Newton iteration\n on the Laplace approximation of the posterior mode is used as\n initialization for the next call of _posterior_mode(). 
This can speed\n up convergence when _posterior_mode is called several times on similar\n problems as in hyperparameter optimization.\n\n copy_X_train : bool, optional (default: True)\n If True, a persistent copy of the training data is stored in the\n object. Otherwise, just a reference to the training data is stored,\n which might cause predictions to change if the data is modified\n externally.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n multi_class: string, default: \"one_vs_rest\"\n Specifies how multi-class classification problems are handled.\n Supported are \"one_vs_rest\" and \"one_vs_one\". In \"one_vs_rest\",\n one binary Gaussian process classifier is fitted for each class, which\n is trained to separate this class from the rest. In \"one_vs_one\", one\n binary Gaussian process classifier is fitted for each pair of classes,\n which is trained to separate these two classes. The predictions of\n these binary predictors are combined into multi-class predictions.\n Note that \"one_vs_one\" does not support predicting probability\n estimates.\n\n n_jobs : int, optional, default: 1\n The number of jobs to use for the computation. If -1 all CPUs are used.\n If 1 is given, no parallel computing code is used at all, which is\n useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are\n used. Thus for n_jobs = -2, all CPUs but one are used.\n\n Attributes\n ----------\n kernel_ : kernel object\n The kernel used for prediction. In case of binary classification,\n the structure of the kernel is the same as the one passed as parameter\n but with optimized hyperparameters. In case of multi-class\n classification, a CompoundKernel is returned which consists of the\n different kernels used in the one-versus-rest classifiers.\n\n log_marginal_likelihood_value_: float\n The log-marginal-likelihood of ``self.kernel_.theta``\n\n classes_ : array-like, shape = (n_classes,)\n Unique class labels.\n\n n_classes_ : int\n The number of classes in the training data\n \"\"\"\n def __init__(self, kernel=None, optimizer=\"fmin_l_bfgs_b\",\n n_restarts_optimizer=0, max_iter_predict=100,\n warm_start=False, copy_X_train=True, random_state=None,\n multi_class=\"one_vs_rest\", n_jobs=1):\n self.kernel = kernel\n self.optimizer = optimizer\n self.n_restarts_optimizer = n_restarts_optimizer\n self.max_iter_predict = max_iter_predict\n self.warm_start = warm_start\n self.copy_X_train = copy_X_train\n self.random_state = random_state\n self.multi_class = multi_class\n self.n_jobs = n_jobs\n\n def fit(self, X, y):\n \"\"\"Fit Gaussian process classification model\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Training data\n\n y : array-like, shape = (n_samples,)\n Target values, must be binary\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, multi_output=False)\n\n self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(\n self.kernel, self.optimizer, self.n_restarts_optimizer,\n self.max_iter_predict, self.warm_start, self.copy_X_train,\n self.random_state)\n\n self.classes_ = np.unique(y)\n self.n_classes_ = self.classes_.size\n if self.n_classes_ == 1:\n raise ValueError(\"GaussianProcessClassifier requires 2 or more \"\n \"distinct classes. 
Only class %s present.\"\n % self.classes_[0])\n if self.n_classes_ > 2:\n if self.multi_class == \"one_vs_rest\":\n self.base_estimator_ = \\\n OneVsRestClassifier(self.base_estimator_,\n n_jobs=self.n_jobs)\n elif self.multi_class == \"one_vs_one\":\n self.base_estimator_ = \\\n OneVsOneClassifier(self.base_estimator_,\n n_jobs=self.n_jobs)\n else:\n raise ValueError(\"Unknown multi-class mode %s\"\n % self.multi_class)\n\n self.base_estimator_.fit(X, y)\n\n if self.n_classes_ > 2:\n self.log_marginal_likelihood_value_ = np.mean(\n [estimator.log_marginal_likelihood()\n for estimator in self.base_estimator_.estimators_])\n else:\n self.log_marginal_likelihood_value_ = \\\n self.base_estimator_.log_marginal_likelihood()\n\n return self\n\n def predict(self, X):\n \"\"\"Perform classification on an array of test vectors X.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n\n Returns\n -------\n C : array, shape = (n_samples,)\n Predicted target values for X, values are from ``classes_``\n \"\"\"\n check_is_fitted(self, [\"classes_\", \"n_classes_\"])\n X = check_array(X)\n return self.base_estimator_.predict(X)\n\n def predict_proba(self, X):\n \"\"\"Return probability estimates for the test vector X.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n\n Returns\n -------\n C : array-like, shape = (n_samples, n_classes)\n Returns the probability of the samples for each class in\n the model. The columns correspond to the classes in sorted\n order, as they appear in the attribute `classes_`.\n \"\"\"\n check_is_fitted(self, [\"classes_\", \"n_classes_\"])\n if self.n_classes_ > 2 and self.multi_class == \"one_vs_one\":\n raise ValueError(\"one_vs_one multi-class mode does not support \"\n \"predicting probability estimates. Use \"\n \"one_vs_rest mode instead.\")\n X = check_array(X)\n return self.base_estimator_.predict_proba(X)\n\n @property\n def kernel_(self):\n if self.n_classes_ == 2:\n return self.base_estimator_.kernel_\n else:\n return CompoundKernel(\n [estimator.kernel_\n for estimator in self.base_estimator_.estimators_])\n\n def log_marginal_likelihood(self, theta=None, eval_gradient=False):\n \"\"\"Returns log-marginal likelihood of theta for training data.\n\n In the case of multi-class classification, the mean log-marginal\n likelihood of the one-versus-rest classifiers are returned.\n\n Parameters\n ----------\n theta : array-like, shape = (n_kernel_params,) or none\n Kernel hyperparameters for which the log-marginal likelihood is\n evaluated. In the case of multi-class classification, theta may\n be the hyperparameters of the compound kernel or of an individual\n kernel. In the latter case, all individual kernel get assigned the\n same theta values. If None, the precomputed log_marginal_likelihood\n of ``self.kernel_.theta`` is returned.\n\n eval_gradient : bool, default: False\n If True, the gradient of the log-marginal likelihood with respect\n to the kernel hyperparameters at position theta is returned\n additionally. Note that gradient computation is not supported\n for non-binary classification. 
If True, theta must not be None.\n\n Returns\n -------\n log_likelihood : float\n Log-marginal likelihood of theta for training data.\n\n log_likelihood_gradient : array, shape = (n_kernel_params,), optional\n Gradient of the log-marginal likelihood with respect to the kernel\n hyperparameters at position theta.\n Only returned when eval_gradient is True.\n \"\"\"\n check_is_fitted(self, [\"classes_\", \"n_classes_\"])\n\n if theta is None:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated for theta!=None\")\n return self.log_marginal_likelihood_value_\n\n theta = np.asarray(theta)\n if self.n_classes_ == 2:\n return self.base_estimator_.log_marginal_likelihood(\n theta, eval_gradient)\n else:\n if eval_gradient:\n raise NotImplementedError(\n \"Gradient of log-marginal-likelihood not implemented for \"\n \"multi-class GPC.\")\n estimators = self.base_estimator_.estimators_\n n_dims = estimators[0].kernel_.n_dims\n if theta.shape[0] == n_dims: # use same theta for all sub-kernels\n return np.mean(\n [estimator.log_marginal_likelihood(theta)\n for i, estimator in enumerate(estimators)])\n elif theta.shape[0] == n_dims * self.classes_.shape[0]:\n # theta for compound kernel\n return np.mean(\n [estimator.log_marginal_likelihood(\n theta[n_dims * i:n_dims * (i + 1)])\n for i, estimator in enumerate(estimators)])\n else:\n raise ValueError(\"Shape of theta must be either %d or %d. \"\n \"Obtained theta with shape %d.\"\n % (n_dims, n_dims * self.classes_.shape[0],\n theta.shape[0]))\n","license":"bsd-3-clause"} {"repo_name":"dsquareindia\/scikit-learn","path":"sklearn\/decomposition\/tests\/test_fastica.py","copies":"70","size":"7808","content":"\"\"\"\nTest the fastica algorithm.\n\"\"\"\nimport itertools\nimport warnings\n\nimport numpy as np\nfrom scipy import stats\n\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_raises\n\nfrom sklearn.decomposition import FastICA, fastica, PCA\nfrom sklearn.decomposition.fastica_ import _gs_decorrelation\nfrom sklearn.externals.six import moves\n\n\ndef center_and_norm(x, axis=-1):\n \"\"\" Centers and norms x **in place**\n\n Parameters\n -----------\n x: ndarray\n Array with an axis of observations (statistical units) measured on\n random variables.\n axis: int, optional\n Axis along which the mean and variance are calculated.\n \"\"\"\n x = np.rollaxis(x, axis)\n x -= x.mean(axis=0)\n x \/= x.std(axis=0)\n\n\ndef test_gs():\n # Test gram schmidt orthonormalization\n # generate a random orthogonal matrix\n rng = np.random.RandomState(0)\n W, _, _ = np.linalg.svd(rng.randn(10, 10))\n w = rng.randn(10)\n _gs_decorrelation(w, W, 10)\n assert_less((w ** 2).sum(), 1.e-10)\n w = rng.randn(10)\n u = _gs_decorrelation(w, W, 5)\n tmp = np.dot(u, W.T)\n assert_less((tmp[:5] ** 2).sum(), 1.e-10)\n\n\ndef test_fastica_simple(add_noise=False):\n # Test the FastICA algorithm on very simple data.\n rng = np.random.RandomState(0)\n # scipy.stats uses the global RNG:\n np.random.seed(0)\n n_samples = 1000\n # Generate two sources:\n s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1\n s2 = stats.t.rvs(1, size=n_samples)\n s = np.c_[s1, s2].T\n center_and_norm(s)\n s1, s2 = s\n\n # Mixing angle\n phi = 0.6\n mixing = np.array([[np.cos(phi), 
np.sin(phi)],\n [np.sin(phi), -np.cos(phi)]])\n m = np.dot(mixing, s)\n\n if add_noise:\n m += 0.1 * rng.randn(2, 1000)\n\n center_and_norm(m)\n\n # function as fun arg\n def g_test(x):\n return x ** 3, (3 * x ** 2).mean(axis=-1)\n\n algos = ['parallel', 'deflation']\n nls = ['logcosh', 'exp', 'cube', g_test]\n whitening = [True, False]\n for algo, nl, whiten in itertools.product(algos, nls, whitening):\n if whiten:\n k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)\n assert_raises(ValueError, fastica, m.T, fun=np.tanh,\n algorithm=algo)\n else:\n X = PCA(n_components=2, whiten=True).fit_transform(m.T)\n k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)\n assert_raises(ValueError, fastica, X, fun=np.tanh,\n algorithm=algo)\n s_ = s_.T\n # Check that the mixing model described in the docstring holds:\n if whiten:\n assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))\n\n center_and_norm(s_)\n s1_, s2_ = s_\n # Check to see if the sources have been estimated\n # in the wrong order\n if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):\n s2_, s1_ = s_\n s1_ *= np.sign(np.dot(s1_, s1))\n s2_ *= np.sign(np.dot(s2_, s2))\n\n # Check that we have estimated the original sources\n if not add_noise:\n assert_almost_equal(np.dot(s1_, s1) \/ n_samples, 1, decimal=2)\n assert_almost_equal(np.dot(s2_, s2) \/ n_samples, 1, decimal=2)\n else:\n assert_almost_equal(np.dot(s1_, s1) \/ n_samples, 1, decimal=1)\n assert_almost_equal(np.dot(s2_, s2) \/ n_samples, 1, decimal=1)\n\n # Test FastICA class\n _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)\n ica = FastICA(fun=nl, algorithm=algo, random_state=0)\n sources = ica.fit_transform(m.T)\n assert_equal(ica.components_.shape, (2, 2))\n assert_equal(sources.shape, (1000, 2))\n\n assert_array_almost_equal(sources_fun, sources)\n assert_array_almost_equal(sources, ica.transform(m.T))\n\n assert_equal(ica.mixing_.shape, (2, 2))\n\n for fn in [np.tanh, \"exp(-.5(x^2))\"]:\n ica = FastICA(fun=fn, algorithm=algo, random_state=0)\n assert_raises(ValueError, ica.fit, m.T)\n\n assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)\n\n\ndef test_fastica_nowhiten():\n m = [[0, 1], [1, 0]]\n\n # test for issue #697\n ica = FastICA(n_components=1, whiten=False, random_state=0)\n assert_warns(UserWarning, ica.fit, m)\n assert_true(hasattr(ica, 'mixing_'))\n\n\ndef test_non_square_fastica(add_noise=False):\n # Test the FastICA algorithm on very simple data.\n rng = np.random.RandomState(0)\n\n n_samples = 1000\n # Generate two sources:\n t = np.linspace(0, 100, n_samples)\n s1 = np.sin(t)\n s2 = np.ceil(np.sin(np.pi * t))\n s = np.c_[s1, s2].T\n center_and_norm(s)\n s1, s2 = s\n\n # Mixing matrix\n mixing = rng.randn(6, 2)\n m = np.dot(mixing, s)\n\n if add_noise:\n m += 0.1 * rng.randn(6, n_samples)\n\n center_and_norm(m)\n\n k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)\n s_ = s_.T\n\n # Check that the mixing model described in the docstring holds:\n assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))\n\n center_and_norm(s_)\n s1_, s2_ = s_\n # Check to see if the sources have been estimated\n # in the wrong order\n if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):\n s2_, s1_ = s_\n s1_ *= np.sign(np.dot(s1_, s1))\n s2_ *= np.sign(np.dot(s2_, s2))\n\n # Check that we have estimated the original sources\n if not add_noise:\n assert_almost_equal(np.dot(s1_, s1) \/ n_samples, 1, decimal=3)\n assert_almost_equal(np.dot(s2_, s2) \/ n_samples, 1, decimal=3)\n\n\ndef test_fit_transform():\n # Test 
FastICA.fit_transform\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10))\n for whiten, n_components in [[True, 5], [False, None]]:\n n_components_ = (n_components if n_components is not None else\n X.shape[1])\n\n ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)\n Xt = ica.fit_transform(X)\n assert_equal(ica.components_.shape, (n_components_, 10))\n assert_equal(Xt.shape, (100, n_components_))\n\n ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)\n ica.fit(X)\n assert_equal(ica.components_.shape, (n_components_, 10))\n Xt2 = ica.transform(X)\n\n assert_array_almost_equal(Xt, Xt2)\n\n\ndef test_inverse_transform():\n # Test FastICA.inverse_transform\n n_features = 10\n n_samples = 100\n n1, n2 = 5, 10\n rng = np.random.RandomState(0)\n X = rng.random_sample((n_samples, n_features))\n expected = {(True, n1): (n_features, n1),\n (True, n2): (n_features, n2),\n (False, n1): (n_features, n2),\n (False, n2): (n_features, n2)}\n for whiten in [True, False]:\n for n_components in [n1, n2]:\n n_components_ = (n_components if n_components is not None else\n X.shape[1])\n ica = FastICA(n_components=n_components, random_state=rng,\n whiten=whiten)\n with warnings.catch_warnings(record=True):\n # catch \"n_components ignored\" warning\n Xt = ica.fit_transform(X)\n expected_shape = expected[(whiten, n_components_)]\n assert_equal(ica.mixing_.shape, expected_shape)\n X2 = ica.inverse_transform(Xt)\n assert_equal(X.shape, X2.shape)\n\n # reversibility test in non-reduction case\n if n_components == X.shape[1]:\n assert_array_almost_equal(X, X2)\n","license":"bsd-3-clause"} {"repo_name":"untom\/scikit-learn","path":"sklearn\/decomposition\/base.py","copies":"313","size":"5647","content":"\"\"\"Principal Component Analysis Base Classes\"\"\"\n\n# Author: Alexandre Gramfort \n# Olivier Grisel \n# Mathieu Blondel \n# Denis A. 
Engemann \n# Kyle Kastner \n#\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils import check_array\nfrom ..utils.extmath import fast_dot\nfrom ..utils.validation import check_is_fitted\nfrom ..externals import six\nfrom abc import ABCMeta, abstractmethod\n\n\nclass _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):\n \"\"\"Base class for PCA methods.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n def get_covariance(self):\n \"\"\"Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances, and sigma2 contains the\n noise variances.\n\n Returns\n -------\n cov : array, shape=(n_features, n_features)\n Estimated covariance of data.\n \"\"\"\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)\n cov = np.dot(components_.T * exp_var_diff, components_)\n cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace\n return cov\n\n def get_precision(self):\n \"\"\"Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n \"\"\"\n n_features = self.components_.shape[1]\n\n # handle corner cases first\n if self.n_components_ == 0:\n return np.eye(n_features) \/ self.noise_variance_\n if self.n_components_ == n_features:\n return linalg.inv(self.get_covariance())\n\n # Get precision using matrix inversion lemma\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)\n precision = np.dot(components_, components_.T) \/ self.noise_variance_\n precision.flat[::len(precision) + 1] += 1. \/ exp_var_diff\n precision = np.dot(components_.T,\n np.dot(linalg.inv(precision), components_))\n precision \/= -(self.noise_variance_ ** 2)\n precision.flat[::len(precision) + 1] += 1. \/ self.noise_variance_\n return precision\n\n @abstractmethod\n def fit(X, y=None):\n \"\"\"Placeholder for fit. 
Subclasses should implement this method!\n\n Fit the model with X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n\n def transform(self, X, y=None):\n \"\"\"Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples is the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.decomposition import IncrementalPCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> ipca = IncrementalPCA(n_components=2, batch_size=3)\n >>> ipca.fit(X)\n IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)\n >>> ipca.transform(X) # doctest: +SKIP\n \"\"\"\n check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = fast_dot(X, self.components_.T)\n if self.whiten:\n X_transformed \/= np.sqrt(self.explained_variance_)\n return X_transformed\n\n def inverse_transform(self, X, y=None):\n \"\"\"Transform data back to its original space.\n\n In other words, return an input X_original whose transform would be X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_components)\n New data, where n_samples is the number of samples\n and n_components is the number of components.\n\n Returns\n -------\n X_original array-like, shape (n_samples, n_features)\n\n Notes\n -----\n If whitening is enabled, inverse_transform will compute the\n exact inverse operation, which includes reversing whitening.\n \"\"\"\n if self.whiten:\n return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *\n self.components_) + self.mean_\n else:\n return fast_dot(X, self.components_) + self.mean_\n","license":"bsd-3-clause"} {"repo_name":"dmargala\/qusp","path":"examples\/compare_delta.py","copies":"1","size":"7364","content":"#!\/usr\/bin\/env python\nimport argparse\n\nimport numpy as np\nimport numpy.ma as ma\nimport h5py\n\nimport qusp\n\nimport matplotlib.pyplot as plt\nimport scipy.interpolate\nimport fitsio\n\nclass DeltaLOS(object):\n def __init__(self, thing_id):\n path = '\/data\/lya\/deltas\/delta-%d.fits' % thing_id\n hdulist = fitsio.FITS(path, mode=fitsio.READONLY)\n self.pmf = hdulist[1].read_header()['pmf']\n self.loglam = hdulist[1]['loglam'][:]\n self.wave = np.power(10.0, self.loglam)\n self.delta = hdulist[1]['delta'][:]\n self.weight = hdulist[1]['weight'][:]\n self.cont = hdulist[1]['cont'][:]\n self.msha = hdulist[1]['msha'][:]\n self.mabs = hdulist[1]['mabs'][:]\n self.ivar = hdulist[1]['ivar'][:]\n\n self.cf = self.cont*self.msha*self.mabs\n self.flux = (1+self.delta)*self.cf\n\ndef main():\n # parse command-line arguments\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"print verbose output\")\n ## targets to fit\n parser.add_argument(\"--name\", type=str, default=None,\n help=\"target list\")\n parser.add_argument(\"--gamma\", type=float, default=3.8,\n help=\"LSS growth and redshift evolution of mean 
absorption gamma\")\n parser.add_argument(\"--index\", type=int, default=1000,\n help=\"target index\")\n parser.add_argument(\"--pmf\", type=str, default=None,\n help=\"target plate-mjd-fiber string\")\n args = parser.parse_args()\n\n print 'Loading forest data...'\n\n # import data\n skim = h5py.File(args.name+'.hdf5', 'r')\n\n if args.pmf:\n plate, mjd, fiber = [int(val) for val in args.pmf.split('-')]\n index = np.where((skim['meta']['plate'] == plate) & (skim['meta']['mjd'] == mjd) & (skim['meta']['fiber'] == fiber))[0][0]\n else:\n index = args.index\n\n flux = np.ma.MaskedArray(skim['flux'][index], mask=skim['mask'][index])\n ivar = np.ma.MaskedArray(skim['ivar'][index], mask=skim['mask'][index])\n loglam = skim['loglam'][:]\n wave = np.power(10.0, loglam)\n\n z = skim['z'][index]\n norm = skim['norm'][index]\n meta = skim['meta'][index]\n\n linear_continuum = h5py.File(args.name+'-linear-continuum.hdf5', 'r')\n a = linear_continuum['params_a'][index]\n b = linear_continuum['params_b'][index]\n continuum = linear_continuum['continuum']\n continuum_wave = linear_continuum['continuum_wave']\n continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave, continuum, ext=1, s=0)\n abs_alpha = linear_continuum.attrs['abs_alpha']\n abs_beta = linear_continuum.attrs['abs_beta']\n\n forest_wave_ref = (1+z)*linear_continuum.attrs['forest_wave_ref']\n wave_lya = linear_continuum.attrs['wave_lya']\n\n forest_pixel_redshifts = wave\/wave_lya - 1\n abs_coefs = abs_alpha*np.power(1+forest_pixel_redshifts, abs_beta)\n\n print 'flux 1280 Ang: %.2f' % norm\n print 'fit param a: %.2f' % a\n print 'fit param b: %.2f' % b\n\n def model_flux(a, b):\n return a*np.power(wave\/forest_wave_ref, b)*continuum_interp(wave\/(1+z))*np.exp(-abs_coefs)\n\n def chisq(p):\n mflux = model_flux(p[0], p[1])\n res = flux - mflux\n return ma.sum(res*res*ivar)\/ma.sum(ivar)\n\n from scipy.optimize import minimize\n\n result = minimize(chisq, (a, b))\n a,b = result.x\n\n print 'fit param a: %.2f' % a\n print 'fit param b: %.2f' % b\n\n # rest and obs refer to pixel grid\n print 'Estimating deltas in forest frame...'\n\n mflux = model_flux(a,b)\n delta_flux = flux\/mflux - 1.0\n delta_ivar = ivar*mflux*mflux\n\n forest_min_z = linear_continuum.attrs['forest_min_z']\n forest_max_z = linear_continuum.attrs['forest_max_z']\n forest_dz = 0.1\n forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)\n\n print 'Adjusting weights for pipeline variance and LSS variance...'\n\n var_lss = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.05 + 0.06*(forest_z_bins - 2.0)**2, s=0)\n var_pipe_scale = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.7 + 0.2*(forest_z_bins - 2.0)**2, s=0)\n\n delta_weight = delta_ivar*var_pipe_scale(forest_pixel_redshifts)\n delta_weight = delta_weight\/(1 + delta_weight*var_lss(forest_pixel_redshifts))\n\n thing_id = meta['thing_id']\n pmf = '%s-%s-%s' % (meta['plate'],meta['mjd'],meta['fiber'])\n\n los = DeltaLOS(thing_id)\n\n my_msha = norm*a*np.power(wave\/forest_wave_ref, b)\n my_wave = wave\n my_flux = norm*flux\n my_cf = my_msha*continuum_interp(wave\/(1+z))*np.exp(-abs_coefs)\n my_ivar = ivar\/(norm*norm)\n my_delta = delta_flux\n my_weight = delta_weight\n\n # mean_ratio = np.average(my_msha*continuum)\/ma.average(los.msha*los.cont)\n # print mean_ratio\n\n plt.figure(figsize=(12,4))\n plt.plot(my_wave, my_flux, color='gray')\n\n my_dflux = ma.power(my_ivar, -0.5)\n plt.fill_between(my_wave, my_flux - my_dflux, my_flux + my_dflux, color='gray', alpha=0.5)\n\n 
plt.plot(my_wave, my_msha*continuum_interp(wave\/(1+z)), label='My continuum', color='blue')\n plt.plot(los.wave, los.cont, label='Busca continuum', color='red')\n plt.plot(my_wave, my_cf, label='My cf', color='green')\n plt.plot(los.wave, los.cf, label='Busca cf', color='orange')\n plt.legend()\n plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))\n plt.xlabel(r'Observed Wavelength ($\\AA$)')\n plt.ylabel(r'Observed Flux')\n plt.xlim(los.wave[[0,-1]])\n plt.savefig(args.name+'-example-flux.png', dpi=100, bbox_inches='tight')\n plt.close()\n\n plt.figure(figsize=(12,4))\n my_delta_sigma = ma.power(delta_weight, -0.5)\n # plt.fill_between(my_wave, my_delta - my_delta_sigma, my_delta + my_delta_sigma, color='blue', alpha=0.1, label='My Delta')\n plt.scatter(my_wave, my_delta, color='blue', marker='+', label='My Delta')\n plt.plot(my_wave, +my_delta_sigma, color='blue', ls=':')\n plt.plot(my_wave, -my_delta_sigma, color='blue', ls=':')\n\n los_delta_sigma = ma.power(los.weight, -0.5)\n # plt.fill_between(los.wave, los.delta - los_delta_sigma, los.delta + los_delta_sigma, color='red', alpha=01, label='Busca Delta')\n plt.scatter(los.wave, los.delta, color='red', marker='+', label='Busca Delta')\n\n plt.plot(los.wave, +los_delta_sigma, color='red', ls=':')\n plt.plot(los.wave, -los_delta_sigma, color='red', ls=':')\n\n my_lss_sigma = np.sqrt(var_lss(forest_pixel_redshifts))\n plt.plot(my_wave, +my_lss_sigma, color='black', ls='--')\n plt.plot(my_wave, -my_lss_sigma, color='black', ls='--')\n\n # my_sn_sigma = np.sqrt(np.power(1 + forest_pixel_redshifts, 0.5*abs_beta))\/10\n # plt.plot(my_wave, +my_sn_sigma, color='orange', ls='--')\n # plt.plot(my_wave, -my_sn_sigma, color='orange', ls='--')\n # import matplotlib.patches as mpatches\n #\n # blue_patch = mpatches.Patch(color='blue', alpha=0.3, label='My Delta')\n # red_patch = mpatches.Patch(color='red', alpha=0.3, label='Busca Delta')\n # plt.legend(handles=[blue_patch,red_patch])\n\n plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))\n plt.ylim(-2,2)\n plt.xlim(los.wave[[0,-1]])\n\n plt.xlabel(r'Observed Wavelength ($\\AA$)')\n plt.ylabel(r'Delta')\n plt.legend()\n plt.savefig(args.name+'-example-delta.png', dpi=100, bbox_inches='tight')\n plt.close()\n\n\n\nif __name__ == '__main__':\n main()\n","license":"mit"} {"repo_name":"guildai\/guild","path":"examples\/iris-svm\/plot_iris_exercise.py","copies":"1","size":"1702","content":"\"\"\"\nA tutorial exercise for using different SVM kernels.\n\nAdapted from:\nhttps:\/\/scikit-learn.org\/stable\/auto_examples\/exercises\/plot_iris_exercise.html\n\"\"\"\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\n\nkernel = 'linear' # choice of linear, rbf, poly\ntest_split = 0.1\nrandom_seed = 0\ndegree = 3\ngamma = 10\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 0, :2]\ny = y[y != 0]\n\nn_sample = len(X)\n\nnp.random.seed(random_seed)\norder = np.random.permutation(n_sample)\nX = X[order]\ny = y[order].astype(np.float)\n\nsplit_pos = int((1 - test_split) * n_sample)\nX_train = X[:split_pos]\ny_train = y[:split_pos]\nX_test = X[split_pos:]\ny_test = y[split_pos:]\n\n# fit the model\nclf = svm.SVC(kernel=kernel, degree=degree, gamma=gamma)\nclf.fit(X_train, y_train)\n\nprint(\"Train accuracy: %s\" % clf.score(X_train, y_train))\nprint(\"Test accuracy: %f\" % clf.score(X_test, y_test))\n\nplt.figure()\nplt.clf()\nplt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,\n 
edgecolor='k', s=20)\n\n# Circle out the test data\nplt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none',\n zorder=10, edgecolor='k')\n\nplt.axis('tight')\nx_min = X[:, 0].min()\nx_max = X[:, 0].max()\ny_min = X[:, 1].min()\ny_max = X[:, 1].max()\n\nXX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\nZ = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n# Put the result into a color plot\nZ = Z.reshape(XX.shape)\nplt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\nplt.contour(XX, YY, Z, colors=['k', 'k', 'k'],\n linestyles=['--', '-', '--'], levels=[-.5, 0, .5])\n\nplt.title(kernel)\nplt.savefig(\"plot.png\")\n","license":"apache-2.0"} {"repo_name":"jzt5132\/scikit-learn","path":"examples\/svm\/plot_rbf_parameters.py","copies":"132","size":"8096","content":"'''\n==================\nRBF SVM parameters\n==================\n\nThis example illustrates the effect of the parameters ``gamma`` and ``C`` of\nthe Radial Basis Function (RBF) kernel SVM.\n\nIntuitively, the ``gamma`` parameter defines how far the influence of a single\ntraining example reaches, with low values meaning 'far' and high values meaning\n'close'. The ``gamma`` parameters can be seen as the inverse of the radius of\ninfluence of samples selected by the model as support vectors.\n\nThe ``C`` parameter trades off misclassification of training examples against\nsimplicity of the decision surface. A low ``C`` makes the decision surface\nsmooth, while a high ``C`` aims at classifying all training examples correctly\nby giving the model freedom to select more samples as support vectors.\n\nThe first plot is a visualization of the decision function for a variety of\nparameter values on a simplified classification problem involving only 2 input\nfeatures and 2 possible target classes (binary classification). Note that this\nkind of plot is not possible to do for problems with more features or target\nclasses.\n\nThe second plot is a heatmap of the classifier's cross-validation accuracy as a\nfunction of ``C`` and ``gamma``. For this example we explore a relatively large\ngrid for illustration purposes. In practice, a logarithmic grid from\n:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters\nlie on the boundaries of the grid, it can be extended in that direction in a\nsubsequent search.\n\nNote that the heat map plot has a special colorbar with a midpoint value close\nto the score values of the best performing models so as to make it easy to tell\nthem appart in the blink of an eye.\n\nThe behavior of the model is very sensitive to the ``gamma`` parameter. If\n``gamma`` is too large, the radius of the area of influence of the support\nvectors only includes the support vector itself and no amount of\nregularization with ``C`` will be able to prevent overfitting.\n\nWhen ``gamma`` is very small, the model is too constrained and cannot capture\nthe complexity or \"shape\" of the data. The region of influence of any selected\nsupport vector would include the whole training set. The resulting model will\nbehave similarly to a linear model with a set of hyperplanes that separate the\ncenters of high density of any pair of two classes.\n\nFor intermediate values, we can see on the second plot that good models can\nbe found on a diagonal of ``C`` and ``gamma``. 
Smooth models (lower ``gamma``\nvalues) can be made more complex by selecting a larger number of support\nvectors (larger ``C`` values) hence the diagonal of good performing models.\n\nFinally one can also observe that for some intermediate values of ``gamma`` we\nget equally performing models when ``C`` becomes very large: it is not\nnecessary to regularize by limiting the number of support vectors. The radius of\nthe RBF kernel alone acts as a good structural regularizer. In practice though\nit might still be interesting to limit the number of support vectors with a\nlower value of ``C`` so as to favor models that use less memory and that are\nfaster to predict.\n\nWe should also note that small differences in scores results from the random\nsplits of the cross-validation procedure. Those spurious variations can be\nsmoothed out by increasing the number of CV iterations ``n_iter`` at the\nexpense of compute time. Increasing the value number of ``C_range`` and\n``gamma_range`` steps will increase the resolution of the hyper-parameter heat\nmap.\n\n'''\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\n\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import load_iris\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom sklearn.grid_search import GridSearchCV\n\n\n# Utility function to move the midpoint of a colormap to be around\n# the values of interest.\n\nclass MidpointNormalize(Normalize):\n\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n\n##############################################################################\n# Load and prepare data set\n#\n# dataset for grid search\n\niris = load_iris()\nX = iris.data\ny = iris.target\n\n# Dataset for decision function visualization: we only keep the first two\n# features in X and sub-sample the dataset to keep only 2 classes and\n# make it a binary classification problem.\n\nX_2d = X[:, :2]\nX_2d = X_2d[y > 0]\ny_2d = y[y > 0]\ny_2d -= 1\n\n# It is usually a good idea to scale the data for SVM training.\n# We are cheating a bit in this example in scaling all of the data,\n# instead of fitting the transformation on the training set and\n# just applying it on the test set.\n\nscaler = StandardScaler()\nX = scaler.fit_transform(X)\nX_2d = scaler.fit_transform(X_2d)\n\n##############################################################################\n# Train classifiers\n#\n# For an initial search, a logarithmic grid with basis\n# 10 is often helpful. 
Using a basis of 2, a finer\n# tuning can be achieved but at a much higher cost.\n\nC_range = np.logspace(-2, 10, 13)\ngamma_range = np.logspace(-9, 3, 13)\nparam_grid = dict(gamma=gamma_range, C=C_range)\ncv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)\ngrid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)\ngrid.fit(X, y)\n\nprint(\"The best parameters are %s with a score of %0.2f\"\n % (grid.best_params_, grid.best_score_))\n\n# Now we need to fit a classifier for all parameters in the 2d version\n# (we use a smaller set of parameters here because it takes a while to train)\n\nC_2d_range = [1e-2, 1, 1e2]\ngamma_2d_range = [1e-1, 1, 1e1]\nclassifiers = []\nfor C in C_2d_range:\n for gamma in gamma_2d_range:\n clf = SVC(C=C, gamma=gamma)\n clf.fit(X_2d, y_2d)\n classifiers.append((C, gamma, clf))\n\n##############################################################################\n# visualization\n#\n# draw visualization of parameter effects\n\nplt.figure(figsize=(8, 6))\nxx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))\nfor (k, (C, gamma, clf)) in enumerate(classifiers):\n # evaluate decision function in a grid\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n # visualize decision function for these parameters\n plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)\n plt.title(\"gamma=10^%d, C=10^%d\" % (np.log10(gamma), np.log10(C)),\n size='medium')\n\n # visualize parameter's effect on decision function\n plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)\n plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)\n plt.xticks(())\n plt.yticks(())\n plt.axis('tight')\n\n# plot the scores of the grid\n# grid_scores_ contains parameter settings and scores\n# We extract just the scores\nscores = [x[1] for x in grid.grid_scores_]\nscores = np.array(scores).reshape(len(C_range), len(gamma_range))\n\n# Draw heatmap of the validation accuracy as a function of gamma and C\n#\n# The score are encoded as colors with the hot colormap which varies from dark\n# red to bright yellow. As the most interesting scores are all located in the\n# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so\n# as to make it easier to visualize the small variations of score values in the\n# interesting range while not brutally collapsing all the low score values to\n# the same color.\n\nplt.figure(figsize=(8, 6))\nplt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)\nplt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,\n norm=MidpointNormalize(vmin=0.2, midpoint=0.92))\nplt.xlabel('gamma')\nplt.ylabel('C')\nplt.colorbar()\nplt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)\nplt.yticks(np.arange(len(C_range)), C_range)\nplt.title('Validation accuracy')\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"etkirsch\/scikit-learn","path":"examples\/semi_supervised\/plot_label_propagation_digits.py","copies":"268","size":"2723","content":"\"\"\"\n===================================================\nLabel Propagation digits: Demonstrating performance\n===================================================\n\nThis example demonstrates the power of semisupervised learning by\ntraining a Label Spreading model to classify handwritten digits\nwith sets of very few labels.\n\nThe handwritten digit dataset has 1797 total points. The model will\nbe trained using all points, but only 30 will be labeled. 
Results\nin the form of a confusion matrix and a series of metrics over each\nclass will be very good.\n\nAt the end, the top 10 most uncertain predictions will be shown.\n\"\"\"\nprint(__doc__)\n\n# Authors: Clay Woolam \n# Licence: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy import stats\n\nfrom sklearn import datasets\nfrom sklearn.semi_supervised import label_propagation\n\nfrom sklearn.metrics import confusion_matrix, classification_report\n\ndigits = datasets.load_digits()\nrng = np.random.RandomState(0)\nindices = np.arange(len(digits.data))\nrng.shuffle(indices)\n\nX = digits.data[indices[:330]]\ny = digits.target[indices[:330]]\nimages = digits.images[indices[:330]]\n\nn_total_samples = len(y)\nn_labeled_points = 30\n\nindices = np.arange(n_total_samples)\n\nunlabeled_set = indices[n_labeled_points:]\n\n# shuffle everything around\ny_train = np.copy(y)\ny_train[unlabeled_set] = -1\n\n###############################################################################\n# Learn with LabelSpreading\nlp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)\nlp_model.fit(X, y_train)\npredicted_labels = lp_model.transduction_[unlabeled_set]\ntrue_labels = y[unlabeled_set]\n\ncm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)\n\nprint(\"Label Spreading model: %d labeled & %d unlabeled points (%d total)\" %\n (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))\n\nprint(classification_report(true_labels, predicted_labels))\n\nprint(\"Confusion matrix\")\nprint(cm)\n\n# calculate uncertainty values for each transduced distribution\npred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)\n\n# pick the top 10 most uncertain labels\nuncertainty_index = np.argsort(pred_entropies)[-10:]\n\n###############################################################################\n# plot\nf = plt.figure(figsize=(7, 5))\nfor index, image_index in enumerate(uncertainty_index):\n image = images[image_index]\n\n sub = f.add_subplot(2, 5, index + 1)\n sub.imshow(image, cmap=plt.cm.gray_r)\n plt.xticks([])\n plt.yticks([])\n sub.set_title('predict: %i\\ntrue: %i' % (\n lp_model.transduction_[image_index], y[image_index]))\n\nf.suptitle('Learning with small amount of labeled data')\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"nmartensen\/pandas","path":"asv_bench\/benchmarks\/categoricals.py","copies":"3","size":"2803","content":"from .pandas_vb_common import *\ntry:\n from pandas.api.types import union_categoricals\nexcept ImportError:\n try:\n from pandas.types.concat import union_categoricals\n except ImportError:\n pass\n\n\nclass Categoricals(object):\n goal_time = 0.2\n\n def setup(self):\n N = 100000\n self.s = pd.Series((list('aabbcd') * N)).astype('category')\n\n self.a = pd.Categorical((list('aabbcd') * N))\n self.b = pd.Categorical((list('bbcdjk') * N))\n\n self.categories = list('abcde')\n self.cat_idx = Index(self.categories)\n self.values = np.tile(self.categories, N)\n self.codes = np.tile(range(len(self.categories)), N)\n\n self.datetimes = pd.Series(pd.date_range(\n '1995-01-01 00:00:00', periods=10000, freq='s'))\n\n def time_concat(self):\n concat([self.s, self.s])\n\n def time_union(self):\n union_categoricals([self.a, self.b])\n\n def time_constructor_regular(self):\n Categorical(self.values, self.categories)\n\n def time_constructor_fastpath(self):\n Categorical(self.codes, self.cat_idx, fastpath=True)\n\n def time_constructor_datetimes(self):\n Categorical(self.datetimes)\n\n 
def time_constructor_datetimes_with_nat(self):\n t = self.datetimes\n t.iloc[-1] = pd.NaT\n Categorical(t)\n\n\nclass Categoricals2(object):\n goal_time = 0.2\n\n def setup(self):\n n = 500000\n np.random.seed(2718281)\n arr = ['s%04d' % i for i in np.random.randint(0, n \/\/ 10, size=n)]\n self.ts = Series(arr).astype('category')\n\n self.sel = self.ts.loc[[0]]\n\n def time_value_counts(self):\n self.ts.value_counts(dropna=False)\n\n def time_value_counts_dropna(self):\n self.ts.value_counts(dropna=True)\n\n def time_rendering(self):\n str(self.sel)\n\n def time_set_categories(self):\n self.ts.cat.set_categories(self.ts.cat.categories[::2])\n\n\nclass Categoricals3(object):\n goal_time = 0.2\n\n def setup(self):\n N = 100000\n ncats = 100\n\n self.s1 = Series(np.array(tm.makeCategoricalIndex(N, ncats)))\n self.s1_cat = self.s1.astype('category')\n self.s1_cat_ordered = self.s1.astype('category', ordered=True)\n\n self.s2 = Series(np.random.randint(0, ncats, size=N))\n self.s2_cat = self.s2.astype('category')\n self.s2_cat_ordered = self.s2.astype('category', ordered=True)\n\n def time_rank_string(self):\n self.s1.rank()\n\n def time_rank_string_cat(self):\n self.s1_cat.rank()\n\n def time_rank_string_cat_ordered(self):\n self.s1_cat_ordered.rank()\n\n def time_rank_int(self):\n self.s2.rank()\n\n def time_rank_int_cat(self):\n self.s2_cat.rank()\n\n def time_rank_int_cat_ordered(self):\n self.s2_cat_ordered.rank()\n","license":"bsd-3-clause"} {"repo_name":"YinongLong\/scikit-learn","path":"examples\/preprocessing\/plot_function_transformer.py","copies":"158","size":"1993","content":"\"\"\"\n=========================================================\nUsing FunctionTransformer to select columns\n=========================================================\n\nShows how to use a function transformer in a pipeline. 
If you know your\ndataset's first principle component is irrelevant for a classification task,\nyou can use the FunctionTransformer to select all but the first column of the\nPCA transformed data.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import FunctionTransformer\n\n\ndef _generate_vector(shift=0.5, noise=15):\n return np.arange(1000) + (np.random.rand(1000) - shift) * noise\n\n\ndef generate_dataset():\n \"\"\"\n This dataset is two lines with a slope ~ 1, where one has\n a y offset of ~100\n \"\"\"\n return np.vstack((\n np.vstack((\n _generate_vector(),\n _generate_vector() + 100,\n )).T,\n np.vstack((\n _generate_vector(),\n _generate_vector(),\n )).T,\n )), np.hstack((np.zeros(1000), np.ones(1000)))\n\n\ndef all_but_first_column(X):\n return X[:, 1:]\n\n\ndef drop_first_component(X, y):\n \"\"\"\n Create a pipeline with PCA and the column selector and use it to\n transform the dataset.\n \"\"\"\n pipeline = make_pipeline(\n PCA(), FunctionTransformer(all_but_first_column),\n )\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n pipeline.fit(X_train, y_train)\n return pipeline.transform(X_test), y_test\n\n\nif __name__ == '__main__':\n X, y = generate_dataset()\n lw = 0\n plt.figure()\n plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)\n plt.figure()\n X_transformed, y_transformed = drop_first_component(*generate_dataset())\n plt.scatter(\n X_transformed[:, 0],\n np.zeros(len(X_transformed)),\n c=y_transformed,\n lw=lw,\n s=60\n )\n plt.show()\n","license":"bsd-3-clause"} {"repo_name":"newville\/scikit-image","path":"doc\/examples\/plot_rank_mean.py","copies":"17","size":"1499","content":"\"\"\"\n============\nMean filters\n============\n\nThis example compares the following mean filters of the rank filter package:\n\n * **local mean**: all pixels belonging to the structuring element to compute\n average gray level.\n * **percentile mean**: only use values between percentiles p0 and p1\n (here 10% and 90%).\n * **bilateral mean**: only use pixels of the structuring element having a gray\n level situated inside g-s0 and g+s1 (here g-500 and g+500)\n\nPercentile and usual mean give here similar results, these filters smooth the\ncomplete image (background and details). Bilateral mean exhibits a high\nfiltering rate for continuous area (i.e. 
background) while higher image\nfrequencies remain untouched.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.morphology import disk\nfrom skimage.filters import rank\n\n\nimage = (data.coins()).astype(np.uint16) * 16\nselem = disk(20)\n\npercentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)\nbilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)\nnormal_result = rank.mean(image, selem=selem)\n\n\nfig, axes = plt.subplots(nrows=3, figsize=(8, 10))\nax0, ax1, ax2 = axes\n\nax0.imshow(np.hstack((image, percentile_result)))\nax0.set_title('Percentile mean')\nax0.axis('off')\n\nax1.imshow(np.hstack((image, bilateral_result)))\nax1.set_title('Bilateral mean')\nax1.axis('off')\n\nax2.imshow(np.hstack((image, normal_result)))\nax2.set_title('Local mean')\nax2.axis('off')\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"dipanjanS\/text-analytics-with-python","path":"Old-First-Edition\/Ch06_Text_Similarity_and_Clustering\/utils.py","copies":"1","size":"1097","content":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 11 23:06:06 2016\n\n@author: DIP \n\"\"\"\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ndef build_feature_matrix(documents, feature_type='frequency',\n ngram_range=(1, 1), min_df=0.0, max_df=1.0):\n\n feature_type = feature_type.lower().strip() \n \n if feature_type == 'binary':\n vectorizer = CountVectorizer(binary=True, min_df=min_df,\n max_df=max_df, ngram_range=ngram_range)\n elif feature_type == 'frequency':\n vectorizer = CountVectorizer(binary=False, min_df=min_df,\n max_df=max_df, ngram_range=ngram_range)\n elif feature_type == 'tfidf':\n vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df, \n ngram_range=ngram_range)\n else:\n raise Exception(\"Wrong feature type entered. Possible values: 'binary', 'frequency', 'tfidf'\")\n\n feature_matrix = vectorizer.fit_transform(documents).astype(float)\n \n return vectorizer, feature_matrix","license":"apache-2.0"} {"repo_name":"billy-inn\/scikit-learn","path":"examples\/decomposition\/plot_ica_vs_pca.py","copies":"306","size":"3329","content":"\"\"\"\n==========================\nFastICA on 2D point clouds\n==========================\n\nThis example illustrates visually in the feature space a comparison by\nresults using two different component analysis techniques.\n\n:ref:`ICA` vs :ref:`PCA`.\n\nRepresenting ICA in the feature space gives the view of 'geometric ICA':\nICA is an algorithm that finds directions in the feature space\ncorresponding to projections with high non-Gaussianity. These directions\nneed not be orthogonal in the original feature space, but they are\northogonal in the whitened feature space, in which all directions\ncorrespond to the same variance.\n\nPCA, on the other hand, finds orthogonal directions in the raw feature\nspace that correspond to directions accounting for maximum variance.\n\nHere we simulate independent sources using a highly non-Gaussian\nprocess, 2 student T with a low number of degrees of freedom (top left\nfigure). We mix them to create observations (top right figure).\nIn this raw observation space, directions identified by PCA are\nrepresented by orange vectors. We represent the signal in the PCA space,\nafter whitening by the variance corresponding to the PCA vectors (lower\nleft). 
Running ICA corresponds to finding a rotation in this space to\nidentify the directions of largest non-Gaussianity (lower right).\n\"\"\"\nprint(__doc__)\n\n# Authors: Alexandre Gramfort, Gael Varoquaux\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.decomposition import PCA, FastICA\n\n###############################################################################\n# Generate sample data\nrng = np.random.RandomState(42)\nS = rng.standard_t(1.5, size=(20000, 2))\nS[:, 0] *= 2.\n\n# Mix data\nA = np.array([[1, 1], [0, 2]]) # Mixing matrix\n\nX = np.dot(S, A.T) # Generate observations\n\npca = PCA()\nS_pca_ = pca.fit(X).transform(X)\n\nica = FastICA(random_state=rng)\nS_ica_ = ica.fit(X).transform(X) # Estimate the sources\n\nS_ica_ \/= S_ica_.std(axis=0)\n\n\n###############################################################################\n# Plot results\n\ndef plot_samples(S, axis_list=None):\n plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,\n color='steelblue', alpha=0.5)\n if axis_list is not None:\n colors = ['orange', 'red']\n for color, axis in zip(colors, axis_list):\n axis \/= axis.std()\n x_axis, y_axis = axis\n # Trick to get legend to work\n plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)\n plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,\n color=color)\n\n plt.hlines(0, -3, 3)\n plt.vlines(0, -3, 3)\n plt.xlim(-3, 3)\n plt.ylim(-3, 3)\n plt.xlabel('x')\n plt.ylabel('y')\n\nplt.figure()\nplt.subplot(2, 2, 1)\nplot_samples(S \/ S.std())\nplt.title('True Independent Sources')\n\naxis_list = [pca.components_.T, ica.mixing_]\nplt.subplot(2, 2, 2)\nplot_samples(X \/ np.std(X), axis_list=axis_list)\nlegend = plt.legend(['PCA', 'ICA'], loc='upper right')\nlegend.set_zorder(100)\n\nplt.title('Observations')\n\nplt.subplot(2, 2, 3)\nplot_samples(S_pca_ \/ np.std(S_pca_, axis=0))\nplt.title('PCA recovered signals')\n\nplt.subplot(2, 2, 4)\nplot_samples(S_ica_ \/ np.std(S_ica_))\nplt.title('ICA recovered signals')\n\nplt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"hrjn\/scikit-learn","path":"sklearn\/cluster\/tests\/test_hierarchical.py","copies":"33","size":"20167","content":"\"\"\"\nSeveral basic tests for hierarchical clustering procedures\n\n\"\"\"\n# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,\n# Matteo Visconti di Oleggio Castello 2014\n# License: BSD 3 clause\nfrom tempfile import mkdtemp\nimport shutil\nfrom functools import partial\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.cluster import hierarchy\n\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import ignore_warnings\n\nfrom sklearn.cluster import ward_tree\nfrom sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration\nfrom sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,\n linkage_tree)\nfrom sklearn.feature_extraction.image import grid_to_graph\nfrom sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\\\n manhattan_distances, pairwise_distances\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\nfrom sklearn.neighbors.graph import kneighbors_graph\nfrom sklearn.cluster._hierarchical import average_merge, 
max_merge\nfrom sklearn.utils.fast_dict import IntFloatDict\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_warns\n\n\ndef test_linkage_misc():\n # Misc tests on linkage\n rng = np.random.RandomState(42)\n X = rng.normal(size=(5, 5))\n assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)\n assert_raises(ValueError, linkage_tree, X, linkage='foo')\n assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))\n\n # Smoke test FeatureAgglomeration\n FeatureAgglomeration().fit(X)\n\n # test hierarchical clustering on a precomputed distances matrix\n dis = cosine_distances(X)\n\n res = linkage_tree(dis, affinity=\"precomputed\")\n assert_array_equal(res[0], linkage_tree(X, affinity=\"cosine\")[0])\n\n # test hierarchical clustering on a precomputed distances matrix\n res = linkage_tree(X, affinity=manhattan_distances)\n assert_array_equal(res[0], linkage_tree(X, affinity=\"manhattan\")[0])\n\n\ndef test_structured_linkage_tree():\n # Check that we obtain the correct solution for structured linkage trees.\n rng = np.random.RandomState(0)\n mask = np.ones([10, 10], dtype=np.bool)\n # Avoiding a mask with only 'True' entries\n mask[4:7, 4:7] = 0\n X = rng.randn(50, 100)\n connectivity = grid_to_graph(*mask.shape)\n for tree_builder in _TREE_BUILDERS.values():\n children, n_components, n_leaves, parent = \\\n tree_builder(X.T, connectivity)\n n_nodes = 2 * X.shape[1] - 1\n assert_true(len(children) + n_leaves == n_nodes)\n # Check that ward_tree raises a ValueError with a connectivity matrix\n # of the wrong shape\n assert_raises(ValueError,\n tree_builder, X.T, np.ones((4, 4)))\n # Check that fitting with no samples raises an error\n assert_raises(ValueError,\n tree_builder, X.T[:0], connectivity)\n\n\ndef test_unstructured_linkage_tree():\n # Check that we obtain the correct solution for unstructured linkage trees.\n rng = np.random.RandomState(0)\n X = rng.randn(50, 100)\n for this_X in (X, X[0]):\n # With specified a number of clusters just for the sake of\n # raising a warning and testing the warning code\n with ignore_warnings():\n children, n_nodes, n_leaves, parent = assert_warns(\n UserWarning, ward_tree, this_X.T, n_clusters=10)\n n_nodes = 2 * X.shape[1] - 1\n assert_equal(len(children) + n_leaves, n_nodes)\n\n for tree_builder in _TREE_BUILDERS.values():\n for this_X in (X, X[0]):\n with ignore_warnings():\n children, n_nodes, n_leaves, parent = assert_warns(\n UserWarning, tree_builder, this_X.T, n_clusters=10)\n\n n_nodes = 2 * X.shape[1] - 1\n assert_equal(len(children) + n_leaves, n_nodes)\n\n\ndef test_height_linkage_tree():\n # Check that the height of the results of linkage tree is sorted.\n rng = np.random.RandomState(0)\n mask = np.ones([10, 10], dtype=np.bool)\n X = rng.randn(50, 100)\n connectivity = grid_to_graph(*mask.shape)\n for linkage_func in _TREE_BUILDERS.values():\n children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)\n n_nodes = 2 * X.shape[1] - 1\n assert_true(len(children) + n_leaves == n_nodes)\n\n\ndef test_agglomerative_clustering_wrong_arg_memory():\n # Test either if an error is raised when memory is not\n # either a str or a joblib.Memory instance\n rng = np.random.RandomState(0)\n n_samples = 100\n X = rng.randn(n_samples, 50)\n memory = 5\n clustering = AgglomerativeClustering(memory=memory)\n assert_raises(ValueError, clustering.fit, X)\n\n\ndef test_agglomerative_clustering():\n # Check that we obtain the correct number of clusters with\n # agglomerative 
clustering.\n rng = np.random.RandomState(0)\n mask = np.ones([10, 10], dtype=np.bool)\n n_samples = 100\n X = rng.randn(n_samples, 50)\n connectivity = grid_to_graph(*mask.shape)\n for linkage in (\"ward\", \"complete\", \"average\"):\n clustering = AgglomerativeClustering(n_clusters=10,\n connectivity=connectivity,\n linkage=linkage)\n clustering.fit(X)\n # test caching\n try:\n tempdir = mkdtemp()\n clustering = AgglomerativeClustering(\n n_clusters=10, connectivity=connectivity,\n memory=tempdir,\n linkage=linkage)\n clustering.fit(X)\n labels = clustering.labels_\n assert_true(np.size(np.unique(labels)) == 10)\n finally:\n shutil.rmtree(tempdir)\n # Turn caching off now\n clustering = AgglomerativeClustering(\n n_clusters=10, connectivity=connectivity, linkage=linkage)\n # Check that we obtain the same solution with early-stopping of the\n # tree building\n clustering.compute_full_tree = False\n clustering.fit(X)\n assert_almost_equal(normalized_mutual_info_score(clustering.labels_,\n labels), 1)\n clustering.connectivity = None\n clustering.fit(X)\n assert_true(np.size(np.unique(clustering.labels_)) == 10)\n # Check that we raise a TypeError on dense matrices\n clustering = AgglomerativeClustering(\n n_clusters=10,\n connectivity=sparse.lil_matrix(\n connectivity.toarray()[:10, :10]),\n linkage=linkage)\n assert_raises(ValueError, clustering.fit, X)\n\n # Test that using ward with another metric than euclidean raises an\n # exception\n clustering = AgglomerativeClustering(\n n_clusters=10,\n connectivity=connectivity.toarray(),\n affinity=\"manhattan\",\n linkage=\"ward\")\n assert_raises(ValueError, clustering.fit, X)\n\n # Test using another metric than euclidean works with linkage complete\n for affinity in PAIRED_DISTANCES.keys():\n # Compare our (structured) implementation to scipy\n clustering = AgglomerativeClustering(\n n_clusters=10,\n connectivity=np.ones((n_samples, n_samples)),\n affinity=affinity,\n linkage=\"complete\")\n clustering.fit(X)\n clustering2 = AgglomerativeClustering(\n n_clusters=10,\n connectivity=None,\n affinity=affinity,\n linkage=\"complete\")\n clustering2.fit(X)\n assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,\n clustering.labels_),\n 1)\n\n # Test that using a distance matrix (affinity = 'precomputed') has same\n # results (with connectivity constraints)\n clustering = AgglomerativeClustering(n_clusters=10,\n connectivity=connectivity,\n linkage=\"complete\")\n clustering.fit(X)\n X_dist = pairwise_distances(X)\n clustering2 = AgglomerativeClustering(n_clusters=10,\n connectivity=connectivity,\n affinity='precomputed',\n linkage=\"complete\")\n clustering2.fit(X_dist)\n assert_array_equal(clustering.labels_, clustering2.labels_)\n\n\ndef test_ward_agglomeration():\n # Check that we obtain the correct solution in a simplistic case\n rng = np.random.RandomState(0)\n mask = np.ones([10, 10], dtype=np.bool)\n X = rng.randn(50, 100)\n connectivity = grid_to_graph(*mask.shape)\n agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)\n agglo.fit(X)\n assert_true(np.size(np.unique(agglo.labels_)) == 5)\n\n X_red = agglo.transform(X)\n assert_true(X_red.shape[1] == 5)\n X_full = agglo.inverse_transform(X_red)\n assert_true(np.unique(X_full[0]).size == 5)\n assert_array_almost_equal(agglo.transform(X_full), X_red)\n\n # Check that fitting with no samples raises a ValueError\n assert_raises(ValueError, agglo.fit, X[:0])\n\n\ndef assess_same_labelling(cut1, cut2):\n \"\"\"Util for comparison with scipy\"\"\"\n co_clust = 
[]\n for cut in [cut1, cut2]:\n n = len(cut)\n k = cut.max() + 1\n ecut = np.zeros((n, k))\n ecut[np.arange(n), cut] = 1\n co_clust.append(np.dot(ecut, ecut.T))\n assert_true((co_clust[0] == co_clust[1]).all())\n\n\ndef test_scikit_vs_scipy():\n # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy\n n, p, k = 10, 5, 3\n rng = np.random.RandomState(0)\n\n # Not using a lil_matrix here, just to check that non sparse\n # matrices are well handled\n connectivity = np.ones((n, n))\n for linkage in _TREE_BUILDERS.keys():\n for i in range(5):\n X = .1 * rng.normal(size=(n, p))\n X -= 4. * np.arange(n)[:, np.newaxis]\n X -= X.mean(axis=1)[:, np.newaxis]\n\n out = hierarchy.linkage(X, method=linkage)\n\n children_ = out[:, :2].astype(np.int)\n children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)\n\n cut = _hc_cut(k, children, n_leaves)\n cut_ = _hc_cut(k, children_, n_leaves)\n assess_same_labelling(cut, cut_)\n\n # Test error management in _hc_cut\n assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)\n\n\ndef test_connectivity_propagation():\n # Check that connectivity in the ward tree is propagated correctly during\n # merging.\n X = np.array([(.014, .120), (.014, .099), (.014, .097),\n (.017, .153), (.017, .153), (.018, .153),\n (.018, .153), (.018, .153), (.018, .153),\n (.018, .153), (.018, .153), (.018, .153),\n (.018, .152), (.018, .149), (.018, .144)])\n connectivity = kneighbors_graph(X, 10, include_self=False)\n ward = AgglomerativeClustering(\n n_clusters=4, connectivity=connectivity, linkage='ward')\n # If changes are not propagated correctly, fit crashes with an\n # IndexError\n ward.fit(X)\n\n\ndef test_ward_tree_children_order():\n # Check that children are ordered in the same way for both structured and\n # unstructured versions of ward_tree.\n\n # test on five random datasets\n n, p = 10, 5\n rng = np.random.RandomState(0)\n\n connectivity = np.ones((n, n))\n for i in range(5):\n X = .1 * rng.normal(size=(n, p))\n X -= 4. * np.arange(n)[:, np.newaxis]\n X -= X.mean(axis=1)[:, np.newaxis]\n\n out_unstructured = ward_tree(X)\n out_structured = ward_tree(X, connectivity=connectivity)\n\n assert_array_equal(out_unstructured[0], out_structured[0])\n\n\ndef test_ward_linkage_tree_return_distance():\n # Test return_distance option on linkage and ward trees\n\n # test that return_distance when set true, gives same\n # output on both structured and unstructured clustering.\n n, p = 10, 5\n rng = np.random.RandomState(0)\n\n connectivity = np.ones((n, n))\n for i in range(5):\n X = .1 * rng.normal(size=(n, p))\n X -= 4. 
* np.arange(n)[:, np.newaxis]\n X -= X.mean(axis=1)[:, np.newaxis]\n\n out_unstructured = ward_tree(X, return_distance=True)\n out_structured = ward_tree(X, connectivity=connectivity,\n return_distance=True)\n\n # get children\n children_unstructured = out_unstructured[0]\n children_structured = out_structured[0]\n\n # check if we got the same clusters\n assert_array_equal(children_unstructured, children_structured)\n\n # check if the distances are the same\n dist_unstructured = out_unstructured[-1]\n dist_structured = out_structured[-1]\n\n assert_array_almost_equal(dist_unstructured, dist_structured)\n\n for linkage in ['average', 'complete']:\n structured_items = linkage_tree(\n X, connectivity=connectivity, linkage=linkage,\n return_distance=True)[-1]\n unstructured_items = linkage_tree(\n X, linkage=linkage, return_distance=True)[-1]\n structured_dist = structured_items[-1]\n unstructured_dist = unstructured_items[-1]\n structured_children = structured_items[0]\n unstructured_children = unstructured_items[0]\n assert_array_almost_equal(structured_dist, unstructured_dist)\n assert_array_almost_equal(\n structured_children, unstructured_children)\n\n # test on the following dataset where we know the truth\n # taken from scipy\/cluster\/tests\/hierarchy_test_data.py\n X = np.array([[1.43054825, -7.5693489],\n [6.95887839, 6.82293382],\n [2.87137846, -9.68248579],\n [7.87974764, -6.05485803],\n [8.24018364, -6.09495602],\n [7.39020262, 8.54004355]])\n # truth\n linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],\n [1., 5., 1.77045373, 2.],\n [0., 2., 2.55760419, 2.],\n [6., 8., 9.10208346, 4.],\n [7., 9., 24.7784379, 6.]])\n\n linkage_X_complete = np.array(\n [[3., 4., 0.36265956, 2.],\n [1., 5., 1.77045373, 2.],\n [0., 2., 2.55760419, 2.],\n [6., 8., 6.96742194, 4.],\n [7., 9., 18.77445997, 6.]])\n\n linkage_X_average = np.array(\n [[3., 4., 0.36265956, 2.],\n [1., 5., 1.77045373, 2.],\n [0., 2., 2.55760419, 2.],\n [6., 8., 6.55832839, 4.],\n [7., 9., 15.44089605, 6.]])\n\n n_samples, n_features = np.shape(X)\n connectivity_X = np.ones((n_samples, n_samples))\n\n out_X_unstructured = ward_tree(X, return_distance=True)\n out_X_structured = ward_tree(X, connectivity=connectivity_X,\n return_distance=True)\n\n # check that the labels are the same\n assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])\n assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])\n\n # check that the distances are correct\n assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])\n assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])\n\n linkage_options = ['complete', 'average']\n X_linkage_truth = [linkage_X_complete, linkage_X_average]\n for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):\n out_X_unstructured = linkage_tree(\n X, return_distance=True, linkage=linkage)\n out_X_structured = linkage_tree(\n X, connectivity=connectivity_X, linkage=linkage,\n return_distance=True)\n\n # check that the labels are the same\n assert_array_equal(X_truth[:, :2], out_X_unstructured[0])\n assert_array_equal(X_truth[:, :2], out_X_structured[0])\n\n # check that the distances are correct\n assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])\n assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])\n\n\ndef test_connectivity_fixing_non_lil():\n # Check non regression of a bug if a non item assignable connectivity is\n # provided with more than one component.\n # create dummy data\n x = np.array([[0, 0], [1, 1]])\n # create a mask with several 
components to force connectivity fixing\n m = np.array([[True, False], [False, True]])\n c = grid_to_graph(n_x=2, n_y=2, mask=m)\n w = AgglomerativeClustering(connectivity=c, linkage='ward')\n assert_warns(UserWarning, w.fit, x)\n\n\ndef test_int_float_dict():\n rng = np.random.RandomState(0)\n keys = np.unique(rng.randint(100, size=10).astype(np.intp))\n values = rng.rand(len(keys))\n\n d = IntFloatDict(keys, values)\n for key, value in zip(keys, values):\n assert d[key] == value\n\n other_keys = np.arange(50).astype(np.intp)[::2]\n other_values = 0.5 * np.ones(50)[::2]\n other = IntFloatDict(other_keys, other_values)\n # Complete smoke test\n max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)\n average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)\n\n\ndef test_connectivity_callable():\n rng = np.random.RandomState(0)\n X = rng.rand(20, 5)\n connectivity = kneighbors_graph(X, 3, include_self=False)\n aglc1 = AgglomerativeClustering(connectivity=connectivity)\n aglc2 = AgglomerativeClustering(\n connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))\n aglc1.fit(X)\n aglc2.fit(X)\n assert_array_equal(aglc1.labels_, aglc2.labels_)\n\n\ndef test_connectivity_ignores_diagonal():\n rng = np.random.RandomState(0)\n X = rng.rand(20, 5)\n connectivity = kneighbors_graph(X, 3, include_self=False)\n connectivity_include_self = kneighbors_graph(X, 3, include_self=True)\n aglc1 = AgglomerativeClustering(connectivity=connectivity)\n aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)\n aglc1.fit(X)\n aglc2.fit(X)\n assert_array_equal(aglc1.labels_, aglc2.labels_)\n\n\ndef test_compute_full_tree():\n # Test that the full tree is computed if n_clusters is small\n rng = np.random.RandomState(0)\n X = rng.randn(10, 2)\n connectivity = kneighbors_graph(X, 5, include_self=False)\n\n # When n_clusters is less, the full tree should be built\n # that is the number of merges should be n_samples - 1\n agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)\n agc.fit(X)\n n_samples = X.shape[0]\n n_nodes = agc.children_.shape[0]\n assert_equal(n_nodes, n_samples - 1)\n\n # When n_clusters is large, greater than max of 100 and 0.02 * n_samples.\n # we should stop when there are n_clusters.\n n_clusters = 101\n X = rng.randn(200, 2)\n connectivity = kneighbors_graph(X, 10, include_self=False)\n agc = AgglomerativeClustering(n_clusters=n_clusters,\n connectivity=connectivity)\n agc.fit(X)\n n_samples = X.shape[0]\n n_nodes = agc.children_.shape[0]\n assert_equal(n_nodes, n_samples - n_clusters)\n\n\ndef test_n_components():\n # Test n_components returned by linkage, average and ward tree\n rng = np.random.RandomState(0)\n X = rng.rand(5, 5)\n\n # Connectivity matrix having five components.\n connectivity = np.eye(5)\n\n for linkage_func in _TREE_BUILDERS.values():\n assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)\n\n\ndef test_agg_n_clusters():\n # Test that an error is raised when n_clusters <= 0\n\n rng = np.random.RandomState(0)\n X = rng.rand(20, 10)\n for n_clus in [-1, 0]:\n agc = AgglomerativeClustering(n_clusters=n_clus)\n msg = (\"n_clusters should be an integer greater than 0.\"\n \" %s was provided.\" % str(agc.n_clusters))\n assert_raise_message(ValueError, msg, agc.fit, X)\n","license":"bsd-3-clause"} {"repo_name":"mne-tools\/mne-python","path":"mne\/viz\/circle.py","copies":"14","size":"15879","content":"\"\"\"Functions to plot on circle as for connectivity.\"\"\"\n\n# Authors: Alexandre 
Gramfort \n# Denis Engemann \n# Martin Luessi \n#\n# License: Simplified BSD\n\n\nfrom itertools import cycle\nfrom functools import partial\n\nimport numpy as np\n\nfrom .utils import plt_show\n\n\ndef circular_layout(node_names, node_order, start_pos=90, start_between=True,\n group_boundaries=None, group_sep=10):\n \"\"\"Create layout arranging nodes on a circle.\n\n Parameters\n ----------\n node_names : list of str\n Node names.\n node_order : list of str\n List with node names defining the order in which the nodes are\n arranged. Must have the elements as node_names but the order can be\n different. The nodes are arranged clockwise starting at \"start_pos\"\n degrees.\n start_pos : float\n Angle in degrees that defines where the first node is plotted.\n start_between : bool\n If True, the layout starts with the position between the nodes. This is\n the same as adding \"180. \/ len(node_names)\" to start_pos.\n group_boundaries : None | array-like\n List of of boundaries between groups at which point a \"group_sep\" will\n be inserted. E.g. \"[0, len(node_names) \/ 2]\" will create two groups.\n group_sep : float\n Group separation angle in degrees. See \"group_boundaries\".\n\n Returns\n -------\n node_angles : array, shape=(n_node_names,)\n Node angles in degrees.\n \"\"\"\n n_nodes = len(node_names)\n\n if len(node_order) != n_nodes:\n raise ValueError('node_order has to be the same length as node_names')\n\n if group_boundaries is not None:\n boundaries = np.array(group_boundaries, dtype=np.int64)\n if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):\n raise ValueError('\"group_boundaries\" has to be between 0 and '\n 'n_nodes - 1.')\n if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):\n raise ValueError('\"group_boundaries\" must have non-decreasing '\n 'values.')\n n_group_sep = len(group_boundaries)\n else:\n n_group_sep = 0\n boundaries = None\n\n # convert it to a list with indices\n node_order = [node_order.index(name) for name in node_names]\n node_order = np.array(node_order)\n if len(np.unique(node_order)) != n_nodes:\n raise ValueError('node_order has repeated entries')\n\n node_sep = (360. 
- n_group_sep * group_sep) \/ n_nodes\n\n if start_between:\n start_pos += node_sep \/ 2\n\n if boundaries is not None and boundaries[0] == 0:\n # special case when a group separator is at the start\n start_pos += group_sep \/ 2\n boundaries = boundaries[1:] if n_group_sep > 1 else None\n\n node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep\n node_angles[0] = start_pos\n if boundaries is not None:\n node_angles[boundaries] += group_sep\n\n node_angles = np.cumsum(node_angles)[node_order]\n\n return node_angles\n\n\ndef _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,\n n_nodes=0, node_angles=None,\n ylim=[9, 10]):\n \"\"\"Isolate connections around a single node when user left clicks a node.\n\n On right click, resets all connections.\n \"\"\"\n if event.inaxes != axes:\n return\n\n if event.button == 1: # left click\n # click must be near node radius\n if not ylim[0] <= event.ydata <= ylim[1]:\n return\n\n # all angles in range [0, 2*pi]\n node_angles = node_angles % (np.pi * 2)\n node = np.argmin(np.abs(event.xdata - node_angles))\n\n patches = event.inaxes.patches\n for ii, (x, y) in enumerate(zip(indices[0], indices[1])):\n patches[ii].set_visible(node in [x, y])\n fig.canvas.draw()\n elif event.button == 3: # right click\n patches = event.inaxes.patches\n for ii in range(np.size(indices, axis=1)):\n patches[ii].set_visible(True)\n fig.canvas.draw()\n\n\ndef plot_connectivity_circle(con, node_names, indices=None, n_lines=None,\n node_angles=None, node_width=None,\n node_colors=None, facecolor='black',\n textcolor='white', node_edgecolor='black',\n linewidth=1.5, colormap='hot', vmin=None,\n vmax=None, colorbar=True, title=None,\n colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),\n fontsize_title=12, fontsize_names=8,\n fontsize_colorbar=8, padding=6.,\n fig=None, subplot=111, interactive=True,\n node_linewidth=2., show=True):\n \"\"\"Visualize connectivity as a circular graph.\n\n Parameters\n ----------\n con : array\n Connectivity scores. Can be a square matrix, or a 1D array. If a 1D\n array is provided, \"indices\" has to be used to define the connection\n indices.\n node_names : list of str\n Node names. The order corresponds to the order in con.\n indices : tuple of array | None\n Two arrays with indices of connections for which the connections\n strengths are defined in con. Only needed if con is a 1D array.\n n_lines : int | None\n If not None, only the n_lines strongest connections (strength=abs(con))\n are drawn.\n node_angles : array, shape (n_node_names,) | None\n Array with node positions in degrees. If None, the nodes are equally\n spaced on the circle. See mne.viz.circular_layout.\n node_width : float | None\n Width of each node in degrees. If None, the minimum angle between any\n two nodes is used as the width.\n node_colors : list of tuple | list of str\n List with the color to use for each node. If fewer colors than nodes\n are provided, the colors will be repeated. Any color supported by\n matplotlib can be used, e.g., RGBA tuples, named colors.\n facecolor : str\n Color to use for background. See matplotlib.colors.\n textcolor : str\n Color to use for text. See matplotlib.colors.\n node_edgecolor : str\n Color to use for lines around nodes. See matplotlib.colors.\n linewidth : float\n Line width to use for connections.\n colormap : str | instance of matplotlib.colors.LinearSegmentedColormap\n Colormap to use for coloring the connections.\n vmin : float | None\n Minimum value for colormap. 
If None, it is determined automatically.\n vmax : float | None\n Maximum value for colormap. If None, it is determined automatically.\n colorbar : bool\n Display a colorbar or not.\n title : str\n The figure title.\n colorbar_size : float\n Size of the colorbar.\n colorbar_pos : tuple, shape (2,)\n Position of the colorbar.\n fontsize_title : int\n Font size to use for title.\n fontsize_names : int\n Font size to use for node names.\n fontsize_colorbar : int\n Font size to use for colorbar.\n padding : float\n Space to add around figure to accommodate long labels.\n fig : None | instance of matplotlib.figure.Figure\n The figure to use. If None, a new figure with the specified background\n color will be created.\n subplot : int | tuple, shape (3,)\n Location of the subplot when creating figures with multiple plots. E.g.\n 121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See\n matplotlib.pyplot.subplot.\n interactive : bool\n When enabled, left-click on a node to show only connections to that\n node. Right-click shows all connections.\n node_linewidth : float\n Line with for nodes.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure handle.\n axes : instance of matplotlib.projections.polar.PolarAxes\n The subplot handle.\n\n Notes\n -----\n This code is based on a circle graph example by Nicolas P. Rougier\n\n By default, :func:`matplotlib.pyplot.savefig` does not take ``facecolor``\n into account when saving, even if set when a figure is generated. This\n can be addressed via, e.g.::\n\n >>> fig.savefig(fname_fig, facecolor='black') # doctest:+SKIP\n\n If ``facecolor`` is not set via :func:`matplotlib.pyplot.savefig`, the\n figure labels, title, and legend may be cut off in the output figure.\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.path as m_path\n import matplotlib.patches as m_patches\n\n n_nodes = len(node_names)\n\n if node_angles is not None:\n if len(node_angles) != n_nodes:\n raise ValueError('node_angles has to be the same length '\n 'as node_names')\n # convert it to radians\n node_angles = node_angles * np.pi \/ 180\n else:\n # uniform layout on unit circle\n node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)\n\n if node_width is None:\n # widths correspond to the minimum angle between two nodes\n dist_mat = node_angles[None, :] - node_angles[:, None]\n dist_mat[np.diag_indices(n_nodes)] = 1e9\n node_width = np.min(np.abs(dist_mat))\n else:\n node_width = node_width * np.pi \/ 180\n\n if node_colors is not None:\n if len(node_colors) < n_nodes:\n node_colors = cycle(node_colors)\n else:\n # assign colors using colormap\n try:\n spectral = plt.cm.spectral\n except AttributeError:\n spectral = plt.cm.Spectral\n node_colors = [spectral(i \/ float(n_nodes))\n for i in range(n_nodes)]\n\n # handle 1D and 2D connectivity information\n if con.ndim == 1:\n if indices is None:\n raise ValueError('indices has to be provided if con.ndim == 1')\n elif con.ndim == 2:\n if con.shape[0] != n_nodes or con.shape[1] != n_nodes:\n raise ValueError('con has to be 1D or a square matrix')\n # we use the lower-triangular part\n indices = np.tril_indices(n_nodes, -1)\n con = con[indices]\n else:\n raise ValueError('con has to be 1D or a square matrix')\n\n # get the colormap\n if isinstance(colormap, str):\n colormap = plt.get_cmap(colormap)\n\n # Make figure background the same colors as axes\n if fig is None:\n fig = plt.figure(figsize=(8, 8), facecolor=facecolor)\n\n # Use a polar axes\n if not 
isinstance(subplot, tuple):\n subplot = (subplot,)\n axes = plt.subplot(*subplot, polar=True)\n axes.set_facecolor(facecolor)\n\n # No ticks, we'll put our own\n plt.xticks([])\n plt.yticks([])\n\n # Set y axes limit, add additional space if requested\n plt.ylim(0, 10 + padding)\n\n # Remove the black axes border which may obscure the labels\n axes.spines['polar'].set_visible(False)\n\n # Draw lines between connected nodes, only draw the strongest connections\n if n_lines is not None and len(con) > n_lines:\n con_thresh = np.sort(np.abs(con).ravel())[-n_lines]\n else:\n con_thresh = 0.\n\n # get the connections which we are drawing and sort by connection strength\n # this will allow us to draw the strongest connections first\n con_abs = np.abs(con)\n con_draw_idx = np.where(con_abs >= con_thresh)[0]\n\n con = con[con_draw_idx]\n con_abs = con_abs[con_draw_idx]\n indices = [ind[con_draw_idx] for ind in indices]\n\n # now sort them\n sort_idx = np.argsort(con_abs)\n del con_abs\n con = con[sort_idx]\n indices = [ind[sort_idx] for ind in indices]\n\n # Get vmin vmax for color scaling\n if vmin is None:\n vmin = np.min(con[np.abs(con) >= con_thresh])\n if vmax is None:\n vmax = np.max(con)\n vrange = vmax - vmin\n\n # We want to add some \"noise\" to the start and end position of the\n # edges: We modulate the noise with the number of connections of the\n # node and the connection strength, such that the strongest connections\n # are closer to the node center\n nodes_n_con = np.zeros((n_nodes), dtype=np.int64)\n for i, j in zip(indices[0], indices[1]):\n nodes_n_con[i] += 1\n nodes_n_con[j] += 1\n\n # initialize random number generator so plot is reproducible\n rng = np.random.mtrand.RandomState(0)\n\n n_con = len(indices[0])\n noise_max = 0.25 * node_width\n start_noise = rng.uniform(-noise_max, noise_max, n_con)\n end_noise = rng.uniform(-noise_max, noise_max, n_con)\n\n nodes_n_con_seen = np.zeros_like(nodes_n_con)\n for i, (start, end) in enumerate(zip(indices[0], indices[1])):\n nodes_n_con_seen[start] += 1\n nodes_n_con_seen[end] += 1\n\n start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) \/\n float(nodes_n_con[start]))\n end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) \/\n float(nodes_n_con[end]))\n\n # scale connectivity for colormap (vmin<=>0, vmax<=>1)\n con_val_scaled = (con - vmin) \/ vrange\n\n # Finally, we draw the connections\n for pos, (i, j) in enumerate(zip(indices[0], indices[1])):\n # Start point\n t0, r0 = node_angles[i], 10\n\n # End point\n t1, r1 = node_angles[j], 10\n\n # Some noise in start and end point\n t0 += start_noise[pos]\n t1 += end_noise[pos]\n\n verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]\n codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,\n m_path.Path.LINETO]\n path = m_path.Path(verts, codes)\n\n color = colormap(con_val_scaled[pos])\n\n # Actual line\n patch = m_patches.PathPatch(path, fill=False, edgecolor=color,\n linewidth=linewidth, alpha=1.)\n axes.add_patch(patch)\n\n # Draw ring with colored nodes\n height = np.ones(n_nodes) * 1.0\n bars = axes.bar(node_angles, height, width=node_width, bottom=9,\n edgecolor=node_edgecolor, lw=node_linewidth,\n facecolor='.9', align='center')\n\n for bar, color in zip(bars, node_colors):\n bar.set_facecolor(color)\n\n # Draw node labels\n angles_deg = 180 * node_angles \/ np.pi\n for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):\n if angle_deg >= 270:\n ha = 'left'\n else:\n # Flip the label, so text is always upright\n angle_deg += 180\n 
ha = 'right'\n\n axes.text(angle_rad, 10.4, name, size=fontsize_names,\n rotation=angle_deg, rotation_mode='anchor',\n horizontalalignment=ha, verticalalignment='center',\n color=textcolor)\n\n if title is not None:\n plt.title(title, color=textcolor, fontsize=fontsize_title,\n axes=axes)\n\n if colorbar:\n sm = plt.cm.ScalarMappable(cmap=colormap,\n norm=plt.Normalize(vmin, vmax))\n sm.set_array(np.linspace(vmin, vmax))\n cb = plt.colorbar(sm, ax=axes, use_gridspec=False,\n shrink=colorbar_size,\n anchor=colorbar_pos)\n cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')\n cb.ax.tick_params(labelsize=fontsize_colorbar)\n plt.setp(cb_yticks, color=textcolor)\n\n # Add callback for interaction\n if interactive:\n callback = partial(_plot_connectivity_circle_onpick, fig=fig,\n axes=axes, indices=indices, n_nodes=n_nodes,\n node_angles=node_angles)\n\n fig.canvas.mpl_connect('button_press_event', callback)\n\n plt_show(show)\n return fig, axes\n","license":"bsd-3-clause"} {"repo_name":"zorroblue\/scikit-learn","path":"examples\/model_selection\/plot_roc.py","copies":"102","size":"5056","content":"\"\"\"\n=======================================\nReceiver Operating Characteristic (ROC)\n=======================================\n\nExample of Receiver Operating Characteristic (ROC) metric to evaluate\nclassifier output quality.\n\nROC curves typically feature true positive rate on the Y axis, and false\npositive rate on the X axis. This means that the top left corner of the plot is\nthe \"ideal\" point - a false positive rate of zero, and a true positive rate of\none. This is not very realistic, but it does mean that a larger area under the\ncurve (AUC) is usually better.\n\nThe \"steepness\" of ROC curves is also important, since it is ideal to maximize\nthe true positive rate while minimizing the false positive rate.\n\nMulticlass settings\n-------------------\n\nROC curves are typically used in binary classification to study the output of\na classifier. In order to extend ROC curve and ROC area to multi-class\nor multi-label classification, it is necessary to binarize the output. One ROC\ncurve can be drawn per label, but one can also draw a ROC curve by considering\neach element of the label indicator matrix as a binary prediction\n(micro-averaging).\n\nAnother evaluation measure for multi-class classification is\nmacro-averaging, which gives equal weight to the classification of each\nlabel.\n\n.. 
note::\n\n See also :func:`sklearn.metrics.roc_auc_score`,\n :ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.\n\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\n\n# Import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\n# Binarize the output\ny = label_binarize(y, classes=[0, 1, 2])\nn_classes = y.shape[1]\n\n# Add noisy features to make the problem harder\nrandom_state = np.random.RandomState(0)\nn_samples, n_features = X.shape\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# shuffle and split training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n\n# Learn to predict each class against the other\nclassifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,\n random_state=random_state))\ny_score = classifier.fit(X_train, y_train).decision_function(X_test)\n\n# Compute ROC curve and ROC area for each class\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n# Compute micro-average ROC curve and ROC area\nfpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\nroc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n\n##############################################################################\n# Plot of a ROC curve for a specific class\nplt.figure()\nlw = 2\nplt.plot(fpr[2], tpr[2], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n\n##############################################################################\n# Plot ROC curves for the multiclass problem\n\n# Compute macro-average ROC curve and ROC area\n\n# First aggregate all false positive rates\nall_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n# Then interpolate all ROC curves at this points\nmean_tpr = np.zeros_like(all_fpr)\nfor i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n# Finally average it and compute AUC\nmean_tpr \/= n_classes\n\nfpr[\"macro\"] = all_fpr\ntpr[\"macro\"] = mean_tpr\nroc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n# Plot all ROC curves\nplt.figure()\nplt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\nplt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\ncolors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\nfor i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(i, roc_auc[i]))\n\nplt.plot([0, 1], [0, 1], 'k--', lw=lw)\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive 
Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Some extension of Receiver operating characteristic to multi-class')\nplt.legend(loc=\"lower right\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"harshaneelhg\/scikit-learn","path":"examples\/cluster\/plot_lena_compress.py","copies":"271","size":"2229","content":"#!\/usr\/bin\/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n=========================================================\nVector Quantization Example\n=========================================================\n\nThe classic image processing example, Lena, an 8-bit grayscale\nbit-depth, 512 x 512 sized image, is used here to illustrate\nhow `k`-means is used for vector quantization.\n\n\"\"\"\nprint(__doc__)\n\n\n# Code source: Ga\u00ebl Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nfrom sklearn import cluster\n\nn_clusters = 5\nnp.random.seed(0)\n\ntry:\n lena = sp.lena()\nexcept AttributeError:\n # Newer versions of scipy have lena in misc\n from scipy import misc\n lena = misc.lena()\nX = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array\nk_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)\nk_means.fit(X)\nvalues = k_means.cluster_centers_.squeeze()\nlabels = k_means.labels_\n\n# create an array from labels and values\nlena_compressed = np.choose(labels, values)\nlena_compressed.shape = lena.shape\n\nvmin = lena.min()\nvmax = lena.max()\n\n# original lena\nplt.figure(1, figsize=(3, 2.2))\nplt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)\n\n# compressed lena\nplt.figure(2, figsize=(3, 2.2))\nplt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)\n\n# equal bins lena\nregular_values = np.linspace(0, 256, n_clusters + 1)\nregular_labels = np.searchsorted(regular_values, lena) - 1\nregular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean\nregular_lena = np.choose(regular_labels.ravel(), regular_values)\nregular_lena.shape = lena.shape\nplt.figure(3, figsize=(3, 2.2))\nplt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)\n\n# histogram\nplt.figure(4, figsize=(3, 2.2))\nplt.clf()\nplt.axes([.01, .01, .98, .98])\nplt.hist(X, bins=256, color='.5', edgecolor='.5')\nplt.yticks(())\nplt.xticks(regular_values)\nvalues = np.sort(values)\nfor center_1, center_2 in zip(values[:-1], values[1:]):\n plt.axvline(.5 * (center_1 + center_2), color='b')\n\nfor center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):\n plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"JosmanPS\/scikit-learn","path":"examples\/cluster\/plot_dict_face_patches.py","copies":"337","size":"2747","content":"\"\"\"\nOnline learning of a dictionary of parts of faces\n==================================================\n\nThis example uses a large dataset of faces to learn a set of 20 x 20\nimages patches that constitute faces.\n\nFrom the programming standpoint, it is interesting because it shows how\nto use the online API of the scikit-learn to process a very large\ndataset by chunks. The way we proceed is that we load an image at a time\nand extract randomly 50 patches from this image. 
Once we have accumulated\n500 of these patches (using 10 images), we run the `partial_fit` method\nof the online KMeans object, MiniBatchKMeans.\n\nThe verbose setting on the MiniBatchKMeans enables us to see that some\nclusters are reassigned during the successive calls to\npartial-fit. This is because the number of patches that they represent\nhas become too low, and it is better to choose a random new\ncluster.\n\"\"\"\nprint(__doc__)\n\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nfrom sklearn import datasets\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.feature_extraction.image import extract_patches_2d\n\nfaces = datasets.fetch_olivetti_faces()\n\n###############################################################################\n# Learn the dictionary of images\n\nprint('Learning the dictionary... ')\nrng = np.random.RandomState(0)\nkmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)\npatch_size = (20, 20)\n\nbuffer = []\nindex = 1\nt0 = time.time()\n\n# The online learning part: cycle over the whole dataset 6 times\nindex = 0\nfor _ in range(6):\n for img in faces.images:\n data = extract_patches_2d(img, patch_size, max_patches=50,\n random_state=rng)\n data = np.reshape(data, (len(data), -1))\n buffer.append(data)\n index += 1\n if index % 10 == 0:\n data = np.concatenate(buffer, axis=0)\n data -= np.mean(data, axis=0)\n data \/= np.std(data, axis=0)\n kmeans.partial_fit(data)\n buffer = []\n if index % 100 == 0:\n print('Partial fit of %4i out of %i'\n % (index, 6 * len(faces.images)))\n\ndt = time.time() - t0\nprint('done in %.2fs.' % dt)\n\n###############################################################################\n# Plot the results\nplt.figure(figsize=(4.2, 4))\nfor i, patch in enumerate(kmeans.cluster_centers_):\n plt.subplot(9, 9, i + 1)\n plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,\n interpolation='nearest')\n plt.xticks(())\n plt.yticks(())\n\n\nplt.suptitle('Patches of faces\\nTrain time %.1fs on %d patches' %\n (dt, 8 * len(faces.images)), fontsize=16)\nplt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"smorante\/continuous-goal-directed-actions","path":"demonstration-feature-selection\/src\/alternatives\/main_dtw_mds_norm.py","copies":"2","size":"3731","content":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Santiago Morante\nRobotics Lab. 
Universidad Carlos III de Madrid\n\"\"\"\n########################## DTW ####################################\nimport libmddtw\nimport matplotlib.pyplot as plt\nfrom dtw import dtw\n########################## MDS ####################################\nimport numpy as np\nfrom sklearn.metrics import euclidean_distances\nimport libmds\n\n########################## DBSCAN ####################################\nimport libdbscan\nfrom sklearn.preprocessing import StandardScaler # to normalize\n\n\ndef normalize(X):\n return StandardScaler().fit_transform(X)\n\ndef main():\n \n NUMBER_OF_DEMONSTRATIONS=5\n ##########################################################################\n ########################## DTW ####################################\n ########################################################################## \n\n dist=np.zeros((NUMBER_OF_DEMONSTRATIONS,NUMBER_OF_DEMONSTRATIONS))\n\n demons=[]\n \n # fill demonstrations\n for i in range(NUMBER_OF_DEMONSTRATIONS):\n demons.append(np.matrix([ np.sin(np.arange(15+i)+i) , np.sin(np.arange(15+i)+i)]))\n\n \n # fill distance matrix \n for i in range(NUMBER_OF_DEMONSTRATIONS):\n for j in range(NUMBER_OF_DEMONSTRATIONS):\n\n mddtw = libmddtw.Mddtw()\n x,y = mddtw.collapseRows(demons[i],demons[j])\n \n #fig = plt.figure()\n #plt.plot(x)\n #plt.plot(y)\n singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j])\n dist[i][j]=singleDist\n# print 'Minimum distance found:', singleDist\n #fig = plt.figure()\n \n # plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest')\n # plt.plot(path[0], path[1], 'w')\n # plt.xlim((-0.5, cost.shape[0]-0.5))\n # plt.ylim((-0.5, cost.shape[1]-0.5))\n \n# print \"dist\", dist\n ###########################################################################\n ########################### MDS ####################################\n ###########################################################################\n\n md = libmds.Mds()\n md.create(n_components=1, metric=False, max_iter=3000, eps=1e-9, random_state=None,\n dissimilarity=\"precomputed\", n_jobs=1)\n\n points = md.compute(dist)\n print \"points\", points.flatten()\n # md.plot()\n \n \n \n \n ##########################################################################\n ########################## norm ####################################\n ##########################################################################\n from scipy.stats import norm\n from numpy import linspace\n from pylab import plot,show,hist,figure,title\n \n param = norm.fit(points.flatten()) # distribution fitting\n x = linspace(np.min(points),np.max(points),NUMBER_OF_DEMONSTRATIONS)\n \n pdf_fitted = norm.pdf(x, loc=param[0],scale=param[1])\n\n \n fig = plt.figure()\n title('Normal distribution') \n plot(x,pdf_fitted,'r-')\n hist(points.flatten(),normed=1,alpha=.3)\n show()\n\n\n for elem in points:\n\n if elem <= np.mean(points): \n print \"probability of point \", str(elem), \": \", norm.cdf(elem, loc=param[0],scale=param[1])\n \n if elem > np.mean(points): \n print \"probability of point \", str(elem), \": \", 1-norm.cdf(elem, loc=param[0],scale=param[1])\n\n\n##############################################################################\n##############################################################################\n\nif __name__ == \"__main__\":\n main()","license":"mit"} {"repo_name":"wavycloud\/pyboto3","path":"pyboto3\/glue.py","copies":"1","size":"692979","content":"'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby 
granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\ndef batch_create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInputList=None):\n \"\"\"\n Creates one or more partitions in a batch operation.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_create_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionInputList=[\n {\n 'Values': [\n 'string',\n ],\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the metadata database in which the partition is to be created.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the metadata table in which the partition is to be created.\\n\n\n :type PartitionInputList: list\n :param PartitionInputList: [REQUIRED]\\nA list of PartitionInput structures that define the partitions to be created.\\n\\n(dict) --The structure used to create and update a partition.\\n\\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. 
Otherwise AWS Glue will add the values to the wrong keys.\\n\\n(string) --\\n\\n\\nLastAccessTime (datetime) --The last time at which the partition was accessed.\\n\\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\\n\\nColumns (list) --A list of the Columns in the table.\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\\n\\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\\n\\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\\n\\nCompressed (boolean) --\\nTrue if the data in the table is compressed, or False if not.\\n\\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\\n\\nSerdeInfo (dict) --The serialization\/deserialization (SerDe) information.\\n\\nName (string) --Name of the SerDe.\\n\\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\\n\\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\\n\\n(string) --\\n\\n\\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\\n\\n(dict) --Specifies the sort order of a sorted column.\\n\\nColumn (string) -- [REQUIRED]The name of the column.\\n\\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\\n\\n\\n\\n\\n\\nParameters (dict) --The user-supplied properties in key-value form.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\\n\\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\\n\\n(string) --\\n\\n\\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\\n\\n(string) --\\n\\n\\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nStoredAsSubDirectories (boolean) --\\nTrue if the table data is stored in subdirectories, or False if not.\\n\\n\\n\\nParameters (dict) --These key-value pairs define partition parameters.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Errors': [\n {\n 'PartitionValues': [\n 'string',\n ],\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nErrors (list) --\nThe errors encountered when trying to create the requested partitions.\n\n(dict) --\nContains information about a partition 
error.\n\nPartitionValues (list) --\nThe values that define the partition.\n\n(string) --\n\n\nErrorDetail (dict) --\nThe details about the partition error.\n\nErrorCode (string) --\nThe code associated with this error.\n\nErrorMessage (string) --\nA message describing the error.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Errors': [\n {\n 'PartitionValues': [\n 'string',\n ],\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_delete_connection(CatalogId=None, ConnectionNameList=None):\n \"\"\"\n Deletes a list of connection definitions from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_delete_connection(\n CatalogId='string',\n ConnectionNameList=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.\n\n :type ConnectionNameList: list\n :param ConnectionNameList: [REQUIRED]\\nA list of names of the connections to delete.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Succeeded': [\n 'string',\n ],\n 'Errors': {\n 'string': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nSucceeded (list) --\nA list of names of the connection definitions that were successfully deleted.\n\n(string) --\n\n\nErrors (dict) --\nA map of the names of connections that were not successfully deleted to error details.\n\n(string) --\n\n(dict) --\nContains details about an error.\n\nErrorCode (string) --\nThe code associated with this error.\n\nErrorMessage (string) --\nA message describing the error.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Succeeded': [\n 'string',\n ],\n 'Errors': {\n 'string': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToDelete=None):\n \"\"\"\n Deletes one or more partitions in a batch operation.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_delete_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionsToDelete=[\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which the table in question resides.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table that contains the partitions to be deleted.\\n\n\n :type PartitionsToDelete: list\n :param PartitionsToDelete: [REQUIRED]\\nA list of PartitionInput structures that define the partitions to be deleted.\\n\\n(dict) --Contains a list of values defining partitions.\\n\\nValues (list) -- [REQUIRED]The list of values.\\n\\n(string) --\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Errors': [\n {\n 'PartitionValues': [\n 'string',\n ],\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nErrors (list) --\nThe errors encountered when trying to delete the requested partitions.\n\n(dict) --\nContains information about a partition error.\n\nPartitionValues (list) --\nThe values that define the partition.\n\n(string) --\n\n\nErrorDetail (dict) --\nThe details about the partition error.\n\nErrorCode (string) --\nThe code associated with this error.\n\nErrorMessage (string) --\nA message describing the error.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Errors': [\n {\n 'PartitionValues': [\n 'string',\n ],\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_delete_table(CatalogId=None, DatabaseName=None, TablesToDelete=None):\n \"\"\"\n Deletes multiple tables at once.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_delete_table(\n CatalogId='string',\n DatabaseName='string',\n TablesToDelete=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which the tables to delete reside. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TablesToDelete: list\n :param TablesToDelete: [REQUIRED]\\nA list of the table to delete.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Errors': [\n {\n 'TableName': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nErrors (list) --\nA list of errors encountered in attempting to delete the specified tables.\n\n(dict) --\nAn error record for table operations.\n\nTableName (string) --\nThe name of the table. 
For Hive compatibility, this must be entirely lowercase.\n\nErrorDetail (dict) --\nThe details about the error.\n\nErrorCode (string) --\nThe code associated with this error.\n\nErrorMessage (string) --\nA message describing the error.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Errors': [\n {\n 'TableName': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef batch_delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionIds=None):\n \"\"\"\n Deletes a specified batch of versions of a table.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_delete_table_version(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n VersionIds=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type VersionIds: list\n :param VersionIds: [REQUIRED]\\nA list of the IDs of versions to be deleted. A VersionId is a string representation of an integer. Each version is incremented by 1.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Errors': [\n {\n 'TableName': 'string',\n 'VersionId': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nErrors (list) --\nA list of errors encountered while trying to delete the specified table versions.\n\n(dict) --\nAn error record for table-version operations.\n\nTableName (string) --\nThe name of the table in question.\n\nVersionId (string) --\nThe ID value of the version in question. A VersionID is a string representation of an integer. Each version is incremented by 1.\n\nErrorDetail (dict) --\nThe details about the error.\n\nErrorCode (string) --\nThe code associated with this error.\n\nErrorMessage (string) --\nA message describing the error.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Errors': [\n {\n 'TableName': 'string',\n 'VersionId': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef batch_get_crawlers(CrawlerNames=None):\n \"\"\"\n Returns a list of resource metadata for a given list of crawler names. 
After calling the ListCrawlers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_get_crawlers(\n CrawlerNames=[\n 'string',\n ]\n )\n \n \n :type CrawlerNames: list\n :param CrawlerNames: [REQUIRED]\\nA list of crawler names, which might be the names returned from the ListCrawlers operation.\\n\\n(string) --\\n\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Crawlers': [\n {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n },\n ],\n 'CrawlersNotFound': [\n 'string',\n ]\n}\n\n\nResponse Structure\n\n(dict) --\nCrawlers (list) --A list of crawler definitions.\n\n(dict) --Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.\n\nName (string) --The name of the crawler.\n\nRole (string) --The Amazon Resource Name (ARN) of an IAM role that\\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.\n\nTargets (dict) --A collection of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. 
For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) --The name of the database to be synchronized.\n\nTables (list) --A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n\nDatabaseName (string) --The name of the database in which the crawler\\'s output is stored.\n\nDescription (string) --A description of the crawler.\n\nClassifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.\n\n(string) --\n\n\nSchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n\nState (string) --Indicates whether the crawler is running, or whether a run is pending.\n\nTablePrefix (string) --The prefix added to the names of tables that are created.\n\nSchedule (dict) --For scheduled crawlers, the schedule when the crawler runs.\n\nScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .\n\nState (string) --The state of the schedule.\n\n\n\nCrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.\n\nCreationTime (datetime) --The time that the crawler was created.\n\nLastUpdated (datetime) --The time that the crawler was last updated.\n\nLastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.\n\nStatus (string) --Status of the last crawl.\n\nErrorMessage (string) --If an error occurred, the error information about the last crawl.\n\nLogGroup (string) --The log group for the last crawl.\n\nLogStream (string) --The log stream for the last crawl.\n\nMessagePrefix (string) --The prefix for a message about this crawl.\n\nStartTime (datetime) --The time at which the crawl started.\n\n\n\nVersion (integer) --The version of the crawler.\n\nConfiguration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\\'s behavior. 
For more information, see Configuring a Crawler .\n\nCrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.\n\n\n\n\n\nCrawlersNotFound (list) --A list of names of crawlers that were not found.\n\n(string) --\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Crawlers': [\n {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n },\n ],\n 'CrawlersNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_get_dev_endpoints(DevEndpointNames=None):\n \"\"\"\n Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. 
This operation supports all IAM permissions, including permission conditions that uses tags.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_get_dev_endpoints(\n DevEndpointNames=[\n 'string',\n ]\n )\n \n \n :type DevEndpointNames: list\n :param DevEndpointNames: [REQUIRED]\\nThe list of DevEndpoint names, which might be the names returned from the ListDevEndpoint operation.\\n\\n(string) --\\n\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'DevEndpoints': [\n {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string',\n 'Arguments': {\n 'string': 'string'\n }\n },\n ],\n 'DevEndpointsNotFound': [\n 'string',\n ]\n}\n\n\nResponse Structure\n\n(dict) --\nDevEndpoints (list) --A list of DevEndpoint definitions.\n\n(dict) --A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.\n\nEndpointName (string) --The name of the DevEndpoint .\n\nRoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .\n\nSecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .\n\n(string) --\n\n\nSubnetId (string) --The subnet ID for this DevEndpoint .\n\nYarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .\n\nPrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.\n\nZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.\n\nPublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .\n\nStatus (string) --The current status of this DevEndpoint .\n\nWorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. 
The Python version indicates the version supported for running your ETL scripts on development endpoints.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nNumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .\n\nAvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.\n\nVpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .\n\nExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\n\n\nExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\nNote\nYou can only use pure Java\/Scala libraries with a DevEndpoint .\n\n\nFailureReason (string) --The reason for a current failure in this DevEndpoint .\n\nLastUpdateStatus (string) --The status of the last update.\n\nCreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.\n\nLastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.\n\nPublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.\n\nPublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n\nNote\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n\n\n(string) --\n\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .\n\nArguments (dict) --A map of arguments used to configure the DevEndpoint .\nValid arguments are:\n\n\"--enable-glue-datacatalog\": \"\"\n\"GLUE_PYTHON_VERSION\": \"3\"\n\"GLUE_PYTHON_VERSION\": \"2\"\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. 
If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nDevEndpointsNotFound (list) --A list of DevEndpoints not found.\n\n(string) --\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AccessDeniedException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'DevEndpoints': [\n {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string',\n 'Arguments': {\n 'string': 'string'\n }\n },\n ],\n 'DevEndpointsNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_get_jobs(JobNames=None):\n \"\"\"\n Returns a list of resource metadata for a given list of job names. After calling the ListJobs operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_get_jobs(\n JobNames=[\n 'string',\n ]\n )\n \n \n :type JobNames: list\n :param JobNames: [REQUIRED]\\nA list of job names, which might be the names returned from the ListJobs operation.\\n\\n(string) --\\n\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Jobs': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ],\n 'JobsNotFound': [\n 'string',\n ]\n}\n\n\nResponse Structure\n\n(dict) --\nJobs (list) --A list of job definitions.\n\n(dict) --Specifies a job definition.\n\nName (string) --The name you assign to this job definition.\n\nDescription (string) --A description of the job.\n\nLogUri (string) --This field is reserved for future use.\n\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.\n\nCreatedOn (datetime) --The time and date that this job definition was created.\n\nLastModifiedOn (datetime) --The last point in time when this job definition was modified.\n\nExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs 
allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n\nCommand (dict) --The JobCommand that executes this job.\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n\nDefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nNonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n\nConnections (dict) --The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n\nMaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.\n\nAllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nTimeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. 
We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\n\nNotificationProperty (dict) --Specifies configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\nJobsNotFound (list) --A list of names of jobs not found.\n\n(string) --\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'Jobs': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ],\n 'JobsNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef batch_get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToGet=None):\n \"\"\"\n Retrieves partitions in a batch request.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_get_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionsToGet=[\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partitions in question reside. 
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database where the partitions reside.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the partitions\\' table.\\n\n\n :type PartitionsToGet: list\n :param PartitionsToGet: [REQUIRED]\\nA list of partition values identifying the partitions to retrieve.\\n\\n(dict) --Contains a list of values defining partitions.\\n\\nValues (list) -- [REQUIRED]The list of values.\\n\\n(string) --\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Partitions': [\n {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ],\n 'UnprocessedKeys': [\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nPartitions (list) --\nA list of the requested partitions.\n\n(dict) --\nRepresents a slice of table data.\n\nValues (list) --\nThe values of the partition.\n\n(string) --\n\n\nDatabaseName (string) --\nThe name of the catalog database in which to create the partition.\n\nTableName (string) --\nThe name of the database table in which to create the partition.\n\nCreationTime (datetime) --\nThe time at which the partition was created.\n\nLastAccessTime (datetime) --\nThe last time at which the partition was accessed.\n\nStorageDescriptor (dict) --\nProvides information about the physical location where the partition is stored.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. 
By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nParameters (dict) --\nThese key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --\nThe last time at which column statistics were computed for this partition.\n\n\n\n\n\nUnprocessedKeys (list) --\nA list of the partition values in the request for which partitions were not returned.\n\n(dict) --\nContains a list of values defining partitions.\n\nValues (list) --\nThe list of values.\n\n(string) --\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Partitions': [\n {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 
'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ],\n 'UnprocessedKeys': [\n {\n 'Values': [\n 'string',\n ]\n },\n ]\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef batch_get_triggers(TriggerNames=None):\n \"\"\"\n Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that uses tags.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_get_triggers(\n TriggerNames=[\n 'string',\n ]\n )\n \n \n :type TriggerNames: list\n :param TriggerNames: [REQUIRED]\\nA list of trigger names, which may be the names returned from the ListTriggers operation.\\n\\n(string) --\\n\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Triggers': [\n {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n },\n ],\n 'TriggersNotFound': [\n 'string',\n ]\n}\n\n\nResponse Structure\n\n(dict) --\nTriggers (list) --A list of trigger definitions.\n\n(dict) --Information about a specific trigger.\n\nName (string) --The name of the trigger.\n\nWorkflowName (string) --The name of the workflow associated with the trigger.\n\nId (string) --Reserved for future use.\n\nType (string) --The type of trigger that this is.\n\nState (string) --The current state of the trigger.\n\nDescription (string) --A description of this trigger.\n\nSchedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --The actions initiated by this trigger.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --The predicate of this trigger, which defines when it will fire.\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . 
The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nTriggersNotFound (list) --A list of names of triggers not found.\n\n(string) --\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'Triggers': [\n {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n },\n ],\n 'TriggersNotFound': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef batch_get_workflows(Names=None, IncludeGraph=None):\n \"\"\"\n Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. 
This operation supports all IAM permissions, including permission conditions that uses tags.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_get_workflows(\n Names=[\n 'string',\n ],\n IncludeGraph=True|False\n )\n \n \n :type Names: list\n :param Names: [REQUIRED]\\nA list of workflow names, which may be the names returned from the ListWorkflows operation.\\n\\n(string) --\\n\\n\n\n :type IncludeGraph: boolean\n :param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Workflows': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'DefaultRunProperties': {\n 'string': 'string'\n },\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'LastRun': {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': 
{\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n ],\n 'MissingWorkflows': [\n 'string',\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nWorkflows (list) --\nA list of workflow resource metadata.\n\n(dict) --\nA workflow represents a flow in which AWS Glue components should be executed to complete a logical task.\n\nName (string) --\nThe name of the workflow representing the flow.\n\nDescription (string) --\nA description of the workflow.\n\nDefaultRunProperties (dict) --\nA collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n\nCreatedOn (datetime) --\nThe date and time when the workflow was created.\n\nLastModifiedOn (datetime) --\nThe date and time when the workflow was last modified.\n\nLastRun (dict) --\nThe information about the last execution of the workflow.\n\nName (string) --\nName of the workflow which was executed.\n\nWorkflowRunId (string) --\nThe ID of this workflow run.\n\nWorkflowRunProperties (dict) --\nThe workflow run properties which were set during the run.\n\n(string) --\n(string) --\n\n\n\n\nStartedOn (datetime) --\nThe date and time when the workflow run was started.\n\nCompletedOn (datetime) --\nThe date and time when the workflow run completed.\n\nStatus (string) --\nThe status of the workflow run.\n\nStatistics (dict) --\nThe statistics of the run.\n\nTotalActions (integer) --\nTotal number of Actions in the workflow run.\n\nTimeoutActions (integer) --\nTotal number of Actions which timed out.\n\nFailedActions (integer) --\nTotal 
number of Actions which have failed.\n\nStoppedActions (integer) --\nTotal number of Actions which have stopped.\n\nSucceededActions (integer) --\nTotal number of Actions which have succeeded.\n\nRunningActions (integer) --\nTotal number Actions in running state.\n\n\n\nGraph (dict) --\nThe graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.\n\nNodes (list) --\nA list of the the AWS Glue components belong to the workflow represented as nodes.\n\n(dict) --\nA node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.\n\nType (string) --\nThe type of AWS Glue component represented by the node.\n\nName (string) --\nThe name of the AWS Glue component represented by the node.\n\nUniqueId (string) --\nThe unique Id assigned to the node within the workflow.\n\nTriggerDetails (dict) --\nDetails of the Trigger when the node represents a Trigger.\n\nTrigger (dict) --\nThe information of the trigger represented by the trigger node.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. 
If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nJobDetails (dict) --\nDetails of the Job when the node represents a Job.\n\nJobRuns (list) --\nThe information for the job runs represented by the job node.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. 
The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. 
The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\nCrawlerDetails (dict) --\nDetails of the crawler when the node represents a crawler.\n\nCrawls (list) --\nA list of crawls represented by the crawl node.\n\n(dict) --\nThe details of a crawl in the workflow.\n\nState (string) --\nThe state of the crawler.\n\nStartedOn (datetime) --\nThe date and time on which the crawl started.\n\nCompletedOn (datetime) --\nThe date and time on which the crawl completed.\n\nErrorMessage (string) --\nThe error message associated with the crawl.\n\nLogGroup (string) --\nThe log group associated with the crawl.\n\nLogStream (string) --\nThe log stream associated with the crawl.\n\n\n\n\n\n\n\n\n\n\n\nEdges (list) --\nA list of all the directed connections between the nodes belonging to the workflow.\n\n(dict) --\nAn edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.\n\nSourceId (string) --\nThe unique of the node within the workflow where the edge starts.\n\nDestinationId (string) --\nThe unique of the node within the workflow where the edge ends.\n\n\n\n\n\n\n\n\n\nGraph (dict) --\nThe graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.\n\nNodes (list) --\nA list of the the AWS Glue components belong to the workflow represented as nodes.\n\n(dict) --\nA node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.\n\nType (string) --\nThe type of AWS Glue component represented by the node.\n\nName (string) --\nThe name of the AWS Glue component represented by the node.\n\nUniqueId (string) --\nThe unique Id assigned to the node within the workflow.\n\nTriggerDetails (dict) --\nDetails of the Trigger when the node represents a Trigger.\n\nTrigger (dict) --\nThe information of the trigger represented by the trigger node.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nJobDetails (dict) --\nDetails of the Job when the node represents a Job.\n\nJobRuns (list) --\nThe information for the job runs represented by the job node.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. 
This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\nCrawlerDetails (dict) --\nDetails of the crawler when the node represents a crawler.\n\nCrawls (list) --\nA list of crawls represented by the crawl node.\n\n(dict) --\nThe details of a crawl in the workflow.\n\nState (string) --\nThe state of the crawler.\n\nStartedOn (datetime) --\nThe date and time on which the crawl started.\n\nCompletedOn (datetime) --\nThe date and time on which the crawl completed.\n\nErrorMessage (string) --\nThe error message associated with the crawl.\n\nLogGroup (string) --\nThe log group associated with the crawl.\n\nLogStream (string) --\nThe log stream associated with the crawl.\n\n\n\n\n\n\n\n\n\n\n\nEdges (list) --\nA list of all the directed connections between the nodes belonging to the workflow.\n\n(dict) --\nAn edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.\n\nSourceId (string) --\nThe unique of the node within the workflow where the edge starts.\n\nDestinationId (string) --\nThe unique of the node within the workflow where the edge ends.\n\n\n\n\n\n\n\n\n\n\n\nMissingWorkflows (list) --\nA list of names of workflows not found.\n\n(string) --\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'Workflows': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'DefaultRunProperties': {\n 'string': 'string'\n },\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'LastRun': {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 
123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 
'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n ],\n 'MissingWorkflows': [\n 'string',\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef batch_stop_job_run(JobName=None, JobRunIds=None):\n \"\"\"\n Stops one or more job runs for a specified job definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.batch_stop_job_run(\n JobName='string',\n JobRunIds=[\n 'string',\n ]\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job definition for which to stop job runs.\\n\n\n :type JobRunIds: list\n :param JobRunIds: [REQUIRED]\\nA list of the JobRunIds that should be stopped for that job definition.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'SuccessfulSubmissions': [\n {\n 'JobName': 'string',\n 'JobRunId': 'string'\n },\n ],\n 'Errors': [\n {\n 'JobName': 'string',\n 'JobRunId': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nSuccessfulSubmissions (list) --\nA list of the JobRuns that were successfully submitted for stopping.\n\n(dict) --\nRecords a successful request to stop a specified JobRun .\n\nJobName (string) --\nThe name of the job definition used in the job run that was stopped.\n\nJobRunId (string) --\nThe JobRunId of the job run that was stopped.\n\n\n\n\n\nErrors (list) --\nA list of the errors that were encountered in trying to stop JobRuns , including the JobRunId for which each error was encountered and details about the error.\n\n(dict) --\nRecords an error that occurred when attempting to stop a specified job run.\n\nJobName (string) --\nThe name of the job definition that is used in the job run in question.\n\nJobRunId (string) --\nThe JobRunId of the job run in question.\n\nErrorDetail (dict) --\nSpecifies details about the error that was encountered.\n\nErrorCode (string) --\nThe code associated with this error.\n\nErrorMessage (string) --\nA message describing the error.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'SuccessfulSubmissions': [\n {\n 'JobName': 'string',\n 'JobRunId': 'string'\n },\n ],\n 'Errors': [\n {\n 'JobName': 'string',\n 'JobRunId': 'string',\n 'ErrorDetail': {\n 'ErrorCode': 'string',\n 'ErrorMessage': 'string'\n }\n },\n ]\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef can_paginate(operation_name=None):\n \"\"\"\n Check if an operation can be paginated.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\\nas the method name on the client. For example, if the\\nmethod name is create_foo, and you\\'d normally invoke the\\noperation as client.create_foo(**kwargs), if the\\ncreate_foo operation can be paginated, you can use the\\ncall client.get_paginator('create_foo').\n\n \"\"\"\n pass\n\ndef cancel_ml_task_run(TransformId=None, TaskRunId=None):\n \"\"\"\n Cancels (stops) a task run. 
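# --- Illustrative usage sketch (not part of the generated stubs; names and IDs are hypothetical) ---
# One way the batch_get_workflows and batch_stop_job_run operations documented above
# might be called with boto3. The workflow name, job name, and run IDs are placeholders.
import boto3

glue = boto3.client('glue')

# Fetch metadata (including the run graph) for a couple of workflows.
resp = glue.batch_get_workflows(Names=['nightly-etl', 'backfill'], IncludeGraph=True)
for wf in resp['Workflows']:
    last_run = wf.get('LastRun', {})
    print(wf['Name'], last_run.get('Status'))
    # Walk the nodes of the last run's graph, if one was returned.
    for node in last_run.get('Graph', {}).get('Nodes', []):
        print('  node:', node['Type'], node['Name'])

# Stop two runs of a job; per-run failures come back in 'Errors'.
stop = glue.batch_stop_job_run(
    JobName='nightly-etl-job',
    JobRunIds=['jr_0123456789abcdef', 'jr_fedcba9876543210'],
)
for err in stop['Errors']:
    print('could not stop', err['JobRunId'], err['ErrorDetail']['ErrorMessage'])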
Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run\\'s parent transform\\'s TransformID and the task run\\'s TaskRunId .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.cancel_ml_task_run(\n TransformId='string',\n TaskRunId='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :type TaskRunId: string\n :param TaskRunId: [REQUIRED]\\nA unique identifier for the task run.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TransformId': 'string',\n 'TaskRunId': 'string',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTransformId (string) --\nThe unique identifier of the machine learning transform.\n\nTaskRunId (string) --\nThe unique identifier for the task run.\n\nStatus (string) --\nThe status for this run.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TransformId': 'string',\n 'TaskRunId': 'string',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n \n \"\"\"\n pass\n\ndef create_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):\n \"\"\"\n Creates a classifier in the user\\'s account. 
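# --- Illustrative usage sketch (not part of the generated stubs; IDs are hypothetical) ---
# Cancelling an ML task run, as documented above, requires both the parent transform's
# TransformId and the task run's TaskRunId.
import boto3

glue = boto3.client('glue')

resp = glue.cancel_ml_task_run(
    TransformId='tfm-0123456789abcdef',   # hypothetical transform ID
    TaskRunId='tsk-0123456789abcdef',     # hypothetical task run ID
)
print(resp['Status'])  # e.g. 'STOPPING' or 'STOPPED'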
This can be a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field of the request is present.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_classifier(\n GrokClassifier={\n 'Classification': 'string',\n 'Name': 'string',\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n XMLClassifier={\n 'Classification': 'string',\n 'Name': 'string',\n 'RowTag': 'string'\n },\n JsonClassifier={\n 'Name': 'string',\n 'JsonPath': 'string'\n },\n CsvClassifier={\n 'Name': 'string',\n 'Delimiter': 'string',\n 'QuoteSymbol': 'string',\n 'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',\n 'Header': [\n 'string',\n ],\n 'DisableValueTrimming': True|False,\n 'AllowSingleColumn': True|False\n }\n )\n \n \n :type GrokClassifier: dict\n :param GrokClassifier: A GrokClassifier object specifying the classifier to create.\\n\\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\\n\\nName (string) -- [REQUIRED]The name of the new classifier.\\n\\nGrokPattern (string) -- [REQUIRED]The grok pattern used by this classifier.\\n\\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\\n\\n\\n\n\n :type XMLClassifier: dict\n :param XMLClassifier: An XMLClassifier object specifying the classifier to create.\\n\\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches.\\n\\nName (string) -- [REQUIRED]The name of the classifier.\\n\\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\\'t identify a self-closing element (closed by \/> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <\/row> is okay, but is not).\\n\\n\\n\n\n :type JsonClassifier: dict\n :param JsonClassifier: A JsonClassifier object specifying the classifier to create.\\n\\nName (string) -- [REQUIRED]The name of the classifier.\\n\\nJsonPath (string) -- [REQUIRED]A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\\n\\n\\n\n\n :type CsvClassifier: dict\n :param CsvClassifier: A CsvClassifier object specifying the classifier to create.\\n\\nName (string) -- [REQUIRED]The name of the classifier.\\n\\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\\n\\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. Must be different from the column delimiter.\\n\\nContainsHeader (string) --Indicates whether the CSV file contains a header.\\n\\nHeader (list) --A list of strings representing column names.\\n\\n(string) --\\n\\n\\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. 
The default value is true.\\n\\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_connection(CatalogId=None, ConnectionInput=None):\n \"\"\"\n Creates a connection definition in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_connection(\n CatalogId='string',\n ConnectionInput={\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the connection. If none is provided, the AWS account ID is used by default.\n\n :type ConnectionInput: dict\n :param ConnectionInput: [REQUIRED]\\nA ConnectionInput object defining the connection to create.\\n\\nName (string) -- [REQUIRED]The name of the connection.\\n\\nDescription (string) --The description of the connection.\\n\\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\\n\\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\\nMONGODB - Designates a connection to a MongoDB document database.\\n\\nSFTP is not supported.\\n\\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\\n\\n(string) --\\n\\n\\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\\n\\nSubnetId (string) --The subnet ID used by the connection.\\n\\nSecurityGroupIdList (list) --The security group ID list used by the connection.\\n\\n(string) --\\n\\n\\nAvailabilityZone (string) --The connection\\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None, Tags=None):\n \"\"\"\n Creates a new crawler with specified targets, role, configuration, and optional schedule. 
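# --- Illustrative usage sketch (not part of the generated stubs; names and properties are hypothetical) ---
# Creating a CSV classifier and a JDBC connection with the create_classifier and
# create_connection operations documented above.
import boto3

glue = boto3.client('glue')

glue.create_classifier(
    CsvClassifier={
        'Name': 'pipe-delimited-csv',
        'Delimiter': '|',
        'QuoteSymbol': '"',              # must differ from the delimiter
        'ContainsHeader': 'PRESENT',
        'DisableValueTrimming': False,
        'AllowSingleColumn': False,
    }
)

glue.create_connection(
    ConnectionInput={
        'Name': 'analytics-postgres',
        'ConnectionType': 'JDBC',
        'ConnectionProperties': {
            'JDBC_CONNECTION_URL': 'jdbc:postgresql://db.example.com:5432/analytics',
            'USERNAME': 'glue_user',
            'PASSWORD': 'replace-me',
        },
    }
)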
At least one crawl target must be specified, in the s3Targets field, the jdbcTargets field, or the DynamoDBTargets field.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_crawler(\n Name='string',\n Role='string',\n DatabaseName='string',\n Description='string',\n Targets={\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n Schedule='string',\n Classifiers=[\n 'string',\n ],\n TablePrefix='string',\n SchemaChangePolicy={\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n Configuration='string',\n CrawlerSecurityConfiguration='string',\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the new crawler.\\n\n\n :type Role: string\n :param Role: [REQUIRED]\\nThe IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.\\n\n\n :type DatabaseName: string\n :param DatabaseName: The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database\/sometable\/* .\n\n :type Description: string\n :param Description: A description of the new crawler.\n\n :type Targets: dict\n :param Targets: [REQUIRED]\\nA list of collection of targets to crawl.\\n\\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\\n\\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\\n\\nPath (string) --The path to the Amazon S3 target.\\n\\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\nJdbcTargets (list) --Specifies JDBC targets.\\n\\n(dict) --Specifies a JDBC data store to crawl.\\n\\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\\n\\nPath (string) --The path of the JDBC target.\\n\\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\\n\\n(dict) --Specifies an Amazon DynamoDB table to crawl.\\n\\nPath (string) --The name of the DynamoDB table to crawl.\\n\\n\\n\\n\\n\\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\\n\\n(dict) --Specifies an AWS Glue Data Catalog target.\\n\\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\\n\\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\n\n :type Schedule: string\n :param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .\n\n :type Classifiers: list\n :param Classifiers: A list of custom classifiers that the user has registered. 
By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\\n\\n(string) --\\n\\n\n\n :type TablePrefix: string\n :param TablePrefix: The table prefix used for catalog tables that are created.\n\n :type SchemaChangePolicy: dict\n :param SchemaChangePolicy: The policy for the crawler\\'s update and deletion behavior.\\n\\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\\n\\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\\n\\n\\n\n\n :type Configuration: string\n :param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\\'s behavior. For more information, see Configuring a Crawler .\n\n :type CrawlerSecurityConfiguration: string\n :param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.\n\n :type Tags: dict\n :param Tags: The tags to use with this crawler request. You can use tags to limit access to the crawler. For more information, see AWS Tags in AWS Glue .\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_database(CatalogId=None, DatabaseInput=None):\n \"\"\"\n Creates a new database in a Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_database(\n CatalogId='string',\n DatabaseInput={\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': {\n 'DataLakePrincipalIdentifier': 'string'\n },\n 'Permissions': [\n 'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',\n ]\n },\n ]\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the database. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseInput: dict\n :param DatabaseInput: [REQUIRED]\\nThe metadata for the database.\\n\\nName (string) -- [REQUIRED]The name of the database. 
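# --- Illustrative usage sketch (not part of the generated stubs; role, bucket, and names are hypothetical) ---
# Creating an S3 crawler per the create_crawler documentation above, with a daily
# schedule expressed in the same cron syntax the docs describe.
import boto3

glue = boto3.client('glue')

glue.create_crawler(
    Name='raw-events-crawler',
    Role='arn:aws:iam::123456789012:role/GlueCrawlerRole',
    DatabaseName='raw_events',
    Targets={
        'S3Targets': [
            {'Path': 's3://example-bucket/raw/events/', 'Exclusions': ['**.tmp']}
        ]
    },
    Schedule='cron(15 12 * * ? *)',       # every day at 12:15 UTC
    TablePrefix='raw_',
    SchemaChangePolicy={
        'UpdateBehavior': 'UPDATE_IN_DATABASE',
        'DeleteBehavior': 'DEPRECATE_IN_DATABASE',
    },
)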
For Hive compatibility, this is folded to lowercase when it is stored.\\n\\nDescription (string) --A description of the database.\\n\\nLocationUri (string) --The location of the database (for example, an HDFS path).\\n\\nParameters (dict) --These key-value pairs define parameters and properties of the database.\\nThese key-value pairs define parameters and properties of the database.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\\n\\n(dict) --Permissions granted to a principal.\\n\\nPrincipal (dict) --The principal who is granted permissions.\\n\\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\\n\\n\\n\\nPermissions (list) --The permissions that are granted to the principal.\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_dev_endpoint(EndpointName=None, RoleArn=None, SecurityGroupIds=None, SubnetId=None, PublicKey=None, PublicKeys=None, NumberOfNodes=None, WorkerType=None, GlueVersion=None, NumberOfWorkers=None, ExtraPythonLibsS3Path=None, ExtraJarsS3Path=None, SecurityConfiguration=None, Tags=None, Arguments=None):\n \"\"\"\n Creates a new development endpoint.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_dev_endpoint(\n EndpointName='string',\n RoleArn='string',\n SecurityGroupIds=[\n 'string',\n ],\n SubnetId='string',\n PublicKey='string',\n PublicKeys=[\n 'string',\n ],\n NumberOfNodes=123,\n WorkerType='Standard'|'G.1X'|'G.2X',\n GlueVersion='string',\n NumberOfWorkers=123,\n ExtraPythonLibsS3Path='string',\n ExtraJarsS3Path='string',\n SecurityConfiguration='string',\n Tags={\n 'string': 'string'\n },\n Arguments={\n 'string': 'string'\n }\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\\nThe name to be assigned to the new DevEndpoint .\\n\n\n :type RoleArn: string\n :param RoleArn: [REQUIRED]\\nThe IAM role for the DevEndpoint .\\n\n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: Security group IDs for the security groups to be used by the new DevEndpoint .\\n\\n(string) --\\n\\n\n\n :type SubnetId: string\n :param SubnetId: The subnet ID for the new DevEndpoint to use.\n\n :type PublicKey: string\n :param PublicKey: The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.\n\n :type PublicKeys: list\n :param PublicKeys: A list of public keys to be used by the development endpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\\n\\nNote\\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. 
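# --- Illustrative usage sketch (not part of the generated stubs; database name and location are hypothetical) ---
# Creating a Data Catalog database with create_database as documented above.
import boto3

glue = boto3.client('glue')

glue.create_database(
    DatabaseInput={
        'Name': 'raw_events',   # folded to lowercase for Hive compatibility
        'Description': 'Landing zone tables crawled from S3',
        'LocationUri': 's3://example-bucket/raw/',
    }
)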
Call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\\n\\n\\n(string) --\\n\\n\n\n :type NumberOfNodes: integer\n :param NumberOfNodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint .\n\n :type WorkerType: string\n :param WorkerType: The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\\n\\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\\n\\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\\n\n\n :type GlueVersion: string\n :param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\\n\n\n :type NumberOfWorkers: integer\n :param NumberOfWorkers: The number of workers of a defined workerType that are allocated to the development endpoint.\\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\\n\n\n :type ExtraPythonLibsS3Path: string\n :param ExtraPythonLibsS3Path: The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\\n\\nNote\\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.\\n\\n\n\n :type ExtraJarsS3Path: string\n :param ExtraJarsS3Path: The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\n :type SecurityConfiguration: string\n :param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this DevEndpoint .\n\n :type Tags: dict\n :param Tags: The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. 
For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :type Arguments: dict\n :param Arguments: A map of arguments used to configure the DevEndpoint .\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'EndpointName': 'string',\n 'Status': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'RoleArn': 'string',\n 'YarnEndpointAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'NumberOfNodes': 123,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'SecurityConfiguration': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'Arguments': {\n 'string': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nEndpointName (string) --\nThe name assigned to the new DevEndpoint .\n\nStatus (string) --\nThe current status of the new DevEndpoint .\n\nSecurityGroupIds (list) --\nThe security groups assigned to the new DevEndpoint .\n\n(string) --\n\n\nSubnetId (string) --\nThe subnet ID assigned to the new DevEndpoint .\n\nRoleArn (string) --\nThe Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint .\n\nYarnEndpointAddress (string) --\nThe address of the YARN endpoint used by this DevEndpoint .\n\nZeppelinRemoteSparkInterpreterPort (integer) --\nThe Apache Zeppelin port for the remote Apache Spark interpreter.\n\nNumberOfNodes (integer) --\nThe number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.\n\nWorkerType (string) --\nThe type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated to the development endpoint.\n\nAvailabilityZone (string) --\nThe AWS Availability Zone where this DevEndpoint is located.\n\nVpcId (string) --\nThe ID of the virtual private cloud (VPC) used by this DevEndpoint .\n\nExtraPythonLibsS3Path (string) --\nThe paths to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint .\n\nExtraJarsS3Path (string) --\nPath to one or more Java .jar files in an S3 bucket that will be loaded in your DevEndpoint .\n\nFailureReason (string) --\nThe reason for a current failure in this DevEndpoint .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure being used with this DevEndpoint .\n\nCreatedTimestamp (datetime) --\nThe point in time at which this DevEndpoint was created.\n\nArguments (dict) --\nThe map of arguments used to configure this DevEndpoint .\nValid arguments are:\n\n\"--enable-glue-datacatalog\": \"\"\n\"GLUE_PYTHON_VERSION\": \"3\"\n\"GLUE_PYTHON_VERSION\": \"2\"\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. 
If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AccessDeniedException\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.IdempotentParameterMismatchException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.ValidationException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\n\n\n :return: {\n 'EndpointName': 'string',\n 'Status': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'RoleArn': 'string',\n 'YarnEndpointAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'NumberOfNodes': 123,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'SecurityConfiguration': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'Arguments': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef create_job(Name=None, Description=None, LogUri=None, Role=None, ExecutionProperty=None, Command=None, DefaultArguments=None, NonOverridableArguments=None, Connections=None, MaxRetries=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, Tags=None, NotificationProperty=None, GlueVersion=None, NumberOfWorkers=None, WorkerType=None):\n \"\"\"\n Creates a new job definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_job(\n Name='string',\n Description='string',\n LogUri='string',\n Role='string',\n ExecutionProperty={\n 'MaxConcurrentRuns': 123\n },\n Command={\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n DefaultArguments={\n 'string': 'string'\n },\n NonOverridableArguments={\n 'string': 'string'\n },\n Connections={\n 'Connections': [\n 'string',\n ]\n },\n MaxRetries=123,\n AllocatedCapacity=123,\n Timeout=123,\n MaxCapacity=123.0,\n SecurityConfiguration='string',\n Tags={\n 'string': 'string'\n },\n NotificationProperty={\n 'NotifyDelayAfter': 123\n },\n GlueVersion='string',\n NumberOfWorkers=123,\n WorkerType='Standard'|'G.1X'|'G.2X'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name you assign to this job definition. It must be unique in your account.\\n\n\n :type Description: string\n :param Description: Description of the job being defined.\n\n :type LogUri: string\n :param LogUri: This field is reserved for future use.\n\n :type Role: string\n :param Role: [REQUIRED]\\nThe name or Amazon Resource Name (ARN) of the IAM role associated with this job.\\n\n\n :type ExecutionProperty: dict\n :param ExecutionProperty: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\\n\\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\\n\\n\\n\n\n :type Command: dict\n :param Command: [REQUIRED]\\nThe JobCommand that executes this job.\\n\\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . 
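# --- Illustrative usage sketch (not part of the generated stubs; endpoint, role, subnet, and security group are hypothetical) ---
# Creating a development endpoint per the create_dev_endpoint documentation above,
# requesting Python 3 through the Arguments map as the docs describe.
import boto3

glue = boto3.client('glue')

resp = glue.create_dev_endpoint(
    EndpointName='dev-endpoint-example',
    RoleArn='arn:aws:iam::123456789012:role/GlueDevEndpointRole',
    SubnetId='subnet-0123456789abcdef0',
    SecurityGroupIds=['sg-0123456789abcdef0'],
    GlueVersion='1.0',
    WorkerType='G.1X',
    NumberOfWorkers=2,
    Arguments={'GLUE_PYTHON_VERSION': '3'},
)
print(resp['Status'])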
For a Python shell job, it must be pythonshell .\\n\\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\\n\\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\\n\\n\\n\n\n :type DefaultArguments: dict\n :param DefaultArguments: The default arguments for this job.\\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :type NonOverridableArguments: dict\n :param NonOverridableArguments: Non-overridable arguments for this job, specified as name-value pairs.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :type Connections: dict\n :param Connections: The connections used for this job.\\n\\nConnections (list) --A list of connections used by the job.\\n\\n(string) --\\n\\n\\n\\n\n\n :type MaxRetries: integer\n :param MaxRetries: The maximum number of times to retry this job if it fails.\n\n :type AllocatedCapacity: integer\n :param AllocatedCapacity: This parameter is deprecated. Use MaxCapacity instead.\\nThe number of AWS Glue data processing units (DPUs) to allocate to this Job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\n\n\n :type Timeout: integer\n :param Timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\n :type MaxCapacity: float\n :param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\\n\\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\\n\\n\n\n :type SecurityConfiguration: string\n :param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job.\n\n :type Tags: dict\n :param Tags: The tags to use with this job. You may use tags to limit access to the job. 
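Putting the main parameters above together, a minimal hedged sketch of a Spark ETL job definition (the job name, role, script location, and tag values are placeholders, assuming glue = boto3.client('glue')):\\n\\nglue.create_job(\\n    Name='nightly-etl',\\n    Role='ExampleGlueServiceRole',\\n    Command={'Name': 'glueetl', 'ScriptLocation': 's3:\/\/example-bucket\/scripts\/etl_job.py', 'PythonVersion': '3'},\\n    GlueVersion='1.0',\\n    WorkerType='G.1X',\\n    NumberOfWorkers=5,\\n    Timeout=60,\\n    Tags={'team': 'data-eng'}\\n)\\n\\n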
For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :type NotificationProperty: dict\n :param NotificationProperty: Specifies configuration properties of a job notification.\\n\\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\\n\\n\\n\n\n :type GlueVersion: string\n :param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\\nJobs that are created without specifying a Glue version default to Glue 0.9.\\n\n\n :type NumberOfWorkers: integer\n :param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\\n\n\n :type WorkerType: string\n :param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\\n\\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nName (string) --\nThe unique name that was provided for this job definition.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.IdempotentParameterMismatchException\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.IdempotentParameterMismatchException\n Glue.Client.exceptions.AlreadyExistsException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n Glue.Client.exceptions.ConcurrentModificationException\n \n \"\"\"\n pass\n\ndef create_ml_transform(Name=None, Description=None, InputRecordTables=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None, Tags=None):\n \"\"\"\n Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it.\n Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. 
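As a rough, hedged sketch (the transform, database, table, column, and role names are placeholders rather than values from this reference, and glue = boto3.client('glue') is assumed), a FindMatches transform could be created as:\n\nresponse = glue.create_ml_transform(\n    Name='dedupe-customers',\n    InputRecordTables=[{'DatabaseName': 'example_db', 'TableName': 'customers'}],\n    Parameters={'TransformType': 'FIND_MATCHES', 'FindMatchesParameters': {'PrimaryKeyColumnName': 'customer_id'}},\n    Role='ExampleGlueServiceRole',\n    GlueVersion='1.0',\n    MaxCapacity=10.0\n)\nresponse['TransformId']\n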
You can provide an optional Description , in addition to the parameters that you want to use for your algorithm.\n You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role , and optionally, AllocatedCapacity , Timeout , and MaxRetries . For more information, see Jobs .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_ml_transform(\n Name='string',\n Description='string',\n InputRecordTables=[\n {\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CatalogId': 'string',\n 'ConnectionName': 'string'\n },\n ],\n Parameters={\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesParameters': {\n 'PrimaryKeyColumnName': 'string',\n 'PrecisionRecallTradeoff': 123.0,\n 'AccuracyCostTradeoff': 123.0,\n 'EnforceProvidedLabels': True|False\n }\n },\n Role='string',\n GlueVersion='string',\n MaxCapacity=123.0,\n WorkerType='Standard'|'G.1X'|'G.2X',\n NumberOfWorkers=123,\n Timeout=123,\n MaxRetries=123,\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe unique name that you give the transform when you create it.\\n\n\n :type Description: string\n :param Description: A description of the machine learning transform that is being defined. The default is an empty string.\n\n :type InputRecordTables: list\n :param InputRecordTables: [REQUIRED]\\nA list of AWS Glue table definitions used by the transform.\\n\\n(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.\\n\\nDatabaseName (string) -- [REQUIRED]A database name in the AWS Glue Data Catalog.\\n\\nTableName (string) -- [REQUIRED]A table name in the AWS Glue Data Catalog.\\n\\nCatalogId (string) --A unique identifier for the AWS Glue Data Catalog.\\n\\nConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.\\n\\n\\n\\n\\n\n\n :type Parameters: dict\n :param Parameters: [REQUIRED]\\nThe algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type.\\n\\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\\n\\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\\n\\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\\n\\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\\nThe precision metric indicates how often your model is correct when it predicts a match.\\nThe recall metric indicates that for an actual match, how often your model predicts the match.\\n\\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. 
A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\\nCost measures how many compute resources, and thus money, are consumed to run the transform.\\n\\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\\nNote that setting this value to true may increase the conflation execution time.\\n\\n\\n\\n\\n\n\n :type Role: string\n :param Role: [REQUIRED]\\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.\\n\\nThis role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .\\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.\\n\\n\n\n :type GlueVersion: string\n :param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\n :type MaxCapacity: float\n :param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\n\\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\\n\\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\\nMaxCapacity and NumberOfWorkers must both be at least 1.\\n\\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\\n\n\n :type WorkerType: string\n :param WorkerType: The type of predefined worker that is allocated when this task runs. 
Accepts a value of Standard, G.1X, or G.2X.\\n\\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\\n\\n\\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\\n\\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\\nMaxCapacity and NumberOfWorkers must both be at least 1.\\n\\n\n\n :type NumberOfWorkers: integer\n :param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.\\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\\n\n\n :type Timeout: integer\n :param Timeout: The timeout of the task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\n :type MaxRetries: integer\n :param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.\n\n :type Tags: dict\n :param Tags: The tags to use with this machine learning transform. You may use tags to limit access to the machine learning transform. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TransformId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTransformId (string) --\nA unique identifier that is generated for the transform.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.AccessDeniedException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.IdempotentParameterMismatchException\n\n\n :return: {\n 'TransformId': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.AlreadyExistsException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.AccessDeniedException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n Glue.Client.exceptions.IdempotentParameterMismatchException\n \n \"\"\"\n pass\n\ndef create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInput=None):\n \"\"\"\n Creates a new partition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionInput={\n 'Values': [\n 'string',\n ],\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 
'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The AWS account ID of the catalog in which the partition is to be created.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the metadata database in which the partition is to be created.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the metadata table in which the partition is to be created.\\n\n\n :type PartitionInput: dict\n :param PartitionInput: [REQUIRED]\\nA PartitionInput structure defining the partition to be created.\\n\\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\\n\\n(string) --\\n\\n\\nLastAccessTime (datetime) --The last time at which the partition was accessed.\\n\\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\\n\\nColumns (list) --A list of the Columns in the table.\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\\n\\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\\n\\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\\n\\nCompressed (boolean) --\\nTrue if the data in the table is compressed, or False if not.\\n\\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\\n\\nSerdeInfo (dict) --The serialization\/deserialization (SerDe) information.\\n\\nName (string) --Name of the SerDe.\\n\\nSerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\\n\\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\\n\\n(string) --\\n\\n\\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\\n\\n(dict) --Specifies the sort order of a sorted column.\\n\\nColumn (string) -- [REQUIRED]The name of the column.\\n\\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\\n\\n\\n\\n\\n\\nParameters (dict) --The user-supplied properties in key-value form.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\\n\\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\\n\\n(string) --\\n\\n\\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\\n\\n(string) --\\n\\n\\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nStoredAsSubDirectories (boolean) --\\nTrue if the table data is stored in subdirectories, or False if not.\\n\\n\\n\\nParameters (dict) --These key-value pairs define partition parameters.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_script(DagNodes=None, DagEdges=None, Language=None):\n \"\"\"\n Transforms a directed acyclic graph (DAG) into code.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_script(\n DagNodes=[\n {\n 'Id': 'string',\n 'NodeType': 'string',\n 'Args': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'LineNumber': 123\n },\n ],\n DagEdges=[\n {\n 'Source': 'string',\n 'Target': 'string',\n 'TargetParameter': 'string'\n },\n ],\n Language='PYTHON'|'SCALA'\n )\n \n \n :type DagNodes: list\n :param DagNodes: A list of the nodes in the DAG.\\n\\n(dict) --Represents a node in a directed acyclic graph (DAG)\\n\\nId (string) -- [REQUIRED]A node identifier that is unique within the node\\'s graph.\\n\\nNodeType (string) -- [REQUIRED]The type of node that this is.\\n\\nArgs (list) -- [REQUIRED]Properties of the node, in the form of name-value pairs.\\n\\n(dict) --An argument or property of a node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\nLineNumber (integer) --The line number of the node.\\n\\n\\n\\n\\n\n\n :type DagEdges: list\n :param DagEdges: A list of the edges in the DAG.\\n\\n(dict) --Represents a 
directional edge in a directed acyclic graph (DAG).\\n\\nSource (string) -- [REQUIRED]The ID of the node at which the edge starts.\\n\\nTarget (string) -- [REQUIRED]The ID of the node at which the edge ends.\\n\\nTargetParameter (string) --The target of the edge.\\n\\n\\n\\n\\n\n\n :type Language: string\n :param Language: The programming language of the resulting code from the DAG.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'PythonScript': 'string',\n 'ScalaCode': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nPythonScript (string) --\nThe Python script generated from the DAG.\n\nScalaCode (string) --\nThe Scala code generated from the DAG.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'PythonScript': 'string',\n 'ScalaCode': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef create_security_configuration(Name=None, EncryptionConfiguration=None):\n \"\"\"\n Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_security_configuration(\n Name='string',\n EncryptionConfiguration={\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name for the new security configuration.\\n\n\n :type EncryptionConfiguration: dict\n :param EncryptionConfiguration: [REQUIRED]\\nThe encryption configuration for the new security configuration.\\n\\nS3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.\\n\\n(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.\\n\\nS3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.\\n\\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\\n\\n\\n\\n\\n\\nCloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.\\n\\nCloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.\\n\\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\\n\\n\\n\\nJobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.\\n\\nJobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.\\n\\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n}\n\n\nResponse Structure\n\n(dict) --\n\nName (string) --\nThe name assigned to the new security configuration.\n\nCreatedTimestamp (datetime) --\nThe time at which the new security configuration 
was created.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\n\n\n :return: {\n 'Name': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1)\n }\n \n \n :returns: \n Glue.Client.exceptions.AlreadyExistsException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n \n \"\"\"\n pass\n\ndef create_table(CatalogId=None, DatabaseName=None, TableInput=None):\n \"\"\"\n Creates a new table definition in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_table(\n CatalogId='string',\n DatabaseName='string',\n TableInput={\n 'Name': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the Table . If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe catalog database in which to create the new table. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TableInput: dict\n :param TableInput: [REQUIRED]\\nThe TableInput object that defines the metadata table to create in the catalog.\\n\\nName (string) -- [REQUIRED]The table name. 
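(A hedged end-to-end sketch, with the database name, table name, S3 location, and Hive input\/output\/SerDe classes all as illustrative assumptions, assuming glue = boto3.client('glue'):\\n\\nglue.create_table(\\n    DatabaseName='example_db',\\n    TableInput={\\n        'Name': 'events',\\n        'StorageDescriptor': {\\n            'Columns': [{'Name': 'event_id', 'Type': 'string'}, {'Name': 'ts', 'Type': 'timestamp'}],\\n            'Location': 's3:\/\/example-bucket\/events\/',\\n            'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',\\n            'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',\\n            'SerdeInfo': {'SerializationLibrary': 'org.apache.hadoop.hive.serde2.OpenCSVSerde'}\\n        },\\n        'PartitionKeys': [],\\n        'TableType': 'EXTERNAL_TABLE'\\n    }\\n)\\n\\nwhere 'events' stands in for the table name.)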
For Hive compatibility, this is folded to lowercase when it is stored.\\n\\nDescription (string) --A description of the table.\\n\\nOwner (string) --The table owner.\\n\\nLastAccessTime (datetime) --The last time that the table was accessed.\\n\\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\\n\\nRetention (integer) --The retention time for this table.\\n\\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\\n\\nColumns (list) --A list of the Columns in the table.\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\\n\\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\\n\\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\\n\\nCompressed (boolean) --\\nTrue if the data in the table is compressed, or False if not.\\n\\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\\n\\nSerdeInfo (dict) --The serialization\/deserialization (SerDe) information.\\n\\nName (string) --Name of the SerDe.\\n\\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\\n\\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\\n\\n(string) --\\n\\n\\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\\n\\n(dict) --Specifies the sort order of a sorted column.\\n\\nColumn (string) -- [REQUIRED]The name of the column.\\n\\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\\n\\n\\n\\n\\n\\nParameters (dict) --The user-supplied properties in key-value form.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\\n\\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\\n\\n(string) --\\n\\n\\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\\n\\n(string) --\\n\\n\\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nStoredAsSubDirectories (boolean) --\\nTrue if the table data is stored in subdirectories, or False if not.\\n\\n\\n\\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\\n\\n'PartitionKeys': []\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\\n\\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\\n\\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\\n\\nParameters (dict) --These key-value pairs define properties associated with the table.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_trigger(Name=None, WorkflowName=None, Type=None, Schedule=None, Predicate=None, Actions=None, Description=None, StartOnCreation=None, Tags=None):\n \"\"\"\n Creates a new trigger.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_trigger(\n Name='string',\n WorkflowName='string',\n Type='SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n Schedule='string',\n Predicate={\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n },\n Actions=[\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n Description='string',\n StartOnCreation=True|False,\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the trigger.\\n\n\n :type WorkflowName: string\n :param WorkflowName: The name of the workflow associated with the trigger.\n\n :type Type: string\n :param Type: [REQUIRED]\\nThe type of the new trigger.\\n\n\n :type Schedule: string\n :param Schedule: A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\\nThis field is required when the trigger type is SCHEDULED.\\n\n\n :type Predicate: dict\n :param Predicate: A predicate to specify when the new trigger should fire.\\nThis field is required when the trigger type is CONDITIONAL .\\n\\nLogical (string) --An optional field if only one condition is listed. 
If multiple conditions are listed, then this field is required.\\n\\nConditions (list) --A list of the conditions that determine when the trigger will fire.\\n\\n(dict) --Defines a condition under which a trigger fires.\\n\\nLogicalOperator (string) --A logical operator.\\n\\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\\n\\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\\n\\nCrawlerName (string) --The name of the crawler to which this condition applies.\\n\\nCrawlState (string) --The state of the crawler to which this condition applies.\\n\\n\\n\\n\\n\\n\\n\n\n :type Actions: list\n :param Actions: [REQUIRED]\\nThe actions initiated by this trigger when it fires.\\n\\n(dict) --Defines an action to be initiated by a trigger.\\n\\nJobName (string) --The name of a job to be executed.\\n\\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\\n\\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\\n\\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\\n\\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\\n\\n\\n\\nCrawlerName (string) --The name of the crawler to be used with this action.\\n\\n\\n\\n\\n\n\n :type Description: string\n :param Description: A description of the new trigger.\n\n :type StartOnCreation: boolean\n :param StartOnCreation: Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.\n\n :type Tags: dict\n :param Tags: The tags to use with this trigger. You may use tags to limit access to the trigger. 
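Combining the fields above, a hedged sketch of a conditional trigger (the trigger, job, and tag values are placeholders carried over from the earlier sketches, assuming glue = boto3.client('glue')):\\n\\nglue.create_trigger(\\n    Name='run-report-after-etl',\\n    Type='CONDITIONAL',\\n    Predicate={'Logical': 'ANY', 'Conditions': [{'LogicalOperator': 'EQUALS', 'JobName': 'nightly-etl', 'State': 'SUCCEEDED'}]},\\n    Actions=[{'JobName': 'build-report'}],\\n    StartOnCreation=True,\\n    Tags={'team': 'data-eng'}\\n)\\n\\n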
For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nName (string) --\nThe name of the trigger.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.IdempotentParameterMismatchException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.AlreadyExistsException\n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.IdempotentParameterMismatchException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n Glue.Client.exceptions.ConcurrentModificationException\n \n \"\"\"\n pass\n\ndef create_user_defined_function(CatalogId=None, DatabaseName=None, FunctionInput=None):\n \"\"\"\n Creates a new function definition in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionInput={\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which to create the function. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which to create the function.\\n\n\n :type FunctionInput: dict\n :param FunctionInput: [REQUIRED]\\nA FunctionInput object that defines the function to create in the Data Catalog.\\n\\nFunctionName (string) --The name of the function.\\n\\nClassName (string) --The Java class that contains the function code.\\n\\nOwnerName (string) --The owner of the function.\\n\\nOwnerType (string) --The owner type.\\n\\nResourceUris (list) --The resource URIs for the function.\\n\\n(dict) --The URIs for function resources.\\n\\nResourceType (string) --The type of the resource.\\n\\nUri (string) --The URI for accessing the resource.\\n\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef create_workflow(Name=None, Description=None, DefaultRunProperties=None, Tags=None):\n \"\"\"\n Creates a new workflow.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.create_workflow(\n Name='string',\n Description='string',\n DefaultRunProperties={\n 'string': 'string'\n },\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name to be assigned to the workflow. 
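(As a hedged sketch, with 'orders-pipeline' standing in for a real workflow name: glue.create_workflow(Name='orders-pipeline', Description='Example workflow', DefaultRunProperties={'environment': 'dev'}), assuming glue = boto3.client('glue').)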
It should be unique within your account.\\n\n\n :type Description: string\n :param Description: A description of the workflow.\n\n :type DefaultRunProperties: dict\n :param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :type Tags: dict\n :param Tags: The tags to be used with this workflow.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nName (string) --\nThe name of the workflow which was provided as part of the request.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.AlreadyExistsException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n Glue.Client.exceptions.ConcurrentModificationException\n \n \"\"\"\n pass\n\ndef delete_classifier(Name=None):\n \"\"\"\n Removes a classifier from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_classifier(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the classifier to remove.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef delete_connection(CatalogId=None, ConnectionName=None):\n \"\"\"\n Deletes a connection from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_connection(\n CatalogId='string',\n ConnectionName='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connection resides. 
If none is provided, the AWS account ID is used by default.\n\n :type ConnectionName: string\n :param ConnectionName: [REQUIRED]\\nThe name of the connection to delete.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_crawler(Name=None):\n \"\"\"\n Removes a specified crawler from the AWS Glue Data Catalog, unless the crawler state is RUNNING .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the crawler to remove.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.CrawlerRunningException\nGlue.Client.exceptions.SchedulerTransitioningException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.CrawlerRunningException\n Glue.Client.exceptions.SchedulerTransitioningException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef delete_database(CatalogId=None, Name=None):\n \"\"\"\n Removes a specified database from a Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_database(\n CatalogId='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the database to delete. For Hive compatibility, this must be all lowercase.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_dev_endpoint(EndpointName=None):\n \"\"\"\n Deletes a specified development endpoint.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_dev_endpoint(\n EndpointName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\\nThe name of the DevEndpoint .\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InvalidInputException\n \n \"\"\"\n pass\n\ndef delete_job(JobName=None):\n \"\"\"\n Deletes a specified job definition. 
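A hedged one-liner, reusing the placeholder job name from the earlier sketch: glue.delete_job(JobName='nightly-etl') returns {'JobName': 'nightly-etl'} on success.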
If the job definition is not found, no exception is thrown.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_job(\n JobName='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job definition to delete.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'JobName': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nJobName (string) --The name of the job definition that was deleted.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'JobName': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_ml_transform(TransformId=None):\n \"\"\"\n Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransforms . However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_ml_transform(\n TransformId='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the transform to delete.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'TransformId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nTransformId (string) --The unique identifier of the transform that was deleted.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TransformId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):\n \"\"\"\n Deletes a specified partition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionValues=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which the table in question resides.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table that contains the partition to be deleted.\\n\n\n :type PartitionValues: list\n :param PartitionValues: [REQUIRED]\\nThe values that define the partition.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_resource_policy(PolicyHashCondition=None):\n \"\"\"\n Deletes a specified policy.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_resource_policy(\n PolicyHashCondition='string'\n )\n \n \n :type PolicyHashCondition: string\n :param PolicyHashCondition: The hash value returned when this policy was set.\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.ConditionCheckFailureException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.ConditionCheckFailureException\n \n \"\"\"\n pass\n\ndef delete_security_configuration(Name=None):\n \"\"\"\n Deletes a specified security configuration.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_security_configuration(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the security configuration to delete.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef delete_table(CatalogId=None, DatabaseName=None, Name=None):\n \"\"\"\n Removes a table definition from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_table(\n CatalogId='string',\n DatabaseName='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the table to be deleted. 
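(A hedged sketch, reusing the placeholder names from the earlier create_table sketch: glue.delete_table(DatabaseName='example_db', Name='events').)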
For Hive compatibility, this name is entirely lowercase.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):\n \"\"\"\n Deletes a specified version of a table.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_table_version(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n VersionId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type VersionId: string\n :param VersionId: [REQUIRED]\\nThe ID of the table version to be deleted. A VersionID is a string representation of an integer. Each version is incremented by 1.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_trigger(Name=None):\n \"\"\"\n Deletes a specified trigger. If the trigger is not found, no exception is thrown.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the trigger to delete.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nName (string) --The name of the trigger that was deleted.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef delete_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):\n \"\"\"\n Deletes an existing function definition from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionName='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the function to be deleted is located. 
If none is supplied, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database where the function is located.\\n\n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\\nThe name of the function definition to be deleted.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef delete_workflow(Name=None):\n \"\"\"\n Deletes a workflow.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.delete_workflow(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the workflow to be deleted.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nName (string) --Name of the workflow specified in input.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n \n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n\n :type Params: dict\n :param Params: The parameters normally passed to\\nClientMethod.\n\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\\nfor. By default it expires in an hour (3600 seconds)\n\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. By\\ndefault, the http method is whatever is used in the method\\'s model.\n\n \"\"\"\n pass\n\ndef get_catalog_import_status(CatalogId=None):\n \"\"\"\n Retrieves the status of a migration operation.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_catalog_import_status(\n CatalogId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the catalog to migrate. 
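# Usage sketch: a hedged example combining the catalog clean-up calls
# documented above (delete_table_version, delete_table, delete_user_defined_function,
# delete_trigger, delete_workflow). All database, table, function, trigger and
# workflow names are hypothetical placeholders.
import boto3

def _example_catalog_cleanup():
    glue = boto3.client('glue')
    # Drop an old table version first, then the table definition itself.
    glue.delete_table_version(DatabaseName='example_db',
                              TableName='example_table',
                              VersionId='1')  # a stringified integer, per the docs above
    glue.delete_table(DatabaseName='example_db', Name='example_table')
    # UDFs, triggers and workflows are deleted by name; delete_trigger does
    # not raise when the trigger is already gone, per the docs above.
    glue.delete_user_defined_function(DatabaseName='example_db',
                                      FunctionName='example_udf')
    glue.delete_trigger(Name='example-trigger')
    glue.delete_workflow(Name='example-workflow')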
Currently, this should be the AWS account ID.\n\n :rtype: dict\nReturnsResponse Syntax{\n 'ImportStatus': {\n 'ImportCompleted': True|False,\n 'ImportTime': datetime(2015, 1, 1),\n 'ImportedBy': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\nImportStatus (dict) --The status of the specified catalog migration.\n\nImportCompleted (boolean) --\nTrue if the migration has completed, or False otherwise.\n\nImportTime (datetime) --The time that the migration was started.\n\nImportedBy (string) --The name of the person who initiated the migration.\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'ImportStatus': {\n 'ImportCompleted': True|False,\n 'ImportTime': datetime(2015, 1, 1),\n 'ImportedBy': 'string'\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_classifier(Name=None):\n \"\"\"\n Retrieve a classifier by name.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_classifier(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the classifier to retrieve.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Classifier': {\n 'GrokClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n 'XMLClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'RowTag': 'string'\n },\n 'JsonClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'JsonPath': 'string'\n },\n 'CsvClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'Delimiter': 'string',\n 'QuoteSymbol': 'string',\n 'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',\n 'Header': [\n 'string',\n ],\n 'DisableValueTrimming': True|False,\n 'AllowSingleColumn': True|False\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\nClassifier (dict) --The requested classifier.\n\nGrokClassifier (dict) --A classifier that uses grok .\n\nName (string) --The name of the classifier.\n\nClassification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.\n\nCreationTime (datetime) --The time that this classifier was registered.\n\nLastUpdated (datetime) --The time that this classifier was last updated.\n\nVersion (integer) --The version of this classifier.\n\nGrokPattern (string) --The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .\n\nCustomPatterns (string) --Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .\n\n\n\nXMLClassifier (dict) --A classifier for XML content.\n\nName (string) --The name of the classifier.\n\nClassification (string) --An identifier of the data format that the classifier matches.\n\nCreationTime (datetime) --The time that this classifier was registered.\n\nLastUpdated (datetime) --The time that this classifier was last updated.\n\nVersion (integer) --The version of this classifier.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. 
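# Usage sketch: polling get_catalog_import_status, documented above, for the
# default catalog (the caller's AWS account ID) and reading the completion flag.
import boto3

def _example_catalog_import_status():
    glue = boto3.client('glue')
    status = glue.get_catalog_import_status()['ImportStatus']
    if status.get('ImportCompleted'):
        print('catalog import finished; started by', status.get('ImportedBy'))
    else:
        print('catalog import not complete yet')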
This can\\'t identify a self-closing element (closed by \/> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <\/row> is okay, but is not).\n\n\n\nJsonClassifier (dict) --A classifier for JSON content.\n\nName (string) --The name of the classifier.\n\nCreationTime (datetime) --The time that this classifier was registered.\n\nLastUpdated (datetime) --The time that this classifier was last updated.\n\nVersion (integer) --The version of this classifier.\n\nJsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n\nCsvClassifier (dict) --A classifier for comma-separated values (CSV).\n\nName (string) --The name of the classifier.\n\nCreationTime (datetime) --The time that this classifier was registered.\n\nLastUpdated (datetime) --The time that this classifier was last updated.\n\nVersion (integer) --The version of this classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true .\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Classifier': {\n 'GrokClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n 'XMLClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'RowTag': 'string'\n },\n 'JsonClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'JsonPath': 'string'\n },\n 'CsvClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'Delimiter': 'string',\n 'QuoteSymbol': 'string',\n 'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',\n 'Header': [\n 'string',\n ],\n 'DisableValueTrimming': True|False,\n 'AllowSingleColumn': True|False\n }\n }\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef get_classifiers(MaxResults=None, NextToken=None):\n \"\"\"\n Lists all classifier objects in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_classifiers(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The size of the list to return (optional).\n\n :type NextToken: string\n :param NextToken: An optional continuation token.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Classifiers': [\n {\n 'GrokClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 
1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n 'XMLClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'RowTag': 'string'\n },\n 'JsonClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'JsonPath': 'string'\n },\n 'CsvClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'Delimiter': 'string',\n 'QuoteSymbol': 'string',\n 'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',\n 'Header': [\n 'string',\n ],\n 'DisableValueTrimming': True|False,\n 'AllowSingleColumn': True|False\n }\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nClassifiers (list) --\nThe requested list of classifier objects.\n\n(dict) --\nClassifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format.\nYou can use the standard classifiers that AWS Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.\n\nGrokClassifier (dict) --\nA classifier that uses grok .\n\nName (string) --\nThe name of the classifier.\n\nClassification (string) --\nAn identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.\n\nCreationTime (datetime) --\nThe time that this classifier was registered.\n\nLastUpdated (datetime) --\nThe time that this classifier was last updated.\n\nVersion (integer) --\nThe version of this classifier.\n\nGrokPattern (string) --\nThe grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .\n\nCustomPatterns (string) --\nOptional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .\n\n\n\nXMLClassifier (dict) --\nA classifier for XML content.\n\nName (string) --\nThe name of the classifier.\n\nClassification (string) --\nAn identifier of the data format that the classifier matches.\n\nCreationTime (datetime) --\nThe time that this classifier was registered.\n\nLastUpdated (datetime) --\nThe time that this classifier was last updated.\n\nVersion (integer) --\nThe version of this classifier.\n\nRowTag (string) --\nThe XML tag designating the element that contains each record in an XML document being parsed. This can\\'t identify a self-closing element (closed by \/> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <\/row> is okay, but is not).\n\n\n\nJsonClassifier (dict) --\nA classifier for JSON content.\n\nName (string) --\nThe name of the classifier.\n\nCreationTime (datetime) --\nThe time that this classifier was registered.\n\nLastUpdated (datetime) --\nThe time that this classifier was last updated.\n\nVersion (integer) --\nThe version of this classifier.\n\nJsonPath (string) --\nA JsonPath string defining the JSON data for the classifier to classify. 
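# Usage sketch: fetching a single classifier with get_classifier, documented
# above, and working out which of the four classifier shapes was returned.
# The classifier name is a hypothetical placeholder.
import boto3

def _example_inspect_classifier(name='example-csv-classifier'):
    glue = boto3.client('glue')
    classifier = glue.get_classifier(Name=name)['Classifier']
    # Only one of these keys is expected to be populated for a given classifier.
    for kind in ('GrokClassifier', 'XMLClassifier', 'JsonClassifier', 'CsvClassifier'):
        if kind in classifier:
            details = classifier[kind]
            print(kind, details['Name'], 'version', details['Version'])
            return kind, details
    return None, None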
AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n\nCsvClassifier (dict) --\nA classifier for comma-separated values (CSV).\n\nName (string) --\nThe name of the classifier.\n\nCreationTime (datetime) --\nThe time that this classifier was registered.\n\nLastUpdated (datetime) --\nThe time that this classifier was last updated.\n\nVersion (integer) --\nThe version of this classifier.\n\nDelimiter (string) --\nA custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --\nA custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.\n\nContainsHeader (string) --\nIndicates whether the CSV file contains a header.\n\nHeader (list) --\nA list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --\nSpecifies not to trim values before identifying the type of column values. The default value is true .\n\nAllowSingleColumn (boolean) --\nEnables the processing of files that contain only one column.\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Classifiers': [\n {\n 'GrokClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n 'XMLClassifier': {\n 'Name': 'string',\n 'Classification': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'RowTag': 'string'\n },\n 'JsonClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'JsonPath': 'string'\n },\n 'CsvClassifier': {\n 'Name': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'Version': 123,\n 'Delimiter': 'string',\n 'QuoteSymbol': 'string',\n 'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',\n 'Header': [\n 'string',\n ],\n 'DisableValueTrimming': True|False,\n 'AllowSingleColumn': True|False\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_connection(CatalogId=None, Name=None, HidePassword=None):\n \"\"\"\n Retrieves a connection definition from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_connection(\n CatalogId='string',\n Name='string',\n HidePassword=True|False\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the connection definition to retrieve.\\n\n\n :type HidePassword: boolean\n :param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. 
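# Usage sketch: manual NextToken pagination over get_classifiers, documented
# above. The page size of 50 is an arbitrary illustrative choice.
import boto3

def _example_list_classifier_names():
    glue = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'MaxResults': 50}
        if token:
            kwargs['NextToken'] = token
        page = glue.get_classifiers(**kwargs)
        for entry in page.get('Classifiers', []):
            # Each list entry carries exactly one classifier shape; record its name.
            for shape in entry.values():
                names.append(shape['Name'])
        token = page.get('NextToken')
        if not token:
            return names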
Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Connection': {\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'LastUpdatedBy': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nConnection (dict) --\nThe requested connection definition.\n\nName (string) --\nThe name of the connection definition.\n\nDescription (string) --\nThe description of the connection.\n\nConnectionType (string) --\nThe type of the connection. Currently, only JDBC is supported; SFTP is not supported.\n\nMatchCriteria (list) --\nA list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) --\nThese key-value pairs define parameters for the connection:\n\nHOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.\nPORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.\nUSER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME \".\nPASSWORD - A password, if one is used, for the user name.\nENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.\nJDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.\nJDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.\nJDBC_ENGINE - The name of the JDBC engine to use.\nJDBC_ENGINE_VERSION - The version of the JDBC engine to use.\nCONFIG_FILES - (Reserved for future use.)\nINSTANCE_ID - The instance ID to use.\nJDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.\nJDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.\nCUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer\\'s root certificate. AWS Glue uses this root certificate to validate the customer\\xe2\\x80\\x99s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.\nSKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue\\xe2\\x80\\x99s validation of the customer certificate.\nCUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. 
In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .\nCONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.\nKAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.\n\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --\nA map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.\n\nSubnetId (string) --\nThe subnet ID used by the connection.\n\nSecurityGroupIdList (list) --\nThe security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --\nThe connection\\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\nCreationTime (datetime) --\nThe time that this connection definition was created.\n\nLastUpdatedTime (datetime) --\nThe last time that this connection definition was updated.\n\nLastUpdatedBy (string) --\nThe user, group, or role that last updated this connection definition.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Connection': {\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'LastUpdatedBy': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_connections(CatalogId=None, Filter=None, HidePassword=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves a list of connection definitions from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_connections(\n CatalogId='string',\n Filter={\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA'\n },\n HidePassword=True|False,\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.\n\n :type Filter: dict\n :param Filter: A filter that controls which connections are returned.\\n\\nMatchCriteria (list) --A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.\\n\\n(string) --\\n\\n\\nConnectionType (string) --The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.\\n\\n\\n\n\n :type HidePassword: boolean\n :param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. 
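# Usage sketch: retrieving connection metadata without the password and
# filtering the connection list by type, using the HidePassword and Filter
# parameters documented above. The connection name is a hypothetical placeholder.
import boto3

def _example_connections():
    glue = boto3.client('glue')
    conn = glue.get_connection(Name='example-jdbc-connection',
                               HidePassword=True)['Connection']
    print(conn['Name'], conn['ConnectionType'],
          conn['ConnectionProperties'].get('JDBC_CONNECTION_URL'))
    # List only JDBC connections; passwords stay hidden here as well.
    jdbc = glue.get_connections(Filter={'ConnectionType': 'JDBC'},
                                HidePassword=True)
    return [c['Name'] for c in jdbc.get('ConnectionList', [])]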
Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of connections to return in one response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'ConnectionList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'LastUpdatedBy': 'string'\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nConnectionList (list) --\nA list of requested connection definitions.\n\n(dict) --\nDefines a connection to a data source.\n\nName (string) --\nThe name of the connection definition.\n\nDescription (string) --\nThe description of the connection.\n\nConnectionType (string) --\nThe type of the connection. Currently, only JDBC is supported; SFTP is not supported.\n\nMatchCriteria (list) --\nA list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) --\nThese key-value pairs define parameters for the connection:\n\nHOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.\nPORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.\nUSER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME \".\nPASSWORD - A password, if one is used, for the user name.\nENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.\nJDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.\nJDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.\nJDBC_ENGINE - The name of the JDBC engine to use.\nJDBC_ENGINE_VERSION - The version of the JDBC engine to use.\nCONFIG_FILES - (Reserved for future use.)\nINSTANCE_ID - The instance ID to use.\nJDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.\nJDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.\nCUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer\\'s root certificate. AWS Glue uses this root certificate to validate the customer\\xe2\\x80\\x99s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.\nSKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. 
You can set the value of this property to true to skip AWS Glue\\xe2\\x80\\x99s validation of the customer certificate.\nCUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .\nCONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.\nKAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.\n\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --\nA map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.\n\nSubnetId (string) --\nThe subnet ID used by the connection.\n\nSecurityGroupIdList (list) --\nThe security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --\nThe connection\\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\nCreationTime (datetime) --\nThe time that this connection definition was created.\n\nLastUpdatedTime (datetime) --\nThe last time that this connection definition was updated.\n\nLastUpdatedBy (string) --\nThe user, group, or role that last updated this connection definition.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if the list of connections returned does not include the last of the filtered connections.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'ConnectionList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n },\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdatedTime': datetime(2015, 1, 1),\n 'LastUpdatedBy': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_crawler(Name=None):\n \"\"\"\n Retrieves metadata for a specified crawler.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the crawler to retrieve metadata for.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Crawler': {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 
'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\nCrawler (dict) --The metadata for the specified crawler.\n\nName (string) --The name of the crawler.\n\nRole (string) --The Amazon Resource Name (ARN) of an IAM role that\\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.\n\nTargets (dict) --A collection of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) --The name of the database to be synchronized.\n\nTables (list) --A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n\nDatabaseName (string) --The name of the database in which the crawler\\'s output is stored.\n\nDescription (string) --A description of the crawler.\n\nClassifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.\n\n(string) --\n\n\nSchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n\nState (string) --Indicates whether the crawler is running, or whether a run is pending.\n\nTablePrefix (string) --The prefix added to the names of tables that are created.\n\nSchedule (dict) --For scheduled crawlers, the schedule when the crawler runs.\n\nScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? 
*) .\n\nState (string) --The state of the schedule.\n\n\n\nCrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.\n\nCreationTime (datetime) --The time that the crawler was created.\n\nLastUpdated (datetime) --The time that the crawler was last updated.\n\nLastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.\n\nStatus (string) --Status of the last crawl.\n\nErrorMessage (string) --If an error occurred, the error information about the last crawl.\n\nLogGroup (string) --The log group for the last crawl.\n\nLogStream (string) --The log stream for the last crawl.\n\nMessagePrefix (string) --The prefix for a message about this crawl.\n\nStartTime (datetime) --The time at which the crawl started.\n\n\n\nVersion (integer) --The version of the crawler.\n\nConfiguration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\\'s behavior. For more information, see Configuring a Crawler .\n\nCrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Crawler': {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_crawler_metrics(CrawlerNameList=None, MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves metrics about specified crawlers.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_crawler_metrics(\n CrawlerNameList=[\n 'string',\n ],\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type CrawlerNameList: list\n :param CrawlerNameList: A list of the names of crawlers about which to retrieve metrics.\\n\\n(string) --\\n\\n\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'CrawlerMetricsList': [\n {\n 'CrawlerName': 'string',\n 'TimeLeftSeconds': 123.0,\n 'StillEstimating': True|False,\n 'LastRuntimeSeconds': 123.0,\n 'MedianRuntimeSeconds': 123.0,\n 'TablesCreated': 
123,\n 'TablesUpdated': 123,\n 'TablesDeleted': 123\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nCrawlerMetricsList (list) --\nA list of metrics for the specified crawler.\n\n(dict) --\nMetrics for a specified crawler.\n\nCrawlerName (string) --\nThe name of the crawler.\n\nTimeLeftSeconds (float) --\nThe estimated time left to complete a running crawl.\n\nStillEstimating (boolean) --\nTrue if the crawler is still estimating how long it will take to complete this run.\n\nLastRuntimeSeconds (float) --\nThe duration of the crawler\\'s most recent run, in seconds.\n\nMedianRuntimeSeconds (float) --\nThe median duration of this crawler\\'s runs, in seconds.\n\nTablesCreated (integer) --\nThe number of tables created by this crawler.\n\nTablesUpdated (integer) --\nThe number of tables updated by this crawler.\n\nTablesDeleted (integer) --\nThe number of tables deleted by this crawler.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if the returned list does not contain the last metric available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'CrawlerMetricsList': [\n {\n 'CrawlerName': 'string',\n 'TimeLeftSeconds': 123.0,\n 'StillEstimating': True|False,\n 'LastRuntimeSeconds': 123.0,\n 'MedianRuntimeSeconds': 123.0,\n 'TablesCreated': 123,\n 'TablesUpdated': 123,\n 'TablesDeleted': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef get_crawlers(MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves metadata for all crawlers defined in the customer account.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_crawlers(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The number of crawlers to return on each call.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Crawlers': [\n {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nCrawlers (list) --\nA list of crawler metadata.\n\n(dict) --\nSpecifies a crawler program that examines a data source and uses classifiers to try to determine its schema. 
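# Usage sketch: reading one crawler's state with get_crawler and its run
# metrics with get_crawler_metrics, both documented above. The crawler name
# is a hypothetical placeholder.
import boto3

def _example_crawler_status(name='example-crawler'):
    glue = boto3.client('glue')
    crawler = glue.get_crawler(Name=name)['Crawler']
    print(name, 'state:', crawler['State'],
          '| last crawl:', crawler.get('LastCrawl', {}).get('Status'))
    for m in glue.get_crawler_metrics(CrawlerNameList=[name])['CrawlerMetricsList']:
        print('last run:', m.get('LastRuntimeSeconds'), 'seconds;',
              m.get('TablesCreated'), 'tables created,',
              m.get('TablesUpdated'), 'updated,',
              m.get('TablesDeleted'), 'deleted')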
If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.\n\nName (string) --\nThe name of the crawler.\n\nRole (string) --\nThe Amazon Resource Name (ARN) of an IAM role that\\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.\n\nTargets (dict) --\nA collection of targets to crawl.\n\nS3Targets (list) --\nSpecifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --\nSpecifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --\nThe path to the Amazon S3 target.\n\nExclusions (list) --\nA list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --\nSpecifies JDBC targets.\n\n(dict) --\nSpecifies a JDBC data store to crawl.\n\nConnectionName (string) --\nThe name of the connection to use to connect to the JDBC target.\n\nPath (string) --\nThe path of the JDBC target.\n\nExclusions (list) --\nA list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --\nSpecifies Amazon DynamoDB targets.\n\n(dict) --\nSpecifies an Amazon DynamoDB table to crawl.\n\nPath (string) --\nThe name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --\nSpecifies AWS Glue Data Catalog targets.\n\n(dict) --\nSpecifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) --\nThe name of the database to be synchronized.\n\nTables (list) --\nA list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n\nDatabaseName (string) --\nThe name of the database in which the crawler\\'s output is stored.\n\nDescription (string) --\nA description of the crawler.\n\nClassifiers (list) --\nA list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.\n\n(string) --\n\n\nSchemaChangePolicy (dict) --\nThe policy that specifies update and delete behaviors for the crawler.\n\nUpdateBehavior (string) --\nThe update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --\nThe deletion behavior when the crawler finds a deleted object.\n\n\n\nState (string) --\nIndicates whether the crawler is running, or whether a run is pending.\n\nTablePrefix (string) --\nThe prefix added to the names of tables that are created.\n\nSchedule (dict) --\nFor scheduled crawlers, the schedule when the crawler runs.\n\nScheduleExpression (string) --\nA cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? 
*) .\n\nState (string) --\nThe state of the schedule.\n\n\n\nCrawlElapsedTime (integer) --\nIf the crawler is running, contains the total time elapsed since the last crawl began.\n\nCreationTime (datetime) --\nThe time that the crawler was created.\n\nLastUpdated (datetime) --\nThe time that the crawler was last updated.\n\nLastCrawl (dict) --\nThe status of the last crawl, and potentially error information if an error occurred.\n\nStatus (string) --\nStatus of the last crawl.\n\nErrorMessage (string) --\nIf an error occurred, the error information about the last crawl.\n\nLogGroup (string) --\nThe log group for the last crawl.\n\nLogStream (string) --\nThe log stream for the last crawl.\n\nMessagePrefix (string) --\nThe prefix for a message about this crawl.\n\nStartTime (datetime) --\nThe time at which the crawl started.\n\n\n\nVersion (integer) --\nThe version of the crawler.\n\nConfiguration (string) --\nCrawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\\'s behavior. For more information, see Configuring a Crawler .\n\nCrawlerSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used by this crawler.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if the returned list has not reached the end of those defined in this customer account.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Crawlers': [\n {\n 'Name': 'string',\n 'Role': 'string',\n 'Targets': {\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Classifiers': [\n 'string',\n ],\n 'SchemaChangePolicy': {\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n 'State': 'READY'|'RUNNING'|'STOPPING',\n 'TablePrefix': 'string',\n 'Schedule': {\n 'ScheduleExpression': 'string',\n 'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'\n },\n 'CrawlElapsedTime': 123,\n 'CreationTime': datetime(2015, 1, 1),\n 'LastUpdated': datetime(2015, 1, 1),\n 'LastCrawl': {\n 'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string',\n 'MessagePrefix': 'string',\n 'StartTime': datetime(2015, 1, 1)\n },\n 'Version': 123,\n 'Configuration': 'string',\n 'CrawlerSecurityConfiguration': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_data_catalog_encryption_settings(CatalogId=None):\n \"\"\"\n Retrieves the security configuration for a specified catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_data_catalog_encryption_settings(\n CatalogId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog to retrieve the security configuration for. 
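# Usage sketch: paging through get_crawlers, documented above, to check
# whether any crawler in the account is currently in the RUNNING state.
import boto3

def _example_any_crawler_running():
    glue = boto3.client('glue')
    token = None
    while True:
        kwargs = {'NextToken': token} if token else {}
        page = glue.get_crawlers(**kwargs)
        if any(c['State'] == 'RUNNING' for c in page.get('Crawlers', [])):
            return True
        token = page.get('NextToken')
        if not token:
            return False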
If none is provided, the AWS account ID is used by default.\n\n :rtype: dict\nReturnsResponse Syntax{\n 'DataCatalogEncryptionSettings': {\n 'EncryptionAtRest': {\n 'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'SseAwsKmsKeyId': 'string'\n },\n 'ConnectionPasswordEncryption': {\n 'ReturnConnectionPasswordEncrypted': True|False,\n 'AwsKmsKeyId': 'string'\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\nDataCatalogEncryptionSettings (dict) --The requested security configuration.\n\nEncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.\n\nCatalogEncryptionMode (string) --The encryption-at-rest mode for encrypting Data Catalog data.\n\nSseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.\n\n\n\nConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.\n\nReturnConnectionPasswordEncrypted (boolean) --When the ReturnConnectionPasswordEncrypted flag is set to \"true\", passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.\n\nAwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.\nIf connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.\nYou can set the decrypt permission to enable or restrict access on the password key according to your security requirements.\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'DataCatalogEncryptionSettings': {\n 'EncryptionAtRest': {\n 'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'SseAwsKmsKeyId': 'string'\n },\n 'ConnectionPasswordEncryption': {\n 'ReturnConnectionPasswordEncrypted': True|False,\n 'AwsKmsKeyId': 'string'\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_database(CatalogId=None, Name=None):\n \"\"\"\n Retrieves the definition of a specified database.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_database(\n CatalogId='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the database to retrieve. For Hive compatibility, this should be all lowercase.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Database': {\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTime': datetime(2015, 1, 1),\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': {\n 'DataLakePrincipalIdentifier': 'string'\n },\n 'Permissions': [\n 'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',\n ]\n },\n ]\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nDatabase (dict) --\nThe definition of the specified database in the Data Catalog.\n\nName (string) --\nThe name of the database. 
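# Usage sketch: reading the Data Catalog encryption settings documented above
# and reporting the encryption-at-rest mode and the password-encryption flag.
import boto3

def _example_catalog_encryption_settings():
    glue = boto3.client('glue')
    settings = glue.get_data_catalog_encryption_settings()['DataCatalogEncryptionSettings']
    mode = settings.get('EncryptionAtRest', {}).get('CatalogEncryptionMode', 'DISABLED')
    pw_encrypted = settings.get('ConnectionPasswordEncryption', {}) \
                           .get('ReturnConnectionPasswordEncrypted', False)
    print('encryption at rest:', mode, '| connection passwords encrypted:', pw_encrypted)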
For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --\nA description of the database.\n\nLocationUri (string) --\nThe location of the database (for example, an HDFS path).\n\nParameters (dict) --\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTime (datetime) --\nThe time at which the metadata database was created in the catalog.\n\nCreateTableDefaultPermissions (list) --\nCreates a set of default permissions on the table for principals.\n\n(dict) --\nPermissions granted to a principal.\n\nPrincipal (dict) --\nThe principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --\nAn identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --\nThe permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Database': {\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTime': datetime(2015, 1, 1),\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': {\n 'DataLakePrincipalIdentifier': 'string'\n },\n 'Permissions': [\n 'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',\n ]\n },\n ]\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_databases(CatalogId=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves all databases defined in a given Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_databases(\n CatalogId='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog from which to retrieve Databases . If none is provided, the AWS account ID is used by default.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of databases to return in one response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'DatabaseList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTime': datetime(2015, 1, 1),\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': {\n 'DataLakePrincipalIdentifier': 'string'\n },\n 'Permissions': [\n 'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',\n ]\n },\n ]\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nDatabaseList (list) --\nA list of Database objects from the specified catalog.\n\n(dict) --\nThe Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.\n\nName (string) --\nThe name of the database. 
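# Usage sketch: looking up one database with get_database and then listing
# every database in the default catalog with get_databases, both documented
# above. The database name is a hypothetical placeholder.
import boto3

def _example_databases():
    glue = boto3.client('glue')
    db = glue.get_database(Name='example_db')['Database']
    print(db['Name'], db.get('LocationUri'))
    names, token = [], None
    while True:
        kwargs = {'NextToken': token} if token else {}
        page = glue.get_databases(**kwargs)
        names.extend(d['Name'] for d in page.get('DatabaseList', []))
        token = page.get('NextToken')
        if not token:
            return names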
For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --\nA description of the database.\n\nLocationUri (string) --\nThe location of the database (for example, an HDFS path).\n\nParameters (dict) --\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTime (datetime) --\nThe time at which the metadata database was created in the catalog.\n\nCreateTableDefaultPermissions (list) --\nCreates a set of default permissions on the table for principals.\n\n(dict) --\nPermissions granted to a principal.\n\nPrincipal (dict) --\nThe principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --\nAn identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --\nThe permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token for paginating the returned list of tokens, returned if the current segment of the list is not the last.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'DatabaseList': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTime': datetime(2015, 1, 1),\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': {\n 'DataLakePrincipalIdentifier': 'string'\n },\n 'Permissions': [\n 'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',\n ]\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_dataflow_graph(PythonScript=None):\n \"\"\"\n Transforms a Python script into a directed acyclic graph (DAG).\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_dataflow_graph(\n PythonScript='string'\n )\n \n \n :type PythonScript: string\n :param PythonScript: The Python script to transform.\n\n :rtype: dict\nReturnsResponse Syntax{\n 'DagNodes': [\n {\n 'Id': 'string',\n 'NodeType': 'string',\n 'Args': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'LineNumber': 123\n },\n ],\n 'DagEdges': [\n {\n 'Source': 'string',\n 'Target': 'string',\n 'TargetParameter': 'string'\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\nDagNodes (list) --A list of the nodes in the resulting DAG.\n\n(dict) --Represents a node in a directed acyclic graph (DAG)\n\nId (string) --A node identifier that is unique within the node\\'s graph.\n\nNodeType (string) --The type of node that this is.\n\nArgs (list) --Properties of the node, in the form of name-value pairs.\n\n(dict) --An argument or property of a node.\n\nName (string) --The name of the argument or property.\n\nValue (string) --The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nLineNumber (integer) --The line number of the node.\n\n\n\n\n\nDagEdges (list) --A list of the edges in the resulting DAG.\n\n(dict) --Represents a directional edge in a directed acyclic graph (DAG).\n\nSource (string) --The ID of the node at which the edge starts.\n\nTarget (string) --The ID of the node at which the edge ends.\n\nTargetParameter (string) --The target of the 
edge.\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'DagNodes': [\n {\n 'Id': 'string',\n 'NodeType': 'string',\n 'Args': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'LineNumber': 123\n },\n ],\n 'DagEdges': [\n {\n 'Source': 'string',\n 'Target': 'string',\n 'TargetParameter': 'string'\n },\n ]\n }\n \n \n \"\"\"\n pass\n\ndef get_dev_endpoint(EndpointName=None):\n \"\"\"\n Retrieves information about a specified development endpoint.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_dev_endpoint(\n EndpointName='string'\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\\nName of the DevEndpoint to retrieve information for.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'DevEndpoint': {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string',\n 'Arguments': {\n 'string': 'string'\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\nDevEndpoint (dict) --A DevEndpoint definition.\n\nEndpointName (string) --The name of the DevEndpoint .\n\nRoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .\n\nSecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .\n\n(string) --\n\n\nSubnetId (string) --The subnet ID for this DevEndpoint .\n\nYarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .\n\nPrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.\n\nZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.\n\nPublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .\n\nStatus (string) --The current status of this DevEndpoint .\n\nWorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. 
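# Usage sketch: submitting a script body to get_dataflow_graph, documented
# above, and printing the nodes and edges of the returned DAG. The script
# text passed in is whatever ETL script the caller wants analysed.
import boto3

def _example_dataflow_graph(script_text):
    glue = boto3.client('glue')
    dag = glue.get_dataflow_graph(PythonScript=script_text)
    for node in dag.get('DagNodes', []):
        print('node', node['Id'], node['NodeType'], 'line', node.get('LineNumber'))
    for edge in dag.get('DagEdges', []):
        print('edge', edge['Source'], '->', edge['Target'])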
We recommend this worker type for memory-intensive jobs.\n\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nNumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .\n\nAvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.\n\nVpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .\n\nExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\n\n\nExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\nNote\nYou can only use pure Java\/Scala libraries with a DevEndpoint .\n\n\nFailureReason (string) --The reason for a current failure in this DevEndpoint .\n\nLastUpdateStatus (string) --The status of the last update.\n\nCreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.\n\nLastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.\n\nPublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.\n\nPublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n\nNote\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. 
Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n\n\n(string) --\n\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .\n\nArguments (dict) --A map of arguments used to configure the DevEndpoint .\nValid arguments are:\n\n\"--enable-glue-datacatalog\": \"\"\n\"GLUE_PYTHON_VERSION\": \"3\"\n\"GLUE_PYTHON_VERSION\": \"2\"\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'DevEndpoint': {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string',\n 'Arguments': {\n 'string': 'string'\n }\n }\n }\n \n \n :returns: \n For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\n For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. 
We recommend this worker type for memory-intensive jobs.\n \n \"\"\"\n pass\n\ndef get_dev_endpoints(MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves all the development endpoints in this AWS account.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_dev_endpoints(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum size of information to return.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'DevEndpoints': [\n {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string',\n 'Arguments': {\n 'string': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nDevEndpoints (list) --\nA list of DevEndpoint definitions.\n\n(dict) --\nA development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.\n\nEndpointName (string) --\nThe name of the DevEndpoint .\n\nRoleArn (string) --\nThe Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .\n\nSecurityGroupIds (list) --\nA list of security group identifiers used in this DevEndpoint .\n\n(string) --\n\n\nSubnetId (string) --\nThe subnet ID for this DevEndpoint .\n\nYarnEndpointAddress (string) --\nThe YARN endpoint address used by this DevEndpoint .\n\nPrivateAddress (string) --\nA private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.\n\nZeppelinRemoteSparkInterpreterPort (integer) --\nThe Apache Zeppelin port for the remote Apache Spark interpreter.\n\nPublicAddress (string) --\nThe public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .\n\nStatus (string) --\nThe current status of this DevEndpoint .\n\nWorkerType (string) --\nThe type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. 
We recommend this worker type for memory-intensive jobs.\n\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated to the development endpoint.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nNumberOfNodes (integer) --\nThe number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .\n\nAvailabilityZone (string) --\nThe AWS Availability Zone where this DevEndpoint is located.\n\nVpcId (string) --\nThe ID of the virtual private cloud (VPC) used by this DevEndpoint .\n\nExtraPythonLibsS3Path (string) --\nThe paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\n\n\nExtraJarsS3Path (string) --\nThe path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\nNote\nYou can only use pure Java\/Scala libraries with a DevEndpoint .\n\n\nFailureReason (string) --\nThe reason for a current failure in this DevEndpoint .\n\nLastUpdateStatus (string) --\nThe status of the last update.\n\nCreatedTimestamp (datetime) --\nThe point in time at which this DevEndpoint was created.\n\nLastModifiedTimestamp (datetime) --\nThe point in time at which this DevEndpoint was last modified.\n\nPublicKey (string) --\nThe public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.\n\nPublicKeys (list) --\nA list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n\nNote\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. 
Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n\n\n(string) --\n\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this DevEndpoint .\n\nArguments (dict) --\nA map of arguments used to configure the DevEndpoint .\nValid arguments are:\n\n\"--enable-glue-datacatalog\": \"\"\n\"GLUE_PYTHON_VERSION\": \"3\"\n\"GLUE_PYTHON_VERSION\": \"2\"\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token, if not all DevEndpoint definitions have yet been returned.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'DevEndpoints': [\n {\n 'EndpointName': 'string',\n 'RoleArn': 'string',\n 'SecurityGroupIds': [\n 'string',\n ],\n 'SubnetId': 'string',\n 'YarnEndpointAddress': 'string',\n 'PrivateAddress': 'string',\n 'ZeppelinRemoteSparkInterpreterPort': 123,\n 'PublicAddress': 'string',\n 'Status': 'string',\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'GlueVersion': 'string',\n 'NumberOfWorkers': 123,\n 'NumberOfNodes': 123,\n 'AvailabilityZone': 'string',\n 'VpcId': 'string',\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string',\n 'FailureReason': 'string',\n 'LastUpdateStatus': 'string',\n 'CreatedTimestamp': datetime(2015, 1, 1),\n 'LastModifiedTimestamp': datetime(2015, 1, 1),\n 'PublicKey': 'string',\n 'PublicKeys': [\n 'string',\n ],\n 'SecurityConfiguration': 'string',\n 'Arguments': {\n 'string': 'string'\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_job(JobName=None):\n \"\"\"\n Retrieves an existing job definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_job(\n JobName='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job definition to retrieve.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Job': {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\nJob (dict) --The requested job definition.\n\nName (string) --The name you assign to this job definition.\n\nDescription (string) --A description of the job.\n\nLogUri (string) --This field is reserved for future use.\n\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.\n\nCreatedOn (datetime) 
--The time and date that this job definition was created.\n\nLastModifiedOn (datetime) --The last point in time when this job definition was modified.\n\nExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n\nCommand (dict) --The JobCommand that executes this job.\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n\nDefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nNonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n\nConnections (dict) --The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n\nMaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.\n\nAllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nTimeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --The type of predefined worker that is allocated when a job runs. 
Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\n\nNotificationProperty (dict) --Specifies configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Job': {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_job_bookmark(JobName=None, RunId=None):\n \"\"\"\n Returns information on a job bookmark entry.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_job_bookmark(\n JobName='string',\n RunId='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job in question.\\n\n\n :type RunId: string\n :param RunId: The unique run identifier associated with this job run.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobBookmarkEntry': {\n 'JobName': 'string',\n 'Version': 123,\n 'Run': 123,\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'RunId': 'string',\n 'JobBookmark': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobBookmarkEntry (dict) --\nA structure that defines a point that a job can resume processing.\n\nJobName (string) --\nThe name of the job in question.\n\nVersion (integer) --\nThe version of the job.\n\nRun (integer) --\nThe run ID number.\n\nAttempt 
(integer) --\nThe attempt ID number.\n\nPreviousRunId (string) --\nThe unique run identifier associated with the previous job run.\n\nRunId (string) --\nThe run ID number.\n\nJobBookmark (string) --\nThe bookmark itself.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ValidationException\n\n\n :return: {\n 'JobBookmarkEntry': {\n 'JobName': 'string',\n 'Version': 123,\n 'Run': 123,\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'RunId': 'string',\n 'JobBookmark': 'string'\n }\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ValidationException\n \n \"\"\"\n pass\n\ndef get_job_run(JobName=None, RunId=None, PredecessorsIncluded=None):\n \"\"\"\n Retrieves the metadata for a given job run.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_job_run(\n JobName='string',\n RunId='string',\n PredecessorsIncluded=True|False\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nName of the job definition being run.\\n\n\n :type RunId: string\n :param RunId: [REQUIRED]\\nThe ID of the job run.\\n\n\n :type PredecessorsIncluded: boolean\n :param PredecessorsIncluded: True if a list of predecessor runs should be returned.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobRun': {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobRun (dict) --\nThe requested job-run metadata.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. 
This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'JobRun': {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_job_runs(JobName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves metadata for all runs of a given job definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_job_runs(\n JobName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job definition for which to retrieve all job runs.\\n\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of the response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ],\n 'NextToken': 
'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobRuns (list) --\nA list of job-run metadata objects.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. 
This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if not all requested job runs have been returned.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_jobs(NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves all current job definitions.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_jobs(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n 
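(Illustrative only; not part of the generated stub.) A minimal pagination sketch built from the NextToken/MaxResults parameters documented here for get_jobs; the client setup and the MaxResults value are assumptions, not values taken from this file.

    import boto3

    client = boto3.client('glue')

    # Collect every job definition by following NextToken until it is absent.
    jobs = []
    kwargs = {'MaxResults': 100}
    while True:
        response = client.get_jobs(**kwargs)
        jobs.extend(response.get('Jobs', []))
        token = response.get('NextToken')
        if not token:
            break
        kwargs['NextToken'] = token

    print([job['Name'] for job in jobs])
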
:type MaxResults: integer\n :param MaxResults: The maximum size of the response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Jobs': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobs (list) --\nA list of job definitions.\n\n(dict) --\nSpecifies a job definition.\n\nName (string) --\nThe name you assign to this job definition.\n\nDescription (string) --\nA description of the job.\n\nLogUri (string) --\nThis field is reserved for future use.\n\nRole (string) --\nThe name or Amazon Resource Name (ARN) of the IAM role associated with this job.\n\nCreatedOn (datetime) --\nThe time and date that this job definition was created.\n\nLastModifiedOn (datetime) --\nThe last point in time when this job definition was modified.\n\nExecutionProperty (dict) --\nAn ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --\nThe maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n\nCommand (dict) --\nThe JobCommand that executes this job.\n\nName (string) --\nThe name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --\nSpecifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --\nThe Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n\nDefaultArguments (dict) --\nThe default arguments for this job, specified as name-value pairs.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nNonOverridableArguments (dict) --\nNon-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n\nConnections (dict) --\nThe connections used for this job.\n\nConnections (list) --\nA list of connections used by the job.\n\n(string) --\n\n\n\n\nMaxRetries (integer) --\nThe maximum number of times to retry this job after a JobRun fails.\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. 
A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nTimeout (integer) --\nThe job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. 
The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if not all job definitions have yet been returned.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Jobs': [\n {\n 'Name': 'string',\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_mapping(Source=None, Sinks=None, Location=None):\n \"\"\"\n Creates mappings.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_mapping(\n Source={\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n Sinks=[\n {\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n ],\n Location={\n 'Jdbc': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'S3': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'DynamoDB': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ]\n }\n )\n \n \n :type Source: dict\n :param Source: [REQUIRED]\\nSpecifies the source table.\\n\\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\\n\\nTableName (string) -- [REQUIRED]The name of the table in question.\\n\\n\\n\n\n :type Sinks: list\n :param Sinks: A list of target tables.\\n\\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\\n\\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\\n\\nTableName (string) -- [REQUIRED]The name of the table in question.\\n\\n\\n\\n\\n\n\n :type Location: dict\n :param Location: Parameters for the mapping.\\n\\nJdbc (list) --A JDBC location.\\n\\n(dict) --An argument or property of a node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\\n\\n(dict) --An argument or property of a node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\nDynamoDB (list) --An Amazon DynamoDB table location.\\n\\n(dict) --An argument or property of a 
node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Mapping': [\n {\n 'SourceTable': 'string',\n 'SourcePath': 'string',\n 'SourceType': 'string',\n 'TargetTable': 'string',\n 'TargetPath': 'string',\n 'TargetType': 'string'\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nMapping (list) --\nA list of mappings to the specified targets.\n\n(dict) --\nDefines a mapping.\n\nSourceTable (string) --\nThe name of the source table.\n\nSourcePath (string) --\nThe source path.\n\nSourceType (string) --\nThe source type.\n\nTargetTable (string) --\nThe target table.\n\nTargetPath (string) --\nThe target path.\n\nTargetType (string) --\nThe target type.\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.EntityNotFoundException\n\n\n :return: {\n 'Mapping': [\n {\n 'SourceTable': 'string',\n 'SourcePath': 'string',\n 'SourceType': 'string',\n 'TargetTable': 'string',\n 'TargetPath': 'string',\n 'TargetType': 'string'\n },\n ]\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.EntityNotFoundException\n \n \"\"\"\n pass\n\ndef get_ml_task_run(TransformId=None, TaskRunId=None):\n \"\"\"\n Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. 
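(Illustrative only; not part of the generated stub.) A minimal sketch of reading back a single task run's status with get_ml_task_run; the client setup and both identifiers below are placeholder assumptions.

    import boto3

    client = boto3.client('glue')

    # TransformId and TaskRunId are both required; these values are hypothetical.
    run = client.get_ml_task_run(
        TransformId='tfm-0123456789abcdef0123456789abcdef',
        TaskRunId='tsk-0123456789abcdef0123456789abcdef',
    )
    print(run['Status'], run.get('ErrorString', ''))
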
You can check the stats of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform\\'s TransformID .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_ml_task_run(\n TransformId='string',\n TaskRunId='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :type TaskRunId: string\n :param TaskRunId: [REQUIRED]\\nThe unique identifier of the task run.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TransformId': 'string',\n 'TaskRunId': 'string',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'LogGroupName': 'string',\n 'Properties': {\n 'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',\n 'ImportLabelsTaskRunProperties': {\n 'InputS3Path': 'string',\n 'Replace': True|False\n },\n 'ExportLabelsTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'LabelingSetGenerationTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'FindMatchesTaskRunProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobRunId': 'string'\n }\n },\n 'ErrorString': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ExecutionTime': 123\n}\n\n\nResponse Structure\n\n(dict) --\n\nTransformId (string) --\nThe unique identifier of the task run.\n\nTaskRunId (string) --\nThe unique run identifier associated with this run.\n\nStatus (string) --\nThe status for this task run.\n\nLogGroupName (string) --\nThe names of the log groups that are associated with the task run.\n\nProperties (dict) --\nThe list of properties that are associated with the task run.\n\nTaskType (string) --\nThe type of task run.\n\nImportLabelsTaskRunProperties (dict) --\nThe configuration properties for an importing labels task run.\n\nInputS3Path (string) --\nThe Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.\n\nReplace (boolean) --\nIndicates whether to overwrite your existing labels.\n\n\n\nExportLabelsTaskRunProperties (dict) --\nThe configuration properties for an exporting labels task run.\n\nOutputS3Path (string) --\nThe Amazon Simple Storage Service (Amazon S3) path where you will export the labels.\n\n\n\nLabelingSetGenerationTaskRunProperties (dict) --\nThe configuration properties for a labeling set generation task run.\n\nOutputS3Path (string) --\nThe Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.\n\n\n\nFindMatchesTaskRunProperties (dict) --\nThe configuration properties for a find matches task run.\n\nJobId (string) --\nThe job ID for the Find Matches task run.\n\nJobName (string) --\nThe name assigned to the job for the Find Matches task run.\n\nJobRunId (string) --\nThe job run ID for the Find Matches task run.\n\n\n\n\n\nErrorString (string) --\nThe error strings that are associated with the task run.\n\nStartedOn (datetime) --\nThe date and time when this task run started.\n\nLastModifiedOn (datetime) --\nThe date and time when this task run was last modified.\n\nCompletedOn (datetime) --\nThe date and time when this task run was completed.\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the task run consumed 
resources.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TransformId': 'string',\n 'TaskRunId': 'string',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'LogGroupName': 'string',\n 'Properties': {\n 'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',\n 'ImportLabelsTaskRunProperties': {\n 'InputS3Path': 'string',\n 'Replace': True|False\n },\n 'ExportLabelsTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'LabelingSetGenerationTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'FindMatchesTaskRunProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobRunId': 'string'\n }\n },\n 'ErrorString': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ExecutionTime': 123\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n \n \"\"\"\n pass\n\ndef get_ml_task_runs(TransformId=None, NextToken=None, MaxResults=None, Filter=None, Sort=None):\n \"\"\"\n Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform\\'s TransformID and other optional parameters as documented in this section.\n This operation returns a list of historic runs and must be paginated.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_ml_task_runs(\n TransformId='string',\n NextToken='string',\n MaxResults=123,\n Filter={\n 'TaskRunType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'StartedBefore': datetime(2015, 1, 1),\n 'StartedAfter': datetime(2015, 1, 1)\n },\n Sort={\n 'Column': 'TASK_RUN_TYPE'|'STATUS'|'STARTED',\n 'SortDirection': 'DESCENDING'|'ASCENDING'\n }\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :type NextToken: string\n :param NextToken: A token for pagination of the results. 
The default is empty.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return.\n\n :type Filter: dict\n :param Filter: The filter criteria, in the TaskRunFilterCriteria structure, for the task run.\\n\\nTaskRunType (string) --The type of task run.\\n\\nStatus (string) --The current status of the task run.\\n\\nStartedBefore (datetime) --Filter on task runs started before this date.\\n\\nStartedAfter (datetime) --Filter on task runs started after this date.\\n\\n\\n\n\n :type Sort: dict\n :param Sort: The sorting criteria, in the TaskRunSortCriteria structure, for the task run.\\n\\nColumn (string) -- [REQUIRED]The column to be used to sort the list of task runs for the machine learning transform.\\n\\nSortDirection (string) -- [REQUIRED]The sort direction to be used to sort the list of task runs for the machine learning transform.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TaskRuns': [\n {\n 'TransformId': 'string',\n 'TaskRunId': 'string',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'LogGroupName': 'string',\n 'Properties': {\n 'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',\n 'ImportLabelsTaskRunProperties': {\n 'InputS3Path': 'string',\n 'Replace': True|False\n },\n 'ExportLabelsTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'LabelingSetGenerationTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'FindMatchesTaskRunProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobRunId': 'string'\n }\n },\n 'ErrorString': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ExecutionTime': 123\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTaskRuns (list) --\nA list of task runs that are associated with the transform.\n\n(dict) --\nThe sampling parameters that are associated with the machine learning transform.\n\nTransformId (string) --\nThe unique identifier for the transform.\n\nTaskRunId (string) --\nThe unique identifier for this task run.\n\nStatus (string) --\nThe current status of the requested task run.\n\nLogGroupName (string) --\nThe names of the log group for secure logging, associated with this task run.\n\nProperties (dict) --\nSpecifies configuration properties associated with this task run.\n\nTaskType (string) --\nThe type of task run.\n\nImportLabelsTaskRunProperties (dict) --\nThe configuration properties for an importing labels task run.\n\nInputS3Path (string) --\nThe Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.\n\nReplace (boolean) --\nIndicates whether to overwrite your existing labels.\n\n\n\nExportLabelsTaskRunProperties (dict) --\nThe configuration properties for an exporting labels task run.\n\nOutputS3Path (string) --\nThe Amazon Simple Storage Service (Amazon S3) path where you will export the labels.\n\n\n\nLabelingSetGenerationTaskRunProperties (dict) --\nThe configuration properties for a labeling set generation task run.\n\nOutputS3Path (string) --\nThe Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.\n\n\n\nFindMatchesTaskRunProperties (dict) --\nThe configuration properties for a find matches task run.\n\nJobId (string) --\nThe job ID for the Find Matches task run.\n\nJobName (string) --\nThe name assigned to the job for the Find Matches task run.\n\nJobRunId (string) --\nThe job run ID for the Find Matches task 
run.\n\n\n\n\n\nErrorString (string) --\nThe list of error strings associated with this task run.\n\nStartedOn (datetime) --\nThe date and time that this task run started.\n\nLastModifiedOn (datetime) --\nThe last point in time that the requested task run was updated.\n\nCompletedOn (datetime) --\nThe last point in time that the requested task run was completed.\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the task run consumed resources.\n\n\n\n\n\nNextToken (string) --\nA pagination token, if more results are available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TaskRuns': [\n {\n 'TransformId': 'string',\n 'TaskRunId': 'string',\n 'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'LogGroupName': 'string',\n 'Properties': {\n 'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',\n 'ImportLabelsTaskRunProperties': {\n 'InputS3Path': 'string',\n 'Replace': True|False\n },\n 'ExportLabelsTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'LabelingSetGenerationTaskRunProperties': {\n 'OutputS3Path': 'string'\n },\n 'FindMatchesTaskRunProperties': {\n 'JobId': 'string',\n 'JobName': 'string',\n 'JobRunId': 'string'\n }\n },\n 'ErrorString': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ExecutionTime': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n \n \"\"\"\n pass\n\ndef get_ml_transform(TransformId=None):\n \"\"\"\n Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. 
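A minimal usage sketch, hedged (the transform ID below is a placeholder, and boto3 credentials and region are assumed to be configured):\nimport boto3\nclient = boto3.client('glue')\nresponse = client.get_ml_transform(TransformId='tfm-0123456789abcdef')  # placeholder ID\n# A transform is usable only once its Status is READY.\nprint(response['Name'], response['Status'])\nmetrics = response.get('EvaluationMetrics')  # may be absent until the transform has been evaluated\nif metrics and 'FindMatchesMetrics' in metrics:\n    print('F1:', metrics['FindMatchesMetrics']['F1'])\n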
You can retrieve their metadata by calling GetMLTransform .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_ml_transform(\n TransformId='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the transform, generated at the time that the transform was created.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'TransformId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Status': 'NOT_READY'|'READY'|'DELETING',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'InputRecordTables': [\n {\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CatalogId': 'string',\n 'ConnectionName': 'string'\n },\n ],\n 'Parameters': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesParameters': {\n 'PrimaryKeyColumnName': 'string',\n 'PrecisionRecallTradeoff': 123.0,\n 'AccuracyCostTradeoff': 123.0,\n 'EnforceProvidedLabels': True|False\n }\n },\n 'EvaluationMetrics': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesMetrics': {\n 'AreaUnderPRCurve': 123.0,\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1': 123.0,\n 'ConfusionMatrix': {\n 'NumTruePositives': 123,\n 'NumFalsePositives': 123,\n 'NumTrueNegatives': 123,\n 'NumFalseNegatives': 123\n }\n }\n },\n 'LabelCount': 123,\n 'Schema': [\n {\n 'Name': 'string',\n 'DataType': 'string'\n },\n ],\n 'Role': 'string',\n 'GlueVersion': 'string',\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'Timeout': 123,\n 'MaxRetries': 123\n}\n\n\nResponse Structure\n\n(dict) --\nTransformId (string) --The unique identifier of the transform, generated at the time that the transform was created.\n\nName (string) --The unique name given to the transform when it was created.\n\nDescription (string) --A description of the transform.\n\nStatus (string) --The last known status of the transform (to indicate whether it can be used or not). One of \"NOT_READY\", \"READY\", or \"DELETING\".\n\nCreatedOn (datetime) --The date and time when the transform was created.\n\nLastModifiedOn (datetime) --The date and time when the transform was last modified.\n\nInputRecordTables (list) --A list of AWS Glue table definitions used by the transform.\n\n(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.\n\nDatabaseName (string) --A database name in the AWS Glue Data Catalog.\n\nTableName (string) --A table name in the AWS Glue Data Catalog.\n\nCatalogId (string) --A unique identifier for the AWS Glue Data Catalog.\n\nConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.\n\n\n\n\n\nParameters (dict) --The configuration parameters that are specific to the algorithm used.\n\nTransformType (string) --The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. 
Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n\nEvaluationMetrics (dict) --The latest evaluation metrics.\n\nTransformType (string) --The type of machine learning transform.\n\nFindMatchesMetrics (dict) --The evaluation metrics for the find matches algorithm.\n\nAreaUnderPRCurve (float) --The area under the precision\/recall curve (AUPRC) is a single number measuring the overall quality of the transform, that is independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.\nFor more information, see Precision and recall in Wikipedia.\n\nPrecision (float) --The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.\nFor more information, see Precision and recall in Wikipedia.\n\nRecall (float) --The recall metric indicates that for an actual match, how often your transform predicts the match.
Specifically, it measures how well the transform finds true positives from the total records in the source data.\nFor more information, see Precision and recall in Wikipedia.\n\nF1 (float) --The maximum F1 metric indicates the transform\\'s accuracy between 0 and 1, where 1 is the best accuracy.\nFor more information, see F1 score in Wikipedia.\n\nConfusionMatrix (dict) --The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.\nFor more information, see Confusion matrix in Wikipedia.\n\nNumTruePositives (integer) --The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.\n\nNumFalsePositives (integer) --The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.\n\nNumTrueNegatives (integer) --The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.\n\nNumFalseNegatives (integer) --The number of matches in the data that the transform didn\\'t find, in the confusion matrix for your transform.\n\n\n\n\n\n\n\nLabelCount (integer) --The number of labels available for this transform.\n\nSchema (list) --The Map object that represents the schema that this transform accepts. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n\nWorkerType (string) --The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when this task runs.\n\nTimeout (integer) --The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. 
The default is 2,880 minutes (48 hours).\n\nMaxRetries (integer) --The maximum number of times to retry a task for this transform after a task run fails.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TransformId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Status': 'NOT_READY'|'READY'|'DELETING',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'InputRecordTables': [\n {\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CatalogId': 'string',\n 'ConnectionName': 'string'\n },\n ],\n 'Parameters': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesParameters': {\n 'PrimaryKeyColumnName': 'string',\n 'PrecisionRecallTradeoff': 123.0,\n 'AccuracyCostTradeoff': 123.0,\n 'EnforceProvidedLabels': True|False\n }\n },\n 'EvaluationMetrics': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesMetrics': {\n 'AreaUnderPRCurve': 123.0,\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1': 123.0,\n 'ConfusionMatrix': {\n 'NumTruePositives': 123,\n 'NumFalsePositives': 123,\n 'NumTrueNegatives': 123,\n 'NumFalseNegatives': 123\n }\n }\n },\n 'LabelCount': 123,\n 'Schema': [\n {\n 'Name': 'string',\n 'DataType': 'string'\n },\n ],\n 'Role': 'string',\n 'GlueVersion': 'string',\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'Timeout': 123,\n 'MaxRetries': 123\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n \n \"\"\"\n pass\n\ndef get_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None):\n \"\"\"\n Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. 
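For instance, a hedged sketch that lists only FIND_MATCHES transforms that are ready to use, newest first (the filter and sort values are illustrative, and boto3 credentials are assumed to be configured):\nimport boto3\nclient = boto3.client('glue')\nresponse = client.get_ml_transforms(\n    Filter={'TransformType': 'FIND_MATCHES', 'Status': 'READY'},\n    Sort={'Column': 'LAST_MODIFIED', 'SortDirection': 'DESCENDING'},\n    MaxResults=50\n)\nfor transform in response['Transforms']:\n    print(transform['TransformId'], transform['Name'], transform['Status'])\n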
These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_ml_transforms(\n NextToken='string',\n MaxResults=123,\n Filter={\n 'Name': 'string',\n 'TransformType': 'FIND_MATCHES',\n 'Status': 'NOT_READY'|'READY'|'DELETING',\n 'GlueVersion': 'string',\n 'CreatedBefore': datetime(2015, 1, 1),\n 'CreatedAfter': datetime(2015, 1, 1),\n 'LastModifiedBefore': datetime(2015, 1, 1),\n 'LastModifiedAfter': datetime(2015, 1, 1),\n 'Schema': [\n {\n 'Name': 'string',\n 'DataType': 'string'\n },\n ]\n },\n Sort={\n 'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',\n 'SortDirection': 'DESCENDING'|'ASCENDING'\n }\n )\n \n \n :type NextToken: string\n :param NextToken: A paginated token to offset the results.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return.\n\n :type Filter: dict\n :param Filter: The filter transformation criteria.\\n\\nName (string) --A unique transform name that is used to filter the machine learning transforms.\\n\\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\\n\\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\\n\\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\\n\\nCreatedBefore (datetime) --The time and date before which the transforms were created.\\n\\nCreatedAfter (datetime) --The time and date after which the transforms were created.\\n\\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\\n\\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\\n\\nSchema (list) --Filters on datasets with a specific schema. The Map object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\\n\\n(dict) --A key-value pair representing a column and data type that this transform can run against. 
The Schema parameter of the MLTransform may contain up to 100 of these structures.\\n\\nName (string) --The name of the column.\\n\\nDataType (string) --The type of data in the column.\\n\\n\\n\\n\\n\\n\\n\n\n :type Sort: dict\n :param Sort: The sorting criteria.\\n\\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\\n\\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Transforms': [\n {\n 'TransformId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Status': 'NOT_READY'|'READY'|'DELETING',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'InputRecordTables': [\n {\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CatalogId': 'string',\n 'ConnectionName': 'string'\n },\n ],\n 'Parameters': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesParameters': {\n 'PrimaryKeyColumnName': 'string',\n 'PrecisionRecallTradeoff': 123.0,\n 'AccuracyCostTradeoff': 123.0,\n 'EnforceProvidedLabels': True|False\n }\n },\n 'EvaluationMetrics': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesMetrics': {\n 'AreaUnderPRCurve': 123.0,\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1': 123.0,\n 'ConfusionMatrix': {\n 'NumTruePositives': 123,\n 'NumFalsePositives': 123,\n 'NumTrueNegatives': 123,\n 'NumFalseNegatives': 123\n }\n }\n },\n 'LabelCount': 123,\n 'Schema': [\n {\n 'Name': 'string',\n 'DataType': 'string'\n },\n ],\n 'Role': 'string',\n 'GlueVersion': 'string',\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'Timeout': 123,\n 'MaxRetries': 123\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTransforms (list) --\nA list of machine learning transforms.\n\n(dict) --\nA structure for a machine learning transform.\n\nTransformId (string) --\nThe unique transform ID that is generated for the machine learning transform. The ID is guaranteed to be unique and does not change.\n\nName (string) --\nA user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time.\n\nDescription (string) --\nA user-defined, long-form description text for the machine learning transform. Descriptions are not guaranteed to be unique and can be changed at any time.\n\nStatus (string) --\nThe current status of the machine learning transform.\n\nCreatedOn (datetime) --\nA timestamp. The time and date that this machine learning transform was created.\n\nLastModifiedOn (datetime) --\nA timestamp. The last point in time when this machine learning transform was modified.\n\nInputRecordTables (list) --\nA list of AWS Glue table definitions used by the transform.\n\n(dict) --\nThe database and table in the AWS Glue Data Catalog that is used for input or output data.\n\nDatabaseName (string) --\nA database name in the AWS Glue Data Catalog.\n\nTableName (string) --\nA table name in the AWS Glue Data Catalog.\n\nCatalogId (string) --\nA unique identifier for the AWS Glue Data Catalog.\n\nConnectionName (string) --\nThe name of the connection to the AWS Glue Data Catalog.\n\n\n\n\n\nParameters (dict) --\nA TransformParameters object. You can use parameters to tune (customize) the behavior of the machine learning transform by specifying what data it learns from and your preference on various tradeoffs (such as precious vs. 
recall, or accuracy vs. cost).\n\nTransformType (string) --\nThe type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --\nThe parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --\nThe name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --\nThe value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --\nThe value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --\nThe value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n\nEvaluationMetrics (dict) --\nAn EvaluationMetrics object. Evaluation metrics provide an estimate of the quality of your machine learning transform.\n\nTransformType (string) --\nThe type of machine learning transform.\n\nFindMatchesMetrics (dict) --\nThe evaluation metrics for the find matches algorithm.\n\nAreaUnderPRCurve (float) --\nThe area under the precision\/recall curve (AUPRC) is a single number measuring the overall quality of the transform, that is independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.\nFor more information, see Precision and recall in Wikipedia.\n\nPrecision (float) --\nThe precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.\nFor more information, see Precision and recall in Wikipedia.\n\nRecall (float) --\nThe recall metric indicates that for an actual match, how often your transform predicts the match.
Specifically, it measures how well the transform finds true positives from the total records in the source data.\nFor more information, see Precision and recall in Wikipedia.\n\nF1 (float) --\nThe maximum F1 metric indicates the transform\\'s accuracy between 0 and 1, where 1 is the best accuracy.\nFor more information, see F1 score in Wikipedia.\n\nConfusionMatrix (dict) --\nThe confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.\nFor more information, see Confusion matrix in Wikipedia.\n\nNumTruePositives (integer) --\nThe number of matches in the data that the transform correctly found, in the confusion matrix for your transform.\n\nNumFalsePositives (integer) --\nThe number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.\n\nNumTrueNegatives (integer) --\nThe number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.\n\nNumFalseNegatives (integer) --\nThe number of matches in the data that the transform didn\\'t find, in the confusion matrix for your transform.\n\n\n\n\n\n\n\nLabelCount (integer) --\nA count identifier for the labeling files generated by AWS Glue for this transform. As you create a better transform, you can iteratively download, label, and upload the labeling file.\n\nSchema (list) --\nA map of key-value pairs representing the columns and data types that this transform can run against. Has an upper bound of 100 columns.\n\n(dict) --\nA key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --\nThe name of the column.\n\nDataType (string) --\nThe type of data in the column.\n\n\n\n\n\nRole (string) --\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.\n\nThis role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.\n\n\nGlueVersion (string) --\nThis value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. 
For more information, see the AWS Glue pricing page .\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a task of the transform runs.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\n\nTimeout (integer) --\nThe timeout in minutes of the machine learning transform.\n\nMaxRetries (integer) --\nThe maximum number of times to retry after an MLTaskRun of the machine learning transform fails.\n\n\n\n\n\nNextToken (string) --\nA pagination token, if more results are available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'Transforms': [\n {\n 'TransformId': 'string',\n 'Name': 'string',\n 'Description': 'string',\n 'Status': 'NOT_READY'|'READY'|'DELETING',\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'InputRecordTables': [\n {\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CatalogId': 'string',\n 'ConnectionName': 'string'\n },\n ],\n 'Parameters': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesParameters': {\n 'PrimaryKeyColumnName': 'string',\n 'PrecisionRecallTradeoff': 123.0,\n 'AccuracyCostTradeoff': 123.0,\n 'EnforceProvidedLabels': True|False\n }\n },\n 'EvaluationMetrics': {\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesMetrics': {\n 'AreaUnderPRCurve': 123.0,\n 'Precision': 123.0,\n 'Recall': 123.0,\n 'F1': 123.0,\n 'ConfusionMatrix': {\n 'NumTruePositives': 123,\n 'NumFalsePositives': 123,\n 'NumTrueNegatives': 123,\n 'NumFalseNegatives': 123\n }\n }\n },\n 'LabelCount': 123,\n 'Schema': [\n {\n 'Name': 'string',\n 'DataType': 'string'\n },\n ],\n 'Role': 'string',\n 'GlueVersion': 'string',\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'Timeout': 123,\n 'MaxRetries': 123\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. 
See Attach a Policy to IAM Users That Access AWS Glue .\n This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.\n \n \"\"\"\n pass\n\ndef get_paginator(operation_name=None):\n \"\"\"\n Create a paginator for an operation.\n \n :type operation_name: string\n :param operation_name: The operation name. This is the same name\\nas the method name on the client. For example, if the\\nmethod name is create_foo, and you\\'d normally invoke the\\noperation as client.create_foo(**kwargs), if the\\ncreate_foo operation can be paginated, you can use the\\ncall client.get_paginator('create_foo').\n\n :rtype: L{botocore.paginate.Paginator}\nReturnsA paginator object.\n\n\n \"\"\"\n pass\n\ndef get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):\n \"\"\"\n Retrieves information about a specified partition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionValues=[\n 'string',\n ]\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition in question resides. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database where the partition resides.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the partition\\'s table.\\n\n\n :type PartitionValues: list\n :param PartitionValues: [REQUIRED]\\nThe values that define the partition.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Partition': {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nPartition (dict) --\nThe requested information, in the form of a Partition object.\n\nValues (list) --\nThe values of the partition.\n\n(string) --\n\n\nDatabaseName (string) --\nThe name of the catalog database in which to create the partition.\n\nTableName (string) --\nThe name of the database table in which to create the partition.\n\nCreationTime (datetime) --\nThe time at which the partition was created.\n\nLastAccessTime (datetime) --\nThe last time at which the partition was accessed.\n\nStorageDescriptor (dict) --\nProvides information about the physical location where the partition is stored.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA 
column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nParameters (dict) --\nThese key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --\nThe last time at which column statistics were computed for this partition.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Partition': {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 
'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_partitions(CatalogId=None, DatabaseName=None, TableName=None, Expression=None, NextToken=None, Segment=None, MaxResults=None):\n \"\"\"\n Retrieves information about the partitions in a table.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_partitions(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n Expression='string',\n NextToken='string',\n Segment={\n 'SegmentNumber': 123,\n 'TotalSegments': 123\n },\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database where the partitions reside.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the partitions\\' table.\\n\n\n :type Expression: string\n :param Expression: An expression that filters the partitions to be returned.\\nThe expression uses SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser JSQLParser parses the expression.\\n\\nOperators : The following are the operators that you can use in the Expression API call:\\n=\\n\\nChecks whether the values of the two operands are equal; if yes, then the condition becomes true.\\nExample: Assume \\'variable a\\' holds 10 and \\'variable b\\' holds 20.\\n(a = b) is not true.\\n\\n< >\\nChecks whether the values of two operands are equal; if the values are not equal, then the condition becomes true.\\nExample: (a < > b) is true.\\n\\n>\\nChecks whether the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.\\nExample: (a > b) is not true.\\n\\n<\\nChecks whether the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.\\nExample: (a < b) is true.\\n\\n>=\\nChecks whether the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.\\nExample: (a >= b) is not true.\\n\\n<=\\nChecks whether the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.\\nExample: (a <= b) is true.\\n\\nAND, OR, IN, BETWEEN, LIKE, NOT, IS NULL\\nLogical operators.\\n\\nSupported Partition Key Types : The following are the supported partition keys.\\n\\nstring\\ndate\\ntimestamp\\nint\\nbigint\\nlong\\ntinyint\\nsmallint\\ndecimal\\n\\nIf an invalid type is encountered, an exception is thrown.\\nThe following list shows the valid operators on each type. 
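As an illustration only, a hedged sketch that retrieves the partitions matching a single value of an integer partition key (the database, table, and key names are hypothetical):\nimport boto3\nclient = boto3.client('glue')\nresponse = client.get_partitions(\n    DatabaseName='sales_db',  # hypothetical database\n    TableName='events',  # hypothetical table partitioned by an int key named year\n    Expression='year = 2020',  # quote the value instead if the key is a string type\n    MaxResults=100\n)\nfor partition in response['Partitions']:\n    print(partition['Values'])\n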
When you define a crawler, the partitionKey type is created as a STRING , to be compatible with the catalog partitions.\\n\\nSample API Call :\\n\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is not the first call to retrieve these partitions.\n\n :type Segment: dict\n :param Segment: The segment of the table\\'s partitions to scan in this request.\\n\\nSegmentNumber (integer) -- [REQUIRED]The zero-based index number of the segment. For example, if the total number of segments is 4, SegmentNumber values range from 0 through 3.\\n\\nTotalSegments (integer) -- [REQUIRED]The total number of segments.\\n\\n\\n\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of partitions to return in a single response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Partitions': [\n {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nPartitions (list) --\nA list of requested partitions.\n\n(dict) --\nRepresents a slice of table data.\n\nValues (list) --\nThe values of the partition.\n\n(string) --\n\n\nDatabaseName (string) --\nThe name of the catalog database in which to create the partition.\n\nTableName (string) --\nThe name of the database table in which to create the partition.\n\nCreationTime (datetime) --\nThe time at which the partition was created.\n\nLastAccessTime (datetime) --\nThe last time at which the partition was accessed.\n\nStorageDescriptor (dict) --\nProvides information about the physical location where the partition is stored.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. 
By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nParameters (dict) --\nThese key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --\nThe last time at which column statistics were computed for this partition.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if the returned list of partitions does not include the last one.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Partitions': [\n {\n 'Values': [\n 'string',\n ],\n 'DatabaseName': 'string',\n 'TableName': 'string',\n 'CreationTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 
'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef get_plan(Mapping=None, Source=None, Sinks=None, Location=None, Language=None):\n \"\"\"\n Gets code to perform a specified mapping.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_plan(\n Mapping=[\n {\n 'SourceTable': 'string',\n 'SourcePath': 'string',\n 'SourceType': 'string',\n 'TargetTable': 'string',\n 'TargetPath': 'string',\n 'TargetType': 'string'\n },\n ],\n Source={\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n Sinks=[\n {\n 'DatabaseName': 'string',\n 'TableName': 'string'\n },\n ],\n Location={\n 'Jdbc': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'S3': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ],\n 'DynamoDB': [\n {\n 'Name': 'string',\n 'Value': 'string',\n 'Param': True|False\n },\n ]\n },\n Language='PYTHON'|'SCALA'\n )\n \n \n :type Mapping: list\n :param Mapping: [REQUIRED]\\nThe list of mappings from a source table to target tables.\\n\\n(dict) --Defines a mapping.\\n\\nSourceTable (string) --The name of the source table.\\n\\nSourcePath (string) --The source path.\\n\\nSourceType (string) --The source type.\\n\\nTargetTable (string) --The target table.\\n\\nTargetPath (string) --The target path.\\n\\nTargetType (string) --The target type.\\n\\n\\n\\n\\n\n\n :type Source: dict\n :param Source: [REQUIRED]\\nThe source table.\\n\\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\\n\\nTableName (string) -- [REQUIRED]The name of the table in question.\\n\\n\\n\n\n :type Sinks: list\n :param Sinks: The target tables.\\n\\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\\n\\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\\n\\nTableName (string) -- [REQUIRED]The name of the table in question.\\n\\n\\n\\n\\n\n\n :type Location: dict\n :param Location: The parameters for the mapping.\\n\\nJdbc (list) --A JDBC location.\\n\\n(dict) --An argument or property of a node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\\n\\n(dict) --An argument or property of a node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\nDynamoDB (list) --An Amazon DynamoDB table location.\\n\\n(dict) --An argument or property of a node.\\n\\nName (string) -- [REQUIRED]The name of the argument or property.\\n\\nValue (string) -- [REQUIRED]The value of the argument or property.\\n\\nParam (boolean) --True if the value is used as a parameter.\\n\\n\\n\\n\\n\\n\\n\n\n :type Language: string\n :param Language: The programming language of the code to perform the mapping.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'PythonScript': 'string',\n 'ScalaCode': 'string'\n}\n\n\nResponse Structure\n\n(dict) 
--\n\nPythonScript (string) --\nA Python script to perform the mapping.\n\nScalaCode (string) --\nThe Scala code to perform the mapping.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'PythonScript': 'string',\n 'ScalaCode': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef get_resource_policy():\n \"\"\"\n Retrieves a specified resource policy.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_resource_policy()\n \n \n :rtype: dict\nReturnsResponse Syntax{\n 'PolicyInJson': 'string',\n 'PolicyHash': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1)\n}\n\n\nResponse Structure\n\n(dict) --\nPolicyInJson (string) --Contains the requested policy document, in JSON format.\n\nPolicyHash (string) --Contains the hash value associated with this policy.\n\nCreateTime (datetime) --The date and time at which the policy was created.\n\nUpdateTime (datetime) --The date and time at which the policy was last updated.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\n\n\n :return: {\n 'PolicyInJson': 'string',\n 'PolicyHash': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1)\n }\n \n \n \"\"\"\n pass\n\ndef get_security_configuration(Name=None):\n \"\"\"\n Retrieves a specified security configuration.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_security_configuration(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the security configuration to retrieve.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'SecurityConfiguration': {\n 'Name': 'string',\n 'CreatedTimeStamp': datetime(2015, 1, 1),\n 'EncryptionConfiguration': {\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\nSecurityConfiguration (dict) --The requested security configuration.\n\nName (string) --The name of the security configuration.\n\nCreatedTimeStamp (datetime) --The time at which this security configuration was created.\n\nEncryptionConfiguration (dict) --The encryption configuration associated with this security configuration.\n\nS3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.\n\n(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.\n\nS3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\nCloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.\n\nCloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt 
the data.\n\n\n\nJobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.\n\nJobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'SecurityConfiguration': {\n 'Name': 'string',\n 'CreatedTimeStamp': datetime(2015, 1, 1),\n 'EncryptionConfiguration': {\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n }\n }\n \n \n \"\"\"\n pass\n\ndef get_security_configurations(MaxResults=None, NextToken=None):\n \"\"\"\n Retrieves a list of all security configurations.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_security_configurations(\n MaxResults=123,\n NextToken='string'\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum number of results to return.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'SecurityConfigurations': [\n {\n 'Name': 'string',\n 'CreatedTimeStamp': datetime(2015, 1, 1),\n 'EncryptionConfiguration': {\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nSecurityConfigurations (list) --\nA list of security configurations.\n\n(dict) --\nSpecifies a security configuration.\n\nName (string) --\nThe name of the security configuration.\n\nCreatedTimeStamp (datetime) --\nThe time at which this security configuration was created.\n\nEncryptionConfiguration (dict) --\nThe encryption configuration associated with this security configuration.\n\nS3Encryption (list) --\nThe encryption configuration for Amazon Simple Storage Service (Amazon S3) data.\n\n(dict) --\nSpecifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.\n\nS3EncryptionMode (string) --\nThe encryption mode to use for Amazon S3 data.\n\nKmsKeyArn (string) --\nThe Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\nCloudWatchEncryption (dict) --\nThe encryption configuration for Amazon CloudWatch.\n\nCloudWatchEncryptionMode (string) --\nThe encryption mode to use for CloudWatch data.\n\nKmsKeyArn (string) --\nThe Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\nJobBookmarksEncryption (dict) --\nThe encryption configuration for job bookmarks.\n\nJobBookmarksEncryptionMode (string) --\nThe encryption mode to use for job bookmarks data.\n\nKmsKeyArn (string) --\nThe Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token, if there are more security configurations 
to return.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'SecurityConfigurations': [\n {\n 'Name': 'string',\n 'CreatedTimeStamp': datetime(2015, 1, 1),\n 'EncryptionConfiguration': {\n 'S3Encryption': [\n {\n 'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',\n 'KmsKeyArn': 'string'\n },\n ],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'KmsKeyArn': 'string'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',\n 'KmsKeyArn': 'string'\n }\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef get_table(CatalogId=None, DatabaseName=None, Name=None):\n \"\"\"\n Retrieves the Table definition in a Data Catalog for a specified table.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_table(\n CatalogId='string',\n DatabaseName='string',\n Name='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nTable (dict) --\nThe Table object that defines the specified table.\n\nName (string) --\nThe table name. 
For Hive compatibility, this must be entirely lowercase.\n\nDatabaseName (string) --\nThe name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.\n\nDescription (string) --\nA description of the table.\n\nOwner (string) --\nThe owner of the table.\n\nCreateTime (datetime) --\nThe time when the table definition was created in the Data Catalog.\n\nUpdateTime (datetime) --\nThe last time that the table was updated.\n\nLastAccessTime (datetime) --\nThe last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.\n\nLastAnalyzedTime (datetime) --\nThe last time that column statistics were computed for this table.\n\nRetention (integer) --\nThe retention time for this table.\n\nStorageDescriptor (dict) --\nA storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nPartitionKeys (list) --\nA list of columns by which the table is partitioned. 
Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:\n\n\"PartitionKeys\": []\n\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --\nIf the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --\nIf the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --\nThe type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --\nThese key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\nCreatedBy (string) --\nThe person or entity who created the table.\n\nIsRegisteredWithLakeFormation (boolean) --\nIndicates whether the table has been registered with AWS Lake Formation.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):\n \"\"\"\n Retrieves a specified version of a table.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_table_version(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n VersionId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type VersionId: string\n :param VersionId: The ID value of the table version to be retrieved. A VersionID is a string representation of an integer. Each version is incremented by 1.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TableVersion': {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n 'VersionId': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nTableVersion (dict) --\nThe requested table version.\n\nTable (dict) --\nThe table in question.\n\nName (string) --\nThe table name. For Hive compatibility, this must be entirely lowercase.\n\nDatabaseName (string) --\nThe name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.\n\nDescription (string) --\nA description of the table.\n\nOwner (string) --\nThe owner of the table.\n\nCreateTime (datetime) --\nThe time when the table definition was created in the Data Catalog.\n\nUpdateTime (datetime) --\nThe last time that the table was updated.\n\nLastAccessTime (datetime) --\nThe last time that the table was accessed. 
This is usually taken from HDFS, and might not be reliable.\n\nLastAnalyzedTime (datetime) --\nThe last time that column statistics were computed for this table.\n\nRetention (integer) --\nThe retention time for this table.\n\nStorageDescriptor (dict) --\nA storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nPartitionKeys (list) --\nA list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n\"PartitionKeys\": []\n\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --\nIf the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --\nIf the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --\nThe type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --\nThese key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\nCreatedBy (string) --\nThe person or entity who created the table.\n\nIsRegisteredWithLakeFormation (boolean) --\nIndicates whether the table has been registered with AWS Lake Formation.\n\n\n\nVersionId (string) --\nThe ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'TableVersion': {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n 'VersionId': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_table_versions(CatalogId=None, DatabaseName=None, TableName=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves a list of strings that identify available versions of a specified table.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_table_versions(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is not the first call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of table versions to return in one response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TableVersions': [\n {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n 'VersionId': 'string'\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTableVersions (list) --\nA list of strings identifying available versions of the specified table.\n\n(dict) --\nSpecifies a version of a table.\n\nTable (dict) --\nThe table in question.\n\nName (string) --\nThe table name. For Hive compatibility, this must be entirely lowercase.\n\nDatabaseName (string) --\nThe name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.\n\nDescription (string) --\nA description of the table.\n\nOwner (string) --\nThe owner of the table.\n\nCreateTime (datetime) --\nThe time when the table definition was created in the Data Catalog.\n\nUpdateTime (datetime) --\nThe last time that the table was updated.\n\nLastAccessTime (datetime) --\nThe last time that the table was accessed. 
This is usually taken from HDFS, and might not be reliable.\n\nLastAnalyzedTime (datetime) --\nThe last time that column statistics were computed for this table.\n\nRetention (integer) --\nThe retention time for this table.\n\nStorageDescriptor (dict) --\nA storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nPartitionKeys (list) --\nA list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n\"PartitionKeys\": []\n\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --\nIf the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --\nIf the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --\nThe type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --\nThese key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\nCreatedBy (string) --\nThe person or entity who created the table.\n\nIsRegisteredWithLakeFormation (boolean) --\nIndicates whether the table has been registered with AWS Lake Formation.\n\n\n\nVersionId (string) --\nThe ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.\n\n\n\n\n\nNextToken (string) --\nA continuation token, if the list of available versions does not include the last one.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'TableVersions': [\n {\n 'Table': {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n 'VersionId': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_tables(CatalogId=None, DatabaseName=None, Expression=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves the definitions of some or all of the tables in a given Database .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_tables(\n CatalogId='string',\n DatabaseName='string',\n Expression='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type 
CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type Expression: string\n :param Expression: A regular expression pattern. If present, only those tables whose names match the pattern are returned.\n\n :type NextToken: string\n :param NextToken: A continuation token, included if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of tables to return in a single response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TableList': [\n {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTableList (list) --\nA list of the requested Table objects.\n\n(dict) --\nRepresents a collection of related data organized in columns and rows.\n\nName (string) --\nThe table name. For Hive compatibility, this must be entirely lowercase.\n\nDatabaseName (string) --\nThe name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.\n\nDescription (string) --\nA description of the table.\n\nOwner (string) --\nThe owner of the table.\n\nCreateTime (datetime) --\nThe time when the table definition was created in the Data Catalog.\n\nUpdateTime (datetime) --\nThe last time that the table was updated.\n\nLastAccessTime (datetime) --\nThe last time that the table was accessed. 
This is usually taken from HDFS, and might not be reliable.\n\nLastAnalyzedTime (datetime) --\nThe last time that column statistics were computed for this table.\n\nRetention (integer) --\nThe retention time for this table.\n\nStorageDescriptor (dict) --\nA storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nPartitionKeys (list) --\nA list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n\"PartitionKeys\": []\n\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --\nIf the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --\nIf the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --\nThe type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --\nThese key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\nCreatedBy (string) --\nThe person or entity who created the table.\n\nIsRegisteredWithLakeFormation (boolean) --\nIndicates whether the table has been registered with AWS Lake Formation.\n\n\n\n\n\nNextToken (string) --\nA continuation token, present if the current list segment is not the last.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'TableList': [\n {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_tags(ResourceArn=None):\n \"\"\"\n Retrieves a list of tags associated with a resource.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_tags(\n ResourceArn='string'\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\\nThe Amazon Resource Name (ARN) of the resource for which to retrieve tags.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Tags': {\n 'string': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\nTags (dict) --The requested tags.\n\n(string) --\n(string) 
--\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.EntityNotFoundException\n\n\n :return: {\n 'Tags': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.EntityNotFoundException\n \n \"\"\"\n pass\n\ndef get_trigger(Name=None):\n \"\"\"\n Retrieves the definition of a trigger.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the trigger to retrieve.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\nTrigger (dict) --The requested trigger definition.\n\nName (string) --The name of the trigger.\n\nWorkflowName (string) --The name of the workflow associated with the trigger.\n\nId (string) --Reserved for future use.\n\nType (string) --The type of trigger that this is.\n\nState (string) --The current state of the trigger.\n\nDescription (string) --A description of this trigger.\n\nSchedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --The actions initiated by this trigger.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). 
This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --The predicate of this trigger, which defines when it will fire.\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef get_triggers(NextToken=None, DependentJobName=None, MaxResults=None):\n \"\"\"\n Gets all the triggers associated with a job.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_triggers(\n NextToken='string',\n DependentJobName='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type DependentJobName: string\n :param DependentJobName: The name of the job to retrieve triggers for. 
The trigger that can start this job is returned, and if there is no such trigger, all triggers are returned.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of the response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Triggers': [\n {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTriggers (list) --\nA list of triggers for the specified job.\n\n(dict) --\nInformation about a specific trigger.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. 
If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token, if not all the requested triggers have yet been returned.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Triggers': [\n {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):\n \"\"\"\n Retrieves a specified function definition from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionName='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the function to be retrieved is located. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database where the function is located.\\n\n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\\nThe name of the function.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'UserDefinedFunction': {\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'CreateTime': datetime(2015, 1, 1),\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nUserDefinedFunction (dict) --\nThe requested function definition.\n\nFunctionName (string) --\nThe name of the function.\n\nClassName (string) --\nThe Java class that contains the function code.\n\nOwnerName (string) --\nThe owner of the function.\n\nOwnerType (string) --\nThe owner type.\n\nCreateTime (datetime) --\nThe time at which the function was created.\n\nResourceUris (list) --\nThe resource URIs for the function.\n\n(dict) --\nThe URIs for function resources.\n\nResourceType (string) --\nThe type of the resource.\n\nUri (string) --\nThe URI for accessing the resource.\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'UserDefinedFunction': {\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'CreateTime': datetime(2015, 1, 1),\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.GlueEncryptionException\n \n \"\"\"\n pass\n\ndef get_user_defined_functions(CatalogId=None, DatabaseName=None, Pattern=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves multiple function definitions from the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_user_defined_functions(\n CatalogId='string',\n DatabaseName='string',\n Pattern='string',\n NextToken='string',\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the functions to be retrieved are located. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: The name of the catalog database where the functions are located.\n\n :type Pattern: string\n :param Pattern: [REQUIRED]\\nAn optional function-name pattern string that filters the function definitions returned.\\n\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation call.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of functions to return in one response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'UserDefinedFunctions': [\n {\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'CreateTime': datetime(2015, 1, 1),\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nUserDefinedFunctions (list) --\nA list of requested function definitions.\n\n(dict) --\nRepresents the equivalent of a Hive user-defined function (UDF ) definition.\n\nFunctionName (string) --\nThe name of the function.\n\nClassName (string) --\nThe Java class that contains the function code.\n\nOwnerName (string) --\nThe owner of the function.\n\nOwnerType (string) --\nThe owner type.\n\nCreateTime (datetime) --\nThe time at which the function was created.\n\nResourceUris (list) --\nThe resource URIs for the function.\n\n(dict) --\nThe URIs for function resources.\n\nResourceType (string) --\nThe type of the resource.\n\nUri (string) --\nThe URI for accessing the resource.\n\n\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token, if the list of functions returned does not include the last requested function.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {\n 'UserDefinedFunctions': [\n {\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'CreateTime': datetime(2015, 1, 1),\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.GlueEncryptionException\n \n \"\"\"\n pass\n\ndef get_waiter(waiter_name=None):\n \"\"\"\n Returns an object that can wait for some condition.\n \n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
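
# --- usage sketch (editor's addition, not part of the generated stubs) ---
# A hedged example of paging through get_user_defined_functions as documented
# above. 'example_db' and the '*' pattern are placeholders; only the documented
# parameters (DatabaseName, Pattern, NextToken, MaxResults) and response keys
# ('UserDefinedFunctions', 'NextToken') are used.
import boto3

glue = boto3.client('glue')

def list_udfs(database, pattern='*', page_size=50):
    """Yield every user-defined function in the database matching the pattern."""
    kwargs = {'DatabaseName': database, 'Pattern': pattern, 'MaxResults': page_size}
    while True:
        page = glue.get_user_defined_functions(**kwargs)
        for udf in page.get('UserDefinedFunctions', []):
            yield udf
        token = page.get('NextToken')
        if not token:
            break
        kwargs['NextToken'] = token

for udf in list_udfs('example_db'):
    print(udf.get('FunctionName'))
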
See the waiters\\nsection of the service docs for a list of available waiters.\n\n :rtype: botocore.waiter.Waiter\n\n\n \"\"\"\n pass\n\ndef get_workflow(Name=None, IncludeGraph=None):\n \"\"\"\n Retrieves resource metadata for a workflow.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_workflow(\n Name='string',\n IncludeGraph=True|False\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the workflow to retrieve.\\n\n\n :type IncludeGraph: boolean\n :param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Workflow': {\n 'Name': 'string',\n 'Description': 'string',\n 'DefaultRunProperties': {\n 'string': 'string'\n },\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'LastRun': {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 
'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nWorkflow (dict) --\nThe resource metadata for the workflow.\n\nName (string) --\nThe name of the workflow representing the flow.\n\nDescription (string) --\nA description of the workflow.\n\nDefaultRunProperties (dict) --\nA collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n\nCreatedOn (datetime) --\nThe date and time when the workflow was created.\n\nLastModifiedOn (datetime) --\nThe date and time when the workflow was last modified.\n\nLastRun (dict) --\nThe information about the last execution of the workflow.\n\nName (string) --\nName of the workflow which was executed.\n\nWorkflowRunId (string) --\nThe ID of this workflow run.\n\nWorkflowRunProperties (dict) --\nThe workflow run properties which were set during the run.\n\n(string) --\n(string) --\n\n\n\n\nStartedOn (datetime) --\nThe date and time when the workflow run was started.\n\nCompletedOn (datetime) --\nThe date and time when the workflow run completed.\n\nStatus (string) --\nThe status of the workflow run.\n\nStatistics (dict) --\nThe statistics of the run.\n\nTotalActions (integer) --\nTotal number of Actions in the workflow run.\n\nTimeoutActions (integer) --\nTotal number of Actions which timed out.\n\nFailedActions (integer) --\nTotal number of Actions which have failed.\n\nStoppedActions (integer) --\nTotal number of Actions which have 
stopped.\n\nSucceededActions (integer) --\nTotal number of Actions which have succeeded.\n\nRunningActions (integer) --\nTotal number Actions in running state.\n\n\n\nGraph (dict) --\nThe graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.\n\nNodes (list) --\nA list of the the AWS Glue components belong to the workflow represented as nodes.\n\n(dict) --\nA node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.\n\nType (string) --\nThe type of AWS Glue component represented by the node.\n\nName (string) --\nThe name of the AWS Glue component represented by the node.\n\nUniqueId (string) --\nThe unique Id assigned to the node within the workflow.\n\nTriggerDetails (dict) --\nDetails of the Trigger when the node represents a Trigger.\n\nTrigger (dict) --\nThe information of the trigger represented by the trigger node.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. 
If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nJobDetails (dict) --\nDetails of the Job when the node represents a Job.\n\nJobRuns (list) --\nThe information for the job runs represented by the job node.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. 
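
# --- usage sketch (editor's addition, not part of the generated stubs) ---
# A hedged sketch of consuming the get_workflow response structure documented in
# this section: the Graph of the last workflow run is walked and its nodes and
# edges printed. 'my_workflow' is a placeholder; only keys documented here
# ('Workflow', 'LastRun', 'Graph', 'Nodes', 'Edges', 'Type', 'Name', 'UniqueId',
# 'SourceId', 'DestinationId') are relied on.
import boto3

glue = boto3.client('glue')
workflow = glue.get_workflow(Name='my_workflow', IncludeGraph=True)['Workflow']
graph = workflow.get('LastRun', {}).get('Graph', {})

for node in graph.get('Nodes', []):
    # Each node is a TRIGGER, JOB, or CRAWLER component of the workflow.
    print(node.get('Type'), node.get('Name'), node.get('UniqueId'))

for edge in graph.get('Edges', []):
    # Edges are the directed connections between node UniqueIds.
    print('edge:', edge.get('SourceId'), '->', edge.get('DestinationId'))
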
The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. 
The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\nCrawlerDetails (dict) --\nDetails of the crawler when the node represents a crawler.\n\nCrawls (list) --\nA list of crawls represented by the crawl node.\n\n(dict) --\nThe details of a crawl in the workflow.\n\nState (string) --\nThe state of the crawler.\n\nStartedOn (datetime) --\nThe date and time on which the crawl started.\n\nCompletedOn (datetime) --\nThe date and time on which the crawl completed.\n\nErrorMessage (string) --\nThe error message associated with the crawl.\n\nLogGroup (string) --\nThe log group associated with the crawl.\n\nLogStream (string) --\nThe log stream associated with the crawl.\n\n\n\n\n\n\n\n\n\n\n\nEdges (list) --\nA list of all the directed connections between the nodes belonging to the workflow.\n\n(dict) --\nAn edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.\n\nSourceId (string) --\nThe unique of the node within the workflow where the edge starts.\n\nDestinationId (string) --\nThe unique of the node within the workflow where the edge ends.\n\n\n\n\n\n\n\n\n\nGraph (dict) --\nThe graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.\n\nNodes (list) --\nA list of the the AWS Glue components belong to the workflow represented as nodes.\n\n(dict) --\nA node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.\n\nType (string) --\nThe type of AWS Glue component represented by the node.\n\nName (string) --\nThe name of the AWS Glue component represented by the node.\n\nUniqueId (string) --\nThe unique Id assigned to the node within the workflow.\n\nTriggerDetails (dict) --\nDetails of the Trigger when the node represents a Trigger.\n\nTrigger (dict) --\nThe information of the trigger represented by the trigger node.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nJobDetails (dict) --\nDetails of the Job when the node represents a Job.\n\nJobRuns (list) --\nThe information for the job runs represented by the job node.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. 
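
# --- usage sketch (editor's addition, not part of the generated stubs) ---
# A hedged helper for pulling failed job runs out of the JobDetails structure
# described above for JOB nodes in a workflow graph. It assumes a graph dict
# shaped like the Response Syntax in this docstring and uses only the documented
# keys ('Nodes', 'Type', 'JobDetails', 'JobRuns', 'JobRunState', 'JobName',
# 'Id', 'ErrorMessage').
def failed_job_runs(graph):
    """Return (job_name, run_id, error_message) for every FAILED run in the graph."""
    failures = []
    for node in graph.get('Nodes', []):
        if node.get('Type') != 'JOB':
            continue
        for run in node.get('JobDetails', {}).get('JobRuns', []):
            if run.get('JobRunState') == 'FAILED':
                failures.append((run.get('JobName'), run.get('Id'), run.get('ErrorMessage')))
    return failures
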
This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\nCrawlerDetails (dict) --\nDetails of the crawler when the node represents a crawler.\n\nCrawls (list) --\nA list of crawls represented by the crawl node.\n\n(dict) --\nThe details of a crawl in the workflow.\n\nState (string) --\nThe state of the crawler.\n\nStartedOn (datetime) --\nThe date and time on which the crawl started.\n\nCompletedOn (datetime) --\nThe date and time on which the crawl completed.\n\nErrorMessage (string) --\nThe error message associated with the crawl.\n\nLogGroup (string) --\nThe log group associated with the crawl.\n\nLogStream (string) --\nThe log stream associated with the crawl.\n\n\n\n\n\n\n\n\n\n\n\nEdges (list) --\nA list of all the directed connections between the nodes belonging to the workflow.\n\n(dict) --\nAn edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.\n\nSourceId (string) --\nThe unique of the node within the workflow where the edge starts.\n\nDestinationId (string) --\nThe unique of the node within the workflow where the edge ends.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Workflow': {\n 'Name': 'string',\n 'Description': 'string',\n 'DefaultRunProperties': {\n 'string': 'string'\n },\n 'CreatedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'LastRun': {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 
'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 
'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_workflow_run(Name=None, RunId=None, IncludeGraph=None):\n \"\"\"\n Retrieves the metadata for a given workflow run.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_workflow_run(\n Name='string',\n RunId='string',\n IncludeGraph=True|False\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the workflow being run.\\n\n\n :type RunId: string\n :param RunId: [REQUIRED]\\nThe ID of the workflow run.\\n\n\n :type IncludeGraph: boolean\n :param IncludeGraph: Specifies whether to include the workflow graph in response or not.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Run': {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nRun (dict) --\nThe 
requested workflow run metadata.\n\nName (string) --\nName of the workflow which was executed.\n\nWorkflowRunId (string) --\nThe ID of this workflow run.\n\nWorkflowRunProperties (dict) --\nThe workflow run properties which were set during the run.\n\n(string) --\n(string) --\n\n\n\n\nStartedOn (datetime) --\nThe date and time when the workflow run was started.\n\nCompletedOn (datetime) --\nThe date and time when the workflow run completed.\n\nStatus (string) --\nThe status of the workflow run.\n\nStatistics (dict) --\nThe statistics of the run.\n\nTotalActions (integer) --\nTotal number of Actions in the workflow run.\n\nTimeoutActions (integer) --\nTotal number of Actions which timed out.\n\nFailedActions (integer) --\nTotal number of Actions which have failed.\n\nStoppedActions (integer) --\nTotal number of Actions which have stopped.\n\nSucceededActions (integer) --\nTotal number of Actions which have succeeded.\n\nRunningActions (integer) --\nTotal number Actions in running state.\n\n\n\nGraph (dict) --\nThe graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.\n\nNodes (list) --\nA list of the the AWS Glue components belong to the workflow represented as nodes.\n\n(dict) --\nA node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.\n\nType (string) --\nThe type of AWS Glue component represented by the node.\n\nName (string) --\nThe name of the AWS Glue component represented by the node.\n\nUniqueId (string) --\nThe unique Id assigned to the node within the workflow.\n\nTriggerDetails (dict) --\nDetails of the Trigger when the node represents a Trigger.\n\nTrigger (dict) --\nThe information of the trigger represented by the trigger node.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). 
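
# --- usage sketch (editor's addition, not part of the generated stubs) ---
# A hedged example of the get_workflow_run call documented here, printing the
# run's Status and Statistics block. The workflow name and run ID are
# placeholders; only documented response keys are used.
import boto3

glue = boto3.client('glue')
run = glue.get_workflow_run(
    Name='my_workflow',
    RunId='wr_0123456789abcdef',   # placeholder workflow run ID
    IncludeGraph=False,
)['Run']

stats = run.get('Statistics', {})
print('status:', run.get('Status'))
print('total actions:', stats.get('TotalActions'),
      'failed:', stats.get('FailedActions'),
      'succeeded:', stats.get('SucceededActions'))
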
This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nJobDetails (dict) --\nDetails of the Job when the node represents a Job.\n\nJobRuns (list) --\nThe information for the job runs represented by the job node.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. 
This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\nCrawlerDetails (dict) --\nDetails of the crawler when the node represents a crawler.\n\nCrawls (list) --\nA list of crawls represented by the crawl node.\n\n(dict) --\nThe details of a crawl in the workflow.\n\nState (string) --\nThe state of the crawler.\n\nStartedOn (datetime) --\nThe date and time on which the crawl started.\n\nCompletedOn (datetime) --\nThe date and time on which the crawl completed.\n\nErrorMessage (string) --\nThe error message associated with the crawl.\n\nLogGroup (string) --\nThe log group associated with the crawl.\n\nLogStream (string) --\nThe log stream associated with the crawl.\n\n\n\n\n\n\n\n\n\n\n\nEdges (list) --\nA list of all the directed connections between the nodes belonging to the workflow.\n\n(dict) --\nAn edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.\n\nSourceId (string) --\nThe unique of the node within the workflow where the edge starts.\n\nDestinationId (string) --\nThe unique of the node within the workflow where the edge ends.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Run': {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 
'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_workflow_run_properties(Name=None, RunId=None):\n \"\"\"\n Retrieves the workflow run properties which were set during the run.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_workflow_run_properties(\n Name='string',\n RunId='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the workflow which was run.\\n\n\n :type RunId: string\n :param RunId: [REQUIRED]\\nThe ID of the workflow run whose run properties should be returned.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'RunProperties': {\n 'string': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nRunProperties (dict) --\nThe workflow run properties which were set during the specified run.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'RunProperties': {\n 'string': 'string'\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef get_workflow_runs(Name=None, IncludeGraph=None, NextToken=None, MaxResults=None):\n \"\"\"\n Retrieves metadata for all runs of a given workflow.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.get_workflow_runs(\n Name='string',\n IncludeGraph=True|False,\n NextToken='string',\n MaxResults=123\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the workflow whose metadata of runs should be returned.\\n\n\n :type IncludeGraph: boolean\n :param IncludeGraph: Specifies whether to include the workflow graph in response or not.\n\n :type NextToken: string\n :param NextToken: The maximum size of the response.\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of workflow runs to be included in the response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Runs': [\n {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 
1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nRuns (list) --\nA list of workflow run metadata objects.\n\n(dict) --\nA workflow run is an execution of a workflow providing all the runtime information.\n\nName (string) --\nName of the workflow which was executed.\n\nWorkflowRunId (string) --\nThe ID of this workflow run.\n\nWorkflowRunProperties (dict) --\nThe workflow run properties which were set during the run.\n\n(string) --\n(string) --\n\n\n\n\nStartedOn (datetime) --\nThe date and time when the workflow run was started.\n\nCompletedOn (datetime) --\nThe date and time when the workflow run completed.\n\nStatus (string) --\nThe status of the workflow run.\n\nStatistics (dict) --\nThe statistics of the run.\n\nTotalActions (integer) --\nTotal number of Actions in the workflow run.\n\nTimeoutActions (integer) --\nTotal number of Actions which timed out.\n\nFailedActions (integer) --\nTotal number of Actions which have failed.\n\nStoppedActions (integer) --\nTotal number of Actions which have stopped.\n\nSucceededActions (integer) --\nTotal number of Actions which have 
succeeded.\n\nRunningActions (integer) --\nTotal number Actions in running state.\n\n\n\nGraph (dict) --\nThe graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.\n\nNodes (list) --\nA list of the the AWS Glue components belong to the workflow represented as nodes.\n\n(dict) --\nA node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.\n\nType (string) --\nThe type of AWS Glue component represented by the node.\n\nName (string) --\nThe name of the AWS Glue component represented by the node.\n\nUniqueId (string) --\nThe unique Id assigned to the node within the workflow.\n\nTriggerDetails (dict) --\nDetails of the Trigger when the node represents a Trigger.\n\nTrigger (dict) --\nThe information of the trigger represented by the trigger node.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. 
Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\nJobDetails (dict) --\nDetails of the Job when the node represents a Job.\n\nJobRuns (list) --\nThe information for the job runs represented by the job node.\n\n(dict) --\nContains information about a job run.\n\nId (string) --\nThe ID of this job run.\n\nAttempt (integer) --\nThe number of the attempt to run this job.\n\nPreviousRunId (string) --\nThe ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.\n\nTriggerName (string) --\nThe name of the trigger that started this job run.\n\nJobName (string) --\nThe name of the job definition being used in this run.\n\nStartedOn (datetime) --\nThe date and time at which this job run was started.\n\nLastModifiedOn (datetime) --\nThe last time that this job run was modified.\n\nCompletedOn (datetime) --\nThe date and time that this job run completed.\n\nJobRunState (string) --\nThe current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .\n\nArguments (dict) --\nThe job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nErrorMessage (string) --\nAn error message associated with this job run.\n\nPredecessorRuns (list) --\nA list of predecessors to this job run.\n\n(dict) --\nA job run that was used in the predicate of a conditional trigger that triggered this job run.\n\nJobName (string) --\nThe name of the job definition used by the predecessor job run.\n\nRunId (string) --\nThe job-run ID of the predecessor job run.\n\n\n\n\n\nAllocatedCapacity (integer) --\nThis field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nExecutionTime (integer) --\nThe amount of time (in seconds) that the job run consumed resources.\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nMaxCapacity (float) --\nThe number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. 
For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --\nThe type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nNumberOfWorkers (integer) --\nThe number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this job run.\n\nLogGroupName (string) --\nThe name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be \/aws-glue\/jobs\/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, \/aws-glue\/jobs-yourRoleName-yourSecurityConfigurationName\/ ), then that security configuration is used to encrypt the log group.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --\nGlue version determines the versions of Apache Spark and Python that AWS Glue supports. 
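# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Paging through get_workflow_runs with NextToken and reading the per-run Status
# and Statistics fields described above. The workflow name 'my-workflow' is a
# placeholder; a real call needs valid AWS credentials and an existing workflow.
import boto3

glue = boto3.client('glue')

def iter_workflow_runs(name):
    """Yield every run of the named workflow, following NextToken until exhausted."""
    kwargs = {'Name': name, 'IncludeGraph': False, 'MaxResults': 25}
    while True:
        page = glue.get_workflow_runs(**kwargs)
        for run in page.get('Runs', []):
            yield run
        token = page.get('NextToken')
        if not token:
            break
        kwargs['NextToken'] = token

for run in iter_workflow_runs('my-workflow'):
    stats = run.get('Statistics', {})
    print(run['WorkflowRunId'], run['Status'], 'failed actions:', stats.get('FailedActions', 0))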
The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n\n\n\n\n\n\n\nCrawlerDetails (dict) --\nDetails of the crawler when the node represents a crawler.\n\nCrawls (list) --\nA list of crawls represented by the crawl node.\n\n(dict) --\nThe details of a crawl in the workflow.\n\nState (string) --\nThe state of the crawler.\n\nStartedOn (datetime) --\nThe date and time on which the crawl started.\n\nCompletedOn (datetime) --\nThe date and time on which the crawl completed.\n\nErrorMessage (string) --\nThe error message associated with the crawl.\n\nLogGroup (string) --\nThe log group associated with the crawl.\n\nLogStream (string) --\nThe log stream associated with the crawl.\n\n\n\n\n\n\n\n\n\n\n\nEdges (list) --\nA list of all the directed connections between the nodes belonging to the workflow.\n\n(dict) --\nAn edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.\n\nSourceId (string) --\nThe unique of the node within the workflow where the edge starts.\n\nDestinationId (string) --\nThe unique of the node within the workflow where the edge ends.\n\n\n\n\n\n\n\n\n\n\n\nNextToken (string) --\nA continuation token, if not all requested workflow runs have been returned.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Runs': [\n {\n 'Name': 'string',\n 'WorkflowRunId': 'string',\n 'WorkflowRunProperties': {\n 'string': 'string'\n },\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',\n 'Statistics': {\n 'TotalActions': 123,\n 'TimeoutActions': 123,\n 'FailedActions': 123,\n 'StoppedActions': 123,\n 'SucceededActions': 123,\n 'RunningActions': 123\n },\n 'Graph': {\n 'Nodes': [\n {\n 'Type': 'CRAWLER'|'JOB'|'TRIGGER',\n 'Name': 'string',\n 'UniqueId': 'string',\n 'TriggerDetails': {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n },\n 'JobDetails': {\n 'JobRuns': [\n {\n 'Id': 'string',\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'TriggerName': 'string',\n 'JobName': 'string',\n 'StartedOn': datetime(2015, 1, 1),\n 'LastModifiedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'Arguments': {\n 'string': 'string'\n },\n 
'ErrorMessage': 'string',\n 'PredecessorRuns': [\n {\n 'JobName': 'string',\n 'RunId': 'string'\n },\n ],\n 'AllocatedCapacity': 123,\n 'ExecutionTime': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'LogGroupName': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n },\n ]\n },\n 'CrawlerDetails': {\n 'Crawls': [\n {\n 'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',\n 'StartedOn': datetime(2015, 1, 1),\n 'CompletedOn': datetime(2015, 1, 1),\n 'ErrorMessage': 'string',\n 'LogGroup': 'string',\n 'LogStream': 'string'\n },\n ]\n }\n },\n ],\n 'Edges': [\n {\n 'SourceId': 'string',\n 'DestinationId': 'string'\n },\n ]\n }\n },\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef import_catalog_to_glue(CatalogId=None):\n \"\"\"\n Imports an existing Amazon Athena Data Catalog to AWS Glue\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.import_catalog_to_glue(\n CatalogId='string'\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the catalog to import. Currently, this should be the AWS account ID.\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef list_crawlers(MaxResults=None, NextToken=None, Tags=None):\n \"\"\"\n Retrieves the names of all crawler resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.\n This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.list_crawlers(\n MaxResults=123,\n NextToken='string',\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :type Tags: dict\n :param Tags: Specifies to return only these tagged resources.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'CrawlerNames': [\n 'string',\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nCrawlerNames (list) --\nThe names of all crawlers in the account, or the crawlers with the specified tags.\n\n(string) --\n\n\nNextToken (string) --\nA continuation token, if the returned list does not contain the last metric available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'CrawlerNames': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_dev_endpoints(NextToken=None, MaxResults=None, Tags=None):\n \"\"\"\n Retrieves the names of all DevEndpoint resources in this AWS account, or the resources with the specified tag. 
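# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Collecting every crawler name with list_crawlers, following NextToken as the
# response structure above describes. Requires valid AWS credentials.
import boto3

glue = boto3.client('glue')

crawler_names, token = [], None
while True:
    kwargs = {'MaxResults': 100}
    if token:
        kwargs['NextToken'] = token
    page = glue.list_crawlers(**kwargs)
    crawler_names.extend(page.get('CrawlerNames', []))
    token = page.get('NextToken')
    if not token:
        break
print(len(crawler_names), 'crawlers found')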
This operation allows you to see which resources are available in your account, and their names.\n This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.list_dev_endpoints(\n NextToken='string',\n MaxResults=123,\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type Tags: dict\n :param Tags: Specifies to return only these tagged resources.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'DevEndpointNames': [\n 'string',\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nDevEndpointNames (list) --\nThe names of all the DevEndpoint s in the account, or the DevEndpoint s with the specified tags.\n\n(string) --\n\n\nNextToken (string) --\nA continuation token, if the returned list does not contain the last metric available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'DevEndpointNames': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_jobs(NextToken=None, MaxResults=None, Tags=None):\n \"\"\"\n Retrieves the names of all job resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.\n This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. 
If you choose to use tags filtering, only resources with the tag are retrieved.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.list_jobs(\n NextToken='string',\n MaxResults=123,\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type Tags: dict\n :param Tags: Specifies to return only these tagged resources.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobNames': [\n 'string',\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobNames (list) --\nThe names of all jobs in the account, or the jobs with the specified tags.\n\n(string) --\n\n\nNextToken (string) --\nA continuation token, if the returned list does not contain the last metric available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'JobNames': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None, Tags=None):\n \"\"\"\n Retrieves a sortable, filterable list of existing AWS Glue machine learning transforms in this AWS account, or the resources with the specified tag. This operation takes the optional Tags field, which you can use as a filter of the responses so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tags are retrieved.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.list_ml_transforms(\n NextToken='string',\n MaxResults=123,\n Filter={\n 'Name': 'string',\n 'TransformType': 'FIND_MATCHES',\n 'Status': 'NOT_READY'|'READY'|'DELETING',\n 'GlueVersion': 'string',\n 'CreatedBefore': datetime(2015, 1, 1),\n 'CreatedAfter': datetime(2015, 1, 1),\n 'LastModifiedBefore': datetime(2015, 1, 1),\n 'LastModifiedAfter': datetime(2015, 1, 1),\n 'Schema': [\n {\n 'Name': 'string',\n 'DataType': 'string'\n },\n ]\n },\n Sort={\n 'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',\n 'SortDirection': 'DESCENDING'|'ASCENDING'\n },\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type Filter: dict\n :param Filter: A TransformFilterCriteria used to filter the machine learning transforms.\\n\\nName (string) --A unique transform name that is used to filter the machine learning transforms.\\n\\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\\n\\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\\n\\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. 
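# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# list_dev_endpoints and list_jobs share the same shape: an optional Tags filter
# plus NextToken pagination. The tag key and value below are placeholders; only
# resources carrying that tag are returned when Tags is supplied.
import boto3

glue = boto3.client('glue')

team_tag = {'team': 'data-platform'}

dev_endpoints = glue.list_dev_endpoints(Tags=team_tag).get('DevEndpointNames', [])
job_names = glue.list_jobs(Tags=team_tag).get('JobNames', [])

print('tagged dev endpoints:', dev_endpoints)
print('tagged jobs:', job_names)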
For more information, see AWS Glue Versions in the developer guide.\\n\\nCreatedBefore (datetime) --The time and date before which the transforms were created.\\n\\nCreatedAfter (datetime) --The time and date after which the transforms were created.\\n\\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\\n\\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\\n\\nSchema (list) --Filters on datasets with a specific schema. The Map object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\\n\\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\\n\\nName (string) --The name of the column.\\n\\nDataType (string) --The type of data in the column.\\n\\n\\n\\n\\n\\n\\n\n\n :type Sort: dict\n :param Sort: A TransformSortCriteria used to sort the machine learning transforms.\\n\\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\\n\\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\\n\\n\\n\n\n :type Tags: dict\n :param Tags: Specifies to return only these tagged resources.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TransformIds': [\n 'string',\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTransformIds (list) --\nThe identifiers of all the machine learning transforms in the account, or the machine learning transforms with the specified tags.\n\n(string) --\n\n\nNextToken (string) --\nA continuation token, if the returned list does not contain the last metric available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TransformIds': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_triggers(NextToken=None, DependentJobName=None, MaxResults=None, Tags=None):\n \"\"\"\n Retrieves the names of all trigger resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.\n This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.list_triggers(\n NextToken='string',\n DependentJobName='string',\n MaxResults=123,\n Tags={\n 'string': 'string'\n }\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :type DependentJobName: string\n :param DependentJobName: The name of the job for which to retrieve triggers. The trigger that can start this job is returned. 
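# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Filtering and sorting list_ml_transforms with the TransformFilterCriteria and
# TransformSortCriteria shapes described above: only READY FindMatches transforms,
# newest first. Requires valid AWS credentials.
import boto3

glue = boto3.client('glue')

resp = glue.list_ml_transforms(
    Filter={'TransformType': 'FIND_MATCHES', 'Status': 'READY'},
    Sort={'Column': 'CREATED', 'SortDirection': 'DESCENDING'},
    MaxResults=50,
)
for transform_id in resp.get('TransformIds', []):
    print(transform_id)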
If there is no such trigger, all triggers are returned.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :type Tags: dict\n :param Tags: Specifies to return only these tagged resources.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TriggerNames': [\n 'string',\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTriggerNames (list) --\nThe names of all triggers in the account, or the triggers with the specified tags.\n\n(string) --\n\n\nNextToken (string) --\nA continuation token, if the returned list does not contain the last metric available.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'TriggerNames': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef list_workflows(NextToken=None, MaxResults=None):\n \"\"\"\n Lists names of workflows created in the account.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.list_workflows(\n NextToken='string',\n MaxResults=123\n )\n \n \n :type NextToken: string\n :param NextToken: A continuation token, if this is a continuation request.\n\n :type MaxResults: integer\n :param MaxResults: The maximum size of a list to return.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Workflows': [\n 'string',\n ],\n 'NextToken': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nWorkflows (list) --\nList of names of workflows in the account.\n\n(string) --\n\n\nNextToken (string) --\nA continuation token, if not all workflow names have been returned.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'Workflows': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n \n :returns: \n (string) --\n \n \"\"\"\n pass\n\ndef put_data_catalog_encryption_settings(CatalogId=None, DataCatalogEncryptionSettings=None):\n \"\"\"\n Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.put_data_catalog_encryption_settings(\n CatalogId='string',\n DataCatalogEncryptionSettings={\n 'EncryptionAtRest': {\n 'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',\n 'SseAwsKmsKeyId': 'string'\n },\n 'ConnectionPasswordEncryption': {\n 'ReturnConnectionPasswordEncrypted': True|False,\n 'AwsKmsKeyId': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog to set the security configuration for. 
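# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Using DependentJobName to find the trigger that can start a given job, and
# listing workflow names in the account. 'my-etl-job' is a placeholder job name.
import boto3

glue = boto3.client('glue')

triggers = glue.list_triggers(DependentJobName='my-etl-job').get('TriggerNames', [])
print('triggers that can start my-etl-job:', triggers)

workflows = glue.list_workflows(MaxResults=25).get('Workflows', [])
print('workflows:', workflows)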
If none is provided, the AWS account ID is used by default.\n\n :type DataCatalogEncryptionSettings: dict\n :param DataCatalogEncryptionSettings: [REQUIRED]\\nThe security configuration to set.\\n\\nEncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.\\n\\nCatalogEncryptionMode (string) -- [REQUIRED]The encryption-at-rest mode for encrypting Data Catalog data.\\n\\nSseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.\\n\\n\\n\\nConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.\\n\\nReturnConnectionPasswordEncrypted (boolean) -- [REQUIRED]When the ReturnConnectionPasswordEncrypted flag is set to 'true', passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.\\n\\nAwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.\\nIf connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.\\nYou can set the decrypt permission to enable or restrict access on the password key according to your security requirements.\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef put_resource_policy(PolicyInJson=None, PolicyHashCondition=None, PolicyExistsCondition=None):\n \"\"\"\n Sets the Data Catalog resource policy for access control.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.put_resource_policy(\n PolicyInJson='string',\n PolicyHashCondition='string',\n PolicyExistsCondition='MUST_EXIST'|'NOT_EXIST'|'NONE'\n )\n \n \n :type PolicyInJson: string\n :param PolicyInJson: [REQUIRED]\\nContains the policy document to set, in JSON format.\\n\n\n :type PolicyHashCondition: string\n :param PolicyHashCondition: The hash value returned when the previous policy was set using PutResourcePolicy . Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.\n\n :type PolicyExistsCondition: string\n :param PolicyExistsCondition: A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used to create a new policy. If a value of NONE or a null value is used, the call will not depend on the existence of a policy.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'PolicyHash': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nPolicyHash (string) --\nA hash of the policy that has just been set. 
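# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Enabling encryption at rest and connection-password encryption for the Data
# Catalog with put_data_catalog_encryption_settings. The KMS key ARN is a
# placeholder, and the caller needs the KMS permissions noted above.
import boto3

glue = boto3.client('glue')

glue.put_data_catalog_encryption_settings(
    DataCatalogEncryptionSettings={
        'EncryptionAtRest': {
            'CatalogEncryptionMode': 'SSE-KMS',
            'SseAwsKmsKeyId': 'arn:aws:kms:us-east-1:111122223333:key/example-key-id',
        },
        'ConnectionPasswordEncryption': {
            'ReturnConnectionPasswordEncrypted': True,
            'AwsKmsKeyId': 'arn:aws:kms:us-east-1:111122223333:key/example-key-id',
        },
    }
)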
This must be included in a subsequent call that overwrites or updates this policy.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.ConditionCheckFailureException\n\n\n :return: {\n 'PolicyHash': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.ConditionCheckFailureException\n \n \"\"\"\n pass\n\ndef put_workflow_run_properties(Name=None, RunId=None, RunProperties=None):\n \"\"\"\n Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, then it overrides the value otherwise adds the property to existing properties.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.put_workflow_run_properties(\n Name='string',\n RunId='string',\n RunProperties={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the workflow which was run.\\n\n\n :type RunId: string\n :param RunId: [REQUIRED]\\nThe ID of the workflow run for which the run properties should be updated.\\n\n\n :type RunProperties: dict\n :param RunProperties: [REQUIRED]\\nThe properties to put for the specified run.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.AlreadyExistsException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef reset_job_bookmark(JobName=None, RunId=None):\n \"\"\"\n Resets a bookmark entry.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.reset_job_bookmark(\n JobName='string',\n RunId='string'\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job in question.\\n\n\n :type RunId: string\n :param RunId: The unique run identifier associated with this job run.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobBookmarkEntry': {\n 'JobName': 'string',\n 'Version': 123,\n 'Run': 123,\n 'Attempt': 123,\n 'PreviousRunId': 'string',\n 'RunId': 'string',\n 'JobBookmark': 'string'\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobBookmarkEntry (dict) --\nThe reset bookmark entry.\n\nJobName (string) --\nThe name of the job in question.\n\nVersion (integer) --\nThe version of the job.\n\nRun (integer) --\nThe run ID number.\n\nAttempt (integer) --\nThe attempt ID number.\n\nPreviousRunId (string) --\nThe unique run identifier associated with the previous job run.\n\nRunId (string) --\nThe run ID number.\n\nJobBookmark (string) --\nThe bookmark itself.\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'JobBookmarkEntry': {\n 'JobName': 'string',\n 'Version': 123,\n 'Run': 123,\n 'Attempt': 
123,\n 'PreviousRunId': 'string',\n 'RunId': 'string',\n 'JobBookmark': 'string'\n }\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef search_tables(CatalogId=None, NextToken=None, Filters=None, SearchText=None, SortCriteria=None, MaxResults=None):\n \"\"\"\n Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.\n You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least a read-only access to the table for it to be returned. If you do not have access to all the columns in the table, these columns will not be searched against when returning the list of tables back to you. If you have access to the columns but not the data in the columns, those columns and the associated metadata for those columns will be included in the search.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.search_tables(\n CatalogId='string',\n NextToken='string',\n Filters=[\n {\n 'Key': 'string',\n 'Value': 'string',\n 'Comparator': 'EQUALS'|'GREATER_THAN'|'LESS_THAN'|'GREATER_THAN_EQUALS'|'LESS_THAN_EQUALS'\n },\n ],\n SearchText='string',\n SortCriteria=[\n {\n 'FieldName': 'string',\n 'Sort': 'ASC'|'DESC'\n },\n ],\n MaxResults=123\n )\n \n \n :type CatalogId: string\n :param CatalogId: A unique identifier, consisting of `` account_id \/datalake`` .\n\n :type NextToken: string\n :param NextToken: A continuation token, included if this is a continuation call.\n\n :type Filters: list\n :param Filters: A list of key-value pairs, and a comparator used to filter the search results. 
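# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Round-tripping workflow run properties: put_workflow_run_properties overrides or
# adds keys for a run, and get_workflow_run_properties reads them back, as
# described above. The workflow name and run ID are placeholders.
import boto3

glue = boto3.client('glue')

wf_name, run_id = 'my-workflow', 'wr_0123456789abcdef'
glue.put_workflow_run_properties(
    Name=wf_name,
    RunId=run_id,
    RunProperties={'stage': 'validated', 'record_count': '12345'},
)
props = glue.get_workflow_run_properties(Name=wf_name, RunId=run_id)['RunProperties']
print(props.get('stage'), props.get('record_count'))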
Returns all entities matching the predicate.\\n\\n(dict) --Defines a property predicate.\\n\\nKey (string) --The key of the property.\\n\\nValue (string) --The value of the property.\\n\\nComparator (string) --The comparator used to compare this property to others.\\n\\n\\n\\n\\n\n\n :type SearchText: string\n :param SearchText: A string used for a text search.\\nSpecifying a value in quotes filters based on an exact match to the value.\\n\n\n :type SortCriteria: list\n :param SortCriteria: A list of criteria for sorting the results by a field name, in an ascending or descending order.\\n\\n(dict) --Specifies a field to sort by and a sort order.\\n\\nFieldName (string) --The name of the field on which to sort.\\n\\nSort (string) --An ascending or descending sort.\\n\\n\\n\\n\\n\n\n :type MaxResults: integer\n :param MaxResults: The maximum number of tables to return in a single response.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'NextToken': 'string',\n 'TableList': [\n {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n ]\n}\n\n\nResponse Structure\n\n(dict) --\n\nNextToken (string) --\nA continuation token, present if the current list segment is not the last.\n\nTableList (list) --\nA list of the requested Table objects. The SearchTables response returns only the tables that you have access to.\n\n(dict) --\nRepresents a collection of related data organized in columns and rows.\n\nName (string) --\nThe table name. For Hive compatibility, this must be entirely lowercase.\n\nDatabaseName (string) --\nThe name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.\n\nDescription (string) --\nA description of the table.\n\nOwner (string) --\nThe owner of the table.\n\nCreateTime (datetime) --\nThe time when the table definition was created in the Data Catalog.\n\nUpdateTime (datetime) --\nThe last time that the table was updated.\n\nLastAccessTime (datetime) --\nThe last time that the table was accessed. 
This is usually taken from HDFS, and might not be reliable.\n\nLastAnalyzedTime (datetime) --\nThe last time that column statistics were computed for this table.\n\nRetention (integer) --\nThe retention time for this table.\n\nStorageDescriptor (dict) --\nA storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --\nA list of the Columns in the table.\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --\nThe physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --\nThe input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --\nThe output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\n\nTrue if the data in the table is compressed, or False if not.\n\n\nNumberOfBuckets (integer) --\nMust be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --\nThe serialization\/deserialization (SerDe) information.\n\nName (string) --\nName of the SerDe.\n\nSerializationLibrary (string) --\nUsually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --\nThese key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --\nA list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --\nA list specifying the sort order of each bucket in the table.\n\n(dict) --\nSpecifies the sort order of a sorted column.\n\nColumn (string) --\nThe name of the column.\n\nSortOrder (integer) --\nIndicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --\nThe user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --\nThe information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --\nA list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --\nA list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --\nA mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\n\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\n\nPartitionKeys (list) --\nA list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n\"PartitionKeys\": []\n\n\n(dict) --\nA column in a Table .\n\nName (string) --\nThe name of the Column .\n\nType (string) --\nThe data type of the Column .\n\nComment (string) --\nA free-form text comment.\n\nParameters (dict) --\nThese key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --\nIf the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --\nIf the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --\nThe type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --\nThese key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\nCreatedBy (string) --\nThe person or entity who created the table.\n\nIsRegisteredWithLakeFormation (boolean) --\nIndicates whether the table has been registered with AWS Lake Formation.\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {\n 'NextToken': 'string',\n 'TableList': [\n {\n 'Name': 'string',\n 'DatabaseName': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'CreateTime': datetime(2015, 1, 1),\n 'UpdateTime': datetime(2015, 1, 1),\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreatedBy': 'string',\n 'IsRegisteredWithLakeFormation': True|False\n },\n ]\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef start_crawler(Name=None):\n \"\"\"\n Starts a crawl using the specified crawler, regardless of what is scheduled. 
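# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Searching the Data Catalog with search_tables: free-text SearchText plus a
# property filter, paginated with NextToken. The filter key and search text are
# placeholders, and only tables the caller can access are returned.
import boto3

glue = boto3.client('glue')

kwargs = {
    'SearchText': 'clickstream',
    'Filters': [{'Key': 'DatabaseName', 'Value': 'analytics', 'Comparator': 'EQUALS'}],
    'MaxResults': 100,
}
while True:
    page = glue.search_tables(**kwargs)
    for table in page.get('TableList', []):
        print(table['DatabaseName'], table['Name'], table.get('TableType'))
    token = page.get('NextToken')
    if not token:
        break
    kwargs['NextToken'] = token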
If the crawler is already running, returns a CrawlerRunningException .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the crawler to start.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.CrawlerRunningException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.CrawlerRunningException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef start_crawler_schedule(CrawlerName=None):\n \"\"\"\n Changes the schedule state of the specified crawler to SCHEDULED , unless the crawler is already running or the schedule state is already SCHEDULED .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_crawler_schedule(\n CrawlerName='string'\n )\n \n \n :type CrawlerName: string\n :param CrawlerName: [REQUIRED]\\nName of the crawler to schedule.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.SchedulerRunningException\nGlue.Client.exceptions.SchedulerTransitioningException\nGlue.Client.exceptions.NoScheduleException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.SchedulerRunningException\n Glue.Client.exceptions.SchedulerTransitioningException\n Glue.Client.exceptions.NoScheduleException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef start_export_labels_task_run(TransformId=None, OutputS3Path=None):\n \"\"\"\n Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId . 
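# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Starting a crawler and tolerating the CrawlerRunningException raised when it is
# already running, as described above. 'my-crawler' is a placeholder name.
import boto3

glue = boto3.client('glue')

try:
    glue.start_crawler(Name='my-crawler')
    print('crawler started')
except glue.exceptions.CrawlerRunningException:
    print('crawler is already running; nothing to do')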
You can check on the status of your task run by calling the GetMLTaskRun API.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_export_labels_task_run(\n TransformId='string',\n OutputS3Path='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :type OutputS3Path: string\n :param OutputS3Path: [REQUIRED]\\nThe Amazon S3 path where you export the labels.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TaskRunId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTaskRunId (string) --\nThe unique identifier for the task run.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TaskRunId': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n \n \"\"\"\n pass\n\ndef start_import_labels_task_run(TransformId=None, InputS3Path=None, ReplaceAllLabels=None):\n \"\"\"\n Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform.\n After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called \\'labeling\\' in the machine learning workflows). In the case of the FindMatches transform, these questions are of the form, \\xe2\\x80\\x9cWhat is the correct way to group these rows together into groups composed entirely of matching records?\\xe2\\x80\\x9d After the labeling process is finished, users upload their answers\/labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation.\n By default, StartMLLabelingSetGenerationTaskRun continually learns from and combines all labels that you upload unless you set Replace to true. If you set Replace to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. 
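# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Exporting all labels for a transform and polling the task with get_ml_task_run,
# as the description above suggests. The transform ID and S3 path are placeholders.
import time

import boto3

glue = boto3.client('glue')

transform_id = 'tfm-0123456789abcdef'
task = glue.start_export_labels_task_run(
    TransformId=transform_id,
    OutputS3Path='s3://example-bucket/glue-labels/export/',
)
task_run_id = task['TaskRunId']
while True:
    status = glue.get_ml_task_run(TransformId=transform_id, TaskRunId=task_run_id)['Status']
    if status in ('SUCCEEDED', 'FAILED', 'STOPPED', 'TIMEOUT'):
        break
    time.sleep(30)
print('export task finished with status', status)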
Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality.\n You can check on the status of your task run by calling the GetMLTaskRun operation.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_import_labels_task_run(\n TransformId='string',\n InputS3Path='string',\n ReplaceAllLabels=True|False\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :type InputS3Path: string\n :param InputS3Path: [REQUIRED]\\nThe Amazon Simple Storage Service (Amazon S3) path from where you import the labels.\\n\n\n :type ReplaceAllLabels: boolean\n :param ReplaceAllLabels: Indicates whether to overwrite your existing labels.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TaskRunId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTaskRunId (string) --\nThe unique identifier for the task run.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.InternalServiceException\n\n\n :return: {\n 'TaskRunId': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n Glue.Client.exceptions.InternalServiceException\n \n \"\"\"\n pass\n\ndef start_job_run(JobName=None, JobRunId=None, Arguments=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, NotificationProperty=None, WorkerType=None, NumberOfWorkers=None):\n \"\"\"\n Starts a job run using a job definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_job_run(\n JobName='string',\n JobRunId='string',\n Arguments={\n 'string': 'string'\n },\n AllocatedCapacity=123,\n Timeout=123,\n MaxCapacity=123.0,\n SecurityConfiguration='string',\n NotificationProperty={\n 'NotifyDelayAfter': 123\n },\n WorkerType='Standard'|'G.1X'|'G.2X',\n NumberOfWorkers=123\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job definition to use.\\n\n\n :type JobRunId: string\n :param JobRunId: The ID of a previous JobRun to retry.\n\n :type Arguments: dict\n :param Arguments: The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.\\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :type AllocatedCapacity: integer\n :param AllocatedCapacity: This field is deprecated. Use MaxCapacity instead.\\nThe number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. 
For more information, see the AWS Glue pricing page .\\n\n\n :type Timeout: integer\n :param Timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\n :type MaxCapacity: float\n :param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job:\\n\\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\\n\\n\n\n :type SecurityConfiguration: string\n :param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job run.\n\n :type NotificationProperty: dict\n :param NotificationProperty: Specifies configuration properties of a job run notification.\\n\\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\\n\\n\\n\n\n :type WorkerType: string\n :param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\\n\\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\\n\\n\n\n :type NumberOfWorkers: integer\n :param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobRunId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobRunId (string) --\nThe ID assigned to this job run.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentRunsExceededException\n\n\n :return: {\n 'JobRunId': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ResourceNumberLimitExceededException\n Glue.Client.exceptions.ConcurrentRunsExceededException\n \n \"\"\"\n pass\n\ndef start_ml_evaluation_task_run(TransformId=None):\n \"\"\"\n Starts a task to estimate the quality of the transform.\n When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. 
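# --- Hedged usage sketch (illustrative addition, not part of the generated stubs) ---
# Starting a job run with run-specific arguments and G.1X workers (so MaxCapacity
# is left unset, as the parameter notes above require), then reading the run state
# back with get_job_run. The job name and argument key are placeholders.
import boto3

glue = boto3.client('glue')

run = glue.start_job_run(
    JobName='my-etl-job',
    Arguments={'--input_date': '2020-01-01'},
    WorkerType='G.1X',
    NumberOfWorkers=10,
    Timeout=120,
)
run_id = run['JobRunId']
state = glue.get_job_run(JobName='my-etl-job', RunId=run_id)['JobRun']['JobRunState']
print(run_id, state)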
The rest of the labels are used as a test to estimate quality.\n Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the stats of the EvaluationTaskRun .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_ml_evaluation_task_run(\n TransformId='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'TaskRunId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nTaskRunId (string) --The unique identifier associated with this run.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.ConcurrentRunsExceededException\nGlue.Client.exceptions.MLTransformNotReadyException\n\n\n :return: {\n 'TaskRunId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_ml_labeling_set_generation_task_run(TransformId=None, OutputS3Path=None):\n \"\"\"\n Starts the active learning workflow for your machine learning transform to improve the transform\\'s quality by generating label sets and adding labels.\n When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a \"labeling set\" or a set of questions for humans to answer.\n In the case of the FindMatches transform, these questions are of the form, \\xe2\\x80\\x9cWhat is the correct way to group these rows together into groups composed entirely of matching records?\\xe2\\x80\\x9d\n After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_ml_labeling_set_generation_task_run(\n TransformId='string',\n OutputS3Path='string'\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nThe unique identifier of the machine learning transform.\\n\n\n :type OutputS3Path: string\n :param OutputS3Path: [REQUIRED]\\nThe Amazon Simple Storage Service (Amazon S3) path where you generate the labeling set.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TaskRunId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTaskRunId (string) --\nThe unique run identifier that is associated with this task run.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.ConcurrentRunsExceededException\n\n\n :return: {\n 'TaskRunId': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.ConcurrentRunsExceededException\n \n \"\"\"\n pass\n\ndef start_trigger(Name=None):\n \"\"\"\n Starts an existing trigger. 
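As an illustration only (the trigger name below is hypothetical), a trigger can be started by name and the confirmation read back from the response:

    import boto3

    glue = boto3.client('glue')  # assumes credentials and a region are already configured
    response = glue.start_trigger(Name='nightly-etl-trigger')  # hypothetical trigger name
    print(response['Name'])  # the name of the trigger that was started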
See Triggering Jobs for information about how different types of trigger are started.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the trigger to start.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nName (string) --The name of the trigger that was started.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentRunsExceededException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef start_workflow_run(Name=None):\n \"\"\"\n Starts a new run of the specified workflow.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.start_workflow_run(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the workflow to start.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'RunId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nRunId (string) --An Id for the new run.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.ConcurrentRunsExceededException\n\n\n :return: {\n 'RunId': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef stop_crawler(Name=None):\n \"\"\"\n If the specified crawler is running, stops the crawl.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.stop_crawler(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the crawler to stop.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.CrawlerNotRunningException\nGlue.Client.exceptions.CrawlerStoppingException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.CrawlerNotRunningException\n Glue.Client.exceptions.CrawlerStoppingException\n Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef stop_crawler_schedule(CrawlerName=None):\n \"\"\"\n Sets the schedule state of the specified crawler to NOT_SCHEDULED , but does not stop the crawler if it is already running.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.stop_crawler_schedule(\n CrawlerName='string'\n )\n \n \n :type CrawlerName: string\n :param CrawlerName: [REQUIRED]\\nName of the crawler whose schedule state to set.\\n\n\n :rtype: dict\nReturnsResponse Syntax{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.SchedulerNotRunningException\nGlue.Client.exceptions.SchedulerTransitioningException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.SchedulerNotRunningException\n Glue.Client.exceptions.SchedulerTransitioningException\n 
Glue.Client.exceptions.OperationTimeoutException\n \n \"\"\"\n pass\n\ndef stop_trigger(Name=None):\n \"\"\"\n Stops a specified trigger.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.stop_trigger(\n Name='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the trigger to stop.\\n\n\n :rtype: dict\nReturnsResponse Syntax{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\nName (string) --The name of the trigger that was stopped.\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n \"\"\"\n pass\n\ndef stop_workflow_run(Name=None, RunId=None):\n \"\"\"\n Stops the execution of the specified workflow run.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.stop_workflow_run(\n Name='string',\n RunId='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the workflow to stop.\\n\n\n :type RunId: string\n :param RunId: [REQUIRED]\\nThe ID of the workflow run to stop.\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.IllegalWorkflowStateException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef tag_resource(ResourceArn=None, TagsToAdd=None):\n \"\"\"\n Adds tags to a resource. A tag is a label you can assign to an AWS resource. In AWS Glue, you can tag only certain resources. For information about what resources you can tag, see AWS Tags in AWS Glue .\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.tag_resource(\n ResourceArn='string',\n TagsToAdd={\n 'string': 'string'\n }\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\\nThe ARN of the AWS Glue resource to which to add the tags. 
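As a hedged sketch (the region, account ID, job name, and tag values below are placeholders, not values from this reference), a job could be tagged by its ARN like this:

    import boto3

    glue = boto3.client('glue')  # assumes credentials and a region are already configured
    glue.tag_resource(
        ResourceArn='arn:aws:glue:us-east-1:123456789012:job/my-etl-job',  # placeholder ARN
        TagsToAdd={'team': 'data-eng', 'env': 'prod'},                     # placeholder tags
    )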
For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern .\\n\n\n :type TagsToAdd: dict\n :param TagsToAdd: [REQUIRED]\\nTags to add to this resource.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.EntityNotFoundException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef untag_resource(ResourceArn=None, TagsToRemove=None):\n \"\"\"\n Removes tags from a resource.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.untag_resource(\n ResourceArn='string',\n TagsToRemove=[\n 'string',\n ]\n )\n \n \n :type ResourceArn: string\n :param ResourceArn: [REQUIRED]\\nThe Amazon Resource Name (ARN) of the resource from which to remove the tags.\\n\n\n :type TagsToRemove: list\n :param TagsToRemove: [REQUIRED]\\nTags to remove from this resource.\\n\\n(string) --\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.EntityNotFoundException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):\n \"\"\"\n Modifies an existing classifier (a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field is present).\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_classifier(\n GrokClassifier={\n 'Name': 'string',\n 'Classification': 'string',\n 'GrokPattern': 'string',\n 'CustomPatterns': 'string'\n },\n XMLClassifier={\n 'Name': 'string',\n 'Classification': 'string',\n 'RowTag': 'string'\n },\n JsonClassifier={\n 'Name': 'string',\n 'JsonPath': 'string'\n },\n CsvClassifier={\n 'Name': 'string',\n 'Delimiter': 'string',\n 'QuoteSymbol': 'string',\n 'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',\n 'Header': [\n 'string',\n ],\n 'DisableValueTrimming': True|False,\n 'AllowSingleColumn': True|False\n }\n )\n \n \n :type GrokClassifier: dict\n :param GrokClassifier: A GrokClassifier object with updated fields.\\n\\nName (string) -- [REQUIRED]The name of the GrokClassifier .\\n\\nClassification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\\n\\nGrokPattern (string) --The grok pattern used by this classifier.\\n\\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\\n\\n\\n\n\n :type XMLClassifier: dict\n :param XMLClassifier: An XMLClassifier object with updated fields.\\n\\nName (string) -- [REQUIRED]The name of the classifier.\\n\\nClassification (string) --An identifier of the data format that the classifier matches.\\n\\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This cannot identify a self-closing element (closed by \/> ). 
An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row><\/row> is okay, but the self-closing form <row\/> is not).\\n\\n\\n\n\n :type JsonClassifier: dict\n :param JsonClassifier: A JsonClassifier object with updated fields.\\n\\nName (string) -- [REQUIRED]The name of the classifier.\\n\\nJsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\\n\\n\\n\n\n :type CsvClassifier: dict\n :param CsvClassifier: A CsvClassifier object with updated fields.\\n\\nName (string) -- [REQUIRED]The name of the classifier.\\n\\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\\n\\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.\\n\\nContainsHeader (string) --Indicates whether the CSV file contains a header.\\n\\nHeader (list) --A list of strings representing column names.\\n\\n(string) --\\n\\n\\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\\n\\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.VersionMismatchException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_connection(CatalogId=None, Name=None, ConnectionInput=None):\n \"\"\"\n Updates a connection definition in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_connection(\n CatalogId='string',\n Name='string',\n ConnectionInput={\n 'Name': 'string',\n 'Description': 'string',\n 'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',\n 'MatchCriteria': [\n 'string',\n ],\n 'ConnectionProperties': {\n 'string': 'string'\n },\n 'PhysicalConnectionRequirements': {\n 'SubnetId': 'string',\n 'SecurityGroupIdList': [\n 'string',\n ],\n 'AvailabilityZone': 'string'\n }\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the connection definition to update.\\n\n\n :type ConnectionInput: dict\n :param ConnectionInput: [REQUIRED]\\nA ConnectionInput object that redefines the connection in question.\\n\\nName (string) -- [REQUIRED]The name of the connection.\\n\\nDescription (string) --The description of the connection.\\n\\nConnectionType (string) -- [REQUIRED]The type of the connection.
Currently, these types are supported:\\n\\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\\nMONGODB - Designates a connection to a MongoDB document database.\\n\\nSFTP is not supported.\\n\\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\\n\\n(string) --\\n\\n\\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\\n\\nSubnetId (string) --The subnet ID used by the connection.\\n\\nSecurityGroupIdList (list) --The security group ID list used by the connection.\\n\\n(string) --\\n\\n\\nAvailabilityZone (string) --The connection\\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None):\n \"\"\"\n Updates a crawler. 
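Because a running crawler cannot be updated, a minimal sketch might stop it, wait for it to report READY, and then apply the change; the crawler name and cron expression below are hypothetical, and the sketch relies on the separate StopCrawler and GetCrawler operations:

    import time

    import boto3

    glue = boto3.client('glue')  # assumes credentials and a region are already configured
    crawler_name = 'sales-data-crawler'  # hypothetical crawler name
    try:
        glue.stop_crawler(Name=crawler_name)
    except glue.exceptions.CrawlerNotRunningException:
        pass  # nothing to stop
    while glue.get_crawler(Name=crawler_name)['Crawler']['State'] != 'READY':
        time.sleep(10)  # crude polling; a real script would bound the wait
    glue.update_crawler(Name=crawler_name, Schedule='cron(15 12 * * ? *)')  # run daily at 12:15 UTC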
If a crawler is running, you must stop it using StopCrawler before updating it.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_crawler(\n Name='string',\n Role='string',\n DatabaseName='string',\n Description='string',\n Targets={\n 'S3Targets': [\n {\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'JdbcTargets': [\n {\n 'ConnectionName': 'string',\n 'Path': 'string',\n 'Exclusions': [\n 'string',\n ]\n },\n ],\n 'DynamoDBTargets': [\n {\n 'Path': 'string'\n },\n ],\n 'CatalogTargets': [\n {\n 'DatabaseName': 'string',\n 'Tables': [\n 'string',\n ]\n },\n ]\n },\n Schedule='string',\n Classifiers=[\n 'string',\n ],\n TablePrefix='string',\n SchemaChangePolicy={\n 'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',\n 'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'\n },\n Configuration='string',\n CrawlerSecurityConfiguration='string'\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the new crawler.\\n\n\n :type Role: string\n :param Role: The IAM role or Amazon Resource Name (ARN) of an IAM role that is used by the new crawler to access customer resources.\n\n :type DatabaseName: string\n :param DatabaseName: The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database\/sometable\/* .\n\n :type Description: string\n :param Description: A description of the new crawler.\n\n :type Targets: dict\n :param Targets: A list of targets to crawl.\\n\\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\\n\\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\\n\\nPath (string) --The path to the Amazon S3 target.\\n\\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\nJdbcTargets (list) --Specifies JDBC targets.\\n\\n(dict) --Specifies a JDBC data store to crawl.\\n\\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\\n\\nPath (string) --The path of the JDBC target.\\n\\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\\n\\n(dict) --Specifies an Amazon DynamoDB table to crawl.\\n\\nPath (string) --The name of the DynamoDB table to crawl.\\n\\n\\n\\n\\n\\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\\n\\n(dict) --Specifies an AWS Glue Data Catalog target.\\n\\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\\n\\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\n\n :type Schedule: string\n :param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .\n\n :type Classifiers: list\n :param Classifiers: A list of custom classifiers that the user has registered. 
By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\\n\\n(string) --\\n\\n\n\n :type TablePrefix: string\n :param TablePrefix: The table prefix used for catalog tables that are created.\n\n :type SchemaChangePolicy: dict\n :param SchemaChangePolicy: The policy for the crawler\\'s update and deletion behavior.\\n\\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\\n\\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\\n\\n\\n\n\n :type Configuration: string\n :param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\\'s behavior. For more information, see Configuring a Crawler .\n\n :type CrawlerSecurityConfiguration: string\n :param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.VersionMismatchException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.CrawlerRunningException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_crawler_schedule(CrawlerName=None, Schedule=None):\n \"\"\"\n Updates the schedule of a crawler using a cron expression.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_crawler_schedule(\n CrawlerName='string',\n Schedule='string'\n )\n \n \n :type CrawlerName: string\n :param CrawlerName: [REQUIRED]\\nThe name of the crawler whose schedule to update.\\n\n\n :type Schedule: string\n :param Schedule: The updated cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.VersionMismatchException\nGlue.Client.exceptions.SchedulerTransitioningException\nGlue.Client.exceptions.OperationTimeoutException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_database(CatalogId=None, Name=None, DatabaseInput=None):\n \"\"\"\n Updates an existing database definition in a Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_database(\n CatalogId='string',\n Name='string',\n DatabaseInput={\n 'Name': 'string',\n 'Description': 'string',\n 'LocationUri': 'string',\n 'Parameters': {\n 'string': 'string'\n },\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': {\n 'DataLakePrincipalIdentifier': 'string'\n },\n 'Permissions': [\n 'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',\n ]\n },\n ]\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog in which the metadata database resides. If none is provided, the AWS account ID is used by default.\n\n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the database to update in the catalog. 
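As an illustrative sketch only (the database name and description are assumptions), a database description can be updated while keeping the same name:

    import boto3

    glue = boto3.client('glue')  # assumes credentials and a region are already configured
    glue.update_database(
        Name='analytics_db',  # hypothetical database name
        DatabaseInput={
            'Name': 'analytics_db',                     # Name is required within DatabaseInput
            'Description': 'Curated analytics tables',  # hypothetical new description
        },
    )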
For Hive compatibility, this is folded to lowercase.\\n\n\n :type DatabaseInput: dict\n :param DatabaseInput: [REQUIRED]\\nA DatabaseInput object specifying the new definition of the metadata database in the catalog.\\n\\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\\n\\nDescription (string) --A description of the database.\\n\\nLocationUri (string) --The location of the database (for example, an HDFS path).\\n\\nParameters (dict) --These key-value pairs define parameters and properties of the database.\\nThese key-value pairs define parameters and properties of the database.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\\n\\n(dict) --Permissions granted to a principal.\\n\\nPrincipal (dict) --The principal who is granted permissions.\\n\\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\\n\\n\\n\\nPermissions (list) --The permissions that are granted to the principal.\\n\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_dev_endpoint(EndpointName=None, PublicKey=None, AddPublicKeys=None, DeletePublicKeys=None, CustomLibraries=None, UpdateEtlLibraries=None, DeleteArguments=None, AddArguments=None):\n \"\"\"\n Updates a specified development endpoint.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_dev_endpoint(\n EndpointName='string',\n PublicKey='string',\n AddPublicKeys=[\n 'string',\n ],\n DeletePublicKeys=[\n 'string',\n ],\n CustomLibraries={\n 'ExtraPythonLibsS3Path': 'string',\n 'ExtraJarsS3Path': 'string'\n },\n UpdateEtlLibraries=True|False,\n DeleteArguments=[\n 'string',\n ],\n AddArguments={\n 'string': 'string'\n }\n )\n \n \n :type EndpointName: string\n :param EndpointName: [REQUIRED]\\nThe name of the DevEndpoint to be updated.\\n\n\n :type PublicKey: string\n :param PublicKey: The public key for the DevEndpoint to use.\n\n :type AddPublicKeys: list\n :param AddPublicKeys: The list of public keys for the DevEndpoint to use.\\n\\n(string) --\\n\\n\n\n :type DeletePublicKeys: list\n :param DeletePublicKeys: The list of public keys to be deleted from the DevEndpoint .\\n\\n(string) --\\n\\n\n\n :type CustomLibraries: dict\n :param CustomLibraries: Custom Python or Java libraries to be loaded in the DevEndpoint .\\n\\nExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon Simple Storage Service (Amazon S3) bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\\n\\nNote\\nYou can only use pure Python libraries with a DevEndpoint . 
Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\\n\\n\\nExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\\n\\nNote\\nYou can only use pure Java\/Scala libraries with a DevEndpoint .\\n\\n\\n\\n\n\n :type UpdateEtlLibraries: boolean\n :param UpdateEtlLibraries: True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False if otherwise.\n\n :type DeleteArguments: list\n :param DeleteArguments: The list of argument keys to be deleted from the map of arguments used to configure the DevEndpoint .\\n\\n(string) --\\n\\n\n\n :type AddArguments: dict\n :param AddArguments: The map of arguments to add the map of arguments used to configure the DevEndpoint .\\nValid arguments are:\\n\\n'--enable-glue-datacatalog': ''\\n'GLUE_PYTHON_VERSION': '3'\\n'GLUE_PYTHON_VERSION': '2'\\n\\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.ValidationException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_job(JobName=None, JobUpdate=None):\n \"\"\"\n Updates an existing job definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_job(\n JobName='string',\n JobUpdate={\n 'Description': 'string',\n 'LogUri': 'string',\n 'Role': 'string',\n 'ExecutionProperty': {\n 'MaxConcurrentRuns': 123\n },\n 'Command': {\n 'Name': 'string',\n 'ScriptLocation': 'string',\n 'PythonVersion': 'string'\n },\n 'DefaultArguments': {\n 'string': 'string'\n },\n 'NonOverridableArguments': {\n 'string': 'string'\n },\n 'Connections': {\n 'Connections': [\n 'string',\n ]\n },\n 'MaxRetries': 123,\n 'AllocatedCapacity': 123,\n 'Timeout': 123,\n 'MaxCapacity': 123.0,\n 'WorkerType': 'Standard'|'G.1X'|'G.2X',\n 'NumberOfWorkers': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'GlueVersion': 'string'\n }\n )\n \n \n :type JobName: string\n :param JobName: [REQUIRED]\\nThe name of the job definition to update.\\n\n\n :type JobUpdate: dict\n :param JobUpdate: [REQUIRED]\\nSpecifies the values with which to update the job definition.\\n\\nDescription (string) --Description of the job being defined.\\n\\nLogUri (string) --This field is reserved for future use.\\n\\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required).\\n\\nExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\\n\\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\\n\\n\\n\\nCommand (dict) --The JobCommand that executes this job (required).\\n\\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . 
For a Python shell job, it must be pythonshell .\\n\\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\\n\\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\\n\\n\\n\\nDefaultArguments (dict) --The default arguments for this job.\\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nNonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nConnections (dict) --The connections used for this job.\\n\\nConnections (list) --A list of connections used by the job.\\n\\n(string) --\\n\\n\\n\\n\\nMaxRetries (integer) --The maximum number of times to retry this job if it fails.\\n\\nAllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.\\nThe number of AWS Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\n\\nTimeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\\n\\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\\n\\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\\n\\n\\nWorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\\n\\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. 
We recommend this worker type for memory-intensive jobs.\\n\\n\\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.\\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\\n\\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\\n\\nNotificationProperty (dict) --Specifies the configuration properties of a job notification.\\n\\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\\n\\n\\n\\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'JobName': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nJobName (string) --\nReturns the name of the updated job definition.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'JobName': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ConcurrentModificationException\n \n \"\"\"\n pass\n\ndef update_ml_transform(TransformId=None, Name=None, Description=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None):\n \"\"\"\n Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.\n After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_ml_transform(\n TransformId='string',\n Name='string',\n Description='string',\n Parameters={\n 'TransformType': 'FIND_MATCHES',\n 'FindMatchesParameters': {\n 'PrimaryKeyColumnName': 'string',\n 'PrecisionRecallTradeoff': 123.0,\n 'AccuracyCostTradeoff': 123.0,\n 'EnforceProvidedLabels': True|False\n }\n },\n Role='string',\n GlueVersion='string',\n MaxCapacity=123.0,\n WorkerType='Standard'|'G.1X'|'G.2X',\n NumberOfWorkers=123,\n Timeout=123,\n MaxRetries=123\n )\n \n \n :type TransformId: string\n :param TransformId: [REQUIRED]\\nA unique identifier that was generated when the transform was created.\\n\n\n :type Name: string\n :param Name: The unique name that you gave the transform when you created it.\n\n :type Description: string\n :param Description: A description of the transform. The default is an empty string.\n\n :type Parameters: dict\n :param Parameters: The configuration parameters that are specific to the transform type (algorithm) used. 
Conditionally dependent on the transform type.\\n\\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\\n\\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\\n\\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\\n\\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\\nThe precision metric indicates how often your model is correct when it predicts a match.\\nThe recall metric indicates that for an actual match, how often your model predicts the match.\\n\\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\\nCost measures how many compute resources, and thus money, are consumed to run the transform.\\n\\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\\nNote that setting this value to true may increase the conflation execution time.\\n\\n\\n\\n\\n\n\n :type Role: string\n :param Role: The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.\n\n :type GlueVersion: string\n :param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\n :type MaxCapacity: float\n :param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\\n\n\n :type WorkerType: string\n :param WorkerType: The type of predefined worker that is allocated when this task runs. 
Accepts a value of Standard, G.1X, or G.2X.\\n\\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\\n\\n\n\n :type NumberOfWorkers: integer\n :param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.\n\n :type Timeout: integer\n :param Timeout: The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\n :type MaxRetries: integer\n :param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'TransformId': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nTransformId (string) --\nThe unique identifier for the transform that was updated.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.AccessDeniedException\n\n\n :return: {\n 'TransformId': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.AccessDeniedException\n \n \"\"\"\n pass\n\ndef update_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValueList=None, PartitionInput=None):\n \"\"\"\n Updates a partition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_partition(\n CatalogId='string',\n DatabaseName='string',\n TableName='string',\n PartitionValueList=[\n 'string',\n ],\n PartitionInput={\n 'Values': [\n 'string',\n ],\n 'LastAccessTime': datetime(2015, 1, 1),\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'Parameters': {\n 'string': 'string'\n },\n 'LastAnalyzedTime': datetime(2015, 1, 1)\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the partition to be updated resides. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which the table in question resides.\\n\n\n :type TableName: string\n :param TableName: [REQUIRED]\\nThe name of the table in which the partition to be updated is located.\\n\n\n :type PartitionValueList: list\n :param PartitionValueList: [REQUIRED]\\nA list of the values defining the partition.\\n\\n(string) --\\n\\n\n\n :type PartitionInput: dict\n :param PartitionInput: [REQUIRED]\\nThe new partition object to update the partition to.\\n\\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\\n\\n(string) --\\n\\n\\nLastAccessTime (datetime) --The last time at which the partition was accessed.\\n\\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\\n\\nColumns (list) --A list of the Columns in the table.\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\\n\\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\\n\\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\\n\\nCompressed (boolean) --\\nTrue if the data in the table is compressed, or False if not.\\n\\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\\n\\nSerdeInfo (dict) --The serialization\/deserialization (SerDe) information.\\n\\nName (string) --Name of the SerDe.\\n\\nSerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\\n\\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\\n\\n(string) --\\n\\n\\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\\n\\n(dict) --Specifies the sort order of a sorted column.\\n\\nColumn (string) -- [REQUIRED]The name of the column.\\n\\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\\n\\n\\n\\n\\n\\nParameters (dict) --The user-supplied properties in key-value form.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\\n\\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\\n\\n(string) --\\n\\n\\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\\n\\n(string) --\\n\\n\\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nStoredAsSubDirectories (boolean) --\\nTrue if the table data is stored in subdirectories, or False if not.\\n\\n\\n\\nParameters (dict) --These key-value pairs define partition parameters.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_table(CatalogId=None, DatabaseName=None, TableInput=None, SkipArchive=None):\n \"\"\"\n Updates a metadata table in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_table(\n CatalogId='string',\n DatabaseName='string',\n TableInput={\n 'Name': 'string',\n 'Description': 'string',\n 'Owner': 'string',\n 'LastAccessTime': datetime(2015, 1, 1),\n 'LastAnalyzedTime': datetime(2015, 1, 1),\n 'Retention': 123,\n 'StorageDescriptor': {\n 'Columns': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'Location': 'string',\n 'InputFormat': 'string',\n 'OutputFormat': 'string',\n 'Compressed': True|False,\n 'NumberOfBuckets': 123,\n 'SerdeInfo': {\n 'Name': 'string',\n 'SerializationLibrary': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n 'BucketColumns': [\n 'string',\n ],\n 'SortColumns': [\n {\n 'Column': 'string',\n 'SortOrder': 123\n },\n ],\n 'Parameters': {\n 'string': 'string'\n },\n 'SkewedInfo': {\n 'SkewedColumnNames': [\n 'string',\n ],\n 'SkewedColumnValues': [\n 'string',\n ],\n 'SkewedColumnValueLocationMaps': {\n 'string': 'string'\n }\n },\n 'StoredAsSubDirectories': True|False\n },\n 'PartitionKeys': [\n {\n 'Name': 'string',\n 'Type': 'string',\n 'Comment': 'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n ],\n 'ViewOriginalText': 'string',\n 'ViewExpandedText': 'string',\n 'TableType': 
'string',\n 'Parameters': {\n 'string': 'string'\n }\n },\n SkipArchive=True|False\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\\n\n\n :type TableInput: dict\n :param TableInput: [REQUIRED]\\nAn updated TableInput object to define the metadata table in the catalog.\\n\\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\\n\\nDescription (string) --A description of the table.\\n\\nOwner (string) --The table owner.\\n\\nLastAccessTime (datetime) --The last time that the table was accessed.\\n\\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\\n\\nRetention (integer) --The retention time for this table.\\n\\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\\n\\nColumns (list) --A list of the Columns in the table.\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\\n\\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\\n\\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\\n\\nCompressed (boolean) --\\nTrue if the data in the table is compressed, or False if not.\\n\\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\\n\\nSerdeInfo (dict) --The serialization\/deserialization (SerDe) information.\\n\\nName (string) --Name of the SerDe.\\n\\nSerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\\n\\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\\n\\n(string) --\\n\\n\\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\\n\\n(dict) --Specifies the sort order of a sorted column.\\n\\nColumn (string) -- [REQUIRED]The name of the column.\\n\\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\\n\\n\\n\\n\\n\\nParameters (dict) --The user-supplied properties in key-value form.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\\n\\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\\n\\n(string) --\\n\\n\\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\\n\\n(string) --\\n\\n\\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\nStoredAsSubDirectories (boolean) --\\nTrue if the table data is stored in subdirectories, or False if not.\\n\\n\\n\\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:\\n\\n'PartitionKeys': []\\n\\n(dict) --A column in a Table .\\n\\nName (string) -- [REQUIRED]The name of the Column .\\n\\nType (string) --The data type of the Column .\\n\\nComment (string) --A free-form text comment.\\n\\nParameters (dict) --These key-value pairs define properties associated with the column.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\\n\\n\\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\\n\\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\\n\\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\\n\\nParameters (dict) --These key-value pairs define properties associated with the table.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\n\\n\n\n :type SkipArchive: boolean\n :param SkipArchive: By default, UpdateTable always creates an archived version of the table before updating it. 
However, if skipArchive is set to true, UpdateTable does not create the archived version.\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\nGlue.Client.exceptions.ResourceNumberLimitExceededException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_trigger(Name=None, TriggerUpdate=None):\n \"\"\"\n Updates a trigger definition.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_trigger(\n Name='string',\n TriggerUpdate={\n 'Name': 'string',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nThe name of the trigger to update.\\n\n\n :type TriggerUpdate: dict\n :param TriggerUpdate: [REQUIRED]\\nThe new values with which to update the trigger.\\n\\nName (string) --Reserved for future use.\\n\\nDescription (string) --A description of this trigger.\\n\\nSchedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\\n\\nActions (list) --The actions initiated by this trigger.\\n\\n(dict) --Defines an action to be initiated by a trigger.\\n\\nJobName (string) --The name of a job to be executed.\\n\\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). 
This overrides the timeout value set in the parent job.\\n\\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\\n\\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\\n\\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\\n\\n\\n\\nCrawlerName (string) --The name of the crawler to be used with this action.\\n\\n\\n\\n\\n\\nPredicate (dict) --The predicate of this trigger, which defines when it will fire.\\n\\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\\n\\nConditions (list) --A list of the conditions that determine when the trigger will fire.\\n\\n(dict) --Defines a condition under which a trigger fires.\\n\\nLogicalOperator (string) --A logical operator.\\n\\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\\n\\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\\n\\nCrawlerName (string) --The name of the crawler to which this condition applies.\\n\\nCrawlState (string) --The state of the crawler to which this condition applies.\\n\\n\\n\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n}\n\n\nResponse Structure\n\n(dict) --\n\nTrigger (dict) --\nThe resulting trigger definition.\n\nName (string) --\nThe name of the trigger.\n\nWorkflowName (string) --\nThe name of the workflow associated with the trigger.\n\nId (string) --\nReserved for future use.\n\nType (string) --\nThe type of trigger that this is.\n\nState (string) --\nThe current state of the trigger.\n\nDescription (string) --\nA description of this trigger.\n\nSchedule (string) --\nA cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --\nThe actions initiated by this trigger.\n\n(dict) --\nDefines an action to be initiated by a trigger.\n\nJobName (string) --\nThe name of a job to be executed.\n\nArguments (dict) --\nThe job arguments used when this trigger fires. 
For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --\nThe JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --\nThe name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --\nSpecifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --\nAfter a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --\nThe name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --\nThe predicate of this trigger, which defines when it will fire.\n\nLogical (string) --\nAn optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --\nA list of the conditions that determine when the trigger will fire.\n\n(dict) --\nDefines a condition under which a trigger fires.\n\nLogicalOperator (string) --\nA logical operator.\n\nJobName (string) --\nThe name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --\nThe condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . 
The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --\nThe name of the crawler to which this condition applies.\n\nCrawlState (string) --\nThe state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Trigger': {\n 'Name': 'string',\n 'WorkflowName': 'string',\n 'Id': 'string',\n 'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',\n 'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',\n 'Description': 'string',\n 'Schedule': 'string',\n 'Actions': [\n {\n 'JobName': 'string',\n 'Arguments': {\n 'string': 'string'\n },\n 'Timeout': 123,\n 'SecurityConfiguration': 'string',\n 'NotificationProperty': {\n 'NotifyDelayAfter': 123\n },\n 'CrawlerName': 'string'\n },\n ],\n 'Predicate': {\n 'Logical': 'AND'|'ANY',\n 'Conditions': [\n {\n 'LogicalOperator': 'EQUALS',\n 'JobName': 'string',\n 'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',\n 'CrawlerName': 'string',\n 'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'\n },\n ]\n }\n }\n }\n \n \n :returns: \n (string) --\n (string) --\n \n \n \n \"\"\"\n pass\n\ndef update_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None, FunctionInput=None):\n \"\"\"\n Updates an existing function definition in the Data Catalog.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_user_defined_function(\n CatalogId='string',\n DatabaseName='string',\n FunctionName='string',\n FunctionInput={\n 'FunctionName': 'string',\n 'ClassName': 'string',\n 'OwnerName': 'string',\n 'OwnerType': 'USER'|'ROLE'|'GROUP',\n 'ResourceUris': [\n {\n 'ResourceType': 'JAR'|'FILE'|'ARCHIVE',\n 'Uri': 'string'\n },\n ]\n }\n )\n \n \n :type CatalogId: string\n :param CatalogId: The ID of the Data Catalog where the function to be updated is located. 
If none is provided, the AWS account ID is used by default.\n\n :type DatabaseName: string\n :param DatabaseName: [REQUIRED]\\nThe name of the catalog database where the function to be updated is located.\\n\n\n :type FunctionName: string\n :param FunctionName: [REQUIRED]\\nThe name of the function.\\n\n\n :type FunctionInput: dict\n :param FunctionInput: [REQUIRED]\\nA FunctionInput object that redefines the function in the Data Catalog.\\n\\nFunctionName (string) --The name of the function.\\n\\nClassName (string) --The Java class that contains the function code.\\n\\nOwnerName (string) --The owner of the function.\\n\\nOwnerType (string) --The owner type.\\n\\nResourceUris (list) --The resource URIs for the function.\\n\\n(dict) --The URIs for function resources.\\n\\nResourceType (string) --The type of the resource.\\n\\nUri (string) --The URI for accessing the resource.\\n\\n\\n\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{}\n\n\nResponse Structure\n\n(dict) --\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.GlueEncryptionException\n\n\n :return: {}\n \n \n :returns: \n (dict) --\n \n \"\"\"\n pass\n\ndef update_workflow(Name=None, Description=None, DefaultRunProperties=None):\n \"\"\"\n Updates an existing workflow.\n See also: AWS API Documentation\n \n Exceptions\n \n :example: response = client.update_workflow(\n Name='string',\n Description='string',\n DefaultRunProperties={\n 'string': 'string'\n }\n )\n \n \n :type Name: string\n :param Name: [REQUIRED]\\nName of the workflow to be updated.\\n\n\n :type Description: string\n :param Description: The description of the workflow.\n\n :type DefaultRunProperties: dict\n :param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\\n\\n(string) --\\n(string) --\\n\\n\\n\\n\n\n :rtype: dict\n\nReturnsResponse Syntax\n{\n 'Name': 'string'\n}\n\n\nResponse Structure\n\n(dict) --\n\nName (string) --\nThe name of the workflow which was specified in input.\n\n\n\n\n\n\n\nExceptions\n\nGlue.Client.exceptions.InvalidInputException\nGlue.Client.exceptions.EntityNotFoundException\nGlue.Client.exceptions.InternalServiceException\nGlue.Client.exceptions.OperationTimeoutException\nGlue.Client.exceptions.ConcurrentModificationException\n\n\n :return: {\n 'Name': 'string'\n }\n \n \n :returns: \n Glue.Client.exceptions.InvalidInputException\n Glue.Client.exceptions.EntityNotFoundException\n Glue.Client.exceptions.InternalServiceException\n Glue.Client.exceptions.OperationTimeoutException\n Glue.Client.exceptions.ConcurrentModificationException\n \n \"\"\"\n pass\n\n","license":"mit"} {"repo_name":"alekz112\/statsmodels","path":"statsmodels\/datasets\/tests\/test_utils.py","copies":"26","size":"1697","content":"import os\nimport sys\nfrom statsmodels.datasets import get_rdataset, webuse, check_internet\nfrom numpy.testing import assert_, assert_array_equal, dec\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef test_get_rdataset():\n # smoke test\n if sys.version_info[0] >= 3:\n #NOTE: there's no way to test both since the cached files were\n #created with Python 2.x, they're strings, but Python 3 expects\n #bytes and the index file path is hard-coded so both can't live\n #side by side\n pass\n #duncan = get_rdataset(\"Duncan-py3\", \"car\", cache=cur_dir)\n else:\n duncan = 
get_rdataset(\"Duncan\", \"car\", cache=cur_dir)\n assert_(duncan.from_cache)\n\n#internet_available = check_internet()\n#@dec.skipif(not internet_available)\ndef t_est_webuse():\n # test copied and adjusted from iolib\/tests\/test_foreign\n from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2\n #base_gh = \"http:\/\/github.com\/statsmodels\/statsmodels\/raw\/master\/statsmodels\/datasets\/macrodata\/\"\n base_gh = \"http:\/\/statsmodels.sourceforge.net\/devel\/_static\/\"\n res1 = webuse('macrodata', baseurl=base_gh, as_df=False)\n assert_array_equal(res1 == res2, True)\n\n#@dec.skipif(not internet_available)\ndef t_est_webuse_pandas():\n # test copied and adjusted from iolib\/tests\/test_foreign\n from pandas.util.testing import assert_frame_equal\n from statsmodels.datasets import macrodata\n dta = macrodata.load_pandas().data\n base_gh = \"http:\/\/github.com\/statsmodels\/statsmodels\/raw\/master\/statsmodels\/datasets\/macrodata\/\"\n res1 = webuse('macrodata', baseurl=base_gh)\n res1 = res1.astype(float)\n assert_frame_equal(res1, dta)\n","license":"bsd-3-clause"} {"repo_name":"vene\/ambra","path":"ambra\/cross_validation.py","copies":"1","size":"9371","content":"import numbers\nimport time\n\nimport numpy as np\n\nfrom sklearn.utils import safe_indexing\nfrom sklearn.base import is_classifier, clone\nfrom sklearn.metrics.scorer import check_scoring\nfrom sklearn.externals.joblib import Parallel, delayed, logger\nfrom ambra.backports import _num_samples, indexable\nfrom sklearn.cross_validation import check_cv\n\ndef _safe_split(estimator, X, y, indices, train_indices=None):\n \"\"\"Create subset of dataset and properly handle kernels.\"\"\"\n if hasattr(estimator, 'kernel') and callable(estimator.kernel):\n # cannot compute the kernel values with custom function\n raise ValueError(\"Cannot use a custom kernel function. \"\n \"Precompute the kernel matrix instead.\")\n\n if not hasattr(X, \"shape\"):\n if getattr(estimator, \"_pairwise\", False):\n raise ValueError(\"Precomputed kernels or affinity matrices have \"\n \"to be passed as arrays or sparse matrices.\")\n X_subset = [X[idx] for idx in indices]\n else:\n if getattr(estimator, \"_pairwise\", False):\n # X is a precomputed square kernel matrix\n if X.shape[0] != X.shape[1]:\n raise ValueError(\"X should be a square kernel matrix\")\n if train_indices is None:\n X_subset = X[np.ix_(indices, indices)]\n else:\n X_subset = X[np.ix_(indices, train_indices)]\n else:\n X_subset = safe_indexing(X, indices)\n\n if y is not None:\n y_subset = safe_indexing(y, indices)\n else:\n y_subset = None\n\n return X_subset, y_subset\n\n\ndef _score(estimator, X_test, y_test, scorer, **params):\n \"\"\"Compute the score of an estimator on a given test set.\"\"\"\n if y_test is None:\n score = scorer(estimator, X_test, **params)\n else:\n score = scorer(estimator, X_test, y_test, **params)\n if not isinstance(score, numbers.Number):\n raise ValueError(\"scoring must return a number, got %s (%s) instead.\"\n % (str(score), type(score)))\n return score\n\n\ndef cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,\n verbose=0, fit_params=None, pre_dispatch='2*n_jobs',\n scorer_params=None):\n \"\"\"Evaluate a score by cross-validation\n\n Parameters\n ----------\n estimator : estimator object implementing 'fit'\n The object to use to fit the data.\n\n X : array-like\n The data to fit. 
Can be, for example a list, or an array at least 2d.\n\n y : array-like, optional, default: None\n The target variable to try to predict in the case of\n supervised learning.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object \/ function with signature\n ``scorer(estimator, X, y)``.\n\n cv : cross-validation generator or int, optional, default: None\n A cross-validation generator to use. If int, determines\n the number of folds in StratifiedKFold if y is binary\n or multiclass and estimator is a classifier, or the number\n of folds in KFold otherwise. If None, it is equivalent to cv=3.\n\n n_jobs : integer, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs'.\n\n verbose : integer, optional\n The verbosity level.\n\n fit_params : dict, optional\n Parameters to pass to the fit method of the estimator.\n\n pre_dispatch : int, or string, optional\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n - None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n - An int, giving the exact number of total jobs that are\n spawned\n\n - A string, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n scorer_params : dict, optional\n Parameters to pass to the scorer. Can be used for sample weights\n and sample groups.\n\n Returns\n -------\n scores : array of float, shape=(len(list(cv)),)\n Array of scores of the estimator for each run of the cross validation.\n \"\"\"\n X, y = indexable(X, y)\n\n cv = check_cv(cv, X, y, classifier=is_classifier(estimator))\n scorer = check_scoring(estimator, scoring=scoring)\n # We clone the estimator to make sure that all the folds are\n # independent, and that it is pickle-able.\n parallel = Parallel(n_jobs=n_jobs, verbose=verbose,\n pre_dispatch=pre_dispatch)\n scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y,\n scorer, train, test, verbose,\n None, fit_params, scorer_params)\n for train, test in cv)\n return np.array(scores)[:, 0]\n\n\ndef _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,\n fit_params, scorer_params, return_train_score=False,\n return_parameters=False):\n \"\"\"Fit estimator and compute scores for a given dataset split.\n\n Parameters\n ----------\n estimator : estimator object implementing 'fit'\n The object to use to fit the data.\n\n X : array-like of shape at least 2D\n The data to fit.\n\n y : array-like or None\n The target variable to try to predict in the case of\n supervised learning.\n\n scoring : callable\n A scorer callable object \/ function with signature\n ``scorer(estimator, X, y)``.\n\n train : array-like, shape = (n_train_samples,)\n Indices of training samples.\n\n test : array-like, shape = (n_test_samples,)\n Indices of test samples.\n\n verbose : integer\n The verbosity level.\n\n parameters : dict or None\n Parameters to be set on the estimator.\n\n fit_params : dict or None\n Parameters that will be passed to ``estimator.fit``.\n\n scorer_params : dict or None\n Parameters that will be passed to the scorer.\n\n return_train_score : boolean, optional, default: False\n Compute and return score on training set.\n\n return_parameters : boolean, 
optional, default: False\n Return parameters that has been used for the estimator.\n\n Returns\n -------\n train_score : float, optional\n Score on training set, returned only if `return_train_score` is `True`.\n\n test_score : float\n Score on test set.\n\n n_test_samples : int\n Number of test samples.\n\n scoring_time : float\n Time spent for fitting and scoring in seconds.\n\n parameters : dict or None, optional\n The parameters that have been evaluated.\n \"\"\"\n if verbose > 1:\n if parameters is None:\n msg = \"no parameters to be set\"\n else:\n msg = '%s' % (', '.join('%s=%s' % (k, v)\n for k, v in parameters.items()))\n print(\"[CV] %s %s\" % (msg, (64 - len(msg)) * '.'))\n\n # Adjust lenght of sample weights\n n_samples = _num_samples(X)\n fit_params = fit_params if fit_params is not None else {}\n fit_params = dict([(k, np.asarray(v)[train]\n if hasattr(v, '__len__') and len(v) == n_samples else v)\n for k, v in fit_params.items()])\n\n # Same, but take both slices\n scorer_params = scorer_params if scorer_params is not None else {}\n train_scorer_params = dict([(k, np.asarray(v)[train]\n if hasattr(v, '__len__')\n and len(v) == n_samples\n else v)\n for k, v in scorer_params.items()])\n test_scorer_params = dict([(k, np.asarray(v)[test]\n if hasattr(v, '__len__')\n and len(v) == n_samples\n else v)\n for k, v in scorer_params.items()])\n\n if parameters is not None:\n estimator.set_params(**parameters)\n\n start_time = time.time()\n\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, y_test = _safe_split(estimator, X, y, test, train)\n\n if y_train is None:\n estimator.fit(X_train, **fit_params)\n else:\n estimator.fit(X_train, y_train, **fit_params)\n test_score = _score(estimator, X_test, y_test, scorer,\n **test_scorer_params)\n if return_train_score:\n train_score = _score(estimator, X_train, y_train, scorer,\n **train_scorer_params)\n\n scoring_time = time.time() - start_time\n\n if verbose > 2:\n msg += \", score=%f\" % test_score\n if verbose > 1:\n end_msg = \"%s -%s\" % (msg, logger.short_format_time(scoring_time))\n print(\"[CV] %s %s\" % ((64 - len(end_msg)) * '.', end_msg))\n\n ret = [train_score] if return_train_score else []\n ret.extend([test_score, _num_samples(X_test), scoring_time])\n if return_parameters:\n ret.append(parameters)\n return ret\n\n\n","license":"bsd-2-clause"} {"repo_name":"jlegendary\/scikit-learn","path":"sklearn\/decomposition\/tests\/test_dict_learning.py","copies":"47","size":"8095","content":"import numpy as np\n\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import ignore_warnings\n\nfrom sklearn.decomposition import DictionaryLearning\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\nfrom sklearn.decomposition import SparseCoder\nfrom sklearn.decomposition import dict_learning_online\nfrom sklearn.decomposition import sparse_encode\n\n\nrng_global = np.random.RandomState(0)\nn_samples, n_features = 10, 8\nX = rng_global.randn(n_samples, n_features)\n\n\ndef test_dict_learning_shapes():\n n_components = 5\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\n assert_true(dico.components_.shape == (n_components, n_features))\n\n\ndef test_dict_learning_overcomplete():\n n_components = 12\n dico = 
DictionaryLearning(n_components, random_state=0).fit(X)\n assert_true(dico.components_.shape == (n_components, n_features))\n\n\ndef test_dict_learning_reconstruction():\n n_components = 12\n dico = DictionaryLearning(n_components, transform_algorithm='omp',\n transform_alpha=0.001, random_state=0)\n code = dico.fit(X).transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X)\n\n dico.set_params(transform_algorithm='lasso_lars')\n code = dico.transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)\n\n # used to test lars here too, but there's no guarantee the number of\n # nonzero atoms is right.\n\n\ndef test_dict_learning_reconstruction_parallel():\n # regression test that parallel reconstruction works with n_jobs=-1\n n_components = 12\n dico = DictionaryLearning(n_components, transform_algorithm='omp',\n transform_alpha=0.001, random_state=0, n_jobs=-1)\n code = dico.fit(X).transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X)\n\n dico.set_params(transform_algorithm='lasso_lars')\n code = dico.transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)\n\n\ndef test_dict_learning_nonzero_coefs():\n n_components = 4\n dico = DictionaryLearning(n_components, transform_algorithm='lars',\n transform_n_nonzero_coefs=3, random_state=0)\n code = dico.fit(X).transform(X[1])\n assert_true(len(np.flatnonzero(code)) == 3)\n\n dico.set_params(transform_algorithm='omp')\n code = dico.transform(X[1])\n assert_equal(len(np.flatnonzero(code)), 3)\n\n\ndef test_dict_learning_unknown_fit_algorithm():\n n_components = 5\n dico = DictionaryLearning(n_components, fit_algorithm='')\n assert_raises(ValueError, dico.fit, X)\n\n\ndef test_dict_learning_split():\n n_components = 5\n dico = DictionaryLearning(n_components, transform_algorithm='threshold',\n random_state=0)\n code = dico.fit(X).transform(X)\n dico.split_sign = True\n split_code = dico.transform(X)\n\n assert_array_equal(split_code[:, :n_components] -\n split_code[:, n_components:], code)\n\n\ndef test_dict_learning_online_shapes():\n rng = np.random.RandomState(0)\n n_components = 8\n code, dictionary = dict_learning_online(X, n_components=n_components,\n alpha=1, random_state=rng)\n assert_equal(code.shape, (n_samples, n_components))\n assert_equal(dictionary.shape, (n_components, n_features))\n assert_equal(np.dot(code, dictionary).shape, X.shape)\n\n\ndef test_dict_learning_online_verbosity():\n n_components = 5\n # test verbosity\n from sklearn.externals.six.moves import cStringIO as StringIO\n import sys\n\n old_stdout = sys.stdout\n try:\n sys.stdout = StringIO()\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,\n random_state=0)\n dico.fit(X)\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,\n random_state=0)\n dico.fit(X)\n dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,\n random_state=0)\n dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,\n random_state=0)\n finally:\n sys.stdout = old_stdout\n\n assert_true(dico.components_.shape == (n_components, n_features))\n\n\ndef test_dict_learning_online_estimator_shapes():\n n_components = 5\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)\n dico.fit(X)\n assert_true(dico.components_.shape == (n_components, n_features))\n\n\ndef test_dict_learning_online_overcomplete():\n n_components = 12\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20,\n random_state=0).fit(X)\n 
assert_true(dico.components_.shape == (n_components, n_features))\n\n\ndef test_dict_learning_online_initialization():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features)\n dico = MiniBatchDictionaryLearning(n_components, n_iter=0,\n dict_init=V, random_state=0).fit(X)\n assert_array_equal(dico.components_, V)\n\n\ndef test_dict_learning_online_partial_fit():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V \/= np.sum(V ** 2, axis=1)[:, np.newaxis]\n dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),\n batch_size=1,\n alpha=1, shuffle=False, dict_init=V,\n random_state=0).fit(X)\n dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,\n n_iter=1, dict_init=V,\n random_state=0)\n for i in range(10):\n for sample in X:\n dict2.partial_fit(sample)\n\n assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==\n 0))\n assert_array_almost_equal(dict1.components_, dict2.components_,\n decimal=2)\n\n\ndef test_sparse_encode_shapes():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V \/= np.sum(V ** 2, axis=1)[:, np.newaxis]\n for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):\n code = sparse_encode(X, V, algorithm=algo)\n assert_equal(code.shape, (n_samples, n_components))\n\n\ndef test_sparse_encode_error():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V \/= np.sum(V ** 2, axis=1)[:, np.newaxis]\n code = sparse_encode(X, V, alpha=0.001)\n assert_true(not np.all(code == 0))\n assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)\n\n\ndef test_sparse_encode_error_default_sparsity():\n rng = np.random.RandomState(0)\n X = rng.randn(100, 64)\n D = rng.randn(2, 64)\n code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',\n n_nonzero_coefs=None)\n assert_equal(code.shape, (100, 2))\n\n\ndef test_unknown_method():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n assert_raises(ValueError, sparse_encode, X, V, algorithm=\"\")\n\n\ndef test_sparse_coder_estimator():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V \/= np.sum(V ** 2, axis=1)[:, np.newaxis]\n code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',\n transform_alpha=0.001).transform(X)\n assert_true(not np.all(code == 0))\n assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)\n","license":"bsd-3-clause"} {"repo_name":"glouppe\/scikit-learn","path":"examples\/model_selection\/plot_roc.py","copies":"49","size":"5041","content":"\"\"\"\n=======================================\nReceiver Operating Characteristic (ROC)\n=======================================\n\nExample of Receiver Operating Characteristic (ROC) metric to evaluate\nclassifier output quality.\n\nROC curves typically feature true positive rate on the Y axis, and false\npositive rate on the X axis. This means that the top left corner of the plot is\nthe \"ideal\" point - a false positive rate of zero, and a true positive rate of\none. 
This is not very realistic, but it does mean that a larger area under the\ncurve (AUC) is usually better.\n\nThe \"steepness\" of ROC curves is also important, since it is ideal to maximize\nthe true positive rate while minimizing the false positive rate.\n\nMulticlass settings\n-------------------\n\nROC curves are typically used in binary classification to study the output of\na classifier. In order to extend ROC curve and ROC area to multi-class\nor multi-label classification, it is necessary to binarize the output. One ROC\ncurve can be drawn per label, but one can also draw a ROC curve by considering\neach element of the label indicator matrix as a binary prediction\n(micro-averaging).\n\nAnother evaluation measure for multi-class classification is\nmacro-averaging, which gives equal weight to the classification of each\nlabel.\n\n.. note::\n\n See also :func:`sklearn.metrics.roc_auc_score`,\n :ref:`example_model_selection_plot_roc_crossval.py`.\n\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\n\n# Import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\n# Binarize the output\ny = label_binarize(y, classes=[0, 1, 2])\nn_classes = y.shape[1]\n\n# Add noisy features to make the problem harder\nrandom_state = np.random.RandomState(0)\nn_samples, n_features = X.shape\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# shuffle and split training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n\n# Learn to predict each class against the other\nclassifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,\n random_state=random_state))\ny_score = classifier.fit(X_train, y_train).decision_function(X_test)\n\n# Compute ROC curve and ROC area for each class\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n# Compute micro-average ROC curve and ROC area\nfpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\nroc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n\n##############################################################################\n# Plot of a ROC curve for a specific class\nplt.figure()\nlw = 2\nplt.plot(fpr[2], tpr[2], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n\n##############################################################################\n# Plot ROC curves for the multiclass problem\n\n# Compute macro-average ROC curve and ROC area\n\n# First aggregate all false positive rates\nall_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n# Then interpolate all ROC curves at this points\nmean_tpr = np.zeros_like(all_fpr)\nfor i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n# Finally average it and compute AUC\nmean_tpr \/= 
n_classes\n\nfpr[\"macro\"] = all_fpr\ntpr[\"macro\"] = mean_tpr\nroc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n# Plot all ROC curves\nplt.figure()\nplt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\nplt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\ncolors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\nfor i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(i, roc_auc[i]))\n\nplt.plot([0, 1], [0, 1], 'k--', lw=lw)\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Some extension of Receiver operating characteristic to multi-class')\nplt.legend(loc=\"lower right\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"dahlstrom-g\/intellij-community","path":"python\/helpers\/pydev\/_pydevd_bundle\/pydevd_vars.py","copies":"7","size":"26282","content":"\"\"\" pydevd_vars deals with variables:\n resolution\/conversion to XML.\n\"\"\"\nimport math\nimport pickle\n\nfrom _pydev_bundle.pydev_imports import quote\nfrom _pydev_imps._pydev_saved_modules import thread\nfrom _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, NUMPY_NUMERIC_TYPES, NUMPY_FLOATING_POINT_TYPES\nfrom _pydevd_bundle.pydevd_custom_frames import get_custom_frame\nfrom _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\nimport sys # @Reimport\n\ntry:\n from collections import OrderedDict\nexcept:\n OrderedDict = dict\n\nfrom _pydev_imps._pydev_saved_modules import threading\nimport traceback\nfrom _pydevd_bundle import pydevd_save_locals\nfrom _pydev_bundle.pydev_imports import Exec, execfile\nfrom _pydevd_bundle.pydevd_utils import VariableWithOffset\n\nSENTINEL_VALUE = []\nDEFAULT_DF_FORMAT = \"s\"\n\n# ------------------------------------------------------------------------------------------------------ class for errors\n\nclass VariableError(RuntimeError): pass\n\n\nclass FrameNotFoundError(RuntimeError): pass\n\n\ndef _iter_frames(initialFrame):\n '''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''\n # cannot use yield\n frames = []\n\n while initialFrame is not None:\n frames.append(initialFrame)\n initialFrame = initialFrame.f_back\n\n return frames\n\n\ndef dump_frames(thread_id):\n sys.stdout.write('dumping frames\\n')\n if thread_id != get_current_thread_id(threading.currentThread()):\n raise VariableError(\"find_frame: must execute on same thread\")\n\n curFrame = get_frame()\n for frame in _iter_frames(curFrame):\n sys.stdout.write('%s\\n' % pickle.dumps(frame))\n\n\n# ===============================================================================\n# AdditionalFramesContainer\n# ===============================================================================\nclass AdditionalFramesContainer:\n lock = thread.allocate_lock()\n additional_frames = {} # dict of dicts\n\n\ndef add_additional_frame_by_id(thread_id, frames_by_id):\n AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id\n\n\naddAdditionalFrameById = add_additional_frame_by_id # Backward 
compatibility\n\n\ndef remove_additional_frame_by_id(thread_id):\n del AdditionalFramesContainer.additional_frames[thread_id]\n\n\nremoveAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility\n\n\ndef has_additional_frames_by_id(thread_id):\n return thread_id in AdditionalFramesContainer.additional_frames\n\n\ndef get_additional_frames_by_id(thread_id):\n return AdditionalFramesContainer.additional_frames.get(thread_id)\n\n\ndef find_frame(thread_id, frame_id):\n \"\"\" returns a frame on the thread that has a given frame_id \"\"\"\n try:\n curr_thread_id = get_current_thread_id(threading.currentThread())\n if thread_id != curr_thread_id:\n try:\n return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.\n except:\n pass\n\n raise VariableError(\"find_frame: must execute on same thread (%s != %s)\" % (thread_id, curr_thread_id))\n\n lookingFor = int(frame_id)\n\n if AdditionalFramesContainer.additional_frames:\n if thread_id in AdditionalFramesContainer.additional_frames:\n frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)\n\n if frame is not None:\n return frame\n\n curFrame = get_frame()\n if frame_id == \"*\":\n return curFrame # any frame is specified with \"*\"\n\n frameFound = None\n\n for frame in _iter_frames(curFrame):\n if lookingFor == id(frame):\n frameFound = frame\n del frame\n break\n\n del frame\n\n # Important: python can hold a reference to the frame from the current context\n # if an exception is raised, so, if we don't explicitly add those deletes\n # we might have those variables living much more than we'd want to.\n\n # I.e.: sys.exc_info holding reference to frame that raises exception (so, other places\n # need to call sys.exc_clear())\n del curFrame\n\n if frameFound is None:\n msgFrames = ''\n i = 0\n\n for frame in _iter_frames(get_frame()):\n i += 1\n msgFrames += str(id(frame))\n if i % 5 == 0:\n msgFrames += '\\n'\n else:\n msgFrames += ' - '\n\n# Note: commented this error message out (it may commonly happen \n# if a message asking for a frame is issued while a thread is paused\n# but the thread starts running before the message is actually \n# handled).\n# Leaving code to uncomment during tests. 
\n# err_msg = '''find_frame: frame not found.\n# Looking for thread_id:%s, frame_id:%s\n# Current thread_id:%s, available frames:\n# %s\\n\n# ''' % (thread_id, lookingFor, curr_thread_id, msgFrames)\n# \n# sys.stderr.write(err_msg)\n return None\n\n return frameFound\n except:\n import traceback\n traceback.print_exc()\n return None\n\n\ndef getVariable(thread_id, frame_id, scope, attrs):\n \"\"\"\n returns the value of a variable\n\n :scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME\n\n BY_ID means we'll traverse the list of all objects alive to get the object.\n\n :attrs: after reaching the proper scope, we have to get the attributes until we find\n the proper location (i.e.: obj\\tattr1\\tattr2).\n\n :note: when BY_ID is used, the frame_id is considered the id of the object to find and\n not the frame (as we don't care about the frame in this case).\n \"\"\"\n if scope == 'BY_ID':\n if thread_id != get_current_thread_id(threading.currentThread()):\n raise VariableError(\"getVariable: must execute on same thread\")\n\n try:\n import gc\n objects = gc.get_objects()\n except:\n pass # Not all python variants have it.\n else:\n frame_id = int(frame_id)\n for var in objects:\n if id(var) == frame_id:\n if attrs is not None:\n attrList = attrs.split('\\t')\n for k in attrList:\n _type, _typeName, resolver = get_type(var)\n var = resolver.resolve(var, k)\n\n return var\n\n # If it didn't return previously, we coudn't find it by id (i.e.: alrceady garbage collected).\n sys.stderr.write('Unable to find object with id: %s\\n' % (frame_id,))\n return None\n\n frame = find_frame(thread_id, frame_id)\n if frame is None:\n return {}\n\n if attrs is not None:\n attrList = attrs.split('\\t')\n else:\n attrList = []\n\n for attr in attrList:\n attr.replace(\"@_@TAB_CHAR@_@\", '\\t')\n\n if scope == 'EXPRESSION':\n for count in xrange(len(attrList)):\n if count == 0:\n # An Expression can be in any scope (globals\/locals), therefore it needs to evaluated as an expression\n var = evaluate_expression(thread_id, frame_id, attrList[count], False)\n else:\n _type, _typeName, resolver = get_type(var)\n var = resolver.resolve(var, attrList[count])\n else:\n if scope == \"GLOBAL\":\n var = frame.f_globals\n del attrList[0] # globals are special, and they get a single dummy unused attribute\n else:\n # in a frame access both locals and globals as Python does\n var = {}\n var.update(frame.f_globals)\n var.update(frame.f_locals)\n\n for k in attrList:\n _type, _typeName, resolver = get_type(var)\n var = resolver.resolve(var, k)\n\n return var\n\n\ndef get_offset(attrs):\n \"\"\"\n Extract offset from the given attributes.\n\n :param attrs: The string of a compound variable fields split by tabs.\n If an offset is given, it must go the first element.\n :return: The value of offset if given or 0.\n \"\"\"\n offset = 0\n if attrs is not None:\n try:\n offset = int(attrs.split('\\t')[0])\n except ValueError:\n pass\n return offset\n\n\ndef resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):\n \"\"\"\n Resolve compound variable in debugger scopes by its name and attributes\n\n :param thread_id: id of the variable's thread\n :param frame_id: id of the variable's frame\n :param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME\n :param attrs: after reaching the proper scope, we have to get the attributes until we find\n the proper location (i.e.: obj\\tattr1\\tattr2)\n :return: a dictionary of variables's fields\n\n :note: PyCharm supports progressive loading of large collections and uses the 
`attrs`\n parameter to pass the offset, e.g. 300\\t\\\\obj\\tattr1\\tattr2 should return\n the value of attr2 starting from the 300th element. This hack makes it possible\n to add the support of progressive loading without extending of the protocol.\n \"\"\"\n offset = get_offset(attrs)\n\n orig_attrs, attrs = attrs, attrs.split('\\t', 1)[1] if offset else attrs\n\n var = getVariable(thread_id, frame_id, scope, attrs)\n\n try:\n _type, _typeName, resolver = get_type(var)\n return _typeName, resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)\n except:\n sys.stderr.write('Error evaluating: thread_id: %s\\nframe_id: %s\\nscope: %s\\nattrs: %s\\n' % (\n thread_id, frame_id, scope, orig_attrs,))\n traceback.print_exc()\n\n\ndef resolve_var_object(var, attrs):\n \"\"\"\n Resolve variable's attribute\n\n :param var: an object of variable\n :param attrs: a sequence of variable's attributes separated by \\t (i.e.: obj\\tattr1\\tattr2)\n :return: a value of resolved variable's attribute\n \"\"\"\n if attrs is not None:\n attr_list = attrs.split('\\t')\n else:\n attr_list = []\n for k in attr_list:\n type, _typeName, resolver = get_type(var)\n var = resolver.resolve(var, k)\n return var\n\n\ndef resolve_compound_var_object_fields(var, attrs):\n \"\"\"\n Resolve compound variable by its object and attributes\n\n :param var: an object of variable\n :param attrs: a sequence of variable's attributes separated by \\t (i.e.: obj\\tattr1\\tattr2)\n :return: a dictionary of variables's fields\n \"\"\"\n offset = get_offset(attrs)\n\n attrs = attrs.split('\\t', 1)[1] if offset else attrs\n\n attr_list = attrs.split('\\t')\n\n for k in attr_list:\n type, _typeName, resolver = get_type(var)\n var = resolver.resolve(var, k)\n\n try:\n type, _typeName, resolver = get_type(var)\n return resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)\n except:\n traceback.print_exc()\n\n\ndef custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):\n \"\"\"\n We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.\n\n code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.\n operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)\n \"\"\"\n expressionValue = getVariable(thread_id, frame_id, scope, attrs)\n\n try:\n namespace = {'__name__': ''}\n if style == \"EXECFILE\":\n namespace['__file__'] = code_or_file\n execfile(code_or_file, namespace, namespace)\n else: # style == EXEC\n namespace['__file__'] = ''\n Exec(code_or_file, namespace, namespace)\n\n return str(namespace[operation_fn_name](expressionValue))\n except:\n traceback.print_exc()\n\n\ndef eval_in_context(expression, globals, locals):\n result = None\n try:\n result = eval(expression, globals, locals)\n except Exception:\n s = StringIO()\n traceback.print_exc(file=s)\n result = s.getvalue()\n\n try:\n try:\n etype, value, tb = sys.exc_info()\n result = value\n finally:\n etype = value = tb = None\n except:\n pass\n\n result = ExceptionOnEvaluate(result)\n\n # Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...\n try:\n if '__' in expression:\n # Try to handle '__' name mangling...\n split = expression.split('.')\n curr = locals.get(split[0])\n for entry in split[1:]:\n if entry.startswith('__') and not hasattr(curr, entry):\n entry = '_%s%s' % (curr.__class__.__name__, entry)\n curr = getattr(curr, entry)\n\n 
result = curr\n except:\n pass\n return result\n\n\ndef evaluate_expression(thread_id, frame_id, expression, doExec):\n '''returns the result of the evaluated expression\n @param doExec: determines if we should do an exec or an eval\n '''\n frame = find_frame(thread_id, frame_id)\n if frame is None:\n return\n\n # Not using frame.f_globals because of https:\/\/sourceforge.net\/tracker2\/?func=detail&aid=2541355&group_id=85796&atid=577329\n # (Names not resolved in generator expression in method)\n # See message: http:\/\/mail.python.org\/pipermail\/python-list\/2009-January\/526522.html\n updated_globals = {}\n updated_globals.update(frame.f_globals)\n updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals\n\n try:\n expression = str(expression.replace('@LINE@', '\\n'))\n\n if doExec:\n try:\n # try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and\n # it will have whatever the user actually did)\n compiled = compile(expression, '', 'eval')\n except:\n Exec(expression, updated_globals, frame.f_locals)\n pydevd_save_locals.save_locals(frame)\n else:\n result = eval(compiled, updated_globals, frame.f_locals)\n if result is not None: # Only print if it's not None (as python does)\n sys.stdout.write('%s\\n' % (result,))\n return\n\n else:\n return eval_in_context(expression, updated_globals, frame.f_locals)\n finally:\n # Should not be kept alive if an exception happens and this frame is kept in the stack.\n del updated_globals\n del frame\n\n\ndef change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):\n '''Changes some attribute in a given frame.\n '''\n frame = find_frame(thread_id, frame_id)\n if frame is None:\n return\n\n try:\n expression = expression.replace('@LINE@', '\\n')\n\n if dbg.plugin and value is SENTINEL_VALUE:\n result = dbg.plugin.change_variable(frame, attr, expression)\n if result:\n return result\n\n if value is SENTINEL_VALUE:\n # It is possible to have variables with names like '.0', ',,,foo', etc in scope by setting them with\n # `sys._getframe().f_locals`. In particular, the '.0' variable name is used to denote the list iterator when we stop in\n # list comprehension expressions. This variable evaluates to 0. 
by `eval`, which is not what we want and this is the main\n # reason we have to check if the expression exists in the global and local scopes before trying to evaluate it.\n value = frame.f_locals.get(expression) or frame.f_globals.get(expression) or eval(expression, frame.f_globals, frame.f_locals)\n\n if attr[:7] == \"Globals\":\n attr = attr[8:]\n if attr in frame.f_globals:\n frame.f_globals[attr] = value\n return frame.f_globals[attr]\n else:\n if pydevd_save_locals.is_save_locals_available():\n frame.f_locals[attr] = value\n pydevd_save_locals.save_locals(frame)\n return frame.f_locals[attr]\n\n # default way (only works for changing it in the topmost frame)\n result = value\n Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)\n return result\n\n except Exception:\n traceback.print_exc()\n\n\nMAXIMUM_ARRAY_SIZE = float('inf')\n\n\ndef array_to_xml(array, name, roffset, coffset, rows, cols, format):\n array, xml, r, c, f = array_to_meta_xml(array, name, format)\n format = '%' + f\n if rows == -1 and cols == -1:\n rows = r\n cols = c\n\n rows = min(rows, MAXIMUM_ARRAY_SIZE)\n cols = min(cols, MAXIMUM_ARRAY_SIZE)\n\n # there is no obvious rule for slicing (at least 5 choices)\n if len(array) == 1 and (rows > 1 or cols > 1):\n array = array[0]\n if array.size > len(array):\n array = array[roffset:, coffset:]\n rows = min(rows, len(array))\n cols = min(cols, len(array[0]))\n if len(array) == 1:\n array = array[0]\n elif array.size == len(array):\n if roffset == 0 and rows == 1:\n array = array[coffset:]\n cols = min(cols, len(array))\n elif coffset == 0 and cols == 1:\n array = array[roffset:]\n rows = min(rows, len(array))\n\n def get_value(row, col):\n value = array\n if rows == 1 or cols == 1:\n if rows == 1 and cols == 1:\n value = array[0]\n else:\n value = array[(col if rows == 1 else row)]\n if \"ndarray\" in str(type(value)):\n value = value[0]\n else:\n value = array[row][col]\n return value\n xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)\n return xml\n\n\nclass ExceedingArrayDimensionsException(Exception):\n pass\n\n\ndef array_to_meta_xml(array, name, format):\n type = array.dtype.kind\n slice = name\n l = len(array.shape)\n\n # initial load, compute slice\n if format == '%':\n if l > 2:\n slice += '[0]' * (l - 2)\n for r in range(l - 2):\n array = array[0]\n if type == 'f':\n format = '.5f'\n elif type == 'i' or type == 'u':\n format = 'd'\n else:\n format = 's'\n else:\n format = format.replace('%', '')\n\n l = len(array.shape)\n reslice = \"\"\n if l > 2:\n raise ExceedingArrayDimensionsException()\n elif l == 1:\n # special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim\n # http:\/\/stackoverflow.com\/questions\/16837946\/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns\n # explanation: http:\/\/stackoverflow.com\/questions\/15165170\/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1\n # we use kind of a hack - get information about memory from C_CONTIGUOUS\n is_row = array.flags['C_CONTIGUOUS']\n\n if is_row:\n rows = 1\n cols = len(array)\n if cols < len(array):\n reslice = '[0:%s]' % (cols)\n array = array[0:cols]\n else:\n cols = 1\n rows = len(array)\n if rows < len(array):\n reslice = '[0:%s]' % (rows)\n array = array[0:rows]\n elif l == 2:\n rows = array.shape[-2]\n cols = array.shape[-1]\n if cols < array.shape[-1] or rows < array.shape[-2]:\n reslice = '[0:%s, 0:%s]' % (rows, cols)\n array = array[0:rows, 0:cols]\n\n # avoid slice 
duplication\n if not slice.endswith(reslice):\n slice += reslice\n\n bounds = (0, 0)\n if type in NUMPY_NUMERIC_TYPES and array.size != 0:\n bounds = (array.min(), array.max())\n return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format\n\n\ndef get_column_formatter_by_type(initial_format, column_type):\n if column_type in NUMPY_NUMERIC_TYPES and initial_format:\n if column_type in NUMPY_FLOATING_POINT_TYPES and initial_format.strip() == DEFAULT_DF_FORMAT:\n # use custom formatting for floats when default formatting is set\n return array_default_format(column_type)\n return initial_format\n else:\n return array_default_format(column_type)\n\n\ndef get_formatted_row_elements(row, iat, dim, cols, format, dtypes):\n for c in range(cols):\n val = iat[row, c] if dim > 1 else iat[row]\n col_formatter = get_column_formatter_by_type(format, dtypes[c])\n try:\n yield (\"%\" + col_formatter) % (val,)\n except TypeError:\n yield (\"%\" + DEFAULT_DF_FORMAT) % (val,)\n\n\ndef array_default_format(type):\n if type == 'f':\n return '.5f'\n elif type == 'i' or type == 'u':\n return 'd'\n else:\n return 's'\n\n\ndef get_label(label):\n return str(label) if not isinstance(label, tuple) else '\/'.join(map(str, label))\n\n\nDATAFRAME_HEADER_LOAD_MAX_SIZE = 100\n\n\ndef dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):\n \"\"\"\n :type df: pandas.core.frame.DataFrame\n :type name: str\n :type coffset: int\n :type roffset: int\n :type rows: int\n :type cols: int\n :type format: str\n\n\n \"\"\"\n original_df = df\n dim = len(df.axes)\n num_rows = df.shape[0]\n num_cols = df.shape[1] if dim > 1 else 1\n format = format.replace('%', '')\n\n if not format:\n if num_rows > 0 and num_cols == 1: # series or data frame with one column\n try:\n kind = df.dtype.kind\n except AttributeError:\n try:\n kind = df.dtypes[0].kind\n except (IndexError, KeyError):\n kind = 'O'\n format = array_default_format(kind)\n else:\n format = array_default_format(DEFAULT_DF_FORMAT)\n\n xml = slice_to_xml(name, num_rows, num_cols, format, \"\", (0, 0))\n\n if (rows, cols) == (-1, -1):\n rows, cols = num_rows, num_cols\n\n elif (rows, cols) == (0, 0):\n # return header only\n r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)\n c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)\n xml += header_data_to_xml(r, c, [\"\"] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)\n return xml\n\n rows = min(rows, MAXIMUM_ARRAY_SIZE)\n cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)\n # need to precompute column bounds here before slicing!\n col_bounds = [None] * cols\n dtypes = [None] * cols\n if dim > 1:\n for col in range(cols):\n dtype = df.dtypes.iloc[coffset + col].kind\n dtypes[col] = dtype\n if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:\n cvalues = df.iloc[:, coffset + col]\n bounds = (cvalues.min(), cvalues.max())\n else:\n bounds = (0, 0)\n col_bounds[col] = bounds\n else:\n dtype = df.dtype.kind\n dtypes[0] = dtype\n col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)\n\n df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]\n rows = df.shape[0]\n cols = df.shape[1] if dim > 1 else 1\n\n def col_to_format(column_type):\n return get_column_formatter_by_type(format, column_type)\n\n iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc\n\n def formatted_row_elements(row):\n return get_formatted_row_elements(row, iat, dim, cols, format, 
dtypes)\n\n xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)\n\n xml += array_data_to_xml(rows, cols, formatted_row_elements, format)\n return xml\n\n\ndef array_data_to_xml(rows, cols, get_row, format):\n xml = \"\\n\" % (rows, cols)\n for row in range(rows):\n xml += \"\\n\" % row\n for value in get_row(row):\n xml += var_to_xml(value, '', format=format)\n return xml\n\n\ndef slice_to_xml(slice, rows, cols, format, type, bounds):\n return '' % \\\n (slice, rows, cols, quote(format), type, bounds[1], bounds[0])\n\n\ndef header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):\n xml = \"\\n\" % (rows, cols)\n for col in range(cols):\n col_label = quote(get_label(df.axes[1].values[col]) if dim > 1 else str(col))\n bounds = col_bounds[col]\n col_format = \"%\" + col_to_format(dtypes[col])\n xml += '\\n' % \\\n (str(col), col_label, dtypes[col], col_to_format(dtypes[col]), col_format % bounds[1], col_format % bounds[0])\n for row in range(rows):\n xml += \"\\n\" % (str(row), get_label(df.axes[0].values[row]))\n xml += \"<\/headerdata>\\n\"\n return xml\n\n\ndef is_able_to_format_number(format):\n try:\n format % math.pi\n except Exception:\n return False\n return True\n\n\nTYPE_TO_XML_CONVERTERS = {\n \"ndarray\": array_to_xml,\n \"DataFrame\": dataframe_to_xml,\n \"Series\": dataframe_to_xml,\n \"GeoDataFrame\": dataframe_to_xml,\n \"GeoSeries\": dataframe_to_xml\n}\n\n\ndef table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):\n _, type_name, _ = get_type(array)\n format = format if is_able_to_format_number(format) else '%'\n if type_name in TYPE_TO_XML_CONVERTERS:\n return \"%s<\/xml>\" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)\n else:\n raise VariableError(\"type %s not supported\" % type_name)\n","license":"apache-2.0"} {"repo_name":"rmm-fcul\/workshops","path":"2015_graz\/binary_choice\/two_arenas_real_real\/casu_utils.py","copies":"5","size":"8116","content":"#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n\n'''\na library of functions used in CASU controller dynamics. 
Got a lot of\nmessy code that would be neater like this\n\nRM, Feb 2015\n\n'''\n\nimport numpy as np\nfrom assisipy import casu\n#import matplotlib.cm as cm\nfrom datetime import datetime\nimport parsing\nimport time\n\n### ============= maths ============= ###\n\n#{{{ rolling_avg\ndef rolling_avg(x, n):\n '''\n given the sample x, provide a rolling average taking n samples per data point.\n NOT a quick solution, but easy...\n '''\n y = np.zeros((len(x),))\n for ctr in range(len(x)):\n y[ctr] = np.sum(x[ctr:(ctr+n)])\n\n return y\/n\n#}}}\n\n### ============= general behaviour ============= ###\n\n#{{{ measure_ir_sensors\ndef measure_ir_sensors(mycasu, detect_data):\n ''' count up sensors that detect a bee, plus rotate history array '''\n\n # don't discriminate between specific directions, so just accumulate all\n count = 0\n for (val,t) in zip(mycasu.get_ir_raw_value(casu.ARRAY), mycasu.threshold):\n if (val > t):\n count += 1\n\n #print \"raw:\", \n #print \",\".join([\"{:.2f}\".format(x) for x in mycasu.get_ir_raw_value(casu.ARRAY)])\n #mycasu.total_count += count # historical count over all time\n detect_data = np.roll(detect_data, 1) # step all positions back\n detect_data[0] = count # and overwrite the first entry (this was rolled\n # around, so is the oldest entry -- and to become the newest now)\n # allow ext usage to apply window -- remain agnostic here during collection.\n return detect_data, count\n\n#}}}\n#{{{ heater_one_step\ndef heater_one_step(h):\n '''legacy function'''\n return detect_bee_proximity_saturated(h)\n\ndef detect_bee_proximity_saturated(h):\n # measure proximity\n detect_data, count = measure_ir_sensors(h, h.detect_data)\n h.detect_data = detect_data\n # overall bee count for this casu\n sat_count = min(h.sat_lim, count) # saturates\n return sat_count\n#}}}\n\n#{{{ find_mean_ext_temp\ndef find_mean_ext_temp(h):\n r = []\n for sensor in [casu.TEMP_F, casu.TEMP_B, casu.TEMP_L, casu.TEMP_R ]:\n r.append(h.get_temp(sensor))\n\n if len(r):\n mean = sum(r) \/ float(len(r))\n else:\n mean = 0.0\n\n return mean\n#}}}\n\n### ============= inter-casu comms ============= ###\n#{{{ comms functions\ndef transmit_my_count(h, sat_count, dest='accomplice'):\n s = \"{}\".format(sat_count)\n if h.verb > 1:\n print \"\\t[i]==> {} send msg ({} by): '{}' bees, to {}\".format(\n h._thename, len(s), s, dest)\n h.send_message(dest, s)\n\n\n#TODO: this is non-specific, i.e., any message from anyone is assumed to have\n# the right form. 
For heterogeneous neighbours, we need to check identity as\n# well\n\ndef recv_all_msgs(h, retry_cnt=0, max_recv=None):\n '''\n continue to read message bffer until no more messages.\n as list of parsed messages parsed into (src, float) pairs\n '''\n msgs = []\n try_cnt = 0\n\n while(True):\n msg = h.read_message()\n #print msg\n if msg:\n txt = msg['data'].strip()\n src = msg['sender']\n bee_cnt = float(txt.split()[0])\n msgs.append((src, bee_cnt))\n\n if h.verb >1:\n print \"\\t[i]<== {3} recv msg ({2} by): '{1}' bees, {4} from {0} {5}\".format(\n msg['sender'], bee_cnt, len(msg['data']), h._thename,\n BLU, ENDC)\n\n if h.verb > 1:\n #print dir(msg)\n print msg.items()\n\n if(max_recv is not None and len(msgs) >= max_recv):\n break\n else:\n # buffer emptied, return\n try_cnt += 1\n if try_cnt > retry_cnt:\n break\n\n return msgs\n\n\ndef recv_neighbour_msg(h):\n bee_cnt = 0\n msg = h.read_message()\n #print msg\n if msg:\n txt = msg['data'].strip()\n bee_cnt = int(txt.split()[0])\n if h.verb >1:\n print \"\\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}\".format(\n msg['sender'], bee_cnt, len(msg['data']), h._thename)\n\n return bee_cnt;\n\ndef recv_neighbour_msg_w_src(h):\n ''' provide the source of a message as well as the message count'''\n bee_cnt = 0\n src = None\n msg = h.read_message()\n #print msg\n if msg:\n txt = msg['data'].strip()\n src = msg['sender']\n bee_cnt = float(txt.split()[0])\n if h.verb >1:\n print \"\\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}\".format(\n msg['sender'], bee_cnt, len(msg['data']), h._thename)\n if h.verb > 1:\n #print dir(msg)\n print msg.items()\n\n return bee_cnt, src\n\n\ndef recv_neighbour_msg_flt(h):\n bee_cnt = 0\n msg = h.read_message()\n #print msg\n if msg:\n txt = msg['data'].strip()\n bee_cnt = float(txt.split()[0])\n if h.verb > 1:\n print \"\\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}\".format(\n msg['sender'], bee_cnt, len(msg['data']), h._thename)\n\n return bee_cnt;\n\n#}}}\n\ndef find_comms_mapping(name, rtc_path, suffix='-sim', verb=True):\n links = parsing.find_comm_link_mapping(\n name, rtc_path=rtc_path, suffix=suffix, verb=verb)\n if verb:\n print \"[I] for {}, found the following nodes\/edges\".format(name)\n print \"\\t\", links.items()\n print \"\\n===================================\\n\\n\"\n return links\n\n\n\n### ============= display ============= ###\n\n#{{{ term codes for colored text\nERR = '\\033[41m'\nBLU = '\\033[34m'\nENDC = '\\033[0m'\n#}}}\n#{{{ color funcs\n#def gen_cmap(m='hot', n=32) :\n# return cm.get_cmap(m, n) # get LUT with 32 values -- some gradation but see steps\n\ndef gen_clr_tgt(new_temp, cmap, tgt=None, min_temp=28.0, max_temp=38.0):\n t_rng = float(max_temp - min_temp)\n fr = (new_temp - min_temp) \/ t_rng\n i = int(fr * len(cmap))\n # compute basic color, if on target\n #r,g,b,a = cmap(i)\n g = 0.0; b = 0.0; a = 1.0;\n \n i = sorted([0, i, len(cmap)-1])[1]\n r = cmap[i]\n\n # now adjust according to distance from target\n if tgt is None: tgt=new_temp\n\n dt = np.abs(new_temp - tgt)\n dt_r = dt \/ t_rng\n h2 = np.array([r,g,b])\n h2 *= (1-dt_r)\n\n return h2\n\n# a colormap with 8 settings, taht doesn't depend on the presence of\n# matplotlib (hard-coded though.) 
-- depricating\n_clrs = [\n (0.2, 0.2, 0.2),\n (0.041, 0, 0),\n (0.412, 0, 0),\n (0.793, 0, 0),\n (1, 0.174, 0),\n (1, 0.555, 0),\n (1, 0.936, 0),\n (1, 1, 0.475),\n (1, 1, 1),\n ]\n\n_dflt_clr = (0.2, 0.2, 0.2)\n\n# can access other gradations of colour using M = cm.hot(n) for n steps, then\n# either extract them once (`clrs = M(arange(n)`) or each time ( `clr_x = M(x)`)\n# BT here we're going to use 8 steps for all CASUs so no bother.\n\n#}}}\n\ndef sep_with_nowtime():\n print \"# =================== t={} =================== #\\n\".format(\n datetime.now().strftime(\"%H:%M:%S\"))\n\n### ============= more generic ============= ###\n#{{{ a struct constructor\n# some handy python utilities, from Kier Dugan\nclass Struct:\n def __init__ (self, **kwargs):\n self.__dict__.update (kwargs)\n\n def get(self, key, default=None):\n return self.__dict__.get(key, default)\n\n\n def addFields(self, **kwargs):\n # add other fields (basically variables) after initialisation\n self.__dict__.update (kwargs)\n\n#}}}\n\n\n\n### calibraiont\ndef _calibrate(h, calib_steps, calib_gain=1.1, interval=0.1):\n '''\n read the sensors several times, and take the highest reading\n seen as the threshold.\n '''\n h._raw_thresh = [0] * 7 # default cases for threshold\n for stp in xrange(calib_steps):\n for i, v in enumerate(h.get_ir_raw_value(casu.ARRAY)):\n if v > h._raw_thresh[i]:\n h._raw_thresh[i] = v\n time.sleep(interval)\n\n h.thresh = [x*calib_gain for x in h._raw_thresh]\n h.threshold = [x*calib_gain for x in h._raw_thresh]\n\n if h.verb:\n _ts =\", \".join([\"{:.2f}\".format(x) for x in h.thresh])\n print \"[I] post-calibration, we have thresh: \", _ts\n\n \n\n\n\n\n\n\n","license":"lgpl-3.0"} {"repo_name":"zooniverse\/aggregation","path":"docs\/source\/conf.py","copies":"1","size":"9778","content":"# -*- coding: utf-8 -*-\n#\n# Zooniverse Aggregation Engine documentation build configuration file, created by\n# sphinx-quickstart on Mon Mar 14 11:15:07 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nfrom mock import Mock as MagicMock\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..\/..'))\n\n# -- General configuration ------------------------------------------------\n\n\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Zooniverse Aggregation Engine'\ncopyright = u'2016, Zooniverse'\nauthor = u'Greg Hines'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'0.9'\n# The full version, including alpha\/beta\/rc tags.\nrelease = u'0.9'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZooniverseAggregationEnginedoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ZooniverseAggregationEngine.tex', u'Zooniverse Aggregation Engine Documentation',\n u'Greg Hines', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'zooniverseaggregationengine', u'Zooniverse Aggregation Engine Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ZooniverseAggregationEngine', u'Zooniverse Aggregation Engine Documentation',\n author, 'ZooniverseAggregationEngine', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return Mock()\n\nMOCK_MODULES = ['shapely','pandas','numpy','scipy','cassandra-driver',\"sklearn\"]\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)","license":"apache-2.0"} {"repo_name":"akloster\/bokeh","path":"bokeh\/properties.py","copies":"20","size":"42601","content":"\"\"\" Properties are objects that can be assigned as class level\nattributes on Bokeh models, to provide automatic serialization\nand validation.\n\nFor example, the following defines a model that has integer,\nstring, and list[float] properties::\n\n class Model(HasProps):\n foo = Int\n bar = String\n baz = List(Float)\n\nThe properties of this class can be initialized by specifying\nkeyword arguments to the initializer::\n\n m = Model(foo=10, bar=\"a str\", baz=[1,2,3,4])\n\nBut also by setting the attributes on an instance::\n\n m.foo = 20\n\nAttempts to set a property to a value of the wrong type will\nresult in a ``ValueError`` exception::\n\n >>> m.foo = 2.3\n Traceback (most recent call last):\n File \"\", line 1, in \n File \"\/Users\/bryan\/work\/bokeh\/bokeh\/properties.py\", line 585, in __setattr__\n super(HasProps, self).__setattr__(name, value)\n File \"\/Users\/bryan\/work\/bokeh\/bokeh\/properties.py\", line 159, in __set__\n raise e\n File \"\/Users\/bryan\/work\/bokeh\/bokeh\/properties.py\", line 152, in __set__\n self.validate(value)\n File 
\"\/Users\/bryan\/work\/bokeh\/bokeh\/properties.py\", line 707, in validate\n (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))\n ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float\n\nAdditionally, properties know how to serialize themselves,\nto be understood by BokehJS.\n\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport re\nimport types\nimport difflib\nimport datetime\nimport dateutil.parser\nimport collections\nfrom importlib import import_module\nfrom copy import copy\nfrom warnings import warn\nimport inspect\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom six import integer_types, string_types, add_metaclass, iteritems\nimport numpy as np\n\nfrom . import enums\nfrom .util.string import nice_join\n\ndef field(name):\n ''' Convenience function do explicitly mark a field specification for\n a Bokeh model property.\n\n Args:\n name (str) : name of a data source field to reference for a property.\n\n Returns:\n dict : `{\"field\": name}`\n\n Note:\n This function is included for completeness. String values for\n property specifications are by default interpreted as field names.\n\n '''\n return dict(field=name)\n\ndef value(val):\n ''' Convenience function do explicitly mark a value specification for\n a Bokeh model property.\n\n Args:\n val (any) : a fixed value to specify for a property.\n\n Returns:\n dict : `{\"value\": name}`\n\n Note:\n String values for property specifications are by default interpreted\n as field names. This function is especially useful when you want to\n specify a fixed value with text properties.\n\n Example:\n\n .. code-block:: python\n\n # The following will take text values to render from a data source\n # column \"text_column\", but use a fixed value \"12pt\" for font size\n p.text(\"x\", \"y\", text=\"text_column\",\n text_font_size=value(\"12pt\"), source=source)\n\n '''\n return dict(value=val)\n\nbokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types\n\n# used to indicate properties that are not set (vs null, None, etc)\nclass _NotSet(object):\n pass\n\nclass DeserializationError(Exception):\n pass\n\nclass Property(object):\n \"\"\" Base class for all type properties. \"\"\"\n\n def __init__(self, default=None, help=None):\n \"\"\" This is how the descriptor is created in the class declaration \"\"\"\n if isinstance(default, types.FunctionType): # aka. 
lazy value\n self.validate(default())\n else:\n self.validate(default)\n\n self._default = default\n self.__doc__ = help\n self.alternatives = []\n\n # This gets set by the class decorator at class creation time\n self.name = \"unnamed\"\n\n def __str__(self):\n return self.__class__.__name__\n\n @property\n def _name(self):\n return \"_\" + self.name\n\n @property\n def default(self):\n if not isinstance(self._default, types.FunctionType):\n return copy(self._default)\n else:\n value = self._default()\n self.validate(value)\n return value\n\n @classmethod\n def autocreate(cls, name=None):\n \"\"\" Called by the metaclass to create a\n new instance of this descriptor\n if the user just assigned it to a property without trailing\n parentheses.\n \"\"\"\n return cls()\n\n def matches(self, new, old):\n # XXX: originally this code warned about not being able to compare values, but that\n # doesn't make sense, because most comparisons involving numpy arrays will fail with\n # ValueError exception, thus warning about inevitable.\n try:\n if new is None or old is None:\n return new is old # XXX: silence FutureWarning from NumPy\n else:\n return new == old\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as e:\n logger.debug(\"could not compare %s and %s for property %s (Reason: %s)\", new, old, self.name, e)\n return False\n\n def from_json(self, json, models=None):\n return json\n\n def transform(self, value):\n return value\n\n def validate(self, value):\n pass\n\n def is_valid(self, value):\n try:\n self.validate(value)\n except ValueError:\n return False\n else:\n return True\n\n def _get(self, obj):\n if not hasattr(obj, self._name):\n setattr(obj, self._name, self.default)\n return getattr(obj, self._name)\n\n def __get__(self, obj, owner=None):\n if obj is not None:\n return self._get(obj)\n elif owner is not None:\n return self\n else:\n raise ValueError(\"both 'obj' and 'owner' are None, don't know what to do\")\n\n def __set__(self, obj, value):\n try:\n self.validate(value)\n except ValueError as e:\n for tp, converter in self.alternatives:\n if tp.is_valid(value):\n value = converter(value)\n break\n else:\n raise e\n else:\n value = self.transform(value)\n\n old = self.__get__(obj)\n obj._changed_vars.add(self.name)\n if self._name in obj.__dict__ and self.matches(value, old):\n return\n setattr(obj, self._name, value)\n obj._dirty = True\n if hasattr(obj, '_trigger'):\n if hasattr(obj, '_block_callbacks') and obj._block_callbacks:\n obj._callback_queue.append((self.name, old, value))\n else:\n obj._trigger(self.name, old, value)\n\n def __delete__(self, obj):\n if hasattr(obj, self._name):\n delattr(obj, self._name)\n\n @property\n def has_ref(self):\n return False\n\n def accepts(self, tp, converter):\n tp = ParameterizedProperty._validate_type_param(tp)\n self.alternatives.append((tp, converter))\n return self\n\n def __or__(self, other):\n return Either(self, other)\n\nclass Include(object):\n \"\"\" Include other properties from mixin Models, with a given prefix. 
\"\"\"\n\n def __init__(self, delegate, help=\"\", use_prefix=True):\n if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):\n raise ValueError(\"expected a subclass of HasProps, got %r\" % delegate)\n\n self.delegate = delegate\n self.help = help\n self.use_prefix = use_prefix\n\nclass MetaHasProps(type):\n def __new__(cls, class_name, bases, class_dict):\n names = set()\n names_with_refs = set()\n container_names = set()\n\n # First pre-process to handle all the Includes\n includes = {}\n removes = set()\n for name, prop in class_dict.items():\n if not isinstance(prop, Include):\n continue\n\n delegate = prop.delegate\n if prop.use_prefix:\n prefix = re.sub(\"_props$\", \"\", name) + \"_\"\n else:\n prefix = \"\"\n\n for subpropname in delegate.class_properties(withbases=False):\n fullpropname = prefix + subpropname\n subprop = delegate.lookup(subpropname)\n if isinstance(subprop, Property):\n # If it's an actual instance, then we need to make a copy\n # so two properties don't write to the same hidden variable\n # inside the instance.\n subprop = copy(subprop)\n if \"%s\" in prop.help:\n doc = prop.help % subpropname.replace('_', ' ')\n else:\n doc = prop.help\n try:\n includes[fullpropname] = subprop(help=doc)\n except TypeError:\n includes[fullpropname] = subprop\n subprop.__doc__ = doc\n # Remove the name of the Include attribute itself\n removes.add(name)\n\n # Update the class dictionary, taking care not to overwrite values\n # from the delegates that the subclass may have explicitly defined\n for key, val in includes.items():\n if key not in class_dict:\n class_dict[key] = val\n for tmp in removes:\n del class_dict[tmp]\n\n dataspecs = {}\n units_to_add = {}\n for name, prop in class_dict.items():\n if isinstance(prop, Property):\n prop.name = name\n if prop.has_ref:\n names_with_refs.add(name)\n elif isinstance(prop, ContainerProperty):\n container_names.add(name)\n names.add(name)\n if isinstance(prop, DataSpec):\n dataspecs[name] = prop\n if hasattr(prop, '_units_type'):\n units_to_add[name+\"_units\"] = prop._units_type\n\n elif isinstance(prop, type) and issubclass(prop, Property):\n # Support the user adding a property without using parens,\n # i.e. 
using just the Property subclass instead of an\n # instance of the subclass\n newprop = prop.autocreate(name=name)\n class_dict[name] = newprop\n newprop.name = name\n names.add(name)\n\n # Process dataspecs\n if issubclass(prop, DataSpec):\n dataspecs[name] = newprop\n\n for name, prop in units_to_add.items():\n prop.name = name\n names.add(name)\n class_dict[name] = prop\n\n class_dict[\"__properties__\"] = names\n class_dict[\"__properties_with_refs__\"] = names_with_refs\n class_dict[\"__container_props__\"] = container_names\n if dataspecs:\n class_dict[\"_dataspecs\"] = dataspecs\n return type.__new__(cls, class_name, bases, class_dict)\n\ndef accumulate_from_subclasses(cls, propname):\n s = set()\n for c in inspect.getmro(cls):\n if issubclass(c, HasProps):\n s.update(getattr(c, propname))\n return s\n\n@add_metaclass(MetaHasProps)\nclass HasProps(object):\n\n def __init__(self, **properties):\n super(HasProps, self).__init__()\n self._changed_vars = set()\n\n for name, value in properties.items():\n setattr(self, name, value)\n\n def __setattr__(self, name, value):\n props = sorted(self.properties())\n\n if name.startswith(\"_\") or name in props:\n super(HasProps, self).__setattr__(name, value)\n else:\n matches, text = difflib.get_close_matches(name.lower(), props), \"similar\"\n\n if not matches:\n matches, text = props, \"possible\"\n\n raise AttributeError(\"unexpected attribute '%s' to %s, %s attributes are %s\" %\n (name, self.__class__.__name__, text, nice_join(matches)))\n\n def clone(self):\n \"\"\" Returns a duplicate of this object with all its properties\n set appropriately. Values which are containers are shallow-copied.\n \"\"\"\n return self.__class__(**self.changed_properties_with_values())\n\n @classmethod\n def lookup(cls, name):\n return getattr(cls, name)\n\n @classmethod\n def properties_with_refs(cls):\n \"\"\" Returns a set of the names of this object's properties that\n have references. We traverse the class hierarchy and\n pull together the full list of properties.\n \"\"\"\n if not hasattr(cls, \"__cached_allprops_with_refs\"):\n s = accumulate_from_subclasses(cls, \"__properties_with_refs__\")\n cls.__cached_allprops_with_refs = s\n return cls.__cached_allprops_with_refs\n\n @classmethod\n def properties_containers(cls):\n \"\"\" Returns a list of properties that are containers\n \"\"\"\n if not hasattr(cls, \"__cached_allprops_containers\"):\n s = accumulate_from_subclasses(cls, \"__container_props__\")\n cls.__cached_allprops_containers = s\n return cls.__cached_allprops_containers\n\n @classmethod\n def properties(cls):\n \"\"\" Returns a set of the names of this object's properties. We\n traverse the class hierarchy and pull together the full\n list of properties.\n \"\"\"\n if not hasattr(cls, \"__cached_allprops\"):\n s = cls.class_properties()\n cls.__cached_allprops = s\n return cls.__cached_allprops\n\n @classmethod\n def dataspecs(cls):\n \"\"\" Returns a set of the names of this object's dataspecs (and\n dataspec subclasses). 
Traverses the class hierarchy.\n \"\"\"\n if not hasattr(cls, \"__cached_dataspecs\"):\n dataspecs = set()\n for c in reversed(inspect.getmro(cls)):\n if hasattr(c, \"_dataspecs\"):\n dataspecs.update(c._dataspecs.keys())\n cls.__cached_dataspecs = dataspecs\n return cls.__cached_dataspecs\n\n @classmethod\n def dataspecs_with_refs(cls):\n dataspecs = {}\n for c in reversed(inspect.getmro(cls)):\n if hasattr(c, \"_dataspecs\"):\n dataspecs.update(c._dataspecs)\n return dataspecs\n\n def changed_vars(self):\n \"\"\" Returns which variables changed since the creation of the object,\n or the last called to reset_changed_vars().\n \"\"\"\n return set.union(self._changed_vars, self.properties_with_refs(),\n self.properties_containers())\n\n def reset_changed_vars(self):\n self._changed_vars = set()\n\n def properties_with_values(self):\n return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])\n\n def changed_properties(self):\n return self.changed_vars()\n\n def changed_properties_with_values(self):\n return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ])\n\n @classmethod\n def class_properties(cls, withbases=True):\n if withbases:\n return accumulate_from_subclasses(cls, \"__properties__\")\n else:\n return set(cls.__properties__)\n\n def set(self, **kwargs):\n \"\"\" Sets a number of properties at once \"\"\"\n for kw in kwargs:\n setattr(self, kw, kwargs[kw])\n\n def pprint_props(self, indent=0):\n \"\"\" Prints the properties of this object, nicely formatted \"\"\"\n for key, value in self.properties_with_values().items():\n print(\"%s%s: %r\" % (\" \"*indent, key, value))\n\nclass PrimitiveProperty(Property):\n \"\"\" A base class for simple property types. Subclasses should\n define a class attribute ``_underlying_type`` that is a tuple\n of acceptable type values for the property.\n\n \"\"\"\n\n _underlying_type = None\n\n def validate(self, value):\n super(PrimitiveProperty, self).validate(value)\n\n if not (value is None or isinstance(value, self._underlying_type)):\n raise ValueError(\"expected a value of type %s, got %s of type %s\" %\n (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))\n\n def from_json(self, json, models=None):\n if json is None or isinstance(json, self._underlying_type):\n return json\n else:\n expected = nice_join([ cls.__name__ for cls in self._underlying_type ])\n raise DeserializationError(\"%s expected %s, got %s\" % (self, expected, json))\n\nclass Bool(PrimitiveProperty):\n \"\"\" Boolean type property. \"\"\"\n _underlying_type = (bool,)\n\nclass Int(PrimitiveProperty):\n \"\"\" Signed integer type property. \"\"\"\n _underlying_type = bokeh_integer_types\n\nclass Float(PrimitiveProperty):\n \"\"\" Floating point type property. \"\"\"\n _underlying_type = (float, ) + bokeh_integer_types\n\nclass Complex(PrimitiveProperty):\n \"\"\" Complex floating point type property. \"\"\"\n _underlying_type = (complex, float) + bokeh_integer_types\n\nclass String(PrimitiveProperty):\n \"\"\" String type property. 
\"\"\"\n _underlying_type = string_types\n\nclass Regex(String):\n \"\"\" Regex type property validates that text values match the\n given regular expression.\n \"\"\"\n def __init__(self, regex, default=None, help=None):\n self.regex = re.compile(regex)\n super(Regex, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Regex, self).validate(value)\n\n if not (value is None or self.regex.match(value) is not None):\n raise ValueError(\"expected a string matching %r pattern, got %r\" % (self.regex.pattern, value))\n\n def __str__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.regex.pattern)\n\nclass JSON(String):\n \"\"\" JSON type property validates that text values are valid JSON.\n\n .. note::\n The string is transmitted and received by BokehJS as a *string*\n containing JSON content. i.e., you must use ``JSON.parse`` to unpack\n the value into a JavaScript hash.\n\n \"\"\"\n def validate(self, value):\n super(JSON, self).validate(value)\n\n if value is None: return\n\n try:\n import json\n json.loads(value)\n except ValueError:\n raise ValueError(\"expected JSON text, got %r\" % value)\n\nclass ParameterizedProperty(Property):\n \"\"\" Base class for Properties that have type parameters, e.g.\n ``List(String)``.\n\n \"\"\"\n\n @staticmethod\n def _validate_type_param(type_param):\n if isinstance(type_param, type):\n if issubclass(type_param, Property):\n return type_param()\n else:\n type_param = type_param.__name__\n elif isinstance(type_param, Property):\n return type_param\n\n raise ValueError(\"expected a property as type parameter, got %s\" % type_param)\n\n @property\n def type_params(self):\n raise NotImplementedError(\"abstract method\")\n\n @property\n def has_ref(self):\n return any(type_param.has_ref for type_param in self.type_params)\n\nclass ContainerProperty(ParameterizedProperty):\n \"\"\" Base class for Container-like type properties. \"\"\"\n pass\n\nclass Seq(ContainerProperty):\n \"\"\" Sequence (list, tuple) type property.\n\n \"\"\"\n\n def _is_seq(self, value):\n return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)\n\n def _new_instance(self, value):\n return value\n\n def __init__(self, item_type, default=None, help=None):\n self.item_type = self._validate_type_param(item_type)\n super(Seq, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return [self.item_type]\n\n def validate(self, value):\n super(Seq, self).validate(value)\n\n if value is not None:\n if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):\n raise ValueError(\"expected an element of %s, got %r\" % (self, value))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.item_type)\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, list):\n return self._new_instance([ self.item_type.from_json(item, models) for item in json ])\n else:\n raise DeserializationError(\"%s expected a list or None, got %s\" % (self, json))\n\nclass List(Seq):\n \"\"\" Python list type property.\n\n \"\"\"\n\n def __init__(self, item_type, default=[], help=None):\n # todo: refactor to not use mutable objects as default values.\n # Left in place for now because we want to allow None to express\n # opional values. 
Also in Dict.\n super(List, self).__init__(item_type, default=default, help=help)\n\n def _is_seq(self, value):\n return isinstance(value, list)\n\nclass Array(Seq):\n \"\"\" NumPy array type property.\n\n \"\"\"\n\n def _is_seq(self, value):\n import numpy as np\n return isinstance(value, np.ndarray)\n\n def _new_instance(self, value):\n return np.array(value)\n\nclass Dict(ContainerProperty):\n \"\"\" Python dict type property.\n\n If a default value is passed in, then a shallow copy of it will be\n used for each new use of this property.\n\n \"\"\"\n\n def __init__(self, keys_type, values_type, default={}, help=None):\n self.keys_type = self._validate_type_param(keys_type)\n self.values_type = self._validate_type_param(values_type)\n super(Dict, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return [self.keys_type, self.values_type]\n\n def validate(self, value):\n super(Dict, self).validate(value)\n\n if value is not None:\n if not (isinstance(value, dict) and \\\n all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):\n raise ValueError(\"expected an element of %s, got %r\" % (self, value))\n\n def __str__(self):\n return \"%s(%s, %s)\" % (self.__class__.__name__, self.keys_type, self.values_type)\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, dict):\n return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }\n else:\n raise DeserializationError(\"%s expected a dict or None, got %s\" % (self, json))\n\nclass Tuple(ContainerProperty):\n \"\"\" Tuple type property. \"\"\"\n def __init__(self, tp1, tp2, *type_params, **kwargs):\n self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))\n super(Tuple, self).__init__(default=kwargs.get(\"default\"), help=kwargs.get(\"help\"))\n\n @property\n def type_params(self):\n return self._type_params\n\n def validate(self, value):\n super(Tuple, self).validate(value)\n\n if value is not None:\n if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \\\n all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):\n raise ValueError(\"expected an element of %s, got %r\" % (self, value))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(map(str, self.type_params)))\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, list):\n return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))\n else:\n raise DeserializationError(\"%s expected a list or None, got %s\" % (self, json))\n\nclass Instance(Property):\n \"\"\" Instance type property, for references to other Models in the object\n graph.\n\n \"\"\"\n def __init__(self, instance_type, default=None, help=None):\n if not isinstance(instance_type, (type,) + string_types):\n raise ValueError(\"expected a type or string, got %s\" % instance_type)\n\n if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):\n raise ValueError(\"expected a subclass of HasProps, got %s\" % instance_type)\n\n self._instance_type = instance_type\n\n super(Instance, self).__init__(default=default, help=help)\n\n @property\n def instance_type(self):\n if isinstance(self._instance_type, str):\n module, name = self._instance_type.rsplit(\".\", 1)\n self._instance_type = getattr(import_module(module, 
\"bokeh\"), name)\n\n return self._instance_type\n\n @property\n def has_ref(self):\n return True\n\n def validate(self, value):\n super(Instance, self).validate(value)\n\n if value is not None:\n if not isinstance(value, self.instance_type):\n raise ValueError(\"expected an instance of type %s, got %s of type %s\" %\n (self.instance_type.__name__, value, type(value).__name__))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.instance_type.__name__)\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, dict):\n from .plot_object import PlotObject\n if issubclass(self.instance_type, PlotObject):\n if models is None:\n raise DeserializationError(\"%s can't deserialize without models\" % self)\n else:\n model = models.get(json[\"id\"])\n\n if model is not None:\n return model\n else:\n raise DeserializationError(\"%s failed to deserilize reference to %s\" % (self, json))\n else:\n attrs = {}\n\n for name, value in iteritems(json):\n prop = self.instance_type.lookup(name)\n attrs[name] = prop.from_json(value, models)\n\n # XXX: this doesn't work when Instance(Superclass) := Subclass()\n # Serialization dict must carry type information to resolve this.\n return self.instance_type(**attrs)\n else:\n raise DeserializationError(\"%s expected a dict or None, got %s\" % (self, json))\n\nclass This(Property):\n \"\"\" A reference to an instance of the class being defined. \"\"\"\n pass\n\n# Fake types, ABCs\nclass Any(Property):\n \"\"\" Any type property accepts any values. \"\"\"\n pass\n\nclass Function(Property):\n \"\"\" Function type property. \"\"\"\n pass\n\nclass Event(Property):\n \"\"\" Event type property. \"\"\"\n pass\n\nclass Interval(ParameterizedProperty):\n ''' Range type property ensures values are contained inside a given interval. '''\n def __init__(self, interval_type, start, end, default=None, help=None):\n self.interval_type = self._validate_type_param(interval_type)\n self.interval_type.validate(start)\n self.interval_type.validate(end)\n self.start = start\n self.end = end\n super(Interval, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return [self.interval_type]\n\n def validate(self, value):\n super(Interval, self).validate(value)\n\n if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):\n raise ValueError(\"expected a value of type %s in range [%s, %s], got %r\" % (self.interval_type, self.start, self.end, value))\n\n def __str__(self):\n return \"%s(%s, %r, %r)\" % (self.__class__.__name__, self.interval_type, self.start, self.end)\n\nclass Byte(Interval):\n ''' Byte type property. '''\n def __init__(self, default=0, help=None):\n super(Byte, self).__init__(Int, 0, 255, default=default, help=help)\n\nclass Either(ParameterizedProperty):\n \"\"\" Takes a list of valid properties and validates against them in succession. 
\"\"\"\n\n def __init__(self, tp1, tp2, *type_params, **kwargs):\n self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))\n default = kwargs.get(\"default\", self._type_params[0].default)\n help = kwargs.get(\"help\")\n super(Either, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return self._type_params\n\n def validate(self, value):\n super(Either, self).validate(value)\n\n if not (value is None or any(param.is_valid(value) for param in self.type_params)):\n raise ValueError(\"expected an element of either %s, got %r\" % (nice_join(self.type_params), value))\n\n def transform(self, value):\n for param in self.type_params:\n try:\n return param.transform(value)\n except ValueError:\n pass\n\n raise ValueError(\"Could not transform %r\" % value)\n\n def from_json(self, json, models=None):\n for tp in self.type_params:\n try:\n return tp.from_json(json, models)\n except DeserializationError:\n pass\n else:\n raise DeserializationError(\"%s couldn't deserialize %s\" % (self, json))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(map(str, self.type_params)))\n\n def __or__(self, other):\n return self.__class__(*(self.type_params + [other]), default=self._default, help=self.help)\n\nclass Enum(Property):\n \"\"\" An Enum with a list of allowed values. The first value in the list is\n the default value, unless a default is provided with the \"default\" keyword\n argument.\n \"\"\"\n def __init__(self, enum, *values, **kwargs):\n if not (not values and isinstance(enum, enums.Enumeration)):\n enum = enums.enumeration(enum, *values)\n\n self.allowed_values = enum._values\n\n default = kwargs.get(\"default\", enum._default)\n help = kwargs.get(\"help\")\n super(Enum, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Enum, self).validate(value)\n\n if not (value is None or value in self.allowed_values):\n raise ValueError(\"invalid value for %s: %r; allowed values are %s\" % (self.name, value, nice_join(self.allowed_values)))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(map(repr, self.allowed_values)))\n\nclass Auto(Enum):\n\n def __init__(self):\n super(Auto, self).__init__(\"auto\")\n\n def __str__(self):\n return self.__class__.__name__\n\n# Properties useful for defining visual attributes\nclass Color(Either):\n \"\"\" Accepts color definition in a variety of ways, and produces an\n appropriate serialization of its value for whatever backend.\n\n For colors, because we support named colors and hex values prefaced\n with a \"#\", when we are handed a string value, there is a little\n interpretation: if the value is one of the 147 SVG named colors or\n it starts with a \"#\", then it is interpreted as a value.\n\n If a 3-tuple is provided, then it is treated as an RGB (0..255).\n If a 4-tuple is provided, then it is treated as an RGBa (0..255), with\n alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)\n \"\"\"\n\n def __init__(self, default=None, help=None):\n types = (Enum(enums.NamedColor),\n Regex(\"^#[0-9a-fA-F]{6}$\"),\n Tuple(Byte, Byte, Byte),\n Tuple(Byte, Byte, Byte, Percent))\n super(Color, self).__init__(*types, default=default, help=help)\n\n def __str__(self):\n return self.__class__.__name__\n\n\nclass Align(Property):\n pass\n\nclass DashPattern(Either):\n \"\"\" Dash type property.\n\n Express patterns that describe line dashes. 
``DashPattern`` values\n can be specified in a variety of ways:\n\n * An enum: \"solid\", \"dashed\", \"dotted\", \"dotdash\", \"dashdot\"\n * a tuple or list of integers in the `HTML5 Canvas dash specification style`_.\n Note that if the list of integers has an odd number of elements, then\n it is duplicated, and that duplicated list becomes the new dash list.\n\n To indicate that dashing is turned off (solid lines), specify the empty\n list [].\n\n .. _HTML5 Canvas dash specification style: http:\/\/www.w3.org\/html\/wg\/drafts\/2dcontext\/html5_canvas\/#dash-list\n\n \"\"\"\n\n _dash_patterns = {\n \"solid\": [],\n \"dashed\": [6],\n \"dotted\": [2,4],\n \"dotdash\": [2,4,6,4],\n \"dashdot\": [6,4,2,4],\n }\n\n def __init__(self, default=[], help=None):\n types = Enum(enums.DashPattern), Regex(r\"^(\\d+(\\s+\\d+)*)?$\"), Seq(Int)\n super(DashPattern, self).__init__(*types, default=default, help=help)\n\n def transform(self, value):\n value = super(DashPattern, self).transform(value)\n\n if isinstance(value, string_types):\n try:\n return self._dash_patterns[value]\n except KeyError:\n return [int(x) for x in value.split()]\n else:\n return value\n\n def __str__(self):\n return self.__class__.__name__\n\nclass Size(Float):\n \"\"\" Size type property.\n\n .. note::\n ``Size`` is equivalent to an unsigned int.\n\n \"\"\"\n def validate(self, value):\n super(Size, self).validate(value)\n\n if not (value is None or 0.0 <= value):\n raise ValueError(\"expected a non-negative number, got %r\" % value)\n\nclass Percent(Float):\n \"\"\" Percentage type property.\n\n Percents are useful for specifying alphas and coverage and extents; more\n semantically meaningful than Float(0..1).\n\n \"\"\"\n def validate(self, value):\n super(Percent, self).validate(value)\n\n if not (value is None or 0.0 <= value <= 1.0):\n raise ValueError(\"expected a value in range [0, 1], got %r\" % value)\n\nclass Angle(Float):\n \"\"\" Angle type property. 
\"\"\"\n pass\n\nclass Date(Property):\n \"\"\" Date (not datetime) type property.\n\n \"\"\"\n def __init__(self, default=datetime.date.today(), help=None):\n super(Date, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Date, self).validate(value)\n\n if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):\n raise ValueError(\"expected a date, string or timestamp, got %r\" % value)\n\n def transform(self, value):\n value = super(Date, self).transform(value)\n\n if isinstance(value, (float,) + bokeh_integer_types):\n try:\n value = datetime.date.fromtimestamp(value)\n except ValueError:\n value = datetime.date.fromtimestamp(value\/1000)\n elif isinstance(value, string_types):\n value = dateutil.parser.parse(value).date()\n\n return value\n\nclass Datetime(Property):\n \"\"\" Datetime type property.\n\n \"\"\"\n\n def __init__(self, default=datetime.date.today(), help=None):\n super(Datetime, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Datetime, self).validate(value)\n\n if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))):\n return\n try:\n import pandas\n if isinstance(value, (pandas.Timestamp)):\n return\n except ImportError:\n pass\n\n raise ValueError(\"Expected a datetime instance, got %r\" % value)\n\n def transform(self, value):\n value = super(Datetime, self).transform(value)\n return value\n # Handled by serialization in protocol.py for now\n\n\nclass RelativeDelta(Dict):\n \"\"\" RelativeDelta type property for time deltas.\n\n \"\"\"\n\n def __init__(self, default={}, help=None):\n keys = Enum(\"years\", \"months\", \"days\", \"hours\", \"minutes\", \"seconds\", \"microseconds\")\n values = Int\n super(RelativeDelta, self).__init__(keys, values, default=default, help=help)\n\n def __str__(self):\n return self.__class__.__name__\n\nclass DataSpec(Either):\n def __init__(self, typ, default, help=None):\n super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)\n self._type = self._validate_type_param(typ)\n\n def to_dict(self, obj):\n val = getattr(obj, self._name, self.default)\n\n # Check for None value\n if val is None:\n return dict(value=None)\n\n # Check for spec type value\n try:\n self._type.validate(val)\n return dict(value=val)\n except ValueError:\n pass\n\n # Check for data source field name\n if isinstance(val, string_types):\n return dict(field=val)\n\n # Must be dict, return as-is\n return val\n\n def __str__(self):\n val = getattr(self, self._name, self.default)\n return \"%s(%r)\" % (self.__class__.__name__, val)\n\nclass NumberSpec(DataSpec):\n def __init__(self, default, help=None):\n super(NumberSpec, self).__init__(Float, default=default, help=help)\n\nclass StringSpec(DataSpec):\n def __init__(self, default, help=None):\n super(StringSpec, self).__init__(List(String), default=default, help=help)\n\n def __set__(self, obj, value):\n if isinstance(value, list):\n if len(value) != 1:\n raise TypeError(\"StringSpec convenience list values must have length 1\")\n value = dict(value=value[0])\n super(StringSpec, self).__set__(obj, value)\n\nclass FontSizeSpec(DataSpec):\n def __init__(self, default, help=None):\n super(FontSizeSpec, self).__init__(List(String), default=default, help=help)\n\n def __set__(self, obj, value):\n if isinstance(value, string_types):\n warn('Setting a fixed font size value as a string %r is deprecated, '\n 'set with value(%r) or [%r] instead' % 
(value, value, value),\n DeprecationWarning, stacklevel=2)\n if len(value) > 0 and value[0].isdigit():\n value = dict(value=value)\n super(FontSizeSpec, self).__set__(obj, value)\n\nclass UnitsSpec(NumberSpec):\n def __init__(self, default, units_type, units_default, help=None):\n super(UnitsSpec, self).__init__(default=default, help=help)\n self._units_type = self._validate_type_param(units_type)\n self._units_type.validate(units_default)\n self._units_type._default = units_default\n\n def to_dict(self, obj):\n d = super(UnitsSpec, self).to_dict(obj)\n d[\"units\"] = getattr(obj, self.name+\"_units\")\n return d\n\n def __set__(self, obj, value):\n if isinstance(value, dict):\n units = value.pop(\"units\", None)\n if units: setattr(obj, self.name+\"_units\", units)\n super(UnitsSpec, self).__set__(obj, value)\n\n def __str__(self):\n val = getattr(self, self._name, self.default)\n return \"%s(%r, units_default=%r)\" % (self.__class__.__name__, val, self._units_type._default)\n\nclass AngleSpec(UnitsSpec):\n def __init__(self, default, units_default=\"rad\", help=None):\n super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)\n\nclass DistanceSpec(UnitsSpec):\n def __init__(self, default, units_default=\"data\", help=None):\n super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)\n\n def __set__(self, obj, value):\n try:\n if value < 0:\n raise ValueError(\"Distances must be non-negative\")\n except TypeError:\n pass\n super(DistanceSpec, self).__set__(obj, value)\n\nclass ScreenDistanceSpec(NumberSpec):\n def to_dict(self, obj):\n d = super(ScreenDistanceSpec, self).to_dict(obj)\n d[\"units\"] = \"screen\"\n return d\n\n def __set__(self, obj, value):\n try:\n if value < 0:\n raise ValueError(\"Distances must be non-negative\")\n except TypeError:\n pass\n super(ScreenDistanceSpec, self).__set__(obj, value)\n\nclass DataDistanceSpec(NumberSpec):\n def to_dict(self, obj):\n d = super(ScreenDistanceSpec, self).to_dict(obj)\n d[\"units\"] = \"data\"\n return d\n\n def __set__(self, obj, value):\n try:\n if value < 0:\n raise ValueError(\"Distances must be non-negative\")\n except TypeError:\n pass\n super(DataDistanceSpec, self).__set__(obj, value)\n\nclass ColorSpec(DataSpec):\n def __init__(self, default, help=None):\n super(ColorSpec, self).__init__(Color, default=default, help=help)\n\n @classmethod\n def isconst(cls, arg):\n \"\"\" Returns True if the argument is a literal color. 
Check for a\n well-formed hexadecimal color value.\n \"\"\"\n return isinstance(arg, string_types) and \\\n ((len(arg) == 7 and arg[0] == \"#\") or arg in enums.NamedColor._values)\n\n @classmethod\n def is_color_tuple(cls, val):\n return isinstance(val, tuple) and len(val) in (3, 4)\n\n @classmethod\n def format_tuple(cls, colortuple):\n if len(colortuple) == 3:\n return \"rgb%r\" % (colortuple,)\n else:\n return \"rgba%r\" % (colortuple,)\n\n def to_dict(self, obj):\n val = getattr(obj, self._name, self.default)\n\n if val is None:\n return dict(value=None)\n\n # Check for hexadecimal or named color\n if self.isconst(val):\n return dict(value=val)\n\n # Check for RGB or RGBa tuple\n if isinstance(val, tuple):\n return dict(value=self.format_tuple(val))\n\n # Check for data source field name\n if isinstance(val, string_types):\n return dict(field=val)\n\n # Must be dict, return as-is\n return val\n\n def validate(self, value):\n try:\n return super(ColorSpec, self).validate(value)\n except ValueError as e:\n # Check for tuple input if not yet a valid input type\n if self.is_color_tuple(value):\n return True\n else:\n raise e\n\n def transform(self, value):\n\n # Make sure that any tuple has either three integers, or three integers and one float\n if isinstance(value, tuple):\n value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))\n\n return value\n","license":"bsd-3-clause"} {"repo_name":"trustedanalytics\/spark-tk","path":"regression-tests\/sparktkregtests\/testcases\/frames\/boxcox_test.py","copies":"12","size":"5074","content":"# vim: set encoding=utf-8\n\n# Copyright\u00a0(c)\u00a02016 Intel\u00a0Corporation\u00a0\n#\n# Licensed\u00a0under\u00a0the\u00a0Apache\u00a0License,\u00a0Version\u00a02.0\u00a0(the\u00a0\"License\");\n# you\u00a0may\u00a0not\u00a0use\u00a0this\u00a0file\u00a0except\u00a0in\u00a0compliance\u00a0with\u00a0the\u00a0License.\n# You\u00a0may\u00a0obtain\u00a0a\u00a0copy\u00a0of\u00a0the\u00a0License\u00a0at\n#\n# \u00a0\u00a0\u00a0\u00a0\u00a0 http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless\u00a0required\u00a0by\u00a0applicable\u00a0law\u00a0or\u00a0agreed\u00a0to\u00a0in\u00a0writing,\u00a0software\n# distributed\u00a0under\u00a0the\u00a0License\u00a0is\u00a0distributed\u00a0on\u00a0an\u00a0\"AS\u00a0IS\"\u00a0BASIS,\n# WITHOUT\u00a0WARRANTIES\u00a0OR\u00a0CONDITIONS\u00a0OF\u00a0ANY\u00a0KIND,\u00a0either\u00a0express\u00a0or\u00a0implied.\n# See\u00a0the\u00a0License\u00a0for\u00a0the\u00a0specific\u00a0language\u00a0governing\u00a0permissions\u00a0and\n# limitations\u00a0under\u00a0the\u00a0License.\n#\n\n\"\"\" Test frame.box_cox() and frame.reverse_box_cox()\"\"\"\n\nimport unittest\nfrom sparktkregtests.lib import sparktk_test\n\n\nclass BoxCoxTest(sparktk_test.SparkTKTestCase):\n\n def setUp(self):\n \"\"\"Build test frame\"\"\"\n super(BoxCoxTest, self).setUp()\n\n dataset =\\\n [[5.8813080107727425], [8.9771372790941797], [8.9153072947470804],\n [8.1583747730768401], [0.35889585616853292]]\n schema = [(\"y\", float)]\n\n self.frame = self.context.frame.create(dataset, schema=schema)\n\n def test_wt_default(self):\n \"\"\" Test behaviour for default params, lambda = 0 \"\"\"\n self.frame.box_cox(\"y\")\n\n actual = self.frame.to_pandas()[\"y_lambda_0.0\"].tolist()\n expected =\\\n [1.7717791879837133, 2.1946810429706676,\n 2.1877697201262163, 2.0990449791729704, -1.0247230268174008]\n self.assertItemsEqual(actual, expected)\n\n def test_lambda(self):\n \"\"\" Test wt for lambda = 0.3 \"\"\"\n self.frame.box_cox(\"y\", 0.3)\n \n 
actual = self.frame.to_pandas()[\"y_lambda_0.3\"].tolist()\n expected =\\\n [2.3384668540844573, 3.1056915770236082,\n 3.0923547540771801, 2.9235756971904037, -0.88218677941017198]\n self.assertItemsEqual(actual, expected)\n\n def test_reverse_default(self):\n \"\"\" Test reverse transform for default lambda = 0 \"\"\"\n self.frame.box_cox(\"y\")\n self.frame.reverse_box_cox(\"y_lambda_0.0\",\n reverse_box_cox_column_name=\"reverse\")\n\n actual = self.frame.to_pandas()[\"reverse\"].tolist()\n expected =\\\n [5.8813080107727425, 8.9771372790941815,\n 8.9153072947470804, 8.1583747730768401, 0.35889585616853298]\n\n self.assertItemsEqual(actual, expected)\n\n def test_reverse_lambda(self):\n \"\"\" Test reverse transform for lambda = 0.3 \"\"\"\n self.frame.box_cox(\"y\", 0.3)\n self.frame.reverse_box_cox(\"y_lambda_0.3\", 0.3,\n reverse_box_cox_column_name=\"reverse\")\n\n actual = self.frame.to_pandas()[\"reverse\"].tolist()\n expected =\\\n [5.8813080107727442, 8.9771372790941797,\n 8.9153072947470822, 8.1583747730768419,\n 0.35889585616853298]\n\n self.assertItemsEqual(actual, expected)\n\n @unittest.skip(\"req not clear\")\n def test_lambda_negative(self):\n \"\"\" Test box cox for lambda -1 \"\"\"\n self.frame.box_cox(\"y\", -1)\n\n actual = self.frame.to_pandas()[\"y_lambda_-1.0\"].tolist()\n expected =\\\n [0.82996979614597488, 0.88860591423406388,\n 0.88783336715839256, 0.87742656744575354,\n -1.7863236167608822]\n\n self.assertItemsEqual(actual, expected)\n\n def test_existing_boxcox_column(self):\n \"\"\" Test behavior for existing boxcox column \"\"\"\n self.frame.box_cox(\"y\", 0.3)\n \n with self.assertRaisesRegexp(\n Exception, \"duplicate column name\"):\n self.frame.box_cox(\"y\", 0.3)\n\n def test_existing_reverse_column(self):\n \"\"\" Test behavior for existing reverse boxcox column \"\"\"\n self.frame.reverse_box_cox(\"y\", 0.3)\n\n with self.assertRaisesRegexp(\n Exception, \"duplicate column name\"):\n self.frame.reverse_box_cox(\"y\", 0.3)\n\n @unittest.skip(\"Req not clear\")\n def test_negative_col_positive_lambda(self):\n \"\"\"Test behaviour for negative input column and positive lambda\"\"\"\n frame = self.context.frame.create([[-1], [-2], [1]], [(\"y\", float)])\n frame.box_cox(\"y\", 1)\n\n actual = frame.to_pandas()[\"y_lambda_1.0\"].tolist()\n expected = [-2.0, -3.0, 0]\n\n self.assertItemsEqual(actual, expected)\n\n @unittest.skip(\"Req not clear\")\n def test_negative_col_frational_lambda(self):\n \"\"\"Test behaviour for negative input column and negative lambda\"\"\"\n frame = self.context.frame.create([[-1], [-2], [1]], [(\"y\", float)])\n\n with self.assertRaises(Exception):\n frame.box_cox(\"y\", 0.1)\n\n @unittest.skip(\"Req not clear\")\n def test_negative_col_zero_lambda(self):\n \"\"\"Test behaviour for negative input column and positive lambda\"\"\"\n frame = self.context.frame.create([[-1], [-2], [1]], [(\"y\", float)])\n\n with self.assertRaises(Exception):\n frame.box_cox(\"y\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","license":"apache-2.0"} {"repo_name":"yonglehou\/scikit-learn","path":"examples\/applications\/plot_stock_market.py","copies":"227","size":"8284","content":"\"\"\"\n=======================================\nVisualizing the stock market structure\n=======================================\n\nThis example employs several unsupervised learning techniques to extract\nthe stock market structure from variations in historical quotes.\n\nThe quantity that we use is the daily variation in quote price: quotes\nthat are linked tend 
to cofluctuate during a day.\n\n.. _stock_market:\n\nLearning a graph structure\n--------------------------\n\nWe use sparse inverse covariance estimation to find which quotes are\ncorrelated conditionally on the others. Specifically, sparse inverse\ncovariance gives us a graph, that is, a list of connections. For each\nsymbol, the symbols that it is connected to are those useful to explain\nits fluctuations.\n\nClustering\n----------\n\nWe use clustering to group together quotes that behave similarly. Here,\namongst the :ref:`various clustering techniques <clustering>` available\nin scikit-learn, we use :ref:`affinity_propagation` as it does\nnot enforce equal-size clusters, and it can automatically choose the\nnumber of clusters from the data.\n\nNote that this gives us a different indication than the graph, as the\ngraph reflects conditional relations between variables, while the\nclustering reflects marginal properties: variables clustered together can\nbe considered as having a similar impact at the level of the full stock\nmarket.\n\nEmbedding in 2D space\n---------------------\n\nFor visualization purposes, we need to lay out the different symbols on a\n2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D\nembedding.\n\n\nVisualization\n-------------\n\nThe output of the 3 models is combined in a 2D graph where nodes\nrepresent the stocks and edges the links between them:\n\n- cluster labels are used to define the color of the nodes\n- the sparse covariance model is used to display the strength of the edges\n- the 2D embedding is used to position the nodes in the plane\n\nThis example has a fair amount of visualization-related code, as\nvisualization is crucial here to display the graph. One of the challenges\nis to position the labels while minimizing overlap. 
For this we use an\nheuristic based on the direction of the nearest neighbor along each\naxis.\n\"\"\"\nprint(__doc__)\n\n# Author: Gael Varoquaux gael.varoquaux@normalesup.org\n# License: BSD 3 clause\n\nimport datetime\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import finance\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn import cluster, covariance, manifold\n\n###############################################################################\n# Retrieve the data from Internet\n\n# Choose a time period reasonnably calm (not too long ago so that we get\n# high-tech firms, and before the 2008 crash)\nd1 = datetime.datetime(2003, 1, 1)\nd2 = datetime.datetime(2008, 1, 1)\n\n# kraft symbol has now changed from KFT to MDLZ in yahoo\nsymbol_dict = {\n 'TOT': 'Total',\n 'XOM': 'Exxon',\n 'CVX': 'Chevron',\n 'COP': 'ConocoPhillips',\n 'VLO': 'Valero Energy',\n 'MSFT': 'Microsoft',\n 'IBM': 'IBM',\n 'TWX': 'Time Warner',\n 'CMCSA': 'Comcast',\n 'CVC': 'Cablevision',\n 'YHOO': 'Yahoo',\n 'DELL': 'Dell',\n 'HPQ': 'HP',\n 'AMZN': 'Amazon',\n 'TM': 'Toyota',\n 'CAJ': 'Canon',\n 'MTU': 'Mitsubishi',\n 'SNE': 'Sony',\n 'F': 'Ford',\n 'HMC': 'Honda',\n 'NAV': 'Navistar',\n 'NOC': 'Northrop Grumman',\n 'BA': 'Boeing',\n 'KO': 'Coca Cola',\n 'MMM': '3M',\n 'MCD': 'Mc Donalds',\n 'PEP': 'Pepsi',\n 'MDLZ': 'Kraft Foods',\n 'K': 'Kellogg',\n 'UN': 'Unilever',\n 'MAR': 'Marriott',\n 'PG': 'Procter Gamble',\n 'CL': 'Colgate-Palmolive',\n 'GE': 'General Electrics',\n 'WFC': 'Wells Fargo',\n 'JPM': 'JPMorgan Chase',\n 'AIG': 'AIG',\n 'AXP': 'American express',\n 'BAC': 'Bank of America',\n 'GS': 'Goldman Sachs',\n 'AAPL': 'Apple',\n 'SAP': 'SAP',\n 'CSCO': 'Cisco',\n 'TXN': 'Texas instruments',\n 'XRX': 'Xerox',\n 'LMT': 'Lookheed Martin',\n 'WMT': 'Wal-Mart',\n 'WBA': 'Walgreen',\n 'HD': 'Home Depot',\n 'GSK': 'GlaxoSmithKline',\n 'PFE': 'Pfizer',\n 'SNY': 'Sanofi-Aventis',\n 'NVS': 'Novartis',\n 'KMB': 'Kimberly-Clark',\n 'R': 'Ryder',\n 'GD': 'General Dynamics',\n 'RTN': 'Raytheon',\n 'CVS': 'CVS',\n 'CAT': 'Caterpillar',\n 'DD': 'DuPont de Nemours'}\n\nsymbols, names = np.array(list(symbol_dict.items())).T\n\nquotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)\n for symbol in symbols]\n\nopen = np.array([q.open for q in quotes]).astype(np.float)\nclose = np.array([q.close for q in quotes]).astype(np.float)\n\n# The daily variations of the quotes are what carry most information\nvariation = close - open\n\n###############################################################################\n# Learn a graphical structure from the correlations\nedge_model = covariance.GraphLassoCV()\n\n# standardize the time series: using correlations rather than covariance\n# is more efficient for structure recovery\nX = variation.copy().T\nX \/= X.std(axis=0)\nedge_model.fit(X)\n\n###############################################################################\n# Cluster using affinity propagation\n\n_, labels = cluster.affinity_propagation(edge_model.covariance_)\nn_labels = labels.max()\n\nfor i in range(n_labels + 1):\n print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))\n\n###############################################################################\n# Find a low-dimension embedding for visualization: find the best position of\n# the nodes (the stocks) on a 2D plane\n\n# We use a dense eigen_solver to achieve reproducibility (arpack is\n# initiated with random vectors that we don't control). 
In addition, we\n# use a large number of neighbors to capture the large-scale structure.\nnode_position_model = manifold.LocallyLinearEmbedding(\n n_components=2, eigen_solver='dense', n_neighbors=6)\n\nembedding = node_position_model.fit_transform(X.T).T\n\n###############################################################################\n# Visualization\nplt.figure(1, facecolor='w', figsize=(10, 8))\nplt.clf()\nax = plt.axes([0., 0., 1., 1.])\nplt.axis('off')\n\n# Display a graph of the partial correlations\npartial_correlations = edge_model.precision_.copy()\nd = 1 \/ np.sqrt(np.diag(partial_correlations))\npartial_correlations *= d\npartial_correlations *= d[:, np.newaxis]\nnon_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)\n\n# Plot the nodes using the coordinates of our embedding\nplt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,\n cmap=plt.cm.spectral)\n\n# Plot the edges\nstart_idx, end_idx = np.where(non_zero)\n#a sequence of (*line0*, *line1*, *line2*), where::\n# linen = (x0, y0), (x1, y1), ... (xm, ym)\nsegments = [[embedding[:, start], embedding[:, stop]]\n for start, stop in zip(start_idx, end_idx)]\nvalues = np.abs(partial_correlations[non_zero])\nlc = LineCollection(segments,\n zorder=0, cmap=plt.cm.hot_r,\n norm=plt.Normalize(0, .7 * values.max()))\nlc.set_array(values)\nlc.set_linewidths(15 * values)\nax.add_collection(lc)\n\n# Add a label to each node. The challenge here is that we want to\n# position the labels to avoid overlap with other labels\nfor index, (name, label, (x, y)) in enumerate(\n zip(names, labels, embedding.T)):\n\n dx = x - embedding[0]\n dx[index] = 1\n dy = y - embedding[1]\n dy[index] = 1\n this_dx = dx[np.argmin(np.abs(dy))]\n this_dy = dy[np.argmin(np.abs(dx))]\n if this_dx > 0:\n horizontalalignment = 'left'\n x = x + .002\n else:\n horizontalalignment = 'right'\n x = x - .002\n if this_dy > 0:\n verticalalignment = 'bottom'\n y = y + .002\n else:\n verticalalignment = 'top'\n y = y - .002\n plt.text(x, y, name, size=10,\n horizontalalignment=horizontalalignment,\n verticalalignment=verticalalignment,\n bbox=dict(facecolor='w',\n edgecolor=plt.cm.spectral(label \/ float(n_labels)),\n alpha=.6))\n\nplt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),\n embedding[0].max() + .10 * embedding[0].ptp(),)\nplt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),\n embedding[1].max() + .03 * embedding[1].ptp())\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"phoebe-project\/phoebe2-docs","path":"2.1\/examples\/minimal_contact_binary.py","copies":"1","size":"5694","content":"#!\/usr\/bin\/env python\n# coding: utf-8\n\n# Minimal Contact Binary System\n# ============================\n# \n# Setup\n# -----------------------------\n\n# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).\n\n# In[ ]:\n\n\nget_ipython().system('pip install -I \"phoebe>=2.1,<2.2\"')\n\n\n# As always, let's do imports and initialize a logger and a new bundle. 
See [Building a System](..\/tutorials\/building_a_system.html) for more details.\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\nimport phoebe\nfrom phoebe import u # units\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlogger = phoebe.logger()\n\n\n# Here we'll initialize a default binary, but ask for it to be created as a contact system.\n\n# In[3]:\n\n\nb_cb = phoebe.default_binary(contact_binary=True)\n\n\n# We'll compare this to the default detached binary\n\n# In[4]:\n\n\nb_detached = phoebe.default_binary()\n\n\n# Hierarchy\n# -------------\n\n# Let's first look at the hierarchy of the default detached binary, and then compare that to the hierarchy of the overcontact system\n\n# In[5]:\n\n\nprint b_detached.hierarchy\n\n\n# In[6]:\n\n\nprint b_cb.hierarchy\n\n\n# As you can see, the overcontact system has an additional \"component\" with method \"envelope\" and component label \"contact_envelope\".\n# \n# Next let's look at the parameters in the envelope and star components. You can see that most of parameters in the envelope class are constrained, while the equivalent radius of the primary is unconstrained. The value of primary equivalent radius constrains the potential and fillout factor of the envelope, as well as the equivalent radius of the secondary.\n\n# In[7]:\n\n\nprint b_cb.filter(component='contact_envelope', kind='envelope', context='component')\n\n\n# In[8]:\n\n\nprint b_cb.filter(component='primary', kind='star', context='component')\n\n\n# In[9]:\n\n\nb_cb['requiv@primary'] = 1.5\n\n\n# In[10]:\n\n\nb_cb['pot@contact_envelope@component']\n\n\n# In[11]:\n\n\nb_cb['fillout_factor@contact_envelope@component']\n\n\n# In[12]:\n\n\nb_cb['requiv@secondary@component']\n\n\n# Now, of course, if we didn't originally know we wanted a contact binary and built the default detached system, we could still turn it into an contact binary just by changing the hierarchy.\n\n# In[13]:\n\n\nb_detached.add_component('envelope', component='contact_envelope')\n\n\n# In[14]:\n\n\nhier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'], b_detached['contact_envelope'])\nprint hier\n\n\n# In[15]:\n\n\nb_detached.set_hierarchy(hier)\n\n\n# In[16]:\n\n\nprint b_detached.hierarchy\n\n\n# However, since our system was detached, the system is not overflowing, and therefore doesn't pass system checks\n\n# In[17]:\n\n\nb_detached.run_checks()\n\n\n# And because of this, the potential and requiv@secondary constraints cannot be computed\n\n# In[18]:\n\n\nb_detached['pot@component']\n\n\n# In[19]:\n\n\nb_detached['requiv@secondary@component']\n\n\n# Likewise, we can make a contact system detached again simply by removing the envelope from the hierarchy. 
The parameters themselves will still exist (unless you remove them), so you can always just change the hierarchy again to change back to an overcontact system.\n\n# In[20]:\n\n\nhier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'])\nprint hier\n\n\n# In[21]:\n\n\nb_detached.set_hierarchy(hier)\n\n\n# In[22]:\n\n\nprint b_detached.hierarchy\n\n\n# Although the constraints have been removed, PHOEBE has lost the original value of the secondary radius (because of the failed contact constraints), so we'll have to reset that here as well.\n\n# In[23]:\n\n\nb_detached['requiv@secondary'] = 1.0\n\n\n# Adding Datasets\n# ---------------------\n\n# In[24]:\n\n\nb_cb.add_dataset('mesh', times=[0], dataset='mesh01')\n\n\n# In[25]:\n\n\nb_cb.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')\n\n\n# In[26]:\n\n\nb_cb.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')\n\n\n# In[27]:\n\n\nb_cb.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')\n\n\n# For comparison, we'll do the same to our detached system\n\n# In[28]:\n\n\nb_detached.add_dataset('mesh', times=[0], dataset='mesh01')\n\n\n# In[29]:\n\n\nb_detached.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')\n\n\n# In[30]:\n\n\nb_detached.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')\n\n\n# In[31]:\n\n\nb_detached.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')\n\n\n# Running Compute\n# --------------------\n\n# In[32]:\n\n\nb_cb.run_compute(irrad_method='none')\n\n\n# In[33]:\n\n\nb_detached.run_compute(irrad_method='none')\n\n\n# Synthetics\n# ------------------\n\n# To ensure compatibility with computing synthetics in detached and semi-detached systems in Phoebe, the synthetic meshes for our overcontact system are attached to each component separetely, instead of the contact envelope.\n\n# In[34]:\n\n\nprint b_cb['mesh01@model'].components\n\n\n# In[35]:\n\n\nprint b_detached['mesh01@model'].components\n\n\n# Plotting\n# ---------------\n\n# ### Meshes\n\n# In[36]:\n\n\nafig, mplfig = b_cb['mesh01@model'].plot(x='ws', show=True)\n\n\n# In[37]:\n\n\nafig, mplfig = b_detached['mesh01@model'].plot(x='ws', show=True)\n\n\n# ### Orbits\n\n# In[38]:\n\n\nafig, mplfig = b_cb['orb01@model'].plot(x='ws',show=True)\n\n\n# In[39]:\n\n\nafig, mplfig = b_detached['orb01@model'].plot(x='ws',show=True)\n\n\n# ### Light Curves\n\n# In[40]:\n\n\nafig, mplfig = b_cb['lc01@model'].plot(show=True)\n\n\n# In[41]:\n\n\nafig, mplfig = b_detached['lc01@model'].plot(show=True)\n\n\n# ### RVs\n\n# In[42]:\n\n\nafig, mplfig = b_cb['rv01@model'].plot(show=True)\n\n\n# In[43]:\n\n\nafig, mplfig = b_detached['rv01@model'].plot(show=True)\n\n\n# In[ ]:\n\n\n\n\n","license":"gpl-3.0"} {"repo_name":"jereze\/scikit-learn","path":"examples\/cluster\/plot_feature_agglomeration_vs_univariate_selection.py","copies":"218","size":"3893","content":"\"\"\"\n==============================================\nFeature agglomeration vs. 
univariate selection\n==============================================\n\nThis example compares 2 dimensionality reduction strategies:\n\n- univariate feature selection with Anova\n\n- feature agglomeration with Ward hierarchical clustering\n\nBoth methods are compared in a regression problem using\na BayesianRidge as supervised estimator.\n\"\"\"\n\n# Author: Alexandre Gramfort \n# License: BSD 3 clause\n\nprint(__doc__)\n\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg, ndimage\n\nfrom sklearn.feature_extraction.image import grid_to_graph\nfrom sklearn import feature_selection\nfrom sklearn.cluster import FeatureAgglomeration\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.externals.joblib import Memory\nfrom sklearn.cross_validation import KFold\n\n###############################################################################\n# Generate data\nn_samples = 200\nsize = 40 # image size\nroi_size = 15\nsnr = 5.\nnp.random.seed(0)\nmask = np.ones([size, size], dtype=np.bool)\n\ncoef = np.zeros((size, size))\ncoef[0:roi_size, 0:roi_size] = -1.\ncoef[-roi_size:, -roi_size:] = 1.\n\nX = np.random.randn(n_samples, size ** 2)\nfor x in X: # smooth data\n x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()\nX -= X.mean(axis=0)\nX \/= X.std(axis=0)\n\ny = np.dot(X, coef.ravel())\nnoise = np.random.randn(y.shape[0])\nnoise_coef = (linalg.norm(y, 2) \/ np.exp(snr \/ 20.)) \/ linalg.norm(noise, 2)\ny += noise_coef * noise # add noise\n\n###############################################################################\n# Compute the coefs of a Bayesian Ridge with GridSearch\ncv = KFold(len(y), 2) # cross-validation generator for model selection\nridge = BayesianRidge()\ncachedir = tempfile.mkdtemp()\nmem = Memory(cachedir=cachedir, verbose=1)\n\n# Ward agglomeration followed by BayesianRidge\nconnectivity = grid_to_graph(n_x=size, n_y=size)\nward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,\n memory=mem)\nclf = Pipeline([('ward', ward), ('ridge', ridge)])\n# Select the optimal number of parcels with grid search\nclf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)\nclf.fit(X, y) # set the best parameters\ncoef_ = clf.best_estimator_.steps[-1][1].coef_\ncoef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)\ncoef_agglomeration_ = coef_.reshape(size, size)\n\n# Anova univariate feature selection followed by BayesianRidge\nf_regression = mem.cache(feature_selection.f_regression) # caching function\nanova = feature_selection.SelectPercentile(f_regression)\nclf = Pipeline([('anova', anova), ('ridge', ridge)])\n# Select the optimal percentage of features with grid search\nclf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)\nclf.fit(X, y) # set the best parameters\ncoef_ = clf.best_estimator_.steps[-1][1].coef_\ncoef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)\ncoef_selection_ = coef_.reshape(size, size)\n\n###############################################################################\n# Inverse the transformation to plot the results on an image\nplt.close('all')\nplt.figure(figsize=(7.3, 2.7))\nplt.subplot(1, 3, 1)\nplt.imshow(coef, interpolation=\"nearest\", cmap=plt.cm.RdBu_r)\nplt.title(\"True weights\")\nplt.subplot(1, 3, 2)\nplt.imshow(coef_selection_, interpolation=\"nearest\", cmap=plt.cm.RdBu_r)\nplt.title(\"Feature 
Selection\")\nplt.subplot(1, 3, 3)\nplt.imshow(coef_agglomeration_, interpolation=\"nearest\", cmap=plt.cm.RdBu_r)\nplt.title(\"Feature Agglomeration\")\nplt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)\nplt.show()\n\n# Attempt to remove the temporary cachedir, but don't worry if it fails\nshutil.rmtree(cachedir, ignore_errors=True)\n","license":"bsd-3-clause"} {"repo_name":"jeremiedecock\/snippets","path":"python\/matplotlib\/hist_logscale_x.py","copies":"1","size":"1804","content":"#!\/usr\/bin\/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMake a histogram using a logarithmic scale on X axis\n\nSee:\n\n- http:\/\/stackoverflow.com\/questions\/6855710\/how-to-have-logarithmic-bins-in-a-python-histogram\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# SETUP #######################################################################\n\n# histtype : [\u2018bar\u2019 | \u2018barstacked\u2019 | \u2018step\u2019 | \u2018stepfilled\u2019]\nHIST_TYPE='bar'\nALPHA=0.5\n\n# MAKE DATA ###################################################################\n\ndata = np.random.exponential(size=1000000)\n#data = np.abs(np.random.normal(size=1000000) * 10000.)\n#data = np.random.chisquare(10, size=1000000)\n\n# INIT FIGURE #################################################################\n\nfig = plt.figure(figsize=(8.0, 6.0))\n\n# AX1 #########################################################################\n\nax1 = fig.add_subplot(211)\n\nres_tuple = ax1.hist(data,\n bins=50,\n histtype=HIST_TYPE,\n alpha=ALPHA)\n\nax1.set_title(\"Normal scale\")\nax1.set_xlabel(\"Value\")\nax1.set_ylabel(\"Count\")\n\n# AX2 #########################################################################\n\nax2 = fig.add_subplot(212)\n\nvmin = np.log10(data.min())\nvmax = np.log10(data.max())\nbins = np.logspace(vmin, vmax, 50) # <- make a range from 10**vmin to 10**vmax\n\nprint(bins)\n\nres_tuple = ax2.hist(data,\n bins=bins,\n histtype=HIST_TYPE,\n alpha=ALPHA)\n\nax2.set_xscale(\"log\") # <- Activate log scale on X axis\n\nax2.set_title(\"Log scale\")\nax2.set_xlabel(\"Value\")\nax2.set_ylabel(\"Count\")\n\n# SHOW AND SAVE FILE ##########################################################\n\nplt.tight_layout()\n\nplt.savefig(\"hist_logscale_x.png\")\nplt.show()\n","license":"mit"} {"repo_name":"daodaoliang\/bokeh","path":"bokeh\/charts\/builder\/tests\/test_line_builder.py","copies":"33","size":"2376","content":"\"\"\" This is the Bokeh charts testing interface.\n\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. 
All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import\n\nfrom collections import OrderedDict\nimport unittest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pandas as pd\n\nfrom bokeh.charts import Line\n\nfrom bokeh.charts.builder.tests._utils import create_chart\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\nclass TestLine(unittest.TestCase):\n def test_supported_input(self):\n xyvalues = OrderedDict()\n y_python = xyvalues['python'] = [2, 3, 7, 5, 26]\n y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]\n y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]\n\n xyvaluesdf = pd.DataFrame(xyvalues)\n\n for i, _xy in enumerate([xyvalues, xyvaluesdf]):\n hm = create_chart(Line, _xy)\n builder = hm._builders[0]\n self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))\n assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])\n assert_array_equal(builder._data['y_python'], y_python)\n assert_array_equal(builder._data['y_pypy'], y_pypy)\n assert_array_equal(builder._data['y_jython'], y_jython)\n\n lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]\n for _xy in [lvalues, np.array(lvalues)]:\n hm = create_chart(Line, _xy)\n builder = hm._builders[0]\n self.assertEqual(builder._groups, ['0', '1', '2'])\n assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])\n assert_array_equal(builder._data['y_0'], y_python)\n assert_array_equal(builder._data['y_1'], y_pypy)\n assert_array_equal(builder._data['y_2'], y_jython)\n","license":"bsd-3-clause"} {"repo_name":"pypot\/scikit-learn","path":"examples\/decomposition\/plot_pca_vs_lda.py","copies":"182","size":"1743","content":"\"\"\"\n=======================================================\nComparison of LDA and PCA 2D projection of Iris dataset\n=======================================================\n\nThe Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour\nand Virginica) with 4 attributes: sepal length, sepal width, petal length\nand petal width.\n\nPrincipal Component Analysis (PCA) applied to this data identifies the\ncombination of attributes (principal components, or directions in the\nfeature space) that account for the most variance in the data. Here we\nplot the different samples on the 2 first principal components.\n\nLinear Discriminant Analysis (LDA) tries to identify attributes that\naccount for the most variance *between classes*. 
In particular,\nLDA, in contrast to PCA, is a supervised method, using known class labels.\n\"\"\"\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.lda import LDA\n\niris = datasets.load_iris()\n\nX = iris.data\ny = iris.target\ntarget_names = iris.target_names\n\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\n\nlda = LDA(n_components=2)\nX_r2 = lda.fit(X, y).transform(X)\n\n# Percentage of variance explained for each components\nprint('explained variance ratio (first two components): %s'\n % str(pca.explained_variance_ratio_))\n\nplt.figure()\nfor c, i, target_name in zip(\"rgb\", [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)\nplt.legend()\nplt.title('PCA of IRIS dataset')\n\nplt.figure()\nfor c, i, target_name in zip(\"rgb\", [0, 1, 2], target_names):\n plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)\nplt.legend()\nplt.title('LDA of IRIS dataset')\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"JackKelly\/neuralnilm_prototype","path":"scripts\/experiment029.py","copies":"2","size":"3262","content":"from __future__ import division\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nfrom gen_data_029 import gen_data, N_BATCH, LENGTH\ntheano.config.compute_test_value = 'raise'\n\n\n# Number of units in the hidden (recurrent) layer\nN_HIDDEN = 5\n# SGD learning rate\nLEARNING_RATE = 1e-1\n# Number of iterations to train the net\nN_ITERATIONS = 200\n\n# Generate a \"validation\" sequence whose cost we will periodically compute\nX_val, y_val = gen_data()\n\nn_features = X_val.shape[-1]\nn_output = y_val.shape[-1]\nassert X_val.shape == (N_BATCH, LENGTH, n_features)\nassert y_val.shape == (N_BATCH, LENGTH, n_output)\n\n# Construct LSTM RNN: One LSTM layer and one dense output layer\nl_in = lasagne.layers.InputLayer(shape=(N_BATCH, LENGTH, n_features))\n\n\n# setup fwd and bck LSTM layer.\nl_fwd = lasagne.layers.LSTMLayer(\n l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True)\nl_bck = lasagne.layers.LSTMLayer(\n l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True)\n\n# concatenate forward and backward LSTM layers\nl_fwd_reshape = lasagne.layers.ReshapeLayer(l_fwd, (N_BATCH*LENGTH, N_HIDDEN))\nl_bck_reshape = lasagne.layers.ReshapeLayer(l_bck, (N_BATCH*LENGTH, N_HIDDEN))\nl_concat = lasagne.layers.ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1)\n\n\nl_recurrent_out = lasagne.layers.DenseLayer(\n l_concat, num_units=n_output, nonlinearity=None)\nl_out = lasagne.layers.ReshapeLayer(\n l_recurrent_out, (N_BATCH, LENGTH, n_output))\n\ninput = T.tensor3('input')\ntarget_output = T.tensor3('target_output')\n\n# add test values\ninput.tag.test_value = np.random.rand(\n *X_val.shape).astype(theano.config.floatX)\ntarget_output.tag.test_value = np.random.rand(\n *y_val.shape).astype(theano.config.floatX)\n\n# Cost = mean squared error\ncost = T.mean((l_out.get_output(input) - target_output)**2)\n\n# Use NAG for training\nall_params = lasagne.layers.get_all_params(l_out)\nupdates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE)\n# Theano functions for training, getting output, and computing cost\ntrain = theano.function([input, target_output],\n cost, updates=updates, on_unused_input='warn',\n allow_input_downcast=True)\ny_pred = theano.function(\n [input], l_out.get_output(input), on_unused_input='warn',\n 
allow_input_downcast=True)\n\ncompute_cost = theano.function(\n [input, target_output], cost, on_unused_input='warn',\n allow_input_downcast=True)\n\n# Train the net\ndef run_training():\n costs = np.zeros(N_ITERATIONS)\n for n in range(N_ITERATIONS):\n X, y = gen_data()\n\n # you should use your own training data mask instead of mask_val\n costs[n] = train(X, y)\n if not n % 10:\n cost_val = compute_cost(X_val, y_val)\n print \"Iteration {} validation cost = {}\".format(n, cost_val)\n\n plt.plot(costs)\n plt.xlabel('Iteration')\n plt.ylabel('Cost')\n plt.show()\n\ndef plot_estimates():\n X, y = gen_data()\n y_predictions = y_pred(X)\n ax = plt.gca()\n ax.plot(y_predictions[0,:,0], label='estimate')\n ax.plot(y[0,:,0], label='ground truth')\n # ax.plot(X[0,:,0], label='aggregate')\n ax.legend()\n plt.show()\n\nrun_training()\nplot_estimates()\n","license":"mit"} {"repo_name":"zmr\/namsel","path":"accuracy_test.py","copies":"1","size":"2139","content":"#encoding: utf-8\n\nimport cPickle as pickle\nfrom classify import load_cls, label_chars\nfrom cv2 import GaussianBlur\nfrom feature_extraction import get_zernike_moments, get_hu_moments, \\\n extract_features, normalize_and_extract_features\nfrom functools import partial\nimport glob\nfrom multiprocessing.pool import Pool\nimport numpy as np\nimport os\nfrom sklearn.externals import joblib\nfrom sobel_features import sobel_features\nfrom transitions import transition_features\nfrom fast_utils import fnormalize, ftrim\n\ncls = load_cls('logistic-cls')\n\n# Load testing sets\nprint 'Loading test data'\n\ntsets = pickle.load(open('datasets\/testing\/training_sets.pkl', 'rb'))\n\nscaler = joblib.load('zernike_scaler-latest')\n\nprint 'importing classifier'\n\nprint cls.get_params()\n\nprint 'scoring ...'\nkeys = tsets.keys()\nkeys.sort()\nall_samples = []\n\n## Baseline accuracies for the data in tsets\nbaseline = [0.608, 0.5785123966942148, 0.4782608695652174, 0.7522123893805309, \n 0.6884057971014492, 0.5447154471544715, 0.9752066115702479, \n 0.9830508474576272]\n\n\ndef test_accuracy(t, clsf=None):\n '''Get accuracy score for a testset t'''\n if clsf:\n cls = clsf\n else:\n global cls\n \n y = tsets[t][:,0]\n x = tsets[t][:,1:]\n \n x3 = []\n for j in x:\n j = ftrim(j.reshape((32,16)).astype(np.uint8))\n x3.append(normalize_and_extract_features(j))\n \n pred = cls.predict(x3)\n\n s = 0\n for i, p in enumerate(pred):\n if float(p) == y[i]:\n s += 1.0 \n else:\n pass\n print 'correct', label_chars[y[i]], '||', label_chars[p], t #, max(cls.predict_proba(x3[i])[0])\n\n score = s \/ len(y)\n return score\n\ndef test_all(clsf=None):\n '''Run accuracy tests for all testsets'''\n \n print 'starting tests. 
this will take a moment'\n \n test_accuracy(keys[0], clsf)\n \n test_all = partial(test_accuracy, clsf=clsf)\n p = Pool()\n all_samples = p.map(test_all, keys)\n \n for t, s in zip(keys, all_samples):\n print t, s\n return np.mean(all_samples)\n\nif __name__ == '__main__':\n print test_all()\n","license":"mit"} {"repo_name":"ephes\/scikit-learn","path":"examples\/decomposition\/plot_pca_vs_lda.py","copies":"182","size":"1743","content":"\"\"\"\n=======================================================\nComparison of LDA and PCA 2D projection of Iris dataset\n=======================================================\n\nThe Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour\nand Virginica) with 4 attributes: sepal length, sepal width, petal length\nand petal width.\n\nPrincipal Component Analysis (PCA) applied to this data identifies the\ncombination of attributes (principal components, or directions in the\nfeature space) that account for the most variance in the data. Here we\nplot the different samples on the 2 first principal components.\n\nLinear Discriminant Analysis (LDA) tries to identify attributes that\naccount for the most variance *between classes*. In particular,\nLDA, in contrast to PCA, is a supervised method, using known class labels.\n\"\"\"\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.lda import LDA\n\niris = datasets.load_iris()\n\nX = iris.data\ny = iris.target\ntarget_names = iris.target_names\n\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\n\nlda = LDA(n_components=2)\nX_r2 = lda.fit(X, y).transform(X)\n\n# Percentage of variance explained for each components\nprint('explained variance ratio (first two components): %s'\n % str(pca.explained_variance_ratio_))\n\nplt.figure()\nfor c, i, target_name in zip(\"rgb\", [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)\nplt.legend()\nplt.title('PCA of IRIS dataset')\n\nplt.figure()\nfor c, i, target_name in zip(\"rgb\", [0, 1, 2], target_names):\n plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)\nplt.legend()\nplt.title('LDA of IRIS dataset')\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"aguirrea\/lucy","path":"tests\/lfootGraph.py","copies":"1","size":"6007","content":"#! 
\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n# Andr\u00e9s Aguirre Dorelo\n#\n# MINA\/INCO\/UDELAR\n#\n# module for finding the steps in the tutors\n# \n# This program is free software; you can redistribute it and\/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\nimport os\nimport glob\nimport ntpath\n\nfrom parser.BvhImport import BvhImport\nimport matplotlib.pyplot as plt\nfrom configuration.LoadSystemConfiguration import LoadSystemConfiguration\nimport numpy as np\nfrom scipy.signal import argrelextrema\nfrom collections import Counter\n\nsysConf = LoadSystemConfiguration()\nBVHDir = os.getcwd() + sysConf.getDirectory(\"CMU mocap Files\")\n\nY_THREADHOLD = 11 #TODO calculate this as the average of the steps_highs\nX_THREADHOLD = 36\n\n\ndef firstMax(values1, values2):\n res=0\n for i in range(len(values1)-2):\n if values1[i] < values1[i+1] and values1[i+1] > values1[i+2]: #i+1 is a local maximun\n if (values1[i] - values2[i]) > THREADHOLD: \n res=i+1\n elif values1[i] < values1[i+1] < values1[i+2]: #i is a local maximun\n if (values1[i] - values2[i]) > THREADHOLD: \n res=i\n return res\n\ndef find_nearest(a, a0):\n \"Element in nd array `a` closest to the scalar value `a0`\"\n idx = np.abs(a - a0).argmin()\n return a.flat[idx]\n\nfor filename in glob.glob(os.path.join(BVHDir, '*.bvh')):\n print \"transforming: \" + filename + \" ...\"\n parser = BvhImport(filename)\n x_,y_,z_ = parser.getNodePositionsFromName(\"lFoot\")\n y1 = []\n y2 = []\n x1 = []\n x2 = []\n \n for key, value in y_.iteritems():\n y1.append(value)\n x1.append(key)\n \n x_,y_,z_ = parser.getNodePositionsFromName(\"rFoot\")\n for key, value in y_.iteritems():\n y2.append(value)\n x2.append(key)\n \n maxLfootIndexes = [x for x in argrelextrema(np.array(y1), np.greater)[0]]\n maxRfootIndexes = [x for x in argrelextrema(np.array(y2), np.greater)[0]]\n\n stepsLfootIndexes = []\n for i in range(len(maxLfootIndexes)):\n index = maxLfootIndexes[i]\n if y1[index] - y2[index] > Y_THREADHOLD: #one foot is up and the other is in the floor \n if len(stepsLfootIndexes)>0:\n if abs(index - find_nearest(np.array(stepsLfootIndexes), index) > X_THREADHOLD): #avoid max near an existing point\n stepsLfootIndexes.append(index)\n print \"appeend L\"\n else:\n if y1[find_nearest(np.array(stepsLfootIndexes), index)] < y1[index]: #check if the exiting near max is a local maximun\n print \"remove L\", find_nearest(np.array(stepsLfootIndexes), index), \"from: \", stepsLfootIndexes\n stepsLfootIndexes.remove(find_nearest(np.array(stepsLfootIndexes), index))\n print \"remove L\"\n stepsLfootIndexes.append(index)\n print \"appeend L\"\n else:\n stepsLfootIndexes.append(index)\n print \"appeend L\" \n \n stepsRfootIndexes = []\n for i in range(len(maxRfootIndexes)):\n index = maxRfootIndexes[i]\n if y2[index] - y1[index] > Y_THREADHOLD: #one foot is up and the other is in the floor\n if len(stepsRfootIndexes)>0:\n if abs(index - find_nearest(np.array(stepsRfootIndexes),index) > X_THREADHOLD): #avoid max near an 
existing point\n stepsRfootIndexes.append(index)\n print \"appeend R\"\n else:\n if y2[find_nearest(np.array(stepsRfootIndexes), index)] < y2[index]: #check if the exiting near max is a local maximun\n print \"remove R\", find_nearest(np.array(stepsRfootIndexes), index), \"from: \", stepsRfootIndexes, \"index: \", index\n stepsRfootIndexes.remove(find_nearest(np.array(stepsRfootIndexes), index))\n print \"remove R\"\n stepsRfootIndexes.append(index)\n print \"appeend R\"\n \n else:\n stepsRfootIndexes.append(index)\n print \"appeend R\"\n \n\n if stepsLfootIndexes[0] < stepsRfootIndexes[0]:\n if len(stepsLfootIndexes) > 2:\n testPoint = stepsLfootIndexes[1]\n while(y1[testPoint]>y2[testPoint]):\n testPoint = testPoint + 1\n\n end = testPoint + 5\n print \"red over green| \", \"red: \", stepsLfootIndexes[0], \"green: \", stepsRfootIndexes[0], \"second red: \", stepsLfootIndexes[1], \"end: \", end\n else:\n end = len(y1)\n print \"red over green| \", \"red: \", stepsLfootIndexes[0], \"green: \", stepsRfootIndexes[0], \"second red: -----\", \"end: \", end\n\n \n else:\n if len(stepsRfootIndexes) > 2:\n testPoint = stepsRfootIndexes[1]\n while(y2[testPoint]>y1[testPoint]):\n testPoint = testPoint + 1\n end = testPoint + 5\n print \"green over red| \", \"green: \", stepsRfootIndexes[0], \"red: \", stepsLfootIndexes[0], \"second green: \", stepsRfootIndexes[1], \"end: \", end\n else:\n end = len(y2)\n print \"green over red| \", \"green: \", stepsRfootIndexes[0], \"red: \", stepsLfootIndexes[0], \"second green: -----\", \"end: \", end\n\n \n\n plt.plot(x1, y1,'ro')\n plt.plot(x1, y2,'g')\n plt.show()\n\n\n\n","license":"gpl-3.0"} {"repo_name":"otmaneJai\/Zipline","path":"zipline\/utils\/tradingcalendar_bmf.py","copies":"17","size":"7576","content":"#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport pytz\n\nfrom datetime import datetime\nfrom dateutil import rrule\nfrom zipline.utils.tradingcalendar import end, canonicalize_datetime, \\\n get_open_and_closes\n\nstart = pd.Timestamp('1994-01-01', tz='UTC')\n\n\ndef get_non_trading_days(start, end):\n non_trading_rules = []\n\n start = canonicalize_datetime(start)\n end = canonicalize_datetime(end)\n\n weekends = rrule.rrule(\n rrule.YEARLY,\n byweekday=(rrule.SA, rrule.SU),\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(weekends)\n\n # Universal confraternization\n conf_universal = rrule.rrule(\n rrule.MONTHLY,\n byyearday=1,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(conf_universal)\n\n # Sao Paulo city birthday\n aniversario_sao_paulo = rrule.rrule(\n rrule.MONTHLY,\n bymonth=1,\n bymonthday=25,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(aniversario_sao_paulo)\n\n # Carnival Monday\n carnaval_segunda = rrule.rrule(\n rrule.MONTHLY,\n byeaster=-48,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(carnaval_segunda)\n\n # Carnival Tuesday\n carnaval_terca = 
rrule.rrule(\n rrule.MONTHLY,\n byeaster=-47,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(carnaval_terca)\n\n # Passion of the Christ\n sexta_paixao = rrule.rrule(\n rrule.MONTHLY,\n byeaster=-2,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(sexta_paixao)\n\n # Corpus Christi\n corpus_christi = rrule.rrule(\n rrule.MONTHLY,\n byeaster=60,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(corpus_christi)\n\n tiradentes = rrule.rrule(\n rrule.MONTHLY,\n bymonth=4,\n bymonthday=21,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(tiradentes)\n\n # Labor day\n dia_trabalho = rrule.rrule(\n rrule.MONTHLY,\n bymonth=5,\n bymonthday=1,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(dia_trabalho)\n\n # Constitutionalist Revolution\n constitucionalista = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=9,\n cache=True,\n dtstart=datetime(1997, 1, 1, tzinfo=pytz.utc),\n until=end\n )\n non_trading_rules.append(constitucionalista)\n\n # Independency day\n independencia = rrule.rrule(\n rrule.MONTHLY,\n bymonth=9,\n bymonthday=7,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(independencia)\n\n # Our Lady of Aparecida\n aparecida = rrule.rrule(\n rrule.MONTHLY,\n bymonth=10,\n bymonthday=12,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(aparecida)\n\n # All Souls' day\n finados = rrule.rrule(\n rrule.MONTHLY,\n bymonth=11,\n bymonthday=2,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(finados)\n\n # Proclamation of the Republic\n proclamacao_republica = rrule.rrule(\n rrule.MONTHLY,\n bymonth=11,\n bymonthday=15,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(proclamacao_republica)\n\n # Day of Black Awareness\n consciencia_negra = rrule.rrule(\n rrule.MONTHLY,\n bymonth=11,\n bymonthday=20,\n cache=True,\n dtstart=datetime(2004, 1, 1, tzinfo=pytz.utc),\n until=end\n )\n non_trading_rules.append(consciencia_negra)\n\n # Christmas Eve\n vespera_natal = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=24,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(vespera_natal)\n\n # Christmas\n natal = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=25,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(natal)\n\n # New Year Eve\n ano_novo = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=31,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(ano_novo)\n\n # New Year Eve on saturday\n ano_novo_sab = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=30,\n byweekday=rrule.FR,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(ano_novo_sab)\n\n non_trading_ruleset = rrule.rruleset()\n\n for rule in non_trading_rules:\n non_trading_ruleset.rrule(rule)\n\n non_trading_days = non_trading_ruleset.between(start, end, inc=True)\n\n # World Cup 2014 Opening\n non_trading_days.append(datetime(2014, 6, 12, tzinfo=pytz.utc))\n\n non_trading_days.sort()\n return pd.DatetimeIndex(non_trading_days)\n\nnon_trading_days = get_non_trading_days(start, end)\ntrading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)\n\n\ndef get_trading_days(start, end, trading_day=trading_day):\n return pd.date_range(start=start.date(),\n end=end.date(),\n freq=trading_day).tz_localize('UTC')\n\ntrading_days = get_trading_days(start, end)\n\n\n# Ash 
Wednesday\nquarta_cinzas = rrule.rrule(\n rrule.MONTHLY,\n byeaster=-46,\n cache=True,\n dtstart=start,\n until=end\n)\n\n\ndef get_early_closes(start, end):\n # TSX closed at 1:00 PM on december 24th.\n\n start = canonicalize_datetime(start)\n end = canonicalize_datetime(end)\n\n early_close_rules = []\n\n early_close_rules.append(quarta_cinzas)\n\n early_close_ruleset = rrule.rruleset()\n\n for rule in early_close_rules:\n early_close_ruleset.rrule(rule)\n early_closes = early_close_ruleset.between(start, end, inc=True)\n\n early_closes.sort()\n return pd.DatetimeIndex(early_closes)\n\nearly_closes = get_early_closes(start, end)\n\n\ndef get_open_and_close(day, early_closes):\n # only \"early close\" event in Bovespa actually is a late start\n # as the market only opens at 1pm\n open_hour = 13 if day in quarta_cinzas else 10\n market_open = pd.Timestamp(\n datetime(\n year=day.year,\n month=day.month,\n day=day.day,\n hour=open_hour,\n minute=00),\n tz='America\/Sao_Paulo').tz_convert('UTC')\n market_close = pd.Timestamp(\n datetime(\n year=day.year,\n month=day.month,\n day=day.day,\n hour=16),\n tz='America\/Sao_Paulo').tz_convert('UTC')\n\n return market_open, market_close\n\nopen_and_closes = get_open_and_closes(trading_days, early_closes,\n get_open_and_close)\n","license":"apache-2.0"} {"repo_name":"kylerbrown\/scikit-learn","path":"examples\/feature_selection\/plot_rfe_with_cross_validation.py","copies":"226","size":"1384","content":"\"\"\"\n===================================================\nRecursive feature elimination with cross-validation\n===================================================\n\nA recursive feature elimination example with automatic tuning of the\nnumber of features selected with cross-validation.\n\"\"\"\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.datasets import make_classification\n\n# Build a classification task using 3 informative features\nX, y = make_classification(n_samples=1000, n_features=25, n_informative=3,\n n_redundant=2, n_repeated=0, n_classes=8,\n n_clusters_per_class=1, random_state=0)\n\n# Create the RFE object and compute a cross-validated score.\nsvc = SVC(kernel=\"linear\")\n# The \"accuracy\" scoring is proportional to the number of correct\n# classifications\nrfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),\n scoring='accuracy')\nrfecv.fit(X, y)\n\nprint(\"Optimal number of features : %d\" % rfecv.n_features_)\n\n# Plot number of features VS. 
cross-validation scores\nplt.figure()\nplt.xlabel(\"Number of features selected\")\nplt.ylabel(\"Cross validation score (nb of correct classifications)\")\nplt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"nasseralkmim\/SaPy","path":"sapy\/plotter.py","copies":"1","size":"4743","content":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Line3D\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\n\ndef window(name):\n return plt.figure(name)\n\n\ndef show():\n plt.show()\n return None\n\n\ndef undeformed(model):\n \"\"\"Plot the undeformed structure according to the dimension\n\n \"\"\"\n if model.ndm == 2:\n undeformed = window('Undeformed')\n axes = undeformed.add_subplot(111, aspect='equal')\n geo2d(model.XYZ, model.CON, axes, color='black')\n label2d(model.XYZ, model.CON, axes)\n undeformed.tight_layout()\n\n if model.ndm == 3:\n undeformed = window('Undeformed')\n axes = undeformed.add_subplot(111, projection='3d', aspect='equal')\n geo3d(model.XYZ, model.CON, axes, 'black')\n label3d(model.XYZ, model.CON, axes)\n undeformed.tight_layout()\n\n\ndef deformed(model, U):\n \"\"\"Plot the deformed structure according to the dimension\n\n \"\"\"\n CON = model.CON\n\n XYZ = np.copy(model.XYZ)\n for n in range(model.nn):\n for d in range(model.ndf[n]):\n dof = model.DOF[n, d]\n XYZ[n, d] += U[dof]\n\n if model.ndm == 2:\n deformed = window('Deformed')\n axes = deformed.add_subplot(111, aspect='equal')\n geo2d(XYZ, CON, axes, 'tomato')\n geo2d(model.XYZ, model.CON, axes, 'black')\n label2d(XYZ, CON, axes)\n deformed.tight_layout()\n\n if model.ndm == 3:\n deformed = window('Deformed')\n axes = deformed.add_subplot(111, projection='3d', aspect='equal')\n geo3d(model.XYZ, model.CON, axes, 'black')\n geo3d(XYZ, CON, axes, 'tomato')\n label3d(XYZ, CON, axes)\n deformed.tight_layout()\n\n\ndef geo3d(XYZ, CON, axes, color):\n \"\"\"Plot the 3d model\n\n \"\"\"\n axes.set_xlabel('x')\n axes.set_ylabel('y')\n axes.set_zlabel('z')\n\n # draw nodes\n for node, xyz in enumerate(XYZ):\n axes.scatter(xyz[0], xyz[1], xyz[2], c='k', alpha=1, marker='s')\n\n # draw edges\n for ele, con in enumerate(CON):\n xs = [XYZ[con[0]][0], XYZ[con[1]][0]]\n ys = [XYZ[con[0]][1], XYZ[con[1]][1]]\n zs = [XYZ[con[0]][2], XYZ[con[1]][2]]\n line = Line3D(xs, ys, zs, linewidth=1.0, color=color)\n axes.add_line(line)\n\n\ndef label3d(XYZ, CON, axes):\n \"\"\"Plot the nodes and element label\n\n \"\"\"\n for node, xyz in enumerate(XYZ):\n axes.text(xyz[0], xyz[1], xyz[2], str(node), color='b', size=10)\n\n for ele, con in enumerate(CON):\n xm = (XYZ[con[0]][0] + XYZ[con[1]][0])\/2\n ym = (XYZ[con[0]][1] + XYZ[con[1]][1])\/2\n zm = (XYZ[con[0]][2] + XYZ[con[1]][2])\/2\n axes.text(xm, ym, zm, str(ele), color='g', size=10)\n\n\ndef geo2d(XYZ, CON, axes, color):\n \"\"\"Plot the 2d model\n\n \"\"\"\n axes.set_xlabel('x')\n axes.set_ylabel('y')\n\n # draw nodes\n for xyz in XYZ:\n axes.scatter(xyz[0], xyz[1], c='k', alpha=1, marker='s')\n\n # draw edges\n for con in CON:\n xs = [XYZ[con[0]][0], XYZ[con[1]][0]]\n ys = [XYZ[con[0]][1], XYZ[con[1]][1]]\n line = Line2D(xs, ys, linewidth=1.0, color=color)\n axes.add_line(line)\n\n\ndef label2d(XYZ, CON, axes):\n \"\"\"Plot the nodes and element label\n\n \"\"\"\n for node, xyz in enumerate(XYZ):\n axes.text(xyz[0], xyz[1], str(node), color='b', size=10)\n\n for ele, con in enumerate(CON):\n xm = (XYZ[con[0]][0] + XYZ[con[1]][0])\/2\n ym = 
(XYZ[con[0]][1] + XYZ[con[1]][1])\/2\n axes.text(xm, ym, str(ele), color='g', size=10)\n\n\ndef axialforce(model, Q):\n \"\"\"Plot axial force\n\n \"\"\"\n if model.ndm == 2:\n axial = window('Axial')\n axes = axial.add_subplot(111, aspect='equal')\n geo2d(model.XYZ, model.CON, axes, color='black')\n axial2d(model.XYZ, model.CON, Q, axes)\n axial.tight_layout()\n\n if model.ndm == 3:\n axial = window('Axial')\n axes = axial.add_subplot(111, projection='3d', aspect='equal')\n geo3d(model.XYZ, model.CON, axes, 'black')\n axial3d(model.XYZ, model.CON, Q, axes)\n axial.tight_layout()\n\n\ndef axial2d(XYZ, CON, Q, axes):\n \"\"\"Plot text with axial force value\n\n \"\"\"\n for ele, con in enumerate(CON):\n xm = (XYZ[con[0]][0] + XYZ[con[1]][0])\/2\n ym = (XYZ[con[0]][1] + XYZ[con[1]][1])\/2\n axes.text(xm, ym, str(np.round_(Q[ele], 1)), color='g', size=10)\n\n\ndef axial3d(XYZ, CON, Q, axes):\n \"\"\"Plot text with axial force value for 3d plot\n\n \"\"\"\n for ele, con in enumerate(CON):\n xm = (XYZ[con[0]][0] + XYZ[con[1]][0])\/2\n ym = (XYZ[con[0]][1] + XYZ[con[1]][1])\/2\n zm = (XYZ[con[0]][2] + XYZ[con[1]][2])\/2\n axes.text(xm, ym, zm, str(np.round_(Q[ele], 1)), color='g', size=10)\n","license":"gpl-3.0"} {"repo_name":"hennersz\/pySpace","path":"basemap\/doc\/users\/figures\/omerc.py","copies":"6","size":"1065","content":"from mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport matplotlib.pyplot as plt\n# setup oblique mercator basemap.\n# width is width of map projection region in km (xmax-xmin_\n# height is height of map projection region in km (ymax-ymin)\n# lon_0, lat_0 are the central longitude and latitude of the projection.\n# lat_1,lon_1 and lat_2,lon_2 are two pairs of points that define\n# the projection centerline.\n# Map projection coordinates are automatically rotated to true north.\n# To avoid this, set no_rot=True.\n# area_thresh=1000 means don't plot coastline features less\n# than 1000 km^2 in area.\nm = Basemap(height=16700000,width=12000000,\n resolution='l',area_thresh=1000.,projection='omerc',\\\n lon_0=-100,lat_0=15,lon_2=-120,lat_2=65,lon_1=-50,lat_1=-55)\nm.drawcoastlines()\nm.fillcontinents(color='coral',lake_color='aqua')\n# draw parallels and meridians.\nm.drawparallels(np.arange(-80.,81.,20.))\nm.drawmeridians(np.arange(-180.,181.,20.))\nm.drawmapboundary(fill_color='aqua') \nplt.title(\"Oblique Mercator Projection\")\nplt.show()\n","license":"gpl-3.0"} {"repo_name":"jlegendary\/scikit-learn","path":"examples\/plot_multilabel.py","copies":"87","size":"4279","content":"# Authors: Vlad Niculae, Mathieu Blondel\n# License: BSD 3 clause\n\"\"\"\n=========================\nMultilabel classification\n=========================\n\nThis example simulates a multi-label document classification problem. The\ndataset is generated randomly based on the following process:\n\n - pick the number of labels: n ~ Poisson(n_labels)\n - n times, choose a class c: c ~ Multinomial(theta)\n - pick the document length: k ~ Poisson(length)\n - k times, choose a word: w ~ Multinomial(theta_c)\n\nIn the above process, rejection sampling is used to make sure that n is more\nthan 2, and that the document length is never zero. Likewise, we reject classes\nwhich have already been chosen. 
The documents that are assigned to both\nclasses are plotted surrounded by two colored circles.\n\nThe classification is performed by projecting to the first two principal\ncomponents found by PCA and CCA for visualisation purposes, followed by using\nthe :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two\nSVCs with linear kernels to learn a discriminative model for each class.\nNote that PCA is used to perform an unsupervised dimensionality reduction,\nwhile CCA is used to perform a supervised one.\n\nNote: in the plot, \"unlabeled samples\" does not mean that we don't know the\nlabels (as in semi-supervised learning) but that the samples simply do *not*\nhave a label.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_multilabel_classification\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.decomposition import PCA\nfrom sklearn.cross_decomposition import CCA\n\n\ndef plot_hyperplane(clf, min_x, max_x, linestyle, label):\n # get the separating hyperplane\n w = clf.coef_[0]\n a = -w[0] \/ w[1]\n xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough\n yy = a * xx - (clf.intercept_[0]) \/ w[1]\n plt.plot(xx, yy, linestyle, label=label)\n\n\ndef plot_subfigure(X, Y, subplot, title, transform):\n if transform == \"pca\":\n X = PCA(n_components=2).fit_transform(X)\n elif transform == \"cca\":\n X = CCA(n_components=2).fit(X, Y).transform(X)\n else:\n raise ValueError\n\n min_x = np.min(X[:, 0])\n max_x = np.max(X[:, 0])\n\n min_y = np.min(X[:, 1])\n max_y = np.max(X[:, 1])\n\n classif = OneVsRestClassifier(SVC(kernel='linear'))\n classif.fit(X, Y)\n\n plt.subplot(2, 2, subplot)\n plt.title(title)\n\n zero_class = np.where(Y[:, 0])\n one_class = np.where(Y[:, 1])\n plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')\n plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',\n facecolors='none', linewidths=2, label='Class 1')\n plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',\n facecolors='none', linewidths=2, label='Class 2')\n\n plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',\n 'Boundary\\nfor class 1')\n plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',\n 'Boundary\\nfor class 2')\n plt.xticks(())\n plt.yticks(())\n\n plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)\n plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)\n if subplot == 2:\n plt.xlabel('First principal component')\n plt.ylabel('Second principal component')\n plt.legend(loc=\"upper left\")\n\n\nplt.figure(figsize=(8, 6))\n\nX, Y = make_multilabel_classification(n_classes=2, n_labels=1,\n allow_unlabeled=True,\n return_indicator=True,\n random_state=1)\n\nplot_subfigure(X, Y, 1, \"With unlabeled samples + CCA\", \"cca\")\nplot_subfigure(X, Y, 2, \"With unlabeled samples + PCA\", \"pca\")\n\nX, Y = make_multilabel_classification(n_classes=2, n_labels=1,\n allow_unlabeled=False,\n return_indicator=True,\n random_state=1)\n\nplot_subfigure(X, Y, 3, \"Without unlabeled samples + CCA\", \"cca\")\nplot_subfigure(X, Y, 4, \"Without unlabeled samples + PCA\", \"pca\")\n\nplt.subplots_adjust(.04, .02, .97, .94, .09, .2)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"ephes\/scikit-learn","path":"sklearn\/decomposition\/dict_learning.py","copies":"83","size":"44062","content":"\"\"\" Dictionary learning\n\"\"\"\nfrom __future__ import print_function\n# Author: Vlad Niculae, Gael 
Varoquaux, Alexandre Gramfort\n# License: BSD 3 clause\n\nimport time\nimport sys\nimport itertools\n\nfrom math import sqrt, ceil\n\nimport numpy as np\nfrom scipy import linalg\nfrom numpy.lib.stride_tricks import as_strided\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..externals.joblib import Parallel, delayed, cpu_count\nfrom ..externals.six.moves import zip\nfrom ..utils import (check_array, check_random_state, gen_even_slices,\n gen_batches, _get_n_jobs)\nfrom ..utils.extmath import randomized_svd, row_norms\nfrom ..utils.validation import check_is_fitted\nfrom ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars\n\n\ndef _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',\n regularization=None, copy_cov=True,\n init=None, max_iter=1000):\n \"\"\"Generic sparse coding\n\n Each column of the result is the solution to a Lasso problem.\n\n Parameters\n ----------\n X: array of shape (n_samples, n_features)\n Data matrix.\n\n dictionary: array of shape (n_components, n_features)\n The dictionary matrix against which to solve the sparse coding of\n the data. Some of the algorithms assume normalized rows.\n\n gram: None | array, shape=(n_components, n_components)\n Precomputed Gram matrix, dictionary * dictionary'\n gram can be None if method is 'threshold'.\n\n cov: array, shape=(n_components, n_samples)\n Precomputed covariance, dictionary * X'\n\n algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}\n lars: uses the least angle regression method (linear_model.lars_path)\n lasso_lars: uses Lars to compute the Lasso solution\n lasso_cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). lasso_lars will be faster if\n the estimated components are sparse.\n omp: uses orthogonal matching pursuit to estimate the sparse solution\n threshold: squashes to zero all coefficients less than regularization\n from the projection dictionary * data'\n\n regularization : int | float\n The regularization parameter. It corresponds to alpha when\n algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.\n Otherwise it corresponds to n_nonzero_coefs.\n\n init: array of shape (n_samples, n_components)\n Initialization value of the sparse code. 
Only used if\n `algorithm='lasso_cd'`.\n\n max_iter: int, 1000 by default\n Maximum number of iterations to perform if `algorithm='lasso_cd'`.\n\n copy_cov: boolean, optional\n Whether to copy the precomputed covariance matrix; if False, it may be\n overwritten.\n\n Returns\n -------\n code: array of shape (n_components, n_features)\n The sparse codes\n\n See also\n --------\n sklearn.linear_model.lars_path\n sklearn.linear_model.orthogonal_mp\n sklearn.linear_model.Lasso\n SparseCoder\n \"\"\"\n if X.ndim == 1:\n X = X[:, np.newaxis]\n n_samples, n_features = X.shape\n if cov is None and algorithm != 'lasso_cd':\n # overwriting cov is safe\n copy_cov = False\n cov = np.dot(dictionary, X.T)\n\n if algorithm == 'lasso_lars':\n alpha = float(regularization) \/ n_features # account for scaling\n try:\n err_mgt = np.seterr(all='ignore')\n lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,\n verbose=False, normalize=False,\n precompute=gram, fit_path=False)\n lasso_lars.fit(dictionary.T, X.T, Xy=cov)\n new_code = lasso_lars.coef_\n finally:\n np.seterr(**err_mgt)\n\n elif algorithm == 'lasso_cd':\n alpha = float(regularization) \/ n_features # account for scaling\n clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,\n max_iter=max_iter, warm_start=True)\n clf.coef_ = init\n clf.fit(dictionary.T, X.T)\n new_code = clf.coef_\n\n elif algorithm == 'lars':\n try:\n err_mgt = np.seterr(all='ignore')\n lars = Lars(fit_intercept=False, verbose=False, normalize=False,\n precompute=gram, n_nonzero_coefs=int(regularization),\n fit_path=False)\n lars.fit(dictionary.T, X.T, Xy=cov)\n new_code = lars.coef_\n finally:\n np.seterr(**err_mgt)\n\n elif algorithm == 'threshold':\n new_code = ((np.sign(cov) *\n np.maximum(np.abs(cov) - regularization, 0)).T)\n\n elif algorithm == 'omp':\n new_code = orthogonal_mp_gram(gram, cov, regularization, None,\n row_norms(X, squared=True),\n copy_Xy=copy_cov).T\n else:\n raise ValueError('Sparse coding method must be \"lasso_lars\" '\n '\"lasso_cd\", \"lasso\", \"threshold\" or \"omp\", got %s.'\n % algorithm)\n return new_code\n\n\n# XXX : could be moved to the linear_model module\ndef sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',\n n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,\n max_iter=1000, n_jobs=1):\n \"\"\"Sparse coding\n\n Each row of the result is the solution to a sparse coding problem.\n The goal is to find a sparse array `code` such that::\n\n X ~= code * dictionary\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X: array of shape (n_samples, n_features)\n Data matrix\n\n dictionary: array of shape (n_components, n_features)\n The dictionary matrix against which to solve the sparse coding of\n the data. Some of the algorithms assume normalized rows for meaningful\n output.\n\n gram: array, shape=(n_components, n_components)\n Precomputed Gram matrix, dictionary * dictionary'\n\n cov: array, shape=(n_components, n_samples)\n Precomputed covariance, dictionary' * X\n\n algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}\n lars: uses the least angle regression method (linear_model.lars_path)\n lasso_lars: uses Lars to compute the Lasso solution\n lasso_cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). 
lasso_lars will be faster if\n the estimated components are sparse.\n omp: uses orthogonal matching pursuit to estimate the sparse solution\n threshold: squashes to zero all coefficients less than alpha from\n the projection dictionary * X'\n\n n_nonzero_coefs: int, 0.1 * n_features by default\n Number of nonzero coefficients to target in each column of the\n solution. This is only used by `algorithm='lars'` and `algorithm='omp'`\n and is overridden by `alpha` in the `omp` case.\n\n alpha: float, 1. by default\n If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\n penalty applied to the L1 norm.\n If `algorithm='threhold'`, `alpha` is the absolute value of the\n threshold below which coefficients will be squashed to zero.\n If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of\n the reconstruction error targeted. In this case, it overrides\n `n_nonzero_coefs`.\n\n init: array of shape (n_samples, n_components)\n Initialization value of the sparse codes. Only used if\n `algorithm='lasso_cd'`.\n\n max_iter: int, 1000 by default\n Maximum number of iterations to perform if `algorithm='lasso_cd'`.\n\n copy_cov: boolean, optional\n Whether to copy the precomputed covariance matrix; if False, it may be\n overwritten.\n\n n_jobs: int, optional\n Number of parallel jobs to run.\n\n Returns\n -------\n code: array of shape (n_samples, n_components)\n The sparse codes\n\n See also\n --------\n sklearn.linear_model.lars_path\n sklearn.linear_model.orthogonal_mp\n sklearn.linear_model.Lasso\n SparseCoder\n \"\"\"\n dictionary = check_array(dictionary)\n X = check_array(X)\n n_samples, n_features = X.shape\n n_components = dictionary.shape[0]\n\n if gram is None and algorithm != 'threshold':\n gram = np.dot(dictionary, dictionary.T)\n if cov is None:\n copy_cov = False\n cov = np.dot(dictionary, X.T)\n\n if algorithm in ('lars', 'omp'):\n regularization = n_nonzero_coefs\n if regularization is None:\n regularization = min(max(n_features \/ 10, 1), n_components)\n else:\n regularization = alpha\n if regularization is None:\n regularization = 1.\n\n if n_jobs == 1 or algorithm == 'threshold':\n return _sparse_encode(X, dictionary, gram, cov=cov,\n algorithm=algorithm,\n regularization=regularization, copy_cov=copy_cov,\n init=init, max_iter=max_iter)\n\n # Enter parallel code block\n code = np.empty((n_samples, n_components))\n slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))\n\n code_views = Parallel(n_jobs=n_jobs)(\n delayed(_sparse_encode)(\n X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,\n regularization=regularization, copy_cov=copy_cov,\n init=init[this_slice] if init is not None else None,\n max_iter=max_iter)\n for this_slice in slices)\n for this_slice, this_view in zip(slices, code_views):\n code[this_slice] = this_view\n return code\n\n\ndef _update_dict(dictionary, Y, code, verbose=False, return_r2=False,\n random_state=None):\n \"\"\"Update the dense dictionary factor in place.\n\n Parameters\n ----------\n dictionary: array of shape (n_features, n_components)\n Value of the dictionary at the previous iteration.\n\n Y: array of shape (n_features, n_samples)\n Data matrix.\n\n code: array of shape (n_components, n_samples)\n Sparse coding of the data against which to optimize the dictionary.\n\n verbose:\n Degree of output the procedure will print.\n\n return_r2: bool\n Whether to compute and return the residual sum of squares corresponding\n to the computed solution.\n\n random_state: int or RandomState\n Pseudo number 
generator state used for random sampling.\n\n Returns\n -------\n dictionary: array of shape (n_features, n_components)\n Updated dictionary.\n\n \"\"\"\n n_components = len(code)\n n_samples = Y.shape[0]\n random_state = check_random_state(random_state)\n # Residuals, computed 'in-place' for efficiency\n R = -np.dot(dictionary, code)\n R += Y\n R = np.asfortranarray(R)\n ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))\n for k in range(n_components):\n # R <- 1.0 * U_k * V_k^T + R\n R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)\n dictionary[:, k] = np.dot(R, code[k, :].T)\n # Scale k'th atom\n atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])\n if atom_norm_square < 1e-20:\n if verbose == 1:\n sys.stdout.write(\"+\")\n sys.stdout.flush()\n elif verbose:\n print(\"Adding new random atom\")\n dictionary[:, k] = random_state.randn(n_samples)\n # Setting corresponding coefs to 0\n code[k, :] = 0.0\n dictionary[:, k] \/= sqrt(np.dot(dictionary[:, k],\n dictionary[:, k]))\n else:\n dictionary[:, k] \/= sqrt(atom_norm_square)\n # R <- -1.0 * U_k * V_k^T + R\n R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)\n if return_r2:\n R **= 2\n # R is fortran-ordered. For numpy version < 1.6, sum does not\n # follow the quick striding first, and is thus inefficient on\n # fortran ordered data. We take a flat view of the data with no\n # striding\n R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))\n R = np.sum(R)\n return dictionary, R\n return dictionary\n\n\ndef dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,\n method='lars', n_jobs=1, dict_init=None, code_init=None,\n callback=None, verbose=False, random_state=None,\n return_n_iter=False):\n \"\"\"Solves a dictionary learning matrix factorization problem.\n\n Finds the best dictionary and the corresponding sparse code for\n approximating the data matrix X by solving::\n\n (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1\n (U,V)\n with || V_k ||_2 = 1 for all 0 <= k < n_components\n\n where V is the dictionary and U is the sparse code.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X: array of shape (n_samples, n_features)\n Data matrix.\n\n n_components: int,\n Number of dictionary atoms to extract.\n\n alpha: int,\n Sparsity controlling parameter.\n\n max_iter: int,\n Maximum number of iterations to perform.\n\n tol: float,\n Tolerance for the stopping condition.\n\n method: {'lars', 'cd'}\n lars: uses the least angle regression method to solve the lasso problem\n (linear_model.lars_path)\n cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). 
Lars will be faster if\n the estimated components are sparse.\n\n n_jobs: int,\n Number of parallel jobs to run, or -1 to autodetect.\n\n dict_init: array of shape (n_components, n_features),\n Initial value for the dictionary for warm restart scenarios.\n\n code_init: array of shape (n_samples, n_components),\n Initial value for the sparse code for warm restart scenarios.\n\n callback:\n Callable that gets invoked every five iterations.\n\n verbose:\n Degree of output the procedure will print.\n\n random_state: int or RandomState\n Pseudo number generator state used for random sampling.\n\n return_n_iter : bool\n Whether or not to return the number of iterations.\n\n Returns\n -------\n code: array of shape (n_samples, n_components)\n The sparse code factor in the matrix factorization.\n\n dictionary: array of shape (n_components, n_features),\n The dictionary factor in the matrix factorization.\n\n errors: array\n Vector of errors at each iteration.\n\n n_iter : int\n Number of iterations run. Returned only if `return_n_iter` is\n set to True.\n\n See also\n --------\n dict_learning_online\n DictionaryLearning\n MiniBatchDictionaryLearning\n SparsePCA\n MiniBatchSparsePCA\n \"\"\"\n if method not in ('lars', 'cd'):\n raise ValueError('Coding method %r not supported as a fit algorithm.'\n % method)\n method = 'lasso_' + method\n\n t0 = time.time()\n # Avoid integer division problems\n alpha = float(alpha)\n random_state = check_random_state(random_state)\n\n if n_jobs == -1:\n n_jobs = cpu_count()\n\n # Init the code and the dictionary with SVD of Y\n if code_init is not None and dict_init is not None:\n code = np.array(code_init, order='F')\n # Don't copy V, it will happen below\n dictionary = dict_init\n else:\n code, S, dictionary = linalg.svd(X, full_matrices=False)\n dictionary = S[:, np.newaxis] * dictionary\n r = len(dictionary)\n if n_components <= r: # True even if n_components=None\n code = code[:, :n_components]\n dictionary = dictionary[:n_components, :]\n else:\n code = np.c_[code, np.zeros((len(code), n_components - r))]\n dictionary = np.r_[dictionary,\n np.zeros((n_components - r, dictionary.shape[1]))]\n\n # Fortran-order dict, as we are going to access its row vectors\n dictionary = np.array(dictionary, order='F')\n\n residuals = 0\n\n errors = []\n current_cost = np.nan\n\n if verbose == 1:\n print('[dict_learning]', end=' ')\n\n # If max_iter is 0, number of iterations returned should be zero\n ii = -1\n\n for ii in range(max_iter):\n dt = (time.time() - t0)\n if verbose == 1:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n elif verbose:\n print (\"Iteration % 3i \"\n \"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)\"\n % (ii, dt, dt \/ 60, current_cost))\n\n # Update code\n code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,\n init=code, n_jobs=n_jobs)\n # Update dictionary\n dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,\n verbose=verbose, return_r2=True,\n random_state=random_state)\n dictionary = dictionary.T\n\n # Cost function\n current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))\n errors.append(current_cost)\n\n if ii > 0:\n dE = errors[-2] - errors[-1]\n # assert(dE >= -tol * errors[-1])\n if dE < tol * errors[-1]:\n if verbose == 1:\n # A line return\n print(\"\")\n elif verbose:\n print(\"--- Convergence reached after %d iterations\" % ii)\n break\n if ii % 5 == 0 and callback is not None:\n callback(locals())\n\n if return_n_iter:\n return code, dictionary, errors, ii + 1\n else:\n return code, dictionary, 
errors\n\n\ndef dict_learning_online(X, n_components=2, alpha=1, n_iter=100,\n return_code=True, dict_init=None, callback=None,\n batch_size=3, verbose=False, shuffle=True, n_jobs=1,\n method='lars', iter_offset=0, random_state=None,\n return_inner_stats=False, inner_stats=None,\n return_n_iter=False):\n \"\"\"Solves a dictionary learning matrix factorization problem online.\n\n Finds the best dictionary and the corresponding sparse code for\n approximating the data matrix X by solving::\n\n (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1\n (U,V)\n with || V_k ||_2 = 1 for all 0 <= k < n_components\n\n where V is the dictionary and U is the sparse code. This is\n accomplished by repeatedly iterating over mini-batches by slicing\n the input data.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X: array of shape (n_samples, n_features)\n Data matrix.\n\n n_components : int,\n Number of dictionary atoms to extract.\n\n alpha : float,\n Sparsity controlling parameter.\n\n n_iter : int,\n Number of iterations to perform.\n\n return_code : boolean,\n Whether to also return the code U or just the dictionary V.\n\n dict_init : array of shape (n_components, n_features),\n Initial value for the dictionary for warm restart scenarios.\n\n callback :\n Callable that gets invoked every five iterations.\n\n batch_size : int,\n The number of samples to take in each batch.\n\n verbose :\n Degree of output the procedure will print.\n\n shuffle : boolean,\n Whether to shuffle the data before splitting it in batches.\n\n n_jobs : int,\n Number of parallel jobs to run, or -1 to autodetect.\n\n method : {'lars', 'cd'}\n lars: uses the least angle regression method to solve the lasso problem\n (linear_model.lars_path)\n cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). Lars will be faster if\n the estimated components are sparse.\n\n iter_offset : int, default 0\n Number of previous iterations completed on the dictionary used for\n initialization.\n\n random_state : int or RandomState\n Pseudo number generator state used for random sampling.\n\n return_inner_stats : boolean, optional\n Return the inner statistics A (dictionary covariance) and B\n (data approximation). Useful to restart the algorithm in an\n online setting. If return_inner_stats is True, return_code is\n ignored\n\n inner_stats : tuple of (A, B) ndarrays\n Inner sufficient statistics that are kept by the algorithm.\n Passing them at initialization is useful in online settings, to\n avoid loosing the history of the evolution.\n A (n_components, n_components) is the dictionary covariance matrix.\n B (n_features, n_components) is the data approximation matrix\n\n return_n_iter : bool\n Whether or not to return the number of iterations.\n\n Returns\n -------\n code : array of shape (n_samples, n_components),\n the sparse code (only returned if `return_code=True`)\n\n dictionary : array of shape (n_components, n_features),\n the solutions to the dictionary learning problem\n\n n_iter : int\n Number of iterations run. 
Returned only if `return_n_iter` is\n set to `True`.\n\n See also\n --------\n dict_learning\n DictionaryLearning\n MiniBatchDictionaryLearning\n SparsePCA\n MiniBatchSparsePCA\n\n \"\"\"\n if n_components is None:\n n_components = X.shape[1]\n\n if method not in ('lars', 'cd'):\n raise ValueError('Coding method not supported as a fit algorithm.')\n method = 'lasso_' + method\n\n t0 = time.time()\n n_samples, n_features = X.shape\n # Avoid integer division problems\n alpha = float(alpha)\n random_state = check_random_state(random_state)\n\n if n_jobs == -1:\n n_jobs = cpu_count()\n\n # Init V with SVD of X\n if dict_init is not None:\n dictionary = dict_init\n else:\n _, S, dictionary = randomized_svd(X, n_components,\n random_state=random_state)\n dictionary = S[:, np.newaxis] * dictionary\n r = len(dictionary)\n if n_components <= r:\n dictionary = dictionary[:n_components, :]\n else:\n dictionary = np.r_[dictionary,\n np.zeros((n_components - r, dictionary.shape[1]))]\n dictionary = np.ascontiguousarray(dictionary.T)\n\n if verbose == 1:\n print('[dict_learning]', end=' ')\n\n if shuffle:\n X_train = X.copy()\n random_state.shuffle(X_train)\n else:\n X_train = X\n\n batches = gen_batches(n_samples, batch_size)\n batches = itertools.cycle(batches)\n\n # The covariance of the dictionary\n if inner_stats is None:\n A = np.zeros((n_components, n_components))\n # The data approximation\n B = np.zeros((n_features, n_components))\n else:\n A = inner_stats[0].copy()\n B = inner_stats[1].copy()\n\n # If n_iter is zero, we need to return zero.\n ii = iter_offset - 1\n\n for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):\n this_X = X_train[batch]\n dt = (time.time() - t0)\n if verbose == 1:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n elif verbose:\n if verbose > 10 or ii % ceil(100. 
\/ verbose) == 0:\n print (\"Iteration % 3i (elapsed time: % 3is, % 4.1fmn)\"\n % (ii, dt, dt \/ 60))\n\n this_code = sparse_encode(this_X, dictionary.T, algorithm=method,\n alpha=alpha, n_jobs=n_jobs).T\n\n # Update the auxiliary variables\n if ii < batch_size - 1:\n theta = float((ii + 1) * batch_size)\n else:\n theta = float(batch_size ** 2 + ii + 1 - batch_size)\n beta = (theta + 1 - batch_size) \/ (theta + 1)\n\n A *= beta\n A += np.dot(this_code, this_code.T)\n B *= beta\n B += np.dot(this_X.T, this_code.T)\n\n # Update dictionary\n dictionary = _update_dict(dictionary, B, A, verbose=verbose,\n random_state=random_state)\n # XXX: Can the residuals be of any use?\n\n # Maybe we need a stopping criteria based on the amount of\n # modification in the dictionary\n if callback is not None:\n callback(locals())\n\n if return_inner_stats:\n if return_n_iter:\n return dictionary.T, (A, B), ii - iter_offset + 1\n else:\n return dictionary.T, (A, B)\n if return_code:\n if verbose > 1:\n print('Learning code...', end=' ')\n elif verbose == 1:\n print('|', end=' ')\n code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,\n n_jobs=n_jobs)\n if verbose > 1:\n dt = (time.time() - t0)\n print('done (total time: % 3is, % 4.1fmn)' % (dt, dt \/ 60))\n if return_n_iter:\n return code, dictionary.T, ii - iter_offset + 1\n else:\n return code, dictionary.T\n\n if return_n_iter:\n return dictionary.T, ii - iter_offset + 1\n else:\n return dictionary.T\n\n\nclass SparseCodingMixin(TransformerMixin):\n \"\"\"Sparse coding mixin\"\"\"\n\n def _set_sparse_coding_params(self, n_components,\n transform_algorithm='omp',\n transform_n_nonzero_coefs=None,\n transform_alpha=None, split_sign=False,\n n_jobs=1):\n self.n_components = n_components\n self.transform_algorithm = transform_algorithm\n self.transform_n_nonzero_coefs = transform_n_nonzero_coefs\n self.transform_alpha = transform_alpha\n self.split_sign = split_sign\n self.n_jobs = n_jobs\n\n def transform(self, X, y=None):\n \"\"\"Encode the data as a sparse combination of the dictionary atoms.\n\n Coding method is determined by the object parameter\n `transform_algorithm`.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_features)\n Test data to be transformed, must have the same number of\n features as the data used to train the model.\n\n Returns\n -------\n X_new : array, shape (n_samples, n_components)\n Transformed data\n\n \"\"\"\n check_is_fitted(self, 'components_')\n\n # XXX : kwargs is not documented\n X = check_array(X)\n n_samples, n_features = X.shape\n\n code = sparse_encode(\n X, self.components_, algorithm=self.transform_algorithm,\n n_nonzero_coefs=self.transform_n_nonzero_coefs,\n alpha=self.transform_alpha, n_jobs=self.n_jobs)\n\n if self.split_sign:\n # feature vector is split into a positive and negative side\n n_samples, n_features = code.shape\n split_code = np.empty((n_samples, 2 * n_features))\n split_code[:, :n_features] = np.maximum(code, 0)\n split_code[:, n_features:] = -np.minimum(code, 0)\n code = split_code\n\n return code\n\n\nclass SparseCoder(BaseEstimator, SparseCodingMixin):\n \"\"\"Sparse coding\n\n Finds a sparse representation of data against a fixed, precomputed\n dictionary.\n\n Each row of the result is the solution to a sparse coding problem.\n The goal is to find a sparse array `code` such that::\n\n X ~= code * dictionary\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n dictionary : array, [n_components, n_features]\n The dictionary atoms used for sparse coding. 
Lines are assumed to be\n normalized to unit norm.\n\n transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \\\n 'threshold'}\n Algorithm used to transform the data:\n lars: uses the least angle regression method (linear_model.lars_path)\n lasso_lars: uses Lars to compute the Lasso solution\n lasso_cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). lasso_lars will be faster if\n the estimated components are sparse.\n omp: uses orthogonal matching pursuit to estimate the sparse solution\n threshold: squashes to zero all coefficients less than alpha from\n the projection ``dictionary * X'``\n\n transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default\n Number of nonzero coefficients to target in each column of the\n solution. This is only used by `algorithm='lars'` and `algorithm='omp'`\n and is overridden by `alpha` in the `omp` case.\n\n transform_alpha : float, 1. by default\n If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\n penalty applied to the L1 norm.\n If `algorithm='threshold'`, `alpha` is the absolute value of the\n threshold below which coefficients will be squashed to zero.\n If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of\n the reconstruction error targeted. In this case, it overrides\n `n_nonzero_coefs`.\n\n split_sign : bool, False by default\n Whether to split the sparse feature vector into the concatenation of\n its negative part and its positive part. This can improve the\n performance of downstream classifiers.\n\n n_jobs : int,\n number of parallel jobs to run\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n The unchanged dictionary atoms\n\n See also\n --------\n DictionaryLearning\n MiniBatchDictionaryLearning\n SparsePCA\n MiniBatchSparsePCA\n sparse_encode\n \"\"\"\n\n def __init__(self, dictionary, transform_algorithm='omp',\n transform_n_nonzero_coefs=None, transform_alpha=None,\n split_sign=False, n_jobs=1):\n self._set_sparse_coding_params(dictionary.shape[0],\n transform_algorithm,\n transform_n_nonzero_coefs,\n transform_alpha, split_sign, n_jobs)\n self.components_ = dictionary\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n return self\n\n\nclass DictionaryLearning(BaseEstimator, SparseCodingMixin):\n \"\"\"Dictionary learning\n\n Finds a dictionary (a set of atoms) that can best be used to represent data\n using a sparse code.\n\n Solves the optimization problem::\n\n (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1\n (U,V)\n with || V_k ||_2 = 1 for all 0 <= k < n_components\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_components : int,\n number of dictionary elements to extract\n\n alpha : float,\n sparsity controlling parameter\n\n max_iter : int,\n maximum number of iterations to perform\n\n tol : float,\n tolerance for numerical error\n\n fit_algorithm : {'lars', 'cd'}\n lars: uses the least angle regression method to solve the lasso problem\n (linear_model.lars_path)\n cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). 
Lars will be faster if\n the estimated components are sparse.\n\n transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \\\n 'threshold'}\n Algorithm used to transform the data\n lars: uses the least angle regression method (linear_model.lars_path)\n lasso_lars: uses Lars to compute the Lasso solution\n lasso_cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). lasso_lars will be faster if\n the estimated components are sparse.\n omp: uses orthogonal matching pursuit to estimate the sparse solution\n threshold: squashes to zero all coefficients less than alpha from\n the projection ``dictionary * X'``\n\n transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default\n Number of nonzero coefficients to target in each column of the\n solution. This is only used by `algorithm='lars'` and `algorithm='omp'`\n and is overridden by `alpha` in the `omp` case.\n\n transform_alpha : float, 1. by default\n If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\n penalty applied to the L1 norm.\n If `algorithm='threshold'`, `alpha` is the absolute value of the\n threshold below which coefficients will be squashed to zero.\n If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of\n the reconstruction error targeted. In this case, it overrides\n `n_nonzero_coefs`.\n\n split_sign : bool, False by default\n Whether to split the sparse feature vector into the concatenation of\n its negative part and its positive part. This can improve the\n performance of downstream classifiers.\n\n n_jobs : int,\n number of parallel jobs to run\n\n code_init : array of shape (n_samples, n_components),\n initial value for the code, for warm restart\n\n dict_init : array of shape (n_components, n_features),\n initial values for the dictionary, for warm restart\n\n verbose :\n degree of verbosity of the printed output\n\n random_state : int or RandomState\n Pseudo number generator state used for random sampling.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n dictionary atoms extracted from the data\n\n error_ : array\n vector of errors at each iteration\n\n n_iter_ : int\n Number of iterations run.\n\n Notes\n -----\n **References:**\n\n J. Mairal, F. Bach, J. Ponce, G. 
Sapiro, 2009: Online dictionary learning\n for sparse coding (http:\/\/www.di.ens.fr\/sierra\/pdfs\/icml09.pdf)\n\n See also\n --------\n SparseCoder\n MiniBatchDictionaryLearning\n SparsePCA\n MiniBatchSparsePCA\n \"\"\"\n def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,\n fit_algorithm='lars', transform_algorithm='omp',\n transform_n_nonzero_coefs=None, transform_alpha=None,\n n_jobs=1, code_init=None, dict_init=None, verbose=False,\n split_sign=False, random_state=None):\n\n self._set_sparse_coding_params(n_components, transform_algorithm,\n transform_n_nonzero_coefs,\n transform_alpha, split_sign, n_jobs)\n self.alpha = alpha\n self.max_iter = max_iter\n self.tol = tol\n self.fit_algorithm = fit_algorithm\n self.code_init = code_init\n self.dict_init = dict_init\n self.verbose = verbose\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model from data in X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self: object\n Returns the object itself\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = check_array(X)\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n\n V, U, E, self.n_iter_ = dict_learning(\n X, n_components, self.alpha,\n tol=self.tol, max_iter=self.max_iter,\n method=self.fit_algorithm,\n n_jobs=self.n_jobs,\n code_init=self.code_init,\n dict_init=self.dict_init,\n verbose=self.verbose,\n random_state=random_state,\n return_n_iter=True)\n self.components_ = U\n self.error_ = E\n return self\n\n\nclass MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):\n \"\"\"Mini-batch dictionary learning\n\n Finds a dictionary (a set of atoms) that can best be used to represent data\n using a sparse code.\n\n Solves the optimization problem::\n\n (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1\n (U,V)\n with || V_k ||_2 = 1 for all 0 <= k < n_components\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_components : int,\n number of dictionary elements to extract\n\n alpha : float,\n sparsity controlling parameter\n\n n_iter : int,\n total number of iterations to perform\n\n fit_algorithm : {'lars', 'cd'}\n lars: uses the least angle regression method to solve the lasso problem\n (linear_model.lars_path)\n cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). Lars will be faster if\n the estimated components are sparse.\n\n transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \\\n 'threshold'}\n Algorithm used to transform the data.\n lars: uses the least angle regression method (linear_model.lars_path)\n lasso_lars: uses Lars to compute the Lasso solution\n lasso_cd: uses the coordinate descent method to compute the\n Lasso solution (linear_model.Lasso). lasso_lars will be faster if\n the estimated components are sparse.\n omp: uses orthogonal matching pursuit to estimate the sparse solution\n threshold: squashes to zero all coefficients less than alpha from\n the projection dictionary * X'\n\n transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default\n Number of nonzero coefficients to target in each column of the\n solution. This is only used by `algorithm='lars'` and `algorithm='omp'`\n and is overridden by `alpha` in the `omp` case.\n\n transform_alpha : float, 1. 
by default\n If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the\n penalty applied to the L1 norm.\n If `algorithm='threshold'`, `alpha` is the absolute value of the\n threshold below which coefficients will be squashed to zero.\n If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of\n the reconstruction error targeted. In this case, it overrides\n `n_nonzero_coefs`.\n\n split_sign : bool, False by default\n Whether to split the sparse feature vector into the concatenation of\n its negative part and its positive part. This can improve the\n performance of downstream classifiers.\n\n n_jobs : int,\n number of parallel jobs to run\n\n dict_init : array of shape (n_components, n_features),\n initial value of the dictionary for warm restart scenarios\n\n verbose :\n degree of verbosity of the printed output\n\n batch_size : int,\n number of samples in each mini-batch\n\n shuffle : bool,\n whether to shuffle the samples before forming batches\n\n random_state : int or RandomState\n Pseudo number generator state used for random sampling.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n components extracted from the data\n\n inner_stats_ : tuple of (A, B) ndarrays\n Internal sufficient statistics that are kept by the algorithm.\n Keeping them is useful in online settings, to avoid loosing the\n history of the evolution, but they shouldn't have any use for the\n end user.\n A (n_components, n_components) is the dictionary covariance matrix.\n B (n_features, n_components) is the data approximation matrix\n\n n_iter_ : int\n Number of iterations run.\n\n Notes\n -----\n **References:**\n\n J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning\n for sparse coding (http:\/\/www.di.ens.fr\/sierra\/pdfs\/icml09.pdf)\n\n See also\n --------\n SparseCoder\n DictionaryLearning\n SparsePCA\n MiniBatchSparsePCA\n\n \"\"\"\n def __init__(self, n_components=None, alpha=1, n_iter=1000,\n fit_algorithm='lars', n_jobs=1, batch_size=3,\n shuffle=True, dict_init=None, transform_algorithm='omp',\n transform_n_nonzero_coefs=None, transform_alpha=None,\n verbose=False, split_sign=False, random_state=None):\n\n self._set_sparse_coding_params(n_components, transform_algorithm,\n transform_n_nonzero_coefs,\n transform_alpha, split_sign, n_jobs)\n self.alpha = alpha\n self.n_iter = n_iter\n self.fit_algorithm = fit_algorithm\n self.dict_init = dict_init\n self.verbose = verbose\n self.shuffle = shuffle\n self.batch_size = batch_size\n self.split_sign = split_sign\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model from data in X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = check_array(X)\n\n U, (A, B), self.n_iter_ = dict_learning_online(\n X, self.n_components, self.alpha,\n n_iter=self.n_iter, return_code=False,\n method=self.fit_algorithm,\n n_jobs=self.n_jobs, dict_init=self.dict_init,\n batch_size=self.batch_size, shuffle=self.shuffle,\n verbose=self.verbose, random_state=random_state,\n return_inner_stats=True,\n return_n_iter=True)\n self.components_ = U\n # Keep track of the state of the algorithm to be able to do\n # some online fitting (partial_fit)\n self.inner_stats_ = (A, B)\n self.iter_offset_ = self.n_iter\n return 
self\n\n def partial_fit(self, X, y=None, iter_offset=None):\n \"\"\"Updates the model using the data in X as a mini-batch.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples\n and n_features is the number of features.\n\n iter_offset: integer, optional\n The number of iteration on data batches that has been\n performed before this call to partial_fit. This is optional:\n if no number is passed, the memory of the object is\n used.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n if not hasattr(self, 'random_state_'):\n self.random_state_ = check_random_state(self.random_state)\n X = check_array(X)\n if hasattr(self, 'components_'):\n dict_init = self.components_\n else:\n dict_init = self.dict_init\n inner_stats = getattr(self, 'inner_stats_', None)\n if iter_offset is None:\n iter_offset = getattr(self, 'iter_offset_', 0)\n U, (A, B) = dict_learning_online(\n X, self.n_components, self.alpha,\n n_iter=self.n_iter, method=self.fit_algorithm,\n n_jobs=self.n_jobs, dict_init=dict_init,\n batch_size=len(X), shuffle=False,\n verbose=self.verbose, return_code=False,\n iter_offset=iter_offset, random_state=self.random_state_,\n return_inner_stats=True, inner_stats=inner_stats)\n self.components_ = U\n\n # Keep track of the state of the algorithm to be able to do\n # some online fitting (partial_fit)\n self.inner_stats_ = (A, B)\n self.iter_offset_ = iter_offset + self.n_iter\n return self\n","license":"bsd-3-clause"} {"repo_name":"melqkiades\/yelp","path":"source\/python\/topicmodeling\/external\/topicensemble\/unsupervised\/nmf.py","copies":"2","size":"1622","content":"import numpy as np\nfrom sklearn import decomposition\nimport logging as log\n\n# --------------------------------------------------------------\n\nclass SklNMF:\n\t\"\"\"\n\tWrapper class backed by the scikit-learn package NMF implementation.\n\t\"\"\"\n\tdef __init__( self, max_iters = 100, init_strategy = \"random\" ):\n\t\tself.max_iters = 100\n\t\tself.init_strategy = init_strategy\n\t\tself.W = None\n\t\tself.H = None\n\n\tdef apply( self, X, k = 2, init_W = None, init_H = None ):\n\t\t\"\"\"\n\t\tApply NMF to the specified document-term matrix X.\n\t\t\"\"\"\n\t\tself.W = None\n\t\tself.H = None\n\t\trandom_seed = np.random.randint( 1, 100000 )\n\t\tif not (init_W is None or init_H is None):\n\t\t\tmodel = decomposition.NMF( init=\"custom\", n_components=k, max_iter=self.max_iters, random_state = random_seed )\n\t\t\tself.W = model.fit_transform( X, W=init_W, H=init_H )\n\t\telse:\n\t\t\tmodel = decomposition.NMF( init=self.init_strategy, n_components=k, max_iter=self.max_iters, random_state = random_seed )\n\t\t\tself.W = model.fit_transform( X )\n\t\tself.H = model.components_\t\t\t\n\t\t\n\tdef rank_terms( self, topic_index, top = -1 ):\n\t\t\"\"\"\n\t\tReturn the top ranked terms for the specified topic, generated during the last NMF run.\n\t\t\"\"\"\n\t\tif self.H is None:\n\t\t\traise ValueError(\"No results for previous run available\")\n\t\t# NB: reverse\n\t\ttop_indices = np.argsort( self.H[topic_index,:] )[::-1]\n\t\t# truncate if necessary\n\t\tif top < 1 or top > len(top_indices):\n\t\t\treturn top_indices\n\t\treturn top_indices[0:top]\n\n\tdef generate_partition( self ):\n\t\tif self.W is None:\n\t\t\traise ValueError(\"No results for previous run available\")\n\t\treturn np.argmax( self.W, axis = 1 ).flatten().tolist()\t\t\n\n\n\n","license":"lgpl-2.1"} 
{"repo_name":"zrhans\/pythonanywhere","path":".virtualenvs\/django19\/lib\/python3.4\/site-packages\/matplotlib\/tri\/triplot.py","copies":"8","size":"3150","content":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom matplotlib.externals import six\n\nimport numpy as np\nfrom matplotlib.tri.triangulation import Triangulation\n\n\ndef triplot(ax, *args, **kwargs):\n \"\"\"\n Draw a unstructured triangular grid as lines and\/or markers.\n\n The triangulation to plot can be specified in one of two ways;\n either::\n\n triplot(triangulation, ...)\n\n where triangulation is a :class:`matplotlib.tri.Triangulation`\n object, or\n\n ::\n\n triplot(x, y, ...)\n triplot(x, y, triangles, ...)\n triplot(x, y, triangles=triangles, ...)\n triplot(x, y, mask=mask, ...)\n triplot(x, y, triangles, mask=mask, ...)\n\n in which case a Triangulation object will be created. See\n :class:`~matplotlib.tri.Triangulation` for a explanation of these\n possibilities.\n\n The remaining args and kwargs are the same as for\n :meth:`~matplotlib.axes.Axes.plot`.\n\n Return a list of 2 :class:`~matplotlib.lines.Line2D` containing\n respectively:\n\n - the lines plotted for triangles edges\n - the markers plotted for triangles nodes\n\n **Example:**\n\n .. plot:: mpl_examples\/pylab_examples\/triplot_demo.py\n \"\"\"\n import matplotlib.axes\n\n tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n x, y, edges = (tri.x, tri.y, tri.edges)\n\n # Decode plot format string, e.g., 'ro-'\n fmt = \"\"\n if len(args) > 0:\n fmt = args[0]\n linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)\n\n # Insert plot format string into a copy of kwargs (kwargs values prevail).\n kw = kwargs.copy()\n for key, val in zip(('linestyle', 'marker', 'color'),\n (linestyle, marker, color)):\n if val is not None:\n kw[key] = kwargs.get(key, val)\n\n # Draw lines without markers.\n # Note 1: If we drew markers here, most markers would be drawn more than\n # once as they belong to several edges.\n # Note 2: We insert nan values in the flattened edges arrays rather than\n # plotting directly (triang.x[edges].T, triang.y[edges].T)\n # as it considerably speeds-up code execution.\n linestyle = kw['linestyle']\n kw_lines = kw.copy()\n kw_lines['marker'] = 'None' # No marker to draw.\n kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.\n if (linestyle is not None) and (linestyle not in ['None', '', ' ']):\n tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)\n tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)\n tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),\n **kw_lines)\n else:\n tri_lines = ax.plot([], [], **kw_lines)\n\n # Draw markers separately.\n marker = kw['marker']\n kw_markers = kw.copy()\n kw_markers['linestyle'] = 'None' # No line to draw.\n if (marker is not None) and (marker not in ['None', '', ' ']):\n tri_markers = ax.plot(x, y, **kw_markers)\n else:\n tri_markers = ax.plot([], [], **kw_markers)\n\n return tri_lines + tri_markers\n","license":"apache-2.0"} {"repo_name":"joshgabriel\/dft-crossfilter","path":"CompleteApp\/crossfilter_app\/old_mains\/old_main.py","copies":"3","size":"10263","content":"# main.py that controls the whole app\n# to run: just run bokeh serve --show crossfilter_app in the benchmark-view repo\n\nfrom random import random\nimport os\n\nfrom bokeh.layouts import column\nfrom bokeh.models import Button\nfrom bokeh.models.widgets import Select, MultiSelect, Slider\nfrom bokeh.palettes 
import RdYlBu3\nfrom bokeh.plotting import figure, curdoc\n\n\n#### CROSSFILTER PART ##### >>> Module load errors coming up -- how to do a relative import?\nfrom crossview.crossfilter.models import CrossFilter\n#from benchmark.loader import load\n\n#### DATA INPUT FROM REST API ######\n#from benchmark.loader import load\n\n#### DATA INPUT STRAIGHT FROM PANDAS for test purposes ####\nimport pandas as pd\n\n##### PLOTTING PART -- GLOBAL FIGURE CREATION ########\n# create a plot and style its properties\n\n## global data interface to come from REST API\nvasp_data = pd.read_csv('..\/benchmark\/data\/francesca_data_head.csv')\n\np = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location='below')\n#p.border_fill_color = 'black'\n#p.background_fill_color = 'black'\np.outline_line_color = None\np.grid.grid_line_color = None\n\n\n#### FORMAT OF DATA SENT TO WIDGET #######\n\n# add a text renderer to our plot (no data yet)\nr = p.text(x=[], y=[], text=[], text_color=[], text_font_size=\"20pt\",\n           text_baseline=\"middle\", text_align=\"center\")\n\nr2 = p.circle(x=[], y=[])\n\ni = 0\n\nds = r.data_source\n\nds2 = r2.data_source\n\n##### WIDGET RESPONSES IN THE FORM OF CALLBACKS ######\n\n# create a callback that will add a number in a random location\ndef callback():\n    global i\n\n\n    # BEST PRACTICE --- update .data in one step with a new dict\n    new_data = dict()\n    new_data['x'] = ds.data['x'] + [random()*70 + 15]\n    new_data['y'] = ds.data['y'] + [random()*70 + 15]\n    new_data['text_color'] = ds.data['text_color'] + [RdYlBu3[i%3]]\n    new_data['text'] = ds.data['text'] + [str(i)]\n    ds.data = new_data\n\n    i = i + 1\n\n\n#### The make crossfilter callback\n\n#### make data loading as easy as possible for now straight from\n#### the benchmark data csv file, not from the API with the decorators\n\n#### TO DO after we see that the crossfilter and new bokeh play nicely\n##########: integrate with API and uncomment the decorators and data loader\n#@bokeh_app.route(\"\/bokeh\/benchmark\/\")\n#@object_page(\"benchmark\")\n\n#### RENDERERS OF WIDGETS #####\n\ndef make_bokeh_crossfilter(axis='k-point'):\n    \"\"\"The root crossfilter controller\"\"\"\n    # Loading the dft data head as a\n    # pandas dataframe\n    new_data = dict()\n#    new_data = load(\".\/benchmark\/data\/francesca_data_head\")\n    # use a straight pandas dataframe for now instead and follow the\n    # BEST PRACTICE described above: basically clean up the data object on each callback.\n    # data that will be given back on the callback\n    new_data = vasp_data # our data that will be replaced by the API\n    global p\n    p = CrossFilter.create(df=new_data)\n    print (type(p))\n    # don't know what the CrossFilter class really returns in terms of data, but for testing purposes let's\n    # return something that is compatible with the new_data dictionary returned in the\n    # vanilla example through the global object ds.data\n    # for example the x - y coordinates on the plots correspond to mins on the data set in k-point and value fields\n#    new_data['x'] = ds2.data['x'] + list(data[axis])\n#    new_data['y'] = ds2.data['y'] + list(data['value'])\n    # other stuff default as in vanilla callback()\n    # for test purposes to see actually what coordinate is getting plotted\n    # it is always going to be the same because only one min exists in the dataset\n    # it's at x = 6, y = -12\n    # SUCCESS: learned how to create a custom callback 
that loads a CSV file and does something with it\n#    print (\"New data from crossfilter\", new_data)\n    # finally assign to ds.data\n#    ds2.data = new_data\n\n\n\n\n\ndef make_wflow_crossfilter(tags={'element_widget':['Cu', 'Pd', 'Mo'], 'code_widget':['VASP'], 'ExchCorr':['PBE']}):\n    \"\"\"\n    demo crossfilter based on pure pandas dataframes that serves a data processing\n    workflow that selects inputs from widgets\n\n    args:\n    tags: dict of selections by up to 3 widgets\n\n    returns:\n    dictionary of crossfiltered dataframes that can further be processed down the workflow\n    \"\"\"\n\n    ## Actual widget controlled inputs ##\n\n    # elements = tags['element']\n    # exchanges = tags['ExchCorr']\n    # propys = tags['code_widget']\n\n    ## Demo user inputs for testing: selects everything in the test csv -- max data load ##\n\n    elements = np.unique(vasp_data['element'])\n    exchanges = np.unique(vasp_data['exchange'])\n    propys = ['B','dB','a0']\n\n\n    # final dictionary of crossfiltered dataframes\n    crossfilts = {}\n    # crossfiltering part - playing the role of the \"Crossfilter class in bokeh.models\"\n\n    for pr in propys:\n        for el in elements:\n            for ex in exchanges:\n                # crossfilter down to exchange and element\n                elems = vasp_data[vasp_data['element']==el]\n                exchs = elems[elems['exchange']==ex]\n                # separate into properties, energy, kpoints\n                p = exchs[exchs['property']==pr]\n                e = exchs[exchs['property']=='e0']\n\n                ##### *** Accuracy calculation based on default standards *** #####\n                # choose reference from dict\n                ref_e = expt_ref_prb[el][pr]\n                ref_w = wien_ref[el][pr]\n                # calculate percent errors on property - ACCURACY CALCULATION based on default standards\n                props = [v for v in p['value'] ]\n                percs_wien = [ (v - ref_w) \/ ref_w * 100 for v in p['value']]\n                percs_prb = [ (v - ref_e) \/ ref_e * 100 for v in p['value']]\n                kpts = [ k for k in p['k-point']]\n                kpts_atom = [ k**3 for k in p['k-point'] ]\n                ##### *** Accuracy calculation based on default standards *** #####\n\n                ##### *** Calculate prec_sigma of energy *** #####\n                energy = [ v for v in e['value']]\n                end= len(energy) - 1\n                prec_sigma = [ v - energy[end] for v in energy]\n\n                # make data frame of kpoints, energy, percent errors on property\n                if kpts and energy and props:\n                    NAME = '_'.join([el,ex,pr])\n                    Rdata =\\\n                    pd.DataFrame({'Kpoints_size':kpts, 'Kpoints_atom_density':kpts_atom, 'Energy':energy, 'Prec_Sigma':prec_sigma , pr:props, 'percent_error_wien':percs_wien, 'percent_error_expt':percs_prb })\n                    crossfilts[NAME] = Rdata\n\n\n\ndef calculate_prec(cross_df, automate= False):\n    \"\"\"\n    function that calculates the prec_inf using R\n    and returns a fully constructed plottable dataframe\n\n    Args:\n    cross_df: pandas dataframe containing the data\n    automate: bool, a to-do feature to automatically calculate the best fit\n\n    Returns:\n    dataframe containing the R added precision values to be\n    received most often by the plotting commander.\n    \"\"\"\n    import rpy2.robjects as ro\n    from rpy2.robjects import pandas2ri\n    from rpy2.robjects.packages import importr\n    import rpy2.robjects.numpy2ri\n    import rpy2.rinterface as rin\n\n\n    stats = importr('stats')\n    base = importr('base')\n    # activate R environment in python\n    rpy2.robjects.numpy2ri.activate()\n    pandas2ri.activate()\n    # read in necessary elements\n    # menu = [(\"Item 1\", \"item_1_value\"), (\"Item 2\", \"item_2_value\"), (\"Item 3\", \"item_3_value\")]\n    df = pd.DataFrame({'x': cross_df['Kpoints_atom_density'],\n                       'y': cross_df['Energy']})\n    ro.globalenv['dataframe']=df\n\n    ### *** R used to obtain the fit on the 
data to calculate prec_inf *** ###\n # perform regression - bokeh widgets can be used here to provide the inputs to the nls regression\n\n # some python to R translation of object names via the pandas - R dataframes\n y = df['y']\n x = df['x']\n l = len(y) - 1 # needed because R indexes list from 1 to len(list)\n\n # ***WIDGET inputs*** # OR AUTOMATE\n # the slider inputs on starting point or can be automated also\n l1 = 3\n l2 = 0\n fitover = rin.SexpVector(list(range(l1,l-l2)), rin.INTSXP)\n\n # numeric entry widget for 'b' is plausible for user to choose best starting guess\n start_guess = {'a': y[l], 'b': 5}\n start=pandas2ri.py2ri(pd.DataFrame(start_guess,index=start_guess))\n\n # drop down list selection of model\n model = 'y~a*x\/(b+x)'\n\n # Minimize function with weights and selection\n m = \\\n stats.nls(model, start = start, algorithm = \"port\", subset = fitover, weights = x^2, data=base.as_symbol('dataframe'))\n\n # Estimation of goodness of fit\n g = stats.cor(y[l1:l-l2],stats.predict(m))\n\n # Report summary of fit, values and error bars\n print( base.summary(m).rx2('coefficients') )\n\n # Extrapolation value is given by a\n a = stats.coef(m)[1]\n\n # Calculation of precision\n prec = abs(y-a)\n\n # test print outs of the data ? how to render onto html like Shiny if necesary ?\n\n print(\"We learn that the converged value is: {0} and best precision achieved in the measurement is {1}\".format(a, min(abs(prec))))\n\n cross_df['Energy_Prec_Inf'] = prec\n\n # close the R environments\n rpy2.robjects.numpy2ri.deactivate()\n pandas2ri.deactivate()\n\n return (cross_df)\n\ndef make_widgets():\n \"\"\"\n main function that will control the rendering of UI widgets\n\n \"\"\"\n pass\n\n \n#### WIDGET CREATIONS ####\n\n# OLD VANILLA\n# add a button widget and configure with the call back\n# button_basic = Button(label=\"Press Me\")\n# button_basic.on_click(callback)\n#make_bokeh_crossfilter()\n\n\n# create a button for Select button for input\n\n#menu = [(\"Bulk Modulus\", \"B\"), (\"B'\", \"dB\"), (\"Lattice Constant\", \"a0\")]\n#select_property = Select(name=\"Selection\", options=menu, value=\"B\")\n#select_property.on_click(make_bokeh_crossfilter(axis=value))\n\n\n# create a button for make crossfilter app \n\nbutton_crossfilter = Button(label=\"Make Crossfilter\")\nbutton_crossfilter.on_click(make_bokeh_crossfilter)\n\n#create a button for crossfilter_workflwo\nbutton_w_crossfilter = Button(label=\"Make Crossfilter Workflow\")\nbutton_w_crossfilter.on_click(make_wflow_crossfilter)\n\n# put the button and plot in a layout and add to the document\ncurdoc().add_root(column(button_crossfilter, button_w_crossfilter, p))\n","license":"mit"} {"repo_name":"Tuyki\/TT_RNN","path":"MNISTSeq.py","copies":"1","size":"14227","content":"__author__ = \"Yinchong Yang\"\n__copyright__ = \"Siemens AG, 2018\"\n__licencse__ = \"MIT\"\n__version__ = \"0.1\"\n\n\"\"\"\nMIT License\nCopyright (c) 2018 Siemens AG\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS 
IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\n\n\"\"\"\nWe first sample MNIST digits to form sequences of random lengths. \nThe sequence is labeled as one if it contains a zero, and is labeled zero otherwise.\nThis simulates a high-dimensional sequence classification task, such as predicting therapy decisions\nand survival of patients based on their historical clinical event information. \nWe train plain LSTM and Tensor-Train LSTM for this task. \nAfter the training, we apply Layer-wise Relevance Propagation to identify the digit(s) that \nhave influenced the classification. \nNaturally, we would expect the LRP algorithm to assign high relevance values to the zero(s)\nin the sequence. \nThese experiments turn out to be successful, which demonstrates that \ni) the LSTM and TT-LSTM can indeed learn the mapping from a zero to the sequence class, and that \nii) both LSTMs have no problem in storing the zero pattern over a period of time, because the \nclassifier is deployed only at the last hidden state, and that \niii) the implementation of the LRP algorithm, complex as it is, is also correct, in that \nthe zeros are assigned high relevance scores. \n\nIn particular, the experiments with the plain LSTM serve as a simulation study supporting our submission of \n\u201cYinchong Yang, Volker Tresp, Marius Wunderle, Peter A. Fasching, \nExplaining Therapy Predictions with Layer-wise Relevance Propagation in Neural Networks, at IEEE ICHI 2018\u201d. \n\nThe original LRP for LSTM implementation comes from the repository: \n    https:\/\/github.com\/ArrasL\/LRP_for_LSTM\nwhich we modified and adjusted for Keras models. 
\n\nFeel free to experiment with the hyper parameters and suggest other sequence classification tasks.\nHave fun ;) \n\"\"\"\n\n\nimport pickle\nimport sys\nimport numpy as np\nfrom numpy import newaxis as na\nimport keras\nfrom keras.layers.recurrent import Recurrent\nfrom keras import backend as K\nfrom keras.engine import InputSpec\nfrom keras import activations\nfrom keras import initializers\nfrom keras import regularizers\nfrom keras import constraints\nfrom keras.engine.topology import Layer\n\nfrom TTLayer import *\nfrom TTRNN import TT_LSTM\n\n\ndef make_seq(n, x, y, maxlen=32, seed=123):\n np.random.seed(seed)\n lens = np.random.choice(range(2, maxlen), n)\n seqs = np.zeros((n, maxlen, 28**2))\n labels = np.zeros(n)\n digits_label = np.zeros((n, maxlen), dtype='int32')-1\n ids = np.zeros((n, maxlen), dtype='int64')-1\n for i in range(n):\n digits_inds = np.random.choice(range(x.shape[0]), lens[i])\n ids[i, -lens[i]::] = digits_inds\n seqs[i, -lens[i]::, :] = x[digits_inds]\n digits_label[i, -lens[i]::] = y[digits_inds]\n class_inds = y[digits_inds]\n\n if True:\n # option 1: is there any 0 in the sequence?\n labels[i] = (0 in class_inds)\n else:\n # option 2: even number of 0 -> label=0, odd number of 0 -> label=1\n labels[i] = len(np.where(class_inds == 0)[0]) % 2 == 1\n return [seqs, labels, digits_label, ids]\n\n\n# From: https:\/\/github.com\/ArrasL\/LRP_for_LSTM\ndef lrp_linear(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False):\n \"\"\"\n LRP for a linear layer with input dim D and output dim M.\n Args:\n - hin: forward pass input, of shape (D,)\n - w: connection weights, of shape (D, M)\n - b: biases, of shape (M,)\n - hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!)\n - Rout: relevance at layer output, of shape (M,)\n - bias_nb_units: number of lower-layer units onto which the bias\/stabilizer contribution is redistributed\n - eps: stabilizer (small positive number)\n - bias_factor: for global relevance conservation set to 1.0, otherwise 0.0 to ignore bias redistribution\n Returns:\n - Rin: relevance at layer input, of shape (D,)\n \"\"\"\n sign_out = np.where(hout[na, :] >= 0, 1., -1.) # shape (1, M)\n\n numer = (w * hin[:, na]) + \\\n ((bias_factor * b[na, :] * 1. + eps * sign_out * 1.) * 1. \/ bias_nb_units) # shape (D, M)\n\n denom = hout[na, :] + (eps * sign_out * 1.) # shape (1, M)\n\n message = (numer \/ denom) * Rout[na, :] # shape (D, M)\n\n Rin = message.sum(axis=1) # shape (D,)\n\n # Note: local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D\n # global network relevance conservation if bias_factor==1.0 (can be used for sanity check)\n if debug:\n print(\"local diff: \", Rout.sum() - Rin.sum())\n\n return Rin\n\n\ndef sigmoid(x):\n x = x.astype('float128')\n return 1. \/ (1. 
+ np.exp(-x))\n\n# Modified from https:\/\/github.com\/ArrasL\/LRP_for_LSTM \ndef lstm_lrp(l, d, train_data = True):\n if train_data:\n x_l = X_tr[l]\n y_l = Y_tr[l]\n z_l = Z_tr[l]\n # d_l = d_tr[l]\n else:\n x_l = X_te[l]\n y_l = Y_te[l]\n z_l = Z_te[l]\n # d_l = d_te[l]\n\n\n # calculate the FF pass in LSTM for every time step\n pre_gates = np.zeros((MAXLEN, d*4))\n gates = np.zeros((MAXLEN, d * 4))\n h = np.zeros((MAXLEN, d))\n c = np.zeros((MAXLEN, d))\n\n for t in range(MAXLEN):\n z = np.dot(x_l[t], Ws)\n if t > 0:\n z += np.dot(h[t-1], Us)\n z += b\n pre_gates[t] = z\n z0 = z[0:d]\n z1 = z[d:2*d]\n z2 = z[2*d:3*d]\n z3 = z[3 * d::]\n i = sigmoid(z0)\n f = sigmoid(z1)\n c[t] = f * c[t-1] + i * np.tanh(z2)\n o = sigmoid(z3)\n h[t] = o * np.tanh(c[t])\n gates[t] = np.concatenate([i, f, np.tanh(z2), o])\n\n # check: z_l[12] \/ h[-1][12]\n\n Rh = np.zeros((MAXLEN, d))\n Rc = np.zeros((MAXLEN, d))\n Rg = np.zeros((MAXLEN, d))\n Rx = np.zeros((MAXLEN, 28**2))\n\n bias_factor = 0\n\n Rh[MAXLEN-1] = lrp_linear(hin=z_l,\n w=Dense_w,\n b=np.array(Dense_b),\n hout=np.dot(z_l, Dense_w)+Dense_b,\n Rout=np.array([y_l]),\n bias_nb_units=len(z_l),\n eps=eps,\n bias_factor=bias_factor)\n\n\n for t in reversed(range(MAXLEN)):\n # t = MAXLEN-1\n # print t\n\n Rc[t] += Rh[t]\n # Rc[t] = Rh[t]\n if t > 0:\n Rc[t-1] = lrp_linear(gates[t, d: 2 * d] * c[t - 1], # gates[t , 2 *d: 3 *d ] *c[ t -1],\n np.identity(d),\n np.zeros((d)),\n c[t],\n Rc[t],\n 2*d,\n eps,\n bias_factor,\n debug=False)\n\n Rg[t] = lrp_linear(gates[t, 0:d] * gates[t, 2*d:3*d], # h_input: i + g\n np.identity(d), # W\n np.zeros((d)), # b\n c[t], # h_output\n Rc[t], # R_output\n 2 * d,\n eps,\n bias_factor,\n debug=False)\n\n # foo = np.dot(x_l[t], Ws[:,2*d:3*d]) + np.dot(h[t-1], Us[:, 2*d:3*d]) + b[2*d:3*d]\n\n Rx[t] = lrp_linear(x_l[t],\n Ws[:,2*d:3*d],\n b[2*d:3*d],\n pre_gates[t, 2*d:3*d],\n Rg[t],\n d + 28 ** 2,\n eps,\n bias_factor,\n debug=False)\n\n if t > 0:\n Rh[t-1] = lrp_linear(h[t-1],\n Us[:,2*d:3*d],\n b[2*d:3*d],\n pre_gates[t, 2 * d:3 * d],\n Rg[t],\n d + 28**2,\n eps,\n bias_factor,\n debug=False)\n\n # hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False\n\n # Rx[np.where(d_l==-1.)[0]] *= 0\n return Rx\n\n\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\nfrom keras.models import Model, Input\nfrom keras.layers import Dense, GRU, LSTM, Dropout, Masking\nfrom keras.optimizers import *\nfrom keras.regularizers import l2\n\nfrom sklearn.metrics import *\n\n\n# Script configurations ###################################################################\n\nseed=111111\nuse_TT = True # whether use Tensor-Train or plain RNNs\n\n\n# Prepare the data ########################################################################\n# Load the MNIST data and build sequences:\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train = x_train.reshape(x_train.shape[0], -1)\nx_test = x_test.reshape(x_test.shape[0], -1)\n\nMAXLEN = 32 # max length of the sequences\n\nX_tr, Y_tr, d_tr, idx_tr = make_seq(n=10000, x=x_train, y=y_train, maxlen=MAXLEN, seed=seed)\nX_te, Y_te, d_te, idx_te = make_seq(n=1000, x=x_test, y=y_test, maxlen=MAXLEN, seed=seed+1)\n\n# Define the model ######################################################################\n\nif use_TT:\n # TT settings\n tt_input_shape = [7, 7, 16]\n tt_output_shape = [4, 4, 4]\n tt_ranks = [1, 4, 4, 1]\n\nrnn_size = 64\n\nX = Input(shape=X_tr.shape[1::])\nX_mask = Masking(mask_value=0.0, input_shape=X_tr.shape[1::])(X)\n\nif use_TT:\n Z = 
TT_LSTM(tt_input_shape=tt_input_shape, tt_output_shape=tt_output_shape, tt_ranks=tt_ranks,\n return_sequences=False, recurrent_dropout=.5)(X_mask)\n Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z)\nelse:\n Z = LSTM(units=rnn_size, return_sequences=False, recurrent_dropout=.5)(X_mask) # dropout=.5,\n Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z)\n\nrnn_model = Model(X, Out)\nrnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# Train the model and save the results ######################################################\nrnn_model.fit(X_tr, Y_tr, epochs=50, batch_size=32, validation_split=.2, verbose=2)\n\n\nY_hat = rnn_model.predict(X_tr, verbose=2).reshape(-1)\ntrain_acc = (np.round(Y_hat) == Y_tr).mean()\nY_pred = rnn_model.predict(X_te, verbose=2).reshape(-1)\n(np.round(Y_pred) == Y_te).mean()\npred_acc = (np.round(Y_pred) == Y_te).mean()\n\n\n# Collect all hidden layers ################################################################\nif use_TT:\n # Reconstruct the fully connected input-to-hidden weights:\n from keras.initializers import constant\n _tt_output_shape = np.copy(tt_output_shape)\n _tt_output_shape[0] *= 4\n\n fc_w = rnn_model.get_weights()[0]\n fc_layer = TT_Layer(tt_input_shape=tt_input_shape, tt_output_shape=_tt_output_shape, tt_ranks=tt_ranks,\n kernel_initializer=constant(value=fc_w), use_bias=False)\n fc_input = Input(shape=(X_tr.shape[2],))\n fc_output = fc_layer(fc_input)\n fc_model = Model(fc_input, fc_output)\n fc_model.compile('sgd', 'mse')\n\n fc_recon_mat = fc_model.predict(np.identity(X_tr.shape[2]))\n\n # Reconstruct the entire LSTM:\n fc_Z = LSTM(units=np.prod(tt_output_shape), return_sequences=False, dropout=.5, recurrent_dropout=.5,\n weights=[fc_recon_mat, rnn_model.get_weights()[2], rnn_model.get_weights()[1]])(X_mask)\n\nelse:\n fc_Z = LSTM(units=rnn_size, return_sequences=False, dropout=.5, recurrent_dropout=.5,\n weights=rnn_model.get_weights()[0:3])(X_mask)\n\nfc_Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-3),\n weights=rnn_model.get_weights()[3::])(fc_Z)\nfc_rnn_model = Model(X, fc_Out)\nfc_rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy',\n metrics=['accuracy'])\n\nfc_rnn_model.evaluate(X_te, Y_te, verbose=2)\n\n\n\n# Calculate the LRP: #########################################################################\nfc_Z_model = Model(X, fc_Z)\nfc_Z_model.compile('sgd', 'mse')\n\nY_hat_fc = fc_rnn_model.predict(X_tr)\nY_pred_fc = fc_rnn_model.predict(X_te)\n\nWs = fc_rnn_model.get_weights()[0]\nUs = fc_rnn_model.get_weights()[1]\nb = fc_rnn_model.get_weights()[2]\nDense_w = fc_rnn_model.get_weights()[3]\nDense_b = fc_rnn_model.get_weights()[4]\n\nZ_tr = fc_Z_model.predict(X_tr)\nZ_te = fc_Z_model.predict(X_te)\n\neps = 1e-4\n\nis_number_flag = np.where(d_te != -1)\n\n# All relevance scores of the test sequences\nlrp_te = np.vstack([lstm_lrp(i, rnn_size, False).sum(1) for i in range(X_te.shape[0])])\n\nlrp_auroc = roc_auc_score((d_te == 0).astype('int')[is_number_flag].reshape(-1),\n lrp_te[is_number_flag].reshape(-1))\nlrp_auprc = average_precision_score((d_te == 0).astype('int')[is_number_flag].reshape(-1),\n lrp_te[is_number_flag].reshape(-1))\n\n\n# The reported results:\nprint pred_acc\nprint lrp_auroc\nprint lrp_auprc\n\n","license":"mit"} {"repo_name":"justincely\/rolodex","path":"setup.py","copies":"1","size":"2102","content":"from setuptools import setup, find_packages\n\nsetup(\n name = 'cos_monitoring',\n 
version = '0.0.1',\n description = 'Provide utilities and monotiring of cos data',\n author = 'Justin Ely',\n author_email = 'ely@stsci.edu',\n keywords = ['astronomy'],\n classifiers = ['Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 1 - Planning',\n 'Intended Audience :: Science\/Research',\n 'Topic :: Scientific\/Engineering :: Astronomy',\n 'Topic :: Scientific\/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules'],\n packages = find_packages(),\n requires = ['numpy', 'scipy', 'astropy', 'matplotlib'],\n entry_points = {'console_scripts': ['clean_slate=cos_monitoring.database:clean_slate',\n 'cm_ingest=cos_monitoring.database:ingest_all',\n 'cm_monitors=cos_monitoring.database:run_all_monitors',\n 'create_master_csv=scripts.create_master_csv:main',\n 'cosmo_retrieval=cos_monitoring.retrieval.run_cosmo_retrieval',\n 'cm_reports=cos_monitoring.database.report:query_all',\n 'cm_delete=cos_monitoring.database.database:cm_delete',\n 'cm_describe=cos_monitoring.database.database:cm_describe',\n 'cm_tot_gain=cos_monitoring.cci.gainmap:make_all_gainmaps_entry'],\n },\n install_requires = ['setuptools',\n 'numpy>=1.11.1',\n 'astropy>=1.0.1',\n 'sqlalchemy>=1.0.12',\n 'pymysql',\n 'matplotlib',\n 'scipy',\n 'fitsio',\n 'psutil',\n 'beautifulsoup4',\n 'pyfastcopy']\n )\n","license":"bsd-3-clause"} {"repo_name":"pycroscopy\/pycroscopy","path":"pycroscopy\/processing\/svd_utils.py","copies":"1","size":"20291","content":"# -*- coding: utf-8 -*-\n\"\"\"\nUSID utilities for performing randomized singular value decomposition and reconstructing results\n\nCreated on Mon Mar 28 09:45:08 2016\n\n@author: Suhas Somnath, Chris Smith\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\nimport time\nfrom multiprocessing import cpu_count\nimport numpy as np\nfrom sklearn.utils import gen_batches\nfrom sklearn.utils.extmath import randomized_svd\n\nfrom sidpy.hdf.reg_ref import get_indices_for_region_ref, create_region_reference\nfrom sidpy.hdf.hdf_utils import get_attr, write_simple_attrs, copy_attributes\nfrom sidpy.proc.comp_utils import get_available_memory\nfrom sidpy.base.string_utils import format_time\nfrom sidpy.hdf.dtype_utils import check_dtype, stack_real_to_target_dtype\n\nfrom pyUSID.processing.process import Process\nfrom .proc_utils import get_component_slice\nfrom pyUSID.io.hdf_utils import find_results_groups, \\\n reshape_to_n_dims, write_main_dataset, create_results_group, \\\n create_indexed_group, find_dataset\nfrom pyUSID import Dimension\nfrom pyUSID.io.anc_build_utils import calc_chunks\nfrom pyUSID import USIDataset\n\nimport h5py\nfrom matplotlib import pyplot as plt\nfrom pyUSID.viz import plot_utils\n\nclass SVD(Process):\n \"\"\"\n This class provides a file-wrapper around the :meth:`sklearn.utils.extmath.randomized_svd` function.\n In other words, it extracts and then reformats the data present in the provided :class:`pyUSID.USIDataset` object,\n performs the randomized SVD operation and writes the results back to the USID HDF5 file after\n formatting the results in an USID compliant manner.\n \"\"\"\n\n def __init__(self, h5_main, num_components=None, **kwargs):\n \"\"\"\n Perform the SVD decomposition on the selected dataset and write the results to h5 file.\n\n Parameters\n ----------\n h5_main : :class:`pyUSID.USIDataset` object\n USID Main HDF5 dataset that will be decomposed\n num_components : int, optional\n Number of components to decompose h5_main into. 
Default None.\n h5_target_group : h5py.Group, optional. Default = None\n Location where to look for existing results and to place newly\n computed results. Use this kwarg if the results need to be written\n to a different HDF5 file. By default, this value is set to the\n parent group containing `h5_main`\n kwargs\n Arguments to be sent to Process\n \"\"\"\n super(SVD, self).__init__(h5_main, 'SVD', **kwargs)\n\n '''\n Calculate the size of the main data in memory and compare to max_mem\n We use the minimum of the actual dtype's itemsize and float32 since we\n don't want to read it in yet and do the proper type conversions.\n '''\n n_samples, n_features = h5_main.shape\n self.data_transform_func, is_complex, is_compound, n_features, type_mult = check_dtype(h5_main)\n\n if num_components is None:\n num_components = min(n_samples, n_features)\n else:\n num_components = min(n_samples, n_features, num_components)\n\n self.num_components = num_components\n\n # Check that we can actually compute the SVD with the selected number of components\n self._check_available_mem()\n\n self.parms_dict = {'num_components': num_components}\n self.duplicate_h5_groups, self.partial_h5_groups = self._check_for_duplicates()\n\n # supercharge h5_main!\n self.h5_main = USIDataset(self.h5_main)\n\n self.__u = None\n self.__v = None\n self.__s = None\n\n def test(self, override=False):\n \"\"\"\n Applies randomised VD to the dataset. This function does NOT write results to the hdf5 file. Call compute() to\n write to the file. Handles complex, compound datasets such that the V matrix is of the same data-type as the\n input matrix.\n\n Parameters\n ----------\n override : bool, optional. default = False\n Set to true to recompute results if prior results are available. Else, returns existing results\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Abundance matrix\n S : :class:`numpy.ndarray`\n variance vector\n V : :class:`numpy.ndarray`\n eigenvector matrix\n \"\"\"\n '''\n Check if a number of compnents has been set and ensure that the number is less than\n the minimum axis length of the data. If both conditions are met, use fsvd. If not\n use the regular svd.\n\n C.Smith -- We might need to put a lower limit on num_comps in the future. I don't\n know enough about svd to be sure.\n '''\n if not override:\n if isinstance(self.duplicate_h5_groups, list) and len(self.duplicate_h5_groups) > 0:\n self.h5_results_grp = self.duplicate_h5_groups[-1]\n print('Returning previously computed results from: {}'.format(self.h5_results_grp.name))\n print('set the \"override\" flag to True to recompute results')\n return reshape_to_n_dims(self.h5_results_grp['U'])[0], self.h5_results_grp['S'][()], \\\n reshape_to_n_dims(self.h5_results_grp['V'])[0]\n\n self.h5_results_grp = None\n\n t1 = time.time()\n\n self.__u, self.__s, self.__v = randomized_svd(self.data_transform_func(self.h5_main), self.num_components,\n n_iter=3)\n self.__v = stack_real_to_target_dtype(self.__v, self.h5_main.dtype)\n\n print('Took {} to compute randomized SVD'.format(format_time(time.time() - t1)))\n\n u_mat, success = reshape_to_n_dims(self.__u, h5_pos=self.h5_main.h5_pos_inds,\n h5_spec=np.expand_dims(np.arange(self.__u.shape[1]), axis=0))\n if not success:\n raise ValueError('Could not reshape U to N-Dimensional dataset! 
Error:' + success)\n\n # When the source dataset has a singular valued spectroscopic dimension\n # stack_real_to_target causes V to lose all its dimensions\n if self.__v.ndim == 0:\n # However, we want V to be 2D:\n self.__v = np.atleast_2d(self.__v)\n\n v_mat, success = reshape_to_n_dims(self.__v, h5_pos=np.expand_dims(np.arange(self.__u.shape[1]), axis=1),\n h5_spec=self.h5_main.h5_spec_inds)\n if not success:\n raise ValueError('Could not reshape V to N-Dimensional dataset! Error:' + success)\n\n return u_mat, self.__s, v_mat\n\n def compute(self, override=False):\n \"\"\"\n Computes SVD (by calling test_on_subset() if it has not already been called) and writes results to file.\n Consider calling test() to check results before writing to file. Results are deleted from memory\n upon writing to the HDF5 file\n\n Parameters\n ----------\n override : bool, optional. default = False\n Set to true to recompute results if prior results are available. Else, returns existing results\n\n Returns\n -------\n h5_results_grp : :class:`h5py.Group` object\n HDF5 Group containing all the results\n \"\"\"\n if self.__u is None and self.__v is None and self.__s is None:\n self.test(override=override)\n\n if self.h5_results_grp is None:\n self._write_results_chunk()\n self.delete_results()\n\n h5_group = self.h5_results_grp\n\n return h5_group\n\n def delete_results(self):\n \"\"\"\n Deletes results from memory.\n \"\"\"\n del self.__u, self.__s, self.__v\n self.__u = None\n self.__v = None\n self.__s = None\n\n def _write_results_chunk(self):\n \"\"\"\n Writes the provided SVD results to file\n\n Parameters\n ----------\n \"\"\"\n comp_dim = Dimension('Principal Component', 'a. u.', len(self.__s))\n\n h5_svd_group = create_results_group(self.h5_main, self.process_name,\n h5_parent_group=self._h5_target_group)\n self.h5_results_grp = h5_svd_group\n self._write_source_dset_provenance()\n \n\n write_simple_attrs(h5_svd_group, self.parms_dict)\n write_simple_attrs(h5_svd_group, {'svd_method': 'sklearn-randomized'})\n\n h5_u = write_main_dataset(h5_svd_group, np.float32(self.__u), 'U', 'Abundance', 'a.u.', None, comp_dim,\n h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals,\n dtype=np.float32, chunks=calc_chunks(self.__u.shape, np.float32(0).itemsize))\n # print(get_attr(self.h5_main, 'quantity')[0])\n h5_v = write_main_dataset(h5_svd_group, self.__v, 'V', get_attr(self.h5_main, 'quantity')[0],\n 'a.u.', comp_dim, None, h5_spec_inds=self.h5_main.h5_spec_inds,\n h5_spec_vals=self.h5_main.h5_spec_vals,\n chunks=calc_chunks(self.__v.shape, self.h5_main.dtype.itemsize))\n\n # No point making this 1D dataset a main dataset\n h5_s = h5_svd_group.create_dataset('S', data=np.float32(self.__s))\n\n '''\n Check h5_main for plot group references.\n Copy them into V if they exist\n '''\n for key in self.h5_main.attrs.keys():\n if '_Plot_Group' not in key:\n continue\n\n ref_inds = get_indices_for_region_ref(self.h5_main, self.h5_main.attrs[key], return_method='corners')\n ref_inds = ref_inds.reshape([-1, 2, 2])\n ref_inds[:, 1, 0] = h5_v.shape[0] - 1\n\n svd_ref = create_region_reference(h5_v, ref_inds)\n\n h5_v.attrs[key] = svd_ref\n\n # Marking completion:\n self._status_dset_name = 'completed_positions'\n self._h5_status_dset = h5_svd_group.create_dataset(self._status_dset_name,\n data=np.ones(self.h5_main.shape[0], dtype=np.uint8))\n # keeping legacy option:\n h5_svd_group.attrs['last_pixel'] = self.h5_main.shape[0]\n\n def _check_available_mem(self):\n \"\"\"\n Check that there is enough 
memory to perform the SVD decomposition.\n\n Returns\n -------\n sufficient_mem : bool\n True is enough memory found, False otherwise.\n\n \"\"\"\n if self.verbose:\n print('Checking memory availability.')\n n_samples, n_features = self.h5_main.shape\n s_mem_per_comp = np.float32(0).itemsize\n u_mem_per_comp = np.float32(0).itemsize * n_samples\n v_mem_per_comp = self.h5_main.dtype.itemsize * n_features\n\n mem_per_comp = s_mem_per_comp + u_mem_per_comp + v_mem_per_comp\n max_mem = get_available_memory()\n avail_mem = 0.75 * max_mem\n free_mem = avail_mem - self.h5_main.__sizeof__()\n\n if free_mem <= 0:\n error_message = 'Cannot load main dataset into memory.\\n' + \\\n 'Available memory is {}. Dataset needs {}.'.format(avail_mem,\n self.h5_main.__sizeof__())\n raise MemoryError(error_message)\n\n if self.verbose:\n print('Memory available for SVD is {}.'.format(free_mem))\n print('Memory needed per component is {}.'.format(mem_per_comp))\n\n cant_svd = (free_mem - self.num_components * mem_per_comp) <= 0\n\n if cant_svd:\n max_comps = np.floor(free_mem \/ mem_per_comp, dtype=int)\n error_message = 'Not enough free memory for performing SVD with requested number of parameters.\\n' + \\\n 'Maximum possible parameters is {}.'.format(max_comps)\n raise MemoryError(error_message)\n\n###############################################################################\n\n\ndef simplified_kpca(kpca, source_data):\n \"\"\"\n Performs kernel PCA on the provided dataset and returns the familiar\n eigenvector, eigenvalue, and scree matrices.\n\n Note that the positions in the eigenvalues may need to be transposed\n\n Parameters\n ----------\n kpca : KernelPCA object\n configured Kernel PCA object ready to perform analysis\n source_data : 2D numpy array\n Data arranged as [iteration, features] example - [position, time]\n\n Returns\n -------\n eigenvalues : 2D numpy array\n Eigenvalues in the original space arranged as [component,iteration]\n scree : 1D numpy array\n S component\n eigenvector : 2D numpy array\n Eigenvectors in the original space arranged as [component,features]\n\n \"\"\"\n X_kpca = kpca.fit(source_data.T)\n eigenvectors = X_kpca.alphas_.T\n eigenvalues = X_kpca.fit_transform(source_data)\n # kpca_explained_variance = np.var(kpca.fit_transform(source_data), axis=0)\n # information_content = kpca_explained_variance \/ np.sum(kpca_explained_variance)\n scree = kpca.lambdas_\n return eigenvalues, scree, eigenvectors\n\n\ndef rebuild_svd(h5_main, components=None, cores=None, max_RAM_mb=1024):\n \"\"\"\n Rebuild the Image from the SVD results on the windows\n Optionally, only use components less than n_comp.\n\n Parameters\n ----------\n h5_main : hdf5 Dataset\n dataset which SVD was performed on\n components : {int, iterable of int, slice} optional\n Defines which components to keep\n Default - None, all components kept\n\n Input Types\n integer : Components less than the input will be kept\n length 2 iterable of integers : Integers define start and stop of component slice to retain\n other iterable of integers or slice : Selection of component indices to retain\n cores : int, optional\n How many cores should be used to rebuild\n Default - None, all but 2 cores will be used, min 1\n max_RAM_mb : int, optional\n Maximum ammount of memory to use when rebuilding, in Mb.\n Default - 1024Mb\n\n Returns\n -------\n rebuilt_data : HDF5 Dataset\n the rebuilt dataset\n\n \"\"\"\n comp_slice, num_comps = get_component_slice(components, total_components=h5_main.shape[1])\n if isinstance(comp_slice, 
np.ndarray):\n comp_slice = list(comp_slice)\n dset_name = h5_main.name.split('\/')[-1]\n\n # Ensuring that at least one core is available for use \/ 2 cores are available for other use\n max_cores = max(1, cpu_count() - 2)\n # print('max_cores',max_cores)\n if cores is not None:\n cores = min(round(abs(cores)), max_cores)\n else:\n cores = max_cores\n\n max_memory = min(max_RAM_mb * 1024 ** 2, 0.75 * get_available_memory())\n if cores != 1:\n max_memory = int(max_memory \/ 2)\n\n '''\n Get the handles for the SVD results\n '''\n try:\n h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]\n\n h5_S = h5_svd_group['S']\n h5_U = h5_svd_group['U']\n h5_V = h5_svd_group['V']\n\n except KeyError:\n raise KeyError('SVD Results for {dset} were not found.'.format(dset=dset_name))\n except:\n raise\n\n func, is_complex, is_compound, n_features, type_mult = check_dtype(h5_V)\n\n '''\n Calculate the size of a single batch that will fit in the available memory\n '''\n n_comps = h5_S[comp_slice].size\n mem_per_pix = (h5_U.dtype.itemsize + h5_V.dtype.itemsize * h5_V.shape[1]) * n_comps\n fixed_mem = h5_main.size * h5_main.dtype.itemsize\n\n if cores is None:\n free_mem = max_memory - fixed_mem\n else:\n free_mem = max_memory * 2 - fixed_mem\n\n batch_size = int(round(float(free_mem) \/ mem_per_pix))\n batch_slices = gen_batches(h5_U.shape[0], batch_size)\n\n print('Reconstructing in batches of {} positions.'.format(batch_size))\n print('Batchs should be {} Mb each.'.format(mem_per_pix * batch_size \/ 1024.0 ** 2))\n\n '''\n Loop over all batches.\n '''\n ds_V = np.dot(np.diag(h5_S[comp_slice]), func(h5_V[comp_slice, :]))\n rebuild = np.zeros((h5_main.shape[0], ds_V.shape[1]))\n for ibatch, batch in enumerate(batch_slices):\n rebuild[batch, :] += np.dot(h5_U[batch, comp_slice], ds_V)\n\n rebuild = stack_real_to_target_dtype(rebuild, h5_V.dtype)\n\n print('Completed reconstruction of data from SVD results. 
Writing to file.')\n '''\n Create the Group and dataset to hold the rebuild data\n '''\n rebuilt_grp = create_indexed_group(h5_svd_group, 'Rebuilt_Data')\n h5_rebuilt = write_main_dataset(rebuilt_grp, rebuild, 'Rebuilt_Data',\n get_attr(h5_main, 'quantity'), get_attr(h5_main, 'units'),\n None, None,\n h5_pos_inds=h5_main.h5_pos_inds, h5_pos_vals=h5_main.h5_pos_vals,\n h5_spec_inds=h5_main.h5_spec_inds, h5_spec_vals=h5_main.h5_spec_vals,\n chunks=h5_main.chunks, compression=h5_main.compression)\n\n if isinstance(comp_slice, slice):\n rebuilt_grp.attrs['components_used'] = '{}-{}'.format(comp_slice.start, comp_slice.stop)\n else:\n rebuilt_grp.attrs['components_used'] = components\n\n copy_attributes(h5_main, h5_rebuilt, skip_refs=False)\n\n h5_main.file.flush()\n\n print('Done writing reconstructed data to file.')\n\n return h5_rebuilt\n\ndef plot_svd(h5_main, savefig=False, num_plots = 16, **kwargs):\n '''\n Replots the SVD showing the skree, abundance maps, and eigenvectors.\n If h5_main is a Dataset, it will default to the most recent SVD group from that\n Dataset.\n If h5_main is the results group, then it will plot the values for that group.\n \n Parameters\n ---------- \n h5_main : USIDataset or h5py Dataset or h5py Group\n \n savefig : bool, optional\n Saves the figures to disk with some default names\n \n num_plots : int\n Default number of eigenvectors and abundance plots to show\n \n kwargs : dict, optional\n keyword arguments for svd filtering\n \n Returns\n -------\n None\n '''\n \n if isinstance(h5_main, h5py.Group):\n\n _U = find_dataset(h5_main, 'U')[-1]\n _V = find_dataset(h5_main, 'V')[-1]\n units = 'arbitrary (a.u.)'\n h5_spec_vals = np.arange(_V.shape[1])\n h5_svd_group = _U.parent\n\n else:\n\n h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]\n units = h5_main.attrs['quantity']\n h5_spec_vals = h5_main.get_spec_values('Time')\n \n h5_U = h5_svd_group['U']\n h5_V = h5_svd_group['V']\n h5_S = h5_svd_group['S']\n \n _U = USIDataset(h5_U)\n [num_rows, num_cols] = _U.pos_dim_sizes\n \n abun_maps = np.reshape(h5_U[:,:16], (num_rows, num_cols,-1))\n eigen_vecs = h5_V[:16, :]\n \n skree_sum = np.zeros(h5_S.shape)\n for i in range(h5_S.shape[0]):\n skree_sum[i] = np.sum(h5_S[:i])\/np.sum(h5_S)\n\n plt.figure()\n plt.plot(skree_sum, 'bo')\n plt.title('Cumulative Variance')\n plt.xlabel('Total Components')\n plt.ylabel('Total variance ratio (a.u.)')\n \n if savefig:\n plt.savefig('Cumulative_variance_plot.png')\n \n fig_skree, axes = plot_utils.plot_scree(h5_S, title='Scree plot')\n fig_skree.tight_layout()\n\n if savefig:\n plt.savefig('Scree_plot.png')\n \n fig_abun, axes = plot_utils.plot_map_stack(abun_maps, num_comps=num_plots, title='SVD Abundance Maps',\n color_bar_mode='single', cmap='inferno', reverse_dims=True, \n fig_mult=(3.5,3.5), facecolor='white', **kwargs)\n fig_abun.tight_layout()\n if savefig:\n plt.savefig('Abundance_maps.png')\n \n\n fig_eigvec, axes = plot_utils.plot_curves(h5_spec_vals*1e3, eigen_vecs, use_rainbow_plots=False, \n x_label='Time (ms)', y_label=units, \n num_plots=num_plots, subtitle_prefix='Component', \n title='SVD Eigenvectors', evenly_spaced=False, \n **kwargs)\n fig_eigvec.tight_layout()\n if savefig:\n plt.savefig('Eigenvectors.png')\n \n return ","license":"mit"} {"repo_name":"juliojsb\/sarviewer","path":"plotters\/matplotlib\/swap.py","copies":"1","size":"2062","content":"#!\/usr\/bin\/env python2\n\"\"\"\nAuthor :Julio Sanz\nWebsite :www.elarraydejota.com\nEmail :juliojosesb@gmail.com\nDescription :Script to create a graph about 
swap usage\nDependencies :Python 2.x, matplotlib\nUsage :python swap.py\nLicense :GPLv3\n\"\"\"\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt \nimport csv\nfrom datetime import datetime\nimport matplotlib.dates\n\n# ======================\n# VARIABLES\n# ======================\n\n# Aesthetic parameters\nplt.rcParams.update({'font.size': 8})\nplt.rcParams['lines.linewidth'] = 1.5\ntime_format = matplotlib.dates.DateFormatter('%H:%M:%S')\nplt.gca().xaxis.set_major_formatter(time_format)\nplt.gcf().autofmt_xdate()\n\n# Time (column 0)\nx = []\n# Data arrays\nswap_free = []\nswap_used = []\n\n# ======================\n# FUNCTIONS\n# ======================\n\ndef generate_graph():\n with open('..\/..\/data\/swap.dat', 'r') as csvfile:\n data_source = csv.reader(csvfile, delimiter=' ', skipinitialspace=True)\n for row in data_source:\n # [0] column is a time column\n # Convert to datetime data type\n a = datetime.strptime((row[0]),'%H:%M:%S')\n x.append((a))\n # The remaining columns contain data\n swap_free.append(str(int(row[1])\/1024))\n swap_used.append(str(int(row[2])\/1024))\n \n\n # Plot lines\n plt.plot(x,swap_used, label='Used', color='r', antialiased=True)\n plt.plot(x,swap_free, label='Free', color='g', antialiased=True)\n \n # Graph properties\n plt.xlabel('Time',fontstyle='italic')\n plt.ylabel('SWAP (MB)',fontstyle='italic')\n plt.title('SWAP usage graph')\n plt.grid(linewidth=0.4, antialiased=True)\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2, fancybox=True, shadow=True)\n plt.autoscale(True)\n \n # Graph saved to PNG file\n plt.savefig('..\/..\/graphs\/swap.png', bbox_inches='tight')\n #plt.show()\n\n# ======================\n# MAIN\n# ======================\n\nif __name__ == '__main__':\n generate_graph()","license":"gpl-3.0"} {"repo_name":"ryfeus\/lambda-packs","path":"Sklearn_scipy_numpy\/source\/sklearn\/feature_selection\/rfe.py","copies":"6","size":"17502","content":"# Authors: Alexandre Gramfort \n# Vincent Michel \n# Gilles Louppe \n#\n# License: BSD 3 clause\n\n\"\"\"Recursive feature elimination for feature ranking\"\"\"\n\nimport warnings\nimport numpy as np\nfrom ..utils import check_X_y, safe_sqr\nfrom ..utils.metaestimators import if_delegate_has_method\nfrom ..base import BaseEstimator\nfrom ..base import MetaEstimatorMixin\nfrom ..base import clone\nfrom ..base import is_classifier\nfrom ..cross_validation import check_cv\nfrom ..cross_validation import _safe_split, _score\nfrom ..metrics.scorer import check_scoring\nfrom .base import SelectorMixin\n\n\nclass RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):\n \"\"\"Feature ranking with recursive feature elimination.\n\n Given an external estimator that assigns weights to features (e.g., the\n coefficients of a linear model), the goal of recursive feature elimination\n (RFE) is to select features by recursively considering smaller and smaller\n sets of features. First, the estimator is trained on the initial set of\n features and weights are assigned to each one of them. Then, features whose\n absolute weights are the smallest are pruned from the current set features.\n That procedure is recursively repeated on the pruned set until the desired\n number of features to select is eventually reached.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n estimator : object\n A supervised learning estimator with a `fit` method that updates a\n `coef_` attribute that holds the fitted parameters. 
Important features\n must correspond to high absolute values in the `coef_` array.\n\n For instance, this is the case for most supervised learning\n algorithms such as Support Vector Classifiers and Generalized\n Linear Models from the `svm` and `linear_model` modules.\n\n n_features_to_select : int or None (default=None)\n The number of features to select. If `None`, half of the features\n are selected.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then `step` corresponds to the (integer)\n number of features to remove at each iteration.\n If within (0.0, 1.0), then `step` corresponds to the percentage\n (rounded down) of features to remove at each iteration.\n\n estimator_params : dict\n Parameters for the external estimator.\n This attribute is deprecated as of version 0.16 and will be removed in\n 0.18. Use estimator initialisation or set_params method instead.\n\n verbose : int, default=0\n Controls verbosity of output.\n\n Attributes\n ----------\n n_features_ : int\n The number of selected features.\n\n support_ : array of shape [n_features]\n The mask of selected features.\n\n ranking_ : array of shape [n_features]\n The feature ranking, such that ``ranking_[i]`` corresponds to the\n ranking position of the i-th feature. Selected (i.e., estimated\n best) features are assigned rank 1.\n\n estimator_ : object\n The external estimator fit on the reduced dataset.\n\n Examples\n --------\n The following example shows how to retrieve the 5 right informative\n features in the Friedman #1 dataset.\n\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.feature_selection import RFE\n >>> from sklearn.svm import SVR\n >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)\n >>> estimator = SVR(kernel=\"linear\")\n >>> selector = RFE(estimator, 5, step=1)\n >>> selector = selector.fit(X, y)\n >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE\n array([ True, True, True, True, True,\n False, False, False, False, False], dtype=bool)\n >>> selector.ranking_\n array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])\n\n References\n ----------\n\n .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., \"Gene selection\n for cancer classification using support vector machines\",\n Mach. 
Learn., 46(1-3), 389--422, 2002.\n \"\"\"\n def __init__(self, estimator, n_features_to_select=None, step=1,\n estimator_params=None, verbose=0):\n self.estimator = estimator\n self.n_features_to_select = n_features_to_select\n self.step = step\n self.estimator_params = estimator_params\n self.verbose = verbose\n\n @property\n def _estimator_type(self):\n return self.estimator._estimator_type\n\n def fit(self, X, y):\n \"\"\"Fit the RFE model and then the underlying estimator on the selected\n features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n The training input samples.\n\n y : array-like, shape = [n_samples]\n The target values.\n \"\"\"\n return self._fit(X, y)\n\n def _fit(self, X, y, step_score=None):\n X, y = check_X_y(X, y, \"csc\")\n # Initialization\n n_features = X.shape[1]\n if self.n_features_to_select is None:\n n_features_to_select = n_features \/\/ 2\n else:\n n_features_to_select = self.n_features_to_select\n\n if 0.0 < self.step < 1.0:\n step = int(max(1, self.step * n_features))\n else:\n step = int(self.step)\n if step <= 0:\n raise ValueError(\"Step must be >0\")\n\n if self.estimator_params is not None:\n warnings.warn(\"The parameter 'estimator_params' is deprecated as \"\n \"of version 0.16 and will be removed in 0.18. The \"\n \"parameter is no longer necessary because the value \"\n \"is set via the estimator initialisation or \"\n \"set_params method.\", DeprecationWarning)\n\n support_ = np.ones(n_features, dtype=np.bool)\n ranking_ = np.ones(n_features, dtype=np.int)\n\n if step_score:\n self.scores_ = []\n\n # Elimination\n while np.sum(support_) > n_features_to_select:\n # Remaining features\n features = np.arange(n_features)[support_]\n\n # Rank the remaining features\n estimator = clone(self.estimator)\n if self.estimator_params:\n estimator.set_params(**self.estimator_params)\n if self.verbose > 0:\n print(\"Fitting estimator with %d features.\" % np.sum(support_))\n\n estimator.fit(X[:, features], y)\n\n # Get coefs\n if hasattr(estimator, 'coef_'):\n coefs = estimator.coef_\n elif hasattr(estimator, 'feature_importances_'):\n coefs = estimator.feature_importances_\n else:\n raise RuntimeError('The classifier does not expose '\n '\"coef_\" or \"feature_importances_\" '\n 'attributes')\n\n # Get ranks\n if coefs.ndim > 1:\n ranks = np.argsort(safe_sqr(coefs).sum(axis=0))\n else:\n ranks = np.argsort(safe_sqr(coefs))\n\n # for sparse case ranks is matrix\n ranks = np.ravel(ranks)\n\n # Eliminate the worse features\n threshold = min(step, np.sum(support_) - n_features_to_select)\n\n # Compute step score on the previous selection iteration\n # because 'estimator' must use features\n # that have not been eliminated yet\n if step_score:\n self.scores_.append(step_score(estimator, features))\n support_[features[ranks][:threshold]] = False\n ranking_[np.logical_not(support_)] += 1\n\n # Set final attributes\n features = np.arange(n_features)[support_]\n self.estimator_ = clone(self.estimator)\n if self.estimator_params:\n self.estimator_.set_params(**self.estimator_params)\n self.estimator_.fit(X[:, features], y)\n\n # Compute step score when only n_features_to_select features left\n if step_score:\n self.scores_.append(step_score(self.estimator_, features))\n self.n_features_ = support_.sum()\n self.support_ = support_\n self.ranking_ = ranking_\n\n return self\n\n @if_delegate_has_method(delegate='estimator')\n def predict(self, X):\n \"\"\"Reduce X to the selected features and then predict using the\n 
underlying estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : array of shape [n_samples]\n The predicted target values.\n \"\"\"\n return self.estimator_.predict(self.transform(X))\n\n @if_delegate_has_method(delegate='estimator')\n def score(self, X, y):\n \"\"\"Reduce X to the selected features and then return the score of the\n underlying estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n \"\"\"\n return self.estimator_.score(self.transform(X), y)\n\n def _get_support_mask(self):\n return self.support_\n\n @if_delegate_has_method(delegate='estimator')\n def decision_function(self, X):\n return self.estimator_.decision_function(self.transform(X))\n\n @if_delegate_has_method(delegate='estimator')\n def predict_proba(self, X):\n return self.estimator_.predict_proba(self.transform(X))\n\n @if_delegate_has_method(delegate='estimator')\n def predict_log_proba(self, X):\n return self.estimator_.predict_log_proba(self.transform(X))\n\n\nclass RFECV(RFE, MetaEstimatorMixin):\n \"\"\"Feature ranking with recursive feature elimination and cross-validated\n selection of the best number of features.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n estimator : object\n A supervised learning estimator with a `fit` method that updates a\n `coef_` attribute that holds the fitted parameters. Important features\n must correspond to high absolute values in the `coef_` array.\n\n For instance, this is the case for most supervised learning\n algorithms such as Support Vector Classifiers and Generalized\n Linear Models from the `svm` and `linear_model` modules.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then `step` corresponds to the (integer)\n number of features to remove at each iteration.\n If within (0.0, 1.0), then `step` corresponds to the percentage\n (rounded down) of features to remove at each iteration.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train\/test splits.\n\n For integer\/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validation strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object \/ function with signature\n ``scorer(estimator, X, y)``.\n\n estimator_params : dict\n Parameters for the external estimator.\n This attribute is deprecated as of version 0.16 and will be removed in\n 0.18. 
Use estimator initialisation or set_params method instead.\n\n verbose : int, default=0\n Controls verbosity of output.\n\n Attributes\n ----------\n n_features_ : int\n The number of selected features with cross-validation.\n\n support_ : array of shape [n_features]\n The mask of selected features.\n\n ranking_ : array of shape [n_features]\n The feature ranking, such that `ranking_[i]`\n corresponds to the ranking\n position of the i-th feature.\n Selected (i.e., estimated best)\n features are assigned rank 1.\n\n grid_scores_ : array of shape [n_subsets_of_features]\n The cross-validation scores such that\n ``grid_scores_[i]`` corresponds to\n the CV score of the i-th subset of features.\n\n estimator_ : object\n The external estimator fit on the reduced dataset.\n\n Notes\n -----\n The size of ``grid_scores_`` is equal to ceil((n_features - 1) \/ step) + 1,\n where step is the number of features removed at each iteration.\n\n Examples\n --------\n The following example shows how to retrieve the a-priori not known 5\n informative features in the Friedman #1 dataset.\n\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.feature_selection import RFECV\n >>> from sklearn.svm import SVR\n >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)\n >>> estimator = SVR(kernel=\"linear\")\n >>> selector = RFECV(estimator, step=1, cv=5)\n >>> selector = selector.fit(X, y)\n >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE\n array([ True, True, True, True, True,\n False, False, False, False, False], dtype=bool)\n >>> selector.ranking_\n array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])\n\n References\n ----------\n\n .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., \"Gene selection\n for cancer classification using support vector machines\",\n Mach. Learn., 46(1-3), 389--422, 2002.\n \"\"\"\n def __init__(self, estimator, step=1, cv=None, scoring=None,\n estimator_params=None, verbose=0):\n self.estimator = estimator\n self.step = step\n self.cv = cv\n self.scoring = scoring\n self.estimator_params = estimator_params\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the RFE model and automatically tune the number of selected\n features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the total number of features.\n\n y : array-like, shape = [n_samples]\n Target values (integers for classification, real numbers for\n regression).\n \"\"\"\n X, y = check_X_y(X, y, \"csr\")\n if self.estimator_params is not None:\n warnings.warn(\"The parameter 'estimator_params' is deprecated as \"\n \"of version 0.16 and will be removed in 0.18. 
\"\n \"The parameter is no longer necessary because the \"\n \"value is set via the estimator initialisation or \"\n \"set_params method.\", DeprecationWarning)\n # Initialization\n cv = check_cv(self.cv, X, y, is_classifier(self.estimator))\n scorer = check_scoring(self.estimator, scoring=self.scoring)\n n_features = X.shape[1]\n n_features_to_select = 1\n\n # Determine the number of subsets of features\n scores = []\n\n # Cross-validation\n for n, (train, test) in enumerate(cv):\n X_train, y_train = _safe_split(self.estimator, X, y, train)\n X_test, y_test = _safe_split(self.estimator, X, y, test, train)\n\n rfe = RFE(estimator=self.estimator,\n n_features_to_select=n_features_to_select,\n step=self.step, estimator_params=self.estimator_params,\n verbose=self.verbose - 1)\n\n rfe._fit(X_train, y_train, lambda estimator, features:\n _score(estimator, X_test[:, features], y_test, scorer))\n scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))\n scores = np.sum(np.concatenate(scores, 0), 0)\n # The index in 'scores' when 'n_features' features are selected\n n_feature_index = np.ceil((n_features - n_features_to_select) \/\n float(self.step))\n n_features_to_select = max(n_features_to_select,\n n_features - ((n_feature_index -\n np.argmax(scores)) *\n self.step))\n # Re-execute an elimination with best_k over the whole set\n rfe = RFE(estimator=self.estimator,\n n_features_to_select=n_features_to_select,\n step=self.step, estimator_params=self.estimator_params)\n\n rfe.fit(X, y)\n\n # Set final attributes\n self.support_ = rfe.support_\n self.n_features_ = rfe.n_features_\n self.ranking_ = rfe.ranking_\n self.estimator_ = clone(self.estimator)\n if self.estimator_params:\n self.estimator_.set_params(**self.estimator_params)\n self.estimator_.fit(self.transform(X), y)\n\n # Fixing a normalization error, n is equal to len(cv) - 1\n # here, the scores are normalized by len(cv)\n self.grid_scores_ = scores \/ len(cv)\n return self\n","license":"mit"} {"repo_name":"AlexBryner\/SalesforceTools","path":"SalesforceScripts.py","copies":"1","size":"12737","content":"# coding: utf-8 \nimport numpy as np\nimport pandas as pd\nimport time\nfrom datetime import datetime, timedelta, date\nfrom time import sleep, gmtime, strftime\nfrom pandas import DataFrame, Series, read_csv\n\nfrom salesforce_bulk_api import SalesforceBulkJob\nfrom SalesforceBulkQuery import *\nfrom simple_salesforce import *\n\n###################################################################################################\n\n# Salesforce Credentials\n\n# Creates SimpleSalesforce Login Instance\nsf = Salesforce(username='', password='', security_token='', sandbox=, client_id='')\n\n###################################################################################################\n\ndef getBlankDF():\n return pd.DataFrame(np.nan, index=[], columns=[])\n\ndef NameCaseAsTitles(x):\n if (str(x).isupper() or str(x).islower()) and '@' not in str(x):\n return str(x).title()\n else:\n return x\n \ndef getDate(days):\n return(datetime.today() - timedelta(days=days)).strftime('%Y-%m-%dT00:00:00z') # YYYY-MM-DDThh:mm:ssz\n\n\ndef SFNulls(df, FillWith='#N\/A'):\n \"\"\"\n Description: Fills 0's and NAN's with \"#N\/A\" which is the value that the Salesforce Bulk API recognizes as Null.\n Parameters:\n df = Pandas.DataFrame\n Recognizes 'float64', 'int64', and 'int32' data types.\n \"\"\"\n df.apply(lambda s: pd.to_numeric(s, errors='ignore'))\n NumCol = df.columns.values.tolist()\n for col in NumCol:\n df[col] = df[col].replace(0, 
np.NAN).fillna('%s' % FillWith)\n\n\ndef SFQuery(SOQL: str, InList=None, LowerHeaders=True, CheckParentChild=True, KeepAttributes=False):\n \"\"\"\n Description: Queries Salesforce returning all results in a pandas dataframe. This also sets all possible data types to numbers and sets column headers to lower case. If using InList, this functionality is built with pandas dataframe columns in mind to help simplify filtering from other SOQL results.\n Parameters:\n SOQL = Salesforce SOQL Statement\n InList* = List of items for an \"IN\" filter. Apex SOQL - \"SELECT Id, Name FROM Account Where Id IN :ids\"\n SOQL parameter must be written out to the point where the : would be set in a SOQL query in Apex.\n EX: SFQuery(\"SELECT Id, Name From Contact WHERE FirstName = 'Alex' and Id IN\", IdsList)\n InList format - ['id1', 'id2', 'id2', 'id3', 'id3', 'id4', 'id5'] becomes ('id1', 'id2', 'id3', 'id4', 'id5')\n I usually use this with a dataframe column. \n ex: \"SFQuery(\"Select Id, Name From Contact Where Id In\", InList=list(your_dataframe['column_name']))\n LowerHeader = Returns Dataframe with column headers lowercase, defaulted true for previous projects\n CheckParentChild = This checks for the relationships by looking for the ordered dictionaries returned by Salesforce. It loops through to ensure reached the end of the line if stepping through multiple parent relationships. Turn off if queries need to run slighly faster.\n \n InList* - This is not an efficent use of api calls. There are limitations to the length of the queries so this is capped out at a default of 300 elements. Nested Select statements in the where clause is a more efficent use for api calls but there are always tradeoffs. At some point it would make more sense to utilize tuples, but unfortunately salesforce did not like the format with the last comma.\n \"\"\"\n def basicSOQL(SOQLstr : str):\n # formats the Salesforce ordered dictionary into a pandas dataframe\n try:\n od = sf.query_all(\"%s\" % SOQLstr)\n items = {val: dict(od['records'][val]) for val in range(len(od['records'])) } \n res = DataFrame.from_dict(items, orient='index')\n if LowerHeaders == True:\n res.columns = map(str.lower, res.columns)\n return res.apply(lambda s: pd.to_numeric(s, errors='ignore'))\n except ValueError:\n pass\n def CreateFilterStr(ListToStr):\n # creates a string from a list \n # ['id1', 'id2', 'id3', 'id4', 'id5'] -> ('id1', 'id2', 'id3', 'id4', 'id5')\n resStr = \"(\"\n r = 0\n for rl in ListToStr:\n if rl is not None:\n if r == 0:\n resStr += \"'\"+str(rl)+\"'\"\n r = 1\n elif r == 1:\n resStr += \",'\"+str(rl)+\"'\"\n resStr += \")\"\n return resStr\n def BatchQueryList(toBatchList):\n # filters the list of duplicates then batches the lists in groups\n # [('id1', 'id2', 'id3', id4', 'id5'),('id6', 'id7', 'id8', 'id9', 'id10')]\n batchSize = 300\n newList = list(set(toBatchList))\n listSize = len(newList)\n startPoint = 0\n endPoint = batchSize\n res = []\n while startPoint < listSize:\n tempStr = CreateFilterStr(newList[startPoint:endPoint])\n res.append([tempStr])\n startPoint = endPoint\n endPoint += batchSize\n return res\n def InListQuery(SOQL, InList):\n # runs a query for each list from the batched lists and stacks the results\n filterLists = BatchQueryList(InList)\n\n resDF = None\n i = 0\n for i in range(0,len(filterLists)):\n tempDF = basicSOQL(SOQLstr = \"%s %s\" % (SOQL, filterLists[i][0]))\n try: resDF = resDF.append(tempDF, ignore_index=True)\n except AttributeError: resDF = tempDF\n i += 1\n return resDF\n def 
getChildRecords(obj, row):\n if row == None:\n return None\n\n size = row.get('totalSize')\n records = row.get('records')\n tempDic = {}\n\n for i in range(0,size):\n tempDic[i] = {}\n for field in records[i].keys():\n try:\n records[i].get(field).keys()\n continue\n except AttributeError:\n pass\n tempDic[i][obj + '.' + field] = records[i].get(field)\n return tempDic\n def getParentRecords(field, row):\n if row == None:\n return None\n else:\n return row.get(field)\n \n rs = None\n if InList == None:\n rs = basicSOQL(SOQL)\n else:\n InList = list(InList)\n rs = InListQuery(SOQL, InList)\n \n # Drops the attributes column passed through by Salesforce\n if CheckParentChild == False and KeepAttributes == False:\n rs = rs.drop(['attributes'], axis=1)\n \n while CheckParentChild:\n CheckParentChild = False\n\n indexCols = []\n for col in rs:\n obj = None\n relationship = None\n for i in range(len(rs[col])):\n # scans down each column until finding an ordered dict to parse\n if rs[col][i] == None:\n continue\n try:\n if rs[col][i].get('type') != None and col == 'attributes':\n if KeepAttributes == False:\n rs = rs.drop([col], axis=1)\n break\n except AttributeError:\n indexCols.append(col) # will use this later for creating a multi indexed dataframe\n break\n\n # Determines whether parent or child query and the object type\n try:\n obj = rs[col][i].get('attributes').get('type')\n relationship = 'Parent'\n except:\n pass\n try:\n obj = rs[col][i].get('records')[0].get('attributes').get('type')\n relationship = 'Child'\n except:\n pass\n break\n \n if relationship == 'Child' and obj != None:\n rs[col] = rs.apply(lambda row: getChildRecords(obj, row[col]), axis=1)\n\n elif relationship == 'Parent' and obj != None:\n fields = []\n for i in range(len(rs[col])):\n if rs[col][i] != None:\n fields.extend(list(rs[col][i].keys()))\n fields = list(set(fields))\n \n if KeepAttributes == False:\n try:\n fields.remove('attributes')\n except ValueError:\n pass\n for field in fields:\n rs[obj + '.' + field] = rs.apply(lambda row: getParentRecords(field, row[col]), axis=1)\n rs = rs.drop([col], axis=1)\n\n CheckParentChild = True\n\n # next I'd like to setup an option for child relationship queries to return a multi indexed dataframe\n # print(indexCols)\n return rs\n \n \ndef SFFormat(df, SObject, EnforceNulls=False):\n \"\"\"\n Description: Looks up data types and dynamically formats columns to a correct format for the Bulk Api. Returns error messages for invalid data types or column headers. If EnforceNulls is true fills all blanks with #N\/A, if false will set blanks to ''.\n Parameters:\n df = Pandas.DataFrame\n SObject = Type of object for the upload. 
Ex: 'Account'\n EnforceNulls = If true will fill blanks with #N\/A to set as null in Salesforce\n \n *Currently only formats dates and datetimes\n \"\"\"\n NoFieldError = ''\n InvalidDataError = ''\n \n df.columns = map(str.lower, df.columns)\n fieldDict = getattr(sf, '%s' % SObject).describe()[\"fields\"]\n numFields = len(fieldDict)\n \n NumCol = df.columns.values.tolist()\n for col in NumCol:\n i = 0\n for x in fieldDict:\n if x['name'].lower() == col:\n dtype = x['type']\n length = x['length']\n try:\n if dtype == 'date':\n df[col] = pd.to_datetime(df[col]).dt.strftime('%Y-%m-%d').replace(to_replace='NaT', value='#N\/A') \n elif dtype == 'datetime':\n df[col] = pd.to_datetime(df[col]).dt.strftime('%Y-%m-%dT%H:%M:%S').replace(to_replace='NaT', value='#N\/A')\n except ValueError: \n InvalidDataError += (\"Invalid \"+dtype+\" : \"+col+\"\\n\")\n break\n i += 1\n if i >= numFields:\n NoFieldError += (SObject+\" does not contain : \"+col+\"\\n\")\n \n SFNulls(df)\n if EnforceNulls == False:\n for col in NumCol:\n df[col] = df[col].replace('#N\/A','')\n errors = NoFieldError+InvalidDataError\n if len(errors) > 0:\n return(errors)\n else:\n return('No Errors')\n\n \ndef SFUpload(df, UploadType, Sobject, batchSize=49995, hangtime=0):\n \"\"\"\n Description: Upload a pandas dataframe through the Salesforce Bulk API in batches of 50k. Can run either an insert or update to the listed Sobject. Sobject and UploadType must be listed as a string. ex: 'Update', 'Account' \n Parameters:\n df = Pandas.DataFrame\n UploadType = Update or Insert\n Sobject = Salesforce object in the upload. Ex - Accounts, Contact\n batchSize = Number of rows that the upload will run before submitting the next group of rows in the dataset. Defaults to 49,995 (5 batches of 9999)\n hangtime = Number of seconds to wait before uploading a new batch. Defaults to 0.\n \"\"\"\n\n if len(df) == 0:\n return\n \n startRow = 0\n endRow = batchSize\n\n while startRow < len(df):\n upload = df[startRow:endRow]\n\n Headers = upload.columns.tolist()\n Data = upload.to_records(index=False)\n job = SalesforceBulkJob(UploadType, Sobject, salesforce=sf)\n job.upload(Headers,Data)\n\n startRow = endRow\n endRow = startRow + batchSize\n time.sleep(hangtime)\n \n \ndef SFBulkQuery(SObject, SOQL):\n \"\"\"\n Description: Runs a query through the bulk api. Creates, Tracks, and Closes the Request and returns the results as a Pandas Dataframe. 
Currently there are lots of slighly obnoxious messages to help with tracking the current status.\n Parameters:\n SObject = Salesforce Object, ex: Account, Contact\n SOQL = Salesforce SOQL Statement for bulk query\n \"\"\"\n sfbulk = SalesforceBulk(sessionId=sf.session_id, host=sf.sf_instance)\n job = sfbulk.create_query_job(SObject, contentType='CSV')\n batch = sfbulk.query(job, SOQL)\n while not sfbulk.is_batch_done(job, batch):\n time.sleep(10)\n sfbulk.close_job(job)\n res = sfbulk.get_batch_result_iter(job, batch)\n return res\n","license":"mit"} {"repo_name":"Eigenstate\/msmbuilder","path":"msmbuilder\/commands\/implied_timescales.py","copies":"12","size":"5214","content":"# Author: Robert McGibbon \n# Contributors:\n# Copyright (c) 2014, Stanford University\n# All rights reserved.\n\"\"\"Scan the implied timescales of MarkovStateModels with respect to lag time.\n\nThis command will build a series of MarkovStateModels at different lag times,\nand save a file to disk containing the relaxation timescales of each of the\nmodels.\n\nA plot of these data can then be used to choose the lag time [1].\n\nReferences\n----------\n.. [1] Beauchamp, Kyle A., et al. \"MSMBuilder2: modeling conformational\n dynamics on the picosecond to millisecond scale.\" J. Chem. Theory.\n Comput. 7.10 (2011): 3412-3419.\n\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\n\nfrom __future__ import print_function, division, absolute_import\nfrom os.path import splitext\nimport sys\nimport json\n\nimport pandas as pd\n\nfrom ..dataset import dataset\nfrom ..cmdline import Command, argument, argument_group, rangetype, FlagAction\nfrom ..msm import MarkovStateModel, implied_timescales\n\n\nclass ImpliedTimescales(Command):\n _group = 'MSM'\n _concrete = True\n description = __doc__\n lag_times = argument('-l', '--lag_times', default='1:10', help='''Range\n of lag times. Specify as 'start:stop' or 'start:stop:step. The\n endpoints are inclusive.''', type=rangetype)\n inp = argument(\n '-i', '--inp', help='''Path to input dataset, a collection of 1D\n integer sequences (such as the output from clustering)''',\n required=True)\n out = argument('--out', help='''Output file''',\n default='timescales.csv')\n fmt = argument('--fmt', help='Output file format', default='csv',\n choices=('csv', 'json', 'excel'))\n _extensions = {'csv': '.csv', 'json': '.json', 'excel': '.xlsx'}\n n_jobs = argument('--n_jobs', help='Number of parallel processes',\n default=1, type=int)\n\n p = argument_group('MSM parameters')\n n_timescales = p.add_argument('--n_timescales', default=10, help='''\n The number of dynamical timescales to calculate when diagonalizing\n the transition matrix.''', type=int)\n reversible_type = p.add_argument('--reversible_type', help='''\n Method by which the reversibility of the transition matrix\n is enforced. 'mle' uses a maximum likelihood method that is\n solved by numerical optimization, and 'transpose'\n uses a more restrictive (but less computationally complex)\n direct symmetrization of the expected number of counts.''',\n choices=('mle', 'transpose'), default='mle')\n ergodic_cutoff = p.add_argument('--ergodic_cutoff', default=1, help='''\n Only the maximal strongly ergodic subgraph of the data is used to build\n an MSM. 
Ergodicity is determined by ensuring that each state is\n accessible from each other state via one or more paths involving edges\n with a number of observed directed counts greater than or equal to\n ``ergodic_cutoff``. Not that by setting ``ergodic_cutoff`` to 0, this\n trimming is effectively turned off.''', type=int)\n prior_counts = p.add_argument('--prior_counts', help='''Add a number\n of \"pseudo counts\" to each entry in the counts matrix. When\n prior_counts == 0 (default), the assigned transition probability\n between two states with no observed transitions will be zero, whereas\n when prior_counts > 0, even this unobserved transitions will be\n given nonzero probability.''', type=float, default=0)\n verbose = p.add_argument('--verbose', default=True,\n help='Enable verbose printout', action=FlagAction)\n\n def __init__(self, args):\n self.args = args\n\n def start(self):\n kwargs = {\n 'n_timescales': self.args.n_timescales,\n 'reversible_type': self.args.reversible_type,\n 'ergodic_cutoff': self.args.ergodic_cutoff,\n 'prior_counts': self.args.prior_counts,\n 'verbose': self.args.verbose,\n }\n\n with dataset(self.args.inp, mode='r') as ds:\n model = MarkovStateModel(**kwargs)\n lines = implied_timescales(\n ds, lag_times=self.args.lag_times,\n n_timescales=self.args.n_timescales,\n msm=model,\n n_jobs=self.args.n_jobs,\n verbose=self.args.verbose)\n\n cols = ['Timescale %d' % (d+1) for d in range(len(lines[0]))]\n df = pd.DataFrame(data=lines, columns=cols)\n df['Lag Time'] = self.args.lag_times\n df = df.reindex_axis(sorted(df.columns), axis=1)\n self.write_output(df)\n\n def write_output(self, df):\n outfile = splitext(self.args.out)[0] + self._extensions[self.args.fmt]\n\n print('Writing %s' % outfile)\n if self.args.fmt == 'csv':\n df.to_csv(outfile)\n elif self.args.fmt == 'json':\n with open(outfile, 'w') as f:\n json.dump(df.to_dict(orient='records'), f)\n elif self.args.fmt == 'excel':\n df.to_excel(outfile)\n else:\n raise RuntimeError('unknown fmt: %s' % fmt)\n print('All done!')\n","license":"lgpl-2.1"} {"repo_name":"deepmind\/grid-cells","path":"utils.py","copies":"1","size":"5720","content":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Helper functions for creating the training graph and plotting.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport ensembles # pylint: disable=g-bad-import-order\n\n\nnp.seterr(invalid=\"ignore\")\n\n\ndef get_place_cell_ensembles(\n env_size, neurons_seed, targets_type, lstm_init_type, n_pc, pc_scale):\n \"\"\"Create the ensembles for the Place cells.\"\"\"\n place_cell_ensembles = [\n ensembles.PlaceCellEnsemble(\n n,\n stdev=s,\n pos_min=-env_size \/ 2.0,\n pos_max=env_size \/ 
2.0,\n seed=neurons_seed,\n soft_targets=targets_type,\n soft_init=lstm_init_type)\n for n, s in zip(n_pc, pc_scale)\n ]\n return place_cell_ensembles\n\n\ndef get_head_direction_ensembles(\n neurons_seed, targets_type, lstm_init_type, n_hdc, hdc_concentration):\n \"\"\"Create the ensembles for the Head direction cells.\"\"\"\n head_direction_ensembles = [\n ensembles.HeadDirectionCellEnsemble(\n n,\n concentration=con,\n seed=neurons_seed,\n soft_targets=targets_type,\n soft_init=lstm_init_type)\n for n, con in zip(n_hdc, hdc_concentration)\n ]\n return head_direction_ensembles\n\n\ndef encode_initial_conditions(init_pos, init_hd, place_cell_ensembles,\n head_direction_ensembles):\n initial_conds = []\n for ens in place_cell_ensembles:\n initial_conds.append(\n tf.squeeze(ens.get_init(init_pos[:, tf.newaxis, :]), axis=1))\n for ens in head_direction_ensembles:\n initial_conds.append(\n tf.squeeze(ens.get_init(init_hd[:, tf.newaxis, :]), axis=1))\n return initial_conds\n\n\ndef encode_targets(target_pos, target_hd, place_cell_ensembles,\n head_direction_ensembles):\n ensembles_targets = []\n for ens in place_cell_ensembles:\n ensembles_targets.append(ens.get_targets(target_pos))\n for ens in head_direction_ensembles:\n ensembles_targets.append(ens.get_targets(target_hd))\n return ensembles_targets\n\n\ndef clip_all_gradients(g, var, limit):\n # print(var.name)\n return (tf.clip_by_value(g, -limit, limit), var)\n\n\ndef clip_bottleneck_gradient(g, var, limit):\n if (\"bottleneck\" in var.name or \"pc_logits\" in var.name):\n return (tf.clip_by_value(g, -limit, limit), var)\n else:\n return (g, var)\n\n\ndef no_clipping(g, var):\n return (g, var)\n\n\ndef concat_dict(acc, new_data):\n \"\"\"Dictionary concatenation function.\"\"\"\n\n def to_array(kk):\n if isinstance(kk, np.ndarray):\n return kk\n else:\n return np.asarray([kk])\n\n for k, v in new_data.iteritems():\n if isinstance(v, dict):\n if k in acc:\n acc[k] = concat_dict(acc[k], v)\n else:\n acc[k] = concat_dict(dict(), v)\n else:\n v = to_array(v)\n if k in acc:\n acc[k] = np.concatenate([acc[k], v])\n else:\n acc[k] = np.copy(v)\n return acc\n\n\ndef get_scores_and_plot(scorer,\n data_abs_xy,\n activations,\n directory,\n filename,\n plot_graphs=True, # pylint: disable=unused-argument\n nbins=20, # pylint: disable=unused-argument\n cm=\"jet\",\n sort_by_score_60=True):\n \"\"\"Plotting function.\"\"\"\n\n # Concatenate all trajectories\n xy = data_abs_xy.reshape(-1, data_abs_xy.shape[-1])\n act = activations.reshape(-1, activations.shape[-1])\n n_units = act.shape[1]\n # Get the rate-map for each unit\n s = [\n scorer.calculate_ratemap(xy[:, 0], xy[:, 1], act[:, i])\n for i in xrange(n_units)\n ]\n # Get the scores\n score_60, score_90, max_60_mask, max_90_mask, sac = zip(\n *[scorer.get_scores(rate_map) for rate_map in s])\n # Separations\n # separations = map(np.mean, max_60_mask)\n # Sort by score if desired\n if sort_by_score_60:\n ordering = np.argsort(-np.array(score_60))\n else:\n ordering = range(n_units)\n # Plot\n cols = 16\n rows = int(np.ceil(n_units \/ cols))\n fig = plt.figure(figsize=(24, rows * 4))\n for i in xrange(n_units):\n rf = plt.subplot(rows * 2, cols, i + 1)\n acr = plt.subplot(rows * 2, cols, n_units + i + 1)\n if i < n_units:\n index = ordering[i]\n title = \"%d (%.2f)\" % (index, score_60[index])\n # Plot the activation maps\n scorer.plot_ratemap(s[index], ax=rf, title=title, cmap=cm)\n # Plot the autocorrelation of the activation maps\n scorer.plot_sac(\n sac[index],\n mask_params=max_60_mask[index],\n 
ax=acr,\n title=title,\n cmap=cm)\n # Save\n if not os.path.exists(directory):\n os.makedirs(directory)\n with PdfPages(os.path.join(directory, filename), \"w\") as f:\n plt.savefig(f, format=\"pdf\")\n plt.close(fig)\n return (np.asarray(score_60), np.asarray(score_90),\n np.asarray(map(np.mean, max_60_mask)),\n np.asarray(map(np.mean, max_90_mask)))\n","license":"apache-2.0"} {"repo_name":"aayushidwivedi01\/spark-tk","path":"regression-tests\/sparktkregtests\/testcases\/frames\/lda_groupby_flow_test.py","copies":"11","size":"3240","content":"# vim: set encoding=utf-8\n\n# Copyright\u00a0(c)\u00a02016 Intel\u00a0Corporation\u00a0\n#\n# Licensed\u00a0under\u00a0the\u00a0Apache\u00a0License,\u00a0Version\u00a02.0\u00a0(the\u00a0\"License\");\n# you\u00a0may\u00a0not\u00a0use\u00a0this\u00a0file\u00a0except\u00a0in\u00a0compliance\u00a0with\u00a0the\u00a0License.\n# You\u00a0may\u00a0obtain\u00a0a\u00a0copy\u00a0of\u00a0the\u00a0License\u00a0at\n#\n# \u00a0\u00a0\u00a0\u00a0\u00a0 http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless\u00a0required\u00a0by\u00a0applicable\u00a0law\u00a0or\u00a0agreed\u00a0to\u00a0in\u00a0writing,\u00a0software\n# distributed\u00a0under\u00a0the\u00a0License\u00a0is\u00a0distributed\u00a0on\u00a0an\u00a0\"AS\u00a0IS\"\u00a0BASIS,\n# WITHOUT\u00a0WARRANTIES\u00a0OR\u00a0CONDITIONS\u00a0OF\u00a0ANY\u00a0KIND,\u00a0either\u00a0express\u00a0or\u00a0implied.\n# See\u00a0the\u00a0License\u00a0for\u00a0the\u00a0specific\u00a0language\u00a0governing\u00a0permissions\u00a0and\n# limitations\u00a0under\u00a0the\u00a0License.\n#\n\n\"\"\"Sample LDA\/Groupby example\"\"\"\nimport unittest\nfrom sparktkregtests.lib import sparktk_test\nimport numpy\n\n\nclass LDAExample(sparktk_test.SparkTKTestCase):\n\n def test_lda_example(self):\n \"\"\"LDA demo from examples directory\"\"\"\n\n # this is a full worked example of lda and groupby\n # with known correct values\n data = [['nytimes', 'harry', 3], ['nytimes', 'economy', 35], ['nytimes', 'jobs', 40], ['nytimes', 'magic', 1],\n ['nytimes', 'realestate', 15], ['nytimes', 'movies', 6], ['economist', 'economy', 50],\n ['economist', 'jobs', 35], ['economist', 'realestate', 20], ['economist', 'movies', 1],\n ['economist', 'harry', 1], ['economist', 'magic', 1], ['harrypotter', 'harry', 40],\n ['harrypotter', 'magic', 30], ['harrypotter', 'chamber', 20], ['harrypotter', 'secrets', 30]]\n frame = self.context.frame.create(\n data,\n schema=[('doc_id', str),\n ('word_id', str),\n ('word_count', long)])\n\n model = self.context.models.clustering.lda.train(\n frame, \"doc_id\", \"word_id\", \"word_count\", max_iterations=3, num_topics=2)\n\n doc_results = model.topics_given_doc_frame\n word_results = model.word_given_topics_frame\n\n doc_results.rename_columns({'topic_probabilities': 'lda_results_doc'})\n word_results.rename_columns(\n {'topic_probabilities': 'lda_results_word'})\n\n frame = frame.join_left(\n doc_results, left_on=\"doc_id\", right_on=\"doc_id\")\n frame = frame.join_left(\n word_results, left_on=\"word_id\", right_on=\"word_id\")\n\n # similar to calling predict on a model\n frame.dot_product(\n ['lda_results_doc'], ['lda_results_word'], 'lda_score')\n\n word_hist = frame.histogram('word_count', 4)\n lda_hist = frame.histogram('lda_score', 2)\n\n group_frame = frame.group_by(\n 'word_id_L',\n {'word_count': self.context.agg.histogram(\n cutoffs=word_hist.cutoffs,\n include_lowest=True,\n strict_binning=False),\n 'lda_score': self.context.agg.histogram(lda_hist.cutoffs)})\n pandas = group_frame.to_pandas()\n\n for 
(index, row) in pandas.iterrows():\n if str(row[\"word_id_L\"]) == \"magic\":\n numpy.testing.assert_equal(list(row[\"word_count_HISTOGRAM\"]), [float(2.0\/3.0), 0, float(1.0\/3.0), 0])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","license":"apache-2.0"} {"repo_name":"mattgiguere\/scikit-learn","path":"sklearn\/utils\/arpack.py","copies":"265","size":"64837","content":"\"\"\"\nThis contains a copy of the future version of\nscipy.sparse.linalg.eigen.arpack.eigsh\nIt's an upgraded wrapper of the ARPACK library which\nallows the use of shift-invert mode for symmetric matrices.\n\n\nFind a few eigenvectors and eigenvalues of a matrix.\n\n\nUses ARPACK: http:\/\/www.caam.rice.edu\/software\/ARPACK\/\n\n\"\"\"\n# Wrapper implementation notes\n#\n# ARPACK Entry Points\n# -------------------\n# The entry points to ARPACK are\n# - (s,d)seupd : single and double precision symmetric matrix\n# - (s,d,c,z)neupd: single,double,complex,double complex general matrix\n# This wrapper puts the *neupd (general matrix) interfaces in eigs()\n# and the *seupd (symmetric matrix) in eigsh().\n# There is no Hermetian complex\/double complex interface.\n# To find eigenvalues of a Hermetian matrix you\n# must use eigs() and not eigsh()\n# It might be desirable to handle the Hermetian case differently\n# and, for example, return real eigenvalues.\n\n# Number of eigenvalues returned and complex eigenvalues\n# ------------------------------------------------------\n# The ARPACK nonsymmetric real and double interface (s,d)naupd return\n# eigenvalues and eigenvectors in real (float,double) arrays.\n# Since the eigenvalues and eigenvectors are, in general, complex\n# ARPACK puts the real and imaginary parts in consecutive entries\n# in real-valued arrays. This wrapper puts the real entries\n# into complex data types and attempts to return the requested eigenvalues\n# and eigenvectors.\n\n\n# Solver modes\n# ------------\n# ARPACK and handle shifted and shift-inverse computations\n# for eigenvalues by providing a shift (sigma) and a solver.\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']\nimport warnings\n\nfrom scipy.sparse.linalg.eigen.arpack import _arpack\nimport numpy as np\nfrom scipy.sparse.linalg.interface import aslinearoperator, LinearOperator\nfrom scipy.sparse import identity, isspmatrix, isspmatrix_csr\nfrom scipy.linalg import lu_factor, lu_solve\nfrom scipy.sparse.sputils import isdense\nfrom scipy.sparse.linalg import gmres, splu\nimport scipy\nfrom distutils.version import LooseVersion\n\n\n_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}\n_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}\n\nDNAUPD_ERRORS = {\n 0: \"Normal exit.\",\n 1: \"Maximum number of iterations taken. \"\n \"All possible eigenvalues of OP has been found. IPARAM(5) \"\n \"returns the number of wanted converged Ritz values.\",\n 2: \"No longer an informational error. Deprecated starting \"\n \"with release 2 of ARPACK.\",\n 3: \"No shifts could be applied during a cycle of the \"\n \"Implicitly restarted Arnoldi iteration. One possibility \"\n \"is to increase the size of NCV relative to NEV. 
\",\n -1: \"N must be positive.\",\n -2: \"NEV must be positive.\",\n -3: \"NCV-NEV >= 2 and less than or equal to N.\",\n -4: \"The maximum number of Arnoldi update iterations allowed \"\n \"must be greater than zero.\",\n -5: \" WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'\",\n -6: \"BMAT must be one of 'I' or 'G'.\",\n -7: \"Length of private work array WORKL is not sufficient.\",\n -8: \"Error return from LAPACK eigenvalue calculation;\",\n -9: \"Starting vector is zero.\",\n -10: \"IPARAM(7) must be 1,2,3,4.\",\n -11: \"IPARAM(7) = 1 and BMAT = 'G' are incompatible.\",\n -12: \"IPARAM(1) must be equal to 0 or 1.\",\n -13: \"NEV and WHICH = 'BE' are incompatible.\",\n -9999: \"Could not build an Arnoldi factorization. \"\n \"IPARAM(5) returns the size of the current Arnoldi \"\n \"factorization. The user is advised to check that \"\n \"enough workspace and array storage has been allocated.\"\n}\n\nSNAUPD_ERRORS = DNAUPD_ERRORS\n\nZNAUPD_ERRORS = DNAUPD_ERRORS.copy()\nZNAUPD_ERRORS[-10] = \"IPARAM(7) must be 1,2,3.\"\n\nCNAUPD_ERRORS = ZNAUPD_ERRORS\n\nDSAUPD_ERRORS = {\n 0: \"Normal exit.\",\n 1: \"Maximum number of iterations taken. \"\n \"All possible eigenvalues of OP has been found.\",\n 2: \"No longer an informational error. Deprecated starting with \"\n \"release 2 of ARPACK.\",\n 3: \"No shifts could be applied during a cycle of the Implicitly \"\n \"restarted Arnoldi iteration. One possibility is to increase \"\n \"the size of NCV relative to NEV. \",\n -1: \"N must be positive.\",\n -2: \"NEV must be positive.\",\n -3: \"NCV must be greater than NEV and less than or equal to N.\",\n -4: \"The maximum number of Arnoldi update iterations allowed \"\n \"must be greater than zero.\",\n -5: \"WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.\",\n -6: \"BMAT must be one of 'I' or 'G'.\",\n -7: \"Length of private work array WORKL is not sufficient.\",\n -8: \"Error return from trid. eigenvalue calculation; \"\n \"Informational error from LAPACK routine dsteqr .\",\n -9: \"Starting vector is zero.\",\n -10: \"IPARAM(7) must be 1,2,3,4,5.\",\n -11: \"IPARAM(7) = 1 and BMAT = 'G' are incompatible.\",\n -12: \"IPARAM(1) must be equal to 0 or 1.\",\n -13: \"NEV and WHICH = 'BE' are incompatible. \",\n -9999: \"Could not build an Arnoldi factorization. \"\n \"IPARAM(5) returns the size of the current Arnoldi \"\n \"factorization. The user is advised to check that \"\n \"enough workspace and array storage has been allocated.\",\n}\n\nSSAUPD_ERRORS = DSAUPD_ERRORS\n\nDNEUPD_ERRORS = {\n 0: \"Normal exit.\",\n 1: \"The Schur form computed by LAPACK routine dlahqr \"\n \"could not be reordered by LAPACK routine dtrsen. \"\n \"Re-enter subroutine dneupd with IPARAM(5)NCV and \"\n \"increase the size of the arrays DR and DI to have \"\n \"dimension at least dimension NCV and allocate at least NCV \"\n \"columns for Z. NOTE: Not necessary if Z and V share \"\n \"the same space. Please notify the authors if this error \"\n \"occurs.\",\n -1: \"N must be positive.\",\n -2: \"NEV must be positive.\",\n -3: \"NCV-NEV >= 2 and less than or equal to N.\",\n -5: \"WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'\",\n -6: \"BMAT must be one of 'I' or 'G'.\",\n -7: \"Length of private work WORKL array is not sufficient.\",\n -8: \"Error return from calculation of a real Schur form. \"\n \"Informational error from LAPACK routine dlahqr .\",\n -9: \"Error return from calculation of eigenvectors. 
\"\n \"Informational error from LAPACK routine dtrevc.\",\n -10: \"IPARAM(7) must be 1,2,3,4.\",\n -11: \"IPARAM(7) = 1 and BMAT = 'G' are incompatible.\",\n -12: \"HOWMNY = 'S' not yet implemented\",\n -13: \"HOWMNY must be one of 'A' or 'P' if RVEC = .true.\",\n -14: \"DNAUPD did not find any eigenvalues to sufficient \"\n \"accuracy.\",\n -15: \"DNEUPD got a different count of the number of converged \"\n \"Ritz values than DNAUPD got. This indicates the user \"\n \"probably made an error in passing data from DNAUPD to \"\n \"DNEUPD or that the data was modified before entering \"\n \"DNEUPD\",\n}\n\nSNEUPD_ERRORS = DNEUPD_ERRORS.copy()\nSNEUPD_ERRORS[1] = (\"The Schur form computed by LAPACK routine slahqr \"\n \"could not be reordered by LAPACK routine strsen . \"\n \"Re-enter subroutine dneupd with IPARAM(5)=NCV and \"\n \"increase the size of the arrays DR and DI to have \"\n \"dimension at least dimension NCV and allocate at least \"\n \"NCV columns for Z. NOTE: Not necessary if Z and V share \"\n \"the same space. Please notify the authors if this error \"\n \"occurs.\")\nSNEUPD_ERRORS[-14] = (\"SNAUPD did not find any eigenvalues to sufficient \"\n \"accuracy.\")\nSNEUPD_ERRORS[-15] = (\"SNEUPD got a different count of the number of \"\n \"converged Ritz values than SNAUPD got. This indicates \"\n \"the user probably made an error in passing data from \"\n \"SNAUPD to SNEUPD or that the data was modified before \"\n \"entering SNEUPD\")\n\nZNEUPD_ERRORS = {0: \"Normal exit.\",\n 1: \"The Schur form computed by LAPACK routine csheqr \"\n \"could not be reordered by LAPACK routine ztrsen. \"\n \"Re-enter subroutine zneupd with IPARAM(5)=NCV and \"\n \"increase the size of the array D to have \"\n \"dimension at least dimension NCV and allocate at least \"\n \"NCV columns for Z. NOTE: Not necessary if Z and V share \"\n \"the same space. Please notify the authors if this error \"\n \"occurs.\",\n -1: \"N must be positive.\",\n -2: \"NEV must be positive.\",\n -3: \"NCV-NEV >= 1 and less than or equal to N.\",\n -5: \"WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'\",\n -6: \"BMAT must be one of 'I' or 'G'.\",\n -7: \"Length of private work WORKL array is not sufficient.\",\n -8: \"Error return from LAPACK eigenvalue calculation. \"\n \"This should never happened.\",\n -9: \"Error return from calculation of eigenvectors. \"\n \"Informational error from LAPACK routine ztrevc.\",\n -10: \"IPARAM(7) must be 1,2,3\",\n -11: \"IPARAM(7) = 1 and BMAT = 'G' are incompatible.\",\n -12: \"HOWMNY = 'S' not yet implemented\",\n -13: \"HOWMNY must be one of 'A' or 'P' if RVEC = .true.\",\n -14: \"ZNAUPD did not find any eigenvalues to sufficient \"\n \"accuracy.\",\n -15: \"ZNEUPD got a different count of the number of \"\n \"converged Ritz values than ZNAUPD got. This \"\n \"indicates the user probably made an error in passing \"\n \"data from ZNAUPD to ZNEUPD or that the data was \"\n \"modified before entering ZNEUPD\"}\n\nCNEUPD_ERRORS = ZNEUPD_ERRORS.copy()\nCNEUPD_ERRORS[-14] = (\"CNAUPD did not find any eigenvalues to sufficient \"\n \"accuracy.\")\nCNEUPD_ERRORS[-15] = (\"CNEUPD got a different count of the number of \"\n \"converged Ritz values than CNAUPD got. 
This indicates \"\n \"the user probably made an error in passing data from \"\n \"CNAUPD to CNEUPD or that the data was modified before \"\n \"entering CNEUPD\")\n\nDSEUPD_ERRORS = {\n 0: \"Normal exit.\",\n -1: \"N must be positive.\",\n -2: \"NEV must be positive.\",\n -3: \"NCV must be greater than NEV and less than or equal to N.\",\n -5: \"WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.\",\n -6: \"BMAT must be one of 'I' or 'G'.\",\n -7: \"Length of private work WORKL array is not sufficient.\",\n -8: (\"Error return from trid. eigenvalue calculation; \"\n \"Information error from LAPACK routine dsteqr.\"),\n -9: \"Starting vector is zero.\",\n -10: \"IPARAM(7) must be 1,2,3,4,5.\",\n -11: \"IPARAM(7) = 1 and BMAT = 'G' are incompatible.\",\n -12: \"NEV and WHICH = 'BE' are incompatible.\",\n -14: \"DSAUPD did not find any eigenvalues to sufficient accuracy.\",\n -15: \"HOWMNY must be one of 'A' or 'S' if RVEC = .true.\",\n -16: \"HOWMNY = 'S' not yet implemented\",\n -17: (\"DSEUPD got a different count of the number of converged \"\n \"Ritz values than DSAUPD got. This indicates the user \"\n \"probably made an error in passing data from DSAUPD to \"\n \"DSEUPD or that the data was modified before entering \"\n \"DSEUPD.\")\n}\n\nSSEUPD_ERRORS = DSEUPD_ERRORS.copy()\nSSEUPD_ERRORS[-14] = (\"SSAUPD did not find any eigenvalues \"\n \"to sufficient accuracy.\")\nSSEUPD_ERRORS[-17] = (\"SSEUPD got a different count of the number of \"\n \"converged \"\n \"Ritz values than SSAUPD got. This indicates the user \"\n \"probably made an error in passing data from SSAUPD to \"\n \"SSEUPD or that the data was modified before entering \"\n \"SSEUPD.\")\n\n_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,\n 's': SSAUPD_ERRORS}\n_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,\n 's': SNAUPD_ERRORS,\n 'z': ZNAUPD_ERRORS,\n 'c': CNAUPD_ERRORS}\n_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,\n 's': SSEUPD_ERRORS}\n_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,\n 's': SNEUPD_ERRORS,\n 'z': ZNEUPD_ERRORS,\n 'c': CNEUPD_ERRORS}\n\n# accepted values of parameter WHICH in _SEUPD\n_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']\n\n# accepted values of parameter WHICH in _NAUPD\n_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']\n\n\nclass ArpackError(RuntimeError):\n \"\"\"\n ARPACK error\n \"\"\"\n def __init__(self, info, infodict=_NAUPD_ERRORS):\n msg = infodict.get(info, \"Unknown error\")\n RuntimeError.__init__(self, \"ARPACK error %d: %s\" % (info, msg))\n\n\nclass ArpackNoConvergence(ArpackError):\n \"\"\"\n ARPACK iteration did not converge\n\n Attributes\n ----------\n eigenvalues : ndarray\n Partial result. Converged eigenvalues.\n eigenvectors : ndarray\n Partial result. 
Converged eigenvectors.\n\n \"\"\"\n def __init__(self, msg, eigenvalues, eigenvectors):\n ArpackError.__init__(self, -1, {-1: msg})\n self.eigenvalues = eigenvalues\n self.eigenvectors = eigenvectors\n\n\nclass _ArpackParams(object):\n def __init__(self, n, k, tp, mode=1, sigma=None,\n ncv=None, v0=None, maxiter=None, which=\"LM\", tol=0):\n if k <= 0:\n raise ValueError(\"k must be positive, k=%d\" % k)\n\n if maxiter is None:\n maxiter = n * 10\n if maxiter <= 0:\n raise ValueError(\"maxiter must be positive, maxiter=%d\" % maxiter)\n\n if tp not in 'fdFD':\n raise ValueError(\"matrix type must be 'f', 'd', 'F', or 'D'\")\n\n if v0 is not None:\n # ARPACK overwrites its initial resid, make a copy\n self.resid = np.array(v0, copy=True)\n info = 1\n else:\n self.resid = np.zeros(n, tp)\n info = 0\n\n if sigma is None:\n #sigma not used\n self.sigma = 0\n else:\n self.sigma = sigma\n\n if ncv is None:\n ncv = 2 * k + 1\n ncv = min(ncv, n)\n\n self.v = np.zeros((n, ncv), tp) # holds Ritz vectors\n self.iparam = np.zeros(11, \"int\")\n\n # set solver mode and parameters\n ishfts = 1\n self.mode = mode\n self.iparam[0] = ishfts\n self.iparam[2] = maxiter\n self.iparam[3] = 1\n self.iparam[6] = mode\n\n self.n = n\n self.tol = tol\n self.k = k\n self.maxiter = maxiter\n self.ncv = ncv\n self.which = which\n self.tp = tp\n self.info = info\n\n self.converged = False\n self.ido = 0\n\n def _raise_no_convergence(self):\n msg = \"No convergence (%d iterations, %d\/%d eigenvectors converged)\"\n k_ok = self.iparam[4]\n num_iter = self.iparam[2]\n try:\n ev, vec = self.extract(True)\n except ArpackError as err:\n msg = \"%s [%s]\" % (msg, err)\n ev = np.zeros((0,))\n vec = np.zeros((self.n, 0))\n k_ok = 0\n raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)\n\n\nclass _SymmetricArpackParams(_ArpackParams):\n def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,\n Minv_matvec=None, sigma=None,\n ncv=None, v0=None, maxiter=None, which=\"LM\", tol=0):\n # The following modes are supported:\n # mode = 1:\n # Solve the standard eigenvalue problem:\n # A*x = lambda*x :\n # A - symmetric\n # Arguments should be\n # matvec = left multiplication by A\n # M_matvec = None [not used]\n # Minv_matvec = None [not used]\n #\n # mode = 2:\n # Solve the general eigenvalue problem:\n # A*x = lambda*M*x\n # A - symmetric\n # M - symmetric positive definite\n # Arguments should be\n # matvec = left multiplication by A\n # M_matvec = left multiplication by M\n # Minv_matvec = left multiplication by M^-1\n #\n # mode = 3:\n # Solve the general eigenvalue problem in shift-invert mode:\n # A*x = lambda*M*x\n # A - symmetric\n # M - symmetric positive semi-definite\n # Arguments should be\n # matvec = None [not used]\n # M_matvec = left multiplication by M\n # or None, if M is the identity\n # Minv_matvec = left multiplication by [A-sigma*M]^-1\n #\n # mode = 4:\n # Solve the general eigenvalue problem in Buckling mode:\n # A*x = lambda*AG*x\n # A - symmetric positive semi-definite\n # AG - symmetric indefinite\n # Arguments should be\n # matvec = left multiplication by A\n # M_matvec = None [not used]\n # Minv_matvec = left multiplication by [A-sigma*AG]^-1\n #\n # mode = 5:\n # Solve the general eigenvalue problem in Cayley-transformed mode:\n # A*x = lambda*M*x\n # A - symmetric\n # M - symmetric positive semi-definite\n # Arguments should be\n # matvec = left multiplication by A\n # M_matvec = left multiplication by M\n # or None, if M is the identity\n # Minv_matvec = left multiplication by 
[A-sigma*M]^-1\n if mode == 1:\n if matvec is None:\n raise ValueError(\"matvec must be specified for mode=1\")\n if M_matvec is not None:\n raise ValueError(\"M_matvec cannot be specified for mode=1\")\n if Minv_matvec is not None:\n raise ValueError(\"Minv_matvec cannot be specified for mode=1\")\n\n self.OP = matvec\n self.B = lambda x: x\n self.bmat = 'I'\n elif mode == 2:\n if matvec is None:\n raise ValueError(\"matvec must be specified for mode=2\")\n if M_matvec is None:\n raise ValueError(\"M_matvec must be specified for mode=2\")\n if Minv_matvec is None:\n raise ValueError(\"Minv_matvec must be specified for mode=2\")\n\n self.OP = lambda x: Minv_matvec(matvec(x))\n self.OPa = Minv_matvec\n self.OPb = matvec\n self.B = M_matvec\n self.bmat = 'G'\n elif mode == 3:\n if matvec is not None:\n raise ValueError(\"matvec must not be specified for mode=3\")\n if Minv_matvec is None:\n raise ValueError(\"Minv_matvec must be specified for mode=3\")\n\n if M_matvec is None:\n self.OP = Minv_matvec\n self.OPa = Minv_matvec\n self.B = lambda x: x\n self.bmat = 'I'\n else:\n self.OP = lambda x: Minv_matvec(M_matvec(x))\n self.OPa = Minv_matvec\n self.B = M_matvec\n self.bmat = 'G'\n elif mode == 4:\n if matvec is None:\n raise ValueError(\"matvec must be specified for mode=4\")\n if M_matvec is not None:\n raise ValueError(\"M_matvec must not be specified for mode=4\")\n if Minv_matvec is None:\n raise ValueError(\"Minv_matvec must be specified for mode=4\")\n self.OPa = Minv_matvec\n self.OP = lambda x: self.OPa(matvec(x))\n self.B = matvec\n self.bmat = 'G'\n elif mode == 5:\n if matvec is None:\n raise ValueError(\"matvec must be specified for mode=5\")\n if Minv_matvec is None:\n raise ValueError(\"Minv_matvec must be specified for mode=5\")\n\n self.OPa = Minv_matvec\n self.A_matvec = matvec\n\n if M_matvec is None:\n self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)\n self.B = lambda x: x\n self.bmat = 'I'\n else:\n self.OP = lambda x: Minv_matvec(matvec(x)\n + sigma * M_matvec(x))\n self.B = M_matvec\n self.bmat = 'G'\n else:\n raise ValueError(\"mode=%i not implemented\" % mode)\n\n if which not in _SEUPD_WHICH:\n raise ValueError(\"which must be one of %s\"\n % ' '.join(_SEUPD_WHICH))\n if k >= n:\n raise ValueError(\"k must be less than rank(A), k=%d\" % k)\n\n _ArpackParams.__init__(self, n, k, tp, mode, sigma,\n ncv, v0, maxiter, which, tol)\n\n if self.ncv > n or self.ncv <= k:\n raise ValueError(\"ncv must be k= n - 1:\n raise ValueError(\"k must be less than rank(A)-1, k=%d\" % k)\n\n _ArpackParams.__init__(self, n, k, tp, mode, sigma,\n ncv, v0, maxiter, which, tol)\n\n if self.ncv > n or self.ncv <= k + 1:\n raise ValueError(\"ncv must be k+1 k, so we'll\n # throw out this case.\n nreturned -= 1\n i += 1\n\n else:\n # real matrix, mode 3 or 4, imag(sigma) is nonzero:\n # see remark 3 in neupd.f\n # Build complex eigenvalues from real and imaginary parts\n i = 0\n while i <= k:\n if abs(d[i].imag) == 0:\n d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))\n else:\n if i < k:\n z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]\n z[:, i + 1] = z[:, i].conjugate()\n d[i] = ((np.dot(zr[:, i],\n self.matvec(zr[:, i]))\n + np.dot(zr[:, i + 1],\n self.matvec(zr[:, i + 1])))\n + 1j * (np.dot(zr[:, i],\n self.matvec(zr[:, i + 1]))\n - np.dot(zr[:, i + 1],\n self.matvec(zr[:, i]))))\n d[i + 1] = d[i].conj()\n i += 1\n else:\n #last eigenvalue is complex: the imaginary part of\n # the eigenvector has not been returned\n #this can only happen if nreturned > k, so we'll\n # throw out this case.\n 
nreturned -= 1\n i += 1\n\n # Now we have k+1 possible eigenvalues and eigenvectors\n # Return the ones specified by the keyword \"which\"\n\n if nreturned <= k:\n # we got less or equal as many eigenvalues we wanted\n d = d[:nreturned]\n z = z[:, :nreturned]\n else:\n # we got one extra eigenvalue (likely a cc pair, but which?)\n # cut at approx precision for sorting\n rd = np.round(d, decimals=_ndigits[self.tp])\n if self.which in ['LR', 'SR']:\n ind = np.argsort(rd.real)\n elif self.which in ['LI', 'SI']:\n # for LI,SI ARPACK returns largest,smallest\n # abs(imaginary) why?\n ind = np.argsort(abs(rd.imag))\n else:\n ind = np.argsort(abs(rd))\n if self.which in ['LR', 'LM', 'LI']:\n d = d[ind[-k:]]\n z = z[:, ind[-k:]]\n if self.which in ['SR', 'SM', 'SI']:\n d = d[ind[:k]]\n z = z[:, ind[:k]]\n else:\n # complex is so much simpler...\n d, z, ierr =\\\n self._arpack_extract(\n return_eigenvectors, howmny, sselect, self.sigma, workev,\n self.bmat, self.which, k, self.tol, self.resid, self.v,\n self.iparam, self.ipntr, self.workd, self.workl,\n self.rwork, ierr)\n\n if ierr != 0:\n raise ArpackError(ierr, infodict=self.extract_infodict)\n\n k_ok = self.iparam[4]\n d = d[:k_ok]\n z = z[:, :k_ok]\n\n if return_eigenvectors:\n return d, z\n else:\n return d\n\n\ndef _aslinearoperator_with_dtype(m):\n m = aslinearoperator(m)\n if not hasattr(m, 'dtype'):\n x = np.zeros(m.shape[1])\n m.dtype = (m * x).dtype\n return m\n\n\nclass SpLuInv(LinearOperator):\n \"\"\"\n SpLuInv:\n helper class to repeatedly solve M*x=b\n using a sparse LU-decopposition of M\n \"\"\"\n def __init__(self, M):\n self.M_lu = splu(M)\n LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)\n self.isreal = not np.issubdtype(self.dtype, np.complexfloating)\n\n def _matvec(self, x):\n # careful here: splu.solve will throw away imaginary\n # part of x if M is real\n if self.isreal and np.issubdtype(x.dtype, np.complexfloating):\n return (self.M_lu.solve(np.real(x))\n + 1j * self.M_lu.solve(np.imag(x)))\n else:\n return self.M_lu.solve(x)\n\n\nclass LuInv(LinearOperator):\n \"\"\"\n LuInv:\n helper class to repeatedly solve M*x=b\n using an LU-decomposition of M\n \"\"\"\n def __init__(self, M):\n self.M_lu = lu_factor(M)\n LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)\n\n def _matvec(self, x):\n return lu_solve(self.M_lu, x)\n\n\nclass IterInv(LinearOperator):\n \"\"\"\n IterInv:\n helper class to repeatedly solve M*x=b\n using an iterative method.\n \"\"\"\n def __init__(self, M, ifunc=gmres, tol=0):\n if tol <= 0:\n # when tol=0, ARPACK uses machine tolerance as calculated\n # by LAPACK's _LAMCH function. We should match this\n tol = np.finfo(M.dtype).eps\n self.M = M\n self.ifunc = ifunc\n self.tol = tol\n if hasattr(M, 'dtype'):\n dtype = M.dtype\n else:\n x = np.zeros(M.shape[1])\n dtype = (M * x).dtype\n LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)\n\n def _matvec(self, x):\n b, info = self.ifunc(self.M, x, tol=self.tol)\n if info != 0:\n raise ValueError(\"Error in inverting M: function \"\n \"%s did not converge (info = %i).\"\n % (self.ifunc.__name__, info))\n return b\n\n\nclass IterOpInv(LinearOperator):\n \"\"\"\n IterOpInv:\n helper class to repeatedly solve [A-sigma*M]*x = b\n using an iterative method\n \"\"\"\n def __init__(self, A, M, sigma, ifunc=gmres, tol=0):\n if tol <= 0:\n # when tol=0, ARPACK uses machine tolerance as calculated\n # by LAPACK's _LAMCH function. 
We should match this\n tol = np.finfo(A.dtype).eps\n self.A = A\n self.M = M\n self.sigma = sigma\n self.ifunc = ifunc\n self.tol = tol\n\n x = np.zeros(A.shape[1])\n if M is None:\n dtype = self.mult_func_M_None(x).dtype\n self.OP = LinearOperator(self.A.shape,\n self.mult_func_M_None,\n dtype=dtype)\n else:\n dtype = self.mult_func(x).dtype\n self.OP = LinearOperator(self.A.shape,\n self.mult_func,\n dtype=dtype)\n LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)\n\n def mult_func(self, x):\n return self.A.matvec(x) - self.sigma * self.M.matvec(x)\n\n def mult_func_M_None(self, x):\n return self.A.matvec(x) - self.sigma * x\n\n def _matvec(self, x):\n b, info = self.ifunc(self.OP, x, tol=self.tol)\n if info != 0:\n raise ValueError(\"Error in inverting [A-sigma*M]: function \"\n \"%s did not converge (info = %i).\"\n % (self.ifunc.__name__, info))\n return b\n\n\ndef get_inv_matvec(M, symmetric=False, tol=0):\n if isdense(M):\n return LuInv(M).matvec\n elif isspmatrix(M):\n if isspmatrix_csr(M) and symmetric:\n M = M.T\n return SpLuInv(M).matvec\n else:\n return IterInv(M, tol=tol).matvec\n\n\ndef get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):\n if sigma == 0:\n return get_inv_matvec(A, symmetric=symmetric, tol=tol)\n\n if M is None:\n #M is the identity matrix\n if isdense(A):\n if (np.issubdtype(A.dtype, np.complexfloating)\n or np.imag(sigma) == 0):\n A = np.copy(A)\n else:\n A = A + 0j\n A.flat[::A.shape[1] + 1] -= sigma\n return LuInv(A).matvec\n elif isspmatrix(A):\n A = A - sigma * identity(A.shape[0])\n if symmetric and isspmatrix_csr(A):\n A = A.T\n return SpLuInv(A.tocsc()).matvec\n else:\n return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,\n tol=tol).matvec\n else:\n if ((not isdense(A) and not isspmatrix(A)) or\n (not isdense(M) and not isspmatrix(M))):\n return IterOpInv(_aslinearoperator_with_dtype(A),\n _aslinearoperator_with_dtype(M), sigma,\n tol=tol).matvec\n elif isdense(A) or isdense(M):\n return LuInv(A - sigma * M).matvec\n else:\n OP = A - sigma * M\n if symmetric and isspmatrix_csr(OP):\n OP = OP.T\n return SpLuInv(OP.tocsc()).matvec\n\n\ndef _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,\n maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,\n OPpart=None):\n \"\"\"\n Find k eigenvalues and eigenvectors of the square matrix A.\n\n Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem\n for w[i] eigenvalues with corresponding eigenvectors x[i].\n\n If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the\n generalized eigenvalue problem for w[i] eigenvalues\n with corresponding eigenvectors x[i]\n\n Parameters\n ----------\n A : An N x N matrix, array, sparse matrix, or LinearOperator representing \\\n the operation A * x, where A is a real or complex square matrix.\n\n k : int, default 6\n The number of eigenvalues and eigenvectors desired.\n `k` must be smaller than N. It is not possible to compute all\n eigenvectors of a matrix.\n\n return_eigenvectors : boolean, default True\n Whether to return the eigenvectors along with the eigenvalues.\n\n M : An N x N matrix, array, sparse matrix, or LinearOperator representing\n the operation M*x for the generalized eigenvalue problem\n ``A * x = w * M * x``\n M must represent a real symmetric matrix. For best results, M should\n be of the same type as A. 
Additionally:\n * If sigma==None, M is positive definite\n * If sigma is specified, M is positive semi-definite\n If sigma==None, eigs requires an operator to compute the solution\n of the linear equation `M * x = b`. This is done internally via a\n (sparse) LU decomposition for an explicit matrix M, or via an\n iterative solver for a general linear operator. Alternatively,\n the user can supply the matrix or operator Minv, which gives\n x = Minv * b = M^-1 * b\n\n sigma : real or complex\n Find eigenvalues near sigma using shift-invert mode. This requires\n an operator to compute the solution of the linear system\n `[A - sigma * M] * x = b`, where M is the identity matrix if\n unspecified. This is computed internally via a (sparse) LU\n decomposition for explicit matrices A & M, or via an iterative\n solver if either A or M is a general linear operator.\n Alternatively, the user can supply the matrix or operator OPinv,\n which gives x = OPinv * b = [A - sigma * M]^-1 * b.\n For a real matrix A, shift-invert can either be done in imaginary\n mode or real mode, specified by the parameter OPpart ('r' or 'i').\n Note that when sigma is specified, the keyword 'which' (below)\n refers to the shifted eigenvalues w'[i] where:\n * If A is real and OPpart == 'r' (default),\n w'[i] = 1\/2 * [ 1\/(w[i]-sigma) + 1\/(w[i]-conj(sigma)) ]\n * If A is real and OPpart == 'i',\n w'[i] = 1\/2i * [ 1\/(w[i]-sigma) - 1\/(w[i]-conj(sigma)) ]\n * If A is complex,\n w'[i] = 1\/(w[i]-sigma)\n\n v0 : array\n Starting vector for iteration.\n\n ncv : integer\n The number of Lanczos vectors generated\n `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.\n\n which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']\n Which `k` eigenvectors and eigenvalues to find:\n - 'LM' : largest magnitude\n - 'SM' : smallest magnitude\n - 'LR' : largest real part\n - 'SR' : smallest real part\n - 'LI' : largest imaginary part\n - 'SI' : smallest imaginary part\n When sigma != None, 'which' refers to the shifted eigenvalues w'[i]\n (see discussion in 'sigma', above). ARPACK is generally better\n at finding large values than small values. 
If small eigenvalues are\n desired, consider using shift-invert mode for better performance.\n\n maxiter : integer\n Maximum number of Arnoldi update iterations allowed\n\n tol : float\n Relative accuracy for eigenvalues (stopping criterion)\n The default value of 0 implies machine precision.\n\n return_eigenvectors : boolean\n Return eigenvectors (True) in addition to eigenvalues\n\n Minv : N x N matrix, array, sparse matrix, or linear operator\n See notes in M, above.\n \n OPinv : N x N matrix, array, sparse matrix, or linear operator\n See notes in sigma, above.\n OPpart : 'r' or 'i'.\n See notes in sigma, above\n\n Returns\n -------\n w : array\n Array of k eigenvalues.\n\n v : array\n An array of `k` eigenvectors.\n ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].\n\n Raises\n ------\n ArpackNoConvergence\n When the requested convergence is not obtained.\n\n The currently converged eigenvalues and eigenvectors can be found\n as ``eigenvalues`` and ``eigenvectors`` attributes of the exception\n object.\n\n See Also\n --------\n eigsh : eigenvalues and eigenvectors for symmetric matrix A\n svds : singular value decomposition for a matrix A\n\n Examples\n --------\n Find 6 eigenvectors of the identity matrix:\n\n >>> from sklearn.utils.arpack import eigs\n >>> id = np.identity(13)\n >>> vals, vecs = eigs(id, k=6)\n >>> vals\n array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])\n >>> vecs.shape\n (13, 6)\n\n Notes\n -----\n This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,\n ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to\n find the eigenvalues and eigenvectors [2]_.\n\n References\n ----------\n .. [1] ARPACK Software, http:\/\/www.caam.rice.edu\/software\/ARPACK\/\n .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:\n Solution of Large Scale Eigenvalue Problems by Implicitly Restarted\n Arnoldi Methods. SIAM, Philadelphia, PA, 1998.\n \"\"\"\n if A.shape[0] != A.shape[1]:\n raise ValueError('expected square matrix (shape=%s)' % (A.shape,))\n if M is not None:\n if M.shape != A.shape:\n raise ValueError('wrong M dimensions %s, should be %s'\n % (M.shape, A.shape))\n if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():\n warnings.warn('M does not have the same type precision as A. 
'\n 'This may adversely affect ARPACK convergence')\n n = A.shape[0]\n\n if k <= 0 or k >= n:\n raise ValueError(\"k must be between 1 and rank(A)-1\")\n\n if sigma is None:\n matvec = _aslinearoperator_with_dtype(A).matvec\n\n if OPinv is not None:\n raise ValueError(\"OPinv should not be specified \"\n \"with sigma = None.\")\n if OPpart is not None:\n raise ValueError(\"OPpart should not be specified with \"\n \"sigma = None or complex A\")\n\n if M is None:\n #standard eigenvalue problem\n mode = 1\n M_matvec = None\n Minv_matvec = None\n if Minv is not None:\n raise ValueError(\"Minv should not be \"\n \"specified with M = None.\")\n else:\n #general eigenvalue problem\n mode = 2\n if Minv is None:\n Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)\n else:\n Minv = _aslinearoperator_with_dtype(Minv)\n Minv_matvec = Minv.matvec\n M_matvec = _aslinearoperator_with_dtype(M).matvec\n else:\n #sigma is not None: shift-invert mode\n if np.issubdtype(A.dtype, np.complexfloating):\n if OPpart is not None:\n raise ValueError(\"OPpart should not be specified \"\n \"with sigma=None or complex A\")\n mode = 3\n elif OPpart is None or OPpart.lower() == 'r':\n mode = 3\n elif OPpart.lower() == 'i':\n if np.imag(sigma) == 0:\n raise ValueError(\"OPpart cannot be 'i' if sigma is real\")\n mode = 4\n else:\n raise ValueError(\"OPpart must be one of ('r','i')\")\n\n matvec = _aslinearoperator_with_dtype(A).matvec\n if Minv is not None:\n raise ValueError(\"Minv should not be specified when sigma is\")\n if OPinv is None:\n Minv_matvec = get_OPinv_matvec(A, M, sigma,\n symmetric=False, tol=tol)\n else:\n OPinv = _aslinearoperator_with_dtype(OPinv)\n Minv_matvec = OPinv.matvec\n if M is None:\n M_matvec = None\n else:\n M_matvec = _aslinearoperator_with_dtype(M).matvec\n\n params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,\n M_matvec, Minv_matvec, sigma,\n ncv, v0, maxiter, which, tol)\n\n while not params.converged:\n params.iterate()\n\n return params.extract(return_eigenvectors)\n\n\ndef _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,\n maxiter=None, tol=0, return_eigenvectors=True, Minv=None,\n OPinv=None, mode='normal'):\n \"\"\"\n Find k eigenvalues and eigenvectors of the real symmetric square matrix\n or complex hermitian matrix A.\n\n Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for\n w[i] eigenvalues with corresponding eigenvectors x[i].\n\n If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the\n generalized eigenvalue problem for w[i] eigenvalues\n with corresponding eigenvectors x[i]\n\n\n Parameters\n ----------\n A : An N x N matrix, array, sparse matrix, or LinearOperator representing\n the operation A * x, where A is a real symmetric matrix\n For buckling mode (see below) A must additionally be positive-definite\n k : integer\n The number of eigenvalues and eigenvectors desired.\n `k` must be smaller than N. It is not possible to compute all\n eigenvectors of a matrix.\n\n M : An N x N matrix, array, sparse matrix, or linear operator representing\n the operation M * x for the generalized eigenvalue problem\n ``A * x = w * M * x``.\n M must represent a real, symmetric matrix. For best results, M should\n be of the same type as A. Additionally:\n * If sigma == None, M is symmetric positive definite\n * If sigma is specified, M is symmetric positive semi-definite\n * In buckling mode, M is symmetric indefinite.\n If sigma == None, eigsh requires an operator to compute the solution\n of the linear equation `M * x = b`. 
This is done internally via a\n (sparse) LU decomposition for an explicit matrix M, or via an\n iterative solver for a general linear operator. Alternatively,\n the user can supply the matrix or operator Minv, which gives\n x = Minv * b = M^-1 * b\n sigma : real\n Find eigenvalues near sigma using shift-invert mode. This requires\n an operator to compute the solution of the linear system\n `[A - sigma * M] x = b`, where M is the identity matrix if\n unspecified. This is computed internally via a (sparse) LU\n decomposition for explicit matrices A & M, or via an iterative\n solver if either A or M is a general linear operator.\n Alternatively, the user can supply the matrix or operator OPinv,\n which gives x = OPinv * b = [A - sigma * M]^-1 * b.\n Note that when sigma is specified, the keyword 'which' refers to\n the shifted eigenvalues w'[i] where:\n - if mode == 'normal',\n w'[i] = 1 \/ (w[i] - sigma)\n - if mode == 'cayley',\n w'[i] = (w[i] + sigma) \/ (w[i] - sigma)\n - if mode == 'buckling',\n w'[i] = w[i] \/ (w[i] - sigma)\n (see further discussion in 'mode' below)\n v0 : array\n Starting vector for iteration.\n ncv : integer\n The number of Lanczos vectors generated\n ncv must be greater than k and smaller than n;\n it is recommended that ncv > 2*k\n which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']\n If A is a complex hermitian matrix, 'BE' is invalid.\n Which `k` eigenvectors and eigenvalues to find\n - 'LM' : Largest (in magnitude) eigenvalues\n - 'SM' : Smallest (in magnitude) eigenvalues\n - 'LA' : Largest (algebraic) eigenvalues\n - 'SA' : Smallest (algebraic) eigenvalues\n - 'BE' : Half (k\/2) from each end of the spectrum\n When k is odd, return one more (k\/2+1) from the high end\n When sigma != None, 'which' refers to the shifted eigenvalues w'[i]\n (see discussion in 'sigma', above). ARPACK is generally better\n at finding large values than small values. If small eigenvalues are\n desired, consider using shift-invert mode for better performance.\n maxiter : integer\n Maximum number of Arnoldi update iterations allowed\n tol : float\n Relative accuracy for eigenvalues (stopping criterion).\n The default value of 0 implies machine precision.\n Minv : N x N matrix, array, sparse matrix, or LinearOperator\n See notes in M, above\n OPinv : N x N matrix, array, sparse matrix, or LinearOperator\n See notes in sigma, above.\n return_eigenvectors : boolean\n Return eigenvectors (True) in addition to eigenvalues\n mode : string ['normal' | 'buckling' | 'cayley']\n Specify strategy to use for shift-invert mode. This argument applies\n only for real-valued A and sigma != None. 
For shift-invert mode,\n ARPACK internally solves the eigenvalue problem\n ``OP * x'[i] = w'[i] * B * x'[i]``\n and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]\n into the desired eigenvectors and eigenvalues of the problem\n ``A * x[i] = w[i] * M * x[i]``.\n The modes are as follows:\n - 'normal' : OP = [A - sigma * M]^-1 * M\n B = M\n w'[i] = 1 \/ (w[i] - sigma)\n - 'buckling' : OP = [A - sigma * M]^-1 * A\n B = A\n w'[i] = w[i] \/ (w[i] - sigma)\n - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]\n B = M\n w'[i] = (w[i] + sigma) \/ (w[i] - sigma)\n The choice of mode will affect which eigenvalues are selected by\n the keyword 'which', and can also impact the stability of\n convergence (see [2] for a discussion)\n\n Returns\n -------\n w : array\n Array of k eigenvalues\n v : array\n An array of k eigenvectors\n The v[i] is the eigenvector corresponding to the eigenvector w[i]\n\n Raises\n ------\n ArpackNoConvergence\n When the requested convergence is not obtained.\n\n The currently converged eigenvalues and eigenvectors can be found\n as ``eigenvalues`` and ``eigenvectors`` attributes of the exception\n object.\n\n See Also\n --------\n eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A\n svds : singular value decomposition for a matrix A\n\n Notes\n -----\n This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD\n functions which use the Implicitly Restarted Lanczos Method to\n find the eigenvalues and eigenvectors [2]_.\n\n Examples\n --------\n >>> from sklearn.utils.arpack import eigsh\n >>> id = np.identity(13)\n >>> vals, vecs = eigsh(id, k=6)\n >>> vals # doctest: +SKIP\n array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])\n >>> print(vecs.shape)\n (13, 6)\n\n References\n ----------\n .. [1] ARPACK Software, http:\/\/www.caam.rice.edu\/software\/ARPACK\/\n .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:\n Solution of Large Scale Eigenvalue Problems by Implicitly Restarted\n Arnoldi Methods. SIAM, Philadelphia, PA, 1998.\n \"\"\"\n # complex hermitian matrices should be solved with eigs\n if np.issubdtype(A.dtype, np.complexfloating):\n if mode != 'normal':\n raise ValueError(\"mode=%s cannot be used with \"\n \"complex matrix A\" % mode)\n if which == 'BE':\n raise ValueError(\"which='BE' cannot be used with complex matrix A\")\n elif which == 'LA':\n which = 'LR'\n elif which == 'SA':\n which = 'SR'\n ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,\n ncv=ncv, maxiter=maxiter, tol=tol,\n return_eigenvectors=return_eigenvectors, Minv=Minv,\n OPinv=OPinv)\n\n if return_eigenvectors:\n return ret[0].real, ret[1]\n else:\n return ret.real\n\n if A.shape[0] != A.shape[1]:\n raise ValueError('expected square matrix (shape=%s)' % (A.shape,))\n if M is not None:\n if M.shape != A.shape:\n raise ValueError('wrong M dimensions %s, should be %s'\n % (M.shape, A.shape))\n if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():\n warnings.warn('M does not have the same type precision as A. 
'\n 'This may adversely affect ARPACK convergence')\n n = A.shape[0]\n\n if k <= 0 or k >= n:\n raise ValueError(\"k must be between 1 and rank(A)-1\")\n\n if sigma is None:\n A = _aslinearoperator_with_dtype(A)\n matvec = A.matvec\n\n if OPinv is not None:\n raise ValueError(\"OPinv should not be specified \"\n \"with sigma = None.\")\n if M is None:\n #standard eigenvalue problem\n mode = 1\n M_matvec = None\n Minv_matvec = None\n if Minv is not None:\n raise ValueError(\"Minv should not be \"\n \"specified with M = None.\")\n else:\n #general eigenvalue problem\n mode = 2\n if Minv is None:\n Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)\n else:\n Minv = _aslinearoperator_with_dtype(Minv)\n Minv_matvec = Minv.matvec\n M_matvec = _aslinearoperator_with_dtype(M).matvec\n else:\n # sigma is not None: shift-invert mode\n if Minv is not None:\n raise ValueError(\"Minv should not be specified when sigma is\")\n\n # normal mode\n if mode == 'normal':\n mode = 3\n matvec = None\n if OPinv is None:\n Minv_matvec = get_OPinv_matvec(A, M, sigma,\n symmetric=True, tol=tol)\n else:\n OPinv = _aslinearoperator_with_dtype(OPinv)\n Minv_matvec = OPinv.matvec\n if M is None:\n M_matvec = None\n else:\n M = _aslinearoperator_with_dtype(M)\n M_matvec = M.matvec\n\n # buckling mode\n elif mode == 'buckling':\n mode = 4\n if OPinv is None:\n Minv_matvec = get_OPinv_matvec(A, M, sigma,\n symmetric=True, tol=tol)\n else:\n Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec\n matvec = _aslinearoperator_with_dtype(A).matvec\n M_matvec = None\n\n # cayley-transform mode\n elif mode == 'cayley':\n mode = 5\n matvec = _aslinearoperator_with_dtype(A).matvec\n if OPinv is None:\n Minv_matvec = get_OPinv_matvec(A, M, sigma,\n symmetric=True, tol=tol)\n else:\n Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec\n if M is None:\n M_matvec = None\n else:\n M_matvec = _aslinearoperator_with_dtype(M).matvec\n\n # unrecognized mode\n else:\n raise ValueError(\"unrecognized mode '%s'\" % mode)\n\n params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,\n M_matvec, Minv_matvec, sigma,\n ncv, v0, maxiter, which, tol)\n\n while not params.converged:\n params.iterate()\n\n return params.extract(return_eigenvectors)\n\n\ndef _svds(A, k=6, ncv=None, tol=0):\n \"\"\"Compute k singular values\/vectors for a sparse matrix using ARPACK.\n\n Parameters\n ----------\n A : sparse matrix\n Array to compute the SVD on\n k : int, optional\n Number of singular values and vectors to compute.\n ncv : integer\n The number of Lanczos vectors generated\n ncv must be greater than k+1 and smaller than n;\n it is recommended that ncv > 2*k\n tol : float, optional\n Tolerance for singular values. 
Zero (default) means machine precision.\n\n Notes\n -----\n This is a naive implementation using an eigensolver on A.H * A or\n A * A.H, depending on which one is more efficient.\n\n \"\"\"\n if not (isinstance(A, np.ndarray) or isspmatrix(A)):\n A = np.asarray(A)\n\n n, m = A.shape\n\n if np.issubdtype(A.dtype, np.complexfloating):\n herm = lambda x: x.T.conjugate()\n eigensolver = eigs\n else:\n herm = lambda x: x.T\n eigensolver = eigsh\n\n if n > m:\n X = A\n XH = herm(A)\n else:\n XH = A\n X = herm(A)\n\n if hasattr(XH, 'dot'):\n def matvec_XH_X(x):\n return XH.dot(X.dot(x))\n else:\n def matvec_XH_X(x):\n return np.dot(XH, np.dot(X, x))\n\n XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,\n shape=(X.shape[1], X.shape[1]))\n\n # Ignore deprecation warnings here: dot on matrices is deprecated,\n # but this code is a backport anyhow\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DeprecationWarning)\n eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)\n s = np.sqrt(eigvals)\n\n if n > m:\n v = eigvec\n if hasattr(X, 'dot'):\n u = X.dot(v) \/ s\n else:\n u = np.dot(X, v) \/ s\n vh = herm(v)\n else:\n u = eigvec\n if hasattr(X, 'dot'):\n vh = herm(X.dot(u) \/ s)\n else:\n vh = herm(np.dot(X, u) \/ s)\n\n return u, s, vh\n\n# check if backport is actually needed:\nif scipy.version.version >= LooseVersion('0.10'):\n from scipy.sparse.linalg import eigs, eigsh, svds\nelse:\n eigs, eigsh, svds = _eigs, _eigsh, _svds\n","license":"bsd-3-clause"} {"repo_name":"looooo\/paraBEM","path":"examples\/plots\/lifting_line.py","copies":"1","size":"1404","content":"from __future__ import division\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport paraBEM\n\nfrom paraBEM.liftingline import LiftingLine\nfrom paraBEM.utils import check_path\n\n\n# WingGeometry\nspw = 2\nnumpos = 50\nz_fac_1 = -0.3\nz_fac_2 = -0.7\ny = np.sin(np.linspace(0, np.pi\/2, numpos)) * spw\/2\nx = [0. 
for _ in y]\nz = [i**2 * z_fac_1 + i**6 * z_fac_2 for i in y]\n\nmirror = lambda xyz: [xyz[0], -xyz[1], xyz[2]]\nwing = list(zip(x, y, z))\nwing = list(map(mirror, wing))[::-1] + list(wing)[1:]\nwing = [paraBEM.Vector3(*i) for i in wing]\n\n# LiftingLine\nlifting_line = LiftingLine(wing)\nlifting_line.v_inf = paraBEM.Vector3(1, 0, 0)\nlifting_line.solve_for_best_gamma(1)\ngamma = [i.best_gamma for i in lifting_line.segments]\ngamma_max = max(gamma)\n\n# Plot\ngamma_el = lambda y: gamma_max * (1 - (y \/ spw * 2)**2)**(1 \/ 2)\nmids = [[i.mids.x, i.mids.y, i.mids.z] for i in lifting_line.segments]\nx, y, z = zip(*mids)\n\nfig = plt.figure()\nax1 = fig.add_subplot(3, 1, 1)\nax1.plot(y, z)\n\nax2 = fig.add_subplot(3, 1, 2)\nax2.plot(y, x, marker=\"x\")\n\nax3 = fig.add_subplot(3, 1, 3)\ny_el = np.linspace(-1, 1, 400)\nax3.plot([-spw\/2] + list(y) + [spw\/2], [0] + gamma + [0], marker=\"x\")\nax3.plot(y_el, list(map(gamma_el, y_el)))\nplt.savefig(check_path(\"results\/2d\/liftingline.png\"))\n\ntotal = 0\nfor i in lifting_line.segments:\n total += i.lift_factor * i.best_gamma\nprint(total)\n","license":"gpl-3.0"} {"repo_name":"tdhopper\/scikit-learn","path":"examples\/svm\/plot_svm_scale_c.py","copies":"223","size":"5375","content":"\"\"\"\n==============================================\nScaling the regularization parameter for SVCs\n==============================================\n\nThe following example illustrates the effect of scaling the\nregularization parameter when using :ref:`svm` for\n:ref:`classification `.\nFor SVC classification, we are interested in a risk minimization for the\nequation:\n\n\n.. math::\n\n C \\sum_{i=1, n} \\mathcal{L} (f(x_i), y_i) + \\Omega (w)\n\nwhere\n\n - :math:`C` is used to set the amount of regularization\n - :math:`\\mathcal{L}` is a `loss` function of our samples\n and our model parameters.\n - :math:`\\Omega` is a `penalty` function of our model parameters\n\nIf we consider the loss function to be the individual error per\nsample, then the data-fit term, or the sum of the error for each sample, will\nincrease as we add more samples. The penalization term, however, will not\nincrease.\n\nWhen using, for example, :ref:`cross validation `, to\nset the amount of regularization with `C`, there will be a\ndifferent amount of samples between the main problem and the smaller problems\nwithin the folds of the cross validation.\n\nSince our loss function is dependent on the amount of samples, the latter\nwill influence the selected value of `C`.\nThe question that arises is `How do we optimally adjust C to\naccount for the different amount of training samples?`\n\nThe figures below are used to illustrate the effect of scaling our\n`C` to compensate for the change in the number of samples, in the\ncase of using an `l1` penalty, as well as the `l2` penalty.\n\nl1-penalty case\n-----------------\nIn the `l1` case, theory says that prediction consistency\n(i.e. that under given hypothesis, the estimator\nlearned predicts as well as a model knowing the true distribution)\nis not possible because of the bias of the `l1`. 
It does say, however,\nthat model consistency, in terms of finding the right set of non-zero\nparameters as well as their signs, can be achieved by scaling\n`C1`.\n\nl2-penalty case\n-----------------\nThe theory says that in order to achieve prediction consistency, the\npenalty parameter should be kept constant\nas the number of samples grow.\n\nSimulations\n------------\n\nThe two figures below plot the values of `C` on the `x-axis` and the\ncorresponding cross-validation scores on the `y-axis`, for several different\nfractions of a generated data-set.\n\nIn the `l1` penalty case, the cross-validation-error correlates best with\nthe test-error, when scaling our `C` with the number of samples, `n`,\nwhich can be seen in the first figure.\n\nFor the `l2` penalty case, the best result comes from the case where `C`\nis not scaled.\n\n.. topic:: Note:\n\n Two separate datasets are used for the two different plots. The reason\n behind this is the `l1` case works better on sparse data, while `l2`\n is better suited to the non-sparse case.\n\"\"\"\nprint(__doc__)\n\n\n# Author: Andreas Mueller \n# Jaques Grobler \n# License: BSD 3 clause\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.cross_validation import ShuffleSplit\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.utils import check_random_state\nfrom sklearn import datasets\n\n\nrnd = check_random_state(1)\n\n# set up dataset\nn_samples = 100\nn_features = 300\n\n# l1 data (only 5 informative features)\nX_1, y_1 = datasets.make_classification(n_samples=n_samples,\n n_features=n_features, n_informative=5,\n random_state=1)\n\n# l2 data: non sparse, but less features\ny_2 = np.sign(.5 - rnd.rand(n_samples))\nX_2 = rnd.randn(n_samples, n_features \/ 5) + y_2[:, np.newaxis]\nX_2 += 5 * rnd.randn(n_samples, n_features \/ 5)\n\nclf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,\n tol=1e-3),\n np.logspace(-2.3, -1.3, 10), X_1, y_1),\n (LinearSVC(penalty='l2', loss='squared_hinge', dual=True,\n tol=1e-4),\n np.logspace(-4.5, -2, 10), X_2, y_2)]\n\ncolors = ['b', 'g', 'r', 'c']\n\nfor fignum, (clf, cs, X, y) in enumerate(clf_sets):\n # set up the plot for each regressor\n plt.figure(fignum, figsize=(9, 10))\n\n for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):\n param_grid = dict(C=cs)\n # To get nice curve, we need a large number of iterations to\n # reduce the variance\n grid = GridSearchCV(clf, refit=False, param_grid=param_grid,\n cv=ShuffleSplit(n=n_samples, train_size=train_size,\n n_iter=250, random_state=1))\n grid.fit(X, y)\n scores = [x[1] for x in grid.grid_scores_]\n\n scales = [(1, 'No scaling'),\n ((n_samples * train_size), '1\/n_samples'),\n ]\n\n for subplotnum, (scaler, name) in enumerate(scales):\n plt.subplot(2, 1, subplotnum + 1)\n plt.xlabel('C')\n plt.ylabel('CV Score')\n grid_cs = cs * float(scaler) # scale the C's\n plt.semilogx(grid_cs, scores, label=\"fraction %.2f\" %\n train_size)\n plt.title('scaling=%s, penalty=%s, loss=%s' %\n (name, clf.penalty, clf.loss))\n\n plt.legend(loc=\"best\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"DTUWindEnergy\/Python4WindEnergy","path":"lesson 3\/results\/ebra.py","copies":"1","size":"8402","content":"# -*- coding: utf-8 -*- 3.0<\/nbformat>\n\n# \n\n# Plotting with Matplotlib\n\n# \n\n# Prepare for action\n\n# \n\nimport numpy as np\nimport scipy as sp\nimport sympy\n\n# Pylab combines the pyplot functionality (for plotting) with the numpy\n# functionality (for 
mathematics and for working with arrays) in a single namespace\n# aims to provide a closer MATLAB feel (the easy way). Note that his approach\n# should only be used when doing some interactive quick and dirty data inspection.\n# DO NOT USE THIS FOR SCRIPTS\n#from pylab import *\n\n# the convienient Matplotib plotting interface pyplot (the tidy\/right way)\n# use this for building scripts. The examples here will all use pyplot.\nimport matplotlib.pyplot as plt\n\n# for using the matplotlib API directly (the hard and verbose way)\n# use this when building applications, and\/or backends\nimport matplotlib as mpl\n\n# \n\n# How would you like the IPython notebook show your plots? In order to use the\n# matplotlib IPython magic youre IPython notebook should be launched as\n# \n# ipython notebook --matplotlib=inline\n# \n# Make plots appear as a pop up window, chose the backend: 'gtk', 'inline', 'osx', 'qt', 'qt4', 'tk', 'wx'\n# \n# %matplotlib qt\n# \n# or inline the notebook (no panning, zooming through the plot). Not working in IPython 0.x\n# \n# %matplotib inline\n# \n\n# \n\n# activate pop up plots\n#%matplotlib qt\n# or change to inline plots\n# %matplotlib inline\n\n# \n\n# Matplotlib documentation\n\n# \n\n# Finding your own way (aka RTFM). Hint: there is search box available!\n# \n# * http:\/\/matplotlib.org\/contents.html\n# \n# The Matplotlib API docs:\n# \n# * http:\/\/matplotlib.org\/api\/index.html\n# \n# Pyplot, object oriented plotting:\n# \n# * http:\/\/matplotlib.org\/api\/pyplot_api.html\n# * http:\/\/matplotlib.org\/api\/pyplot_summary.html\n# \n# Extensive gallery with examples:\n# \n# * http:\/\/matplotlib.org\/gallery.html\n\n# \n\n# Tutorials for those who want to start playing\n\n# \n\n# If reading manuals is too much for you, there is a very good tutorial available here:\n# \n# * http:\/\/nbviewer.ipython.org\/github\/jrjohansson\/scientific-python-lectures\/blob\/master\/Lecture-4-Matplotlib.ipynb\n# \n# Note that this tutorial uses\n# \n# from pylab import *\n# \n# which is usually not adviced in more advanced script environments. When using\n# \n# import matplotlib.pyplot as plt\n# \n# you need to preceed all plotting commands as used in the above tutorial with\n# \n# plt.\n\n# \n\n# Give me more!\n# \n# [EuroScipy 2012 Matlotlib tutorial](http:\/\/www.loria.fr\/~rougier\/teaching\/matplotlib\/). Note that here the author uses ```from pylab import * ```. When using ```import matplotliblib.pyplot as plt``` the plotting commands need to be proceeded with ```plt.```\n\n# \n\n# Plotting template starting point\n\n# \n\n# some sample data\nx = np.arange(-10,10,0.1)\n\n# \n\n# To change the default plot configuration values.\n\n# \n\npage_width_cm = 13\ndpi = 200\ninch = 2.54 # inch in cm\n# setting global plot configuration using the RC configuration style\nplt.rc('font', family='serif')\nplt.rc('xtick', labelsize=12) # tick labels\nplt.rc('ytick', labelsize=20) # tick labels\nplt.rc('axes', labelsize=20) # axes labels\n# If you don\u2019t need LaTeX, don\u2019t use it. It is slower to plot, and text\n# looks just fine without. If you need it, e.g. 
for symbols, then use it.\n#plt.rc('text', usetex=True) #<- P-E: Doesn't work on my Mac\n\n# \n\n# create a figure instance, note that figure size is given in inches!\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6))\n# set the big title (note aligment relative to figure)\nfig.suptitle(\"suptitle 16, figure alignment\", fontsize=16)\n\n# actual plotting\nax.plot(x, x**2, label=\"label 12\")\n\n\n# set axes title (note aligment relative to axes)\nax.set_title(\"title 14, axes alignment\", fontsize=14)\n\n# axes labels\nax.set_xlabel('xlabel 12')\nax.set_ylabel(r'$y_{\\alpha}$ 12', fontsize=8)\n\n# legend\nax.legend(fontsize=12, loc=\"best\")\n\n# saving the figure in different formats\n# fig.savefig('figure-%03i.png' % dpi, dpi=dpi)\n# fig.savefig('figure.svg')\n# fig.savefig('figure.eps')\n\n# \n\n# following steps are only relevant when using figures as pop up windows (with %matplotlib qt)\n# to update a figure with has been modified\nfig.canvas.draw()\n# show a figure\nfig.show()\n\n# \n\n# Exercise\n\n# \n\n# The current section is about you trying to figure out how to do several plotting features. You should use the previously mentioned resources to find how to do that. In many cases, google is your friend!\n\n# \n\n# * add a grid to the plot\n\n# \n\nplt.plot(x,x**2)\nplt.grid('on')\n\n# \n\n# * change the location of the legend to different places\n\n# \n\nplt.plot(x,x**2, label=\"label 12\")\nplt.legend(fontsize=12, loc=\"upper right\")\n\n# \n\n# * find a way to control the line type and color, marker type and color, control the frequency of the marks (`markevery`). See plot options at: http:\/\/matplotlib.org\/api\/pyplot_api.html#matplotlib.pyplot.plot \n\n# \nstride = max( int(len(x) \/ 20), 1)\nplt.plot(x,x**2, 'ko-',color='forestgreen', markevery=stride,label=\"label 12\") \nplt.legend(fontsize=12, loc=\"upper center\")\n# \n\n# * add different sub-plots\n\n# \n\nfig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)\naxes[0].plot(x,x**2)\naxes[1].plot(x,-x**2)\n\n# \n\n# * size the figure such that when included on an A4 page the fonts are given in their true size\n\n# \n# matplotlib.rcParams.update({'font.size': 22})\nfig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)\naxes[0].plot(x,x**2)\naxes[1].plot(x,-x**2)\nfig.set_size_inches(8.2,3) # using A4 width in inches?\nfig.set_dpi(100)\nfor ax in axes:\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(12)\n# ax[0].set('xtick', labelsize=12) # tick labels\n# .rc('ytick', labelsize=20) # tick labels\n# .rc('axes', labelsize=20) # axes labels\n# fig.savefig('figure.pdf')\n# \n\n# * make a contour plot\n\n# \nX, Y = np.meshgrid(x,x)\nplt.figure()\nplt.contourf(X,Y,X*Y,linewidth=0.3,cmap=plt.get_cmap('hsv'),levels=np.arange(-1,1,0.1))\nplt.show\n\n\n# im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd\/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)\n# \n\n# * use twinx() to create a second axis on the right for the second plot\n\n# \nplt.figure()\nax=plt.gca()\nax.plot(x,x**2)\nax2 = ax.twinx()\nax2.plot(x,x**4, 'r')\n# \n\n# * add horizontal and vertical lines using axvline(), axhline()\n\n# \n\nplt.figure()\nplt.plot(x,x**2)\nplt.axvline(2)\nplt.axhline(10)\n\n# \n\n# * autoformat dates for nice printing on the x-axis using fig.autofmt_xdate()\n\n# \n\nimport datetime\ndates = np.array([datetime.datetime.now() + datetime.timedelta(days=i) for i in xrange(24)])\nfig, ax = plt.subplots(nrows=1, 
ncols=1)\nax.plot(dates,xrange(24))\nfig.autofmt_xdate()\n# \n\n# Advanced exercises\n\n# \n\n# We are going to play a bit with regression\n\n# \n\n# * Create a vector x of equally spaced number between $x \\in [0, 5\\pi]$ of 1000 points (keyword: linspace)\n\n# \nn=1000\nx=np.linspace(0,5*np.pi,n)\n\n# \n\n# * create a vector y, so that y=sin(x) with some random noise\n\n# \ny = np.sin(x) +np.random.rand(n)-0.5\nyth = np.sin(x)\n\n# \n\n# * plot it like this: ![test](files\/plt1.png)\n\n# \nfig=plt.figure()\nax=plt.gca()\nax.plot(x,y,'b.')\nax.plot(x,yth,'k--',label=r'$y=sin(x)$')\n\n# \n\n# Try to do a polynomial fit on y(x) with different polynomial degree (Use numpy.polyfit to obtain coefficients)\n# \n# Plot it like this (use np.poly1d(coef)(x) to plot polynomials) ![test](files\/plt2.png)\n\n# \nfor order in xrange(9):\n coeff=np.polyfit(x,y,order)\n ax.plot(x,np.poly1d(coeff)(x),label='deg %d'%order)\n\n# shrink current axis by 20%\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n# Put a legend to the right of the current axis\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.show()\n# \n\n","license":"apache-2.0"} {"repo_name":"B3AU\/waveTree","path":"sklearn\/utils\/testing.py","copies":"4","size":"12125","content":"\"\"\"Testing utilities.\"\"\"\n\n# Copyright (c) 2011, 2012\n# Authors: Pietro Berkes,\n# Andreas Muller\n# Mathieu Blondel\n# Olivier Grisel\n# Arnaud Joly\n# License: BSD 3 clause\nimport inspect\nimport pkgutil\nimport warnings\n\nimport scipy as sp\nfrom functools import wraps\ntry:\n # Python 2\n from urllib2 import urlopen\n from urllib2 import HTTPError\nexcept ImportError:\n # Python 3+\n from urllib.request import urlopen\n from urllib.error import HTTPError\n\nimport sklearn\nfrom sklearn.base import BaseEstimator\nfrom .fixes import savemat\n\n# Conveniently import all assertions in one place.\nfrom nose.tools import assert_equal\nfrom nose.tools import assert_not_equal\nfrom nose.tools import assert_true\nfrom nose.tools import assert_false\nfrom nose.tools import assert_raises\nfrom nose.tools import raises\nfrom nose import SkipTest\nfrom nose import with_setup\n\nfrom numpy.testing import assert_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_less\nimport numpy as np\n\nfrom sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,\n ClusterMixin)\n\n__all__ = [\"assert_equal\", \"assert_not_equal\", \"assert_raises\", \"raises\",\n \"with_setup\", \"assert_true\", \"assert_false\", \"assert_almost_equal\",\n \"assert_array_equal\", \"assert_array_almost_equal\",\n \"assert_array_less\"]\n\n\ntry:\n from nose.tools import assert_in, assert_not_in\nexcept ImportError:\n # Nose < 1.0.0\n\n def assert_in(x, container):\n assert_true(x in container, msg=\"%r in %r\" % (x, container))\n\n def assert_not_in(x, container):\n assert_false(x in container, msg=\"%r in %r\" % (x, container))\n\n\ndef _assert_less(a, b, msg=None):\n message = \"%r is not lower than %r\" % (a, b)\n if msg is not None:\n message += \": \" + msg\n assert a < b, message\n\n\ndef _assert_greater(a, b, msg=None):\n message = \"%r is not greater than %r\" % (a, b)\n if msg is not None:\n message += \": \" + msg\n assert a > b, message\n\n\n# To remove when we support numpy 1.7\ndef assert_warns(warning_class, func, *args, **kw):\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be 
triggered.\n warnings.simplefilter(\"always\")\n\n # Trigger a warning.\n result = func(*args, **kw)\n\n # Verify some things\n if not len(w) > 0:\n raise AssertionError(\"No warning raised when calling %s\"\n % func.__name__)\n\n if not w[0].category is warning_class:\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\"\n % (func.__name__, warning_class, w[0]))\n\n return result\n\n\n# To remove when we support numpy 1.7\ndef assert_no_warnings(func, *args, **kw):\n # XXX: once we may depend on python >= 2.6, this can be replaced by the\n # warnings module context manager.\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n\n result = func(*args, **kw)\n if len(w) > 0:\n raise AssertionError(\"Got warnings when calling %s: %s\"\n % (func.__name__, w))\n return result\n\n\ndef ignore_warnings(fn):\n \"\"\"Decorator to catch and hide warnings without visual nesting\"\"\"\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n return fn(*args, **kwargs)\n w[:] = []\n return wrapper\n\n\ntry:\n from nose.tools import assert_less\nexcept ImportError:\n assert_less = _assert_less\n\ntry:\n from nose.tools import assert_greater\nexcept ImportError:\n assert_greater = _assert_greater\n\n\ndef _assert_allclose(actual, desired, rtol=1e-7, atol=0,\n err_msg='', verbose=True):\n actual, desired = np.asanyarray(actual), np.asanyarray(desired)\n if np.allclose(actual, desired, rtol=rtol, atol=atol):\n return\n msg = ('Array not equal to tolerance rtol=%g, atol=%g: '\n 'actual %s, desired %s') % (rtol, atol, actual, desired)\n raise AssertionError(msg)\n\n\nif hasattr(np.testing, 'assert_allclose'):\n assert_allclose = np.testing.assert_allclose\nelse:\n assert_allclose = _assert_allclose\n\n\ndef assert_raise_message(exception, message, function, *args, **kwargs):\n \"\"\"Helper function to test error messages in exceptions\"\"\"\n\n try:\n function(*args, **kwargs)\n raise AssertionError(\"Should have raised %r\" % exception(message))\n except exception as e:\n error_message = str(e)\n assert_in(message, error_message)\n\n\ndef fake_mldata(columns_dict, dataname, matfile, ordering=None):\n \"\"\"Create a fake mldata data set.\n\n Parameters\n ----------\n columns_dict: contains data as\n columns_dict[column_name] = array of data\n dataname: name of data set\n matfile: file-like object or file name\n ordering: list of column_names, determines the ordering in the data set\n\n Note: this function transposes all arrays, while fetch_mldata only\n transposes 'data', keep that into account in the tests.\n \"\"\"\n datasets = dict(columns_dict)\n\n # transpose all variables\n for name in datasets:\n datasets[name] = datasets[name].T\n\n if ordering is None:\n ordering = sorted(list(datasets.keys()))\n # NOTE: setting up this array is tricky, because of the way Matlab\n # re-packages 1D arrays\n datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),\n dtype='object')\n for i, name in enumerate(ordering):\n datasets['mldata_descr_ordering'][0, i] = name\n\n savemat(matfile, datasets, oned_as='column')\n\n\nclass mock_mldata_urlopen(object):\n\n def __init__(self, mock_datasets):\n \"\"\"Object that mocks the urlopen function to fake requests to mldata.\n\n `mock_datasets` is a dictionary of {dataset_name: data_dict}, or\n {dataset_name: (data_dict, ordering).\n `data_dict` itself is a dictionary of {column_name: data_array},\n and `ordering` is a list of column_names to 
determine the ordering\n in the data set (see `fake_mldata` for details).\n\n When requesting a dataset with a name that is in mock_datasets,\n this object creates a fake dataset in a StringIO object and\n returns it. Otherwise, it raises an HTTPError.\n \"\"\"\n self.mock_datasets = mock_datasets\n\n def __call__(self, urlname):\n dataset_name = urlname.split('\/')[-1]\n if dataset_name in self.mock_datasets:\n resource_name = '_' + dataset_name\n from io import BytesIO\n matfile = BytesIO()\n\n dataset = self.mock_datasets[dataset_name]\n ordering = None\n if isinstance(dataset, tuple):\n dataset, ordering = dataset\n fake_mldata(dataset, resource_name, matfile, ordering)\n\n matfile.seek(0)\n return matfile\n else:\n raise HTTPError(urlname, 404, dataset_name + \" is not available\",\n [], None)\n\n\ndef install_mldata_mock(mock_datasets):\n # Lazy import to avoid mutually recursive imports\n from sklearn import datasets\n datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)\n\n\ndef uninstall_mldata_mock():\n # Lazy import to avoid mutually recursive imports\n from sklearn import datasets\n datasets.mldata.urlopen = urlopen\n\n\n# Meta estimators need another estimator to be instantiated.\nmeta_estimators = [\"OneVsOneClassifier\",\n \"OutputCodeClassifier\", \"OneVsRestClassifier\", \"RFE\",\n \"RFECV\", \"BaseEnsemble\"]\n# estimators that there is no way to default-construct sensibly\nother = [\"Pipeline\", \"FeatureUnion\", \"GridSearchCV\", \"RandomizedSearchCV\"]\n\n\ndef all_estimators(include_meta_estimators=False, include_other=False,\n type_filter=None):\n \"\"\"Get a list of all estimators from sklearn.\n\n This function crawls the module and gets all classes that inherit\n from BaseEstimator. Classes that are defined in test-modules are not\n included.\n By default meta_estimators such as GridSearchCV are also not included.\n\n Parameters\n ----------\n include_meta_estimators : boolean, default=False\n Whether to include meta-estimators that can be constructed using\n an estimator as their first argument. These are currently\n BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,\n OneVsRestClassifier, RFE, RFECV.\n\n include_others : boolean, default=False\n Wether to include meta-estimators that are somehow special and can\n not be default-constructed sensibly. These are currently\n Pipeline, FeatureUnion and GridSearchCV\n\n type_filter : string or None, default=None\n Which kind of estimators should be returned. If None, no filter is\n applied and all estimators are returned. 
Possible values are\n 'classifier', 'regressor', 'cluster' and 'transformer' to get\n estimators only of these specific types.\n\n Returns\n -------\n estimators : list of tuples\n List of (name, class), where ``name`` is the class name as string\n and ``class`` is the actuall type of the class.\n \"\"\"\n def is_abstract(c):\n if not(hasattr(c, '__abstractmethods__')):\n return False\n if not len(c.__abstractmethods__):\n return False\n return True\n\n all_classes = []\n # get parent folder\n path = sklearn.__path__\n for importer, modname, ispkg in pkgutil.walk_packages(\n path=path, prefix='sklearn.', onerror=lambda x: None):\n module = __import__(modname, fromlist=\"dummy\")\n if \".tests.\" in modname:\n continue\n classes = inspect.getmembers(module, inspect.isclass)\n all_classes.extend(classes)\n\n all_classes = set(all_classes)\n\n estimators = [c for c in all_classes\n if (issubclass(c[1], BaseEstimator)\n and c[0] != 'BaseEstimator')]\n # get rid of abstract base classes\n estimators = [c for c in estimators if not is_abstract(c[1])]\n\n if not include_other:\n estimators = [c for c in estimators if not c[0] in other]\n # possibly get rid of meta estimators\n if not include_meta_estimators:\n estimators = [c for c in estimators if not c[0] in meta_estimators]\n\n if type_filter == 'classifier':\n estimators = [est for est in estimators\n if issubclass(est[1], ClassifierMixin)]\n elif type_filter == 'regressor':\n estimators = [est for est in estimators\n if issubclass(est[1], RegressorMixin)]\n elif type_filter == 'transformer':\n estimators = [est for est in estimators\n if issubclass(est[1], TransformerMixin)]\n elif type_filter == 'cluster':\n estimators = [est for est in estimators\n if issubclass(est[1], ClusterMixin)]\n elif type_filter is not None:\n raise ValueError(\"Parameter type_filter must be 'classifier', \"\n \"'regressor', 'transformer', 'cluster' or None, got\"\n \" %s.\" % repr(type_filter))\n\n # We sort in order to have reproducible test failures\n return sorted(estimators)\n\n\ndef set_random_state(estimator, random_state=0):\n if \"random_state\" in estimator.get_params().keys():\n estimator.set_params(random_state=random_state)\n\n\ndef if_matplotlib(func):\n \"\"\"Test decorator that skips test if matplotlib not installed. 
\"\"\"\n\n @wraps(func)\n def run_test(*args, **kwargs):\n try:\n import matplotlib\n matplotlib.use('Agg', warn=False)\n # this fails if no $DISPLAY specified\n matplotlib.pylab.figure()\n except:\n raise SkipTest('Matplotlib not available.')\n else:\n return func(*args, **kwargs)\n return run_test\n","license":"bsd-3-clause"} {"repo_name":"kylerbrown\/scikit-learn","path":"sklearn\/covariance\/tests\/test_robust_covariance.py","copies":"213","size":"3359","content":"# Author: Alexandre Gramfort \n# Gael Varoquaux \n# Virgile Fritsch \n#\n# License: BSD 3 clause\n\nimport numpy as np\n\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.validation import NotFittedError\n\nfrom sklearn import datasets\nfrom sklearn.covariance import empirical_covariance, MinCovDet, \\\n EllipticEnvelope\n\nX = datasets.load_iris().data\nX_1d = X[:, 0]\nn_samples, n_features = X.shape\n\n\ndef test_mcd():\n # Tests the FastMCD algorithm implementation\n # Small data set\n # test without outliers (random independent normal data)\n launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)\n # test with a contaminated data set (medium contamination)\n launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)\n # test with a contaminated data set (strong contamination)\n launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)\n\n # Medium data set\n launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)\n\n # Large data set\n launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)\n\n # 1D data set\n launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)\n\n\ndef launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,\n tol_support):\n\n rand_gen = np.random.RandomState(0)\n data = rand_gen.randn(n_samples, n_features)\n # add some outliers\n outliers_index = rand_gen.permutation(n_samples)[:n_outliers]\n outliers_offset = 10. * \\\n (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)\n data[outliers_index] += outliers_offset\n inliers_mask = np.ones(n_samples).astype(bool)\n inliers_mask[outliers_index] = False\n\n pure_data = data[inliers_mask]\n # compute MCD by fitting an object\n mcd_fit = MinCovDet(random_state=rand_gen).fit(data)\n T = mcd_fit.location_\n S = mcd_fit.covariance_\n H = mcd_fit.support_\n # compare with the estimates learnt from the inliers\n error_location = np.mean((pure_data.mean(0) - T) ** 2)\n assert(error_location < tol_loc)\n error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)\n assert(error_cov < tol_cov)\n assert(np.sum(H) >= tol_support)\n assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)\n\n\ndef test_mcd_issue1127():\n # Check that the code does not break with X.shape = (3, 1)\n # (i.e. 
n_support = n_samples)\n rnd = np.random.RandomState(0)\n X = rnd.normal(size=(3, 1))\n mcd = MinCovDet()\n mcd.fit(X)\n\n\ndef test_outlier_detection():\n rnd = np.random.RandomState(0)\n X = rnd.randn(100, 10)\n clf = EllipticEnvelope(contamination=0.1)\n assert_raises(NotFittedError, clf.predict, X)\n assert_raises(NotFittedError, clf.decision_function, X)\n clf.fit(X)\n y_pred = clf.predict(X)\n decision = clf.decision_function(X, raw_values=True)\n decision_transformed = clf.decision_function(X, raw_values=False)\n\n assert_array_almost_equal(\n decision, clf.mahalanobis(X))\n assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)\n assert_almost_equal(clf.score(X, np.ones(100)),\n (100 - y_pred[y_pred == -1].size) \/ 100.)\n assert(sum(y_pred == -1) == sum(decision_transformed < 0))\n","license":"bsd-3-clause"} {"repo_name":"cl4rke\/scikit-learn","path":"sklearn\/metrics\/tests\/test_regression.py","copies":"272","size":"6066","content":"from __future__ import division, print_function\n\nimport numpy as np\nfrom itertools import product\n\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\n\nfrom sklearn.metrics import explained_variance_score\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import median_absolute_error\nfrom sklearn.metrics import r2_score\n\nfrom sklearn.metrics.regression import _check_reg_targets\n\n\ndef test_regression_metrics(n_samples=50):\n y_true = np.arange(n_samples)\n y_pred = y_true + 1\n\n assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)\n assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)\n assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)\n assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)\n assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)\n\n\ndef test_multioutput_regression():\n y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])\n y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])\n\n error = mean_squared_error(y_true, y_pred)\n assert_almost_equal(error, (1. \/ 3 + 2. \/ 3 + 2. \/ 3) \/ 4.)\n\n # mean_absolute_error and mean_squared_error are equal because\n # it is a binary problem.\n error = mean_absolute_error(y_true, y_pred)\n assert_almost_equal(error, (1. \/ 3 + 2. \/ 3 + 2. \/ 3) \/ 4.)\n\n error = r2_score(y_true, y_pred, multioutput='variance_weighted')\n assert_almost_equal(error, 1. - 5. 
\/ 2)\n error = r2_score(y_true, y_pred, multioutput='uniform_average')\n assert_almost_equal(error, -.875)\n\n\n\ndef test_regression_metrics_at_limits():\n assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)\n assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)\n assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)\n assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)\n assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)\n\n\ndef test__check_reg_targets():\n # All of length 3\n EXAMPLES = [\n (\"continuous\", [1, 2, 3], 1),\n (\"continuous\", [[1], [2], [3]], 1),\n (\"continuous-multioutput\", [[1, 1], [2, 2], [3, 1]], 2),\n (\"continuous-multioutput\", [[5, 1], [4, 2], [3, 1]], 2),\n (\"continuous-multioutput\", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),\n ]\n\n for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,\n repeat=2):\n\n if type1 == type2 and n_out1 == n_out2:\n y_type, y_check1, y_check2, multioutput = _check_reg_targets(\n y1, y2, None)\n assert_equal(type1, y_type)\n if type1 == 'continuous':\n assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))\n assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))\n else:\n assert_array_equal(y_check1, y1)\n assert_array_equal(y_check2, y2)\n else:\n assert_raises(ValueError, _check_reg_targets, y1, y2, None)\n\n\ndef test_regression_multioutput_array():\n y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]\n y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]\n\n mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')\n mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')\n r = r2_score(y_true, y_pred, multioutput='raw_values')\n evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')\n\n assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)\n assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)\n assert_array_almost_equal(r, [0.95, 0.93], decimal=2)\n assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)\n\n # mean_absolute_error and mean_squared_error are equal because\n # it is a binary problem.\n y_true = [[0, 0]]*4\n y_pred = [[1, 1]]*4\n mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')\n mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')\n r = r2_score(y_true, y_pred, multioutput='raw_values')\n assert_array_almost_equal(mse, [1., 1.], decimal=2)\n assert_array_almost_equal(mae, [1., 1.], decimal=2)\n assert_array_almost_equal(r, [0., 0.], decimal=2)\n\n r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')\n assert_array_almost_equal(r, [0, -3.5], decimal=2)\n assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],\n multioutput='uniform_average'))\n evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],\n multioutput='raw_values')\n assert_array_almost_equal(evs, [0, -1.25], decimal=2)\n\n # Checking for the condition in which both numerator and denominator is\n # zero.\n y_true = [[1, 3], [-1, 2]]\n y_pred = [[1, 4], [-1, 1]]\n r2 = r2_score(y_true, y_pred, multioutput='raw_values')\n assert_array_almost_equal(r2, [1., -3.], decimal=2)\n assert_equal(np.mean(r2), r2_score(y_true, y_pred,\n multioutput='uniform_average'))\n evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')\n assert_array_almost_equal(evs, [1., -3.], decimal=2)\n assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))\n\n\ndef test_regression_custom_weights():\n y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]\n y_pred = [[1, 1], [2, -1], [5, 
4], [5, 6.5]]\n\n msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])\n maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])\n rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])\n evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])\n\n assert_almost_equal(msew, 0.39, decimal=2)\n assert_almost_equal(maew, 0.475, decimal=3)\n assert_almost_equal(rw, 0.94, decimal=2)\n assert_almost_equal(evsw, 0.94, decimal=2)\n","license":"bsd-3-clause"} {"repo_name":"ahye\/FYS2140-Resources","path":"examples\/animation\/func_animate_sin.py","copies":"1","size":"1284","content":"#!\/usr\/bin\/env python\n\"\"\"\nCreated on Mon 2 Dec 2013\n\nEksempelscript som viser hvordan en sinusboelge kan animeres med\nfunksjonsanimasjon.\n\n@author Benedicte Emilie Braekken\n\"\"\"\nfrom numpy import *\nfrom matplotlib.pyplot import *\nfrom matplotlib import animation\n\ndef wave( x, t ):\n '''\n Funksjonen beskriver en sinusboelge ved tiden t og punktet x.\n '''\n omega = 1 # Vinkelhastighet\n k = 1 # Boelgetall\n\n return sin( k * x - omega * t )\n\nT = 10\ndt = 0.01\nnx = 1e3\nnt = int( T \/ dt ) # Antall tidssteg\nt = 0\n\nall_waves = [] # Tom liste for aa ta vare paa boelgetilstandene\nx = linspace( -pi, pi, nx )\n\nwhile t < T:\n # Legger til en ny boelgetilstand for hver kjoering\n all_waves.append( wave( x, t ) )\n\n t += dt\n\n# Tegner initialtilstanden\nfig = figure() # Passer paa aa ta vare paa figuren\nline, = plot( x, all_waves[0] )\ndraw()\n\n# Konstanter til animasjonen\nFPS = 60 # Bilder i sekundet\ninter = 1. \/ FPS # Tid mellom hvert bilde\n\ndef init():\n '''\n '''\n line.set_data( [], [] )\n return line,\n\ndef get_frame( frame ):\n '''\n '''\n line.set_data( x, all_waves[ frame ] )\n return line,\n\nanim = animation.FuncAnimation( fig, get_frame, init_func=init,\n frames=nt, interval=inter, blit=True )\n\nshow()\n","license":"mit"} {"repo_name":"briandalessandro\/courses","path":"deeplearning1\/nbs\/utils\/utils.py","copies":"8","size":"7644","content":"from __future__ import division,print_function\nimport math, os, json, sys, re\nimport cPickle as pickle\nfrom glob import glob\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom operator import itemgetter, attrgetter, methodcaller\nfrom collections import OrderedDict\nimport itertools\nfrom itertools import chain\n\nimport pandas as pd\nimport PIL\nfrom PIL import Image\nfrom numpy.random import random, permutation, randn, normal, uniform, choice\nfrom numpy import newaxis\nimport scipy\nfrom scipy import misc, ndimage\nfrom scipy.ndimage.interpolation import zoom\nfrom scipy.ndimage import imread\nfrom sklearn.metrics import confusion_matrix\nimport bcolz\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.manifold import TSNE\n\nfrom IPython.lib.display import FileLink\n\nimport theano\nfrom theano import shared, tensor as T\nfrom theano.tensor.nnet import conv2d, nnet\nfrom theano.tensor.signal import pool\n\nimport keras\nfrom keras import backend as K\nfrom keras.utils.data_utils import get_file\nfrom keras.utils import np_utils\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional\nfrom keras.layers import TimeDistributed, Activation, SimpleRNN, GRU\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\nfrom keras.regularizers import l2, activity_l2, l1, activity_l1\nfrom keras.layers.normalization import BatchNormalization\nfrom 
keras.optimizers import SGD, RMSprop, Adam\nfrom keras.utils.layer_utils import layer_from_config\nfrom keras.metrics import categorical_crossentropy, categorical_accuracy\nfrom keras.layers.convolutional import *\nfrom keras.preprocessing import image, sequence\nfrom keras.preprocessing.text import Tokenizer\n\nfrom vgg16 import *\nfrom vgg16bn import *\nnp.set_printoptions(precision=4, linewidth=100)\n\n\nto_bw = np.array([0.299, 0.587, 0.114])\ndef gray(img):\n return np.rollaxis(img,0,3).dot(to_bw)\ndef to_plot(img):\n return np.rollaxis(img, 0, 3).astype(np.uint8)\ndef plot(img):\n plt.imshow(to_plot(img))\n\n\ndef floor(x):\n return int(math.floor(x))\ndef ceil(x):\n return int(math.ceil(x))\n\ndef plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):\n if type(ims[0]) is np.ndarray:\n ims = np.array(ims).astype(np.uint8)\n if (ims.shape[-1] != 3):\n ims = ims.transpose((0,2,3,1))\n f = plt.figure(figsize=figsize)\n for i in range(len(ims)):\n sp = f.add_subplot(rows, len(ims)\/\/rows, i+1)\n if titles is not None:\n sp.set_title(titles[i], fontsize=18)\n plt.imshow(ims[i], interpolation=None if interp else 'none')\n\n\ndef do_clip(arr, mx):\n clipped = np.clip(arr, (1-mx)\/1, mx)\n return clipped\/clipped.sum(axis=1)[:, np.newaxis]\n\n\ndef get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',\n target_size=(224,224)):\n return gen.flow_from_directory(dirname, target_size=target_size,\n class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)\n\n\ndef onehot(x):\n return to_categorical(x)\n\n\ndef wrap_config(layer):\n return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}\n\n\ndef copy_layer(layer): return layer_from_config(wrap_config(layer))\n\n\ndef copy_layers(layers): return [copy_layer(layer) for layer in layers]\n\n\ndef copy_weights(from_layers, to_layers):\n for from_layer,to_layer in zip(from_layers, to_layers):\n to_layer.set_weights(from_layer.get_weights())\n\n\ndef copy_model(m):\n res = Sequential(copy_layers(m.layers))\n copy_weights(m.layers, res.layers)\n return res\n\n\ndef insert_layer(model, new_layer, index):\n res = Sequential()\n for i,layer in enumerate(model.layers):\n if i==index: res.add(new_layer)\n copied = layer_from_config(wrap_config(layer))\n res.add(copied)\n copied.set_weights(layer.get_weights())\n return res\n\n\ndef adjust_dropout(weights, prev_p, new_p):\n scal = (1-prev_p)\/(1-new_p)\n return [o*scal for o in weights]\n\n\ndef get_data(path, target_size=(224,224)):\n batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)\n return np.concatenate([batches.next() for i in range(batches.nb_sample)])\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n (This function is copied from the scikit docs.)\n \"\"\"\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') \/ cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() \/ 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n 
plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\ndef save_array(fname, arr):\n c=bcolz.carray(arr, rootdir=fname, mode='w')\n c.flush()\n\n\ndef load_array(fname):\n return bcolz.open(fname)[:]\n\n\ndef mk_size(img, r2c):\n r,c,_ = img.shape\n curr_r2c = r\/c\n new_r, new_c = r,c\n if r2c>curr_r2c:\n new_r = floor(c*r2c)\n else:\n new_c = floor(r\/r2c)\n arr = np.zeros((new_r, new_c, 3), dtype=np.float32)\n r2=(new_r-r)\/\/2\n c2=(new_c-c)\/\/2\n arr[floor(r2):floor(r2)+r,floor(c2):floor(c2)+c] = img\n return arr\n\n\ndef mk_square(img):\n x,y,_ = img.shape\n maxs = max(img.shape[:2])\n y2=(maxs-y)\/\/2\n x2=(maxs-x)\/\/2\n arr = np.zeros((maxs,maxs,3), dtype=np.float32)\n arr[floor(x2):floor(x2)+x,floor(y2):floor(y2)+y] = img\n return arr\n\n\ndef vgg_ft(out_dim):\n vgg = Vgg16()\n vgg.ft(out_dim)\n model = vgg.model\n return model\n\ndef vgg_ft_bn(out_dim):\n vgg = Vgg16BN()\n vgg.ft(out_dim)\n model = vgg.model\n return model\n\n\ndef get_classes(path):\n batches = get_batches(path+'train', shuffle=False, batch_size=1)\n val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)\n test_batches = get_batches(path+'test', shuffle=False, batch_size=1)\n return (val_batches.classes, batches.classes, onehot(val_batches.classes), onehot(batches.classes),\n val_batches.filenames, batches.filenames, test_batches.filenames)\n\n\ndef split_at(model, layer_type):\n layers = model.layers\n layer_idx = [index for index,layer in enumerate(layers)\n if type(layer) is layer_type][-1]\n return layers[:layer_idx+1], layers[layer_idx+1:]\n\n\nclass MixIterator(object):\n def __init__(self, iters):\n self.iters = iters\n self.multi = type(iters) is list\n if self.multi:\n self.N = sum([it[0].N for it in self.iters])\n else:\n self.N = sum([it.N for it in self.iters])\n\n def reset(self):\n for it in self.iters: it.reset()\n\n def __iter__(self):\n return self\n\n def next(self, *args, **kwargs):\n if self.multi:\n nexts = [[next(it) for it in o] for o in self.iters]\n n0s = np.concatenate([n[0] for n in o])\n n1s = np.concatenate([n[1] for n in o])\n return (n0, n1)\n else:\n nexts = [next(it) for it in self.iters]\n n0 = np.concatenate([n[0] for n in nexts])\n n1 = np.concatenate([n[1] for n in nexts])\n return (n0, n1)\n\n","license":"apache-2.0"} {"repo_name":"DinoCow\/airflow","path":"tests\/providers\/apache\/pinot\/hooks\/test_pinot.py","copies":"3","size":"9346","content":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport io\nimport os\nimport subprocess\nimport unittest\nfrom unittest import mock\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.providers.apache.pinot.hooks.pinot import PinotAdminHook, PinotDbApiHook\n\n\nclass TestPinotAdminHook(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self.conn = conn = mock.MagicMock()\n self.conn.host = 'host'\n self.conn.port = '1000'\n self.conn.extra_dejson = {'cmd_path': '.\/pinot-admin.sh'}\n\n class PinotAdminHookTest(PinotAdminHook):\n def get_connection(self, conn_id):\n return conn\n\n self.db_hook = PinotAdminHookTest()\n\n @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')\n def test_add_schema(self, mock_run_cli):\n params = [\"schema_file\", False]\n self.db_hook.add_schema(*params)\n mock_run_cli.assert_called_once_with(\n [\n 'AddSchema',\n '-controllerHost',\n self.conn.host,\n '-controllerPort',\n self.conn.port,\n '-schemaFile',\n params[0],\n ]\n )\n\n @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')\n def test_add_table(self, mock_run_cli):\n params = [\"config_file\", False]\n self.db_hook.add_table(*params)\n mock_run_cli.assert_called_once_with(\n [\n 'AddTable',\n '-controllerHost',\n self.conn.host,\n '-controllerPort',\n self.conn.port,\n '-filePath',\n params[0],\n ]\n )\n\n @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')\n def test_create_segment(self, mock_run_cli):\n params = {\n \"generator_config_file\": \"a\",\n \"data_dir\": \"b\",\n \"segment_format\": \"c\",\n \"out_dir\": \"d\",\n \"overwrite\": True,\n \"table_name\": \"e\",\n \"segment_name\": \"f\",\n \"time_column_name\": \"g\",\n \"schema_file\": \"h\",\n \"reader_config_file\": \"i\",\n \"enable_star_tree_index\": False,\n \"star_tree_index_spec_file\": \"j\",\n \"hll_size\": 9,\n \"hll_columns\": \"k\",\n \"hll_suffix\": \"l\",\n \"num_threads\": 8,\n \"post_creation_verification\": True,\n \"retry\": 7,\n }\n\n self.db_hook.create_segment(**params)\n\n mock_run_cli.assert_called_once_with(\n [\n 'CreateSegment',\n '-generatorConfigFile',\n params[\"generator_config_file\"],\n '-dataDir',\n params[\"data_dir\"],\n '-format',\n params[\"segment_format\"],\n '-outDir',\n params[\"out_dir\"],\n '-overwrite',\n params[\"overwrite\"],\n '-tableName',\n params[\"table_name\"],\n '-segmentName',\n params[\"segment_name\"],\n '-timeColumnName',\n params[\"time_column_name\"],\n '-schemaFile',\n params[\"schema_file\"],\n '-readerConfigFile',\n params[\"reader_config_file\"],\n '-starTreeIndexSpecFile',\n params[\"star_tree_index_spec_file\"],\n '-hllSize',\n params[\"hll_size\"],\n '-hllColumns',\n params[\"hll_columns\"],\n '-hllSuffix',\n params[\"hll_suffix\"],\n '-numThreads',\n params[\"num_threads\"],\n '-postCreationVerification',\n params[\"post_creation_verification\"],\n '-retry',\n params[\"retry\"],\n ]\n )\n\n @mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')\n def test_upload_segment(self, mock_run_cli):\n params = [\"segment_dir\", False]\n self.db_hook.upload_segment(*params)\n mock_run_cli.assert_called_once_with(\n [\n 'UploadSegment',\n '-controllerHost',\n self.conn.host,\n '-controllerPort',\n self.conn.port,\n '-segmentDir',\n params[0],\n ]\n )\n\n @mock.patch('subprocess.Popen')\n def test_run_cli_success(self, mock_popen):\n mock_proc = mock.MagicMock()\n mock_proc.returncode = 0\n 
mock_proc.stdout = io.BytesIO(b'')\n mock_popen.return_value = mock_proc\n\n params = [\"foo\", \"bar\", \"baz\"]\n self.db_hook.run_cli(params)\n params.insert(0, self.conn.extra_dejson.get('cmd_path'))\n mock_popen.assert_called_once_with(\n params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None\n )\n\n @mock.patch('subprocess.Popen')\n def test_run_cli_failure_error_message(self, mock_popen):\n msg = b\"Exception caught\"\n mock_proc = mock.MagicMock()\n mock_proc.returncode = 0\n mock_proc.stdout = io.BytesIO(msg)\n mock_popen.return_value = mock_proc\n\n params = [\"foo\", \"bar\", \"baz\"]\n with self.assertRaises(AirflowException, msg=msg):\n self.db_hook.run_cli(params)\n params.insert(0, self.conn.extra_dejson.get('cmd_path'))\n mock_popen.assert_called_once_with(\n params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None\n )\n\n @mock.patch('subprocess.Popen')\n def test_run_cli_failure_status_code(self, mock_popen):\n mock_proc = mock.MagicMock()\n mock_proc.returncode = 1\n mock_proc.stdout = io.BytesIO(b'')\n mock_popen.return_value = mock_proc\n\n self.db_hook.pinot_admin_system_exit = True\n params = [\"foo\", \"bar\", \"baz\"]\n with self.assertRaises(AirflowException):\n self.db_hook.run_cli(params)\n params.insert(0, self.conn.extra_dejson.get('cmd_path'))\n env = os.environ.copy()\n env.update({\"JAVA_OPTS\": \"-Dpinot.admin.system.exit=true \"})\n mock_popen.assert_called_once_with(\n params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=env\n )\n\n\nclass TestPinotDbApiHook(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self.conn = conn = mock.MagicMock()\n self.conn.host = 'host'\n self.conn.port = '1000'\n self.conn.conn_type = 'http'\n self.conn.extra_dejson = {'endpoint': 'query\/sql'}\n self.cur = mock.MagicMock()\n self.conn.cursor.return_value = self.cur\n self.conn.__enter__.return_value = self.cur\n self.conn.__exit__.return_value = None\n\n class TestPinotDBApiHook(PinotDbApiHook):\n def get_conn(self):\n return conn\n\n def get_connection(self, conn_id):\n return conn\n\n self.db_hook = TestPinotDBApiHook\n\n def test_get_uri(self):\n \"\"\"\n Test on getting a pinot connection uri\n \"\"\"\n db_hook = self.db_hook()\n self.assertEqual(db_hook.get_uri(), 'http:\/\/host:1000\/query\/sql')\n\n def test_get_conn(self):\n \"\"\"\n Test on getting a pinot connection\n \"\"\"\n conn = self.db_hook().get_conn()\n self.assertEqual(conn.host, 'host')\n self.assertEqual(conn.port, '1000')\n self.assertEqual(conn.conn_type, 'http')\n self.assertEqual(conn.extra_dejson.get('endpoint'), 'query\/sql')\n\n def test_get_records(self):\n statement = 'SQL'\n result_sets = [('row1',), ('row2',)]\n self.cur.fetchall.return_value = result_sets\n self.assertEqual(result_sets, self.db_hook().get_records(statement))\n\n def test_get_first(self):\n statement = 'SQL'\n result_sets = [('row1',), ('row2',)]\n self.cur.fetchone.return_value = result_sets[0]\n self.assertEqual(result_sets[0], self.db_hook().get_first(statement))\n\n def test_get_pandas_df(self):\n statement = 'SQL'\n column = 'col'\n result_sets = [('row1',), ('row2',)]\n self.cur.description = [(column,)]\n self.cur.fetchall.return_value = result_sets\n df = self.db_hook().get_pandas_df(statement)\n self.assertEqual(column, df.columns[0])\n for i in range(len(result_sets)): # pylint: disable=consider-using-enumerate\n self.assertEqual(result_sets[i][0], df.values.tolist()[i][0])\n","license":"apache-2.0"} 
{"repo_name":"rnowling\/pop-gen-models","path":"single-pop\/single_pop.py","copies":"1","size":"3379","content":"import sys\nimport numpy as np\nimport numpy.random as npr\nfrom sklearn.neighbors.kde import KernelDensity\nfrom scipy.special import gammaln\nimport matplotlib.pyplot as plt\nfrom calculate_phist import read_counts\nfrom calculate_phist import normalize_haplotypes\n\ndef log_factorial(n):\n\treturn gammaln(n+1)\n\ndef log_multinomial(xs, ps):\n\tn = np.sum(xs)\n\tlog_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))\n\treturn log_prob\n\nclass KDE_MCMC_Sampler(object):\n\tdef __init__(self, observed_counts):\n\t\t\"\"\"\n\t\tObserved counts is 3D matrix of pop, locus, haplotype\n\t\t\"\"\"\n\t\tself.observed_counts = observed_counts\n\t\tself.individual_counts = observed_counts.sum(axis=2)\n\t\tself.observed_frequencies = normalize_haplotypes(observed_counts)\n\n\t\tself.n_loci, self.n_pop, self.n_haplotypes = self.observed_counts.shape\n\t\t\n\t\t# from bamova\n\t\tself.DWEIGHT = 1.0\n\t\tself.DADD = 0.00001\n\t\tself.SMALL_NUM = 0.0000000000001\n\n\t\tprint \"initializing frequencies\"\n\t\tself.freq = np.zeros((self.n_loci, self.n_haplotypes))\n\t\tfor l in xrange(self.n_loci):\n\t\t\tself.freq[l, :] = self.sample_locus_freq(self.observed_frequencies[l, 0, :])\n\n\tdef sample_locus_freq(self, freq):\n\t\talphas = self.DWEIGHT * freq + self.DADD + self.SMALL_NUM\n\n\t\treturn npr.dirichlet(alphas)\n\n\tdef locus_prob(self, locus_obs_counts, locus_freq):\n\t\tlog_prob_sum = 0.0\n\t\tfor p in xrange(self.n_pop):\n\t\t\tlog_prob_sum += log_multinomial(locus_obs_counts[p], locus_freq)\n\t\treturn log_prob_sum\n\n\n\tdef step(self):\n\t\ttotal_log_prob = 0.0\n\t\tfor l in xrange(self.n_loci):\n\t\t\tlocus_indiv_counts = self.individual_counts[l, :]\n\t\t\tlocus_obs_counts = self.observed_counts[l, :, :]\n\n\t\t\tlog_prob = self.locus_prob(locus_obs_counts, self.freq[l, :])\n\n\t\t\tproposed_locus_freq = self.sample_locus_freq(self.freq[l, :])\n\n\t\t\tproposed_log_prob = self.locus_prob(locus_obs_counts, proposed_locus_freq)\n\t\t\t\t\n\t\t\tlog_prob_ratio = proposed_log_prob - log_prob\n\t\t\tlog_r = np.log(npr.random())\n\n\t\t\tif proposed_log_prob >= log_prob or log_r <= log_prob_ratio:\n\t\t\t\tself.freq[l, :] = proposed_locus_freq\n\t\t\t\tlog_prob = proposed_log_prob\n\n\t\t\ttotal_log_prob += log_prob\n\n\t\tlocus_prob = []\n\t\tfor l in xrange(self.n_loci):\n\t\t\tlog_prob = self.locus_prob(locus_obs_counts, self.freq[l, :])\n\t\t\tlocus_prob.append(log_prob)\n\n\t\treturn self.freq, total_log_prob, locus_prob\n\ndef plot_log_prob(flname, log_probs):\n\tplt.clf()\n\tplt.hold(True)\n\tplt.hist(log_probs, bins=30)\n\tplt.xlabel(\"Log Probability\", fontsize=16)\n\tplt.xlim([min(log_probs), 0.0])\n\tplt.ylabel(\"Occurrences (Loci)\", fontsize=16)\n\tplt.savefig(flname, DPI=200)\n\ndef simulate(occur_fl, n_steps, plot_flname, prob_flname):\n\tprint \"reading occurrences\"\n\tobserved_counts = read_counts(occur_fl)\n\tindividual_counts = observed_counts.sum(axis=2)\n\tobserved_frequencies = normalize_haplotypes(observed_counts)\n\n\tsampler = KDE_MCMC_Sampler(observed_counts)\n\n\tfl = open(prob_flname, \"w\")\n\n\tlocus_log_prob = []\n\tfor i in xrange(n_steps):\n\t\tfreq, log_prob, locus_log_prob = sampler.step()\n\t\tprint \"step\", i, \"log prob\", log_prob\n\n\t\tif i % 100 == 0:\n\t\t\tfor j, prob in enumerate(locus_log_prob):\n\t\t\t\tfl.write(\"%s %s %s\\n\" % (i, j, prob))\n\n\tfl.close()\n\n\n\tplot_log_prob(plot_flname, 
locus_log_prob)\n\nif __name__ == \"__main__\":\n\toccur_fl = sys.argv[1]\n\tn_steps = int(sys.argv[2])\n\tplot_flname = sys.argv[3]\n\tprob_flname = sys.argv[4]\n\n\tsimulate(occur_fl, n_steps, plot_flname, prob_flname)\n\n\t","license":"apache-2.0"} {"repo_name":"mljar\/mljar-api-python","path":"tests\/result_client_test.py","copies":"1","size":"4641","content":"'''\nResultClient tests.\n'''\nimport os\nimport unittest\nimport pandas as pd\nimport time\n\nfrom mljar.client.project import ProjectClient\nfrom mljar.client.dataset import DatasetClient\nfrom mljar.client.experiment import ExperimentClient\nfrom mljar.client.result import ResultClient\nfrom mljar.exceptions import BadRequestException\n\nfrom .project_based_test import ProjectBasedTest, get_postfix\n\nclass ResultClientTest(ProjectBasedTest):\n\n def setUp(self):\n proj_title = 'Test project-01'+get_postfix()\n proj_task = 'bin_class'\n self.expt_title = 'Test experiment-01'\n self.validation_kfolds = 5\n self.validation_shuffle = True\n self.validation_stratify = True\n self.validation_train_split = None\n self.algorithms = ['xgb']\n self.metric = 'logloss'\n self.tuning_mode = 'Normal'\n self.time_constraint = 1\n self.create_enseble = False\n # setup project\n self.project_client = ProjectClient()\n self.project = self.project_client.create_project(title = proj_title, task = proj_task)\n # load data\n df = pd.read_csv('tests\/data\/test_1.csv')\n cols = ['sepal length', 'sepal width', 'petal length', 'petal width']\n target = 'class'\n # add dataset\n self.dataset = DatasetClient(self.project.hid).add_dataset_if_not_exists(df[cols], df[target])\n\n\n def tearDown(self):\n # clean\n self.project_client.delete_project(self.project.hid)\n\n def test_get_results_for_wrong_project(self):\n with self.assertRaises(BadRequestException) as context:\n # init result client\n rc = ResultClient('wrong-hid')\n self.assertTrue(rc is not None)\n # get results - should raise exception\n rc.get_results()\n\n\n def test_get_results_for_project(self):\n # init result client\n rc = ResultClient(self.project.hid)\n self.assertNotEqual(rc, None)\n # get results - should be empty\n results = rc.get_results()\n self.assertEqual(results, [])\n # add experiment\n ec = ExperimentClient(self.project.hid)\n # create new experiment\n self.experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,\n self.validation_kfolds, self.validation_shuffle,\n self.validation_stratify, self.validation_train_split,\n self.algorithms, self.metric,\n self.tuning_mode, self.time_constraint, self.create_enseble)\n # wait some time till models are initialized\n time.sleep(60)\n # get results - should be some models there\n results = rc.get_results()\n self.assertNotEqual(len(results), 0)\n\n\n def test_get_results_for_experiment(self):\n # init result client\n rc = ResultClient(self.project.hid)\n self.assertNotEqual(rc, None)\n # get results - should be empty\n results = rc.get_results()\n self.assertEqual(results, [])\n # get results for wrong experiment hid\n results = rc.get_results('wrong-hid')\n self.assertEqual(results, [])\n # add experiment\n ec = ExperimentClient(self.project.hid)\n # create new experiment\n self.experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,\n self.validation_kfolds, self.validation_shuffle,\n self.validation_stratify, self.validation_train_split,\n self.algorithms, self.metric,\n self.tuning_mode, self.time_constraint, self.create_enseble)\n # wait 
some time till models are initialized\n time.sleep(60)\n # get results for experiment - should be some models there\n results = rc.get_results(self.experiment.hid)\n self.assertNotEqual(len(results), 0)\n\n # get results for project\n project_results = rc.get_results()\n self.assertNotEqual(results, [])\n # get results for wrong experiment hid\n # all results from project should be returned\n results_2 = rc.get_results('wrong-hid')\n self.assertEqual(len(project_results), len(results_2))\n\n for r in project_results:\n # test __str__ method\n self.assertTrue('id' in str(r))\n self.assertTrue('model' in str(r))\n self.assertTrue('status' in str(r))\n","license":"apache-2.0"} {"repo_name":"ephes\/scikit-learn","path":"sklearn\/feature_extraction\/tests\/test_text.py","copies":"110","size":"34127","content":"from __future__ import unicode_literals\nimport warnings\n\nfrom sklearn.feature_extraction.text import strip_tags\nfrom sklearn.feature_extraction.text import strip_accents_unicode\nfrom sklearn.feature_extraction.text import strip_accents_ascii\n\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.base import clone\n\nimport numpy as np\nfrom nose import SkipTest\nfrom nose.tools import assert_equal\nfrom nose.tools import assert_false\nfrom nose.tools import assert_not_equal\nfrom nose.tools import assert_true\nfrom nose.tools import assert_almost_equal\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom numpy.testing import assert_raises\nfrom sklearn.utils.testing import (assert_in, assert_less, assert_greater,\n assert_warns_message, assert_raise_message,\n clean_warning_registry)\n\nfrom collections import defaultdict, Mapping\nfrom functools import partial\nimport pickle\nfrom io import StringIO\n\n\nJUNK_FOOD_DOCS = (\n \"the pizza pizza beer copyright\",\n \"the pizza burger beer copyright\",\n \"the the pizza beer beer copyright\",\n \"the burger beer beer copyright\",\n \"the coke burger coke copyright\",\n \"the coke burger burger\",\n)\n\nNOTJUNK_FOOD_DOCS = (\n \"the salad celeri copyright\",\n \"the salad salad sparkling water copyright\",\n \"the the celeri celeri copyright\",\n \"the tomato tomato salad water\",\n \"the tomato salad water copyright\",\n)\n\nALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n\ndef uppercase(s):\n return strip_accents_unicode(s).upper()\n\n\ndef strip_eacute(s):\n return s.replace('\\xe9', 'e')\n\n\ndef split_tokenize(s):\n return s.split()\n\n\ndef lazy_analyze(s):\n return ['the_ultimate_feature']\n\n\ndef test_strip_accents():\n # check some classical latin accentuated symbols\n a = '\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe7\\xe8\\xe9\\xea\\xeb'\n expected = 'aaaaaaceeee'\n assert_equal(strip_accents_unicode(a), expected)\n\n a = '\\xec\\xed\\xee\\xef\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf9\\xfa\\xfb\\xfc\\xfd'\n expected = 'iiiinooooouuuuy'\n assert_equal(strip_accents_unicode(a), expected)\n\n # check some arabic\n a = '\\u0625' # halef with a hamza below\n expected = '\\u0627' # simple 
halef\n assert_equal(strip_accents_unicode(a), expected)\n\n # mix letters accentuated and not\n a = \"this is \\xe0 test\"\n expected = 'this is a test'\n assert_equal(strip_accents_unicode(a), expected)\n\n\ndef test_to_ascii():\n # check some classical latin accentuated symbols\n a = '\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe7\\xe8\\xe9\\xea\\xeb'\n expected = 'aaaaaaceeee'\n assert_equal(strip_accents_ascii(a), expected)\n\n a = '\\xec\\xed\\xee\\xef\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf9\\xfa\\xfb\\xfc\\xfd'\n expected = 'iiiinooooouuuuy'\n assert_equal(strip_accents_ascii(a), expected)\n\n # check some arabic\n a = '\\u0625' # halef with a hamza below\n expected = '' # halef has no direct ascii match\n assert_equal(strip_accents_ascii(a), expected)\n\n # mix letters accentuated and not\n a = \"this is \\xe0 test\"\n expected = 'this is a test'\n assert_equal(strip_accents_ascii(a), expected)\n\n\ndef test_word_analyzer_unigrams():\n for Vectorizer in (CountVectorizer, HashingVectorizer):\n wa = Vectorizer(strip_accents='ascii').build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \"c'\\xe9tait pas tr\\xeas bon.\")\n expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',\n 'etait', 'pas', 'tres', 'bon']\n assert_equal(wa(text), expected)\n\n text = \"This is a test, really.\\n\\n I met Harry yesterday.\"\n expected = ['this', 'is', 'test', 'really', 'met', 'harry',\n 'yesterday']\n assert_equal(wa(text), expected)\n\n wa = Vectorizer(input='file').build_analyzer()\n text = StringIO(\"This is a test with a file-like object!\")\n expected = ['this', 'is', 'test', 'with', 'file', 'like',\n 'object']\n assert_equal(wa(text), expected)\n\n # with custom preprocessor\n wa = Vectorizer(preprocessor=uppercase).build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \" c'\\xe9tait pas tr\\xeas bon.\")\n expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',\n 'ETAIT', 'PAS', 'TRES', 'BON']\n assert_equal(wa(text), expected)\n\n # with custom tokenizer\n wa = Vectorizer(tokenizer=split_tokenize,\n strip_accents='ascii').build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \"c'\\xe9tait pas tr\\xeas bon.\")\n expected = [\"j'ai\", 'mange', 'du', 'kangourou', 'ce', 'midi,',\n \"c'etait\", 'pas', 'tres', 'bon.']\n assert_equal(wa(text), expected)\n\n\ndef test_word_analyzer_unigrams_and_bigrams():\n wa = CountVectorizer(analyzer=\"word\", strip_accents='unicode',\n ngram_range=(1, 2)).build_analyzer()\n\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon.\"\n expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',\n 'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',\n 'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',\n 'etait pas', 'pas tres', 'tres bon']\n assert_equal(wa(text), expected)\n\n\ndef test_unicode_decode_error():\n # decode_error default to strict, so this should fail\n # First, encode (as bytes) a unicode string.\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon.\"\n text_bytes = text.encode('utf-8')\n\n # Then let the Analyzer try to decode it as ascii. 
It should fail,\n # because we have given it an incorrect encoding.\n wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()\n assert_raises(UnicodeDecodeError, wa, text_bytes)\n\n ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),\n encoding='ascii').build_analyzer()\n assert_raises(UnicodeDecodeError, ca, text_bytes)\n\n\ndef test_char_ngram_analyzer():\n cnga = CountVectorizer(analyzer='char', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon\"\n expected = [\"j'a\", \"'ai\", 'ai ', 'i m', ' ma']\n assert_equal(cnga(text)[:5], expected)\n expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']\n assert_equal(cnga(text)[-5:], expected)\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = ['thi', 'his', 'is ', 's i', ' is']\n assert_equal(cnga(text)[:5], expected)\n\n expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']\n assert_equal(cnga(text)[-5:], expected)\n\n cnga = CountVectorizer(input='file', analyzer='char',\n ngram_range=(3, 6)).build_analyzer()\n text = StringIO(\"This is a test with a file-like object!\")\n expected = ['thi', 'his', 'is ', 's i', ' is']\n assert_equal(cnga(text)[:5], expected)\n\n\ndef test_char_wb_ngram_analyzer():\n cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = [' th', 'thi', 'his', 'is ', ' thi']\n assert_equal(cnga(text)[:5], expected)\n\n expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']\n assert_equal(cnga(text)[-5:], expected)\n\n cnga = CountVectorizer(input='file', analyzer='char_wb',\n ngram_range=(3, 6)).build_analyzer()\n text = StringIO(\"A test with a file-like object!\")\n expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']\n assert_equal(cnga(text)[:6], expected)\n\n\ndef test_countvectorizer_custom_vocabulary():\n vocab = {\"pizza\": 0, \"beer\": 1}\n terms = set(vocab.keys())\n\n # Try a few of the supported types.\n for typ in [dict, list, iter, partial(defaultdict, int)]:\n v = typ(vocab)\n vect = CountVectorizer(vocabulary=v)\n vect.fit(JUNK_FOOD_DOCS)\n if isinstance(v, Mapping):\n assert_equal(vect.vocabulary_, vocab)\n else:\n assert_equal(set(vect.vocabulary_), terms)\n X = vect.transform(JUNK_FOOD_DOCS)\n assert_equal(X.shape[1], len(terms))\n\n\ndef test_countvectorizer_custom_vocabulary_pipeline():\n what_we_like = [\"pizza\", \"beer\"]\n pipe = Pipeline([\n ('count', CountVectorizer(vocabulary=what_we_like)),\n ('tfidf', TfidfTransformer())])\n X = pipe.fit_transform(ALL_FOOD_DOCS)\n assert_equal(set(pipe.named_steps['count'].vocabulary_),\n set(what_we_like))\n assert_equal(X.shape[1], len(what_we_like))\n\n\ndef test_countvectorizer_custom_vocabulary_repeated_indeces():\n vocab = {\"pizza\": 0, \"beer\": 0}\n try:\n CountVectorizer(vocabulary=vocab)\n except ValueError as e:\n assert_in(\"vocabulary contains repeated indices\", str(e).lower())\n\n\ndef test_countvectorizer_custom_vocabulary_gap_index():\n vocab = {\"pizza\": 1, \"beer\": 2}\n try:\n CountVectorizer(vocabulary=vocab)\n except ValueError as e:\n assert_in(\"doesn't contain index\", str(e).lower())\n\n\ndef test_countvectorizer_stop_words():\n cv = CountVectorizer()\n cv.set_params(stop_words='english')\n assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)\n cv.set_params(stop_words='_bad_str_stop_')\n assert_raises(ValueError, 
cv.get_stop_words)\n cv.set_params(stop_words='_bad_unicode_stop_')\n assert_raises(ValueError, cv.get_stop_words)\n stoplist = ['some', 'other', 'words']\n cv.set_params(stop_words=stoplist)\n assert_equal(cv.get_stop_words(), set(stoplist))\n\n\ndef test_countvectorizer_empty_vocabulary():\n try:\n vect = CountVectorizer(vocabulary=[])\n vect.fit([\"foo\"])\n assert False, \"we shouldn't get here\"\n except ValueError as e:\n assert_in(\"empty vocabulary\", str(e).lower())\n\n try:\n v = CountVectorizer(max_df=1.0, stop_words=\"english\")\n # fit on stopwords only\n v.fit([\"to be or not to be\", \"and me too\", \"and so do you\"])\n assert False, \"we shouldn't get here\"\n except ValueError as e:\n assert_in(\"empty vocabulary\", str(e).lower())\n\n\ndef test_fit_countvectorizer_twice():\n cv = CountVectorizer()\n X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])\n X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])\n assert_not_equal(X1.shape[1], X2.shape[1])\n\n\ndef test_tf_idf_smoothing():\n X = [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=True, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert_true((tfidf >= 0).all())\n\n # check normalization\n assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])\n\n # this is robust to features with only zeros\n X = [[1, 1, 0],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=True, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert_true((tfidf >= 0).all())\n\n\ndef test_tfidf_no_smoothing():\n X = [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=False, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert_true((tfidf >= 0).all())\n\n # check normalization\n assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])\n\n # the lack of smoothing make IDF fragile in the presence of feature with\n # only zeros\n X = [[1, 1, 0],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=False, norm='l2')\n\n clean_warning_registry()\n with warnings.catch_warnings(record=True) as w:\n 1. 
\/ np.array([0.])\n numpy_provides_div0_warning = len(w) == 1\n\n in_warning_message = 'divide by zero'\n tfidf = assert_warns_message(RuntimeWarning, in_warning_message,\n tr.fit_transform, X).toarray()\n if not numpy_provides_div0_warning:\n raise SkipTest(\"Numpy does not provide div 0 warnings.\")\n\n\ndef test_sublinear_tf():\n X = [[1], [2], [3]]\n tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)\n tfidf = tr.fit_transform(X).toarray()\n assert_equal(tfidf[0], 1)\n assert_greater(tfidf[1], tfidf[0])\n assert_greater(tfidf[2], tfidf[1])\n assert_less(tfidf[1], 2)\n assert_less(tfidf[2], 3)\n\n\ndef test_vectorizer():\n # raw documents as an iterator\n train_data = iter(ALL_FOOD_DOCS[:-1])\n test_data = [ALL_FOOD_DOCS[-1]]\n n_train = len(ALL_FOOD_DOCS) - 1\n\n # test without vocabulary\n v1 = CountVectorizer(max_df=0.5)\n counts_train = v1.fit_transform(train_data)\n if hasattr(counts_train, 'tocsr'):\n counts_train = counts_train.tocsr()\n assert_equal(counts_train[0, v1.vocabulary_[\"pizza\"]], 2)\n\n # build a vectorizer v1 with the same vocabulary as the one fitted by v1\n v2 = CountVectorizer(vocabulary=v1.vocabulary_)\n\n # compare that the two vectorizer give the same output on the test sample\n for v in (v1, v2):\n counts_test = v.transform(test_data)\n if hasattr(counts_test, 'tocsr'):\n counts_test = counts_test.tocsr()\n\n vocabulary = v.vocabulary_\n assert_equal(counts_test[0, vocabulary[\"salad\"]], 1)\n assert_equal(counts_test[0, vocabulary[\"tomato\"]], 1)\n assert_equal(counts_test[0, vocabulary[\"water\"]], 1)\n\n # stop word from the fixed list\n assert_false(\"the\" in vocabulary)\n\n # stop word found automatically by the vectorizer DF thresholding\n # words that are high frequent across the complete corpus are likely\n # to be not informative (either real stop words of extraction\n # artifacts)\n assert_false(\"copyright\" in vocabulary)\n\n # not present in the sample\n assert_equal(counts_test[0, vocabulary[\"coke\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"burger\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"beer\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"pizza\"]], 0)\n\n # test tf-idf\n t1 = TfidfTransformer(norm='l1')\n tfidf = t1.fit(counts_train).transform(counts_train).toarray()\n assert_equal(len(t1.idf_), len(v1.vocabulary_))\n assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))\n\n # test tf-idf with new data\n tfidf_test = t1.transform(counts_test).toarray()\n assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))\n\n # test tf alone\n t2 = TfidfTransformer(norm='l1', use_idf=False)\n tf = t2.fit(counts_train).transform(counts_train).toarray()\n assert_equal(t2.idf_, None)\n\n # test idf transform with unlearned idf vector\n t3 = TfidfTransformer(use_idf=True)\n assert_raises(ValueError, t3.transform, counts_train)\n\n # test idf transform with incompatible n_features\n X = [[1, 1, 5],\n [1, 1, 0]]\n t3.fit(X)\n X_incompt = [[1, 3],\n [1, 3]]\n assert_raises(ValueError, t3.transform, X_incompt)\n\n # L1-normalized term frequencies sum to one\n assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)\n\n # test the direct tfidf vectorizer\n # (equivalent to term count vectorizer + tfidf transformer)\n train_data = iter(ALL_FOOD_DOCS[:-1])\n tv = TfidfVectorizer(norm='l1')\n\n tv.max_df = v1.max_df\n tfidf2 = tv.fit_transform(train_data).toarray()\n assert_false(tv.fixed_vocabulary_)\n assert_array_almost_equal(tfidf, tfidf2)\n\n # test the direct tfidf vectorizer with new data\n 
tfidf_test2 = tv.transform(test_data).toarray()\n assert_array_almost_equal(tfidf_test, tfidf_test2)\n\n # test transform on unfitted vectorizer with empty vocabulary\n v3 = CountVectorizer(vocabulary=None)\n assert_raises(ValueError, v3.transform, train_data)\n\n # ascii preprocessor?\n v3.set_params(strip_accents='ascii', lowercase=False)\n assert_equal(v3.build_preprocessor(), strip_accents_ascii)\n\n # error on bad strip_accents param\n v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)\n assert_raises(ValueError, v3.build_preprocessor)\n\n # error with bad analyzer type\n v3.set_params = '_invalid_analyzer_type_'\n assert_raises(ValueError, v3.build_analyzer)\n\n\ndef test_tfidf_vectorizer_setters():\n tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,\n sublinear_tf=False)\n tv.norm = 'l1'\n assert_equal(tv._tfidf.norm, 'l1')\n tv.use_idf = True\n assert_true(tv._tfidf.use_idf)\n tv.smooth_idf = True\n assert_true(tv._tfidf.smooth_idf)\n tv.sublinear_tf = True\n assert_true(tv._tfidf.sublinear_tf)\n\n\ndef test_hashing_vectorizer():\n v = HashingVectorizer()\n X = v.transform(ALL_FOOD_DOCS)\n token_nnz = X.nnz\n assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))\n assert_equal(X.dtype, v.dtype)\n\n # By default the hashed values receive a random sign and l2 normalization\n # makes the feature values bounded\n assert_true(np.min(X.data) > -1)\n assert_true(np.min(X.data) < 0)\n assert_true(np.max(X.data) > 0)\n assert_true(np.max(X.data) < 1)\n\n # Check that the rows are normalized\n for i in range(X.shape[0]):\n assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)\n\n # Check vectorization with some non-default parameters\n v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')\n X = v.transform(ALL_FOOD_DOCS)\n assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))\n assert_equal(X.dtype, v.dtype)\n\n # ngrams generate more non zeros\n ngrams_nnz = X.nnz\n assert_true(ngrams_nnz > token_nnz)\n assert_true(ngrams_nnz < 2 * token_nnz)\n\n # makes the feature values bounded\n assert_true(np.min(X.data) > 0)\n assert_true(np.max(X.data) < 1)\n\n # Check that the rows are normalized\n for i in range(X.shape[0]):\n assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)\n\n\ndef test_feature_names():\n cv = CountVectorizer(max_df=0.5)\n\n # test for Value error on unfitted\/empty vocabulary\n assert_raises(ValueError, cv.get_feature_names)\n\n X = cv.fit_transform(ALL_FOOD_DOCS)\n n_samples, n_features = X.shape\n assert_equal(len(cv.vocabulary_), n_features)\n\n feature_names = cv.get_feature_names()\n assert_equal(len(feature_names), n_features)\n assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'],\n feature_names)\n\n for idx, name in enumerate(feature_names):\n assert_equal(idx, cv.vocabulary_.get(name))\n\n\ndef test_vectorizer_max_features():\n vec_factories = (\n CountVectorizer,\n TfidfVectorizer,\n )\n\n expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])\n expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',\n u'sparkling', u'water', u'the'])\n\n for vec_factory in vec_factories:\n # test bounded number of extracted features\n vectorizer = vec_factory(max_df=0.6, max_features=4)\n vectorizer.fit(ALL_FOOD_DOCS)\n assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)\n assert_equal(vectorizer.stop_words_, expected_stop_words)\n\n\ndef test_count_vectorizer_max_features():\n # Regression test: max_features didn't work correctly in 
0.14.\n\n cv_1 = CountVectorizer(max_features=1)\n cv_3 = CountVectorizer(max_features=3)\n cv_None = CountVectorizer(max_features=None)\n\n counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n\n features_1 = cv_1.get_feature_names()\n features_3 = cv_3.get_feature_names()\n features_None = cv_None.get_feature_names()\n\n # The most common feature is \"the\", with frequency 7.\n assert_equal(7, counts_1.max())\n assert_equal(7, counts_3.max())\n assert_equal(7, counts_None.max())\n\n # The most common feature should be the same\n assert_equal(\"the\", features_1[np.argmax(counts_1)])\n assert_equal(\"the\", features_3[np.argmax(counts_3)])\n assert_equal(\"the\", features_None[np.argmax(counts_None)])\n\n\ndef test_vectorizer_max_df():\n test_data = ['abc', 'dea', 'eat']\n vect = CountVectorizer(analyzer='char', max_df=1.0)\n vect.fit(test_data)\n assert_true('a' in vect.vocabulary_.keys())\n assert_equal(len(vect.vocabulary_.keys()), 6)\n assert_equal(len(vect.stop_words_), 0)\n\n vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5\n vect.fit(test_data)\n assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored\n assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain\n assert_true('a' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 2)\n\n vect.max_df = 1\n vect.fit(test_data)\n assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored\n assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain\n assert_true('a' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 2)\n\n\ndef test_vectorizer_min_df():\n test_data = ['abc', 'dea', 'eat']\n vect = CountVectorizer(analyzer='char', min_df=1)\n vect.fit(test_data)\n assert_true('a' in vect.vocabulary_.keys())\n assert_equal(len(vect.vocabulary_.keys()), 6)\n assert_equal(len(vect.stop_words_), 0)\n\n vect.min_df = 2\n vect.fit(test_data)\n assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored\n assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain\n assert_true('c' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 4)\n\n vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4\n vect.fit(test_data)\n assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored\n assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains\n assert_true('c' in vect.stop_words_)\n assert_equal(len(vect.stop_words_), 5)\n\n\ndef test_count_binary_occurrences():\n # by default multiple occurrences are counted as longs\n test_data = ['aaabc', 'abbde']\n vect = CountVectorizer(analyzer='char', max_df=1.0)\n X = vect.fit_transform(test_data).toarray()\n assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())\n assert_array_equal([[3, 1, 1, 0, 0],\n [1, 2, 0, 1, 1]], X)\n\n # using boolean features, we can fetch the binary occurrence info\n # instead.\n vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)\n X = vect.fit_transform(test_data).toarray()\n assert_array_equal([[1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1]], X)\n\n # check the ability to change the dtype\n vect = CountVectorizer(analyzer='char', max_df=1.0,\n binary=True, dtype=np.float32)\n X_sparse = vect.fit_transform(test_data)\n assert_equal(X_sparse.dtype, np.float32)\n\n\ndef test_hashed_binary_occurrences():\n # by default multiple occurrences are counted as longs\n test_data = ['aaabc', 'abbde']\n vect = HashingVectorizer(analyzer='char', non_negative=True,\n 
norm=None)\n X = vect.transform(test_data)\n assert_equal(np.max(X[0:1].data), 3)\n assert_equal(np.max(X[1:2].data), 2)\n assert_equal(X.dtype, np.float64)\n\n # using boolean features, we can fetch the binary occurrence info\n # instead.\n vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,\n norm=None)\n X = vect.transform(test_data)\n assert_equal(np.max(X.data), 1)\n assert_equal(X.dtype, np.float64)\n\n # check the ability to change the dtype\n vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,\n norm=None, dtype=np.float64)\n X = vect.transform(test_data)\n assert_equal(X.dtype, np.float64)\n\n\ndef test_vectorizer_inverse_transform():\n # raw documents\n data = ALL_FOOD_DOCS\n for vectorizer in (TfidfVectorizer(), CountVectorizer()):\n transformed_data = vectorizer.fit_transform(data)\n inversed_data = vectorizer.inverse_transform(transformed_data)\n analyze = vectorizer.build_analyzer()\n for doc, inversed_terms in zip(data, inversed_data):\n terms = np.sort(np.unique(analyze(doc)))\n inversed_terms = np.sort(np.unique(inversed_terms))\n assert_array_equal(terms, inversed_terms)\n\n # Test that inverse_transform also works with numpy arrays\n transformed_data = transformed_data.toarray()\n inversed_data2 = vectorizer.inverse_transform(transformed_data)\n for terms, terms2 in zip(inversed_data, inversed_data2):\n assert_array_equal(np.sort(terms), np.sort(terms2))\n\n\ndef test_count_vectorizer_pipeline_grid_selection():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n # split the dataset for model development and final evaluation\n train_data, test_data, target_train, target_test = train_test_split(\n data, target, test_size=.2, random_state=0)\n\n pipeline = Pipeline([('vect', CountVectorizer()),\n ('svc', LinearSVC())])\n\n parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'svc__loss': ('hinge', 'squared_hinge')\n }\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)\n\n # Check that the best model found by grid search is 100% correct on the\n # held out evaluation set.\n pred = grid_search.fit(train_data, target_train).predict(test_data)\n assert_array_equal(pred, target_test)\n\n # on this toy dataset bigram representation which is used in the last of\n # the grid_search is considered the best estimator since they all converge\n # to 100% accuracy models\n assert_equal(grid_search.best_score_, 1.0)\n best_vectorizer = grid_search.best_estimator_.named_steps['vect']\n assert_equal(best_vectorizer.ngram_range, (1, 1))\n\n\ndef test_vectorizer_pipeline_grid_selection():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n # split the dataset for model development and final evaluation\n train_data, test_data, target_train, target_test = train_test_split(\n data, target, test_size=.1, random_state=0)\n\n pipeline = Pipeline([('vect', TfidfVectorizer()),\n ('svc', LinearSVC())])\n\n parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'vect__norm': ('l1', 'l2'),\n 'svc__loss': ('hinge', 'squared_hinge'),\n }\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)\n\n # Check that the best 
model found by grid search is 100% correct on the\n # held out evaluation set.\n pred = grid_search.fit(train_data, target_train).predict(test_data)\n assert_array_equal(pred, target_test)\n\n # on this toy dataset bigram representation which is used in the last of\n # the grid_search is considered the best estimator since they all converge\n # to 100% accuracy models\n assert_equal(grid_search.best_score_, 1.0)\n best_vectorizer = grid_search.best_estimator_.named_steps['vect']\n assert_equal(best_vectorizer.ngram_range, (1, 1))\n assert_equal(best_vectorizer.norm, 'l2')\n assert_false(best_vectorizer.fixed_vocabulary_)\n\n\ndef test_vectorizer_pipeline_cross_validation():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n pipeline = Pipeline([('vect', TfidfVectorizer()),\n ('svc', LinearSVC())])\n\n cv_scores = cross_val_score(pipeline, data, target, cv=3)\n assert_array_equal(cv_scores, [1., 1., 1.])\n\n\ndef test_vectorizer_unicode():\n # tests that the count vectorizer works with cyrillic.\n document = (\n \"\\xd0\\x9c\\xd0\\xb0\\xd1\\x88\\xd0\\xb8\\xd0\\xbd\\xd0\\xbd\\xd0\\xbe\\xd0\"\n \"\\xb5 \\xd0\\xbe\\xd0\\xb1\\xd1\\x83\\xd1\\x87\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd0\"\n \"\\xb5 \\xe2\\x80\\x94 \\xd0\\xbe\\xd0\\xb1\\xd1\\x88\\xd0\\xb8\\xd1\\x80\\xd0\\xbd\"\n \"\\xd1\\x8b\\xd0\\xb9 \\xd0\\xbf\\xd0\\xbe\\xd0\\xb4\\xd1\\x80\\xd0\\xb0\\xd0\\xb7\"\n \"\\xd0\\xb4\\xd0\\xb5\\xd0\\xbb \\xd0\\xb8\\xd1\\x81\\xd0\\xba\\xd1\\x83\\xd1\\x81\"\n \"\\xd1\\x81\\xd1\\x82\\xd0\\xb2\\xd0\\xb5\\xd0\\xbd\\xd0\\xbd\\xd0\\xbe\\xd0\\xb3\"\n \"\\xd0\\xbe \\xd0\\xb8\\xd0\\xbd\\xd1\\x82\\xd0\\xb5\\xd0\\xbb\\xd0\\xbb\\xd0\"\n \"\\xb5\\xd0\\xba\\xd1\\x82\\xd0\\xb0, \\xd0\\xb8\\xd0\\xb7\\xd1\\x83\\xd1\\x87\"\n \"\\xd0\\xb0\\xd1\\x8e\\xd1\\x89\\xd0\\xb8\\xd0\\xb9 \\xd0\\xbc\\xd0\\xb5\\xd1\\x82\"\n \"\\xd0\\xbe\\xd0\\xb4\\xd1\\x8b \\xd0\\xbf\\xd0\\xbe\\xd1\\x81\\xd1\\x82\\xd1\\x80\"\n \"\\xd0\\xbe\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd1\\x8f \\xd0\\xb0\\xd0\\xbb\\xd0\\xb3\"\n \"\\xd0\\xbe\\xd1\\x80\\xd0\\xb8\\xd1\\x82\\xd0\\xbc\\xd0\\xbe\\xd0\\xb2, \\xd1\\x81\"\n \"\\xd0\\xbf\\xd0\\xbe\\xd1\\x81\\xd0\\xbe\\xd0\\xb1\\xd0\\xbd\\xd1\\x8b\\xd1\\x85 \"\n \"\\xd0\\xbe\\xd0\\xb1\\xd1\\x83\\xd1\\x87\\xd0\\xb0\\xd1\\x82\\xd1\\x8c\\xd1\\x81\\xd1\"\n \"\\x8f.\")\n\n vect = CountVectorizer()\n X_counted = vect.fit_transform([document])\n assert_equal(X_counted.shape, (1, 15))\n\n vect = HashingVectorizer(norm=None, non_negative=True)\n X_hashed = vect.transform([document])\n assert_equal(X_hashed.shape, (1, 2 ** 20))\n\n # No collisions on such a small dataset\n assert_equal(X_counted.nnz, X_hashed.nnz)\n\n # When norm is None and non_negative, the tokens are counted up to\n # collisions\n assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))\n\n\ndef test_tfidf_vectorizer_with_fixed_vocabulary():\n # non regression smoke test for inheritance issues\n vocabulary = ['pizza', 'celeri']\n vect = TfidfVectorizer(vocabulary=vocabulary)\n X_1 = vect.fit_transform(ALL_FOOD_DOCS)\n X_2 = vect.transform(ALL_FOOD_DOCS)\n assert_array_almost_equal(X_1.toarray(), X_2.toarray())\n assert_true(vect.fixed_vocabulary_)\n\n\ndef test_pickling_vectorizer():\n instances = [\n HashingVectorizer(),\n HashingVectorizer(norm='l1'),\n HashingVectorizer(binary=True),\n HashingVectorizer(ngram_range=(1, 2)),\n CountVectorizer(),\n CountVectorizer(preprocessor=strip_tags),\n CountVectorizer(analyzer=lazy_analyze),\n 
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),\n CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),\n TfidfVectorizer(),\n TfidfVectorizer(analyzer=lazy_analyze),\n TfidfVectorizer().fit(JUNK_FOOD_DOCS),\n ]\n\n for orig in instances:\n s = pickle.dumps(orig)\n copy = pickle.loads(s)\n assert_equal(type(copy), orig.__class__)\n assert_equal(copy.get_params(), orig.get_params())\n assert_array_equal(\n copy.fit_transform(JUNK_FOOD_DOCS).toarray(),\n orig.fit_transform(JUNK_FOOD_DOCS).toarray())\n\n\ndef test_stop_words_removal():\n # Ensure that deleting the stop_words_ attribute doesn't affect transform\n\n fitted_vectorizers = (\n TfidfVectorizer().fit(JUNK_FOOD_DOCS),\n CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),\n CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)\n )\n\n for vect in fitted_vectorizers:\n vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n vect.stop_words_ = None\n stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n delattr(vect, 'stop_words_')\n stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n assert_array_equal(stop_None_transform, vect_transform)\n assert_array_equal(stop_del_transform, vect_transform)\n\n\ndef test_pickling_transformer():\n X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)\n orig = TfidfTransformer().fit(X)\n s = pickle.dumps(orig)\n copy = pickle.loads(s)\n assert_equal(type(copy), orig.__class__)\n assert_array_equal(\n copy.fit_transform(X).toarray(),\n orig.fit_transform(X).toarray())\n\n\ndef test_non_unique_vocab():\n vocab = ['a', 'b', 'c', 'a', 'a']\n vect = CountVectorizer(vocabulary=vocab)\n assert_raises(ValueError, vect.fit, [])\n\n\ndef test_hashingvectorizer_nan_in_docs():\n # np.nan can appear when using pandas to load text fields from a csv file\n # with missing values.\n message = \"np.nan is an invalid document, expected byte or unicode string.\"\n exception = ValueError\n\n def func():\n hv = HashingVectorizer()\n hv.fit_transform(['hello world', np.nan, 'hello hello'])\n\n assert_raise_message(exception, message, func)\n\n\ndef test_tfidfvectorizer_binary():\n # Non-regression test: TfidfVectorizer used to ignore its \"binary\" param.\n v = TfidfVectorizer(binary=True, use_idf=False, norm=None)\n assert_true(v.binary)\n\n X = v.fit_transform(['hello world', 'hello hello']).toarray()\n assert_array_equal(X.ravel(), [1, 1, 1, 0])\n X2 = v.transform(['hello world', 'hello hello']).toarray()\n assert_array_equal(X2.ravel(), [1, 1, 1, 0])\n\n\ndef test_tfidfvectorizer_export_idf():\n vect = TfidfVectorizer(use_idf=True)\n vect.fit(JUNK_FOOD_DOCS)\n assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)\n\n\ndef test_vectorizer_vocab_clone():\n vect_vocab = TfidfVectorizer(vocabulary=[\"the\"])\n vect_vocab_clone = clone(vect_vocab)\n vect_vocab.fit(ALL_FOOD_DOCS)\n vect_vocab_clone.fit(ALL_FOOD_DOCS)\n assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)\n","license":"bsd-3-clause"} {"repo_name":"IssamLaradji\/scikit-learn","path":"sklearn\/qda.py","copies":"15","size":"7139","content":"\"\"\"\nQuadratic Discriminant Analysis\n\"\"\"\n\n# Author: Matthieu Perrot \n#\n# License: BSD 3 clause\n\nimport warnings\n\nimport numpy as np\n\nfrom .base import BaseEstimator, ClassifierMixin\nfrom .externals.six.moves import xrange\nfrom .utils import check_array, check_X_y\n\n__all__ = ['QDA']\n\n\nclass QDA(BaseEstimator, ClassifierMixin):\n \"\"\"\n Quadratic Discriminant Analysis (QDA)\n\n A classifier with a quadratic 
decision boundary, generated\n by fitting class conditional densities to the data\n and using Bayes' rule.\n\n The model fits a Gaussian density to each class.\n\n Parameters\n ----------\n priors : array, optional, shape = [n_classes]\n Priors on classes\n\n reg_param : float, optional\n Regularizes the covariance estimate as\n ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``\n\n Attributes\n ----------\n covariances_ : list of array-like, shape = [n_features, n_features]\n Covariance matrices of each class.\n\n means_ : array-like, shape = [n_classes, n_features]\n Class means.\n\n priors_ : array-like, shape = [n_classes]\n Class priors (sum to 1).\n\n rotations_ : list of arrays\n For each class an array of shape [n_samples, n_samples], the\n rotation of the Gaussian distribution, i.e. its principal axis.\n\n scalings_ : array-like, shape = [n_classes, n_features]\n Contains the scaling of the Gaussian\n distributions along the principal axes for each\n class, i.e. the variance in the rotated coordinate system.\n\n Examples\n --------\n >>> from sklearn.qda import QDA\n >>> import numpy as np\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> clf = QDA()\n >>> clf.fit(X, y)\n QDA(priors=None, reg_param=0.0)\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n\n See also\n --------\n sklearn.lda.LDA: Linear discriminant analysis\n \"\"\"\n\n def __init__(self, priors=None, reg_param=0.):\n self.priors = np.asarray(priors) if priors is not None else None\n self.reg_param = reg_param\n\n def fit(self, X, y, store_covariances=False, tol=1.0e-4):\n \"\"\"\n Fit the QDA model according to the given training data and parameters.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n y : array, shape = [n_samples]\n Target values (integers)\n\n store_covariances : boolean\n If True the covariance matrices are computed and stored in the\n `self.covariances_` attribute.\n \"\"\"\n X, y = check_X_y(X, y)\n self.classes_, y = np.unique(y, return_inverse=True)\n n_samples, n_features = X.shape\n n_classes = len(self.classes_)\n if n_classes < 2:\n raise ValueError('y has less than 2 classes')\n if self.priors is None:\n self.priors_ = np.bincount(y) \/ float(n_samples)\n else:\n self.priors_ = self.priors\n\n cov = None\n if store_covariances:\n cov = []\n means = []\n scalings = []\n rotations = []\n for ind in xrange(n_classes):\n Xg = X[y == ind, :]\n meang = Xg.mean(0)\n means.append(meang)\n Xgc = Xg - meang\n # Xgc = U * S * V.T\n U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)\n rank = np.sum(S > tol)\n if rank < n_features:\n warnings.warn(\"Variables are collinear\")\n S2 = (S ** 2) \/ (len(Xg) - 1)\n S2 = ((1 - self.reg_param) * S2) + self.reg_param\n if store_covariances:\n # cov = V * (S^2 \/ (n-1)) * V.T\n cov.append(np.dot(S2 * Vt.T, Vt))\n scalings.append(S2)\n rotations.append(Vt.T)\n if store_covariances:\n self.covariances_ = cov\n self.means_ = np.asarray(means)\n self.scalings_ = np.asarray(scalings)\n self.rotations_ = rotations\n return self\n\n def _decision_function(self, X):\n X = check_array(X)\n norm2 = []\n for i in range(len(self.classes_)):\n R = self.rotations_[i]\n S = self.scalings_[i]\n Xm = X - self.means_[i]\n X2 = np.dot(Xm, R * (S ** (-0.5)))\n norm2.append(np.sum(X2 ** 2, 1))\n norm2 = np.array(norm2).T # shape = [len(X), n_classes]\n return (-0.5 * (norm2 + 
np.sum(np.log(self.scalings_), 1))\n + np.log(self.priors_))\n\n def decision_function(self, X):\n \"\"\"Apply decision function to an array of samples.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Array of samples (test vectors).\n\n Returns\n -------\n C : array, shape = [n_samples, n_classes] or [n_samples,]\n Decision function values related to each class, per sample.\n In the two-class case, the shape is [n_samples,], giving the\n log likelihood ratio of the positive class.\n \"\"\"\n dec_func = self._decision_function(X)\n # handle special case of two classes\n if len(self.classes_) == 2:\n return dec_func[:, 1] - dec_func[:, 0]\n return dec_func\n\n def predict(self, X):\n \"\"\"Perform classification on an array of test vectors X.\n\n The predicted class C for each sample in X is returned.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n C : array, shape = [n_samples]\n \"\"\"\n d = self._decision_function(X)\n y_pred = self.classes_.take(d.argmax(1))\n return y_pred\n\n def predict_proba(self, X):\n \"\"\"Return posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Array of samples\/test vectors.\n\n Returns\n -------\n C : array, shape = [n_samples, n_classes]\n Posterior probabilities of classification per class.\n \"\"\"\n values = self._decision_function(X)\n # compute the likelihood of the underlying gaussian models\n # up to a multiplicative constant.\n likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])\n # compute posterior probabilities\n return likelihood \/ likelihood.sum(axis=1)[:, np.newaxis]\n\n def predict_log_proba(self, X):\n \"\"\"Return posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Array of samples\/test vectors.\n\n Returns\n -------\n C : array, shape = [n_samples, n_classes]\n Posterior log-probabilities of classification per class.\n \"\"\"\n # XXX : can do better to avoid precision overflows\n probas_ = self.predict_proba(X)\n return np.log(probas_)\n","license":"bsd-3-clause"} {"repo_name":"elvandy\/nltools","path":"nltools\/data\/adjacency.py","copies":"1","size":"34227","content":"from __future__ import division\n\n'''\nThis data class is for working with similarity\/dissimilarity matrices\n'''\n\n__author__ = [\"Luke Chang\"]\n__license__ = \"MIT\"\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport six\nfrom copy import deepcopy\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.manifold import MDS\nfrom sklearn.utils import check_random_state\nfrom scipy.spatial.distance import squareform\nfrom scipy.stats import ttest_1samp\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom nltools.stats import (correlation_permutation,\n one_sample_permutation,\n two_sample_permutation,\n summarize_bootstrap,\n matrix_permutation)\nfrom nltools.stats import regress as regression\nfrom nltools.plotting import (plot_stacked_adjacency,\n plot_silhouette)\nfrom nltools.utils import (all_same,\n attempt_to_import,\n concatenate,\n _bootstrap_apply_func)\nfrom .design_matrix import Design_Matrix\nfrom joblib import Parallel, delayed\n\n# Optional dependencies\nnx = attempt_to_import('networkx', 'nx')\n\nMAX_INT = np.iinfo(np.int32).max\n\nclass Adjacency(object):\n\n '''\n Adjacency is a class to represent Adjacency matrices as a vector rather\n than a 2-dimensional matrix. 
This makes it easier to perform data\n manipulation and analyses.\n\n Args:\n data: pandas data instance or list of files\n matrix_type: (str) type of matrix. Possible values include:\n ['distance','similarity','directed','distance_flat',\n 'similarity_flat','directed_flat']\n Y: Pandas DataFrame of training labels\n **kwargs: Additional keyword arguments\n\n '''\n\n def __init__(self, data=None, Y=None, matrix_type=None, labels=None,\n **kwargs):\n if matrix_type is not None:\n if matrix_type.lower() not in ['distance','similarity','directed',\n 'distance_flat','similarity_flat',\n 'directed_flat']:\n raise ValueError(\"matrix_type must be [None,'distance', \"\n \"'similarity','directed','distance_flat', \"\n \"'similarity_flat','directed_flat']\")\n\n if data is None:\n self.data = np.array([])\n self.matrix_type = 'empty'\n self.is_single_matrix = np.nan\n self.issymmetric = np.nan\n elif isinstance(data, list):\n if isinstance(data[0], Adjacency):\n tmp = concatenate(data)\n for item in ['data', 'matrix_type', 'Y','issymmetric']:\n setattr(self, item, getattr(tmp,item))\n else:\n d_all = []; symmetric_all = []; matrix_type_all = []\n for d in data:\n data_tmp, issymmetric_tmp, matrix_type_tmp, _ = self._import_single_data(d, matrix_type=matrix_type)\n d_all.append(data_tmp)\n symmetric_all.append(issymmetric_tmp)\n matrix_type_all.append(matrix_type_tmp)\n if not all_same(symmetric_all):\n raise ValueError('Not all matrices are of the same '\n 'symmetric type.')\n if not all_same(matrix_type_all):\n raise ValueError('Not all matrices are of the same matrix '\n 'type.')\n self.data = np.array(d_all)\n self.issymmetric = symmetric_all[0]\n self.matrix_type = matrix_type_all[0]\n self.is_single_matrix = False\n else:\n self.data, self.issymmetric, self.matrix_type, self.is_single_matrix = self._import_single_data(data, matrix_type=matrix_type)\n\n if Y is not None:\n if isinstance(Y, six.string_types):\n if os.path.isfile(Y):\n Y = pd.read_csv(Y, header=None, index_col=None)\n if isinstance(Y, pd.DataFrame):\n if self.data.shape[0] != len(Y):\n raise ValueError(\"Y does not match the correct size of \"\n \"data\")\n self.Y = Y\n else:\n raise ValueError(\"Make sure Y is a pandas data frame.\")\n else:\n self.Y = pd.DataFrame()\n\n if labels is not None:\n if not isinstance(labels, (list, np.ndarray)):\n raise ValueError( \"Make sure labels is a list or numpy array.\")\n if self.is_single_matrix:\n if len(labels) != self.square_shape()[0]:\n raise ValueError('Make sure the length of labels matches the shape of data.')\n self.labels = deepcopy(labels)\n else:\n if len(labels) != len(self):\n if len(labels) != self.square_shape()[0]:\n raise ValueError('Make sure length of labels either '\n 'matches the number of Adjacency '\n 'matrices or the size of a single '\n 'matrix.')\n else:\n self.labels = list(labels) * len(self)\n else:\n if np.all(np.array([len(x) for x in labels]) !=self.square_shape()[0]):\n raise ValueError(\"All lists of labels must be same length as shape of data.\")\n self.labels = deepcopy(labels)\n else:\n self.labels = None\n\n def __repr__(self):\n return (\"%s.%s(shape=%s, square_shape=%s, Y=%s, is_symmetric=%s,\"\n \"matrix_type=%s)\") % (\n self.__class__.__module__,\n self.__class__.__name__,\n self.shape(),\n self.square_shape(),\n len(self.Y),\n self.issymmetric,\n self.matrix_type)\n\n def __getitem__(self,index):\n new = self.copy()\n if isinstance(index, int):\n new.data = np.array(self.data[index, :]).flatten()\n new.is_single_matrix = True\n else:\n new.data 
= np.array(self.data[index, :])\n if not self.Y.empty:\n new.Y = self.Y.iloc[index]\n return new\n\n def __len__(self):\n if self.is_single_matrix:\n return 1\n else:\n return self.data.shape[0]\n\n def __iter__(self):\n for x in range(len(self)):\n yield self[x]\n\n def __add__(self, y):\n new = deepcopy(self)\n if isinstance(y, (int, float)):\n new.data = new.data + y\n if isinstance(y, Adjacency):\n if self.shape() != y.shape():\n raise ValueError('Both Adjacency() instances need to be the '\n 'same shape.')\n new.data = new.data + y.data\n return new\n\n def __sub__(self, y):\n new = deepcopy(self)\n if isinstance(y, (int, float)):\n new.data = new.data - y\n if isinstance(y, Adjacency):\n if self.shape() != y.shape():\n raise ValueError('Both Adjacency() instances need to be the '\n 'same shape.')\n new.data = new.data - y.data\n return new\n\n def __mul__(self, y):\n new = deepcopy(self)\n if isinstance(y, (int, float)):\n new.data = new.data * y\n if isinstance(y, Adjacency):\n if self.shape() != y.shape():\n raise ValueError('Both Adjacency() instances need to be the '\n 'same shape.')\n new.data = np.multiply(new.data, y.data)\n return new\n\n def _import_single_data(self, data, matrix_type=None):\n ''' Helper function to import single data matrix.'''\n\n if isinstance(data, six.string_types):\n if os.path.isfile(data):\n data = pd.read_csv(data)\n else:\n raise ValueError('Make sure you have specified a valid file '\n 'path.')\n\n def test_is_single_matrix(data):\n if len(data.shape) == 1:\n return True\n else:\n return False\n\n if matrix_type is not None:\n if matrix_type.lower() == 'distance_flat':\n matrix_type = 'distance'\n data = np.array(data)\n issymmetric = True\n is_single_matrix = test_is_single_matrix(data)\n elif matrix_type.lower() == 'similarity_flat':\n matrix_type = 'similarity'\n data = np.array(data)\n issymmetric = True\n is_single_matrix = test_is_single_matrix(data)\n elif matrix_type.lower() == 'directed_flat':\n matrix_type = 'directed'\n data = np.array(data).flatten()\n issymmetric = False\n is_single_matrix = test_is_single_matrix(data)\n elif matrix_type.lower() in ['distance', 'similarity', 'directed']:\n if data.shape[0] != data.shape[1]:\n raise ValueError('Data matrix must be square')\n data = np.array(data)\n matrix_type = matrix_type.lower()\n if matrix_type in ['distance', 'similarity']:\n issymmetric = True\n data = data[np.triu_indices(data.shape[0], k=1)]\n else:\n issymmetric = False\n if isinstance(data, pd.DataFrame):\n data = data.values.flatten()\n elif isinstance(data, np.ndarray):\n data = data.flatten()\n is_single_matrix = True\n else:\n if len(data.shape) == 1: # Single Vector\n try:\n data = squareform(data)\n except ValueError:\n print('Data is not flattened upper triangle from '\n 'similarity\/distance matrix or flattened directed '\n 'matrix.')\n is_single_matrix = True\n elif data.shape[0] == data.shape[1]: # Square Matrix\n is_single_matrix = True\n else: # Rectangular Matrix\n data_all = deepcopy(data)\n try:\n data = squareform(data_all[0, :])\n except ValueError:\n print('Data is not flattened upper triangle from multiple '\n 'similarity\/distance matrices or flattened directed '\n 'matrices.')\n is_single_matrix = False\n\n # Test if matrix is symmetrical\n if np.all(data[np.triu_indices(data.shape[0], k=1)] == data.T[np.triu_indices(data.shape[0], k=1)]):\n issymmetric = True\n else:\n issymmetric = False\n\n # Determine matrix type\n if issymmetric:\n if np.sum(np.diag(data)) == 0:\n matrix_type = 'distance'\n elif 
np.sum(np.diag(data)) == data.shape[0]:\n matrix_type = 'similarity'\n data = data[np.triu_indices(data.shape[0], k=1)]\n else:\n matrix_type = 'directed'\n data = data.flatten()\n\n if not is_single_matrix:\n data = data_all\n\n return (data, issymmetric, matrix_type, is_single_matrix)\n\n def isempty(self):\n '''Check if Adjacency object is empty'''\n return bool(self.matrix_type is 'empty')\n\n def squareform(self):\n '''Convert adjacency back to squareform'''\n if self.issymmetric:\n if self.is_single_matrix:\n return squareform(self.data)\n else:\n return [squareform(x.data) for x in self]\n else:\n if self.is_single_matrix:\n return self.data.reshape(int(np.sqrt(self.data.shape[0])),\n int(np.sqrt(self.data.shape[0])))\n else:\n return [x.data.reshape(int(np.sqrt(x.data.shape[0])),\n int(np.sqrt(x.data.shape[0]))) for x in self]\n\n def plot(self, limit=3, *args, **kwargs):\n ''' Create Heatmap of Adjacency Matrix'''\n\n if self.is_single_matrix:\n f, a = plt.subplots(nrows=1, figsize=(7, 5))\n if self.labels is None:\n sns.heatmap(self.squareform(), square=True, ax=a,\n *args, **kwargs)\n else:\n sns.heatmap(self.squareform(), square=True, ax=a,\n xticklabels=self.labels,\n yticklabels=self.labels,\n *args, **kwargs)\n else:\n n_subs = np.minimum(len(self), limit)\n f, a = plt.subplots(nrows=n_subs, figsize=(7, len(self)*5))\n if self.labels is None:\n for i in range(n_subs):\n sns.heatmap(self[i].squareform(), square=True, ax=a[i],\n *args, **kwargs)\n else:\n for i in range(n_subs):\n sns.heatmap(self[i].squareform(), square=True,\n xticklabels=self.labels[i],\n yticklabels=self.labels[i],\n ax=a[i], *args, **kwargs)\n return f\n\n def mean(self, axis=0):\n ''' Calculate mean of Adjacency\n\n Args:\n axis: calculate mean over features (0) or data (1).\n For data it will be on upper triangle.\n\n Returns:\n mean: float if single, adjacency if axis=0, np.array if axis=1\n and multiple\n\n '''\n\n if self.is_single_matrix:\n return np.mean(self.data)\n else:\n if axis == 0:\n return Adjacency(data=np.mean(self.data, axis=axis),\n matrix_type=self.matrix_type + '_flat')\n elif axis == 1:\n return np.mean(self.data, axis=axis)\n\n def std(self, axis=0):\n ''' Calculate standard deviation of Adjacency\n\n Args:\n axis: calculate std over features (0) or data (1).\n For data it will be on upper triangle.\n\n Returns:\n std: float if single, adjacency if axis=0, np.array if axis=1 and\n multiple\n\n '''\n\n if self.is_single_matrix:\n return np.std(self.data)\n else:\n if axis == 0:\n return Adjacency(data=np.std(self.data, axis=axis),\n matrix_type=self.matrix_type + '_flat')\n elif axis == 1:\n return np.std(self.data, axis=axis)\n\n def shape(self):\n ''' Calculate shape of data. '''\n return self.data.shape\n\n def square_shape(self):\n ''' Calculate shape of squareform data. 
'''\n if self.matrix_type is 'empty':\n return np.array([])\n else:\n if self.is_single_matrix:\n return self.squareform().shape\n else:\n return self[0].squareform().shape\n\n def copy(self):\n ''' Create a copy of Adjacency object.'''\n return deepcopy(self)\n\n def append(self, data):\n ''' Append data to Adjacency instance\n\n Args:\n data: Adjacency instance to append\n\n Returns:\n out: new appended Adjacency instance\n\n '''\n\n if not isinstance(data, Adjacency):\n raise ValueError('Make sure data is a Adjacency instance.')\n\n if self.isempty():\n out = data.copy()\n else:\n out = self.copy()\n if self.square_shape() != data.square_shape():\n raise ValueError('Data is not the same shape as Adjacency '\n 'instance.')\n\n out.data = np.vstack([self.data, data.data])\n out.is_single_matrix = False\n if out.Y.size:\n out.Y = self.Y.append(data.Y)\n\n return out\n\n def write(self, file_name, method='long'):\n ''' Write out Adjacency object to csv file.\n\n Args:\n file_name (str): name of file name to write\n method (str): method to write out data ['long','square']\n\n '''\n if method not in ['long', 'square']:\n raise ValueError('Make sure method is [\"long\",\"square\"].')\n if self.is_single_matrix:\n if method is 'long':\n out = pd.DataFrame(self.data).to_csv(file_name, index=None)\n elif method is 'square':\n out = pd.DataFrame(self.squareform()).to_csv(file_name,\n index=None)\n else:\n if method is 'long':\n out = pd.DataFrame(self.data).to_csv(file_name, index=None)\n elif method is 'square':\n raise NotImplementedError('Need to decide how we should write '\n 'out multiple matrices. As separate '\n 'files?')\n\n def similarity(self, data, plot=False, perm_type='2d', n_permute=5000, metric='spearman', **kwargs):\n ''' Calculate similarity between two Adjacency matrices.\n Default is to use spearman correlation and permutation test.\n Args:\n data: Adjacency data, or 1-d array same size as self.data\n perm_type: '1d','2d', or None\n metric: 'spearman','pearson','kendall'\n '''\n if not isinstance(data, Adjacency):\n data2 = Adjacency(data)\n else:\n data2 = data.copy()\n if perm_type is None:\n n_permute=0\n similarity_func = correlation_permutation\n elif perm_type == '1d':\n similarity_func = correlation_permutation\n elif perm_type == '2d':\n similarity_func = matrix_permutation\n if self.is_single_matrix:\n if plot:\n plot_stacked_adjacency(self, data)\n return similarity_func(self.data, data2.data, metric=metric, n_permute=n_permute, **kwargs)\n else:\n if plot:\n _, a = plt.subplots(len(self))\n for i in a:\n plot_stacked_adjacency(self, data, ax=i)\n return [similarity_func(x.data, data2.data, metric=metric, n_permute=n_permute, **kwargs) for x in self]\n\n def distance(self, method='correlation', **kwargs):\n ''' Calculate distance between images within an Adjacency() instance.\n\n Args:\n method: type of distance metric (can use any scikit learn or\n sciypy metric)\n\n Returns:\n dist: Outputs a 2D distance matrix.\n\n '''\n return Adjacency(pairwise_distances(self.data, metric=method, **kwargs),\n matrix_type='distance')\n\n def threshold(self, upper=None, lower=None, binarize=False):\n '''Threshold Adjacency instance. Provide upper and lower values or\n percentages to perform two-sided thresholding. Binarize will return\n a mask image respecting thresholds if provided, otherwise respecting\n every non-zero value.\n\n Args:\n upper: (float or str) Upper cutoff for thresholding. 
If string\n will interpret as percentile; can be None for one-sided\n thresholding.\n lower: (float or str) Lower cutoff for thresholding. If string\n will interpret as percentile; can be None for one-sided\n thresholding.\n binarize (bool): return binarized image respecting thresholds if\n provided, otherwise binarize on every non-zero value;\n default False\n\n Returns:\n Adjacency: thresholded Adjacency instance\n\n '''\n\n b = self.copy()\n if isinstance(upper, six.string_types):\n if upper[-1] is '%':\n upper = np.percentile(b.data, float(upper[:-1]))\n if isinstance(lower, six.string_types):\n if lower[-1] is '%':\n lower = np.percentile(b.data, float(lower[:-1]))\n\n if upper and lower:\n b.data[(b.data < upper) & (b.data > lower)] = 0\n elif upper and not lower:\n b.data[b.data < upper] = 0\n elif lower and not upper:\n b.data[b.data > lower] = 0\n if binarize:\n b.data[b.data != 0] = 1\n return b\n\n def to_graph(self):\n ''' Convert Adjacency into networkx graph. only works on\n single_matrix for now.'''\n\n if self.is_single_matrix:\n if self.matrix_type == 'directed':\n G = nx.DiGraph(self.squareform())\n else:\n G = nx.Graph(self.squareform())\n if self.labels is not None:\n labels = {x:y for x,y in zip(G.nodes,self.labels)}\n nx.relabel_nodes(G, labels, copy=False)\n return G\n else:\n raise NotImplementedError('This function currently only works on '\n 'single matrices.')\n\n def ttest(self, permutation=False, **kwargs):\n ''' Calculate ttest across samples.\n\n Args:\n permutation: (bool) Run ttest as permutation. Note this can be very slow.\n\n Returns:\n out: (dict) contains Adjacency instances of t values (or mean if\n running permutation) and Adjacency instance of p values.\n\n '''\n if self.is_single_matrix:\n raise ValueError('t-test cannot be run on single matrices.')\n\n if permutation:\n t = []; p = []\n for i in range(self.data.shape[1]):\n stats = one_sample_permutation(self.data[:, i], **kwargs)\n t.append(stats['mean'])\n p.append(stats['p'])\n t = Adjacency(np.array(t))\n p = Adjacency(np.array(p))\n else:\n t = self.mean().copy()\n p = deepcopy(t)\n t.data, p.data = ttest_1samp(self.data, 0, 0)\n\n return {'t': t, 'p':p}\n\n def plot_label_distance(self, labels=None, ax=None):\n ''' Create a violin plot indicating within and between label distance\n\n Args:\n labels (np.array): numpy array of labels to plot\n\n Returns:\n violin plot handles\n\n '''\n\n if not self.is_single_matrix:\n raise ValueError('This function only works on single adjacency '\n 'matrices.')\n\n distance = pd.DataFrame(self.squareform())\n\n if labels is None:\n labels = np.array(deepcopy(self.labels))\n else:\n if len(labels) != distance.shape[0]:\n raise ValueError('Labels must be same length as distance matrix')\n\n out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)\n for i in np.unique(labels):\n tmp_w = pd.DataFrame(columns=out.columns, index=None)\n tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]\n tmp_w['Type'] = 'Within'\n tmp_w['Group'] = i\n tmp_b = pd.DataFrame(columns=out.columns, index=None)\n tmp_b['Distance'] = distance.loc[labels != i, labels != i].values[np.triu_indices(sum(labels == i), k=1)]\n tmp_b['Type'] = 'Between'\n tmp_b['Group'] = i\n out = out.append(tmp_w).append(tmp_b)\n f = sns.violinplot(x=\"Group\", y=\"Distance\", hue=\"Type\", data=out, split=True, inner='quartile',\n palette={\"Within\": \"lightskyblue\", \"Between\": \"red\"}, ax=ax)\n f.set_ylabel('Average Distance')\n 
f.set_title('Average Group Distance')\n return f\n\n def stats_label_distance(self, labels=None, n_permute=5000, n_jobs=-1):\n ''' Calculate permutation tests on within and between label distance.\n\n Args:\n labels (np.array): numpy array of labels to plot\n n_permute (int): number of permutations to run (default=5000)\n\n Returns:\n dict: dictionary of within and between group differences\n and p-values\n\n '''\n\n if not self.is_single_matrix:\n raise ValueError('This function only works on single adjacency '\n 'matrices.')\n\n distance = pd.DataFrame(self.squareform())\n\n if labels is not None:\n labels = deepcopy(self.labels)\n else:\n if len(labels) != distance.shape[0]:\n raise ValueError('Labels must be same length as distance matrix')\n\n out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)\n for i in np.unique(labels):\n tmp_w = pd.DataFrame(columns=out.columns, index=None)\n tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]\n tmp_w['Type'] = 'Within'\n tmp_w['Group'] = i\n tmp_b = pd.DataFrame(columns=out.columns, index=None)\n tmp_b['Distance'] = distance.loc[labels == i, labels != i].values.flatten()\n tmp_b['Type'] = 'Between'\n tmp_b['Group'] = i\n out = out.append(tmp_w).append(tmp_b)\n stats = dict()\n for i in np.unique(labels):\n # Within group test\n tmp1 = out.loc[(out['Group'] == i) & (out['Type'] == 'Within'), 'Distance']\n tmp2 = out.loc[(out['Group'] == i) & (out['Type'] == 'Between'), 'Distance']\n stats[str(i)] = two_sample_permutation(tmp1, tmp2,\n n_permute=n_permute, n_jobs=n_jobs)\n return stats\n\n def plot_silhouette(self, labels=None, ax=None, permutation_test=True,\n n_permute=5000, **kwargs):\n '''Create a silhouette plot'''\n distance = pd.DataFrame(self.squareform())\n\n if labels is None:\n labels = np.array(deepcopy(self.labels))\n else:\n if len(labels) != distance.shape[0]:\n raise ValueError('Labels must be same length as distance matrix')\n\n (f, outAll) = plot_silhouette(distance, labels, ax=None,\n permutation_test=True,\n n_permute=5000, **kwargs)\n return (f,outAll)\n\n def bootstrap(self, function, n_samples=5000, save_weights=False,\n n_jobs=-1, random_state=None, *args, **kwargs):\n '''Bootstrap an Adjacency method.\n\n Example Useage:\n b = dat.bootstrap('mean', n_samples=5000)\n b = dat.bootstrap('predict', n_samples=5000, algorithm='ridge')\n b = dat.bootstrap('predict', n_samples=5000, save_weights=True)\n\n Args:\n function: (str) method to apply to data for each bootstrap\n n_samples: (int) number of samples to bootstrap with replacement\n save_weights: (bool) Save each bootstrap iteration\n (useful for aggregating many bootstraps on a cluster)\n n_jobs: (int) The number of CPUs to use to do the computation.\n -1 means all CPUs.Returns:\n output: summarized studentized bootstrap output\n\n '''\n\n random_state = check_random_state(random_state)\n seeds = random_state.randint(MAX_INT, size=n_samples)\n bootstrapped = Parallel(n_jobs=n_jobs)(\n delayed(_bootstrap_apply_func)(self,\n function, random_state=seeds[i], *args, **kwargs)\n for i in range(n_samples))\n bootstrapped = Adjacency(bootstrapped)\n return summarize_bootstrap(bootstrapped, save_weights=save_weights)\n\n def plot_mds(self, n_components=2, metric=True, labels_color=None,\n cmap=plt.cm.hot_r, n_jobs=-1, view=(30, 20),\n figsize = [12,8], ax = None, *args, **kwargs):\n ''' Plot Multidimensional Scaling\n\n Args:\n n_components: (int) Number of dimensions to project (can be 2 or 3)\n metric: (bool) 
Perform metric or non-metric dimensional scaling; default\n labels_color: (str) list of colors for labels, if len(1) then make all same color\n n_jobs: (int) Number of parallel jobs\n view: (tuple) view for 3-Dimensional plot; default (30,20)\n\n Returns:\n fig: returns matplotlib figure\n '''\n\n if self.matrix_type != 'distance':\n raise ValueError(\"MDS only works on distance matrices.\")\n if not self.is_single_matrix:\n raise ValueError(\"MDS only works on single matrices.\")\n if n_components not in [2,3]:\n raise ValueError('Cannot plot {0}-d image'.format(n_components))\n if labels_color is not None:\n if self.labels is None:\n raise ValueError(\"Make sure that Adjacency object has labels specified.\")\n if len(self.labels) != len(labels_color):\n raise ValueError(\"Length of labels_color must match self.labels.\")\n\n # Run MDS\n mds = MDS(n_components=n_components, metric=metric, n_jobs=n_jobs,\n dissimilarity=\"precomputed\", *args, **kwargs)\n proj = mds.fit_transform(self.squareform())\n\n # Create Plot\n if ax == None: # Create axis\n returnFig = True\n fig = plt.figure(figsize=figsize)\n if n_components == 3:\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(*view)\n elif n_components == 2:\n ax = fig.add_subplot(111)\n\n # Plot dots\n if n_components == 3:\n ax.scatter(proj[:, 0], proj[:, 1], proj[:, 2], s=1, c='k')\n elif n_components == 2:\n ax.scatter(proj[:, 0], proj[:, 1], s=1, c='k')\n\n # Plot labels\n if labels_color is None:\n labels_color = ['black'] * len(self.labels)\n if n_components == 3:\n for ((x, y, z), label, color) in zip(proj, self.labels, labels_color):\n ax.text(x, y, z, label, color='white', #color,\n bbox=dict(facecolor=color, alpha=1, boxstyle=\"round,pad=0.3\"))\n else:\n for ((x, y), label, color) in zip(proj, self.labels, labels_color):\n ax.text(x, y, label, color='white', #color,\n bbox=dict(facecolor=color, alpha=1, boxstyle=\"round,pad=0.3\"))\n\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n if returnFig:\n return fig\n\n def distance_to_similarity(self, beta=1):\n '''Convert distance matrix to similarity matrix\n\n Args:\n beta: parameter to scale exponential function (default: 1)\n\n Returns:\n Adjacency object\n\n '''\n if self.matrix_type == 'distance':\n return Adjacency(np.exp(-beta*self.squareform()\/self.squareform().std()),\n labels=self.labels, matrix_type='similarity')\n else:\n raise ValueError('Matrix is not a distance matrix.')\n\n def similarity_to_distance(self):\n '''Convert similarity matrix to distance matrix'''\n if self.matrix_type == 'similarity':\n return Adjacency(1-self.squareform(),\n labels=self.labels, matrix_type='distance')\n else:\n raise ValueError('Matrix is not a similarity matrix.')\n\n def within_cluster_mean(self, clusters = None):\n ''' This function calculates mean within cluster labels\n\n Args:\n clusters: list of cluster labels\n Returns:\n dict: within cluster means\n '''\n\n distance=pd.DataFrame(self.squareform())\n clusters = np.array(clusters)\n\n if len(clusters) != distance.shape[0]:\n raise ValueError('Cluster labels must be same length as distance matrix')\n\n out = pd.DataFrame(columns=['Mean','Label'],index=None)\n out = {}\n for i in list(set(clusters)):\n out[i] = np.mean(distance.loc[clusters==i,clusters==i].values[np.triu_indices(sum(clusters==i),k=1)])\n return out\n\n def regress(self, X, mode='ols', **kwargs):\n ''' Run a regression on an adjacency instance.\n You can decompose an adjacency instance with another adjacency instance.\n You can also decompose 
each pixel by passing a design_matrix instance.\n\n Args:\n X: Design matrix can be an Adjacency or Design_Matrix instance\n method: type of regression (default: ols)\n\n Returns:\n\n '''\n\n stats = {}\n if isinstance(X, Adjacency):\n if X.square_shape()[0] != self.square_shape()[0]:\n raise ValueError('Adjacency instances must be the same size.')\n b,t,p,_,res = regression(X.data.T, self.data, mode=mode, **kwargs)\n stats['beta'],stats['t'],stats['p'],stats['residual'] = (b,t,p,res)\n elif isinstance(X, Design_Matrix):\n if X.shape[0] != len(self):\n raise ValueError('Design matrix must have same number of observations as Adjacency')\n b,t,p,df,res = regression(X, self.data, mode=mode, **kwargs)\n mode = 'ols'\n stats['beta'], stats['t'], stats['p'] = [x for x in self[:3]]\n stats['beta'].data, stats['t'].data, stats['p'].data = b.squeeze(), t.squeeze(), p.squeeze()\n stats['residual'] = self.copy()\n stats['residual'].data = res\n else:\n raise ValueError('X must be a Design_Matrix or Adjacency Instance.')\n\n return stats\n","license":"mit"} {"repo_name":"tmhm\/scikit-learn","path":"examples\/svm\/plot_weighted_samples.py","copies":"188","size":"1943","content":"\"\"\"\n=====================\nSVM: Weighted samples\n=====================\n\nPlot decision function of a weighted dataset, where the size of points\nis proportional to its weight.\n\nThe sample weighting rescales the C parameter, which means that the classifier\nputs more emphasis on getting these points right. The effect might often be\nsubtle.\nTo emphasize the effect here, we particularly weight outliers, making the\ndeformation of the decision boundary very visible.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\n\n\ndef plot_decision_function(classifier, sample_weight, axis, title):\n # plot the decision function\n xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))\n\n Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n # plot the line, the points, and the nearest vectors to the plane\n axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)\n axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,\n cmap=plt.cm.bone)\n\n axis.axis('off')\n axis.set_title(title)\n\n\n# we create 20 points\nnp.random.seed(0)\nX = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]\nY = [1] * 10 + [-1] * 10\nsample_weight_last_ten = abs(np.random.randn(len(X)))\nsample_weight_constant = np.ones(len(X))\n# and bigger weights to some outliers\nsample_weight_last_ten[15:] *= 5\nsample_weight_last_ten[9] *= 15\n\n# for reference, first fit without class weights\n\n# fit the model\nclf_weights = svm.SVC()\nclf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)\n\nclf_no_weights = svm.SVC()\nclf_no_weights.fit(X, Y)\n\nfig, axes = plt.subplots(1, 2, figsize=(14, 6))\nplot_decision_function(clf_no_weights, sample_weight_constant, axes[0],\n \"Constant weights\")\nplot_decision_function(clf_weights, sample_weight_last_ten, axes[1],\n \"Modified weights\")\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"siutanwong\/scikit-learn","path":"examples\/cluster\/plot_mini_batch_kmeans.py","copies":"265","size":"4081","content":"\"\"\"\n====================================================================\nComparison of the K-Means and MiniBatchKMeans clustering algorithms\n====================================================================\n\nWe want to compare the performance of the MiniBatchKMeans 
and KMeans:\nthe MiniBatchKMeans is faster, but gives slightly different results (see\n:ref:`mini_batch_kmeans`).\n\nWe will cluster a set of data, first with KMeans and then with\nMiniBatchKMeans, and plot the results.\nWe will also plot the points that are labelled differently between the two\nalgorithms.\n\"\"\"\nprint(__doc__)\n\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn.metrics.pairwise import pairwise_distances_argmin\nfrom sklearn.datasets.samples_generator import make_blobs\n\n##############################################################################\n# Generate sample data\nnp.random.seed(0)\n\nbatch_size = 45\ncenters = [[1, 1], [-1, -1], [1, -1]]\nn_clusters = len(centers)\nX, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)\n\n##############################################################################\n# Compute clustering with Means\n\nk_means = KMeans(init='k-means++', n_clusters=3, n_init=10)\nt0 = time.time()\nk_means.fit(X)\nt_batch = time.time() - t0\nk_means_labels = k_means.labels_\nk_means_cluster_centers = k_means.cluster_centers_\nk_means_labels_unique = np.unique(k_means_labels)\n\n##############################################################################\n# Compute clustering with MiniBatchKMeans\n\nmbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,\n n_init=10, max_no_improvement=10, verbose=0)\nt0 = time.time()\nmbk.fit(X)\nt_mini_batch = time.time() - t0\nmbk_means_labels = mbk.labels_\nmbk_means_cluster_centers = mbk.cluster_centers_\nmbk_means_labels_unique = np.unique(mbk_means_labels)\n\n##############################################################################\n# Plot result\n\nfig = plt.figure(figsize=(8, 3))\nfig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)\ncolors = ['#4EACC5', '#FF9C34', '#4E9A06']\n\n# We want to have the same colors for the same cluster from the\n# MiniBatchKMeans and the KMeans algorithm. 
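# --- illustrative sketch, not part of the archived file above ---
# The example fits KMeans and MiniBatchKMeans separately, so their cluster
# indices need not agree; the next step pairs each KMeans center with its
# nearest MiniBatchKMeans center so both subplots can reuse the same colors.
# A minimal standalone demo of that pairing (centers below are made up):
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin

k_means_centers = np.array([[1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]])
mbk_centers = np.array([[-1.1, -0.9], [0.9, -1.1], [1.1, 0.9]])   # same clusters, shuffled order

# order[i] = index of the MiniBatchKMeans center closest to k_means_centers[i]
order = pairwise_distances_argmin(k_means_centers, mbk_centers)
print(order)   # [2 0 1]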
Let's pair the cluster centers per\n# closest one.\n\norder = pairwise_distances_argmin(k_means_cluster_centers,\n mbk_means_cluster_centers)\n\n# KMeans\nax = fig.add_subplot(1, 3, 1)\nfor k, col in zip(range(n_clusters), colors):\n my_members = k_means_labels == k\n cluster_center = k_means_cluster_centers[k]\n ax.plot(X[my_members, 0], X[my_members, 1], 'w',\n markerfacecolor=col, marker='.')\n ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=6)\nax.set_title('KMeans')\nax.set_xticks(())\nax.set_yticks(())\nplt.text(-3.5, 1.8, 'train time: %.2fs\\ninertia: %f' % (\n t_batch, k_means.inertia_))\n\n# MiniBatchKMeans\nax = fig.add_subplot(1, 3, 2)\nfor k, col in zip(range(n_clusters), colors):\n my_members = mbk_means_labels == order[k]\n cluster_center = mbk_means_cluster_centers[order[k]]\n ax.plot(X[my_members, 0], X[my_members, 1], 'w',\n markerfacecolor=col, marker='.')\n ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=6)\nax.set_title('MiniBatchKMeans')\nax.set_xticks(())\nax.set_yticks(())\nplt.text(-3.5, 1.8, 'train time: %.2fs\\ninertia: %f' %\n (t_mini_batch, mbk.inertia_))\n\n# Initialise the different array to all False\ndifferent = (mbk_means_labels == 4)\nax = fig.add_subplot(1, 3, 3)\n\nfor l in range(n_clusters):\n different += ((k_means_labels == k) != (mbk_means_labels == order[k]))\n\nidentic = np.logical_not(different)\nax.plot(X[identic, 0], X[identic, 1], 'w',\n markerfacecolor='#bbbbbb', marker='.')\nax.plot(X[different, 0], X[different, 1], 'w',\n markerfacecolor='m', marker='.')\nax.set_title('Difference')\nax.set_xticks(())\nax.set_yticks(())\n\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"BiaDarkia\/scikit-learn","path":"examples\/tree\/plot_iris.py","copies":"30","size":"2062","content":"\"\"\"\n================================================================\nPlot the decision surface of a decision tree on the iris dataset\n================================================================\n\nPlot the decision surface of a decision tree trained on pairs\nof features of the iris dataset.\n\nSee :ref:`decision tree ` for more information on the estimator.\n\nFor each pair of iris features, the decision tree learns decision\nboundaries made of combinations of simple thresholding rules inferred from\nthe training samples.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\n\n# Parameters\nn_classes = 3\nplot_colors = \"ryb\"\nplot_step = 0.02\n\n# Load data\niris = load_iris()\n\nfor pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],\n [1, 2], [1, 3], [2, 3]]):\n # We only take the two corresponding features\n X = iris.data[:, pair]\n y = iris.target\n\n # Train\n clf = DecisionTreeClassifier().fit(X, y)\n\n # Plot the decision boundary\n plt.subplot(2, 3, pairidx + 1)\n\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),\n np.arange(y_min, y_max, plot_step))\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\n\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)\n\n plt.xlabel(iris.feature_names[pair[0]])\n plt.ylabel(iris.feature_names[pair[1]])\n\n # Plot the training points\n for i, color in zip(range(n_classes), plot_colors):\n idx = 
np.where(y == i)\n plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],\n cmap=plt.cm.RdYlBu, edgecolor='black', s=15)\n\nplt.suptitle(\"Decision surface of a decision tree using paired features\")\nplt.legend(loc='lower right', borderpad=0, handletextpad=0)\nplt.axis(\"tight\")\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"erh3cq\/hyperspy","path":"hyperspy\/_signals\/signal1d.py","copies":"2","size":"61717","content":"# -*- coding: utf-8 -*-\n# Copyright 2007-2020 The HyperSpy developers\n#\n# This file is part of HyperSpy.\n#\n# HyperSpy is free software: you can redistribute it and\/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# HyperSpy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with HyperSpy. If not, see .\n\nimport os\nimport logging\nimport math\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport dask.array as da\nimport scipy.interpolate\nimport scipy as sp\nfrom scipy.signal import savgol_filter\nfrom scipy.ndimage.filters import gaussian_filter1d\n\nfrom hyperspy.signal import BaseSignal\nfrom hyperspy._signals.common_signal1d import CommonSignal1D\nfrom hyperspy.signal_tools import SpikesRemoval, SpikesRemovalInteractive\nfrom hyperspy.models.model1d import Model1D\nfrom hyperspy.misc.lowess_smooth import lowess\n\n\nfrom hyperspy.defaults_parser import preferences\nfrom hyperspy.signal_tools import (\n Signal1DCalibration,\n SmoothingSavitzkyGolay,\n SmoothingLowess,\n SmoothingTV,\n ButterworthFilter)\nfrom hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT\nfrom hyperspy.misc.tv_denoise import _tv_denoise_1d\nfrom hyperspy.signal_tools import BackgroundRemoval\nfrom hyperspy.decorators import interactive_range_selector\nfrom hyperspy.signal_tools import IntegrateArea, _get_background_estimator\nfrom hyperspy._signals.lazy import LazySignal\nfrom hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC, SPIKES_REMOVAL_TOOL_DOCSTRING\nfrom hyperspy.docstrings.signal import (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,\n SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)\nfrom hyperspy.docstrings.plot import (\n BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,\n medfilt_radius=5, maxpeakn=30000, peakgroup=10,\n subchannel=True,):\n \"\"\"Find peaks along a 1D line.\n\n Function to locate the positive peaks in a noisy x-y data set.\n Detects peaks by looking for downward zero-crossings in the first\n derivative that exceed 'slope_thresh'.\n Returns an array containing position, height, and width of each peak.\n Sorted by position.\n 'slope_thresh' and 'amp_thresh', control sensitivity: higher values\n will neglect wider peaks (slope) and smaller features (amp),\n respectively.\n\n Parameters\n ----------\n\n y : array\n 1D input array, e.g. 
a spectrum\n x : array (optional)\n 1D array describing the calibration of y (must have same shape as y)\n slope_thresh : float (optional)\n 1st derivative threshold to count the peak;\n higher values will neglect broader features;\n default is set to 0.\n amp_thresh : float (optional)\n intensity threshold below which peaks are ignored;\n higher values will neglect smaller features;\n default is set to 10% of max(y).\n medfilt_radius : int (optional)\n median filter window to apply to smooth the data\n (see scipy.signal.medfilt);\n if 0, no filter will be applied;\n default is set to 5.\n peakgroup : int (optional)\n number of points around the \"top part\" of the peak that\n are taken to estimate the peak height; for spikes or\n very narrow peaks, keep PeakGroup=1 or 2; for broad or\n noisy peaks, make PeakGroup larger to reduce the effect\n of noise;\n default is set to 10.\n maxpeakn : int (optional)\n number of maximum detectable peaks;\n default is set to 30000.\n subchannel : bool (optional)\n default is set to True.\n\n Returns\n -------\n P : structured array of shape (npeaks)\n contains fields: 'position', 'width', and 'height' for each peak.\n\n Examples\n --------\n >>> x = np.arange(0,50,0.01)\n >>> y = np.cos(x)\n >>> peaks = find_peaks_ohaver(y, x, 0, 0)\n\n Notes\n -----\n Original code from T. C. O'Haver, 1995.\n Version 2 Last revised Oct 27, 2006 Converted to Python by\n Michael Sarahan, Feb 2011.\n Revised to handle edges better. MCS, Mar 2011\n \"\"\"\n\n if x is None:\n x = np.arange(len(y), dtype=np.int64)\n if not amp_thresh:\n amp_thresh = 0.1 * y.max()\n peakgroup = np.round(peakgroup)\n if medfilt_radius:\n d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))\n else:\n d = np.gradient(y)\n n = np.round(peakgroup \/ 2 + 1)\n peak_dt = np.dtype([('position', np.float),\n ('height', np.float),\n ('width', np.float)])\n P = np.array([], dtype=peak_dt)\n peak = 0\n for j in range(len(y) - 4):\n if np.sign(d[j]) > np.sign(d[j + 1]): # Detects zero-crossing\n if np.sign(d[j + 1]) == 0:\n continue\n # if slope of derivative is larger than slope_thresh\n if d[j] - d[j + 1] > slope_thresh:\n # if height of peak is larger than amp_thresh\n if y[j] > amp_thresh:\n # the next section is very slow, and actually messes\n # things up for images (discrete pixels),\n # so by default, don't do subchannel precision in the\n # 1D peakfind step.\n if subchannel:\n xx = np.zeros(peakgroup)\n yy = np.zeros(peakgroup)\n s = 0\n for k in range(peakgroup):\n groupindex = int(j + k - n + 1)\n if groupindex < 1:\n xx = xx[1:]\n yy = yy[1:]\n s += 1\n continue\n elif groupindex > y.shape[0] - 1:\n xx = xx[:groupindex - 1]\n yy = yy[:groupindex - 1]\n break\n xx[k - s] = x[groupindex]\n yy[k - s] = y[groupindex]\n avg = np.average(xx)\n stdev = np.std(xx)\n xxf = (xx - avg) \/ stdev\n # Fit parabola to log10 of sub-group with\n # centering and scaling\n yynz = yy != 0\n coef = np.polyfit(\n xxf[yynz], np.log10(np.abs(yy[yynz])), 2)\n c1 = coef[2]\n c2 = coef[1]\n c3 = coef[0]\n with np.errstate(invalid='ignore'):\n width = np.linalg.norm(stdev * 2.35703 \/\n (np.sqrt(2) * np.sqrt(-1 *\n c3)))\n # if the peak is too narrow for least-squares\n # technique to work well, just use the max value\n # of y in the sub-group of points near peak.\n if peakgroup < 7:\n height = np.max(yy)\n position = xx[np.argmin(np.abs(yy - height))]\n else:\n position = - ((stdev * c2 \/ (2 * c3)) - avg)\n height = np.exp(c1 - c3 * (c2 \/ (2 * c3)) ** 2)\n # Fill results array P. 
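# --- illustrative sketch, not part of the archived file above ---
# The sub-channel branch above fits a parabola to log10(y) over the points
# around the peak top: the log of a Gaussian is a parabola, so its vertex and
# curvature give sub-pixel estimates of position and width. A simplified,
# standalone version of that idea (synthetic data, no centering/scaling):
import numpy as np

x = np.linspace(0.0, 10.0, 101)
y = np.exp(-(x - 4.93) ** 2 / (2 * 0.4 ** 2))      # Gaussian peak between samples

j = int(np.argmax(y))
xx, yy = x[j - 3:j + 4], y[j - 3:j + 4]            # a few points around the maximum
c3, c2, c1 = np.polyfit(xx, np.log10(yy), 2)       # log10(y) ~= c3*x**2 + c2*x + c1

position = -c2 / (2.0 * c3)                        # vertex of the parabola
sigma = np.sqrt(-1.0 / (2.0 * c3 * np.log(10.0)))  # recovered Gaussian sigma
print(position, sigma)                             # ~4.93 and ~0.4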
One row for each peak\n # detected, containing the\n # peak position (x-value) and peak height (y-value).\n else:\n position = x[j]\n height = y[j]\n # no way to know peak width without\n # the above measurements.\n width = 0\n if (not np.isnan(position) and 0 < position < x[-1]):\n P = np.hstack((P,\n np.array([(position, height, width)],\n dtype=peak_dt)))\n peak += 1\n # return only the part of the array that contains peaks\n # (not the whole maxpeakn x 3 array)\n if len(P) > maxpeakn:\n minh = np.sort(P['height'])[-maxpeakn]\n P = P[P['height'] >= minh]\n\n # Sorts the values as a function of position\n P.sort(0)\n\n return P\n\n\ndef interpolate1D(number_of_interpolation_points, data):\n ip = number_of_interpolation_points\n ch = len(data)\n old_ax = np.linspace(0, 100, ch)\n new_ax = np.linspace(0, 100, ch * ip - (ip - 1))\n interpolator = scipy.interpolate.interp1d(old_ax, data)\n return interpolator(new_ax)\n\n\ndef _estimate_shift1D(data, **kwargs):\n mask = kwargs.get('mask', None)\n ref = kwargs.get('ref', None)\n interpolate = kwargs.get('interpolate', True)\n ip = kwargs.get('ip', 5)\n data_slice = kwargs.get('data_slice', slice(None))\n if bool(mask):\n # asarray is required for consistensy as argmax\n # returns a numpy scalar array\n return np.asarray(np.nan)\n data = data[data_slice]\n if interpolate is True:\n data = interpolate1D(ip, data)\n return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1\n\n\ndef _shift1D(data, **kwargs):\n shift = kwargs.get('shift', 0.)\n original_axis = kwargs.get('original_axis', None)\n fill_value = kwargs.get('fill_value', np.nan)\n kind = kwargs.get('kind', 'linear')\n offset = kwargs.get('offset', 0.)\n scale = kwargs.get('scale', 1.)\n size = kwargs.get('size', 2)\n if np.isnan(shift) or shift == 0:\n return data\n axis = np.linspace(offset, offset + scale * (size - 1), size)\n\n si = sp.interpolate.interp1d(original_axis,\n data,\n bounds_error=False,\n fill_value=fill_value,\n kind=kind)\n offset = float(offset - shift)\n axis = np.linspace(offset, offset + scale * (size - 1), size)\n return si(axis)\n\n\nclass Signal1D(BaseSignal, CommonSignal1D):\n\n \"\"\"\n \"\"\"\n _signal_dimension = 1\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.axes_manager.signal_dimension != 1:\n self.axes_manager.set_signal_dimension(1)\n\n def _get_spikes_diagnosis_histogram_data(self, signal_mask=None,\n navigation_mask=None,\n **kwargs):\n self._check_signal_dimension_equals_one()\n dc = self.data\n if signal_mask is not None:\n dc = dc[..., ~signal_mask]\n if navigation_mask is not None:\n dc = dc[~navigation_mask, :]\n der = np.abs(np.diff(dc, 1, -1))\n n = ((~navigation_mask).sum() if navigation_mask else\n self.axes_manager.navigation_size)\n\n # arbitrary cutoff for number of spectra necessary before histogram\n # data is compressed by finding maxima of each spectrum\n tmp = BaseSignal(der) if n < 2000 else BaseSignal(\n np.ravel(der.max(-1)))\n\n # get histogram signal using smart binning and plot\n return tmp.get_histogram(**kwargs)\n\n def spikes_diagnosis(self, signal_mask=None,\n navigation_mask=None,\n **kwargs):\n \"\"\"Plots a histogram to help in choosing the threshold for\n spikes removal.\n\n Parameters\n ----------\n %s\n %s\n **kwargs : dict\n Keyword arguments pass to\n :py:meth:`~hyperspy.signal.signal.BaseSignal.get_histogram`\n\n See also\n --------\n spikes_removal_tool\n\n \"\"\"\n tmph = self._get_spikes_diagnosis_histogram_data(signal_mask,\n navigation_mask,\n **kwargs)\n 
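# --- illustrative sketch, not part of the archived file above ---
# spikes_diagnosis histograms the first-derivative magnitude of the spectra so
# that a spike-removal threshold can be read off the tail of the distribution
# (per-spectrum maxima are used instead when there are very many spectra).
# The same idea for a plain NumPy stack of spectra (synthetic data):
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
spectra = rng.normal(size=(50, 1024))        # 50 fake spectra
spectra[7, 300] += 40.0                      # one cosmic-ray-like spike

der = np.abs(np.diff(spectra, axis=-1))      # derivative magnitude
plt.hist(der.ravel(), bins=100)
plt.xlabel('Derivative magnitude')
plt.ylabel('Counts')
plt.yscale('log')                            # the spike shows up as an isolated tail bin
plt.show()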
tmph.plot()\n\n # Customize plot appearance\n plt.gca().set_title('')\n plt.gca().fill_between(tmph.axes_manager[0].axis,\n tmph.data,\n facecolor='#fddbc7',\n interpolate=True,\n color='none')\n ax = tmph._plot.signal_plot.ax\n axl = tmph._plot.signal_plot.ax_lines[0]\n axl.set_line_properties(color='#b2182b')\n plt.xlabel('Derivative magnitude')\n plt.ylabel('Log(Counts)')\n ax.set_yscale('log')\n ax.set_ylim(10 ** -1, plt.ylim()[1])\n ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])\n plt.draw()\n\n spikes_diagnosis.__doc__ %= (SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)\n\n\n def spikes_removal_tool(self, signal_mask=None, navigation_mask=None,\n threshold='auto', interactive=True,\n display=True, toolkit=None):\n self._check_signal_dimension_equals_one()\n if interactive:\n sr = SpikesRemovalInteractive(self,\n signal_mask=signal_mask,\n navigation_mask=navigation_mask,\n threshold=threshold)\n return sr.gui(display=display, toolkit=toolkit)\n else:\n SpikesRemoval(self,\n signal_mask=signal_mask,\n navigation_mask=navigation_mask,\n threshold=threshold)\n\n spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (\n SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG, \"\", DISPLAY_DT, TOOLKIT_DT)\n\n def create_model(self, dictionary=None):\n \"\"\"Create a model for the current data.\n\n Returns\n -------\n model : `Model1D` instance.\n\n \"\"\"\n\n model = Model1D(self, dictionary=dictionary)\n return model\n\n def shift1D(\n self,\n shift_array,\n interpolation_method='linear',\n crop=True,\n expand=False,\n fill_value=np.nan,\n parallel=None,\n show_progressbar=None,\n max_workers=None,\n ):\n \"\"\"Shift the data in place over the signal axis by the amount specified\n by an array.\n\n Parameters\n ----------\n shift_array : numpy array\n An array containing the shifting amount. 
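# --- illustrative sketch, not part of the archived file above ---
# shift1D delegates to the module-level _shift1D helper, which re-interpolates
# each spectrum onto an axis whose offset has been moved by -shift, so that a
# feature ends up `shift` axis units further to the right (uncovered channels
# get fill_value). The same core step for a single 1D signal:
import numpy as np
from scipy.interpolate import interp1d

offset, scale, size = 0.0, 0.1, 201
axis = np.linspace(offset, offset + scale * (size - 1), size)
data = np.exp(-(axis - 10.0) ** 2)            # a peak centred at 10.0

shift = 0.37                                  # sub-pixel shift in axis units
si = interp1d(axis, data, bounds_error=False, fill_value=np.nan, kind='linear')
shifted = si(np.linspace(offset - shift, offset - shift + scale * (size - 1), size))

print(axis[np.argmax(data)], axis[np.nanargmax(shifted)])   # peak moves by ~+0.4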
It must have\n `axes_manager._navigation_shape_in_array` shape.\n interpolation_method : str or int\n Specifies the kind of interpolation as a string ('linear',\n 'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an\n integer specifying the order of the spline interpolator to\n use.\n %s\n expand : bool\n If True, the data will be expanded to fit all data after alignment.\n Overrides `crop`.\n fill_value : float\n If crop is False fill the data outside of the original\n interval with the given value where needed.\n %s\n %s\n %s\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n if not np.any(shift_array):\n # Nothing to do, the shift array if filled with zeros\n return\n if show_progressbar is None:\n show_progressbar = preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n axis = self.axes_manager.signal_axes[0]\n\n # Figure out min\/max shifts, and translate to shifts in index as well\n minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)\n if minimum < 0:\n ihigh = 1 + axis.value2index(\n axis.high_value + minimum,\n rounding=math.floor)\n else:\n ihigh = axis.high_index + 1\n if maximum > 0:\n ilow = axis.value2index(axis.offset + maximum,\n rounding=math.ceil)\n else:\n ilow = axis.low_index\n if expand:\n if self._lazy:\n ind = axis.index_in_array\n pre_shape = list(self.data.shape)\n post_shape = list(self.data.shape)\n pre_chunks = list(self.data.chunks)\n post_chunks = list(self.data.chunks)\n\n pre_shape[ind] = axis.high_index - ihigh + 1\n post_shape[ind] = ilow - axis.low_index\n for chunks, shape in zip((pre_chunks, post_chunks),\n (pre_shape, post_shape)):\n maxsize = min(np.max(chunks[ind]), shape[ind])\n num = np.ceil(shape[ind] \/ maxsize)\n chunks[ind] = tuple(len(ar) for ar in\n np.array_split(np.arange(shape[ind]),\n num))\n pre_array = da.full(tuple(pre_shape),\n fill_value,\n chunks=tuple(pre_chunks))\n\n post_array = da.full(tuple(post_shape),\n fill_value,\n chunks=tuple(post_chunks))\n\n self.data = da.concatenate((pre_array, self.data, post_array),\n axis=ind)\n else:\n padding = []\n for i in range(self.data.ndim):\n if i == axis.index_in_array:\n padding.append((axis.high_index - ihigh + 1,\n ilow - axis.low_index))\n else:\n padding.append((0, 0))\n self.data = np.pad(self.data, padding, mode='constant',\n constant_values=(fill_value,))\n axis.offset += minimum\n axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index\n\n self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),\n original_axis=axis.axis,\n fill_value=fill_value,\n kind=interpolation_method,\n offset=axis.offset,\n scale=axis.scale,\n size=axis.size,\n show_progressbar=show_progressbar,\n parallel=parallel,\n max_workers=max_workers,\n ragged=False)\n\n if crop and not expand:\n _logger.debug(\"Cropping %s from index %i to %i\"\n % (self, ilow, ihigh))\n self.crop(axis.index_in_axes_manager,\n ilow,\n ihigh)\n\n self.events.data_changed.trigger(obj=self)\n shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)\n\n def interpolate_in_between(\n self,\n start,\n end,\n delta=3,\n show_progressbar=None,\n parallel=None,\n max_workers=None,\n **kwargs,\n ):\n \"\"\"Replace the data in a given range by interpolation.\n The operation is performed in place.\n\n Parameters\n ----------\n start, end : int or float\n The limits of the interval. If int they are taken as the\n axis index. 
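# --- illustrative sketch, not part of the archived file above ---
# interpolate_in_between (implemented just below) rebuilds the channels in
# [i1, i2) from two small support windows of width `delta` on either side of
# the interval, using scipy.interpolate.interp1d. The same repair on a plain
# 1D array with a corrupted region:
import numpy as np
from scipy.interpolate import interp1d

y = np.sin(np.linspace(0, 6, 200))
y[90:110] = 50.0                                    # corrupted region to repair

i1, i2, delta = 90, 110, 3
i0, i3 = i1 - delta, i2 + delta
support = list(range(i0, i1)) + list(range(i2, i3))
fit = interp1d(support, y[i0:i1].tolist() + y[i2:i3].tolist())
y[i1:i2] = fit(np.arange(i1, i2))                   # fill the gap by interpolation

print(np.max(np.abs(y - np.sin(np.linspace(0, 6, 200)))))   # small residual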
If float they are taken as the axis value.\n delta : int or float\n The windows around the (start, end) to use for interpolation\n %s\n %s\n %s\n **kwargs :\n All extra keyword arguments are passed to\n :py:func:`scipy.interpolate.interp1d`. See the function documentation\n for details.\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n if show_progressbar is None:\n show_progressbar = preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n axis = self.axes_manager.signal_axes[0]\n i1 = axis._get_index(start)\n i2 = axis._get_index(end)\n if isinstance(delta, float):\n delta = int(delta \/ axis.scale)\n i0 = int(np.clip(i1 - delta, 0, np.inf))\n i3 = int(np.clip(i2 + delta, 0, axis.size))\n\n def interpolating_function(dat):\n dat_int = sp.interpolate.interp1d(\n list(range(i0, i1)) + list(range(i2, i3)),\n dat[i0:i1].tolist() + dat[i2:i3].tolist(),\n **kwargs)\n dat[i1:i2] = dat_int(list(range(i1, i2)))\n return dat\n self._map_iterate(interpolating_function,\n ragged=False,\n parallel=parallel,\n show_progressbar=show_progressbar,\n max_workers=max_workers)\n self.events.data_changed.trigger(obj=self)\n\n interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)\n\n def estimate_shift1D(\n self,\n start=None,\n end=None,\n reference_indices=None,\n max_shift=None,\n interpolate=True,\n number_of_interpolation_points=5,\n mask=None,\n show_progressbar=None,\n parallel=None,\n max_workers=None,\n ):\n \"\"\"Estimate the shifts in the current signal axis using\n cross-correlation.\n This method can only estimate the shift by comparing\n unidimensional features that should not change the position in\n the signal axis. To decrease the memory usage, the time of\n computation and the accuracy of the results it is convenient to\n select the feature of interest providing sensible values for\n `start` and `end`. By default interpolation is used to obtain\n subpixel precision.\n\n Parameters\n ----------\n start, end : int, float or None\n The limits of the interval. If int they are taken as the\n axis index. If float they are taken as the axis value.\n reference_indices : tuple of ints or None\n Defines the coordinates of the spectrum that will be used\n as eference. If None the spectrum at the current\n coordinates is used for this purpose.\n max_shift : int\n \"Saturation limit\" for the shift.\n interpolate : bool\n If True, interpolation is used to provide sub-pixel\n accuracy.\n number_of_interpolation_points : int\n Number of interpolation points. Warning: making this number\n too big can saturate the memory\n mask : `BaseSignal` of bool.\n It must have signal_dimension = 0 and navigation_shape equal to the\n current signal. 
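# --- illustrative sketch, not part of the archived file above ---
# estimate_shift1D relies on the module-level _estimate_shift1D helper, which
# takes the argmax of the full cross-correlation between a reference spectrum
# and each spectrum. The core of that estimator for two plain 1D arrays:
import numpy as np

x = np.arange(512)
ref = np.exp(-(x - 200) ** 2 / 50.0)
data = np.exp(-(x - 207) ** 2 / 50.0)       # same feature, 7 channels further right

shift = np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
print(shift)    # -7: shifting `data` by -7 channels would align it with `ref`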
Where mask is True the shift is not computed\n and set to nan.\n %s\n %s\n %s\n\n Returns\n -------\n An array with the result of the estimation in the axis units.\n Although the computation is performed in batches if the signal is\n lazy, the result is computed in memory because it depends on the\n current state of the axes that could change later on in the workflow.\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n if show_progressbar is None:\n show_progressbar = preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n ip = number_of_interpolation_points + 1\n axis = self.axes_manager.signal_axes[0]\n self._check_navigation_mask(mask)\n # we compute for now\n if isinstance(start, da.Array):\n start = start.compute()\n if isinstance(end, da.Array):\n end = end.compute()\n i1, i2 = axis._get_index(start), axis._get_index(end)\n if reference_indices is None:\n reference_indices = self.axes_manager.indices\n ref = self.inav[reference_indices].data[i1:i2]\n\n if interpolate is True:\n ref = interpolate1D(ip, ref)\n iterating_kwargs = ()\n if mask is not None:\n iterating_kwargs += (('mask', mask),)\n shift_signal = self._map_iterate(\n _estimate_shift1D,\n iterating_kwargs=iterating_kwargs,\n data_slice=slice(i1, i2),\n ref=ref,\n ip=ip,\n interpolate=interpolate,\n ragged=False,\n parallel=parallel,\n inplace=False,\n show_progressbar=show_progressbar,\n max_workers=max_workers,\n )\n shift_array = shift_signal.data\n if max_shift is not None:\n if interpolate is True:\n max_shift *= ip\n shift_array.clip(-max_shift, max_shift)\n if interpolate is True:\n shift_array = shift_array \/ ip\n shift_array *= axis.scale\n if self._lazy:\n # We must compute right now because otherwise any changes to the\n # axes_manager of the signal later in the workflow may result in\n # a wrong shift_array\n shift_array = shift_array.compute()\n return shift_array\n\n estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)\n\n def align1D(self,\n start=None,\n end=None,\n reference_indices=None,\n max_shift=None,\n interpolate=True,\n number_of_interpolation_points=5,\n interpolation_method='linear',\n crop=True,\n expand=False,\n fill_value=np.nan,\n also_align=None,\n mask=None,\n show_progressbar=None):\n \"\"\"Estimate the shifts in the signal axis using\n cross-correlation and use the estimation to align the data in place.\n This method can only estimate the shift by comparing\n unidimensional\n features that should not change the position.\n\n To decrease memory usage, time of computation and improve\n accuracy it is convenient to select the feature of interest\n setting the `start` and `end` keywords. By default interpolation is\n used to obtain subpixel precision.\n\n Parameters\n ----------\n start, end : int, float or None\n The limits of the interval. If int they are taken as the\n axis index. If float they are taken as the axis value.\n reference_indices : tuple of ints or None\n Defines the coordinates of the spectrum that will be used\n as eference. If None the spectrum at the current\n coordinates is used for this purpose.\n max_shift : int\n \"Saturation limit\" for the shift.\n interpolate : bool\n If True, interpolation is used to provide sub-pixel\n accuracy.\n number_of_interpolation_points : int\n Number of interpolation points. 
Warning: making this number\n too big can saturate the memory\n interpolation_method : str or int\n Specifies the kind of interpolation as a string ('linear',\n 'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an\n integer specifying the order of the spline interpolator to\n use.\n %s\n expand : bool\n If True, the data will be expanded to fit all data after alignment.\n Overrides `crop`.\n fill_value : float\n If crop is False fill the data outside of the original\n interval with the given value where needed.\n also_align : list of signals, None\n A list of BaseSignal instances that has exactly the same\n dimensions as this one and that will be aligned using the shift map\n estimated using the this signal.\n mask : `BaseSignal` or bool data type.\n It must have signal_dimension = 0 and navigation_shape equal to the\n current signal. Where mask is True the shift is not computed\n and set to nan.\n %s\n\n Returns\n -------\n An array with the result of the estimation.\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n\n See also\n --------\n estimate_shift1D\n \"\"\"\n if also_align is None:\n also_align = []\n self._check_signal_dimension_equals_one()\n if self._lazy:\n _logger.warning('In order to properly expand, the lazy '\n 'reference signal will be read twice (once to '\n 'estimate shifts, and second time to shift '\n 'appropriatelly), which might take a long time. '\n 'Use expand=False to only pass through the data '\n 'once.')\n shift_array = self.estimate_shift1D(\n start=start,\n end=end,\n reference_indices=reference_indices,\n max_shift=max_shift,\n interpolate=interpolate,\n number_of_interpolation_points=number_of_interpolation_points,\n mask=mask,\n show_progressbar=show_progressbar)\n signals_to_shift = [self] + also_align\n for signal in signals_to_shift:\n signal.shift1D(shift_array=shift_array,\n interpolation_method=interpolation_method,\n crop=crop,\n fill_value=fill_value,\n expand=expand,\n show_progressbar=show_progressbar)\n align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)\n\n def integrate_in_range(self, signal_range='interactive',\n display=True, toolkit=None):\n \"\"\"Sums the spectrum over an energy range, giving the integrated\n area.\n The energy range can either be selected through a GUI or the command\n line.\n\n Parameters\n ----------\n signal_range : a tuple of this form (l, r) or \"interactive\"\n l and r are the left and right limits of the range. They can be\n numbers or None, where None indicates the extremes of the interval.\n If l and r are floats the `signal_range` will be in axis units (for\n example eV). If l and r are integers the `signal_range` will be in\n index units. When `signal_range` is \"interactive\" (default) the\n range is selected using a GUI. 
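# --- illustrative sketch, not part of the archived file above ---
# integrate_in_range sums the spectrum between two limits given either as
# indices or as calibrated axis values. A rough standalone equivalent for a
# plain 1D signal (using np.trapz here; the library's integrate1D may use a
# different rule), with a made-up linear axis calibration:
import numpy as np

offset, scale = 0.0, 0.5                    # axis: x_i = offset + i * scale
x = offset + scale * np.arange(101)
y = np.exp(-x / 10.0)                       # some decaying spectrum

left, right = 5.0, 20.0                     # limits in axis units
i1 = int(round((left - offset) / scale))
i2 = int(round((right - offset) / scale))
area = np.trapz(y[i1:i2 + 1], dx=scale)
print(area)                                 # close to 10*(exp(-0.5) - exp(-2))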
Note that ROIs can be used\n in place of a tuple.\n\n Returns\n --------\n integrated_spectrum : `BaseSignal` subclass\n\n See Also\n --------\n integrate_simpson\n\n Examples\n --------\n Using the GUI\n\n >>> s = hs.signals.Signal1D(range(1000))\n >>> s.integrate_in_range() #doctest: +SKIP\n\n Using the CLI\n\n >>> s_int = s.integrate_in_range(signal_range=(560,None))\n\n Selecting a range in the axis units, by specifying the\n signal range with floats.\n\n >>> s_int = s.integrate_in_range(signal_range=(560.,590.))\n\n Selecting a range using the index, by specifying the\n signal range with integers.\n\n >>> s_int = s.integrate_in_range(signal_range=(100,120))\n \"\"\"\n from hyperspy.misc.utils import deprecation_warning\n msg = (\n \"The `Signal1D.integrate_in_range` method is deprecated and will \"\n \"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` \"\n \"instead.\")\n deprecation_warning(msg)\n\n if signal_range == 'interactive':\n self_copy = self.deepcopy()\n ia = IntegrateArea(self_copy, signal_range)\n ia.gui(display=display, toolkit=toolkit)\n integrated_signal1D = self_copy\n else:\n integrated_signal1D = self._integrate_in_range_commandline(\n signal_range)\n return integrated_signal1D\n\n def _integrate_in_range_commandline(self, signal_range):\n e1 = signal_range[0]\n e2 = signal_range[1]\n integrated_signal1D = self.isig[e1:e2].integrate1D(-1)\n return integrated_signal1D\n\n def calibrate(self, display=True, toolkit=None):\n \"\"\"\n Calibrate the spectral dimension using a gui.\n It displays a window where the new calibration can be set by:\n\n * setting the values of offset, units and scale directly\n * or selecting a range by dragging the mouse on the spectrum figure\n and setting the new values for the given range limits\n\n Parameters\n ----------\n %s\n %s\n\n Notes\n -----\n For this method to work the output_dimension must be 1.\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n self._check_signal_dimension_equals_one()\n calibration = Signal1DCalibration(self)\n return calibration.gui(display=display, toolkit=toolkit)\n\n calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)\n\n def smooth_savitzky_golay(\n self,\n polynomial_order=None,\n window_length=None,\n differential_order=0,\n parallel=None,\n max_workers=None,\n display=True,\n toolkit=None,\n ):\n \"\"\"\n Apply a Savitzky-Golay filter to the data in place.\n If `polynomial_order` or `window_length` or `differential_order` are\n None the method is run in interactive mode.\n\n Parameters\n ----------\n polynomial_order : int, optional\n The order of the polynomial used to fit the samples.\n `polyorder` must be less than `window_length`.\n window_length : int, optional\n The length of the filter window (i.e. the number of coefficients).\n `window_length` must be a positive odd integer.\n differential_order: int, optional\n The order of the derivative to compute. This must be a\n nonnegative integer. 
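# --- illustrative sketch, not part of the archived file above ---
# smooth_savitzky_golay (body shown just below) maps scipy.signal.savgol_filter
# over the signal axis, passing the axis scale as `delta` so that derivatives
# come out in calibrated units. The equivalent standalone call on made-up data:
import numpy as np
from scipy.signal import savgol_filter

scale = 0.05                                     # axis step, e.g. eV per channel
x = np.arange(0.0, 20.0, scale)
noisy = np.sin(x) + 0.1 * np.random.randn(x.size)

smoothed = savgol_filter(noisy, window_length=25, polyorder=3)
derivative = savgol_filter(noisy, window_length=25, polyorder=3, deriv=1, delta=scale)
# `derivative` approximates d/dx sin(x) = cos(x) because delta is the axis step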
The default is 0, which means to filter\n the data without differentiating.\n %s\n %s\n %s\n %s\n\n Notes\n -----\n More information about the filter in `scipy.signal.savgol_filter`.\n \"\"\"\n self._check_signal_dimension_equals_one()\n if (polynomial_order is not None and\n window_length is not None):\n axis = self.axes_manager.signal_axes[0]\n self.map(savgol_filter, window_length=window_length,\n polyorder=polynomial_order, deriv=differential_order,\n delta=axis.scale, ragged=False, parallel=parallel, max_workers=max_workers)\n else:\n # Interactive mode\n smoother = SmoothingSavitzkyGolay(self)\n smoother.differential_order = differential_order\n if polynomial_order is not None:\n smoother.polynomial_order = polynomial_order\n if window_length is not None:\n smoother.window_length = window_length\n return smoother.gui(display=display, toolkit=toolkit)\n\n smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)\n\n def smooth_lowess(\n self,\n smoothing_parameter=None,\n number_of_iterations=None,\n show_progressbar=None,\n parallel=None,\n max_workers=None,\n display=True,\n toolkit=None,\n ):\n \"\"\"\n Lowess data smoothing in place.\n If `smoothing_parameter` or `number_of_iterations` are None the method\n is run in interactive mode.\n\n Parameters\n ----------\n smoothing_parameter: float or None\n Between 0 and 1. The fraction of the data used\n when estimating each y-value.\n number_of_iterations: int or None\n The number of residual-based reweightings\n to perform.\n %s\n %s\n %s\n %s\n %s\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if smoothing_parameter is None or number_of_iterations is None:\n smoother = SmoothingLowess(self)\n if smoothing_parameter is not None:\n smoother.smoothing_parameter = smoothing_parameter\n if number_of_iterations is not None:\n smoother.number_of_iterations = number_of_iterations\n return smoother.gui(display=display, toolkit=toolkit)\n else:\n self.map(lowess,\n x=self.axes_manager[-1].axis,\n f=smoothing_parameter,\n n_iter=number_of_iterations,\n show_progressbar=show_progressbar,\n ragged=False,\n parallel=parallel,\n max_workers=max_workers)\n smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)\n\n def smooth_tv(\n self,\n smoothing_parameter=None,\n show_progressbar=None,\n parallel=None,\n max_workers=None,\n display=True,\n toolkit=None,\n ):\n \"\"\"\n Total variation data smoothing in place.\n\n Parameters\n ----------\n smoothing_parameter: float or None\n Denoising weight relative to L2 minimization. 
If None the method\n is run in interactive mode.\n %s\n %s\n %s\n %s\n %s\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n self._check_signal_dimension_equals_one()\n if smoothing_parameter is None:\n smoother = SmoothingTV(self)\n return smoother.gui(display=display, toolkit=toolkit)\n else:\n self.map(_tv_denoise_1d, weight=smoothing_parameter,\n ragged=False,\n show_progressbar=show_progressbar,\n parallel=parallel,\n max_workers=max_workers)\n\n smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)\n\n def filter_butterworth(self,\n cutoff_frequency_ratio=None,\n type='low',\n order=2, display=True, toolkit=None):\n \"\"\"\n Butterworth filter in place.\n\n Parameters\n ----------\n %s\n %s\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n self._check_signal_dimension_equals_one()\n smoother = ButterworthFilter(self)\n if cutoff_frequency_ratio is not None:\n smoother.cutoff_frequency_ratio = cutoff_frequency_ratio\n smoother.type = type\n smoother.order = order\n smoother.apply()\n else:\n return smoother.gui(display=display, toolkit=toolkit)\n\n filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)\n\n def _remove_background_cli(\n self, signal_range, background_estimator, fast=True,\n zero_fill=False, show_progressbar=None, model=None,\n return_model=False):\n \"\"\" See :py:meth:`~hyperspy._signal1d.signal1D.remove_background`. \"\"\"\n if model is None:\n from hyperspy.models.model1d import Model1D\n model = Model1D(self)\n if background_estimator not in model:\n model.append(background_estimator)\n background_estimator.estimate_parameters(\n self,\n signal_range[0],\n signal_range[1],\n only_current=False)\n\n if not fast:\n model.set_signal_range(signal_range[0], signal_range[1])\n model.multifit(show_progressbar=show_progressbar,\n iterpath='serpentine')\n model.reset_signal_range()\n\n if self._lazy:\n result = self - model.as_signal(show_progressbar=show_progressbar)\n else:\n try:\n axis = self.axes_manager.signal_axes[0]\n scale_factor = axis.scale if self.metadata.Signal.binned else 1\n bkg = background_estimator.function_nd(axis.axis) * scale_factor\n result = self - bkg\n except MemoryError:\n result = self - model.as_signal(\n show_progressbar=show_progressbar)\n\n if zero_fill:\n if self._lazy:\n low_idx = result.axes_manager[-1].value2index(signal_range[0])\n z = da.zeros(low_idx, chunks=(low_idx,))\n cropped_da = result.data[low_idx:]\n result.data = da.concatenate([z, cropped_da])\n else:\n result.isig[:signal_range[0]] = 0\n if return_model:\n if fast:\n # Calculate the variance for each navigation position only when\n # using fast, otherwise the chisq is already calculated when\n # doing the multifit\n d = result.data[..., np.where(model.channel_switches)[0]]\n variance = model._get_variance(only_current=False)\n d *= d \/ (1. * variance) # d = difference^2 \/ variance.\n model.chisq.data = d.sum(-1)\n result = (result, model)\n return result\n\n def remove_background(\n self,\n signal_range='interactive',\n background_type='Power law',\n polynomial_order=2,\n fast=True,\n zero_fill=False,\n plot_remainder=True,\n show_progressbar=None,\n return_model=False,\n display=True,\n toolkit=None):\n \"\"\"\n Remove the background, either in place using a GUI or returned as a new\n spectrum using the command line. 
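# --- illustrative sketch, not part of the archived file above ---
# filter_butterworth hands the work to the interactive ButterworthFilter tool,
# configured by an order and a cutoff_frequency_ratio. Its internals are not
# shown in this file; the sketch below is a generic SciPy construction of such
# a filter, assuming the ratio is a fraction of the Nyquist frequency:
import numpy as np
from scipy.signal import butter, filtfilt

y = np.sin(np.linspace(0, 20, 2000)) + 0.3 * np.random.randn(2000)

cutoff_frequency_ratio = 0.05               # fraction of Nyquist (assumption)
order = 2
b, a = butter(order, cutoff_frequency_ratio, btype='low')
y_filtered = filtfilt(b, a, y)              # zero-phase low-pass filtering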
The fast option is not accurate for\n most background types - except Gaussian, Offset and\n Power law - but it is useful to estimate the initial fitting parameters\n before performing a full fit.\n\n Parameters\n ----------\n signal_range : \"interactive\", tuple of ints or floats, optional\n If this argument is not specified, the signal range has to be\n selected using a GUI. And the original spectrum will be replaced.\n If tuple is given, the a spectrum will be returned.\n background_type : str\n The type of component which should be used to fit the background.\n Possible components: Doniach, Gaussian, Lorentzian, Offset,\n Polynomial, PowerLaw, Exponential, SkewNormal, SplitVoigt, Voigt.\n If Polynomial is used, the polynomial order can be specified\n polynomial_order : int, default 2\n Specify the polynomial order if a Polynomial background is used.\n fast : bool\n If True, perform an approximative estimation of the parameters.\n If False, the signal is fitted using non-linear least squares\n afterwards. This is slower compared to the estimation but\n often more accurate.\n zero_fill : bool\n If True, all spectral channels lower than the lower bound of the\n fitting range will be set to zero (this is the default behavior\n of Gatan's DigitalMicrograph). Setting this value to False\n allows for inspection of the quality of background fit throughout\n the pre-fitting region.\n plot_remainder : bool\n If True, add a (green) line previewing the remainder signal after\n background removal. This preview is obtained from a Fast calculation\n so the result may be different if a NLLS calculation is finally\n performed.\n return_model : bool\n If True, the background model is returned. The chi\u00b2 can be obtained\n from this model using\n :py:meth:`~hyperspy.models.model1d.Model1D.chisqd`.\n %s\n %s\n %s\n\n Returns\n -------\n {None, signal, background_model or (signal, background_model)}\n If signal_range is not 'interactive', the signal with background\n substracted is returned. 
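# --- illustrative sketch, not part of the archived file above ---
# remove_background estimates a background component (PowerLaw by default)
# over the chosen signal range and subtracts it. The library uses the
# component's own estimate_parameters/fit machinery; the sketch below is only
# a rough stand-in for the fast PowerLaw case, fitting A*x**-r as a straight
# line in log-log space over a peak-free window:
import numpy as np

x = np.linspace(100.0, 400.0, 300)
background = 1e6 * x ** -3.0
signal = background + 50.0 * np.exp(-(x - 300.0) ** 2 / 20.0)   # peak on background

fit_region = (x >= 150) & (x <= 250)                            # peak-free window
slope, intercept = np.polyfit(np.log(x[fit_region]), np.log(signal[fit_region]), 1)
estimated_bkg = np.exp(intercept) * x ** slope                  # recovers ~1e6 * x**-3

residual = signal - estimated_bkg                               # background-subtracted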
If return_model is True, returns the\n background model, otherwise, the GUI widget dictionary is returned\n if `display=False` - see the display parameter documentation.\n\n Examples\n --------\n Using GUI, replaces spectrum s\n\n >>> s = hs.signals.Signal1D(range(1000))\n >>> s.remove_background() #doctest: +SKIP\n\n Using command line, returns a Signal1D:\n\n >>> s.remove_background(signal_range=(400,450),\n background_type='PowerLaw')\n \n\n Using a full model to fit the background:\n\n >>> s.remove_background(signal_range=(400,450), fast=False)\n \n\n Returns background substracted and the model:\n\n >>> s.remove_background(signal_range=(400,450),\n fast=False,\n return_model=True)\n (, )\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n\n self._check_signal_dimension_equals_one()\n # Create model here, so that we can return it\n from hyperspy.models.model1d import Model1D\n model = Model1D(self)\n if signal_range == 'interactive':\n br = BackgroundRemoval(self, background_type=background_type,\n polynomial_order=polynomial_order,\n fast=fast,\n plot_remainder=plot_remainder,\n show_progressbar=show_progressbar,\n zero_fill=zero_fill,\n model=model)\n gui_dict = br.gui(display=display, toolkit=toolkit)\n if return_model:\n return model\n else:\n # for testing purposes\n return gui_dict\n else:\n background_estimator = _get_background_estimator(\n background_type, polynomial_order)[0]\n result = self._remove_background_cli(\n signal_range=signal_range,\n background_estimator=background_estimator,\n fast=fast,\n zero_fill=zero_fill,\n show_progressbar=show_progressbar,\n model=model,\n return_model=return_model)\n return result\n remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)\n\n @interactive_range_selector\n def crop_signal1D(self, left_value=None, right_value=None,):\n \"\"\"Crop in place the spectral dimension.\n\n Parameters\n ----------\n left_value, righ_value : int, float or None\n If int the values are taken as indices. If float they are\n converted to indices using the spectral axis calibration.\n If left_value is None crops from the beginning of the axis.\n If right_value is None crops up to the end of the axis. 
If\n both are\n None the interactive cropping interface is activated\n enabling\n cropping the spectrum using a span selector in the signal\n plot.\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n self._check_signal_dimension_equals_one()\n try:\n left_value, right_value = left_value\n except TypeError:\n # It was not a ROI, we carry on\n pass\n self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,\n start=left_value, end=right_value)\n\n def gaussian_filter(self, FWHM):\n \"\"\"Applies a Gaussian filter in the spectral dimension in place.\n\n Parameters\n ----------\n FWHM : float\n The Full Width at Half Maximum of the gaussian in the\n spectral axis units\n\n Raises\n ------\n ValueError\n If FWHM is equal or less than zero.\n\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n self._check_signal_dimension_equals_one()\n if FWHM <= 0:\n raise ValueError(\n \"FWHM must be greater than zero\")\n axis = self.axes_manager.signal_axes[0]\n FWHM *= 1 \/ axis.scale\n self.map(gaussian_filter1d, sigma=FWHM \/ 2.35482, ragged=False)\n\n def hanning_taper(self, side='both', channels=None, offset=0):\n \"\"\"Apply a hanning taper to the data in place.\n\n Parameters\n ----------\n side : 'left', 'right' or 'both'\n Specify which side to use.\n channels : None or int\n The number of channels to taper. If None 5% of the total\n number of channels are tapered.\n offset : int\n\n Returns\n -------\n channels\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n if not np.issubdtype(self.data.dtype, np.floating):\n raise TypeError(\"The data dtype should be `float`. It can be \"\n \"changed by using the `change_dtype('float')` \"\n \"method of the signal.\")\n\n # TODO: generalize it\n self._check_signal_dimension_equals_one()\n if channels is None:\n channels = int(round(len(self()) * 0.02))\n if channels < 20:\n channels = 20\n dc = self._data_aligned_with_axes\n if self._lazy and offset != 0:\n shp = dc.shape\n if len(shp) == 1:\n nav_shape = ()\n nav_chunks = ()\n else:\n nav_shape = shp[:-1]\n nav_chunks = dc.chunks[:-1]\n zeros = da.zeros(nav_shape + (offset,),\n chunks=nav_chunks + ((offset,),))\n\n if side == 'left' or side == 'both':\n if self._lazy:\n tapered = dc[..., offset:channels + offset]\n tapered *= np.hanning(2 * channels)[:channels]\n therest = dc[..., channels + offset:]\n thelist = [] if offset == 0 else [zeros]\n thelist.extend([tapered, therest])\n dc = da.concatenate(thelist, axis=-1)\n else:\n dc[..., offset:channels + offset] *= (\n np.hanning(2 * channels)[:channels])\n dc[..., :offset] *= 0.\n if side == 'right' or side == 'both':\n rl = None if offset == 0 else -offset\n if self._lazy:\n therest = dc[..., :-channels - offset]\n tapered = dc[..., -channels - offset:rl]\n tapered *= np.hanning(2 * channels)[-channels:]\n thelist = [therest, tapered]\n if offset != 0:\n thelist.append(zeros)\n dc = da.concatenate(thelist, axis=-1)\n else:\n dc[..., -channels - offset:rl] *= (\n np.hanning(2 * channels)[-channels:])\n if offset != 0:\n dc[..., -offset:] *= 0.\n\n if self._lazy:\n self.data = dc\n self.events.data_changed.trigger(obj=self)\n return channels\n\n def find_peaks1D_ohaver(self, xdim=None,\n slope_thresh=0,\n amp_thresh=None,\n subchannel=True,\n medfilt_radius=5,\n maxpeakn=30000,\n peakgroup=10,\n parallel=None,\n max_workers=None):\n \"\"\"Find positive peaks along a 1D Signal. 
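# --- illustrative sketch, not part of the archived file above ---
# gaussian_filter (defined above) takes the FWHM in axis units, converts it to
# channels with the axis scale, then to a standard deviation via
# FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.35482*sigma before calling
# scipy.ndimage.gaussian_filter1d. Standalone, on made-up data:
import numpy as np
from scipy.ndimage import gaussian_filter1d

scale = 0.1                                   # axis units per channel
y = np.random.randn(1000)                     # noisy 1D signal

FWHM = 1.5                                    # desired FWHM in axis units
sigma_in_channels = (FWHM / scale) / 2.35482
y_smooth = gaussian_filter1d(y, sigma=sigma_in_channels)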
It detects peaks by looking\n for downward zero-crossings in the first derivative that exceed\n 'slope_thresh'.\n\n 'slope_thresh' and 'amp_thresh', control sensitivity: higher\n values will neglect broad peaks (slope) and smaller features (amp),\n respectively.\n\n `peakgroup` is the number of points around the top of the peak\n that are taken to estimate the peak height. For spikes or very\n narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,\n make `peakgroup` larger to reduce the effect of noise.\n\n Parameters\n ----------\n slope_thresh : float, optional\n 1st derivative threshold to count the peak;\n higher values will neglect broader features;\n default is set to 0.\n amp_thresh : float, optional\n intensity threshold below which peaks are ignored;\n higher values will neglect smaller features;\n default is set to 10%% of max(y).\n medfilt_radius : int, optional\n median filter window to apply to smooth the data\n (see :py:func:`scipy.signal.medfilt`);\n if 0, no filter will be applied;\n default is set to 5.\n peakgroup : int, optional\n number of points around the \"top part\" of the peak\n that are taken to estimate the peak height;\n default is set to 10\n maxpeakn : int, optional\n number of maximum detectable peaks;\n default is set to 5000.\n subchannel : bool, default True\n default is set to True.\n %s\n %s\n\n Returns\n -------\n structured array of shape (npeaks) containing fields: 'position',\n 'width', and 'height' for each peak.\n\n\n Raises\n ------\n SignalDimensionError\n If the signal dimension is not 1.\n \"\"\"\n # TODO: add scipy.signal.find_peaks_cwt\n self._check_signal_dimension_equals_one()\n axis = self.axes_manager.signal_axes[0].axis\n peaks = self.map(find_peaks_ohaver,\n x=axis,\n slope_thresh=slope_thresh,\n amp_thresh=amp_thresh,\n medfilt_radius=medfilt_radius,\n maxpeakn=maxpeakn,\n peakgroup=peakgroup,\n subchannel=subchannel,\n ragged=True,\n parallel=parallel,\n max_workers=max_workers,\n inplace=False)\n return peaks.data\n\n find_peaks1D_ohaver.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG)\n\n def estimate_peak_width(\n self,\n factor=0.5,\n window=None,\n return_interval=False,\n parallel=None,\n show_progressbar=None,\n max_workers=None,\n ):\n \"\"\"Estimate the width of the highest intensity of peak\n of the spectra at a given fraction of its maximum.\n\n It can be used with asymmetric peaks. 
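# --- illustrative sketch, not part of the archived file above ---
# estimate_peak_width interpolates the spectrum with a cubic spline and takes
# the roots of (y - factor * max(y)); the two roots bracket the peak at the
# requested height fraction, and their difference is the width (the FWHM for
# factor=0.5). The same trick on a single synthetic peak:
import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0.0, 10.0, 200)
y = np.exp(-(x - 5.0) ** 2 / (2 * 0.8 ** 2))       # Gaussian with sigma = 0.8

factor = 0.5
spline = UnivariateSpline(x, y - factor * y.max(), s=0)
roots = spline.roots()
if len(roots) == 2:                                # as in the implementation
    left, right = roots
    print(right - left)                            # ~2.355 * 0.8, the FWHM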
For accurate results any\n background must be previously substracted.\n The estimation is performed by interpolation using cubic splines.\n\n Parameters\n ----------\n factor : 0 < float < 1\n The default, 0.5, estimates the FWHM.\n window : None or float\n The size of the window centred at the peak maximum\n used to perform the estimation.\n The window size must be chosen with care: if it is narrower\n than the width of the peak at some positions or if it is\n so wide that it includes other more intense peaks this\n method cannot compute the width and a NaN is stored instead.\n return_interval: bool\n If True, returns 2 extra signals with the positions of the\n desired height fraction at the left and right of the\n peak.\n %s\n %s\n %s\n\n Returns\n -------\n width or [width, left, right], depending on the value of\n `return_interval`.\n\n Notes\n -----\n Parallel operation of this function is not supported\n on Windows platforms.\n\n \"\"\"\n if show_progressbar is None:\n show_progressbar = preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n if not 0 < factor < 1:\n raise ValueError(\"factor must be between 0 and 1.\")\n\n if parallel != False and os.name in [\"nt\", \"dos\"]: # pragma: no cover\n # Due to a scipy bug where scipy.interpolate.UnivariateSpline\n # appears to not be thread-safe on Windows, we raise a warning\n # here. See https:\/\/github.com\/hyperspy\/hyperspy\/issues\/2320\n # Until\/if the scipy bug is fixed, we should do this.\n _logger.warning(\n \"Parallel operation is not supported on Windows. \"\n \"Setting `parallel=False`\"\n )\n parallel = False\n\n axis = self.axes_manager.signal_axes[0]\n # x = axis.axis\n maxval = self.axes_manager.navigation_size\n show_progressbar = show_progressbar and maxval > 0\n\n def estimating_function(spectrum,\n window=None,\n factor=0.5,\n axis=None):\n x = axis.axis\n if window is not None:\n vmax = axis.index2value(spectrum.argmax())\n slices = axis._get_array_slices(\n slice(vmax - window * 0.5, vmax + window * 0.5))\n spectrum = spectrum[slices]\n x = x[slices]\n spline = scipy.interpolate.UnivariateSpline(\n x,\n spectrum - factor * spectrum.max(),\n s=0)\n roots = spline.roots()\n if len(roots) == 2:\n return np.array(roots)\n else:\n return np.full((2,), np.nan)\n\n both = self._map_iterate(estimating_function,\n window=window,\n factor=factor,\n axis=axis,\n ragged=False,\n inplace=False,\n parallel=parallel,\n show_progressbar=show_progressbar,\n max_workers=None)\n left, right = both.T.split()\n width = right - left\n if factor == 0.5:\n width.metadata.General.title = (\n self.metadata.General.title + \" FWHM\")\n left.metadata.General.title = (\n self.metadata.General.title + \" FWHM left position\")\n\n right.metadata.General.title = (\n self.metadata.General.title + \" FWHM right position\")\n else:\n width.metadata.General.title = (\n self.metadata.General.title +\n \" full-width at %.1f maximum\" % factor)\n\n left.metadata.General.title = (\n self.metadata.General.title +\n \" full-width at %.1f maximum left position\" % factor)\n right.metadata.General.title = (\n self.metadata.General.title +\n \" full-width at %.1f maximum right position\" % factor)\n for signal in (left, width, right):\n signal.axes_manager.set_signal_dimension(0)\n signal.set_signal_type(\"\")\n if return_interval is True:\n return [width, left, right]\n else:\n return width\n\n estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)\n\n def plot(self,\n navigator=\"auto\",\n 
plot_markers=True,\n autoscale='v',\n norm=\"auto\",\n axes_manager=None,\n navigator_kwds={},\n **kwargs):\n \"\"\"%s\n %s\n %s\n \"\"\"\n for c in autoscale:\n if c not in ['x', 'v']:\n raise ValueError(\"`autoscale` only accepts 'x', 'v' as \"\n \"valid characters.\")\n super().plot(navigator=navigator,\n plot_markers=plot_markers,\n autoscale=autoscale,\n norm=norm,\n axes_manager=axes_manager,\n navigator_kwds=navigator_kwds,\n **kwargs)\n plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,\n PLOT1D_DOCSTRING)\n\n\nclass LazySignal1D(LazySignal, Signal1D):\n\n \"\"\"\n \"\"\"\n _lazy = True\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.axes_manager.set_signal_dimension(1)\n","license":"gpl-3.0"} {"repo_name":"kobejean\/tensorflow","path":"tensorflow\/contrib\/metrics\/python\/ops\/metric_ops.py","copies":"5","size":"178391","content":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains metric-computing operations on streamed tensors.\n\nModule documentation, including \"@@\" callouts, should be put in\nthird_party\/tensorflow\/contrib\/metrics\/__init__.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as collections_lib\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import confusion_matrix\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics\nfrom tensorflow.python.ops import metrics_impl\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.ops.distributions.normal import Normal\nfrom tensorflow.python.util.deprecation import deprecated\n\n# Epsilon constant used to represent extremely small quantity.\n_EPSILON = 1e-7\n\n\ndef _safe_div(numerator, denominator, name):\n \"\"\"Divides two values, returning 0 if the denominator is <= 0.\n\n Args:\n numerator: A real `Tensor`.\n denominator: A real `Tensor`, with dtype matching `numerator`.\n name: Name for the returned op.\n\n Returns:\n 0 if `denominator` <= 0, else `numerator` \/ `denominator`\n \"\"\"\n return array_ops.where(\n math_ops.greater(denominator, 0),\n math_ops.truediv(numerator, denominator),\n 0,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.true_positives. 
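# --- illustrative sketch, not part of the archived file above ---
# _safe_div (defined above) guards a division so a non-positive denominator
# yields 0 instead of inf/nan. A NumPy analogue of the same guard (the file
# itself builds it from TensorFlow ops):
import numpy as np

def safe_div(numerator, denominator):
    """Elementwise numerator / denominator, with 0 wherever denominator <= 0."""
    numerator = np.asarray(numerator, dtype=float)
    denominator = np.asarray(denominator, dtype=float)
    result = np.zeros_like(numerator)
    np.divide(numerator, denominator, out=result, where=denominator > 0)
    return result

print(safe_div([1.0, 2.0, 3.0], [2.0, 0.0, -1.0]))   # [0.5 0.  0. ]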
Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_true_positives(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of true_positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.true_positives(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_true_negatives(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of true_negatives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.true_negatives(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.false_positives. 
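# --- What "sum the weights of true_positives / true_negatives" means concretely,
# as a framework-free NumPy sketch (the real ops accumulate these sums across
# batches in metric variables). Example data is made up.
import numpy as np

predictions = np.array([True, True, False, False])
labels      = np.array([True, False, False, True])
weights     = np.array([1.0, 2.0, 3.0, 4.0])

true_positives = float(np.sum(weights * (predictions & labels)))     # 1.0
true_negatives = float(np.sum(weights * (~predictions & ~labels)))   # 3.0
print(true_positives, true_negatives)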
Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_false_positives(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of false positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.false_positives(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_false_negatives(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the total number of false negatives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n return metrics.false_negatives(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.mean')\ndef streaming_mean(values,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the (weighted) mean of the given values.\n\n The `streaming_mean` function creates two local variables, `total` and `count`\n that are used to compute the average of `values`. This average is ultimately\n returned as `mean` which is an idempotent operation that simply divides\n `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean`.\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights`, and it increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: `Tensor` whose rank is either 0, or the same rank as `values`, and\n must be broadcastable to `values` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `values` dimension).\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean: A `Tensor` representing the current mean, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n return metrics.mean(\n values=values,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.mean_tensor')\ndef streaming_mean_tensor(values,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the element-wise (weighted) mean of the given tensors.\n\n In contrast to the `streaming_mean` function which returns a scalar with the\n mean, this function returns an average tensor with the same shape as the\n input tensors.\n\n The `streaming_mean_tensor` function creates two local variables,\n `total_tensor` and `count_tensor` that are used to compute the average of\n `values`. This average is ultimately returned as `mean` which is an idempotent\n operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean`.\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights`, and it increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: `Tensor` whose rank is either 0, or the same rank as `values`, and\n must be broadcastable to `values` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `values` dimension).\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean: A float `Tensor` representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n return metrics.mean_tensor(\n values=values,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.accuracy. 
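# --- Framework-free sketch of the two-variable ("total" / "count") streaming mean
# described above: each update adds sum(values * weights) to `total` and
# sum(weights) to `count`; the metric value is always total / count. The class
# name and API here are illustrative, not TensorFlow's.
class StreamingMean(object):

    def __init__(self):
        self.total = 0.0
        self.count = 0.0

    def update(self, values, weights=None):
        weights = [1.0] * len(values) if weights is None else weights
        self.total += sum(v * w for v, w in zip(values, weights))
        self.count += sum(weights)
        return self.result()

    def result(self):
        return self.total / self.count if self.count > 0 else 0.0

m = StreamingMean()
m.update([1.0, 2.0, 3.0])                  # first batch
print(m.update([4.0], weights=[2.0]))      # (6 + 8) / (3 + 2) = 2.8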
Note that the order '\n 'of the labels and predictions arguments has been switched.')\ndef streaming_accuracy(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculates how often `predictions` matches `labels`.\n\n The `streaming_accuracy` function creates two local variables, `total` and\n `count` that are used to compute the frequency with which `predictions`\n matches `labels`. This frequency is ultimately returned as `accuracy`: an\n idempotent operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `accuracy`.\n Internally, an `is_correct` operation computes a `Tensor` with elements 1.0\n where the corresponding elements of `predictions` and `labels` match and 0.0\n otherwise. Then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `is_correct`, and it increments `count` with the\n reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of any shape.\n labels: The ground truth values, a `Tensor` whose shape matches\n `predictions`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `accuracy` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n accuracy: A `Tensor` representing the accuracy, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `accuracy`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.accuracy(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '\n 'of the labels and predictions arguments has been switched.')\ndef streaming_precision(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the precision of the predictions with respect to the labels.\n\n The `streaming_precision` function creates two local variables,\n `true_positives` and `false_positives`, that are used to compute the\n precision. This value is ultimately returned as `precision`, an idempotent\n operation that simply divides `true_positives` by the sum of `true_positives`\n and `false_positives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision`. `update_op` weights each prediction by the corresponding value in\n `weights`.\n\n If `weights` is `None`, weights default to 1. 
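# --- Weighted accuracy as described in the docstring above: an `is_correct`
# indicator (1.0 on a match, 0.0 otherwise) that is weighted and averaged.
# Standalone sketch, not the TensorFlow implementation.
import numpy as np

def weighted_accuracy(predictions, labels, weights=None):
    predictions = np.asarray(predictions)
    labels = np.asarray(labels)
    if weights is None:
        weights = np.ones(labels.shape, dtype=float)
    is_correct = (predictions == labels).astype(float)
    return float(np.sum(is_correct * weights) / np.sum(weights))

print(weighted_accuracy([1, 0, 1, 1], [1, 0, 0, 1]))                  # 0.75
print(weighted_accuracy([1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 2, 0]))    # 0.5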
Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `precision` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n precision: Scalar float `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.precision(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '\n 'of the labels and predictions arguments has been switched.')\ndef streaming_recall(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the recall of the predictions with respect to the labels.\n\n The `streaming_recall` function creates two local variables, `true_positives`\n and `false_negatives`, that are used to compute the recall. This value is\n ultimately returned as `recall`, an idempotent operation that simply divides\n `true_positives` by the sum of `true_positives` and `false_negatives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` that updates these variables and returns the `recall`. `update_op`\n weights each prediction by the corresponding value in `weights`.\n\n If `weights` is `None`, weights default to 1. 
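# --- Precision = TP / (TP + FP) and recall = TP / (TP + FN) from weighted
# confusion counts, mirroring the two docstrings above. Standalone sketch only;
# the streaming versions accumulate TP/FP/FN across batches in metric variables.
import numpy as np

def precision_recall(predictions, labels, weights=None):
    p = np.asarray(predictions, dtype=bool)
    l = np.asarray(labels, dtype=bool)
    w = np.ones(p.shape, dtype=float) if weights is None else np.asarray(weights, float)
    tp = float(np.sum(w * (p & l)))
    fp = float(np.sum(w * (p & ~l)))
    fn = float(np.sum(w * (~p & l)))
    precision = tp / (tp + fp) if tp + fp > 0 else 0.0
    recall = tp / (tp + fn) if tp + fn > 0 else 0.0
    return precision, recall

print(precision_recall([True, True, False, True], [True, False, False, True]))
# (0.666..., 1.0)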
Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `recall` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n recall: Scalar float `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.recall(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_false_positive_rate(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the false positive rate of predictions with respect to labels.\n\n The `false_positive_rate` function creates two local variables,\n `false_positives` and `true_negatives`, that are used to compute the\n false positive rate. This value is ultimately returned as\n `false_positive_rate`, an idempotent operation that simply divides\n `false_positives` by the sum of `false_positives` and `true_negatives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `false_positive_rate`. `update_op` weights each prediction by the\n corresponding value in `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `false_positive_rate` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n false_positive_rate: Scalar float `Tensor` with the value of\n `false_positives` divided by the sum of `false_positives` and\n `true_negatives`.\n update_op: `Operation` that increments `false_positives` and\n `true_negatives` variables appropriately and whose value matches\n `false_positive_rate`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'false_positive_rate',\n (predictions, labels, weights)):\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n\n false_p, false_positives_update_op = metrics.false_positives(\n labels=labels,\n predictions=predictions,\n weights=weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n true_n, true_negatives_update_op = metrics.true_negatives(\n labels=labels,\n predictions=predictions,\n weights=weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n\n def compute_fpr(fp, tn, name):\n return array_ops.where(\n math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)\n\n fpr = compute_fpr(false_p, true_n, 'value')\n update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,\n 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, fpr)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return fpr, update_op\n\n\ndef streaming_false_negative_rate(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the false negative rate of predictions with respect to labels.\n\n The `false_negative_rate` function creates two local variables,\n `false_negatives` and `true_positives`, that are used to compute the\n false positive rate. This value is ultimately returned as\n `false_negative_rate`, an idempotent operation that simply divides\n `false_negatives` by the sum of `false_negatives` and `true_positives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `false_negative_rate`. `update_op` weights each prediction by the\n corresponding value in `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will\n be cast to `bool`.\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `false_negative_rate` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n false_negative_rate: Scalar float `Tensor` with the value of\n `false_negatives` divided by the sum of `false_negatives` and\n `true_positives`.\n update_op: `Operation` that increments `false_negatives` and\n `true_positives` variables appropriately and whose value matches\n `false_negative_rate`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'false_negative_rate',\n (predictions, labels, weights)):\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n\n false_n, false_negatives_update_op = metrics.false_negatives(\n labels,\n predictions,\n weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n true_p, true_positives_update_op = metrics.true_positives(\n labels,\n predictions,\n weights,\n metrics_collections=None,\n updates_collections=None,\n name=None)\n\n def compute_fnr(fn, tp, name):\n return array_ops.where(\n math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)\n\n fnr = compute_fnr(false_n, true_p, 'value')\n update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,\n 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, fnr)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return fnr, update_op\n\n\ndef _streaming_confusion_matrix_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None,\n includes=None):\n \"\"\"Computes true_positives, false_negatives, true_negatives, false_positives.\n\n This function creates up to four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives`.\n `true_positive[i]` is defined as the total weight of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `false_negatives[i]` is defined as the total weight of values in `predictions`\n at most `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `true_negatives[i]` is defined as the total weight of values in `predictions`\n at most `thresholds[i]` whose corresponding entry in `labels` is `False`.\n `false_positives[i]` is defined as the total weight of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `False`.\n\n For estimation of these metrics over a stream of data, for each metric the\n function respectively creates an `update_op` operation that updates the\n variable and returns its value.\n\n If `weights` is `None`, weights default to 1. 
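# --- FPR = FP / (FP + TN) and FNR = FN / (FN + TP), returning 0 whenever the
# denominator is 0, which is the `compute_fpr` / `compute_fnr` arithmetic above.
# Framework-free sketch with unweighted counts.
import numpy as np

def fpr_fnr(predictions, labels):
    p = np.asarray(predictions, dtype=bool)
    l = np.asarray(labels, dtype=bool)
    fp = float(np.sum(p & ~l))
    tn = float(np.sum(~p & ~l))
    fn = float(np.sum(~p & l))
    tp = float(np.sum(p & l))
    fpr = fp / (fp + tn) if fp + tn > 0 else 0.0
    fnr = fn / (fn + tp) if fn + tp > 0 else 0.0
    return fpr, fnr

print(fpr_fnr([True, False, True, False], [True, True, False, False]))   # (0.5, 0.5)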
Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast\n to `bool`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n must be either `1`, or the same as the corresponding `labels`\n dimension).\n includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,\n default to all four.\n\n Returns:\n values: Dict of variables of shape `[len(thresholds)]`. Keys are from\n `includes`.\n update_ops: Dict of operations that increments the `values`. Keys are from\n `includes`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `includes` contains invalid keys.\n \"\"\"\n all_includes = ('tp', 'fn', 'tn', 'fp')\n if includes is None:\n includes = all_includes\n else:\n for include in includes:\n if include not in all_includes:\n raise ValueError('Invalid key: %s.' % include)\n\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions, labels, weights)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n num_thresholds = len(thresholds)\n\n # Reshape predictions and labels.\n predictions_2d = array_ops.reshape(predictions, [-1, 1])\n labels_2d = array_ops.reshape(\n math_ops.cast(labels, dtype=dtypes.bool), [1, -1])\n\n # Use static shape if known.\n num_predictions = predictions_2d.get_shape().as_list()[0]\n\n # Otherwise use dynamic shape.\n if num_predictions is None:\n num_predictions = array_ops.shape(predictions_2d)[0]\n thresh_tiled = array_ops.tile(\n array_ops.expand_dims(array_ops.constant(thresholds), [1]),\n array_ops.stack([1, num_predictions]))\n\n # Tile the predictions after thresholding them across different thresholds.\n pred_is_pos = math_ops.greater(\n array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),\n thresh_tiled)\n if ('fn' in includes) or ('tn' in includes):\n pred_is_neg = math_ops.logical_not(pred_is_pos)\n\n # Tile labels by number of thresholds\n label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])\n if ('fp' in includes) or ('tn' in includes):\n label_is_neg = math_ops.logical_not(label_is_pos)\n\n if weights is not None:\n broadcast_weights = weights_broadcast_ops.broadcast_weights(\n math_ops.to_float(weights), predictions)\n weights_tiled = array_ops.tile(\n array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])\n thresh_tiled.get_shape().assert_is_compatible_with(\n weights_tiled.get_shape())\n else:\n weights_tiled = None\n\n values = {}\n update_ops = {}\n\n if 'tp' in includes:\n true_positives = metrics_impl.metric_variable(\n [num_thresholds], dtypes.float32, name='true_positives')\n is_true_positive = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_pos))\n if weights_tiled is not None:\n is_true_positive *= weights_tiled\n update_ops['tp'] = state_ops.assign_add(true_positives,\n math_ops.reduce_sum(\n is_true_positive, 1))\n values['tp'] = true_positives\n\n if 'fn' in includes:\n false_negatives = metrics_impl.metric_variable(\n [num_thresholds], dtypes.float32, name='false_negatives')\n is_false_negative = math_ops.to_float(\n 
math_ops.logical_and(label_is_pos, pred_is_neg))\n if weights_tiled is not None:\n is_false_negative *= weights_tiled\n update_ops['fn'] = state_ops.assign_add(false_negatives,\n math_ops.reduce_sum(\n is_false_negative, 1))\n values['fn'] = false_negatives\n\n if 'tn' in includes:\n true_negatives = metrics_impl.metric_variable(\n [num_thresholds], dtypes.float32, name='true_negatives')\n is_true_negative = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_neg))\n if weights_tiled is not None:\n is_true_negative *= weights_tiled\n update_ops['tn'] = state_ops.assign_add(true_negatives,\n math_ops.reduce_sum(\n is_true_negative, 1))\n values['tn'] = true_negatives\n\n if 'fp' in includes:\n false_positives = metrics_impl.metric_variable(\n [num_thresholds], dtypes.float32, name='false_positives')\n is_false_positive = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_pos))\n if weights_tiled is not None:\n is_false_positive *= weights_tiled\n update_ops['fp'] = state_ops.assign_add(false_positives,\n math_ops.reduce_sum(\n is_false_positive, 1))\n values['fp'] = false_positives\n\n return values, update_ops\n\n\ndef streaming_true_positives_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None):\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights=weights, includes=('tp',))\n return values['tp'], update_ops['tp']\n\n\ndef streaming_false_negatives_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None):\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights=weights, includes=('fn',))\n return values['fn'], update_ops['fn']\n\n\ndef streaming_false_positives_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None):\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights=weights, includes=('fp',))\n return values['fp'], update_ops['fp']\n\n\ndef streaming_true_negatives_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None):\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights=weights, includes=('tn',))\n return values['tn'], update_ops['tn']\n\n\ndef streaming_curve_points(labels=None,\n predictions=None,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n curve='ROC',\n name=None):\n \"\"\"Computes curve (ROC or PR) values for a prespecified number of points.\n\n The `streaming_curve_points` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n that are used to compute the curve values. To discretize the curve, a linearly\n spaced set of thresholds is used to compute pairs of recall and precision\n values.\n\n For best results, `predictions` should be distributed approximately uniformly\n in the range [0, 1] and not peaked around 0 or 1.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: A `Tensor` whose shape matches `predictions`. 
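# --- Framework-free sketch of the per-threshold confusion counts that
# _streaming_confusion_matrix_at_thresholds builds above: broadcast the
# predictions against a column of thresholds to get a
# [num_thresholds, num_predictions] boolean matrix, then reduce each row with
# the (optional) weights. Names and example data are illustrative.
import numpy as np

def confusion_at_thresholds(labels, predictions, thresholds, weights=None):
    labels = np.asarray(labels, dtype=bool).reshape(1, -1)
    predictions = np.asarray(predictions, dtype=float).reshape(1, -1)
    thresholds = np.asarray(thresholds, dtype=float).reshape(-1, 1)
    if weights is None:
        weights = np.ones(predictions.shape[1], dtype=float)
    pred_is_pos = predictions > thresholds          # shape [T, N] via broadcasting
    counts = lambda mask: (mask * weights).sum(axis=1)
    return {'tp': counts(pred_is_pos & labels),
            'fp': counts(pred_is_pos & ~labels),
            'fn': counts(~pred_is_pos & labels),
            'tn': counts(~pred_is_pos & ~labels)}

print(confusion_at_thresholds([1, 0, 1, 0], [0.9, 0.8, 0.3, 0.1], [0.0, 0.5, 1.0]))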
Will be cast to\n `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use when discretizing the roc\n curve.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n curve: Specifies the name of the curve to be computed, 'ROC' [default] or\n 'PR' for the Precision-Recall-curve.\n name: An optional variable_scope name.\n\n Returns:\n points: A `Tensor` with shape [num_thresholds, 2] that contains points of\n the curve.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n\n TODO(chizeng): Consider rewriting this method to make use of logic within the\n precision_recall_at_equal_thresholds method (to improve run time).\n \"\"\"\n with variable_scope.variable_scope(name, 'curve_points',\n (labels, predictions, weights)):\n if curve != 'ROC' and curve != 'PR':\n raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))\n kepsilon = _EPSILON # to account for floating point imprecisions\n thresholds = [\n (i + 1) * 1.0 \/ (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n labels=labels,\n predictions=predictions,\n thresholds=thresholds,\n weights=weights)\n\n # Add epsilons to avoid dividing by 0.\n epsilon = 1.0e-6\n\n def compute_points(tp, fn, tn, fp):\n \"\"\"Computes the roc-auc or pr-auc based on confusion counts.\"\"\"\n rec = math_ops.div(tp + epsilon, tp + fn + epsilon)\n if curve == 'ROC':\n fp_rate = math_ops.div(fp, fp + tn + epsilon)\n return fp_rate, rec\n else: # curve == 'PR'.\n prec = math_ops.div(tp + epsilon, tp + fp + epsilon)\n return rec, prec\n\n xs, ys = compute_points(values['tp'], values['fn'], values['tn'],\n values['fp'])\n points = array_ops.stack([xs, ys], axis=1)\n update_op = control_flow_ops.group(*update_ops.values())\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, points)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return points, update_op\n\n\n@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '\n 'the labels and predictions arguments has been switched.')\ndef streaming_auc(predictions,\n labels,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n curve='ROC',\n name=None):\n \"\"\"Computes the approximate AUC via a Riemann sum.\n\n The `streaming_auc` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` that are used to\n compute the AUC. To discretize the AUC curve, a linearly spaced set of\n thresholds is used to compute pairs of recall and precision values. 
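# --- ROC / PR curve points from per-threshold confusion counts, following the
# `compute_points` arithmetic above (a small epsilon keeps the divisions finite).
# The counts here are made up for three thresholds.
import numpy as np

eps = 1e-6
tp = np.array([10.0, 7.0, 3.0])
fn = np.array([0.0, 3.0, 7.0])
tn = np.array([0.0, 6.0, 9.0])
fp = np.array([10.0, 4.0, 1.0])

recall = (tp + eps) / (tp + fn + eps)
fp_rate = fp / (fp + tn + eps)                   # ROC: x = FPR,    y = recall
precision = (tp + eps) / (tp + fp + eps)         # PR:  x = recall, y = precision

print(np.stack([fp_rate, recall], axis=1))       # ROC points
print(np.stack([recall, precision], axis=1))     # PR points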
The area\n under the ROC-curve is therefore computed using the height of the recall\n values by the false positive rate, while the area under the PR-curve is the\n computed using the height of the precision values by the recall.\n\n This value is ultimately returned as `auc`, an idempotent operation that\n computes the area under a discretized curve of precision versus recall values\n (computed using the aforementioned variables). The `num_thresholds` variable\n controls the degree of discretization with larger numbers of thresholds more\n closely approximating the true AUC. The quality of the approximation may vary\n dramatically depending on `num_thresholds`.\n\n For best results, `predictions` should be distributed approximately uniformly\n in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC\n approximation may be poor if this is not the case.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `auc`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use when discretizing the roc\n curve.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n curve: Specifies the name of the curve to be computed, 'ROC' [default] or\n 'PR' for the Precision-Recall-curve.\n name: An optional variable_scope name.\n\n Returns:\n auc: A scalar `Tensor` representing the current area-under-curve.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `auc`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.auc(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n num_thresholds=num_thresholds,\n curve=curve,\n updates_collections=updates_collections,\n name=name)\n\n\ndef _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):\n \"\"\"Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.\n\n Computes the area under the ROC or PR curve using each prediction as a\n threshold. 
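# --- The Riemann/trapezoid-sum idea behind the AUC approximation described
# above, applied to plain (x, y) curve points. Standalone sketch; with few
# thresholds the approximation can be coarse, which is the caveat the docstring
# gives about `num_thresholds`.
import numpy as np

def trapezoid_auc(x, y):
    order = np.argsort(x)
    x = np.asarray(x, dtype=float)[order]
    y = np.asarray(y, dtype=float)[order]
    return float(np.sum((x[1:] - x[:-1]) * (y[1:] + y[:-1]) / 2.0))

# ROC curve of a perfect classifier discretized at three points -> area 1.0
print(trapezoid_auc([0.0, 0.0, 1.0], [0.0, 1.0, 1.0]))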
This could be slow for large batches, but has the advantage of not\n having its results degrade depending on the distribution of predictions.\n\n Args:\n labels: A `Tensor` of ground truth labels with the same shape as\n `predictions` with values of 0 or 1 and type `int64`.\n predictions: A 1-D `Tensor` of predictions whose values are `float64`.\n curve: The name of the curve to be computed, 'ROC' for the Receiving\n Operating Characteristic or 'PR' for the Precision-Recall curve.\n weights: A 1-D `Tensor` of weights whose values are `float64`.\n\n Returns:\n A scalar `Tensor` containing the area-under-curve value for the input.\n \"\"\"\n # Compute the total weight and the total positive weight.\n size = array_ops.size(predictions)\n if weights is None:\n weights = array_ops.ones_like(labels, dtype=dtypes.float64)\n labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(\n labels, predictions, weights)\n total_weight = math_ops.reduce_sum(weights)\n total_positive = math_ops.reduce_sum(\n array_ops.where(\n math_ops.greater(labels, 0), weights,\n array_ops.zeros_like(labels, dtype=dtypes.float64)))\n\n def continue_computing_dynamic_auc():\n \"\"\"Continues dynamic auc computation, entered if labels are not all equal.\n\n Returns:\n A scalar `Tensor` containing the area-under-curve value.\n \"\"\"\n # Sort the predictions descending, keeping the same order for the\n # corresponding labels and weights.\n ordered_predictions, indices = nn.top_k(predictions, k=size)\n ordered_labels = array_ops.gather(labels, indices)\n ordered_weights = array_ops.gather(weights, indices)\n\n # Get the counts of the unique ordered predictions.\n _, _, counts = array_ops.unique_with_counts(ordered_predictions)\n\n # Compute the indices of the split points between different predictions.\n splits = math_ops.cast(\n array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)\n\n # Count the positives to the left of the split indices.\n true_positives = array_ops.gather(\n array_ops.pad(\n math_ops.cumsum(\n array_ops.where(\n math_ops.greater(ordered_labels, 0), ordered_weights,\n array_ops.zeros_like(ordered_labels,\n dtype=dtypes.float64))),\n paddings=[[1, 0]]), splits)\n if curve == 'ROC':\n # Compute the weight of the negatives to the left of every split point and\n # the total weight of the negatives number of negatives for computing the\n # FPR.\n false_positives = array_ops.gather(\n array_ops.pad(\n math_ops.cumsum(\n array_ops.where(\n math_ops.less(ordered_labels, 1), ordered_weights,\n array_ops.zeros_like(\n ordered_labels, dtype=dtypes.float64))),\n paddings=[[1, 0]]), splits)\n total_negative = total_weight - total_positive\n x_axis_values = math_ops.truediv(false_positives, total_negative)\n y_axis_values = math_ops.truediv(true_positives, total_positive)\n elif curve == 'PR':\n x_axis_values = math_ops.truediv(true_positives, total_positive)\n # For conformance, set precision to 1 when the number of positive\n # classifications is 0.\n positives = array_ops.gather(\n array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),\n splits)\n y_axis_values = array_ops.where(\n math_ops.greater(splits, 0),\n math_ops.truediv(true_positives, positives),\n array_ops.ones_like(true_positives, dtype=dtypes.float64))\n\n # Calculate trapezoid areas.\n heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) \/ 2.0\n widths = math_ops.abs(\n math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))\n return math_ops.reduce_sum(math_ops.multiply(heights, 
widths))\n\n # If all the labels are the same, AUC isn't well-defined (but raising an\n # exception seems excessive) so we return 0, otherwise we finish computing.\n return control_flow_ops.cond(\n math_ops.logical_or(\n math_ops.equal(total_positive, 0), math_ops.equal(\n total_positive, total_weight)),\n true_fn=lambda: array_ops.constant(0, dtypes.float64),\n false_fn=continue_computing_dynamic_auc)\n\n\ndef streaming_dynamic_auc(labels,\n predictions,\n curve='ROC',\n metrics_collections=(),\n updates_collections=(),\n name=None,\n weights=None):\n \"\"\"Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.\n\n USAGE NOTE: this approach requires storing all of the predictions and labels\n for a single evaluation in memory, so it may not be usable when the evaluation\n batch size and\/or the number of evaluation steps is very large.\n\n Computes the area under the ROC or PR curve using each prediction as a\n threshold. This has the advantage of being resilient to the distribution of\n predictions by aggregating across batches, accumulating labels and predictions\n and performing the final calculation using all of the concatenated values.\n\n Args:\n labels: A `Tensor` of ground truth labels with the same shape as `labels`\n and with values of 0 or 1 whose values are castable to `int64`.\n predictions: A `Tensor` of predictions whose values are castable to\n `float64`. Will be flattened into a 1-D `Tensor`.\n curve: The name of the curve for which to compute AUC, 'ROC' for the\n Receiving Operating Characteristic or 'PR' for the Precision-Recall curve.\n metrics_collections: An optional iterable of collections that `auc` should\n be added to.\n updates_collections: An optional iterable of collections that `update_op`\n should be added to.\n name: An optional name for the variable_scope that contains the metric\n variables.\n weights: A 'Tensor' of non-negative weights whose values are castable to\n `float64`. 
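# --- Sketch of the "every prediction is a threshold" ROC AUC described above:
# sort by score, take cumulative TP / FP counts, and integrate the resulting
# curve with trapezoids. Ties between scores are ignored here for brevity,
# unlike the unique/segment handling in _compute_dynamic_auc.
import numpy as np

def dynamic_roc_auc(labels, predictions):
    labels = np.asarray(labels, dtype=float)
    order = np.argsort(-np.asarray(predictions, dtype=float))   # descending score
    labels = labels[order]
    tp = np.concatenate([[0.0], np.cumsum(labels)])
    fp = np.concatenate([[0.0], np.cumsum(1.0 - labels)])
    tpr = tp / tp[-1]
    fpr = fp / fp[-1]
    return float(np.trapz(tpr, fpr))

print(dynamic_roc_auc([1, 1, 0, 0], [0.9, 0.4, 0.6, 0.1]))   # 0.75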
Will be flattened into a 1-D `Tensor`.\n\n Returns:\n auc: A scalar `Tensor` containing the current area-under-curve value.\n update_op: An operation that concatenates the input labels and predictions\n to the accumulated values.\n\n Raises:\n ValueError: If `labels` and `predictions` have mismatched shapes or if\n `curve` isn't a recognized curve type.\n \"\"\"\n\n if curve not in ['PR', 'ROC']:\n raise ValueError('curve must be either ROC or PR, %s unknown' % curve)\n\n with variable_scope.variable_scope(name, default_name='dynamic_auc'):\n labels.get_shape().assert_is_compatible_with(predictions.get_shape())\n predictions = array_ops.reshape(\n math_ops.cast(predictions, dtypes.float64), [-1])\n labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])\n with ops.control_dependencies([\n check_ops.assert_greater_equal(\n labels,\n array_ops.zeros_like(labels, dtypes.int64),\n message='labels must be 0 or 1, at least one is <0'),\n check_ops.assert_less_equal(\n labels,\n array_ops.ones_like(labels, dtypes.int64),\n message='labels must be 0 or 1, at least one is >1'),\n ]):\n preds_accum, update_preds = streaming_concat(\n predictions, name='concat_preds')\n labels_accum, update_labels = streaming_concat(\n labels, name='concat_labels')\n if weights is not None:\n weights = array_ops.reshape(\n math_ops.cast(weights, dtypes.float64), [-1])\n weights_accum, update_weights = streaming_concat(\n weights, name='concat_weights')\n update_op = control_flow_ops.group(update_labels, update_preds,\n update_weights)\n else:\n weights_accum = None\n update_op = control_flow_ops.group(update_labels, update_preds)\n auc = _compute_dynamic_auc(\n labels_accum, preds_accum, curve=curve, weights=weights_accum)\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n if metrics_collections:\n ops.add_to_collections(metrics_collections, auc)\n return auc, update_op\n\n\ndef _compute_placement_auc(labels, predictions, weights, alpha,\n logit_transformation, is_valid):\n \"\"\"Computes the AUC and asymptotic normally distributed confidence interval.\n\n The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the\n concept of placement values for each labeled group, as presented by Delong and\n Delong (1988). The actual algorithm used is a more computationally efficient\n approach presented by Sun and Xu (2014). This could be slow for large batches,\n but has the advantage of not having its results degrade depending on the\n distribution of predictions.\n\n Args:\n labels: A `Tensor` of ground truth labels with the same shape as\n `predictions` with values of 0 or 1 and type `int64`.\n predictions: A 1-D `Tensor` of predictions whose values are `float64`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`.\n alpha: Confidence interval level desired.\n logit_transformation: A boolean value indicating whether the estimate should\n be logit transformed prior to calculating the confidence interval. 
Doing\n so enforces the restriction that the AUC should never be outside the\n interval [0,1].\n is_valid: A bool tensor describing whether the input is valid.\n\n Returns:\n A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence\n interval values.\n \"\"\"\n # Disable the invalid-name checker so that we can capitalize the name.\n # pylint: disable=invalid-name\n AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])\n # pylint: enable=invalid-name\n\n # If all the labels are the same or if number of observations are too few,\n # AUC isn't well-defined\n size = array_ops.size(predictions, out_type=dtypes.int32)\n\n # Count the total number of positive and negative labels in the input.\n total_0 = math_ops.reduce_sum(\n math_ops.cast(1 - labels, weights.dtype) * weights)\n total_1 = math_ops.reduce_sum(\n math_ops.cast(labels, weights.dtype) * weights)\n\n # Sort the predictions ascending, as well as\n # (i) the corresponding labels and\n # (ii) the corresponding weights.\n ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)\n ordered_predictions = array_ops.reverse(\n ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))\n indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))\n ordered_labels = array_ops.gather(labels, indices)\n ordered_weights = array_ops.gather(weights, indices)\n\n # We now compute values required for computing placement values.\n\n # We generate a list of indices (segmented_indices) of increasing order. An\n # index is assigned for each unique prediction float value. Prediction\n # values that are the same share the same index.\n _, segmented_indices = array_ops.unique(ordered_predictions)\n\n # We create 2 tensors of weights. weights_for_true is non-zero for true\n # labels. weights_for_false is non-zero for false labels.\n float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)\n float_labels_for_false = 1.0 - float_labels_for_true\n weights_for_true = ordered_weights * float_labels_for_true\n weights_for_false = ordered_weights * float_labels_for_false\n\n # For each set of weights with the same segmented indices, we add up the\n # weight values. Note that for each label, we deliberately rely on weights\n # for the opposite label.\n weight_totals_for_true = math_ops.segment_sum(weights_for_false,\n segmented_indices)\n weight_totals_for_false = math_ops.segment_sum(weights_for_true,\n segmented_indices)\n\n # These cumulative sums of weights importantly exclude the current weight\n # sums.\n cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,\n exclusive=True)\n cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,\n exclusive=True)\n\n # Compute placement values using the formula. 
Values with the same segmented\n # indices and labels share the same placement values.\n placements_for_true = (\n (cum_weight_totals_for_true + weight_totals_for_true \/ 2.0) \/\n (math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))\n placements_for_false = (\n (cum_weight_totals_for_false + weight_totals_for_false \/ 2.0) \/\n (math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))\n\n # We expand the tensors of placement values (for each label) so that their\n # shapes match that of predictions.\n placements_for_true = array_ops.gather(placements_for_true, segmented_indices)\n placements_for_false = array_ops.gather(placements_for_false,\n segmented_indices)\n\n # Select placement values based on the label for each index.\n placement_values = (\n placements_for_true * float_labels_for_true +\n placements_for_false * float_labels_for_false)\n\n # Split placement values by labeled groups.\n placement_values_0 = placement_values * math_ops.cast(\n 1 - ordered_labels, weights.dtype)\n weights_0 = ordered_weights * math_ops.cast(\n 1 - ordered_labels, weights.dtype)\n placement_values_1 = placement_values * math_ops.cast(\n ordered_labels, weights.dtype)\n weights_1 = ordered_weights * math_ops.cast(\n ordered_labels, weights.dtype)\n\n # Calculate AUC using placement values\n auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) \/\n (total_0 + _EPSILON))\n auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) \/\n (total_1 + _EPSILON))\n auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)\n\n # Calculate variance and standard error using the placement values.\n var_0 = (\n math_ops.reduce_sum(\n weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) \/\n (total_0 - 1. + _EPSILON))\n var_1 = (\n math_ops.reduce_sum(\n weights_1 * math_ops.square(placement_values_1 - auc_1)) \/\n (total_1 - 1. + _EPSILON))\n auc_std_err = math_ops.sqrt(\n (var_0 \/ (total_0 + _EPSILON)) + (var_1 \/ (total_1 + _EPSILON)))\n\n # Calculate asymptotic normal confidence intervals\n std_norm_dist = Normal(loc=0., scale=1.)\n z_value = std_norm_dist.quantile((1.0 - alpha) \/ 2.0)\n if logit_transformation:\n estimate = math_ops.log(auc \/ (1. - auc + _EPSILON))\n std_err = auc_std_err \/ (auc * (1. - auc + _EPSILON))\n transformed_auc_lower = estimate + (z_value * std_err)\n transformed_auc_upper = estimate - (z_value * std_err)\n def inverse_logit_transformation(x):\n exp_negative = math_ops.exp(math_ops.negative(x))\n return 1. \/ (1. + exp_negative + _EPSILON)\n\n auc_lower = inverse_logit_transformation(transformed_auc_lower)\n auc_upper = inverse_logit_transformation(transformed_auc_upper)\n else:\n estimate = auc\n std_err = auc_std_err\n auc_lower = estimate + (z_value * std_err)\n auc_upper = estimate - (z_value * std_err)\n\n ## If estimate is 1 or 0, no variance is present so CI = 1\n ## n.b. 
This can be misleading, since number obs can just be too low.\n lower = array_ops.where(\n math_ops.logical_or(\n math_ops.equal(auc, array_ops.ones_like(auc)),\n math_ops.equal(auc, array_ops.zeros_like(auc))),\n auc, auc_lower)\n upper = array_ops.where(\n math_ops.logical_or(\n math_ops.equal(auc, array_ops.ones_like(auc)),\n math_ops.equal(auc, array_ops.zeros_like(auc))),\n auc, auc_upper)\n\n # If all the labels are the same, AUC isn't well-defined (but raising an\n # exception seems excessive) so we return 0, otherwise we finish computing.\n trivial_value = array_ops.constant(0.0)\n\n return AucData(*control_flow_ops.cond(\n is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))\n\n\ndef auc_with_confidence_intervals(labels,\n predictions,\n weights=None,\n alpha=0.95,\n logit_transformation=True,\n metrics_collections=(),\n updates_collections=(),\n name=None):\n \"\"\"Computes the AUC and asymptotic normally distributed confidence interval.\n\n USAGE NOTE: this approach requires storing all of the predictions and labels\n for a single evaluation in memory, so it may not be usable when the evaluation\n batch size and\/or the number of evaluation steps is very large.\n\n Computes the area under the ROC curve and its confidence interval using\n placement values. This has the advantage of being resilient to the\n distribution of predictions by aggregating across batches, accumulating labels\n and predictions and performing the final calculation using all of the\n concatenated values.\n\n Args:\n labels: A `Tensor` of ground truth labels with the same shape as `labels`\n and with values of 0 or 1 whose values are castable to `int64`.\n predictions: A `Tensor` of predictions whose values are castable to\n `float64`. Will be flattened into a 1-D `Tensor`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`.\n alpha: Confidence interval level desired.\n logit_transformation: A boolean value indicating whether the estimate should\n be logit transformed prior to calculating the confidence interval. 
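# --- Much-simplified, unweighted sketch of the placement-value AUC and its
# normal-approximation confidence interval described above (DeLong & DeLong,
# 1988): a positive's placement is the fraction of negatives scored below it
# (ties count 1/2), AUC is the mean placement, and the per-class variance of the
# placements gives the standard error. This is the O(n_pos * n_neg) textbook
# form, not the faster Sun & Xu style computation used above, and it omits the
# logit transform that keeps the interval inside [0, 1].
import numpy as np
from scipy import stats

def placement_auc_ci(labels, scores, alpha=0.95):
    labels = np.asarray(labels, dtype=bool)
    scores = np.asarray(scores, dtype=float)
    pos, neg = scores[labels], scores[~labels]
    # Pairwise comparisons: 1 if pos > neg, 0.5 on ties, 0 otherwise.
    psi = (pos[:, None] > neg[None, :]) + 0.5 * (pos[:, None] == neg[None, :])
    v10 = psi.mean(axis=1)                      # placements of the positives
    v01 = psi.mean(axis=0)                      # placements of the negatives
    auc = float(v10.mean())
    var = v10.var(ddof=1) / len(pos) + v01.var(ddof=1) / len(neg)
    half = stats.norm.ppf(1.0 - (1.0 - alpha) / 2.0) * np.sqrt(var)
    return auc, auc - half, auc + half

print(placement_auc_ci([1, 1, 1, 0, 0, 0], [0.9, 0.8, 0.4, 0.6, 0.3, 0.2]))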
Doing\n so enforces the restriction that the AUC should never be outside the\n interval [0,1].\n metrics_collections: An optional iterable of collections that `auc` should\n be added to.\n updates_collections: An optional iterable of collections that `update_op`\n should be added to.\n name: An optional name for the variable_scope that contains the metric\n variables.\n\n Returns:\n auc: A 1-D `Tensor` containing the current area-under-curve, lower, and\n upper confidence interval values.\n update_op: An operation that concatenates the input labels and predictions\n to the accumulated values.\n\n Raises:\n ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes\n or if `alpha` isn't in the range (0,1).\n \"\"\"\n if not (alpha > 0 and alpha < 1):\n raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)\n\n if weights is None:\n weights = array_ops.ones_like(predictions)\n\n with variable_scope.variable_scope(\n name,\n default_name='auc_with_confidence_intervals',\n values=[labels, predictions, weights]):\n\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions=predictions,\n labels=labels,\n weights=weights)\n\n total_weight = math_ops.reduce_sum(weights)\n\n weights = array_ops.reshape(weights, [-1])\n predictions = array_ops.reshape(\n math_ops.cast(predictions, dtypes.float64), [-1])\n labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])\n\n with ops.control_dependencies([\n check_ops.assert_greater_equal(\n labels,\n array_ops.zeros_like(labels, dtypes.int64),\n message='labels must be 0 or 1, at least one is <0'),\n check_ops.assert_less_equal(\n labels,\n array_ops.ones_like(labels, dtypes.int64),\n message='labels must be 0 or 1, at least one is >1'),\n ]):\n preds_accum, update_preds = streaming_concat(\n predictions, name='concat_preds')\n labels_accum, update_labels = streaming_concat(labels,\n name='concat_labels')\n weights_accum, update_weights = streaming_concat(\n weights, name='concat_weights')\n update_op_for_valid_case = control_flow_ops.group(\n update_labels, update_preds, update_weights)\n\n # Only perform updates if this case is valid.\n all_labels_positive_or_0 = math_ops.logical_and(\n math_ops.equal(math_ops.reduce_min(labels), 0),\n math_ops.equal(math_ops.reduce_max(labels), 1))\n sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)\n is_valid = math_ops.logical_and(all_labels_positive_or_0,\n sums_of_weights_at_least_1)\n\n update_op = control_flow_ops.cond(\n sums_of_weights_at_least_1,\n lambda: update_op_for_valid_case, control_flow_ops.no_op)\n\n auc = _compute_placement_auc(\n labels_accum,\n preds_accum,\n weights_accum,\n alpha=alpha,\n logit_transformation=logit_transformation,\n is_valid=is_valid)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n if metrics_collections:\n ops.add_to_collections(metrics_collections, auc)\n return auc, update_op\n\n\ndef precision_recall_at_equal_thresholds(labels,\n predictions,\n weights=None,\n num_thresholds=None,\n use_locking=None,\n name=None):\n \"\"\"A helper method for creating metrics related to precision-recall curves.\n\n These values are true positives, false negatives, true negatives, false\n positives, precision, and recall. 
This function returns a data structure that\n contains ops within it.\n\n Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)\n space and run time), this op exhibits O(T + N) space and run time, where T is\n the number of thresholds and N is the size of the predictions tensor. Hence,\n it may be advantageous to use this function when `predictions` is big.\n\n For instance, prefer this method for per-pixel classification tasks, for which\n the predictions tensor may be very large.\n\n Each number in `predictions`, a float in `[0, 1]`, is compared with its\n corresponding label in `labels`, and counts as a single tp\/fp\/tn\/fn value at\n each threshold. This is then multiplied with `weights` which can be used to\n reweight certain values, or more commonly used for masking values.\n\n Args:\n labels: A bool `Tensor` whose shape matches `predictions`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n weights: Optional; If provided, a `Tensor` that has the same dtype as,\n and broadcastable to, `predictions`. This tensor is multiplied by counts.\n num_thresholds: Optional; Number of thresholds, evenly distributed in\n `[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins\n is 1 less than `num_thresholds`. Using an even `num_thresholds` value\n instead of an odd one may yield unfriendly edges for bins.\n use_locking: Optional; If True, the op will be protected by a lock.\n Otherwise, the behavior is undefined, but may exhibit less contention.\n Defaults to True.\n name: Optional; variable_scope name. If not provided, the string\n 'precision_recall_at_equal_threshold' is used.\n\n Returns:\n result: A named tuple (See PrecisionRecallData within the implementation of\n this function) with properties that are variables of shape\n `[num_thresholds]`. The names of the properties are tp, fp, tn, fn,\n precision, recall, thresholds. 
Types are same as that of predictions.\n update_op: An op that accumulates values.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `includes` contains invalid keys.\n \"\"\"\n # Disable the invalid-name checker so that we can capitalize the name.\n # pylint: disable=invalid-name\n PrecisionRecallData = collections_lib.namedtuple(\n 'PrecisionRecallData',\n ['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])\n # pylint: enable=invalid-name\n\n if num_thresholds is None:\n num_thresholds = 201\n\n if weights is None:\n weights = 1.0\n\n if use_locking is None:\n use_locking = True\n\n check_ops.assert_type(labels, dtypes.bool)\n\n with variable_scope.variable_scope(name,\n 'precision_recall_at_equal_thresholds',\n (labels, predictions, weights)):\n # Make sure that predictions are within [0.0, 1.0].\n with ops.control_dependencies([\n check_ops.assert_greater_equal(\n predictions,\n math_ops.cast(0.0, dtype=predictions.dtype),\n message='predictions must be in [0, 1]'),\n check_ops.assert_less_equal(\n predictions,\n math_ops.cast(1.0, dtype=predictions.dtype),\n message='predictions must be in [0, 1]')\n ]):\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions=predictions,\n labels=labels,\n weights=weights)\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n # It's important we aggregate using float64 since we're accumulating a lot\n # of 1.0's for the true\/false labels, and accumulating to float32 will\n # be quite inaccurate even with just a modest amount of values (~20M).\n # We use float64 instead of integer primarily since GPU scatter kernel\n # only support floats.\n agg_dtype = dtypes.float64\n\n f_labels = math_ops.cast(labels, agg_dtype)\n weights = math_ops.cast(weights, agg_dtype)\n true_labels = f_labels * weights\n false_labels = (1.0 - f_labels) * weights\n\n # Flatten predictions and labels.\n predictions = array_ops.reshape(predictions, [-1])\n true_labels = array_ops.reshape(true_labels, [-1])\n false_labels = array_ops.reshape(false_labels, [-1])\n\n # To compute TP\/FP\/TN\/FN, we are measuring a binary classifier\n # C(t) = (predictions >= t)\n # at each threshold 't'. So we have\n # TP(t) = sum( C(t) * true_labels )\n # FP(t) = sum( C(t) * false_labels )\n #\n # But, computing C(t) requires computation for each t. To make it fast,\n # observe that C(t) is a cumulative integral, and so if we have\n # thresholds = [t_0, ..., t_{n-1}]; t_0 < ... 
< t_{n-1}\n # where n = num_thresholds, and if we can compute the bucket function\n # B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )\n # then we get\n # C(t_i) = sum( B(j), j >= i )\n # which is the reversed cumulative sum in tf.cumsum().\n #\n # We can compute B(i) efficiently by taking advantage of the fact that\n # our thresholds are evenly distributed, in that\n # width = 1.0 \/ (num_thresholds - 1)\n # thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]\n # Given a prediction value p, we can map it to its bucket by\n # bucket_index(p) = floor( p * (num_thresholds - 1) )\n # so we can use tf.scatter_add() to update the buckets in one pass.\n #\n # This implementation exhibits a run time and space complexity of O(T + N),\n # where T is the number of thresholds and N is the size of predictions.\n # Metrics that rely on _streaming_confusion_matrix_at_thresholds instead\n # exhibit a complexity of O(T * N).\n\n # Compute the bucket indices for each prediction value.\n bucket_indices = math_ops.cast(\n math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)\n\n with ops.name_scope('variables'):\n tp_buckets_v = metrics_impl.metric_variable(\n [num_thresholds], agg_dtype, name='tp_buckets')\n fp_buckets_v = metrics_impl.metric_variable(\n [num_thresholds], agg_dtype, name='fp_buckets')\n\n with ops.name_scope('update_op'):\n update_tp = state_ops.scatter_add(\n tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)\n update_fp = state_ops.scatter_add(\n fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)\n\n # Set up the cumulative sums to compute the actual metrics.\n tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')\n fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')\n # fn = sum(true_labels) - tp\n # = sum(tp_buckets) - tp\n # = tp[0] - tp\n # Similarly,\n # tn = fp[0] - fp\n tn = fp[0] - fp\n fn = tp[0] - tp\n\n # We use a minimum to prevent division by 0.\n epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)\n precision = tp \/ math_ops.maximum(epsilon, tp + fp)\n recall = tp \/ math_ops.maximum(epsilon, tp + fn)\n\n # Convert all tensors back to predictions' dtype (as per function contract).\n out_dtype = predictions.dtype\n _convert = lambda tensor: math_ops.cast(tensor, out_dtype)\n result = PrecisionRecallData(\n tp=_convert(tp),\n fp=_convert(fp),\n tn=_convert(tn),\n fn=_convert(fn),\n precision=_convert(precision),\n recall=_convert(recall),\n thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))\n update_op = control_flow_ops.group(update_tp, update_fp)\n return result, update_op\n\n\ndef streaming_specificity_at_sensitivity(predictions,\n labels,\n sensitivity,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the specificity at a given sensitivity.\n\n The `streaming_specificity_at_sensitivity` function creates four local\n variables, `true_positives`, `true_negatives`, `false_positives` and\n `false_negatives` that are used to compute the specificity at the given\n sensitivity value. The threshold for the given sensitivity value is computed\n and used to evaluate the corresponding specificity.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `specificity`. 
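\n\n  A rough usage sketch (illustrative values and TF1 session code, assuming\n  `import tensorflow as tf`; not part of the original docstring):\n\n    labels = tf.constant([True, True, False, False, True])\n    predictions = tf.constant([0.9, 0.7, 0.6, 0.2, 0.4])\n    specificity, update_op = streaming_specificity_at_sensitivity(\n        predictions, labels, sensitivity=0.66)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)      # accumulate the confusion-matrix counts\n      print(sess.run(specificity))\n\n  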
`update_op` increments the `true_positives`, `true_negatives`,\n `false_positives` and `false_negatives` counts with the weight of each case\n found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n For additional information about specificity and sensitivity, see the\n following: https:\/\/en.wikipedia.org\/wiki\/Sensitivity_and_specificity\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n sensitivity: A scalar value in range `[0, 1]`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use for matching the given\n sensitivity.\n metrics_collections: An optional list of collections that `specificity`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n specificity: A scalar `Tensor` representing the specificity at the given\n `specificity` value.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `specificity`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `sensitivity` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n \"\"\"\n return metrics.specificity_at_sensitivity(\n sensitivity=sensitivity,\n num_thresholds=num_thresholds,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_sensitivity_at_specificity(predictions,\n labels,\n specificity,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the sensitivity at a given specificity.\n\n The `streaming_sensitivity_at_specificity` function creates four local\n variables, `true_positives`, `true_negatives`, `false_positives` and\n `false_negatives` that are used to compute the sensitivity at the given\n specificity value. The threshold for the given specificity value is computed\n and used to evaluate the corresponding sensitivity.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,\n `false_positives` and `false_negatives` counts with the weight of each case\n found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. 
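\n\n  For illustration only (made-up values, TF1 session idiom assumed), a zero weight\n  drops an example from the running counts:\n\n    labels = tf.constant([True, False, True, False])\n    predictions = tf.constant([0.8, 0.6, 0.3, 0.1])\n    weights = tf.constant([1.0, 1.0, 0.0, 1.0])   # third example is masked out\n    sensitivity, update_op = streaming_sensitivity_at_specificity(\n        predictions, labels, specificity=0.9, weights=weights)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(sensitivity))\n\n  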
Use weights of 0 to mask values.\n\n For additional information about specificity and sensitivity, see the\n following: https:\/\/en.wikipedia.org\/wiki\/Sensitivity_and_specificity\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n specificity: A scalar value in range `[0, 1]`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use for matching the given\n specificity.\n metrics_collections: An optional list of collections that `sensitivity`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n sensitivity: A scalar `Tensor` representing the sensitivity at the given\n `specificity` value.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `sensitivity`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `specificity` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n \"\"\"\n return metrics.sensitivity_at_specificity(\n specificity=specificity,\n num_thresholds=num_thresholds,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None,\n 'Please switch to tf.metrics.precision_at_thresholds. Note that '\n 'the order of the labels and predictions arguments are switched.')\ndef streaming_precision_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision values for different `thresholds` on `predictions`.\n\n The `streaming_precision_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds. `precision[i]` is defined as the total\n weight of values in `predictions` above `thresholds[i]` whose corresponding\n entry in `labels` is `True`, divided by the total weight of values in\n `predictions` above `thresholds[i]` (`true_positives[i] \/ (true_positives[i] +\n false_positives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision`.\n\n If `weights` is `None`, weights default to 1. 
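\n\n  A short usage sketch (values and session boilerplate are illustrative\n  assumptions, not part of the original docstring):\n\n    labels = tf.constant([True, False, True, False])\n    predictions = tf.constant([0.9, 0.8, 0.4, 0.1])\n    precision, update_op = streaming_precision_at_thresholds(\n        predictions, labels, thresholds=[0.25, 0.5, 0.75])\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(precision))   # one precision value per threshold\n\n  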
Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `precision` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n precision: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.precision_at_thresholds(\n thresholds=thresholds,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None,\n 'Please switch to tf.metrics.recall_at_thresholds. Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_recall_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes various recall values for different `thresholds` on `predictions`.\n\n The `streaming_recall_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds. `recall[i]` is defined as the total weight\n of values in `predictions` above `thresholds[i]` whose corresponding entry in\n `labels` is `True`, divided by the total weight of `True` values in `labels`\n (`true_positives[i] \/ (true_positives[i] + false_negatives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `recall`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `recall` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n recall: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.recall_at_thresholds(\n thresholds=thresholds,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_false_positive_rate_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes various fpr values for different `thresholds` on `predictions`.\n\n The `streaming_false_positive_rate_at_thresholds` function creates two\n local variables, `false_positives`, `true_negatives`, for various values of\n thresholds. `false_positive_rate[i]` is defined as the total weight\n of values in `predictions` above `thresholds[i]` whose corresponding entry in\n `labels` is `False`, divided by the total weight of `False` values in `labels`\n (`false_positives[i] \/ (false_positives[i] + true_negatives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `false_positive_rate`.\n\n If `weights` is `None`, weights default to 1. 
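\n\n  Sketch of typical use (illustrative data and TF1 session code, assumed rather\n  than taken from the original text):\n\n    labels = tf.constant([False, False, True, True])\n    predictions = tf.constant([0.9, 0.3, 0.8, 0.2])\n    fpr, update_op = streaming_false_positive_rate_at_thresholds(\n        predictions, labels, thresholds=[0.25, 0.5, 0.75])\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(fpr))   # one false positive rate per threshold\n\n  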
Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `false_positive_rate` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `false_positives` and\n `true_negatives` variables that are used in the computation of\n `false_positive_rate`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',\n (predictions, labels, weights)):\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights, includes=('fp', 'tn'))\n\n # Avoid division by zero.\n epsilon = _EPSILON\n\n def compute_fpr(fp, tn, name):\n return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)\n\n fpr = compute_fpr(values['fp'], values['tn'], 'value')\n update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, fpr)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return fpr, update_op\n\n\ndef streaming_false_negative_rate_at_thresholds(predictions,\n labels,\n thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes various fnr values for different `thresholds` on `predictions`.\n\n The `streaming_false_negative_rate_at_thresholds` function creates two\n local variables, `false_negatives`, `true_positives`, for various values of\n thresholds. `false_negative_rate[i]` is defined as the total weight\n of values in `predictions` above `thresholds[i]` whose corresponding entry in\n `labels` is `False`, divided by the total weight of `True` values in `labels`\n (`false_negatives[i] \/ (false_negatives[i] + true_positives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `false_positive_rate`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `false_negative_rate` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `false_negatives` and\n `true_positives` variables that are used in the computation of\n `false_negative_rate`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',\n (predictions, labels, weights)):\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights, includes=('fn', 'tp'))\n\n # Avoid division by zero.\n epsilon = _EPSILON\n\n def compute_fnr(fn, tp, name):\n return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)\n\n fnr = compute_fnr(values['fn'], values['tp'], 'value')\n update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, fnr)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return fnr, update_op\n\n\ndef _at_k_name(name, k=None, class_id=None):\n if k is not None:\n name = '%s_at_%d' % (name, k)\n else:\n name = '%s_at_k' % (name)\n if class_id is not None:\n name = '%s_class%d' % (name, class_id)\n return name\n\n\n@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '\n 'and reshape labels from [batch_size] to [batch_size, 1].')\ndef streaming_recall_at_k(predictions,\n labels,\n k,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the recall@k of the predictions with respect to dense labels.\n\n The `streaming_recall_at_k` function creates two local variables, `total` and\n `count`, that are used to compute the recall@k frequency. This frequency is\n ultimately returned as `recall_at_`: an idempotent operation that simply\n divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall_at_`. Internally, an `in_top_k` operation computes a `Tensor` with\n shape [batch_size] whose elements indicate whether or not the corresponding\n label is in the top `k` `predictions`. Then `update_op` increments `total`\n with the reduced sum of `weights` where `in_top_k` is `True`, and it\n increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. 
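\n\n  An illustrative sketch with dense integer labels (values, names and the TF1\n  session setup are assumptions):\n\n    predictions = tf.constant([[0.1, 0.7, 0.2],\n                               [0.8, 0.15, 0.05]])\n    labels = tf.constant([1, 2])             # dense class ids, shape [batch_size]\n    recall, update_op = streaming_recall_at_k(predictions, labels, k=2)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(recall))                # fraction of labels found in the top 2\n\n  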
Use weights of 0 to mask values.\n\n Args:\n predictions: A float `Tensor` of dimension [batch_size, num_classes].\n labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,\n `int64`.\n k: The number of top elements to look at for computing recall.\n weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and\n must be broadcastable to `labels` (i.e., all dimensions must be either\n `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `recall_at_k`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n recall_at_k: A `Tensor` representing the recall@k, the fraction of labels\n which fall into the top `k` predictions.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `recall_at_k`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))\n return streaming_mean(in_top_k, weights, metrics_collections,\n updates_collections, name or _at_k_name('recall', k))\n\n\n# TODO(ptucker): Validate range of values in labels?\ndef streaming_sparse_recall_at_k(predictions,\n labels,\n k,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes recall@k of the predictions with respect to sparse labels.\n\n If `class_id` is not specified, we'll calculate recall as the ratio of true\n positives (i.e., correct predictions, items in the top `k` highest\n `predictions` that are found in the corresponding row in `labels`) to\n actual positives (the full `labels` row).\n If `class_id` is specified, we calculate recall by considering only the rows\n in the batch for which `class_id` is in `labels`, and computing the\n fraction of them for which `class_id` is in the corresponding row in\n `labels`.\n\n `streaming_sparse_recall_at_k` creates two local variables,\n `true_positive_at_` and `false_negative_at_`, that are used to compute\n the recall_at_k frequency. This frequency is ultimately returned as\n `recall_at_`: an idempotent operation that simply divides\n `true_positive_at_` by total (`true_positive_at_` +\n `false_negative_at_`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall_at_`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false negatives weighted by\n `weights`. Then `update_op` increments `true_positive_at_` and\n `false_negative_at_` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. 
Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.\n Values should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. Values outside this range always count\n towards `false_negative_at_`.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If class_id is outside this range, the method returns NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n recall: Scalar `float64` `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately, and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n return metrics.recall_at_k(\n k=k,\n class_id=class_id,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n# TODO(ptucker): Validate range of values in labels?\ndef streaming_sparse_precision_at_k(predictions,\n labels,\n k,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of the predictions with respect to sparse labels.\n\n If `class_id` is not specified, we calculate precision as the ratio of true\n positives (i.e., correct predictions, items in the top `k` highest\n `predictions` that are found in the corresponding row in `labels`) to\n positives (all top `k` `predictions`).\n If `class_id` is specified, we calculate precision by considering only the\n rows in the batch for which `class_id` is in the top `k` highest\n `predictions`, and computing the fraction of them for which `class_id` is\n in the corresponding row in `labels`.\n\n We expect precision to decrease as `k` increases.\n\n `streaming_sparse_precision_at_k` creates two local variables,\n `true_positive_at_` and `false_positive_at_`, that are used to compute\n the precision@k frequency. This frequency is ultimately returned as\n `precision_at_`: an idempotent operation that simply divides\n `true_positive_at_` by total (`true_positive_at_` +\n `false_positive_at_`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_` and\n `false_positive_at_` using these values.\n\n If `weights` is `None`, weights default to 1. 
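\n\n  A minimal sketch of how the metric is typically driven (tensor contents and the\n  TF1 session code are illustrative assumptions):\n\n    predictions = tf.constant([[0.1, 0.6, 0.3],\n                               [0.7, 0.2, 0.1]])\n    labels = tf.constant([[1], [2]], dtype=tf.int64)   # one target class per row\n    precision, update_op = streaming_sparse_precision_at_k(\n        predictions, labels, k=2)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(precision))\n\n  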
Use weights of 0 to mask values.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes], where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n return metrics.precision_at_k(\n k=k,\n class_id=class_id,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n# TODO(ptucker): Validate range of values in labels?\ndef streaming_sparse_precision_at_top_k(top_k_predictions,\n labels,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of top-k predictions with respect to sparse labels.\n\n If `class_id` is not specified, we calculate precision as the ratio of\n true positives (i.e., correct predictions, items in `top_k_predictions`\n that are found in the corresponding row in `labels`) to positives (all\n `top_k_predictions`).\n If `class_id` is specified, we calculate precision by considering only the\n rows in the batch for which `class_id` is in the top `k` highest\n `predictions`, and computing the fraction of them for which `class_id` is\n in the corresponding row in `labels`.\n\n We expect precision to decrease as `k` increases.\n\n `streaming_sparse_precision_at_top_k` creates two local variables,\n `true_positive_at_k` and `false_positive_at_k`, that are used to compute\n the precision@k frequency. 
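\n\n  As an illustrative sketch only (indices, labels and session boilerplate are\n  made up), the caller supplies already-computed top-k indices:\n\n    top_k_predictions = tf.constant([[1, 2], [0, 1]], dtype=tf.int64)\n    labels = tf.constant([[1], [2]], dtype=tf.int64)\n    precision, update_op = streaming_sparse_precision_at_top_k(\n        top_k_predictions, labels)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(precision))\n\n  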
This frequency is ultimately returned as\n `precision_at_k`: an idempotent operation that simply divides\n `true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_k`. Internally, set operations applied to `top_k_predictions`\n and `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_k` and\n `false_positive_at_k` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where\n N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].\n The final dimension contains the indices of top-k labels. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `top_k_predictions`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n ValueError: If `top_k_predictions` has rank < 2.\n \"\"\"\n default_name = _at_k_name('precision', class_id=class_id)\n with ops.name_scope(name, default_name,\n (top_k_predictions, labels, weights)) as name_scope:\n return metrics_impl.precision_at_top_k(\n labels=labels,\n predictions_idx=top_k_predictions,\n class_id=class_id,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name_scope)\n\n\ndef sparse_recall_at_top_k(labels,\n top_k_predictions,\n class_id=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes recall@k of top-k predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate recall by considering only the\n entries in the batch for which `class_id` is in the label, and computing\n the fraction of them for which `class_id` is in the top-k `predictions`.\n If `class_id` is not specified, we'll calculate recall 
as how often on\n average a class among the labels of a batch entry is in the top-k\n `predictions`.\n\n `sparse_recall_at_top_k` creates two local variables, `true_positive_at_`\n and `false_negative_at_`, that are used to compute the recall_at_k\n frequency. This frequency is ultimately returned as `recall_at_`: an\n idempotent operation that simply divides `true_positive_at_` by total\n (`true_positive_at_` + `false_negative_at_`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall_at_`. Set operations applied to `top_k` and `labels` calculate the\n true positives and false negatives weighted by `weights`. Then `update_op`\n increments `true_positive_at_` and `false_negative_at_` using these\n values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `top_k_predictions`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range always count towards `false_negative_at_`.\n top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where\n N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].\n The final dimension contains the indices of top-k labels. [D1, ... DN]\n must match `labels`.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If class_id is outside this range, the method returns NAN.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. 
If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n recall: Scalar `float64` `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately, and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n default_name = _at_k_name('recall', class_id=class_id)\n with ops.name_scope(name, default_name,\n (top_k_predictions, labels, weights)) as name_scope:\n return metrics_impl.recall_at_top_k(\n labels=labels,\n predictions_idx=top_k_predictions,\n class_id=class_id,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name_scope)\n\n\ndef _compute_recall_at_precision(tp, fp, fn, precision, name,\n strict_mode=False):\n \"\"\"Helper function to compute recall at a given `precision`.\n\n Args:\n tp: The number of true positives.\n fp: The number of false positives.\n fn: The number of false negatives.\n precision: The precision for which the recall will be calculated.\n name: An optional variable_scope name.\n strict_mode: If true and there exists a threshold where the precision is\n no smaller than the target precision, return the corresponding recall at\n the threshold. Otherwise, return 0. If false, find the threshold where the\n precision is closest to the target precision and return the recall at the\n threshold.\n\n Returns:\n The recall at a given `precision`.\n \"\"\"\n precisions = math_ops.div(tp, tp + fp + _EPSILON)\n if not strict_mode:\n tf_index = math_ops.argmin(\n math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)\n # Now, we have the implicit threshold, so compute the recall:\n return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,\n name)\n else:\n # We aim to find the threshold where the precision is minimum but no smaller\n # than the target precision.\n # The rationale:\n # 1. Compute the difference between precisions (by different thresholds) and\n # the target precision.\n # 2. Take the reciprocal of the values by the above step. 
The intention is\n # to make the positive values rank before negative values and also the\n # smaller positives rank before larger positives.\n tf_index = math_ops.argmax(\n math_ops.div(1.0, precisions - precision + _EPSILON),\n 0,\n output_type=dtypes.int32)\n\n def _return_good_recall():\n return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,\n name)\n\n return control_flow_ops.cond(precisions[tf_index] >= precision,\n _return_good_recall, lambda: .0)\n\n\ndef recall_at_precision(labels,\n predictions,\n precision,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n name=None,\n strict_mode=False):\n \"\"\"Computes `recall` at `precision`.\n\n The `recall_at_precision` function creates four local variables,\n `tp` (true positives), `fp` (false positives) and `fn` (false negatives)\n that are used to compute the `recall` at the given `precision` value. The\n threshold for the given `precision` value is computed and used to evaluate the\n corresponding `recall`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the\n weight of each case found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n precision: A scalar value in range `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use for matching the given\n `precision`.\n metrics_collections: An optional list of collections that `recall`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n strict_mode: If true and there exists a threshold where the precision is\n above the target precision, return the corresponding recall at the\n threshold. Otherwise, return 0. 
If false, find the threshold where the\n precision is closest to the target precision and return the recall at the\n threshold.\n\n Returns:\n recall: A scalar `Tensor` representing the recall at the given\n `precision` value.\n update_op: An operation that increments the `tp`, `fp` and `fn`\n variables appropriately and whose value matches `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `precision` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n\n \"\"\"\n if not 0 <= precision <= 1:\n raise ValueError('`precision` must be in the range [0, 1].')\n\n with variable_scope.variable_scope(name, 'recall_at_precision',\n (predictions, labels, weights)):\n thresholds = [\n i * 1.0 \/ (num_thresholds - 1) for i in range(1, num_thresholds - 1)\n ]\n thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]\n\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights)\n\n recall = _compute_recall_at_precision(values['tp'], values['fp'],\n values['fn'], precision, 'value',\n strict_mode)\n update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],\n update_ops['fn'], precision,\n 'update_op', strict_mode)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, recall)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return recall, update_op\n\n\ndef precision_at_recall(labels,\n predictions,\n target_recall,\n weights=None,\n num_thresholds=200,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the precision at a given recall.\n\n This function creates variables to track the true positives, false positives,\n true negatives, and false negatives at a set of thresholds. Among those\n thresholds where recall is at least `target_recall`, precision is computed\n at the threshold where recall is closest to `target_recall`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n precision at `target_recall`. `update_op` increments the counts of true\n positives, false positives, true negatives, and false negatives with the\n weight of each case found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n For additional information about precision and recall, see\n http:\/\/en.wikipedia.org\/wiki\/Precision_and_recall\n\n Args:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n target_recall: A scalar value in range `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n num_thresholds: The number of thresholds to use for matching the given\n recall.\n metrics_collections: An optional list of collections to which `precision`\n should be added.\n updates_collections: An optional list of collections to which `update_op`\n should be added.\n name: An optional variable_scope name.\n\n Returns:\n precision: A scalar `Tensor` representing the precision at the given\n `target_recall` value.\n update_op: An operation that increments the variables for tracking the\n true positives, false positives, true negatives, and false negatives and\n whose value matches `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `target_recall` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.precision_at_recall is not '\n 'supported when eager execution is enabled.')\n\n if target_recall < 0 or target_recall > 1:\n raise ValueError('`target_recall` must be in the range [0, 1].')\n\n with variable_scope.variable_scope(name, 'precision_at_recall',\n (predictions, labels, weights)):\n kepsilon = 1e-7 # Used to avoid division by zero.\n thresholds = [\n (i + 1) * 1.0 \/ (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n values, update_ops = _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights)\n\n def compute_precision_at_recall(tp, fp, fn, name):\n \"\"\"Computes the precision at a given recall.\n\n Args:\n tp: True positives.\n fp: False positives.\n fn: False negatives.\n name: A name for the operation.\n\n Returns:\n The precision at the desired recall.\n \"\"\"\n recalls = math_ops.div(tp, tp + fn + kepsilon)\n\n # Because recall is monotone decreasing as a function of the threshold,\n # the smallest recall exceeding target_recall occurs at the largest\n # threshold where recall >= target_recall.\n admissible_recalls = math_ops.cast(\n math_ops.greater_equal(recalls, target_recall), dtypes.int64)\n tf_index = math_ops.reduce_sum(admissible_recalls) - 1\n\n # Now we have the threshold at which to compute precision:\n return math_ops.div(tp[tf_index] + kepsilon,\n tp[tf_index] + fp[tf_index] + kepsilon,\n name)\n\n precision_value = compute_precision_at_recall(\n values['tp'], values['fp'], values['fn'], 'value')\n update_op = compute_precision_at_recall(\n update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, precision_value)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return precision_value, update_op\n\n\ndef streaming_sparse_average_precision_at_k(predictions,\n labels,\n k,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n 
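A usage sketch (illustrative tensors and TF1 session code, assumed for the sake of\n  the example):\n\n    predictions = tf.constant([[0.2, 0.5, 0.3]])\n    labels = tf.constant([[2]], dtype=tf.int64)\n    avg_prec, update_op = streaming_sparse_average_precision_at_k(\n        predictions, labels, k=2)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(avg_prec))   # mean average precision over the top 2 ranks\n\n  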
See `sparse_average_precision_at_k` for details on formula. `weights` are\n applied to the result of `sparse_average_precision_at_k`\n\n `streaming_sparse_average_precision_at_k` creates two local variables,\n `average_precision_at_\/total` and `average_precision_at_\/max`, that\n are used to compute the frequency. This frequency is ultimately returned as\n `average_precision_at_`: an idempotent operation that simply divides\n `average_precision_at_\/total` by `average_precision_at_\/max`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_` and\n `false_positive_at_` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and `predictions` has shape\n [batch size, num_classes]. The final dimension contains the logit values\n for each class. [D1, ... DN] must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n k: Integer, k for @k metric. This will calculate an average precision for\n range `[1,k]`, as documented above.\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n mean_average_precision: Scalar `float64` `Tensor` with the mean average\n precision values.\n update: `Operation` that increments variables appropriately, and whose\n value matches `metric`.\n \"\"\"\n return metrics.average_precision_at_k(\n k=k,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_sparse_average_precision_at_top_k(top_k_predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n `streaming_sparse_average_precision_at_top_k` creates two local variables,\n `average_precision_at_\/total` and `average_precision_at_\/max`, that\n are used to compute the frequency. 
This frequency is ultimately returned as\n `average_precision_at_`: an idempotent operation that simply divides\n `average_precision_at_\/total` by `average_precision_at_\/max`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_`. Set operations applied to `top_k` and `labels` calculate\n the true positives and false positives weighted by `weights`. Then `update_op`\n increments `true_positive_at_` and `false_positive_at_` using these\n values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.\n Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final\n dimension must be set and contains the top `k` predicted class indices.\n [D1, ... DN] must match `labels`. Values should be in range\n [0, num_classes).\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies\n num_labels=1. N >= 1 and num_labels is the number of target classes for\n the associated prediction. Commonly, N=1 and `labels` has shape\n [batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.\n Values should be in range [0, num_classes).\n weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of\n `labels`. If the latter, it must be broadcastable to `labels` (i.e., all\n dimensions must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n mean_average_precision: Scalar `float64` `Tensor` with the mean average\n precision values.\n update: `Operation` that increments variables appropriately, and whose\n value matches `metric`.\n\n Raises:\n ValueError: if the last dimension of top_k_predictions is not set.\n \"\"\"\n return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access\n predictions_idx=top_k_predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\n@deprecated(None,\n 'Please switch to tf.metrics.mean_absolute_error. Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_mean_absolute_error(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean absolute error between the labels and predictions.\n\n The `streaming_mean_absolute_error` function creates two local variables,\n `total` and `count` that are used to compute the mean absolute error. This\n average is weighted by `weights`, and it is ultimately returned as\n `mean_absolute_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_absolute_error`. Internally, an `absolute_errors` operation computes the\n absolute value of the differences between `predictions` and `labels`. 
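\n\n  A brief usage sketch (the numbers and the TF1 session idiom are illustrative\n  assumptions):\n\n    predictions = tf.constant([0.5, 2.0, 3.0])\n    labels = tf.constant([1.0, 2.0, 5.0])\n    mae, update_op = streaming_mean_absolute_error(predictions, labels)\n    with tf.Session() as sess:\n      sess.run(tf.local_variables_initializer())\n      sess.run(update_op)\n      print(sess.run(mae))   # mean of the absolute differences 0.5, 0.0 and 2.0\n\n  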
Then\n `update_op` increments `total` with the reduced sum of the product of\n `weights` and `absolute_errors`, and it increments `count` with the reduced\n sum of `weights`\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: Optional `Tensor` indicating the frequency with which an example is\n sampled. Rank must be 0, or the same rank as `labels`, and must be\n broadcastable to `labels` (i.e., all dimensions must be either `1`, or\n the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_absolute_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_absolute_error: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_absolute_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.mean_absolute_error(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_mean_relative_error(predictions,\n labels,\n normalizer,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean relative error by normalizing with the given values.\n\n The `streaming_mean_relative_error` function creates two local variables,\n `total` and `count` that are used to compute the mean relative absolute error.\n This average is weighted by `weights`, and it is ultimately returned as\n `mean_relative_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_reative_error`. Internally, a `relative_errors` operation divides the\n absolute value of the differences between `predictions` and `labels` by the\n `normalizer`. Then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `relative_errors`, and it increments `count` with the\n reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n normalizer: A `Tensor` of the same shape as `predictions`.\n weights: Optional `Tensor` indicating the frequency with which an example is\n sampled. 
Rank must be 0, or the same rank as `labels`, and must be\n broadcastable to `labels` (i.e., all dimensions must be either `1`, or\n the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_relative_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_relative_error: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_relative_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.mean_relative_error(\n normalizer=normalizer,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n@deprecated(None,\n 'Please switch to tf.metrics.mean_squared_error. Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_mean_squared_error(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean squared error between the labels and predictions.\n\n The `streaming_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the mean squared error.\n This average is weighted by `weights`, and it is ultimately returned as\n `mean_squared_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_squared_error`. Internally, a `squared_error` operation computes the\n element-wise square of the difference between `predictions` and `labels`. Then\n `update_op` increments `total` with the reduced sum of the product of\n `weights` and `squared_error`, and it increments `count` with the reduced sum\n of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: Optional `Tensor` indicating the frequency with which an example is\n sampled. 
Rank must be 0, or the same rank as `labels`, and must be\n broadcastable to `labels` (i.e., all dimensions must be either `1`, or\n the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_squared_error: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_squared_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.mean_squared_error(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n@deprecated(\n None,\n 'Please switch to tf.metrics.root_mean_squared_error. Note that the '\n 'order of the labels and predictions arguments has been switched.')\ndef streaming_root_mean_squared_error(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the root mean squared error between the labels and predictions.\n\n The `streaming_root_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the root mean squared error.\n This average is weighted by `weights`, and it is ultimately returned as\n `root_mean_squared_error`: an idempotent operation that takes the square root\n of the division of `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `root_mean_squared_error`. Internally, a `squared_error` operation computes\n the element-wise square of the difference between `predictions` and `labels`.\n Then `update_op` increments `total` with the reduced sum of the product of\n `weights` and `squared_error`, and it increments `count` with the reduced sum\n of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: Optional `Tensor` indicating the frequency with which an example is\n sampled. 
Rank must be 0, or the same rank as `labels`, and must be\n broadcastable to `labels` (i.e., all dimensions must be either `1`, or\n the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that\n `root_mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n root_mean_squared_error: A `Tensor` representing the current mean, the value\n of `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `root_mean_squared_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.root_mean_squared_error(\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_covariance(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the unbiased sample covariance between `predictions` and `labels`.\n\n The `streaming_covariance` function creates four local variables,\n `comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to\n compute the sample covariance between predictions and labels across multiple\n batches of data. The covariance is ultimately returned as an idempotent\n operation that simply divides `comoment` by `count` - 1. We use `count` - 1\n in order to get an unbiased estimate.\n\n The algorithm used for this online computation is described in\n https:\/\/en.wikipedia.org\/wiki\/Algorithms_for_calculating_variance.\n Specifically, the formula used to combine two sample comoments is\n `C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B \/ n_AB`\n The comoment for a single batch of data is simply\n `sum((x - E[x]) * (y - E[y]))`, optionally weighted.\n\n If `weights` is not None, then it is used to compute weighted comoments,\n means, and count. NOTE: these weights are treated as \"frequency weights\", as\n opposed to \"reliability weights\". See discussion of the difference on\n https:\/\/wikipedia.org\/wiki\/Weighted_arithmetic_mean#Weighted_sample_variance\n\n To facilitate the computation of covariance across multiple batches of data,\n the function creates an `update_op` operation, which updates underlying\n variables and returns the updated covariance.\n\n Args:\n predictions: A `Tensor` of arbitrary size.\n labels: A `Tensor` of the same size as `predictions`.\n weights: Optional `Tensor` indicating the frequency with which an example is\n sampled. 
Rank must be 0, or the same rank as `labels`, and must be\n broadcastable to `labels` (i.e., all dimensions must be either `1`, or\n the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n covariance: A `Tensor` representing the current unbiased sample covariance,\n `comoment` \/ (`count` - 1).\n update_op: An operation that updates the local variables appropriately.\n\n Raises:\n ValueError: If labels and predictions are of different sizes or if either\n `metrics_collections` or `updates_collections` are not a list or tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'covariance',\n (predictions, labels, weights)):\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions, labels, weights)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')\n mean_prediction = metrics_impl.metric_variable(\n [], dtypes.float32, name='mean_prediction')\n mean_label = metrics_impl.metric_variable(\n [], dtypes.float32, name='mean_label')\n comoment = metrics_impl.metric_variable( # C_A in update equation\n [], dtypes.float32, name='comoment')\n\n if weights is None:\n batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn\n weighted_predictions = predictions\n weighted_labels = labels\n else:\n weights = weights_broadcast_ops.broadcast_weights(weights, labels)\n batch_count = math_ops.reduce_sum(weights) # n_B in eqn\n weighted_predictions = math_ops.multiply(predictions, weights)\n weighted_labels = math_ops.multiply(labels, weights)\n\n update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn\n prev_count = update_count - batch_count # n_A in update equation\n\n # We update the means by Delta=Error*BatchCount\/(BatchCount+PrevCount)\n # batch_mean_prediction is E[x_B] in the update equation\n batch_mean_prediction = _safe_div(\n math_ops.reduce_sum(weighted_predictions), batch_count,\n 'batch_mean_prediction')\n delta_mean_prediction = _safe_div(\n (batch_mean_prediction - mean_prediction) * batch_count, update_count,\n 'delta_mean_prediction')\n update_mean_prediction = state_ops.assign_add(mean_prediction,\n delta_mean_prediction)\n # prev_mean_prediction is E[x_A] in the update equation\n prev_mean_prediction = update_mean_prediction - delta_mean_prediction\n\n # batch_mean_label is E[y_B] in the update equation\n batch_mean_label = _safe_div(\n math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')\n delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,\n update_count, 'delta_mean_label')\n update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)\n # prev_mean_label is E[y_A] in the update equation\n prev_mean_label = update_mean_label - delta_mean_label\n\n unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *\n (labels - batch_mean_label))\n # batch_comoment is C_B in the update equation\n if weights is None:\n batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)\n else:\n batch_comoment = math_ops.reduce_sum(\n unweighted_batch_coresiduals * weights)\n\n # View delta_comoment as = C_AB - C_A in the update equation above.\n # Since C_A is stored in a var, by how 
much do we need to increment that var\n # to make the var = C_AB?\n delta_comoment = (\n batch_comoment + (prev_mean_prediction - batch_mean_prediction) *\n (prev_mean_label - batch_mean_label) *\n (prev_count * batch_count \/ update_count))\n update_comoment = state_ops.assign_add(comoment, delta_comoment)\n\n covariance = array_ops.where(\n math_ops.less_equal(count_, 1.),\n float('nan'),\n math_ops.truediv(comoment, count_ - 1),\n name='covariance')\n with ops.control_dependencies([update_comoment]):\n update_op = array_ops.where(\n math_ops.less_equal(count_, 1.),\n float('nan'),\n math_ops.truediv(comoment, count_ - 1),\n name='update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, covariance)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return covariance, update_op\n\n\ndef streaming_pearson_correlation(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes Pearson correlation coefficient between `predictions`, `labels`.\n\n The `streaming_pearson_correlation` function delegates to\n `streaming_covariance` the tracking of three [co]variances:\n\n - `streaming_covariance(predictions, labels)`, i.e. covariance\n - `streaming_covariance(predictions, predictions)`, i.e. variance\n - `streaming_covariance(labels, labels)`, i.e. variance\n\n The product-moment correlation ultimately returned is an idempotent operation\n `cov(predictions, labels) \/ sqrt(var(predictions) * var(labels))`. To\n facilitate correlation computation across multiple batches, the function\n groups the `update_op`s of the underlying streaming_covariance and returns an\n `update_op`.\n\n If `weights` is not None, then it is used to compute a weighted correlation.\n NOTE: these weights are treated as \"frequency weights\", as opposed to\n \"reliability weights\". See discussion of the difference on\n https:\/\/wikipedia.org\/wiki\/Weighted_arithmetic_mean#Weighted_sample_variance\n\n Args:\n predictions: A `Tensor` of arbitrary size.\n labels: A `Tensor` of the same size as predictions.\n weights: Optional `Tensor` indicating the frequency with which an example is\n sampled. 
Rank must be 0, or the same rank as `labels`, and must be\n broadcastable to `labels` (i.e., all dimensions must be either `1`, or\n the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n pearson_r: A `Tensor` representing the current Pearson product-moment\n correlation coefficient, the value of\n `cov(predictions, labels) \/ sqrt(var(predictions) * var(labels))`.\n update_op: An operation that updates the underlying variables appropriately.\n\n Raises:\n ValueError: If `labels` and `predictions` are of different sizes, or if\n `weights` is the wrong size, or if either `metrics_collections` or\n `updates_collections` are not a `list` or `tuple`.\n \"\"\"\n with variable_scope.variable_scope(name, 'pearson_r',\n (predictions, labels, weights)):\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions, labels, weights)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n # Broadcast weights here to avoid duplicate broadcasting in each call to\n # `streaming_covariance`.\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(weights, labels)\n cov, update_cov = streaming_covariance(\n predictions, labels, weights=weights, name='covariance')\n var_predictions, update_var_predictions = streaming_covariance(\n predictions, predictions, weights=weights, name='variance_predictions')\n var_labels, update_var_labels = streaming_covariance(\n labels, labels, weights=weights, name='variance_labels')\n\n pearson_r = math_ops.truediv(\n cov,\n math_ops.multiply(\n math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),\n name='pearson_r')\n update_op = math_ops.truediv(\n update_cov,\n math_ops.multiply(\n math_ops.sqrt(update_var_predictions),\n math_ops.sqrt(update_var_labels)),\n name='update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, pearson_r)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return pearson_r, update_op\n\n\n# TODO(nsilberman): add a 'normalized' flag so that the user can request\n# normalization if the inputs are not normalized.\ndef streaming_mean_cosine_distance(predictions,\n labels,\n dim,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the cosine distance between the labels and predictions.\n\n The `streaming_mean_cosine_distance` function creates two local variables,\n `total` and `count` that are used to compute the average cosine distance\n between `predictions` and `labels`. This average is weighted by `weights`,\n and it is ultimately returned as `mean_distance`, which is an idempotent\n operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_distance`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of the same shape as `labels`.\n labels: A `Tensor` of arbitrary shape.\n dim: The dimension along which the cosine distance is computed.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`,\n and whose dimension `dim` is 1.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_distance: A `Tensor` representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions, labels, weights)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n radial_diffs = math_ops.multiply(predictions, labels)\n radial_diffs = math_ops.reduce_sum(\n radial_diffs, reduction_indices=[\n dim,\n ], keepdims=True)\n mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,\n name or 'mean_cosine_distance')\n mean_distance = math_ops.subtract(1.0, mean_distance)\n update_op = math_ops.subtract(1.0, update_op)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_distance)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_distance, update_op\n\n\ndef streaming_percentage_less(values,\n threshold,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the percentage of values less than the given threshold.\n\n The `streaming_percentage_less` function creates two local variables,\n `total` and `count` that are used to compute the percentage of `values` that\n fall below `threshold`. This rate is weighted by `weights`, and it is\n ultimately returned as `percentage` which is an idempotent operation that\n simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `percentage`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n values: A numeric `Tensor` of arbitrary size.\n threshold: A scalar threshold.\n weights: An optional `Tensor` whose shape is broadcastable to `values`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n percentage: A `Tensor` representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n return metrics.percentage_below(\n values=values,\n threshold=threshold,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef streaming_mean_iou(predictions,\n labels,\n num_classes,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculate per-step mean Intersection-Over-Union (mIOU).\n\n Mean Intersection-Over-Union is a common evaluation metric for\n semantic image segmentation, which first computes the IOU for each\n semantic class and then computes the average over classes.\n IOU is defined as follows:\n IOU = true_positive \/ (true_positive + false_positive + false_negative).\n The predictions are accumulated in a confusion matrix, weighted by `weights`,\n and mIOU is then calculated from it.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean_iou`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened, if its rank > 1.\n labels: A `Tensor` of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened, if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. 
This value must be provided, since a confusion matrix of\n dimension = [num_classes, num_classes] will be allocated.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `mean_iou`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_iou: A `Tensor` representing the mean intersection-over-union.\n update_op: An operation that increments the confusion matrix.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n return metrics.mean_iou(\n num_classes=num_classes,\n predictions=predictions,\n labels=labels,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=name)\n\n\ndef _next_array_size(required_size, growth_factor=1.5):\n \"\"\"Calculate the next size for reallocating a dynamic array.\n\n Args:\n required_size: number or tf.Tensor specifying required array capacity.\n growth_factor: optional number or tf.Tensor specifying the growth factor\n between subsequent allocations.\n\n Returns:\n tf.Tensor with dtype=int32 giving the next array size.\n \"\"\"\n exponent = math_ops.ceil(\n math_ops.log(math_ops.cast(required_size, dtypes.float32)) \/ math_ops.log(\n math_ops.cast(growth_factor, dtypes.float32)))\n return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)\n\n\ndef streaming_concat(values,\n axis=0,\n max_size=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Concatenate values along an axis across batches.\n\n The function `streaming_concat` creates two local variables, `array` and\n `size`, that are used to store concatenated values. Internally, `array` is\n used as storage for a dynamic array (if `maxsize` is `None`), which ensures\n that updates can be run in amortized constant time.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that appends the values of a tensor and returns the\n length of the concatenated axis.\n\n This op allows for evaluating metrics that cannot be updated incrementally\n using the same framework as other streaming metrics.\n\n Args:\n values: `Tensor` to concatenate. Rank and the shape along all axes other\n than the axis to concatenate along must be statically known.\n axis: optional integer axis to concatenate along.\n max_size: optional integer maximum size of `value` along the given axis.\n Once the maximum size is reached, further updates are no-ops. 
By default,\n there is no maximum size: the array is resized as necessary.\n metrics_collections: An optional list of collections that `value`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n value: A `Tensor` representing the concatenated values.\n update_op: An operation that concatenates the next values.\n\n Raises:\n ValueError: if `values` does not have a statically known rank, `axis` is\n not in the valid range or the size of `values` is not statically known\n along any axis other than `axis`.\n \"\"\"\n with variable_scope.variable_scope(name, 'streaming_concat', (values,)):\n # pylint: disable=invalid-slice-index\n values_shape = values.get_shape()\n if values_shape.dims is None:\n raise ValueError('`values` must have known statically known rank')\n\n ndim = len(values_shape)\n if axis < 0:\n axis += ndim\n if not 0 <= axis < ndim:\n raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))\n\n fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]\n if any(value is None for value in fixed_shape):\n raise ValueError('all dimensions of `values` other than the dimension to '\n 'concatenate along must have statically known size')\n\n # We move `axis` to the front of the internal array so assign ops can be\n # applied to contiguous slices\n init_size = 0 if max_size is None else max_size\n init_shape = [init_size] + fixed_shape\n array = metrics_impl.metric_variable(\n init_shape, values.dtype, validate_shape=False, name='array')\n size = metrics_impl.metric_variable([], dtypes.int32, name='size')\n\n perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]\n valid_array = array[:size]\n valid_array.set_shape([None] + fixed_shape)\n value = array_ops.transpose(valid_array, perm, name='concat')\n\n values_size = array_ops.shape(values)[axis]\n if max_size is None:\n batch_size = values_size\n else:\n batch_size = math_ops.minimum(values_size, max_size - size)\n\n perm = [axis] + [n for n in range(ndim) if n != axis]\n batch_values = array_ops.transpose(values, perm)[:batch_size]\n\n def reallocate():\n next_size = _next_array_size(new_size)\n next_shape = array_ops.stack([next_size] + fixed_shape)\n new_value = array_ops.zeros(next_shape, dtype=values.dtype)\n old_value = array.value()\n assign_op = state_ops.assign(array, new_value, validate_shape=False)\n with ops.control_dependencies([assign_op]):\n copy_op = array[:size].assign(old_value[:size])\n # return value needs to be the same dtype as no_op() for cond\n with ops.control_dependencies([copy_op]):\n return control_flow_ops.no_op()\n\n new_size = size + batch_size\n array_size = array_ops.shape_internal(array, optimize=False)[0]\n maybe_reallocate_op = control_flow_ops.cond(\n new_size > array_size, reallocate, control_flow_ops.no_op)\n with ops.control_dependencies([maybe_reallocate_op]):\n append_values_op = array[size:new_size].assign(batch_values)\n with ops.control_dependencies([append_values_op]):\n update_op = size.assign(new_size)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, value)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return value, update_op\n # pylint: enable=invalid-slice-index\n\n\ndef aggregate_metrics(*value_update_tuples):\n \"\"\"Aggregates the metric value tensors and update ops into two lists.\n\n Args:\n *value_update_tuples: a variable number of tuples, each of which contain 
the\n pair of (value_tensor, update_op) from a streaming metric.\n\n Returns:\n A list of value `Tensor` objects and a list of update ops.\n\n Raises:\n ValueError: if `value_update_tuples` is empty.\n \"\"\"\n if not value_update_tuples:\n raise ValueError('Expected at least one value_tensor\/update_op pair')\n value_ops, update_ops = zip(*value_update_tuples)\n return list(value_ops), list(update_ops)\n\n\ndef aggregate_metric_map(names_to_tuples):\n \"\"\"Aggregates the metric names to tuple dictionary.\n\n This function is useful for pairing metric names with their associated value\n and update ops when the list of metrics is long. For example:\n\n ```python\n metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({\n 'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(\n predictions, labels, weights),\n 'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(\n predictions, labels, labels, weights),\n 'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(\n predictions, labels, weights),\n 'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(\n predictions, labels, weights),\n })\n ```\n\n Args:\n names_to_tuples: a map of metric names to tuples, each of which contain the\n pair of (value_tensor, update_op) from a streaming metric.\n\n Returns:\n A dictionary from metric names to value ops and a dictionary from metric\n names to update ops.\n \"\"\"\n metric_names = names_to_tuples.keys()\n value_ops, update_ops = zip(*names_to_tuples.values())\n return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))\n\n\ndef count(values,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the number of examples, or sum of `weights`.\n\n This metric keeps track of the denominator in `tf.metrics.mean`.\n When evaluating some metric (e.g. mean) on one or more subsets of the data,\n this auxiliary metric is useful for keeping track of how many examples there\n are in each subset.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions. 
Only it's shape is used.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n must be either `1`, or the same as the corresponding `labels`\n dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n count: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the metric from a batch of data.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.contrib.metrics.count is not supported when eager '\n 'execution is enabled.')\n\n with variable_scope.variable_scope(name, 'count', (values, weights)):\n\n count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')\n\n if weights is None:\n num_values = math_ops.to_float(array_ops.size(values))\n else:\n values = math_ops.to_float(values)\n values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions=values,\n labels=None,\n weights=weights)\n weights = weights_broadcast_ops.broadcast_weights(\n math_ops.to_float(weights), values)\n num_values = math_ops.reduce_sum(weights)\n\n with ops.control_dependencies([values]):\n update_count_op = state_ops.assign_add(count_, num_values)\n\n count_ = metrics_impl._aggregate_variable(count_, metrics_collections) # pylint: disable=protected-access\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_count_op)\n\n return count_, update_count_op\n\n\ndef cohen_kappa(labels,\n predictions_idx,\n num_classes,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculates Cohen's kappa.\n\n [Cohen's kappa](https:\/\/en.wikipedia.org\/wiki\/Cohen's_kappa) is a statistic\n that measures inter-annotator agreement.\n\n The `cohen_kappa` function calculates the confusion matrix, and creates three\n local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,\n which refer to the diagonal part, rows and columns totals of the confusion\n matrix, respectively. This value is ultimately returned as `kappa`, an\n idempotent operation that is calculated by\n\n pe = (pe_row * pe_col) \/ N\n k = (sum(po) - sum(pe)) \/ (N - sum(pe))\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `kappa`. `update_op` weights each prediction by the corresponding value in\n `weights`.\n\n Class labels are expected to start at 0. E.g., if `num_classes`\n was three, then the possible labels would be [0, 1, 2].\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but the method\n doesn't support weighted matrix yet.\n\n Args:\n labels: 1-D `Tensor` of real labels for the classification task. Must be\n one of the following types: int16, int32, int64.\n predictions_idx: 1-D `Tensor` of predicted class indices for a given\n classification. 
Must have the same type as `labels`.\n num_classes: The possible number of labels.\n weights: Optional `Tensor` whose shape matches `predictions`.\n metrics_collections: An optional list of collections that `kappa` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n kappa: Scalar float `Tensor` representing the current Cohen's kappa.\n update_op: `Operation` that increments `po`, `pe_row` and `pe_col`\n variables appropriately and whose value matches `kappa`.\n\n Raises:\n ValueError: If `num_classes` is less than 2, or `predictions` and `labels`\n have mismatched shapes, or if `weights` is not `None` and its shape\n doesn't match `predictions`, or if either `metrics_collections` or\n `updates_collections` are not a list or tuple.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '\n 'when eager execution is enabled.')\n if num_classes < 2:\n raise ValueError('`num_classes` must be >= 2.'\n 'Found: {}'.format(num_classes))\n with variable_scope.variable_scope(name, 'cohen_kappa',\n (labels, predictions_idx, weights)):\n # Convert 2-dim (num, 1) to 1-dim (num,)\n labels.get_shape().with_rank_at_most(2)\n if labels.get_shape().ndims == 2:\n labels = array_ops.squeeze(labels, axis=[-1])\n predictions_idx, labels, weights = (\n metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access\n predictions=predictions_idx,\n labels=labels,\n weights=weights))\n predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())\n\n stat_dtype = (\n dtypes.int64\n if weights is None or weights.dtype.is_integer else dtypes.float32)\n po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')\n pe_row = metrics_impl.metric_variable(\n (num_classes,), stat_dtype, name='pe_row')\n pe_col = metrics_impl.metric_variable(\n (num_classes,), stat_dtype, name='pe_col')\n\n # Table of the counts of agreement:\n counts_in_table = confusion_matrix.confusion_matrix(\n labels,\n predictions_idx,\n num_classes=num_classes,\n weights=weights,\n dtype=stat_dtype,\n name='counts_in_table')\n\n po_t = array_ops.diag_part(counts_in_table)\n pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)\n pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)\n update_po = state_ops.assign_add(po, po_t)\n update_pe_row = state_ops.assign_add(pe_row, pe_row_t)\n update_pe_col = state_ops.assign_add(pe_col, pe_col_t)\n\n def _calculate_k(po, pe_row, pe_col, name):\n po_sum = math_ops.reduce_sum(po)\n total = math_ops.reduce_sum(pe_row)\n pe_sum = math_ops.reduce_sum(\n metrics_impl._safe_div( # pylint: disable=protected-access\n pe_row * pe_col, total, None))\n po_sum, pe_sum, total = (math_ops.to_double(po_sum),\n math_ops.to_double(pe_sum),\n math_ops.to_double(total))\n # kappa = (po - pe) \/ (N - pe)\n k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access\n po_sum - pe_sum,\n total - pe_sum,\n name=name)\n return k\n\n kappa = _calculate_k(po, pe_row, pe_col, name='value')\n update_op = _calculate_k(\n update_po, update_pe_row, update_pe_col, name='update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, kappa)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return kappa, update_op\n\n\n__all__ = [\n 'auc_with_confidence_intervals',\n 'aggregate_metric_map',\n 
'aggregate_metrics',\n 'cohen_kappa',\n 'count',\n 'precision_recall_at_equal_thresholds',\n 'recall_at_precision',\n 'sparse_recall_at_top_k',\n 'streaming_accuracy',\n 'streaming_auc',\n 'streaming_curve_points',\n 'streaming_dynamic_auc',\n 'streaming_false_negative_rate',\n 'streaming_false_negative_rate_at_thresholds',\n 'streaming_false_negatives',\n 'streaming_false_negatives_at_thresholds',\n 'streaming_false_positive_rate',\n 'streaming_false_positive_rate_at_thresholds',\n 'streaming_false_positives',\n 'streaming_false_positives_at_thresholds',\n 'streaming_mean',\n 'streaming_mean_absolute_error',\n 'streaming_mean_cosine_distance',\n 'streaming_mean_iou',\n 'streaming_mean_relative_error',\n 'streaming_mean_squared_error',\n 'streaming_mean_tensor',\n 'streaming_percentage_less',\n 'streaming_precision',\n 'streaming_precision_at_thresholds',\n 'streaming_recall',\n 'streaming_recall_at_k',\n 'streaming_recall_at_thresholds',\n 'streaming_root_mean_squared_error',\n 'streaming_sensitivity_at_specificity',\n 'streaming_sparse_average_precision_at_k',\n 'streaming_sparse_average_precision_at_top_k',\n 'streaming_sparse_precision_at_k',\n 'streaming_sparse_precision_at_top_k',\n 'streaming_sparse_recall_at_k',\n 'streaming_specificity_at_sensitivity',\n 'streaming_true_negatives',\n 'streaming_true_negatives_at_thresholds',\n 'streaming_true_positives',\n 'streaming_true_positives_at_thresholds',\n]\n","license":"apache-2.0"} {"repo_name":"wathen\/PhD","path":"MHD\/FEniCS\/ShiftCurlCurl\/CppGradient\/Efficient\/CurlCurlSecondOrder.py","copies":"1","size":"5726","content":"import petsc4py, sys\npetsc4py.init(sys.argv)\nfrom petsc4py import PETSc\nimport os, inspect\nfrom dolfin import *\nimport numpy\nimport ExactSol\nimport MatrixOperations as MO\nimport CheckPetsc4py as CP\n\n\nimport HiptmairPrecond\nimport HiptmairSetup\nfrom timeit import default_timer as timer\nm = 8\nerrL2b =numpy.zeros((m-1,1))\nerrCurlb =numpy.zeros((m-1,1))\n\nl2border = numpy.zeros((m-1,1))\nCurlborder =numpy.zeros((m-1,1))\n\nItsSave = numpy.zeros((m-1,1))\nDimSave = numpy.zeros((m-1,1))\nTimeSave = numpy.zeros((m-1,1))\nNN = numpy.zeros((m-1,1))\nCurlgrad = numpy.zeros((m-1,1))\nMassgrad = numpy.zeros((m-1,1))\nLaplgrad = numpy.zeros((m-1,1))\ndim =3\n\nfor xx in xrange(1,m):\n NN[xx-1] = xx+0\n nn = int(2**(NN[xx-1][0]))\n # nn = 1\n omega = 1\n if dim == 2:\n esh = UnitSquareMesh(int(nn),int(nn))\n# mesh = RectangleMesh(0.0, 0.0, 1.0, 1.5, int(nn), int(nn), 'left')\n u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M2D(2,Show=\"yes\", Mass = omega)\n else:\n mesh = UnitCubeMesh(int(nn),int(nn),int(nn))\n u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M3D(1,Show=\"yes\", Mass = omega)\n\n order = 2\n parameters['reorder_dofs_serial'] = False\n Magnetic = FunctionSpace(mesh, \"N1curl\", order)\n Lagrange = FunctionSpace(mesh, \"CG\", order)\n parameters['reorder_dofs_serial'] = False\n\n DimSave[xx-1] = Magnetic.dim()\n print Magnetic.dim()\n parameters['linear_algebra_backend'] = 'uBLAS'\n\n # tic()\n# C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)\n# G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)\n # endTimeB = toc()\n # print endTimeB\n print \"\\n\"\n # tic()\n # C, P = HiptmairSetup.HiptmairMatrixSetup(mesh, Magnetic.dim(), Lagrange.dim())\n # G, P = HiptmairSetup.HiptmairBCsetup(C,P, mesh, [Magnetic,Lagrange])\n # endTime = toc()\n # print endTime\n\n # ataaa\n def boundary(x, on_boundary):\n return on_boundary\n\n bc = DirichletBC(Magnetic,u0, 
boundary)\n bcu = DirichletBC(Lagrange, Expression((\"0.0\")), boundary)\n\n\n (v) = TestFunction(Magnetic)\n (u) = TrialFunction(Magnetic)\n\n (p) = TrialFunction(Lagrange)\n (q) = TestFunction(Lagrange)\n\n a = inner(curl(u),curl(v))*dx + inner(u,v)*dx\n L1 = inner(v, CurlMass)*dx\n tic()\n Acurl,b = assemble_system(a,L1,bc, form_compiler_parameters={\"eliminate_zeros\": True})\n print \"System assembled, time: \", toc()\n\n tic()\n A,b = CP.Assemble(Acurl,b)\n x = b.duplicate()\n print \"PETSc system assembled, time: \", toc()\n MatVec = 'yes'\n if MatVec == \"yes\":\n tic()\n VecLagrange, kspMass, VectorLaplacian, ScalarLaplacian, B, BC = HiptmairSetup.HiptmairAnyOrder(Magnetic,Lagrange)\n # del b1, b2\n print \"Hiptmair Laplacians BC assembled, time: \", toc()\n\n\n ksp = PETSc.KSP().create()\n ksp.setTolerances(1e-6)\n ksp.setType('cg')\n ksp.setOperators(A,A)\n pc = ksp.getPC()\n reshist = {}\n def monitor(ksp, its, rnorm):\n reshist[its] = rnorm\n print its, ' ', rnorm\n ksp.setMonitor(monitor)\n pc.setType(PETSc.PC.Type.PYTHON)\n kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)\n del A, VectorLaplacian, ScalarLaplacian\n pc.setPythonContext(HiptmairPrecond.HiptmairApply([Magnetic,Lagrange,VecLagrange] ,B, kspMass, kspVector, kspScalar, diag, BC))\n scale = b.norm()\n b = b\/scale\n tic()\n ksp.solve(b, x)\n TimeSave[xx-1] = toc()\n x = x*scale\n print ksp.its\n print TimeSave[xx-1]\n ItsSave[xx-1] = ksp.its\n print \" \\n\\n\\n\\n\"\n else:\n # tic()\n C, P = HiptmairSetup.HiptmairMatrixSetupBoundary(mesh, Magnetic.dim(), Lagrange.dim(),dim)\n G, P = HiptmairSetup.HiptmairBCsetupBoundary(C,P,mesh)\n # endTimeB = toc()\n # print endTimeB\n print \"\\n\"\n tic()\n ScalarLaplacian, b1 = assemble_system(inner(grad(p),grad(q))*dx,inner(p0,q)*dx,bcu)\n VectorLaplacian, b2 = assemble_system(inner(grad(p),grad(q))*dx+inner(p,q)*dx,inner(p0,q)*dx,bcu)\n del b1, b2\n print \"Hiptmair Laplacians BC assembled, time: \", toc()\n\n tic()\n VectorLaplacian = PETSc.Mat().createAIJ(size=VectorLaplacian.sparray().shape,csr=(VectorLaplacian.sparray().indptr, VectorLaplacian.sparray().indices, VectorLaplacian.sparray().data))\n ScalarLaplacian = PETSc.Mat().createAIJ(size=ScalarLaplacian.sparray().shape,csr=(ScalarLaplacian.sparray().indptr, ScalarLaplacian.sparray().indices, ScalarLaplacian.sparray().data))\n print \"PETSc Laplacians assembled, time: \", toc()\n\n ksp = PETSc.KSP().create()\n ksp.setTolerances(1e-6)\n ksp.setType('cg')\n ksp.setOperators(A,A)\n pc = ksp.getPC()\n pc.setType(PETSc.PC.Type.PYTHON)\n kspVector, kspScalar, diag = HiptmairSetup.HiptmairKSPsetup(VectorLaplacian, ScalarLaplacian,A)\n del A, VectorLaplacian, ScalarLaplacian\n pc.setPythonContext(HiptmairPrecond.GSvector(G, P, kspVector, kspScalar, diag))\n scale = b.norm()\n b = b\/scale\n tic()\n ksp.solve(b, x)\n TimeSave[xx-1] = toc()\n x = x*scale\n print ksp.its\n print TimeSave[xx-1]\n ItsSave[xx-1] = ksp.its\n print \" \\n\\n\\n\\n\"\n\nimport pandas as pd\n\n\nprint \"\\n\\n\\n\"\nItsTitlesB = [\"l\",\"B DoF\",\"Time\",\"Iterations\"]\nItsValuesB = numpy.concatenate((NN,DimSave,TimeSave,ItsSave),axis=1)\nItsTableB= pd.DataFrame(ItsValuesB, columns = ItsTitlesB)\npd.set_option('precision',5)\nprint ItsTableB.to_latex()\n\nif m !=2:\n print numpy.abs((TimeSave[1:]\/TimeSave[:-1]))\/(2*dim)\n","license":"mit"} {"repo_name":"robcarver17\/pysystemtrade","path":"systems\/provided\/futures_chapter15\/rules.py","copies":"1","size":"4311","content":"\"\"\"\nTrading rules for 
futures system\n\"\"\"\nfrom syscore.dateutils import ROOT_BDAYS_INYEAR\nimport pandas as pd\nfrom sysquant.estimators.vol import robust_vol_calc\n\n\ndef ewmac(price, vol, Lfast, Lslow):\n \"\"\"\n Calculate the ewmac trading rule forecast, given a price and EWMA speeds Lfast, Lslow and vol_lookback\n\n Assumes that 'price' and vol is daily data\n\n This version uses a precalculated price volatility, and does not do capping or scaling\n\n :param price: The price or other series to use (assumed Tx1)\n :type price: pd.Series\n\n :param vol: The daily price unit volatility (NOT % vol)\n :type vol: pd.Series aligned to price\n\n :param Lfast: Lookback for fast in days\n :type Lfast: int\n\n :param Lslow: Lookback for slow in days\n :type Lslow: int\n\n :returns: pd.DataFrame -- unscaled, uncapped forecast\n\n\n >>> from systems.tests.testdata import get_test_object_futures\n >>> from systems.basesystem import System\n >>> (rawdata, data, config)=get_test_object_futures()\n >>> system=System( [rawdata], data, config)\n >>>\n >>> ewmac(rawdata.get_daily_prices(\"EDOLLAR\"), rawdata.daily_returns_volatility(\"EDOLLAR\"), 64, 256).tail(2)\n 2015-12-10 5.327019\n 2015-12-11 4.927339\n Freq: B, dtype: float64\n \"\"\"\n # price: This is the stitched price series\n # We can't use the price of the contract we're trading, or the volatility will be jumpy\n # And we'll miss out on the rolldown. See\n # https:\/\/qoppac.blogspot.com\/2015\/05\/systems-building-futures-rolling.html\n\n # We don't need to calculate the decay parameter, just use the span\n # directly\n\n fast_ewma = price.ewm(span=Lfast).mean()\n slow_ewma = price.ewm(span=Lslow).mean()\n raw_ewmac = fast_ewma - slow_ewma\n\n return raw_ewmac \/ vol.ffill()\n\n\ndef ewmac_calc_vol(price, Lfast, Lslow, vol_days=35):\n \"\"\"\n Calculate the ewmac trading rule forecast, given a price and EWMA speeds Lfast, Lslow and vol_lookback\n\n Assumes that 'price' and vol is daily data\n\n This version recalculates the price volatility, and does not do capping or scaling\n\n :param price: The price or other series to use (assumed Tx1)\n :type price: pd.Series\n\n :param Lfast: Lookback for fast in days\n :type Lfast: int\n\n :param Lslow: Lookback for slow in days\n :type Lslow: int\n\n :returns: pd.DataFrame -- unscaled, uncapped forecast\n\n\n >>> from systems.tests.testdata import get_test_object_futures\n >>> from systems.basesystem import System\n >>> (rawdata, data, config)=get_test_object_futures()\n >>> system=System( [rawdata], data, config)\n >>>\n >>> ewmac(rawdata.get_daily_prices(\"EDOLLAR\"), rawdata.daily_returns_volatility(\"EDOLLAR\"), 64, 256).tail(2)\n 2015-12-10 5.327019\n 2015-12-11 4.927339\n Freq: B, dtype: float64\n \"\"\"\n # price: This is the stitched price series\n # We can't use the price of the contract we're trading, or the volatility will be jumpy\n # And we'll miss out on the rolldown. 
See\n # https:\/\/qoppac.blogspot.com\/2015\/05\/systems-building-futures-rolling.html\n\n # We don't need to calculate the decay parameter, just use the span\n # directly\n\n fast_ewma = price.ewm(span=Lfast).mean()\n slow_ewma = price.ewm(span=Lslow).mean()\n raw_ewmac = fast_ewma - slow_ewma\n\n vol = robust_vol_calc(price, vol_days)\n\n return raw_ewmac \/ vol.ffill()\n\n\ndef carry(daily_ann_roll, vol, smooth_days=90):\n \"\"\"\n Old carry rule\n \"\"\"\n raise Exception(\"DEPRECATED: USE carry2\")\n\n\ndef carry2(raw_carry, smooth_days=90):\n \"\"\"\n Calculate carry forecast, given that there exists a raw_carry() in rawdata\n\n Assumes that everything is daily data\n\n :param raw_carry: The annualised sharpe ratio of rolldown\n :type raw_carry: pd.DataFrame (assumed Tx1)\n\n >>> from systems.tests.testdata import get_test_object_futures\n >>> from systems.basesystem import System\n >>> (rawdata, data, config)=get_test_object_futures()\n >>> system=System( [rawdata], data, config)\n >>>\n >>> carry2(rawdata.raw_carry(\"EDOLLAR\")).tail(2)\n 2015-12-10 0.411686\n 2015-12-11 0.411686\n Freq: B, dtype: float64\n \"\"\"\n\n smooth_carry = raw_carry.ewm(smooth_days).mean()\n\n return smooth_carry\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","license":"gpl-3.0"} {"repo_name":"q1ang\/scikit-learn","path":"sklearn\/neighbors\/tests\/test_kd_tree.py","copies":"159","size":"7852","content":"import numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,\n simultaneous_sort, kernel_norm,\n nodeheap_sort, DTYPE, ITYPE)\nfrom sklearn.neighbors.dist_metrics import DistanceMetric\nfrom sklearn.utils.testing import SkipTest, assert_allclose\n\nV = np.random.random((3, 3))\nV = np.dot(V, V.T)\n\nDIMENSION = 3\n\nMETRICS = {'euclidean': {},\n 'manhattan': {},\n 'chebyshev': {},\n 'minkowski': dict(p=3)}\n\n\ndef brute_force_neighbors(X, Y, k, metric, **kwargs):\n D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)\n ind = np.argsort(D, axis=1)[:, :k]\n dist = D[np.arange(Y.shape[0])[:, None], ind]\n return dist, ind\n\n\ndef test_kd_tree_query():\n np.random.seed(0)\n X = np.random.random((40, DIMENSION))\n Y = np.random.random((10, DIMENSION))\n\n def check_neighbors(dualtree, breadth_first, k, metric, kwargs):\n kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)\n dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,\n breadth_first=breadth_first)\n dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)\n\n # don't check indices here: if there are any duplicate distances,\n # the indices may not match. 
Distances should not have this problem.\n assert_array_almost_equal(dist1, dist2)\n\n for (metric, kwargs) in METRICS.items():\n for k in (1, 3, 5):\n for dualtree in (True, False):\n for breadth_first in (True, False):\n yield (check_neighbors,\n dualtree, breadth_first,\n k, metric, kwargs)\n\n\ndef test_kd_tree_query_radius(n_samples=100, n_features=10):\n np.random.seed(0)\n X = 2 * np.random.random(size=(n_samples, n_features)) - 1\n query_pt = np.zeros(n_features, dtype=float)\n\n eps = 1E-15 # roundoff error can cause test to fail\n kdt = KDTree(X, leaf_size=5)\n rad = np.sqrt(((X - query_pt) ** 2).sum(1))\n\n for r in np.linspace(rad[0], rad[-1], 100):\n ind = kdt.query_radius([query_pt], r + eps)[0]\n i = np.where(rad <= r + eps)[0]\n\n ind.sort()\n i.sort()\n\n assert_array_almost_equal(i, ind)\n\n\ndef test_kd_tree_query_radius_distance(n_samples=100, n_features=10):\n np.random.seed(0)\n X = 2 * np.random.random(size=(n_samples, n_features)) - 1\n query_pt = np.zeros(n_features, dtype=float)\n\n eps = 1E-15 # roundoff error can cause test to fail\n kdt = KDTree(X, leaf_size=5)\n rad = np.sqrt(((X - query_pt) ** 2).sum(1))\n\n for r in np.linspace(rad[0], rad[-1], 100):\n ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)\n\n ind = ind[0]\n dist = dist[0]\n\n d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))\n\n assert_array_almost_equal(d, dist)\n\n\ndef compute_kernel_slow(Y, X, kernel, h):\n d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))\n norm = kernel_norm(h, X.shape[1], kernel)\n\n if kernel == 'gaussian':\n return norm * np.exp(-0.5 * (d * d) \/ (h * h)).sum(-1)\n elif kernel == 'tophat':\n return norm * (d < h).sum(-1)\n elif kernel == 'epanechnikov':\n return norm * ((1.0 - (d * d) \/ (h * h)) * (d < h)).sum(-1)\n elif kernel == 'exponential':\n return norm * (np.exp(-d \/ h)).sum(-1)\n elif kernel == 'linear':\n return norm * ((1 - d \/ h) * (d < h)).sum(-1)\n elif kernel == 'cosine':\n return norm * (np.cos(0.5 * np.pi * d \/ h) * (d < h)).sum(-1)\n else:\n raise ValueError('kernel not recognized')\n\n\ndef test_kd_tree_kde(n_samples=100, n_features=3):\n np.random.seed(0)\n X = np.random.random((n_samples, n_features))\n Y = np.random.random((n_samples, n_features))\n kdt = KDTree(X, leaf_size=10)\n\n for kernel in ['gaussian', 'tophat', 'epanechnikov',\n 'exponential', 'linear', 'cosine']:\n for h in [0.01, 0.1, 1]:\n dens_true = compute_kernel_slow(Y, X, kernel, h)\n\n def check_results(kernel, h, atol, rtol, breadth_first):\n dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,\n kernel=kernel,\n breadth_first=breadth_first)\n assert_allclose(dens, dens_true, atol=atol,\n rtol=max(rtol, 1e-7))\n\n for rtol in [0, 1E-5]:\n for atol in [1E-6, 1E-2]:\n for breadth_first in (True, False):\n yield (check_results, kernel, h, atol, rtol,\n breadth_first)\n\n\ndef test_gaussian_kde(n_samples=1000):\n # Compare gaussian KDE results to scipy.stats.gaussian_kde\n from scipy.stats import gaussian_kde\n np.random.seed(0)\n x_in = np.random.normal(0, 1, n_samples)\n x_out = np.linspace(-5, 5, 30)\n\n for h in [0.01, 0.1, 1]:\n kdt = KDTree(x_in[:, None])\n try:\n gkde = gaussian_kde(x_in, bw_method=h \/ np.std(x_in))\n except TypeError:\n raise SkipTest(\"Old scipy, does not accept explicit bandwidth.\")\n\n dens_kdt = kdt.kernel_density(x_out[:, None], h) \/ n_samples\n dens_gkde = gkde.evaluate(x_out)\n\n assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)\n\n\ndef test_kd_tree_two_point(n_samples=100, n_features=3):\n np.random.seed(0)\n X = 
np.random.random((n_samples, n_features))\n Y = np.random.random((n_samples, n_features))\n r = np.linspace(0, 1, 10)\n kdt = KDTree(X, leaf_size=10)\n\n D = DistanceMetric.get_metric(\"euclidean\").pairwise(Y, X)\n counts_true = [(D <= ri).sum() for ri in r]\n\n def check_two_point(r, dualtree):\n counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)\n assert_array_almost_equal(counts, counts_true)\n\n for dualtree in (True, False):\n yield check_two_point, r, dualtree\n\n\ndef test_kd_tree_pickle():\n import pickle\n np.random.seed(0)\n X = np.random.random((10, 3))\n kdt1 = KDTree(X, leaf_size=1)\n ind1, dist1 = kdt1.query(X)\n\n def check_pickle_protocol(protocol):\n s = pickle.dumps(kdt1, protocol=protocol)\n kdt2 = pickle.loads(s)\n ind2, dist2 = kdt2.query(X)\n assert_array_almost_equal(ind1, ind2)\n assert_array_almost_equal(dist1, dist2)\n\n for protocol in (0, 1, 2):\n yield check_pickle_protocol, protocol\n\n\ndef test_neighbors_heap(n_pts=5, n_nbrs=10):\n heap = NeighborsHeap(n_pts, n_nbrs)\n\n for row in range(n_pts):\n d_in = np.random.random(2 * n_nbrs).astype(DTYPE)\n i_in = np.arange(2 * n_nbrs, dtype=ITYPE)\n for d, i in zip(d_in, i_in):\n heap.push(row, d, i)\n\n ind = np.argsort(d_in)\n d_in = d_in[ind]\n i_in = i_in[ind]\n\n d_heap, i_heap = heap.get_arrays(sort=True)\n\n assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])\n assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])\n\n\ndef test_node_heap(n_nodes=50):\n vals = np.random.random(n_nodes).astype(DTYPE)\n\n i1 = np.argsort(vals)\n vals2, i2 = nodeheap_sort(vals)\n\n assert_array_almost_equal(i1, i2)\n assert_array_almost_equal(vals[i1], vals2)\n\n\ndef test_simultaneous_sort(n_rows=10, n_pts=201):\n dist = np.random.random((n_rows, n_pts)).astype(DTYPE)\n ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)\n\n dist2 = dist.copy()\n ind2 = ind.copy()\n\n # simultaneous sort rows using function\n simultaneous_sort(dist, ind)\n\n # simultaneous sort rows using numpy\n i = np.argsort(dist2, axis=1)\n row_ind = np.arange(n_rows)[:, None]\n dist2 = dist2[row_ind, i]\n ind2 = ind2[row_ind, i]\n\n assert_array_almost_equal(dist, dist2)\n assert_array_almost_equal(ind, ind2)\n","license":"bsd-3-clause"} {"repo_name":"bartosh\/zipline","path":"tests\/pipeline\/test_downsampling.py","copies":"4","size":"24457","content":"\"\"\"\nTests for Downsampled Filters\/Factors\/Classifiers\n\"\"\"\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\nfrom zipline.pipeline import (\n Pipeline,\n CustomFactor,\n CustomFilter,\n CustomClassifier,\n)\nfrom zipline.pipeline.data.testing import TestingDataSet\nfrom zipline.pipeline.factors import SimpleMovingAverage\nfrom zipline.pipeline.filters.smoothing import All\nfrom zipline.testing import ZiplineTestCase, parameter_space\nfrom zipline.testing.fixtures import (\n WithTradingSessions,\n WithSeededRandomPipelineEngine,\n)\nfrom zipline.utils.input_validation import _qualified_name\nfrom zipline.utils.numpy_utils import int64_dtype\n\n\nclass NDaysAgoFactor(CustomFactor):\n inputs = [TestingDataSet.float_col]\n\n def compute(self, today, assets, out, floats):\n out[:] = floats[0]\n\n\nclass NDaysAgoFilter(CustomFilter):\n inputs = [TestingDataSet.bool_col]\n\n def compute(self, today, assets, out, bools):\n out[:] = bools[0]\n\n\nclass NDaysAgoClassifier(CustomClassifier):\n inputs = [TestingDataSet.categorical_col]\n dtype = TestingDataSet.categorical_col.dtype\n\n def compute(self, today, assets, out, cats):\n out[:] = cats[0]\n\n\nclass 
ComputeExtraRowsTestcase(WithTradingSessions, ZiplineTestCase):\n\n DATA_MIN_DAY = pd.Timestamp('2012-06', tz='UTC')\n DATA_MAX_DAY = pd.Timestamp('2015', tz='UTC')\n TRADING_CALENDAR_STRS = ('NYSE',)\n\n # Test with different window_lengths to ensure that window length is not\n # used when calculating exra rows for the top-level term.\n factor1 = TestingDataSet.float_col.latest\n factor11 = NDaysAgoFactor(window_length=11)\n factor91 = NDaysAgoFactor(window_length=91)\n\n filter1 = TestingDataSet.bool_col.latest\n filter11 = NDaysAgoFilter(window_length=11)\n filter91 = NDaysAgoFilter(window_length=91)\n\n classifier1 = TestingDataSet.categorical_col.latest\n classifier11 = NDaysAgoClassifier(window_length=11)\n classifier91 = NDaysAgoClassifier(window_length=91)\n\n all_terms = [\n factor1,\n factor11,\n factor91,\n filter1,\n filter11,\n filter91,\n classifier1,\n classifier11,\n classifier91,\n ]\n\n @parameter_space(\n calendar_name=TRADING_CALENDAR_STRS,\n base_terms=[\n (factor1, factor11, factor91),\n (filter1, filter11, filter91),\n (classifier1, classifier11, classifier91),\n ],\n __fail_fast=True\n )\n def test_yearly(self, base_terms, calendar_name):\n downsampled_terms = tuple(\n t.downsample('year_start') for t in base_terms\n )\n all_terms = base_terms + downsampled_terms\n\n all_sessions = self.trading_sessions[calendar_name]\n end_session = all_sessions[-1]\n\n years = all_sessions.year\n sessions_in_2012 = all_sessions[years == 2012]\n sessions_in_2013 = all_sessions[years == 2013]\n sessions_in_2014 = all_sessions[years == 2014]\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the first date in 2014. We shouldn't request any\n # additional rows for the regular terms or the downsampled terms.\n for i in range(0, 30, 5):\n start_session = sessions_in_2014[i]\n self.check_extra_row_calculations(\n all_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land on the second date in 2014. We should request one more extra\n # row in the downsampled terms to push us back to the first date in\n # 2014.\n for i in range(0, 30, 5):\n start_session = sessions_in_2014[i + 1]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i + 1,\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land on the last date of 2013. The downsampled terms should request\n # enough extra rows to push us back to the start of 2013.\n for i in range(0, 30, 5):\n start_session = sessions_in_2014[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(sessions_in_2013),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land on the last date of 2012. 
The downsampled terms should request\n # enough extra rows to push us back to the first known date, which is\n # in the middle of 2012\n for i in range(0, 30, 5):\n start_session = sessions_in_2013[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(sessions_in_2012),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n @parameter_space(\n calendar_name=TRADING_CALENDAR_STRS,\n base_terms=[\n (factor1, factor11, factor91),\n (filter1, filter11, filter91),\n (classifier1, classifier11, classifier91),\n ],\n __fail_fast=True\n )\n def test_quarterly(self, calendar_name, base_terms):\n downsampled_terms = tuple(\n t.downsample('quarter_start') for t in base_terms\n )\n all_terms = base_terms + downsampled_terms\n\n # This region intersects with Q4 2013, Q1 2014, and Q2 2014.\n tmp = self.trading_sessions[calendar_name]\n all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-04-30')]\n end_session = all_sessions[-1]\n\n months = all_sessions.month\n Q4_2013 = all_sessions[months == 12]\n Q1_2014 = all_sessions[(months == 1) | (months == 2) | (months == 3)]\n Q2_2014 = all_sessions[months == 4]\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the first date in Q2 2014. We shouldn't request any\n # additional rows for the regular terms or the downsampled terms.\n for i in range(0, 15, 5):\n start_session = Q2_2014[i]\n self.check_extra_row_calculations(\n all_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the second date in Q2 2014.\n # The downsampled terms should request one more extra row.\n for i in range(0, 15, 5):\n start_session = Q2_2014[i + 1]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i + 1,\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the last date in Q1 2014. The downsampled terms\n # should request enough extra rows to push us back to the first date of\n # Q1 2014.\n for i in range(0, 15, 5):\n start_session = Q2_2014[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(Q1_2014),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the last date in Q4 2013. 
The downsampled terms\n # should request enough extra rows to push us back to the first known\n # date, which is in the middle of december 2013.\n for i in range(0, 15, 5):\n start_session = Q1_2014[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(Q4_2013),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n @parameter_space(\n calendar_name=TRADING_CALENDAR_STRS,\n base_terms=[\n (factor1, factor11, factor91),\n (filter1, filter11, filter91),\n (classifier1, classifier11, classifier91),\n ],\n __fail_fast=True\n )\n def test_monthly(self, calendar_name, base_terms):\n downsampled_terms = tuple(\n t.downsample('month_start') for t in base_terms\n )\n all_terms = base_terms + downsampled_terms\n\n # This region intersects with Dec 2013, Jan 2014, and Feb 2014.\n tmp = self.trading_sessions[calendar_name]\n all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-02-28')]\n end_session = all_sessions[-1]\n\n months = all_sessions.month\n dec2013 = all_sessions[months == 12]\n jan2014 = all_sessions[months == 1]\n feb2014 = all_sessions[months == 2]\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the first date in feb 2014. We shouldn't request any\n # additional rows for the regular terms or the downsampled terms.\n for i in range(0, 10, 2):\n start_session = feb2014[i]\n self.check_extra_row_calculations(\n all_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land on the second date in feb 2014. We should request one more\n # extra row in the downsampled terms to push us back to the first date\n # in 2014.\n for i in range(0, 10, 2):\n start_session = feb2014[i + 1]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i + 1,\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land on the last date of jan 2014. The downsampled terms should\n # request enough extra rows to push us back to the start of jan 2014.\n for i in range(0, 10, 2):\n start_session = feb2014[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(jan2014),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land on the last date of dec 2013. 
The downsampled terms should\n # request enough extra rows to push us back to the first known date,\n # which is in the middle of december 2013.\n for i in range(0, 10, 2):\n start_session = jan2014[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(dec2013),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n @parameter_space(\n calendar_name=TRADING_CALENDAR_STRS,\n base_terms=[\n (factor1, factor11, factor91),\n (filter1, filter11, filter91),\n (classifier1, classifier11, classifier91),\n ],\n __fail_fast=True\n )\n def test_weekly(self, calendar_name, base_terms):\n downsampled_terms = tuple(\n t.downsample('week_start') for t in base_terms\n )\n all_terms = base_terms + downsampled_terms\n\n # December 2013\n # Mo Tu We Th Fr Sa Su\n # 1\n # 2 3 4 5 6 7 8\n # 9 10 11 12 13 14 15\n # 16 17 18 19 20 21 22\n # 23 24 25 26 27 28 29\n # 30 31\n\n # January 2014\n # Mo Tu We Th Fr Sa Su\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30 31\n\n # This region intersects with the last full week of 2013, the week\n # shared by 2013 and 2014, and the first full week of 2014.\n tmp = self.trading_sessions[calendar_name]\n all_sessions = tmp[tmp.slice_indexer('2013-12-27', '2014-01-12')]\n end_session = all_sessions[-1]\n\n week0 = all_sessions[\n all_sessions.slice_indexer('2013-12-27', '2013-12-29')\n ]\n week1 = all_sessions[\n all_sessions.slice_indexer('2013-12-30', '2014-01-05')\n ]\n week2 = all_sessions[\n all_sessions.slice_indexer('2014-01-06', '2014-01-12')\n ]\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the first date in week 2. We shouldn't request any\n # additional rows for the regular terms or the downsampled terms.\n for i in range(3):\n start_session = week2[i]\n self.check_extra_row_calculations(\n all_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the second date in week 2. The downsampled terms\n # should request one more extra row.\n for i in range(3):\n start_session = week2[i + 1]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i + 1,\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i,\n expected_extra_rows=i,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the last date in week 1. The downsampled terms\n # should request enough extra rows to push us back to the first date of\n # week 1.\n for i in range(3):\n start_session = week2[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(week1),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n # Simulate requesting computation where the unaltered lookback would\n # land exactly on the last date in week0. 
The downsampled terms\n # should request enough extra rows to push us back to the first known\n # date, which is in the middle of december 2013.\n for i in range(3):\n start_session = week1[i]\n self.check_extra_row_calculations(\n downsampled_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + len(week0),\n )\n self.check_extra_row_calculations(\n base_terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows=i + 1,\n expected_extra_rows=i + 1,\n )\n\n def check_extra_row_calculations(self,\n terms,\n all_sessions,\n start_session,\n end_session,\n min_extra_rows,\n expected_extra_rows):\n \"\"\"\n Check that each term in ``terms`` computes an expected number of extra\n rows for the given parameters.\n \"\"\"\n for term in terms:\n result = term.compute_extra_rows(\n all_sessions,\n start_session,\n end_session,\n min_extra_rows,\n )\n self.assertEqual(\n result,\n expected_extra_rows,\n \"Expected {} extra_rows from {}, but got {}.\".format(\n expected_extra_rows,\n term,\n result,\n )\n )\n\n\nclass DownsampledPipelineTestCase(WithSeededRandomPipelineEngine,\n ZiplineTestCase):\n\n # Extend into the last few days of 2013 to test year\/quarter boundaries.\n START_DATE = pd.Timestamp('2013-12-15', tz='UTC')\n\n # Extend into the first few days of 2015 to test year\/quarter boundaries.\n END_DATE = pd.Timestamp('2015-01-06', tz='UTC')\n\n ASSET_FINDER_EQUITY_SIDS = tuple(range(10))\n\n def check_downsampled_term(self, term):\n\n # June 2014\n # Mo Tu We Th Fr Sa Su\n # 1\n # 2 3 4 5 6 7 8\n # 9 10 11 12 13 14 15\n # 16 17 18 19 20 21 22\n # 23 24 25 26 27 28 29\n # 30\n all_sessions = self.nyse_sessions\n compute_dates = all_sessions[\n all_sessions.slice_indexer('2014-06-05', '2015-01-06')\n ]\n start_date, end_date = compute_dates[[0, -1]]\n\n pipe = Pipeline({\n 'year': term.downsample(frequency='year_start'),\n 'quarter': term.downsample(frequency='quarter_start'),\n 'month': term.downsample(frequency='month_start'),\n 'week': term.downsample(frequency='week_start'),\n })\n\n # Raw values for term, computed each day from 2014 to the end of the\n # target period.\n raw_term_results = self.run_pipeline(\n Pipeline({'term': term}),\n start_date=pd.Timestamp('2014-01-02', tz='UTC'),\n end_date=pd.Timestamp('2015-01-06', tz='UTC'),\n )['term'].unstack()\n\n expected_results = {\n 'year': (raw_term_results\n .groupby(pd.TimeGrouper('AS'))\n .first()\n .reindex(compute_dates, method='ffill')),\n 'quarter': (raw_term_results\n .groupby(pd.TimeGrouper('QS'))\n .first()\n .reindex(compute_dates, method='ffill')),\n 'month': (raw_term_results\n .groupby(pd.TimeGrouper('MS'))\n .first()\n .reindex(compute_dates, method='ffill')),\n 'week': (raw_term_results\n .groupby(pd.TimeGrouper('W', label='left'))\n .first()\n .reindex(compute_dates, method='ffill')),\n }\n\n results = self.run_pipeline(pipe, start_date, end_date)\n\n for frequency in expected_results:\n result = results[frequency].unstack()\n expected = expected_results[frequency]\n assert_frame_equal(result, expected)\n\n def test_downsample_windowed_factor(self):\n self.check_downsampled_term(\n SimpleMovingAverage(\n inputs=[TestingDataSet.float_col],\n window_length=5,\n )\n )\n\n def test_downsample_non_windowed_factor(self):\n sma = SimpleMovingAverage(\n inputs=[TestingDataSet.float_col],\n window_length=5,\n )\n\n self.check_downsampled_term(((sma + sma) \/ 2).rank())\n\n def test_downsample_windowed_filter(self):\n sma = SimpleMovingAverage(\n 
inputs=[TestingDataSet.float_col],\n window_length=5,\n )\n self.check_downsampled_term(All(inputs=[sma.top(4)], window_length=5))\n\n def test_downsample_nonwindowed_filter(self):\n sma = SimpleMovingAverage(\n inputs=[TestingDataSet.float_col],\n window_length=5,\n )\n self.check_downsampled_term(sma > 5)\n\n def test_downsample_windowed_classifier(self):\n\n class IntSumClassifier(CustomClassifier):\n inputs = [TestingDataSet.float_col]\n window_length = 8\n dtype = int64_dtype\n missing_value = -1\n\n def compute(self, today, assets, out, floats):\n out[:] = floats.sum(axis=0).astype(int) % 4\n\n self.check_downsampled_term(IntSumClassifier())\n\n def test_downsample_nonwindowed_classifier(self):\n sma = SimpleMovingAverage(\n inputs=[TestingDataSet.float_col],\n window_length=5,\n )\n self.check_downsampled_term(sma.quantiles(5))\n\n def test_errors_on_bad_downsample_frequency(self):\n\n f = NDaysAgoFactor(window_length=3)\n with self.assertRaises(ValueError) as e:\n f.downsample('bad')\n\n expected = (\n \"{}() expected a value in \"\n \"('month_start', 'quarter_start', 'week_start', 'year_start') \"\n \"for argument 'frequency', but got 'bad' instead.\"\n ).format(_qualified_name(f.downsample))\n self.assertEqual(str(e.exception), expected)\n","license":"apache-2.0"} {"repo_name":"Kongsea\/tensorflow","path":"tensorflow\/examples\/learn\/hdf5_classification.py","copies":"75","size":"2899","content":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of DNNClassifier for Iris plant dataset, hdf5 format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn import model_selection\nimport tensorflow as tf\nimport h5py # pylint: disable=g-bad-import-order\n\n\nX_FEATURE = 'x' # Name of the input feature.\n\n\ndef main(unused_argv):\n # Load dataset.\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = model_selection.train_test_split(\n iris.data, iris.target, test_size=0.2, random_state=42)\n\n # Note that we are saving and load iris data as h5 format as a simple\n # demonstration here.\n h5f = h5py.File('\/tmp\/test_hdf5.h5', 'w')\n h5f.create_dataset('X_train', data=x_train)\n h5f.create_dataset('X_test', data=x_test)\n h5f.create_dataset('y_train', data=y_train)\n h5f.create_dataset('y_test', data=y_test)\n h5f.close()\n\n h5f = h5py.File('\/tmp\/test_hdf5.h5', 'r')\n x_train = np.array(h5f['X_train'])\n x_test = np.array(h5f['X_test'])\n y_train = np.array(h5f['y_train'])\n y_test = np.array(h5f['y_test'])\n\n # Build 3 layer DNN with 10, 20, 10 units respectively.\n feature_columns = [\n tf.feature_column.numeric_column(\n X_FEATURE, shape=np.array(x_train).shape[1:])]\n classifier = tf.estimator.DNNClassifier(\n feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)\n\n # Train.\n train_input_fn = 
tf.estimator.inputs.numpy_input_fn(\n x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)\n classifier.train(input_fn=train_input_fn, steps=200)\n\n # Predict.\n test_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)\n predictions = classifier.predict(input_fn=test_input_fn)\n y_predicted = np.array(list(p['class_ids'] for p in predictions))\n y_predicted = y_predicted.reshape(np.array(y_test).shape)\n\n # Score with sklearn.\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy (sklearn): {0:f}'.format(score))\n\n # Score with tensorflow.\n scores = classifier.evaluate(input_fn=test_input_fn)\n print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))\n\n\nif __name__ == '__main__':\n tf.app.run()\n","license":"apache-2.0"} {"repo_name":"msmbuilder\/msmbuilder","path":"msmbuilder\/decomposition\/kernel_approximation.py","copies":"9","size":"4210","content":"# Author: Carlos Xavier Hernandez \n# Contributors: Muneeb Sultan , Evan Feinberg \n# Copyright (c) 2015, Stanford University and the Authors\n# All rights reserved.\n\nfrom __future__ import absolute_import\n\nimport numpy as np\nfrom scipy.linalg import svd\n\nfrom sklearn import kernel_approximation\nfrom sklearn.metrics.pairwise import pairwise_kernels\n\nfrom .base import MultiSequenceDecompositionMixin\n\n\n__all__ = ['Nystroem', 'LandmarkNystroem']\n\n\nclass Nystroem(MultiSequenceDecompositionMixin, kernel_approximation.Nystroem):\n __doc__ = kernel_approximation.Nystroem.__doc__\n\n\nclass LandmarkNystroem(Nystroem):\n \"\"\"Approximate a kernel map using a subset of the training data.\n\n Constructs an approximate feature map for an arbitrary kernel\n using a subset of the data as basis.\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n landmarks : ndarray of shape (n_frames, n_features)\n Custom landmark points for the Nyostroem approximation\n kernel : string or callable, default=\"rbf\"\n Kernel map to be approximated. A callable should accept two arguments\n and the keyword arguments passed to this object as kernel_params, and\n should return a floating point number.\n n_components : int\n Number of features to construct.\n How many data points will be used to construct the mapping.\n gamma : float, default=None\n Gamma parameter for the RBF, polynomial, exponential chi2 and\n sigmoid kernels. Interpretation of the default value is left to\n the kernel; see the documentation for sklearn.metrics.pairwise.\n Ignored by other kernels.\n degree : float, default=3\n Degree of the polynomial kernel. Ignored by other kernels.\n coef0 : float, default=1\n Zero coefficient for polynomial and sigmoid kernels.\n Ignored by other kernels.\n kernel_params : mapping of string to any, optional\n Additional parameters (keyword arguments) for kernel function passed\n as callable object.\n random_state : {int, RandomState}, optional\n If int, random_state is the seed used by the random number generator;\n if RandomState instance, random_state is the random number generator.\n\n Attributes\n ----------\n components_ : array, shape (n_components, n_features)\n Subset of training points used to construct the feature map.\n component_indices_ : array, shape (n_components)\n Indices of ``components_`` in the training set.\n normalization_ : array, shape (n_components, n_components)\n Normalization matrix needed for embedding.\n Square root of the kernel matrix on ``components_``.\n\n References\n ----------\n .. 
[1] Williams, C.K.I. and Seeger, M.\n \"Using the Nystroem method to speed up kernel machines\",\n Advances in neural information processing systems 2001\n .. [2] T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou\n \"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical\n Comparison\",\n Advances in Neural Information Processing Systems 2012\n\n See also\n --------\n Nystroem : Approximate a kernel map using a subset of the training data.\n \"\"\"\n\n def __init__(self, landmarks=None, **kwargs):\n if (landmarks is not None and\n not isinstance(landmarks, (int, np.ndarray))):\n raise ValueError('landmarks should be an int, ndarray, or None.')\n self.landmarks = landmarks\n super(LandmarkNystroem, self).__init__(**kwargs)\n\n def fit(self, sequences, y=None):\n if self.landmarks is not None:\n basis_kernel = pairwise_kernels(self.landmarks, metric=self.kernel,\n filter_params=True,\n **self._get_kernel_params())\n\n U, S, V = svd(basis_kernel)\n S = np.maximum(S, 1e-12)\n self.normalization_ = np.dot(U * 1. \/ np.sqrt(S), V)\n self.components_ = self.landmarks\n self.component_indices_ = None\n\n return self\n\n super(Nystroem, self).fit(sequences, y=y)\n","license":"lgpl-2.1"} {"repo_name":"fenglu-g\/incubator-airflow","path":"airflow\/hooks\/presto_hook.py","copies":"5","size":"4772","content":"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom builtins import str\n\nfrom pyhive import presto\nfrom pyhive.exc import DatabaseError\nfrom requests.auth import HTTPBasicAuth\n\nfrom airflow.hooks.dbapi_hook import DbApiHook\n\n\nclass PrestoException(Exception):\n pass\n\n\nclass PrestoHook(DbApiHook):\n \"\"\"\n Interact with Presto through PyHive!\n\n >>> ph = PrestoHook()\n >>> sql = \"SELECT count(1) AS num FROM airflow.static_babynames\"\n >>> ph.get_records(sql)\n [[340698]]\n \"\"\"\n\n conn_name_attr = 'presto_conn_id'\n default_conn_name = 'presto_default'\n\n def get_conn(self):\n \"\"\"Returns a connection object\"\"\"\n db = self.get_connection(self.presto_conn_id)\n reqkwargs = None\n if db.password is not None:\n reqkwargs = {'auth': HTTPBasicAuth(db.login, db.password)}\n return presto.connect(\n host=db.host,\n port=db.port,\n username=db.login,\n source=db.extra_dejson.get('source', 'airflow'),\n protocol=db.extra_dejson.get('protocol', 'http'),\n catalog=db.extra_dejson.get('catalog', 'hive'),\n requests_kwargs=reqkwargs,\n schema=db.schema)\n\n @staticmethod\n def _strip_sql(sql):\n return sql.strip().rstrip(';')\n\n @staticmethod\n def _get_pretty_exception_message(e):\n \"\"\"\n Parses some DatabaseError to provide a better error message\n \"\"\"\n if (hasattr(e, 'message') and\n 'errorName' in e.message and\n 'message' in e.message):\n return ('{name}: {message}'.format(\n name=e.message['errorName'],\n message=e.message['message']))\n else:\n return str(e)\n\n def get_records(self, hql, parameters=None):\n \"\"\"\n Get a set of records from Presto\n \"\"\"\n try:\n return super(PrestoHook, self).get_records(\n self._strip_sql(hql), parameters)\n except DatabaseError as e:\n raise PrestoException(self._get_pretty_exception_message(e))\n\n def get_first(self, hql, parameters=None):\n \"\"\"\n Returns only the first row, regardless of how many rows the query\n returns.\n \"\"\"\n try:\n return super(PrestoHook, self).get_first(\n self._strip_sql(hql), parameters)\n except DatabaseError as e:\n raise PrestoException(self._get_pretty_exception_message(e))\n\n def get_pandas_df(self, hql, parameters=None):\n \"\"\"\n Get a pandas dataframe from a sql query.\n \"\"\"\n import pandas\n cursor = self.get_cursor()\n try:\n cursor.execute(self._strip_sql(hql), parameters)\n data = cursor.fetchall()\n except DatabaseError as e:\n raise PrestoException(self._get_pretty_exception_message(e))\n column_descriptions = cursor.description\n if data:\n df = pandas.DataFrame(data)\n df.columns = [c[0] for c in column_descriptions]\n else:\n df = pandas.DataFrame()\n return df\n\n def run(self, hql, parameters=None):\n \"\"\"\n Execute the statement against Presto. 
Can be used to create views.\n \"\"\"\n return super(PrestoHook, self).run(self._strip_sql(hql), parameters)\n\n # TODO Enable commit_every once PyHive supports transaction.\n # Unfortunately, PyHive 0.5.1 doesn't support transaction for now,\n # whereas Presto 0.132+ does.\n def insert_rows(self, table, rows, target_fields=None):\n \"\"\"\n A generic way to insert a set of tuples into a table.\n\n :param table: Name of the target table\n :type table: str\n :param rows: The rows to insert into the table\n :type rows: iterable of tuples\n :param target_fields: The names of the columns to fill in the table\n :type target_fields: iterable of strings\n \"\"\"\n super(PrestoHook, self).insert_rows(table, rows, target_fields, 0)\n","license":"apache-2.0"} {"repo_name":"Obus\/scikit-learn","path":"examples\/semi_supervised\/plot_label_propagation_structure.py","copies":"247","size":"2432","content":"\"\"\"\n==============================================\nLabel Propagation learning a complex structure\n==============================================\n\nExample of LabelPropagation learning a complex internal structure\nto demonstrate \"manifold learning\". The outer circle should be\nlabeled \"red\" and the inner circle \"blue\". Because both label groups\nlie inside their own distinct shape, we can see that the labels\npropagate correctly around the circle.\n\"\"\"\nprint(__doc__)\n\n# Authors: Clay Woolam \n# Andreas Mueller \n# Licence: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.semi_supervised import label_propagation\nfrom sklearn.datasets import make_circles\n\n# generate ring with inner box\nn_samples = 200\nX, y = make_circles(n_samples=n_samples, shuffle=False)\nouter, inner = 0, 1\nlabels = -np.ones(n_samples)\nlabels[0] = outer\nlabels[-1] = inner\n\n###############################################################################\n# Learn with LabelSpreading\nlabel_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)\nlabel_spread.fit(X, labels)\n\n###############################################################################\n# Plot output labels\noutput_labels = label_spread.transduction_\nplt.figure(figsize=(8.5, 4))\nplt.subplot(1, 2, 1)\nplot_outer_labeled, = plt.plot(X[labels == outer, 0],\n X[labels == outer, 1], 'rs')\nplot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')\nplot_inner_labeled, = plt.plot(X[labels == inner, 0],\n X[labels == inner, 1], 'bs')\nplt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),\n ('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',\n numpoints=1, shadow=False)\nplt.title(\"Raw data (2 classes=red and blue)\")\n\nplt.subplot(1, 2, 2)\noutput_label_array = np.asarray(output_labels)\nouter_numbers = np.where(output_label_array == outer)[0]\ninner_numbers = np.where(output_label_array == inner)[0]\nplot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')\nplot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')\nplt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),\n 'upper left', numpoints=1, shadow=False)\nplt.title(\"Labels learned with Label Spreading (KNN)\")\n\nplt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)\nplt.show()\n","license":"bsd-3-clause"} {"repo_name":"rexshihaoren\/scikit-learn","path":"doc\/sphinxext\/gen_rst.py","copies":"142","size":"40026","content":"\"\"\"\nExample generation for the scikit learn\n\nGenerate the rst files for the examples by iterating over the python\nexample 
files.\n\nFiles that generate images should start with 'plot'\n\n\"\"\"\nfrom __future__ import division, print_function\nfrom time import time\nimport ast\nimport os\nimport re\nimport shutil\nimport traceback\nimport glob\nimport sys\nimport gzip\nimport posixpath\nimport subprocess\nimport warnings\nfrom sklearn.externals import six\n\n\n# Try Python 2 first, otherwise load from Python 3\ntry:\n from StringIO import StringIO\n import cPickle as pickle\n import urllib2 as urllib\n from urllib2 import HTTPError, URLError\nexcept ImportError:\n from io import StringIO\n import pickle\n import urllib.request\n import urllib.error\n import urllib.parse\n from urllib.error import HTTPError, URLError\n\n\ntry:\n # Python 2 built-in\n execfile\nexcept NameError:\n def execfile(filename, global_vars=None, local_vars=None):\n with open(filename, encoding='utf-8') as f:\n code = compile(f.read(), filename, 'exec')\n exec(code, global_vars, local_vars)\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\nimport token\nimport tokenize\nimport numpy as np\n\ntry:\n # make sure that the Agg backend is set before importing any\n # matplotlib\n import matplotlib\n matplotlib.use('Agg')\nexcept ImportError:\n # this script can be imported by nosetest to find tests to run: we should not\n # impose the matplotlib requirement in that case.\n pass\n\n\nfrom sklearn.externals import joblib\n\n###############################################################################\n# A tee object to redict streams to multiple outputs\n\nclass Tee(object):\n\n def __init__(self, file1, file2):\n self.file1 = file1\n self.file2 = file2\n\n def write(self, data):\n self.file1.write(data)\n self.file2.write(data)\n\n def flush(self):\n self.file1.flush()\n self.file2.flush()\n\n###############################################################################\n# Documentation link resolver objects\n\n\ndef _get_data(url):\n \"\"\"Helper function to get data over http or from a local file\"\"\"\n if url.startswith('http:\/\/'):\n # Try Python 2, use Python 3 on exception\n try:\n resp = urllib.urlopen(url)\n encoding = resp.headers.dict.get('content-encoding', 'plain')\n except AttributeError:\n resp = urllib.request.urlopen(url)\n encoding = resp.headers.get('content-encoding', 'plain')\n data = resp.read()\n if encoding == 'plain':\n pass\n elif encoding == 'gzip':\n data = StringIO(data)\n data = gzip.GzipFile(fileobj=data).read()\n else:\n raise RuntimeError('unknown encoding')\n else:\n with open(url, 'r') as fid:\n data = fid.read()\n fid.close()\n\n return data\n\nmem = joblib.Memory(cachedir='_build')\nget_data = mem.cache(_get_data)\n\n\ndef parse_sphinx_searchindex(searchindex):\n \"\"\"Parse a Sphinx search index\n\n Parameters\n ----------\n searchindex : str\n The Sphinx search index (contents of searchindex.js)\n\n Returns\n -------\n filenames : list of str\n The file names parsed from the search index.\n objects : dict\n The objects parsed from the search index.\n \"\"\"\n def _select_block(str_in, start_tag, end_tag):\n \"\"\"Select first block delimited by start_tag and end_tag\"\"\"\n start_pos = str_in.find(start_tag)\n if start_pos < 0:\n raise ValueError('start_tag not found')\n depth = 0\n for pos in range(start_pos, len(str_in)):\n if str_in[pos] == start_tag:\n depth += 1\n elif str_in[pos] == end_tag:\n depth -= 1\n\n if depth == 0:\n break\n sel = str_in[start_pos + 1:pos]\n return sel\n\n def _parse_dict_recursive(dict_str):\n \"\"\"Parse a dictionary from the search index\"\"\"\n dict_out 
= dict()\n pos_last = 0\n pos = dict_str.find(':')\n while pos >= 0:\n key = dict_str[pos_last:pos]\n if dict_str[pos + 1] == '[':\n # value is a list\n pos_tmp = dict_str.find(']', pos + 1)\n if pos_tmp < 0:\n raise RuntimeError('error when parsing dict')\n value = dict_str[pos + 2: pos_tmp].split(',')\n # try to convert elements to int\n for i in range(len(value)):\n try:\n value[i] = int(value[i])\n except ValueError:\n pass\n elif dict_str[pos + 1] == '{':\n # value is another dictionary\n subdict_str = _select_block(dict_str[pos:], '{', '}')\n value = _parse_dict_recursive(subdict_str)\n pos_tmp = pos + len(subdict_str)\n else:\n raise ValueError('error when parsing dict: unknown elem')\n\n key = key.strip('\"')\n if len(key) > 0:\n dict_out[key] = value\n\n pos_last = dict_str.find(',', pos_tmp)\n if pos_last < 0:\n break\n pos_last += 1\n pos = dict_str.find(':', pos_last)\n\n return dict_out\n\n # Make sure searchindex uses UTF-8 encoding\n if hasattr(searchindex, 'decode'):\n searchindex = searchindex.decode('UTF-8')\n\n # parse objects\n query = 'objects:'\n pos = searchindex.find(query)\n if pos < 0:\n raise ValueError('\"objects:\" not found in search index')\n\n sel = _select_block(searchindex[pos:], '{', '}')\n objects = _parse_dict_recursive(sel)\n\n # parse filenames\n query = 'filenames:'\n pos = searchindex.find(query)\n if pos < 0:\n raise ValueError('\"filenames:\" not found in search index')\n filenames = searchindex[pos + len(query) + 1:]\n filenames = filenames[:filenames.find(']')]\n filenames = [f.strip('\"') for f in filenames.split(',')]\n\n return filenames, objects\n\n\nclass SphinxDocLinkResolver(object):\n \"\"\" Resolve documentation links using searchindex.js generated by Sphinx\n\n Parameters\n ----------\n doc_url : str\n The base URL of the project website.\n searchindex : str\n Filename of searchindex, relative to doc_url.\n extra_modules_test : list of str\n List of extra module names to test.\n relative : bool\n Return relative links (only useful for links to documentation of this\n package).\n \"\"\"\n\n def __init__(self, doc_url, searchindex='searchindex.js',\n extra_modules_test=None, relative=False):\n self.doc_url = doc_url\n self.relative = relative\n self._link_cache = {}\n\n self.extra_modules_test = extra_modules_test\n self._page_cache = {}\n if doc_url.startswith('http:\/\/'):\n if relative:\n raise ValueError('Relative links are only supported for local '\n 'URLs (doc_url cannot start with \"http:\/\/)\"')\n searchindex_url = doc_url + '\/' + searchindex\n else:\n searchindex_url = os.path.join(doc_url, searchindex)\n\n # detect if we are using relative links on a Windows system\n if os.name.lower() == 'nt' and not doc_url.startswith('http:\/\/'):\n if not relative:\n raise ValueError('You have to use relative=True for the local'\n ' package on a Windows system.')\n self._is_windows = True\n else:\n self._is_windows = False\n\n # download and initialize the search index\n sindex = get_data(searchindex_url)\n filenames, objects = parse_sphinx_searchindex(sindex)\n\n self._searchindex = dict(filenames=filenames, objects=objects)\n\n def _get_link(self, cobj):\n \"\"\"Get a valid link, False if not found\"\"\"\n\n fname_idx = None\n full_name = cobj['module_short'] + '.' 
+ cobj['name']\n if full_name in self._searchindex['objects']:\n value = self._searchindex['objects'][full_name]\n if isinstance(value, dict):\n value = value[next(iter(value.keys()))]\n fname_idx = value[0]\n elif cobj['module_short'] in self._searchindex['objects']:\n value = self._searchindex['objects'][cobj['module_short']]\n if cobj['name'] in value.keys():\n fname_idx = value[cobj['name']][0]\n\n if fname_idx is not None:\n fname = self._searchindex['filenames'][fname_idx] + '.html'\n\n if self._is_windows:\n fname = fname.replace('\/', '\\\\')\n link = os.path.join(self.doc_url, fname)\n else:\n link = posixpath.join(self.doc_url, fname)\n\n if hasattr(link, 'decode'):\n link = link.decode('utf-8', 'replace')\n\n if link in self._page_cache:\n html = self._page_cache[link]\n else:\n html = get_data(link)\n self._page_cache[link] = html\n\n # test if cobj appears in page\n comb_names = [cobj['module_short'] + '.' + cobj['name']]\n if self.extra_modules_test is not None:\n for mod in self.extra_modules_test:\n comb_names.append(mod + '.' + cobj['name'])\n url = False\n if hasattr(html, 'decode'):\n # Decode bytes under Python 3\n html = html.decode('utf-8', 'replace')\n\n for comb_name in comb_names:\n if hasattr(comb_name, 'decode'):\n # Decode bytes under Python 3\n comb_name = comb_name.decode('utf-8', 'replace')\n if comb_name in html:\n url = link + u'#' + comb_name\n link = url\n else:\n link = False\n\n return link\n\n def resolve(self, cobj, this_url):\n \"\"\"Resolve the link to the documentation, returns None if not found\n\n Parameters\n ----------\n cobj : dict\n Dict with information about the \"code object\" for which we are\n resolving a link.\n cobi['name'] : function or class name (str)\n cobj['module_short'] : shortened module name (str)\n cobj['module'] : module name (str)\n this_url: str\n URL of the current page. Needed to construct relative URLs\n (only used if relative=True in constructor).\n\n Returns\n -------\n link : str | None\n The link (URL) to the documentation.\n \"\"\"\n full_name = cobj['module_short'] + '.' + cobj['name']\n link = self._link_cache.get(full_name, None)\n if link is None:\n # we don't have it cached\n link = self._get_link(cobj)\n # cache it for the future\n self._link_cache[full_name] = link\n\n if link is False or link is None:\n # failed to resolve\n return None\n\n if self.relative:\n link = os.path.relpath(link, start=this_url)\n if self._is_windows:\n # replace '\\' with '\/' so it on the web\n link = link.replace('\\\\', '\/')\n\n # for some reason, the relative link goes one directory too high up\n link = link[3:]\n\n return link\n\n\n###############################################################################\nrst_template = \"\"\"\n\n.. _example_%(short_fname)s:\n\n%(docstring)s\n\n**Python source code:** :download:`%(fname)s <%(fname)s>`\n\n.. literalinclude:: %(fname)s\n :lines: %(end_row)s-\n \"\"\"\n\nplot_rst_template = \"\"\"\n\n.. _example_%(short_fname)s:\n\n%(docstring)s\n\n%(image_list)s\n\n%(stdout)s\n\n**Python source code:** :download:`%(fname)s <%(fname)s>`\n\n.. literalinclude:: %(fname)s\n :lines: %(end_row)s-\n\n**Total running time of the example:** %(time_elapsed) .2f seconds\n(%(time_m) .0f minutes %(time_s) .2f seconds)\n \"\"\"\n\n# The following strings are used when we have several pictures: we use\n# an html div tag that our CSS uses to turn the lists into horizontal\n# lists.\nHLIST_HEADER = \"\"\"\n.. rst-class:: horizontal\n\n\"\"\"\n\nHLIST_IMAGE_TEMPLATE = \"\"\"\n *\n\n .. 
image:: images\/%s\n :scale: 47\n\"\"\"\n\nSINGLE_IMAGE = \"\"\"\n.. image:: images\/%s\n :align: center\n\"\"\"\n\n# The following dictionary contains the information used to create the\n# thumbnails for the front page of the scikit-learn home page.\n# key: first image in set\n# values: (number of plot in set, height of thumbnail)\ncarousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),\n 'plot_outlier_detection_001.png': (3, 372),\n 'plot_gp_regression_001.png': (2, 250),\n 'plot_adaboost_twoclass_001.png': (1, 372),\n 'plot_compare_methods_001.png': (1, 349)}\n\n\ndef extract_docstring(filename, ignore_heading=False):\n \"\"\" Extract a module-level docstring, if any\n \"\"\"\n if six.PY2:\n lines = open(filename).readlines()\n else:\n lines = open(filename, encoding='utf-8').readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n docstring = ''\n first_par = ''\n line_iterator = iter(lines)\n tokens = tokenize.generate_tokens(lambda: next(line_iterator))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs, extract\n # the first one:\n paragraphs = '\\n'.join(\n line.rstrip() for line\n in docstring.split('\\n')).split('\\n\\n')\n if paragraphs:\n if ignore_heading:\n if len(paragraphs) > 1:\n first_par = re.sub('\\n', ' ', paragraphs[1])\n first_par = ((first_par[:95] + '...')\n if len(first_par) > 95 else first_par)\n else:\n raise ValueError(\"Docstring not found by gallery.\\n\"\n \"Please check the layout of your\"\n \" example file:\\n {}\\n and make sure\"\n \" it's correct\".format(filename))\n else:\n first_par = paragraphs[0]\n\n break\n return docstring, first_par, erow + 1 + start_row\n\n\ndef generate_example_rst(app):\n \"\"\" Generate the list of examples, as well as the contents of\n examples.\n \"\"\"\n root_dir = os.path.join(app.builder.srcdir, 'auto_examples')\n example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',\n 'examples'))\n generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,\n 'modules', 'generated'))\n\n try:\n plot_gallery = eval(app.builder.config.plot_gallery)\n except TypeError:\n plot_gallery = bool(app.builder.config.plot_gallery)\n if not os.path.exists(example_dir):\n os.makedirs(example_dir)\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n if not os.path.exists(generated_dir):\n os.makedirs(generated_dir)\n\n # we create an index.rst with all examples\n fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')\n # Note: The sidebar button has been removed from the examples page for now\n # due to how it messes up the layout. Will be fixed at a later point\n fhindex.write(\"\"\"\\\n\n\n\n.. raw:: html\n\n\n