repo_name (string, length 6 to 130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
young-oct/complex_sporco
|
[
"88488e69d77805ccf25030388dc9e03bcc6a6df1"
] |
[
"tests/dictlrn/test_cbpdndl.py"
] |
[
"from __future__ import division\nfrom builtins import object\n\nimport numpy as np\n\nfrom sporco.dictlrn import cbpdndl\n\n\n\nclass TestSet01(object):\n\n def setup_method(self, method):\n N = 16\n Nd = 5\n M = 4\n K = 3\n np.random.seed(12345)\n self.D0 = np.random.randn(Nd, Nd, M)\n self.S = np.random.randn(N, N, K)\n\n\n def test_01(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10})\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S[..., 0], lmbda,\n opt=opt, dimK=0)\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_02(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10})\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opt)\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_03(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10},\n dmethod='cg')\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda,\n opt=opt, dmethod='cg')\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_04(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10},\n dmethod='cns')\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opt,\n dmethod='cns')\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_05(self):\n N = 16\n Nc = 3\n Nd = 5\n M = 4\n K = 3\n D0 = np.random.randn(Nd, Nd, Nc, M)\n S = np.random.randn(N, N, Nc, K)\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10})\n try:\n b = cbpdndl.ConvBPDNDictLearn(D0, S, lmbda, opt=opt)\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_06(self):\n N = 16\n Nc = 3\n Nd = 5\n M = 4\n K = 3\n D0 = np.random.randn(Nd, Nd, 1, M)\n S = np.random.randn(N, N, Nc, K)\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10})\n try:\n b = cbpdndl.ConvBPDNDictLearn(D0, S, lmbda, opt=opt)\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_07(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'AccurateDFid': True,\n 'MaxMainIter': 10})\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opt)\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_08(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10},\n dmethod='fista')\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opt,\n dmethod='fista')\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n\n def test_09(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10},\n xmethod='fista')\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opt,\n xmethod='fista')\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n\n\n\n def test_10(self):\n lmbda = 1e-1\n opt = cbpdndl.ConvBPDNDictLearn.Options({'MaxMainIter': 10},\n xmethod='fista', dmethod='cns')\n try:\n b = cbpdndl.ConvBPDNDictLearn(self.D0, self.S, lmbda, opt=opt,\n xmethod='fista', dmethod='cns')\n b.solve()\n except Exception as e:\n print(e)\n assert 0\n"
] |
[
[
"numpy.random.randn",
"numpy.random.seed"
]
] |
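As an illustrative aside, a minimal sketch of the two `numpy` APIs this test file exercises, `numpy.random.seed` and `numpy.random.randn`; the shapes mirror the fixture in `TestSet01.setup_method` but are otherwise arbitrary.

```python
# Minimal sketch: reproducible random test fixtures.
import numpy as np

np.random.seed(12345)           # fix the seed so the fixture is deterministic
D0 = np.random.randn(5, 5, 4)   # initial dictionary, shape (Nd, Nd, M)
S = np.random.randn(16, 16, 3)  # training signals, shape (N, N, K)
print(D0.shape, S.shape)        # (5, 5, 4) (16, 16, 3)
```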
krantikiran68/EzPC
|
[
"cacf10f31cddf55e4a06908fcfc64f8d7d0f85bd"
] |
[
"Athos/tests/tf/unittests/test_convolution.py"
] |
[
"\"\"\"\n\nAuthors: Pratik Bhatu.\n\nCopyright:\nCopyright (c) 2021 Microsoft Research\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\nimport pytest\n\nimport sys\nimport os\n\n# Athos DIR\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\"))\nfrom tests.utils import Config, Compiler, assert_almost_equal\n\n\[email protected](\n \"tfOp, a_shape, kernel_shape, strides, padding\",\n [\n (tf.nn.conv2d, [1, 5, 5, 1], [2, 2, 1, 2], [1, 1, 1, 1], \"SAME\"),\n (tf.nn.conv2d, [1, 5, 5, 1], [2, 2, 1, 2], [1, 1, 1, 1], \"VALID\"),\n (tf.nn.conv3d, [1, 5, 5, 5, 1], [2, 2, 2, 1, 2], [1, 1, 1, 1, 1], \"SAME\"),\n (tf.nn.conv3d, [1, 5, 5, 5, 1], [2, 2, 2, 1, 2], [1, 1, 1, 1, 1], \"VALID\"),\n ],\n)\[email protected](\"dtype\", [np.single])\ndef test_conv(test_dir, backend, tfOp, a_shape, kernel_shape, strides, padding, dtype):\n if tfOp == tf.nn.conv3d and backend in [\"2PC_HE\", \"2PC_OT\"]:\n pytest.skip(\"[conv3d] Missing Support in SCI\")\n graph = tf.Graph()\n a_inp = dtype(np.random.randn(*a_shape))\n kernel_inp = dtype(np.random.randn(*kernel_shape))\n with graph.as_default():\n a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name=\"a\")\n filters = tf.constant(kernel_inp, name=\"filter\")\n output = tfOp(a, filters, strides, padding, name=\"output\")\n with tf.compat.v1.Session(graph=graph) as sess:\n expected_output = sess.run(output, feed_dict={a: a_inp})\n\n config = Config(backend).add_input(a).add_output(output)\n config.config[\"scale\"] = 12\n compiler = Compiler(graph, config, test_dir)\n mpc_output = compiler.compile_and_run([a_inp])\n assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)\n return\n\n\[email protected](\n \"tfOp, a_shape, kernel_shape, strides, padding\",\n [\n (tf.nn.depthwise_conv2d, [1, 5, 5, 1], [2, 2, 1, 3], [1, 1, 1, 1], \"VALID\"),\n (tf.nn.depthwise_conv2d, [1, 5, 5, 1], [2, 2, 1, 3], [1, 1, 1, 1], \"SAME\"),\n (tf.nn.depthwise_conv2d, [1, 5, 5, 3], [2, 2, 3, 2], [1, 1, 1, 1], \"VALID\"),\n ],\n)\[email protected](\"dtype\", [np.single])\ndef test_depthwise_conv(\n test_dir, backend, tfOp, a_shape, kernel_shape, strides, padding, dtype\n):\n if backend in [\"2PC_HE\"]:\n pytest.skip(\"[SCI][grouped_conv] Missing Support in SCI\")\n graph = tf.Graph()\n a_inp = dtype(np.random.randn(*a_shape))\n kernel_inp = dtype(np.random.randn(*kernel_shape))\n with graph.as_default():\n a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name=\"a\")\n 
filters = tf.constant(kernel_inp, name=\"filter\")\n output = tfOp(a, filters, strides, padding, name=\"output\")\n with tf.compat.v1.Session(graph=graph) as sess:\n expected_output = sess.run(output, feed_dict={a: a_inp})\n\n config = Config(backend).add_input(a).add_output(output)\n config.config[\"scale\"] = 12\n compiler = Compiler(graph, config, test_dir)\n mpc_output = compiler.compile_and_run([a_inp])\n assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)\n return\n\n\[email protected](\n \"tfOp, a_shape, kernel_shape, output_shape, strides, padding\",\n [\n (\n tf.nn.conv3d_transpose,\n [1, 4, 4, 4, 2],\n [2, 2, 2, 1, 2],\n [1, 5, 5, 5, 1],\n [1, 1, 1, 1, 1],\n \"VALID\",\n ),\n pytest.param(\n tf.nn.conv3d_transpose,\n [1, 5, 5, 5, 2],\n [2, 2, 2, 1, 2],\n [1, 5, 5, 5, 1],\n [1, 1, 1, 1, 1],\n \"SAME\",\n marks=pytest.mark.skip(reason=\"[conv3d_transpose] SAME padding bug\"),\n ),\n ],\n)\[email protected](\"dtype\", [np.single])\ndef test_conv_transpose(\n test_dir,\n backend,\n tfOp,\n a_shape,\n kernel_shape,\n output_shape,\n strides,\n padding,\n dtype,\n):\n if backend in [\"2PC_HE\", \"2PC_OT\"]:\n pytest.skip(\"[conv3d] Missing Support in SCI\")\n graph = tf.Graph()\n a_inp = dtype(np.random.randn(*a_shape))\n kernel_inp = dtype(np.random.randn(*kernel_shape))\n with graph.as_default():\n a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name=\"a\")\n filters = tf.constant(kernel_inp, name=\"filter\")\n output = tfOp(a, filters, output_shape, strides, padding, name=\"output\")\n with tf.compat.v1.Session(graph=graph) as sess:\n expected_output = sess.run(output, feed_dict={a: a_inp})\n\n config = Config(backend).add_input(a).add_output(output)\n config.config[\"scale\"] = 12\n compiler = Compiler(graph, config, test_dir)\n mpc_output = compiler.compile_and_run([a_inp])\n assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)\n return\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.as_dtype",
"tensorflow.compat.v1.Session",
"numpy.random.randn"
]
] |
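For reference, a self-contained sketch of the TF1-compatible graph/session pattern the tests above rely on (`tf.Graph`, `tf.constant`, `tf.as_dtype`, `tf.compat.v1.Session`), reduced to a single `conv2d`; all shapes are made up.

```python
# Minimal sketch of the graph/session idiom used by test_conv.
import numpy as np
import tensorflow as tf

graph = tf.Graph()
a_inp = np.random.randn(1, 5, 5, 1).astype(np.float32)
kernel = np.random.randn(2, 2, 1, 2).astype(np.float32)
with graph.as_default():
    a = tf.compat.v1.placeholder(tf.as_dtype(np.float32), shape=a_inp.shape, name="a")
    filters = tf.constant(kernel, name="filter")
    output = tf.nn.conv2d(a, filters, strides=[1, 1, 1, 1], padding="SAME")
with tf.compat.v1.Session(graph=graph) as sess:
    print(sess.run(output, feed_dict={a: a_inp}).shape)  # (1, 5, 5, 2)
```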
ekunnii/APPIAN
|
[
"1460ef4e1b5c98a558b7f89753f1a1a5541374cf",
"1460ef4e1b5c98a558b7f89753f1a1a5541374cf"
] |
[
"Test/validation_qc.py",
"Initialization/initialization.py"
] |
[
"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom pyminc.volumes.factory import *\nimport sys\nimport os\nimport numpy as np\nfrom scipy.ndimage.measurements import center_of_mass\nif __name__ == \"__main__\":\n if os.path.exists(sys.argv[1]) :\n vol=volumeFromFile(sys.argv[1])\n if len(vol.data.shape) > 3 :\n ar = np.sum(vol.data, axis=0)\n else : \n ar = np.array(vol.data)\n plt.title(os.path.basename(sys.argv[1]))\n com=np.round(center_of_mass(np.array(ar))).astype(int)\n plt.subplot(1,3,1)\n plt.imshow(ar[:,:,com[2]], origin='lower')\n plt.axis('off')\n plt.subplot(1,3,2)\n plt.imshow(ar[:,com[1],:], origin='lower')\n plt.axis('off')\n plt.subplot(1,3,3)\n plt.imshow(ar[com[0],:,:], origin='lower')\n plt.axis('off')\n plt.colorbar()\n plt.savefig(sys.argv[2], dpi=300) #, bbox_inches='tight')\n print(\"Creating image:\", sys.argv[2])\n exit(0)\n print(\"Error: could not find file \", sys.argv[1])\n exit(1)\n",
"import os\nimport numpy as np\nimport tempfile\nimport shutil\nimport json\nimport ntpath\nimport shutil\nimport nibabel as nib\nimport re\nimport nipype.pipeline.engine as pe\nimport nipype.interfaces.utility as niu\nfrom nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,\n BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)\nfrom nipype.utils.filemanip import (load_json, save_json, split_filename, fname_presuffix, copyfile)\nfrom math import *\nfrom time import gmtime, strftime\nfrom glob import glob\nfrom os.path import basename\nfrom Extra.utils import splitext\nfrom sets import Set\n\ndef pexit(pstring=\"Error\", exitcode=1):\n print(pstring)\n exit(exitcode)\n\n\nclass pet_brain_maskOutput(TraitedSpec):\n out_file = File(desc=\"Headmask from PET volume\")\n\nclass pet_brain_maskInput(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"PET volume\")\n in_json = File(exists=True, mandatory=True, desc=\"PET json file\")\n out_file = File(desc=\"Head mask\")\n slice_factor = traits.Float(usedefault=True, default_value=0.25, desc=\"Value (between 0. to 1.) that is multiplied by the maximum of the slices of the PET image. Used to threshold slices. Lower value means larger mask\")\n total_factor = traits.Float(usedefault=True, default_value=0.333, desc=\"Value (between 0. to 1.) that is multiplied by the thresholded means of each slice. \")\n\n clobber = traits.Bool(usedefault=True, default_value=True, desc=\"Overwrite output file\")\n run = traits.Bool(usedefault=True, default_value=True, desc=\"Run the commands\")\n verbose = traits.Int(usedefault=True, default_value=True, desc=\"Write messages indicating progress\")\n\nclass pet_brain_mask(BaseInterface):\n input_spec = pet_brain_maskInput\n output_spec = pet_brain_maskOutput\n _suffix = \"_brain_mask\"\n\n def _run_interface(self, runtime):\n if not isdefined(self.inputs.out_file):\n base = os.path.basename(self.inputs.in_file)\n split = splitext(base)\n self.inputs.out_file = os.getcwd() +os.sep + split[0] + self._suffix + split[1]\n #Load PET 3D volume\n infile = nib.load(self.inputs.in_file)\n shape=infile.get_shape()\n zmax=shape[2]\n data=infile.get_data()\n #Get max slice values and multiply by pet_mask_slice_threshold (0.25 by default)\n slice_thresholds=np.amax(data, axis=(1,2)) * self.inputs.slice_factor\n #Get mean for all values above slice_max\n slice_mean_f=lambda t, d, i: float(np.mean(d[i, d[i,:,:] > t[i]]))\n slice_mean = np.array([slice_mean_f(slice_thresholds, data, i) for i in range(zmax) ])\n #Remove nan from slice_mean\n slice_mean =slice_mean[ ~ np.isnan(slice_mean) ]\n #Calculate overall mean from mean of thresholded slices\n overall_mean = np.mean(slice_mean)\n #Calcuate threshold\n threshold = overall_mean * self.inputs.total_factor\n #Apply threshold and create and write outputfile\n \n idx = data >= threshold\n data[ idx ] = 1\n data[~idx ] = 0\n\n outfile = nib.Nifti1Image(data, infile.get_affine())\n outfile.to_file(self.inputs.out_file)\n return runtime\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if not isdefined(self.inputs.out_file):\n self.inputs.out_file = fname_presuffix(self.inputs.in_file, suffix=self._suffix)\n outputs[\"out_file\"] = self.inputs.out_file\n return outputs\n\n\nclass header_output(TraitedSpec):\n out_file=traits.File(desc=\"Input json header file\")\nclass header_input(BaseInterfaceInputSpec):\n in_file=traits.File(exists=True,mandatory=True,desc=\"Input json header file\")\n 
out_file=traits.File(desc=\"Input json header file\")\n quant_method = traits.Str(desc=\"Quant method\")\n\nclass validate_header(BaseInterface):\n input_spec = header_input\n output_spec = header_output\n \n def _set_duration(self, d):\n frame_times=[]\n try :\n frame_times = d[\"Time\"][\"FrameTimes\"][\"Values\"]\n except KeyError :\n print(\"\\nError Could not find Time:FrameTimes:Values in header\\n\")\n exit(1)\n FrameLengths=[]\n\n c0=c1=1 #Time unit conversion variables. Time should be in seconds\n try :\n if d[\"Time\"][\"FrameTimes\"][\"Units\"][0] == 'm' :\n c0=60\n elif d[\"Time\"][\"FrameTimes\"][\"Units\"][0] == 'h' :\n c0=60*60\n if d[\"Time\"][\"FrameTimes\"][\"Units\"][1] == 'm' :\n c1=60\n elif d[\"Time\"][\"FrameTimes\"][\"Units\"][1] == 'h' :\n c1=60*60\n except KeyError :\n print(\"\\nError Could not find Time:FrameTimes:Units in header\\n\")\n exit(1)\n\n for s, e in frame_times :\n FrameLengths.append(c1*e - c0*s)\n\n d[\"Time\"][\"FrameTimes\"][\"Duration\"] = FrameLengths\n return d\n\n def _run_interface(self, runtime):\n if not isdefined(self.inputs.out_file) :\n self.inputs.out_file=os.getcwd() +os.sep+ os.path.basename(self.inputs.in_file)\n d=json.load(open(self.inputs.in_file,'r')) \n \n fields=[[\"Time\",\"FrameTimes\",\"Units\"],\n [\"Time\",\"FrameTimes\",\"Values\"],\n ]\n if self.inputs.quant_method == \"suv\" :\n fields.append([[\"Info\",\"BodyWeight\"],\n [\"RadioChem\", \"InjectedRadioactivity\"],\n [\"InjectedRadioactivityUnits\", \"kBq\"]])\n\n for f in fields :\n try :\n test_dict=d\n for key in f :\n test_dict=test_dict[key]\n except ValueError :\n pexit(\"Error: json header does not contain key: \"+\":\".join(f)+\"for file\"+self.inputs.in_file)\n d=self._set_duration(d)\n json.dump(d,open(self.inputs.out_file,'w+'))\n return runtime\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if not isdefined(self.inputs.out_file) :\n self.inputs.out_file=os.getcwd() +os.sep+ os.path.basename(self.inputs.in_file)\n outputs[\"out_file\"] = self.inputs.out_file\n return outputs\n\nclass SplitArgsOutput(TraitedSpec):\n cid = traits.Str(mandatory=True, desc=\"Condition ID\")\n sid = traits.Str(mandatory=True, desc=\"Subject ID\")\n task = traits.Str(desc=\"Task ID\")\n ses = traits.Str(desc=\"Session ID\")\n run = traits.Str(desc=\"Run ID\")\n #compression = traits.Str(desc=\"Compression\")\n RoiSuffix = traits.Str(desc=\"Suffix for subject ROI\")\n\nclass SplitArgsInput(BaseInterfaceInputSpec):\n task = traits.Str(desc=\"Task ID\")\n ses = traits.Str(desc=\"Session ID\")\n sid = traits.Str(desc=\"Subject ID\")\n cid = traits.Str(desc=\"Condition ID\")\n run = traits.Str(desc=\"Run ID\")\n ses_sub_only = traits.Bool(default_value=False, usedefault=True)\n RoiSuffix = traits.Str(desc=\"Suffix for subject ROI\")\n args = traits.Dict(mandatory=True, desc=\"Overwrite output file\")\n\nclass SplitArgsRunning(BaseInterface):\n input_spec = SplitArgsInput\n output_spec = SplitArgsOutput\n \n def _run_interface(self, runtime):\n cid=''\n if not isdefined(self.inputs.ses) :\n self.inputs.ses=self.inputs.args['ses']\n cid = cid + '_' +self.inputs.args['ses']\n if not isdefined(self.inputs.sid) :\n self.inputs.sid=self.inputs.args['sid']\n if self.inputs.ses_sub_only : return runtime\n\n try :\n self.inputs.task=self.inputs.args['task']\n cid = cid + '_' +self.inputs.args['task']\n except KeyError:\n pass\n\n try:\n self.inputs.run=self.inputs.args['run']\n cid = cid + '_' +self.inputs.args['run']\n except KeyError:\n pass\n self.inputs.cid=cid\n\n if 
isdefined(self.inputs.RoiSuffix):\n self.inputs.RoiSuffix=self.inputs.RoiSuffix\n return runtime\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if isdefined(self.inputs.cid):\n outputs[\"cid\"] = self.inputs.cid\n if isdefined(self.inputs.sid):\n outputs[\"sid\"] = self.inputs.sid\n\n if isdefined(self.inputs.ses):\n outputs[\"ses\"] = self.inputs.ses\n\n if isdefined(self.inputs.task):\n outputs[\"task\"] = self.inputs.task\n\n if isdefined(self.inputs.run):\n outputs[\"run\"] = self.inputs.run\n\n if isdefined(self.inputs.RoiSuffix):\n outputs[\"RoiSuffix\"]= self.inputs.RoiSuffix\n return outputs\n\nclass pet3DVolumeOutput(TraitedSpec):\n out_file = File(desc=\"Image after centering\")\n\nclass pet3DVolumeInput(BaseInterfaceInputSpec):\n in_file = File(position=0, argstr=\"%s\", mandatory=True, desc=\"Image\")\n out_file = File(argstr=\"%s\", desc=\"Image after centering\")\n verbose = traits.Int(argstr=\"-verbose\", usedefault=True, default_value=True, desc=\"Write messages indicating progress\")\n\nclass pet3DVolume(BaseInterface):\n input_spec = pet3DVolumeInput\n output_spec = pet3DVolumeOutput\n _suffix = \"_3D\"\n\n def _gen_output(self, basefile, _suffix):\n fname = ntpath.basename(basefile)\n fname_list = splitext(fname) # [0]= base filename; [1] =extension\n dname = os.getcwd()\n return dname+ os.sep+fname_list[0] + _suffix + fname_list[1]\n\n def _run_interface(self, runtime):\n if not isdefined(self.inputs.out_file):\n self.inputs.out_file = self._gen_output(self.inputs.in_file, self._suffix)\n infile = nib.load(self.inputs.in_file)\n shape=infile.get_shape()\n\n if len(shape) >= 4 :\n affine=infile.get_affine()\n data = infile.get_data()\n ti=np.argmin(data.shape)\n dims = list(data.shape) \n dims.remove(dims[ti])\n\n nFrames = shape[ti]\n rank=0.20\n\n first=int(floor(nFrames*rank))\n last=nFrames\n \n volume_subsets=np.split(data, [first,last], axis=ti) \n volume_subset=volume_subsets[1]\n \n volume_average=np.mean(volume_subset, axis=ti)\n print(\"Frames to concatenate -- First:\", first, \"Last:\", last) \n outfile = nib.Nifti1Image(volume_average, affine)\n nib.save(outfile, self.inputs.out_file)\n else :\n #If there is no \"time\" dimension (i.e., in 3D file), just copy the PET file\n shutil.copy(self.inputs.in_file, self.inputs.out_file)\n return runtime\n\n def _list_outputs(self):\n if not isdefined(self.inputs.out_file):\n self.inputs.out_file = self._gen_output(self.inputs.in_file, self._suffix)\n\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n\n if not isdefined(self.inputs.out_file):\n self.inputs.out_file = fname_presuffix(self.inputs.in_file, suffix=self._suffix)\n return outputs\n\n\"\"\"\n.. module:: initialization\n :platform: Unix\n :synopsis: Workflow to initialize PET images\n.. moduleauthor:: Thomas Funck <[email protected]>\n\"\"\"\n\ndef get_workflow(name, infosource, opts):\n '''\n Nipype workflow that initializes the PET images by\n 1. Centering the PET image: petCenter\n 2. Exlcude start and end frames: petExcludeFr\n 3. Average 4D PET image into 3D image: petVolume\n 4. 
Extract information from header\n\n :param name: Name of workflow\n :param infosource: Infosource for basic variables like subject id (sid) and condition id (cid)\n :param datasink: Node in which output data is sent\n :param opts: User options\n\n :returns: workflow\n '''\n workflow = pe.Workflow(name=name)\n\n #Define input node that will receive input from outside of workflow\n default_field=[\"pet\",\"pet_header_json\"]\n inputnode = pe.Node(niu.IdentityInterface(fields=default_field), name='inputnode')\n\n #Define empty node for output\n outputnode = pe.Node(niu.IdentityInterface(fields=[\"pet_volume\",\"pet_header_json\",\"pet_brain_mask\"]), name='outputnode')\n\n petVolume = pe.Node(interface=pet3DVolume(), name=\"petVolume\")\n petVolume.inputs.verbose = opts.verbose\n \n \n petHeader=pe.Node(interface=validate_header(), name=\"petHeader\")\n if opts.quant_method != None :\n petHeader.inputs.quant_method=opts.quant_method\n \n workflow.connect(inputnode, 'pet_header_json', petHeader, 'in_file')\n \n workflow.connect(inputnode, 'pet', petVolume, 'in_file')\n \n if opts.pet_brain_mask :\n petBrainMask=pe.Node(pet_brain_mask(), \"pet_brain_mask\")\n workflow.connect(petVolume, 'out_file', petBrainMask, 'in_file')\n workflow.connect(petBrainMask, 'out_file', outputnode, 'pet_brain_mask')\n \n workflow.connect(petVolume, 'out_file', outputnode, 'pet_volume')\n workflow.connect(petHeader, 'out_file', outputnode, 'pet_header_json')\n\n return(workflow)\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.sum"
],
[
"numpy.amax",
"numpy.split",
"numpy.isnan",
"numpy.mean",
"numpy.argmin"
]
] |
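An illustrative reduction of the tri-planar QC figure from `Test/validation_qc.py`, using a synthetic volume in place of the MINC input; note it imports `center_of_mass` from the modern `scipy.ndimage` path rather than the deprecated `scipy.ndimage.measurements`.

```python
# Minimal sketch: three orthogonal slices through a volume's center of mass.
import matplotlib
matplotlib.use('Agg')  # headless backend, as in the original script
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import center_of_mass

ar = np.random.rand(16, 16, 16)                 # stand-in for the PET volume
com = np.round(center_of_mass(ar)).astype(int)  # slice indices
slices = [ar[:, :, com[2]], ar[:, com[1], :], ar[com[0], :, :]]
for i, sl in enumerate(slices):
    plt.subplot(1, 3, i + 1)
    plt.imshow(sl, origin='lower')
    plt.axis('off')
plt.savefig('qc.png', dpi=150)
```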
AndreaCoop/Video_creator
|
[
"2518a3527bff013466e887f6d1bc06fe2a8e4912"
] |
[
"video_generator_script.py"
] |
[
"# script to create an animated plot from a simulation\n\n# import data\n\n# import packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\n# +\n# input \n\n# data from a .txt file\nfile_data = \"sin_expdecay.txt\"\n\n#set title and legth of the movie\ntitle_movie = 'example3_20s_5dpi'\nlength_movie = 20 # in seconds\n\nn_datapoint_per_interval = 5\n\n# Decorate the plot\nX_label = 'time [s]'\nY_label = 'Signal'\nplot_title = 'Best plot ever!'\n\n\n# +\n\ndef video_generator(file_data, X_label, Y_label, plot_title, movie_title, movie_length, n_datapoint_per_interval=1):\n \"\"\"Create a video animation from a data set\n \n The video is saved in a .mp4 file in the same folder\n \n Parameters\n ----------\n file_data : .txt file \n file containing the data to plot, in the format of a np.array([x, y])\n X_label : str\n label of X axis\n Y_label : str\n label of y axis\n plot_title : str\n title of the plot\n movie_title : str\n name to give to the .mp4 file when saved\n movie_length : int\n duration in seconds of the movie\n n_datapoint_per interval: int, optional \n number of datapoints taken in each frame (default=1)\n if n_datapoint_per interval = 1 : it takes as number of frames as the number of datapoints\n if n_datapoint_per interval > 1 : there will be less frames, \n the video will be less smooth \n but it will take less time to create it\n \"\"\"\n # load data\n [X,Y] = np.loadtxt(file_data)\n\n # set number of frames for creating the plot\n X_length = len(X)\n X_interval = X_length // n_datapoint_per_interval\n\n # set limits of the plot\n X_start = X[0]\n X_stop = X[-1]\n Y_gap = (np.amax(Y) - np.amin(Y)) * 0.1\n Y_start = np.amin(Y) - Y_gap\n Y_stop = np.amax(Y) + Y_gap\n\n\n # print(X_length, n_datapoint_per_interval, X_interval)\n\n # code with animation function of matplotlib\n\n # First set up the figure, the axis, and the plot element we want to animate\n fig = plt.figure()\n ax = plt.axes(xlim=(X_start, X_stop), ylim=(Y_start, Y_stop))\n ax.set_xlabel(X_label)\n ax.set_ylabel(Y_label)\n ax.set_title(plot_title)\n line, = ax.plot([], [], lw=2)\n\n # set movie_title\n title_movie = movie_title + '.mp4'\n\n # frames per second\n frames_per_seconds = (X_interval+1) // movie_length \n\n # initialization function: plot the background of each frame\n def init():\n line.set_data([], [])\n return line,\n\n # animation function. This is called sequentially\n def animate(i):\n line.set_data(X[0:i*n_datapoint_per_interval], Y[0:i*n_datapoint_per_interval])\n return line,\n\n # call the animator. blit=True means only re-draw the parts that have changed.\n anim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=X_interval+1, interval=200, blit=True)\n\n\n # save the animation as an mp4. This requires ffmpeg or mencoder to be\n # installed. The extra_args ensure that the x264 codec is used, so that\n # the video can be embedded in html5. You may need to adjust this for\n # your system: for more information, see\n # http://matplotlib.sourceforge.net/api/animation_api.html\n anim.save(title_movie, fps=frames_per_seconds, extra_args=['-vcodec', 'libx264'])\n # fps: frames per second in the movie \n # --> the higher the number of fps, the faster will be animation in the movie\n # es. if frames = 1000 in FuncAnimation and fps = 100: the movie will last 10 seconds\n\n plt.show()\n# -\nvideo_generator(file_data, X_label, Y_label, plot_title, title_movie, length_movie, n_datapoint_per_interval)\n\n\n"
] |
[
[
"numpy.amax",
"numpy.amin",
"matplotlib.pyplot.axes",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
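A trimmed-down sketch of the `FuncAnimation` pattern in `video_generator`, run on synthetic data so it needs no `sin_expdecay.txt`; saving to mp4 (which requires ffmpeg) is replaced by `plt.show()`.

```python
# Minimal sketch: animate a growing line, as video_generator does per frame.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

X = np.linspace(0, 10, 200)
Y = np.sin(X) * np.exp(-0.2 * X)   # stand-in for the loaded data

fig = plt.figure()
ax = plt.axes(xlim=(X[0], X[-1]), ylim=(np.amin(Y) - 0.1, np.amax(Y) + 0.1))
line, = ax.plot([], [], lw=2)

def animate(i):
    line.set_data(X[:i], Y[:i])    # reveal one more point per frame
    return line,

anim = animation.FuncAnimation(fig, animate, frames=len(X), interval=20, blit=True)
plt.show()
```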
aturbofly/crab
|
[
"ca3d6a8f50573a09d154274dd14331ebc91abe17"
] |
[
"scikits/crab/recommenders/knn/classes.py"
] |
[
"\"\"\"\nGeneralized Recommender models.\n\nThis module contains basic memory recommender interfaces used throughout\nthe whole scikit-crab package.\n\nThe interfaces are realized as abstract base classes (ie., some optional\nfunctionality is provided in the interface itself, so that the interfaces\ncan be subclassed).\n\n\"\"\"\n\n# Author: Marcel Caraciolo <[email protected]>\n#\n# License: BSD Style.\n\nfrom base import ItemRecommender, UserRecommender\nfrom item_strategies import ItemsNeighborhoodStrategy\nfrom neighborhood_strategies import NearestNeighborsStrategy\nimport numpy as np\n\n\nclass ItemBasedRecommender(ItemRecommender):\n \"\"\"\n Item Based Collaborative Filtering Recommender.\n\n\n Parameters\n -----------\n data_model: The data model instance that will be data source\n for the recommender.\n\n similarity: The Item Similarity instance that will be used to\n score the items that will be recommended.\n\n items_selection_strategy: The item candidates strategy that you\n can choose for selecting the possible items to recommend.\n default = ItemsNeighborhoodStrategy\n\n capper: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n with_preference: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Attributes\n -----------\n `model`: The data model instance that will be data source\n for the recommender.\n\n `similarity`: The Item Similarity instance that will be used to\n score the items that will be recommended.\n\n `items_selection_strategy`: The item candidates strategy that you\n can choose for selecting the possible items to recommend.\n default = ItemsNeighborhoodStrategy\n\n `capper`: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n `with_preference`: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Examples\n -----------\n >>> from scikits.crab.models.classes import MatrixPreferenceDataModel\n >>> from scikits.crab.recommenders.knn.classes import ItemBasedRecommender\n >>> from scikits.crab.similarities.basic_similarities import ItemSimilarity\n >>> from scikits.crab.recommenders.knn.item_strategies import ItemsNeighborhoodStrategy\n >>> from scikits.crab.metrics.pairwise import euclidean_distances\n >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \\\n 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \\\n 'The Night Listener': 3.0}, \\\n 'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \\\n 'Superman Returns': 3.5, 'The Night Listener': 4.0}, \\\n 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \\\n 'The Night Listener': 4.5, 'Superman Returns': 4.0, \\\n 'You, Me and Dupree': 2.5}, \\\n 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 2.0}, \\\n 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'The Night Listener': 3.0, 'Superman Returns': 5.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \\\n 'Superman Returns':4.0}, \\\n 'Maria Gabriela': {}}\n >>> model = MatrixPreferenceDataModel(movies)\n >>> 
items_strategy = ItemsNeighborhoodStrategy()\n >>> similarity = ItemSimilarity(model, euclidean_distances)\n >>> recsys = ItemBasedRecommender(model, similarity, items_strategy)\n >>> #Return the recommendations for the given user.\n >>> recsys.recommend('Leopoldo Pires')\n ['Just My Luck', 'You, Me and Dupree']\n >>> #Return the 2 explanations for the given recommendation.\n >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)\n ['The Night Listener', 'Superman Returns']\n\n Notes\n -----------\n This ItemBasedRecommender does not yet provide\n suppot for rescorer functions.\n\n References\n -----------\n Item-based collaborative filtering recommendation algorithms by Sarwar\n http://portal.acm.org/citation.cfm?id=372071\n\n \"\"\"\n\n def __init__(self, model, similarity, items_selection_strategy=None,\n capper=True, with_preference=False):\n ItemRecommender.__init__(self, model, with_preference)\n self.similarity = similarity\n self.capper = capper\n if items_selection_strategy is None:\n self.items_selection_strategy = ItemsNeighborhoodStrategy()\n else:\n self.items_selection_strategy = items_selection_strategy\n\n def recommend(self, user_id, how_many=None, **params):\n '''\n Return a list of recommended items, ordered from most strongly\n recommend to least.\n\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n how_many: int\n Desired number of recommendations (default=None ALL)\n\n '''\n self.set_params(**params)\n\n candidate_items = self.all_other_items(user_id)\n\n recommendable_items = self._top_matches(user_id, \\\n candidate_items, how_many)\n\n return recommendable_items\n\n def estimate_preference(self, user_id, item_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n item_id: int or string\n ID of item for which wants to find the estimated preference.\n\n Returns\n -------\n Return an estimated preference if the user has not expressed a\n preference for the item, or else the user's actual preference for the\n item. If a preference cannot be estimated, returns None.\n '''\n preference = self.model.preference_value(user_id, item_id)\n\n if not np.isnan(preference):\n return preference\n\n #TODO: It needs optimization\n prefs = self.model.preferences_from_user(user_id)\n\n if not self.model.has_preference_values():\n prefs = [(pref, 1.0) for pref in prefs]\n\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id, pref in prefs if to_item_id != item_id]).flatten()\n\n prefs = np.array([pref for it, pref in prefs])\n prefs_sim = np.sum(prefs[~np.isnan(similarities)] *\n similarities[~np.isnan(similarities)])\n total_similarity = np.sum(similarities)\n\n #Throw out the estimate if it was based on no data points,\n #of course, but also if based on\n #just one. 
This is a bit of a band-aid on the 'stock'\n #item-based algorithm for the moment.\n #The reason is that in this case the estimate is, simply,\n #the user's rating for one item\n #that happened to have a defined similarity.\n #The similarity score doesn't matter, and that\n #seems like a bad situation.\n if total_similarity == 0.0 or \\\n not similarities[~np.isnan(similarities)].size:\n return np.nan\n\n estimated = prefs_sim / total_similarity\n\n if self.capper:\n max_p = self.model.maximum_preference_value()\n min_p = self.model.minimum_preference_value()\n estimated = max_p if estimated > max_p else min_p \\\n if estimated < min_p else estimated\n return estimated\n\n def all_other_items(self, user_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n Returns\n ---------\n Return items in the `model` for which the user has not expressed\n the preference and could possibly be recommended to the user.\n\n '''\n return self.items_selection_strategy.candidate_items(user_id, \\\n self.model)\n\n def _top_matches(self, source_id, target_ids, how_many=None, **params):\n '''\n Parameters\n ----------\n target_ids: array of shape [n_target_ids]\n\n source_id: int or string\n item id to compare against.\n\n how_many: int\n Desired number of most top items to recommend (default=None ALL)\n\n Returns\n --------\n Return the top N matches\n It can be user_ids or item_ids.\n '''\n #Empty target_ids\n if target_ids.size == 0:\n return np.array([])\n\n estimate_preferences = np.vectorize(self.estimate_preference)\n\n preferences = estimate_preferences(source_id, target_ids)\n\n preference_values = preferences[~np.isnan(preferences)]\n target_ids = target_ids[~np.isnan(preferences)]\n\n sorted_preferences = np.lexsort((preference_values,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(target_ids[ind], \\\n preferences[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [target_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n def most_similar_items(self, item_id, how_many=None):\n '''\n Return the most similar items to the given item, ordered\n from most similar to least.\n\n Parameters\n -----------\n item_id: int or string\n ID of item for which to find most similar other items\n\n how_many: int\n Desired number of most similar items to find (default=None ALL)\n '''\n old_how_many = self.similarity.num_best\n #+1 since it returns the identity.\n self.similarity.num_best = how_many + 1 \\\n if how_many is not None else None\n similarities = self.similarity[item_id]\n self.similarity.num_best = old_how_many\n\n return np.array([item for item, pref in similarities \\\n if item != item_id and not np.isnan(pref)])\n\n def recommended_because(self, user_id, item_id, how_many=None, **params):\n '''\n Returns the items that were most influential in recommending a\n given item to a given user. 
In most implementations, this\n method will return items that the user prefers and that\n are similar to the given item.\n\n Parameters\n -----------\n user_id : int or string\n ID of the user who was recommended the item\n\n item_id: int or string\n ID of item that was recommended\n\n how_many: int\n Maximum number of items to return (default=None ALL)\n\n Returns\n ----------\n The list of items ordered from most influential in\n recommended the given item to least\n '''\n preferences = self.model.preferences_from_user(user_id)\n\n if self.model.has_preference_values():\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id, pref in preferences\n if to_item_id != item_id]).flatten()\n prefs = np.array([pref for it, pref in preferences])\n item_ids = np.array([it for it, pref in preferences])\n else:\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id in preferences\n if to_item_id != item_id]).flatten()\n prefs = np.array([1.0 for it in preferences])\n item_ids = np.array(preferences)\n\n scores = prefs[~np.isnan(similarities)] * \\\n (1.0 + similarities[~np.isnan(similarities)])\n\n sorted_preferences = np.lexsort((scores,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(item_ids[ind], \\\n prefs[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [item_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n\n#=====================\n#User Based Recommender\n\nclass UserBasedRecommender(UserRecommender):\n \"\"\"\n User Based Collaborative Filtering Recommender.\n\n\n Parameters\n -----------\n data_model: The data model instance that will be data source\n for the recommender.\n\n similarity: The User Similarity instance that will be used to\n score the users that are the most similar to the user.\n\n neighborhood_strategy: The user neighborhood strategy that you\n can choose for selecting the most similar users to find\n the items to recommend.\n default = NearestNeighborsStrategy\n\n capper: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n with_preference: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Attributes\n -----------\n `model`: The data model instance that will be data source\n for the recommender.\n\n `similarity`: The User Similarity instance that will be used to\n score the users that are the most similar to the user.\n\n `neighborhood_strategy`: The user neighborhood strategy that you\n can choose for selecting the most similar users to find\n the items to recommend.\n default = NearestNeighborsStrategy\n\n `capper`: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n `with_preference`: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Examples\n -----------\n >>> from scikits.crab.models.classes import MatrixPreferenceDataModel\n >>> from scikits.crab.recommenders.knn.classes import UserBasedRecommender\n >>> from scikits.crab.similarities.basic_similarities import UserSimilarity\n >>> from scikits.crab.recommenders.knn.neighborhood_strategies import NearestNeighborsStrategy\n >>> from scikits.crab.metrics.pairwise import euclidean_distances\n >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \\\n 'Snakes on a Plane': 
3.5, \\\n 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \\\n 'The Night Listener': 3.0}, \\\n 'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \\\n 'Superman Returns': 3.5, 'The Night Listener': 4.0}, \\\n 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \\\n 'The Night Listener': 4.5, 'Superman Returns': 4.0, \\\n 'You, Me and Dupree': 2.5}, \\\n 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 2.0}, \\\n 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'The Night Listener': 3.0, 'Superman Returns': 5.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \\\n 'Superman Returns':4.0}, \\\n 'Maria Gabriela': {}}\n >>> model = MatrixPreferenceDataModel(movies)\n >>> nhood_strategy = NearestNeighborsStrategy()\n >>> similarity = UserSimilarity(model, euclidean_distances)\n >>> recsys = UserBasedRecommender(model, similarity, nhood_strategy)\n >>> #Return the recommendations for the given user.\n >>> recsys.recommend('Leopoldo Pires')\n ['Just My Luck', 'You, Me and Dupree']\n >>> #Return the 2 explanations for the given recommendation.\n >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)\n ['Lorena Abreu', 'Marcel Caraciolo']\n\n Notes\n -----------\n This UserBasedRecommender does not yet provide\n suppot for rescorer functions.\n\n References\n -----------\n User-based collaborative filtering recommendation algorithms by\n\n \"\"\"\n\n def __init__(self, model, similarity, neighborhood_strategy=None,\n capper=True, with_preference=False):\n UserRecommender.__init__(self, model, with_preference)\n self.similarity = similarity\n self.capper = capper\n if neighborhood_strategy is None:\n self.neighborhood_strategy = NearestNeighborsStrategy()\n else:\n self.neighborhood_strategy = neighborhood_strategy\n\n def all_other_items(self, user_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed. 
(default= 'user_similarity')\n\n Optional Parameters\n --------------------\n n_similarity: string\n The similarity used in the neighborhood strategy\n\n distance: the metrics.pairwise function to set.\n The pairwise function to compute the similarity (default = euclidean_distances)\n\n nhood_size: int\n The neighborhood size (default=None ALL)\n\n minimal_similarity: float\n minimal similarity required for neighbors (default = 0.0)\n\n sampling_rate: int\n percentage of users to consider when building neighborhood\n (default = 1)\n\n Returns\n ---------\n Return items in the `model` for which the user has not expressed\n the preference and could possibly be recommended to the user.\n\n '''\n n_similarity = params.pop('n_similarity', 'user_similarity')\n distance = params.pop('distance', self.similarity.distance)\n nhood_size = params.pop('nhood_size', None)\n\n nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,\n self.model, n_similarity, distance, nhood_size, **params)\n\n items_from_user_id = self.model.items_from_user(user_id)\n possible_items = []\n for to_user_id in nearest_neighbors:\n possible_items.extend(self.model.items_from_user(to_user_id))\n\n possible_items = np.unique(np.array(possible_items).flatten())\n\n return np.setdiff1d(possible_items, items_from_user_id)\n\n def estimate_preference(self, user_id, item_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n item_id: int or string\n ID of item for which wants to find the estimated preference.\n\n Returns\n -------\n Return an estimated preference if the user has not expressed a\n preference for the item, or else the user's actual preference for the\n item. If a preference cannot be estimated, returns None.\n '''\n\n preference = self.model.preference_value(user_id, item_id)\n if not np.isnan(preference):\n return preference\n\n n_similarity = params.pop('n_similarity', 'user_similarity')\n distance = params.pop('distance', self.similarity.distance)\n nhood_size = params.pop('nhood_size', None)\n\n nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,\n self.model, n_similarity, distance, nhood_size, **params)\n\n preference = 0.0\n total_similarity = 0.0\n\n similarities = np.array([self.similarity.get_similarity(user_id, to_user_id)\n for to_user_id in nearest_neighbors]).flatten()\n\n prefs = np.array([self.model.preference_value(to_user_id, item_id)\n for to_user_id in nearest_neighbors])\n\n #prefs = prefs[~np.isnan(prefs)]\n similarities = similarities[~np.isnan(prefs)]\n\n prefs_sim = np.sum(prefs[~np.isnan(similarities)] *\n similarities[~np.isnan(similarities)])\n total_similarity = np.sum(similarities)\n\n #Throw out the estimate if it was based on no data points,\n #of course, but also if based on just one. This is a bit\n #of a band-aid on the 'stock' item-based algorithm for\n #the moment. The reason is that in this case the estimate\n #is, simply, the user's rating for one item that happened\n #to have a defined similarity. 
The similarity score doesn't\n #matter, and that seems like a bad situation.\n if total_similarity == 0.0 or \\\n not similarities[~np.isnan(similarities)].size:\n return np.nan\n\n estimated = prefs_sim / total_similarity\n\n if self.capper:\n max_p = self.model.maximum_preference_value()\n min_p = self.model.minimum_preference_value()\n estimated = max_p if estimated > max_p else min_p \\\n if estimated < min_p else estimated\n\n return estimated\n\n def most_similar_users(self, user_id, how_many=None):\n '''\n Return the most similar users to the given user, ordered\n from most similar to least.\n\n Parameters\n -----------\n user_id: int or string\n ID of user for which to find most similar other users\n\n how_many: int\n Desired number of most similar users to find (default=None ALL)\n '''\n old_how_many = self.similarity.num_best\n #+1 since it returns the identity.\n self.similarity.num_best = how_many + 1 \\\n if how_many is not None else None\n similarities = self.similarity[user_id]\n self.similarity.num_best = old_how_many\n return np.array([to_user_id for to_user_id, pref in similarities \\\n if user_id != to_user_id and not np.isnan(pref)])\n\n def recommend(self, user_id, how_many=None, **params):\n '''\n Return a list of recommended items, ordered from most strongly\n recommend to least.\n\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n how_many: int\n Desired number of recommendations (default=None ALL)\n\n '''\n\n self.set_params(**params)\n\n candidate_items = self.all_other_items(user_id, **params)\n\n recommendable_items = self._top_matches(user_id, \\\n candidate_items, how_many)\n\n return recommendable_items\n\n def _top_matches(self, source_id, target_ids, how_many=None, **params):\n '''\n Parameters\n ----------\n target_ids: array of shape [n_target_ids]\n\n source_id: int or string\n item id to compare against.\n\n how_many: int\n Desired number of most top items to recommend (default=None ALL)\n\n Returns\n --------\n Return the top N matches\n It can be user_ids or item_ids.\n '''\n #Empty target_ids\n if target_ids.size == 0:\n return np.array([])\n\n estimate_preferences = np.vectorize(self.estimate_preference)\n\n preferences = estimate_preferences(source_id, target_ids)\n\n preference_values = preferences[~np.isnan(preferences)]\n target_ids = target_ids[~np.isnan(preferences)]\n\n sorted_preferences = np.lexsort((preference_values,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(target_ids[ind], \\\n preferences[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [target_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n def recommended_because(self, user_id, item_id, how_many=None, **params):\n '''\n Returns the users that were most influential in recommending a\n given item to a given user. 
In most implementations, this\n method will return users that prefers the recommended item and that\n are similar to the given user.\n\n Parameters\n -----------\n user_id : int or string\n ID of the user who was recommended the item\n\n item_id: int or string\n ID of item that was recommended\n\n how_many: int\n Maximum number of items to return (default=None ALL)\n\n Returns\n ----------\n The list of items ordered from most influential in\n recommended the given item to least\n '''\n preferences = self.model.preferences_for_item(item_id)\n\n if self.model.has_preference_values():\n similarities = \\\n np.array([self.similarity.get_similarity(user_id, to_user_id) \\\n for to_user_id, pref in preferences\n if to_user_id != user_id]).flatten()\n prefs = np.array([pref for it, pref in preferences])\n user_ids = np.array([usr for usr, pref in preferences])\n else:\n similarities = \\\n np.array([self.similarity.get_similarity(user_id, to_user_id) \\\n for to_user_id in preferences\n if to_user_id != user_id]).flatten()\n prefs = np.array([1.0 for it in preferences])\n user_ids = np.array(preferences)\n\n scores = prefs[~np.isnan(similarities)] * \\\n (1.0 + similarities[~np.isnan(similarities)])\n\n sorted_preferences = np.lexsort((scores,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(user_ids[ind], \\\n prefs[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [user_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n"
] |
[
[
"numpy.isnan",
"numpy.lexsort",
"numpy.setdiff1d",
"numpy.vectorize",
"numpy.array",
"numpy.sum"
]
] |
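The ranking idiom shared by `_top_matches` in both recommenders above, isolated on toy data: drop NaN estimates with `numpy.isnan`, then sort descending with `numpy.lexsort`.

```python
# Minimal sketch of the NaN-filter + lexsort ranking in _top_matches.
import numpy as np

target_ids = np.array(['a', 'b', 'c', 'd'])
preferences = np.array([3.5, np.nan, 4.0, 1.0])  # NaN = could not estimate

valid = ~np.isnan(preferences)
ids, prefs = target_ids[valid], preferences[valid]
order = np.lexsort((prefs,))[::-1]               # descending by preference
print([ids[i] for i in order])                   # ['c', 'a', 'd']
```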
tristan-paul/TweetOff
|
[
"efaa020af8df43190f37a371272eee3a7731dd7e"
] |
[
"tweetoff/predict.py"
] |
[
"import numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\nfrom .models import User\nfrom .twitter import BASILICA\n\ndef predict_user(user1_name, user2_name, tweet_text):\n user1 = User.query.filter(User.name == user1_name).one()\n user2 = User.query.filter(User.name == user2_name).one()\n user1_embeddings = np.array([tweet.embedding for tweet in user1.tweets])\n user2_embeddings = np.array([tweet.embedding for tweet in user2.tweets])\n user1_labels = np.ones(len(user1.tweets))\n user2_labels = np.zeros(len(user2.tweets))\n\n embeddings = np.vstack([user1_embeddings, user2_embeddings])\n labels = np.concatenate([user1_labels, user2_labels])\n\n log_reg = LogisticRegression(solver = 'lbfgs', max_iter=1000)\n log_reg.fit(embeddings, labels)\n\n tweet_embedding = BASILICA.embed_sentence(tweet_text, model = 'twitter')\n prediction = log_reg.predict(np.array(tweet_embedding).reshape(1, -1))\n return prediction\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"sklearn.linear_model.LogisticRegression",
"numpy.vstack"
]
] |
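A self-contained sketch of the stack-and-label pattern in `predict_user`, with random vectors standing in for the Basilica tweet embeddings and database queries.

```python
# Minimal sketch: binary logistic regression over two users' embeddings.
import numpy as np
from sklearn.linear_model import LogisticRegression

user1_emb = np.random.randn(20, 8)   # stand-in for user1's tweet embeddings
user2_emb = np.random.randn(30, 8)   # stand-in for user2's tweet embeddings
X = np.vstack([user1_emb, user2_emb])
y = np.concatenate([np.ones(len(user1_emb)), np.zeros(len(user2_emb))])

log_reg = LogisticRegression(solver='lbfgs', max_iter=1000).fit(X, y)
print(log_reg.predict(np.random.randn(8).reshape(1, -1)))  # 1. = user1, 0. = user2
```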
theimgclist/tensorflow
|
[
"fdfb6f190577416b402b0b113568222ff4e0d672"
] |
[
"models/research/deep_speech/deep_speech_model.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Network structure for DeepSpeech model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# Supported rnn cells\nSUPPORTED_RNNS = {\n \"lstm\": tf.keras.layers.LSTM,\n \"rnn\": tf.keras.layers.SimpleRNN,\n \"gru\": tf.keras.layers.GRU,\n}\n\n# Parameters for batch normalization\n_MOMENTUM = 0.1\n_EPSILON = 1e-05\n\n\ndef _conv_bn_layer(cnn_input, filters, kernel_size, strides, layer_id):\n \"\"\"2D convolution + batch normalization layer.\n\n Args:\n cnn_input: input data for convolution layer.\n filters: an integer, number of output filters in the convolution.\n kernel_size: a tuple specifying the height and width of the 2D convolution\n window.\n strides: a tuple specifying the stride length of the convolution.\n layer_id: an integer specifying the layer index.\n\n Returns:\n tensor output from the current layer.\n \"\"\"\n output = tf.keras.layers.Conv2D(\n filters=filters, kernel_size=kernel_size, strides=strides, padding=\"same\",\n activation=\"linear\", name=\"cnn_{}\".format(layer_id))(cnn_input)\n output = tf.keras.layers.BatchNormalization(\n momentum=_MOMENTUM, epsilon=_EPSILON)(output)\n return output\n\n\ndef _rnn_layer(input_data, rnn_cell, rnn_hidden_size, layer_id, rnn_activation,\n is_batch_norm, is_bidirectional):\n \"\"\"Defines a batch normalization + rnn layer.\n\n Args:\n input_data: input tensors for the current layer.\n rnn_cell: RNN cell instance to use.\n rnn_hidden_size: an integer for the dimensionality of the rnn output space.\n layer_id: an integer for the index of current layer.\n rnn_activation: activation function to use.\n is_batch_norm: a boolean specifying whether to perform batch normalization\n on input states.\n is_bidirectional: a boolean specifying whether the rnn layer is\n bi-directional.\n\n Returns:\n tensor output for the current layer.\n \"\"\"\n if is_batch_norm:\n input_data = tf.keras.layers.BatchNormalization(\n momentum=_MOMENTUM, epsilon=_EPSILON)(input_data)\n rnn_layer = rnn_cell(\n rnn_hidden_size, activation=rnn_activation, return_sequences=True,\n name=\"rnn_{}\".format(layer_id))\n if is_bidirectional:\n rnn_layer = tf.keras.layers.Bidirectional(rnn_layer, merge_mode=\"sum\")\n\n return rnn_layer(input_data)\n\n\ndef _ctc_lambda_func(args):\n \"\"\"Compute ctc loss.\"\"\"\n # py2 needs explicit tf import for keras Lambda layer\n import tensorflow as tf\n\n y_pred, labels, input_length, label_length = args\n return tf.keras.backend.ctc_batch_cost(\n labels, y_pred, input_length, label_length)\n\n\ndef _calc_ctc_input_length(args):\n \"\"\"Compute the actual input length after convolution for ctc_loss function.\n\n Basically, we need to know the scaled input_length after conv 
layers.\n new_input_length = old_input_length * ctc_time_steps / max_time_steps\n\n Args:\n args: the input args to compute ctc input length.\n\n Returns:\n ctc_input_length, which is required for ctc loss calculation.\n \"\"\"\n # py2 needs explicit tf import for keras Lambda layer\n import tensorflow as tf\n\n input_length, input_data, y_pred = args\n max_time_steps = tf.shape(input_data)[1]\n ctc_time_steps = tf.shape(y_pred)[1]\n ctc_input_length = tf.multiply(\n tf.to_float(input_length), tf.to_float(ctc_time_steps))\n ctc_input_length = tf.to_int32(tf.floordiv(\n ctc_input_length, tf.to_float(max_time_steps)))\n return ctc_input_length\n\n\nclass DeepSpeech(tf.keras.models.Model):\n \"\"\"DeepSpeech model.\"\"\"\n\n def __init__(self, input_shape, num_rnn_layers, rnn_type, is_bidirectional,\n rnn_hidden_size, rnn_activation, num_classes, use_bias):\n \"\"\"Initialize DeepSpeech model.\n\n Args:\n input_shape: an tuple to indicate the dimension of input dataset. It has\n the format of [time_steps(T), feature_bins(F), channel(1)]\n num_rnn_layers: an integer, the number of rnn layers. By default, it's 5.\n rnn_type: a string, one of the supported rnn cells: gru, rnn and lstm.\n is_bidirectional: a boolean to indicate if the rnn layer is bidirectional.\n rnn_hidden_size: an integer for the number of hidden states in each unit.\n rnn_activation: a string to indicate rnn activation function. It can be\n one of tanh and relu.\n num_classes: an integer, the number of output classes/labels.\n use_bias: a boolean specifying whether to use bias in the last fc layer.\n \"\"\"\n # Input variables\n input_data = tf.keras.layers.Input(\n shape=input_shape, name=\"features\")\n\n # Two cnn layers\n conv_layer_1 = _conv_bn_layer(\n input_data, filters=32, kernel_size=(41, 11), strides=(2, 2),\n layer_id=1)\n\n conv_layer_2 = _conv_bn_layer(\n conv_layer_1, filters=32, kernel_size=(21, 11), strides=(2, 1),\n layer_id=2)\n # output of conv_layer2 with the shape of\n # [batch_size (N), times (T), features (F), channels (C)]\n\n # RNN layers.\n # Convert the conv output to rnn input\n rnn_input = tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten())(\n conv_layer_2)\n\n rnn_cell = SUPPORTED_RNNS[rnn_type]\n for layer_counter in xrange(num_rnn_layers):\n # No batch normalization on the first layer\n is_batch_norm = (layer_counter != 0)\n rnn_input = _rnn_layer(\n rnn_input, rnn_cell, rnn_hidden_size, layer_counter + 1,\n rnn_activation, is_batch_norm, is_bidirectional)\n\n # FC layer with batch norm\n fc_input = tf.keras.layers.BatchNormalization(\n momentum=_MOMENTUM, epsilon=_EPSILON)(rnn_input)\n\n y_pred = tf.keras.layers.Dense(num_classes, activation=\"softmax\",\n use_bias=use_bias, name=\"y_pred\")(fc_input)\n\n # For ctc loss\n labels = tf.keras.layers.Input(name=\"labels\", shape=[None,], dtype=\"int32\")\n label_length = tf.keras.layers.Input(\n name=\"label_length\", shape=[1], dtype=\"int32\")\n input_length = tf.keras.layers.Input(\n name=\"input_length\", shape=[1], dtype=\"int32\")\n ctc_input_length = tf.keras.layers.Lambda(\n _calc_ctc_input_length, output_shape=(1,), name=\"ctc_input_length\")(\n [input_length, input_data, y_pred])\n\n # Keras doesn't currently support loss funcs with extra parameters\n # so CTC loss is implemented in a lambda layer\n ctc_loss = tf.keras.layers.Lambda(\n _ctc_lambda_func, output_shape=(1,), name=\"ctc_loss\")(\n [y_pred, labels, ctc_input_length, label_length])\n\n super(DeepSpeech, self).__init__(\n inputs=[input_data, labels, input_length, 
label_length],\n outputs=[ctc_input_length, ctc_loss, y_pred])\n"
] |
[
[
"tensorflow.keras.layers.Lambda",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.ctc_batch_cost",
"tensorflow.keras.layers.Bidirectional",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.to_float",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] |
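
The APIs listed above compose a functional-style tf.keras graph. A minimal sketch of the same Conv2D + BatchNormalization + TimeDistributed(Flatten) + Bidirectional RNN + Dense chain; all shapes and layer sizes here are illustrative, not the model's actual hyperparameters.

import tensorflow as tf

# Hypothetical input: (time_steps, feature_bins, channels); sizes invented.
inputs = tf.keras.layers.Input(shape=(None, 161, 1), name="features")
x = tf.keras.layers.Conv2D(32, kernel_size=(41, 11), strides=(2, 2),
                           padding="same", activation="linear")(inputs)
x = tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5)(x)
# collapse (feature, channel) per time step before the recurrent stack
x = tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten())(x)
x = tf.keras.layers.Bidirectional(
    tf.keras.layers.GRU(64, return_sequences=True), merge_mode="sum")(x)
y_pred = tf.keras.layers.Dense(29, activation="softmax", name="y_pred")(x)
model = tf.keras.models.Model(inputs=inputs, outputs=y_pred)
model.summary()
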
EPFL-LCN/pub-illing2021-neurips
|
[
"f5c9d7380f123a155a7c2b913df6f1fddb787be1"
] |
[
"vision/CLAPPVision/vision/models/Supervised_Loss.py"
] |
[
"import torch.nn as nn\nimport torch\n\nfrom CLAPPVision.utils import utils\n\nclass Supervised_Loss(nn.Module):\n def __init__(self, opt, hidden_dim, calc_accuracy):\n super(Supervised_Loss, self).__init__()\n\n self.opt = opt\n\n self.pool = None\n self.hidden_dim = hidden_dim\n self.calc_accuracy = calc_accuracy\n\n # create linear classifier\n if opt.dataset == \"stl10\":\n n_classes = 10\n else:\n raise Exception(\"Other datasets are not implemented yet\")\n\n self.linear_classifier = nn.Sequential(\n nn.Linear(self.hidden_dim, n_classes)\n ).to(self.opt.device)\n\n self.classification_loss = nn.CrossEntropyLoss()\n\n self.label_num = 1\n\n\n def forward(self, z, label):\n total_loss, accuracies = self.calc_supervised_loss(\n z, label\n )\n return total_loss, accuracies\n\n\n def calc_supervised_loss(self, z, labels):\n # forward pass\n z = nn.functional.adaptive_avg_pool2d(z, 1).squeeze()\n\n output = self.linear_classifier(z)\n\n loss = self.classification_loss(output, labels)\n\n accuracy = torch.zeros(1)\n \n # calculate accuracy\n if self.calc_accuracy:\n accuracy[0], = utils.accuracy(output.data, labels, topk=(1,))\n\n return loss, accuracy\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.zeros"
]
] |
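
A standalone run of the pool-then-classify path in Supervised_Loss above; the feature-map shape and class count are made up for illustration.

import torch
import torch.nn as nn

batch, hidden_dim, n_classes = 4, 256, 10
linear_classifier = nn.Linear(hidden_dim, n_classes)
classification_loss = nn.CrossEntropyLoss()

z = torch.randn(batch, hidden_dim, 7, 7)               # encoder feature map
z = nn.functional.adaptive_avg_pool2d(z, 1).squeeze()  # -> (batch, hidden_dim)
output = linear_classifier(z)
labels = torch.randint(0, n_classes, (batch,))
print(classification_loss(output, labels).item())
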
kristianeschenburg/ddCRP
|
[
"3c503418050b45e2156964c1e0165e92eb980fab"
] |
[
"ddCRP/Priors.py"
] |
[
"import numpy as np\nfrom numpy.linalg import det\nfrom scipy.special import gammaln, multigammaln\nfrom ddCRP.PriorBase import Prior\n\n\nclass NIW(Prior):\n\n \"\"\"\n Normal-Inverse-Chi-Squared prior model for connectivity features.\n\n Parameters:\n - - - - - -\n mu0, kappa0: float\n priors on distribution mean\n nu0, lambda0: float, array\n priors on distribution variance\n \"\"\"\n\n def __init__(self, mu0, kappa0, nu0, lambda0):\n\n [d, _] = lambda0.shape\n assert nu0 > (d-1), \"Degrees of freedom must be greater than the dimension-1.\"\n\n self.mu0 = mu0\n self.kappa0 = kappa0\n self.nu0 = nu0\n self.lambda0 = lambda0\n\n @staticmethod\n def sufficient_statistics(features):\n \"\"\"\n Compute sufficient statistics for data.\n\n Parameters:\n - - - - -\n features : float, array\n data array for single cluster\n\n Returns:\n - - - -\n n : int\n sample size\n mu : array\n mean of each feature\n ssq : array\n sum of squares of each feature\n \"\"\"\n # n samples\n [n, _] = features.shape\n # feature means\n mu = features.mean(0)\n # scatter matrix\n S = features.T - mu[:, None]\n S = S.dot(S.T)\n\n return [float(n), mu, S]\n\n def posterior_parameters(self, suff_stats):\n \"\"\"\n Computes cluster-specific marginal likelihood hyperparameters\n of a Normal / Normal-Inverse-Chi-Squared model.\n Parameters:\n - - - - -\n suff_stats : sufficient statistics for single cluster\n Returns:\n - - - -\n kappaN : updated kappa\n nuN : updated nu\n sigmaN : updated sigma\n \"\"\"\n # extract sufficient statistics\n n, mu, S = suff_stats[0:3]\n\n # update kappa and nu\n kappaN = self.kappa0 + n\n nuN = self.nu0 + n\n\n central = mu - self.mu0\n scatter = central.dot(central.T)\n\n deviation = ((n*self.kappa0) / (n+self.kappa0)) * scatter\n lambdaN = self.lambda0 + S + deviation\n\n return [kappaN, nuN, lambdaN]\n\n def marginal_evidence(self, posteriors, suff_stats):\n\n \"\"\"\n Compute the log-likelihood of the data.\n\n Parameters:\n - - - - -\n posteriors: list, floats\n posterior hyperparameters\n suff_stats: list, floats\n sufficient statistics\n \"\"\"\n\n kappa, nu, L = posteriors[0:3]\n _, p = L.shape\n n = suff_stats[0]\n\n numer = multigammaln(nu/2, p) + \\\n (self.nu0/2)*np.log(np.abs(det(self.lambda0))) + \\\n (p/2)*np.log(self.kappa0) \n\n denom = multigammaln(self.nu0/2, p) + \\\n (nu/2)*np.log(np.abs(det(L))) + \\\n (p/2)*np.log(kappa) + \\\n (n*p/2)*np.log(np.pi)\n\n lp = numer - denom\n\n return lp\n\n def full_evidence(self, parcels, features):\n\n \"\"\"\n Compute the full marginal evidence of a given clustering.\n\n Parameters:\n - - - - - \n parcels: dictionary\n mapping of cluster labels to sample indices\n features: float, array\n data feature vectors\n \n Returns:\n - - - -\n lp: float\n full evidence of model\n \"\"\"\n\n feats = [features[idx, :] for idx in parcels.values()]\n suff_stats = map(self.sufficient_statistics, feats)\n posteriors = map(self.posterior_parameters, suff_stats)\n cluster_prob = map(self.marginal_evidence, posteriors, suff_stats)\n\n lp = np.sum(list(cluster_prob))\n\n return lp\n\n\n\nclass NIX2(Prior):\n\n \"\"\"\n Normal-Inverse-Chi-Squared prior model for connectivity features.\n\n Parameters:\n - - - - - -\n mu0, kappa0: float\n priors on distribution mean\n nu0, sigma0: float\n priors on distribution variance\n \"\"\"\n\n def __init__(self, mu0, kappa0, nu0, sigma0):\n\n self.mu0 = mu0\n self.kappa0 = kappa0\n self.nu0 = nu0\n self.sigma0 = sigma0\n\n @staticmethod\n def sufficient_statistics(features):\n \"\"\"\n Compute sufficient statistics 
for data.\n\n Parameters:\n - - - - -\n features : float, array\n data array for single cluster\n\n Returns:\n - - - -\n n : int\n sample size\n mu : array\n mean of each feature\n ssq : array\n sum of squares of each feature\n \"\"\"\n # n samples\n [n, _] = features.shape\n # feature means\n mu = features.mean(0)\n # feature sum of squares\n ssq = ((features-mu[None, :])**2).sum(0)\n\n return [float(n), mu, ssq]\n\n def posterior_parameters(self, suff_stats):\n \"\"\"\n Computes cluster-specific marginal likelihood hyperparameters\n of a Normal / Normal-Inverse-Chi-Squared model.\n Parameters:\n - - - - -\n suff_stats : sufficient statistics for single cluster\n Returns:\n - - - -\n kappaN : updated kappa\n nuN : updated nu\n sigmaN : updated sigma\n \"\"\"\n # extract sufficient statistics\n n, mu, ssq = suff_stats[0:3]\n\n # update kappa and nu\n kappaN = self.kappa0 + n\n nuN = self.nu0 + n\n\n deviation = ((n*self.kappa0) / (n+self.kappa0)) * ((self.mu0 - mu)**2)\n sigmaN = (1./nuN) * (self.nu0*self.sigma0 + ssq + deviation)\n\n return [kappaN, nuN, sigmaN]\n\n def marginal_evidence(self, posteriors, suff_stats):\n\n \"\"\"\n Compute the log-likelihood of the data.\n\n Parameters:\n - - - - -\n posteriors: list, floats\n posterior hyperparameters\n suff_stats: list, floats\n sufficient statistics\n \"\"\"\n\n kappa, nu, sigma = posteriors[0:3]\n p = len(sigma)\n n = suff_stats[0]\n\n # ratio of gamma functions\n gam = gammaln(nu/2) - gammaln(self.nu0/2)\n\n # terms with square roots in likelihood function\n inner = (1./2) * (np.log(self.kappa0) + self.nu0*np.log(\n self.nu0*self.sigma0) - np.log(kappa) -\n nu*np.log(nu) - n*np.log(np.pi))\n\n # sum of sigma_n for each feature\n outer = (-nu/2.)*np.log(sigma).sum()\n\n lp = p*(gam + inner) + outer\n\n return lp\n\n def full_evidence(self, parcels, features):\n \"\"\"\n Compute the full marginal evidence of a given clustering.\n\n Parameters:\n - - - - - \n parcels: dictionary\n mapping of cluster labels to sample indices\n features: float, array\n data feature vectors\n \n Returns:\n - - - -\n lp: float\n full evidence of model\n \"\"\"\n\n feats = [features[idx, :] for idx in parcels.values()]\n suff_stats = map(self.sufficient_statistics, feats)\n posteriors = map(self.posterior_parameters, suff_stats)\n cluster_prob = map(self.marginal_evidence, posteriors, suff_stats)\n\n lp = np.sum(list(cluster_prob))\n\n return lp\n"
] |
[
[
"numpy.linalg.det",
"numpy.log",
"scipy.special.multigammaln",
"scipy.special.gammaln"
]
] |
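
A toy run of the NIX2 computation above, from sufficient statistics to posterior hyperparameters to log marginal evidence; the prior hyperparameters and data are arbitrary.

import numpy as np
from scipy.special import gammaln

mu0, kappa0, nu0, sigma0 = 0.0, 1.0, 3.0, 1.0   # arbitrary priors
features = np.random.randn(20, 4)                # synthetic cluster data

n, p = float(features.shape[0]), features.shape[1]
mu = features.mean(0)
ssq = ((features - mu[None, :]) ** 2).sum(0)

# posterior_parameters
kappaN = kappa0 + n
nuN = nu0 + n
deviation = ((n * kappa0) / (n + kappa0)) * ((mu0 - mu) ** 2)
sigmaN = (1.0 / nuN) * (nu0 * sigma0 + ssq + deviation)

# marginal_evidence
gam = gammaln(nuN / 2) - gammaln(nu0 / 2)
inner = 0.5 * (np.log(kappa0) + nu0 * np.log(nu0 * sigma0)
               - np.log(kappaN) - nuN * np.log(nuN) - n * np.log(np.pi))
outer = (-nuN / 2.0) * np.log(sigmaN).sum()
print(p * (gam + inner) + outer)
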
csala/RDT
|
[
"ca639dc1eeae5f1bb9f78e8b163659680ce627e3"
] |
[
"tests/integration/test_hyper_transformer.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nfrom rdt import HyperTransformer\nfrom rdt.transformers import OneHotEncodingTransformer\n\n\ndef get_input_data_with_nan():\n data = pd.DataFrame({\n 'integer': [1, 2, 1, 3, 1],\n 'float': [0.1, 0.2, 0.1, np.nan, 0.1],\n 'categorical': ['a', 'a', np.nan, 'b', 'a'],\n 'bool': [False, np.nan, False, True, False],\n 'datetime': [\n np.nan, '2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'\n ],\n 'names': ['Jon', 'Arya', 'Arya', 'Jon', 'Jon'],\n })\n data['datetime'] = pd.to_datetime(data['datetime'])\n\n return data\n\n\ndef get_input_data_without_nan():\n data = pd.DataFrame({\n 'integer': [1, 2, 1, 3],\n 'float': [0.1, 0.2, 0.1, 0.1],\n 'categorical': ['a', 'a', 'b', 'a'],\n 'bool': [False, False, True, False],\n 'datetime': [\n '2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'\n ],\n 'names': ['Jon', 'Arya', 'Jon', 'Jon'],\n })\n data['datetime'] = pd.to_datetime(data['datetime'])\n data['bool'] = data['bool'].astype('O') # boolean transformer returns O instead of bool\n\n return data\n\n\ndef get_transformed_data():\n return pd.DataFrame({\n 'integer': [1, 2, 1, 3],\n 'float': [0.1, 0.2, 0.1, 0.1],\n 'categorical': [0.375, 0.375, 0.875, 0.375],\n 'bool': [0.0, 0.0, 1.0, 0.0],\n 'datetime': [\n 1.2649824e+18,\n 1.262304e+18,\n 1.2649824e+18,\n 1.262304e+18\n ],\n 'names': [0.375, 0.875, 0.375, 0.375]\n })\n\n\ndef get_transformed_nan_data():\n return pd.DataFrame({\n 'integer': [1, 2, 1, 3, 1],\n 'float': [0.1, 0.2, 0.1, 0.125, 0.1],\n 'float#1': [0.0, 0.0, 0.0, 1.0, 0.0],\n 'categorical': [0.3, 0.3, 0.9, 0.7, 0.3],\n 'bool': [0.0, -1.0, 0.0, 1.0, 0.0],\n 'bool#1': [0.0, 1.0, 0.0, 0.0, 0.0],\n 'datetime': [\n 1.2636432e+18, 1.2649824e+18, 1.262304e+18,\n 1.2649824e+18, 1.262304e+18\n ],\n 'datetime#1': [1.0, 0.0, 0.0, 0.0, 0.0],\n 'names': [0.3, 0.8, 0.8, 0.3, 0.3],\n })\n\n\ndef get_transformers():\n return {\n 'integer': {\n 'class': 'NumericalTransformer',\n 'kwargs': {\n 'dtype': np.int64,\n }\n },\n 'float': {\n 'class': 'NumericalTransformer',\n 'kwargs': {\n 'dtype': np.float64,\n }\n },\n 'categorical': {\n 'class': 'CategoricalTransformer'\n },\n 'bool': {\n 'class': 'BooleanTransformer'\n },\n 'datetime': {\n 'class': 'DatetimeTransformer'\n },\n 'names': {\n 'class': 'CategoricalTransformer',\n },\n }\n\n\ndef test_hypertransformer_with_transformers():\n data = get_input_data_without_nan()\n transformers = get_transformers()\n\n ht = HyperTransformer(transformers)\n ht.fit(data)\n transformed = ht.transform(data)\n\n expected = get_transformed_data()\n\n np.testing.assert_allclose(\n transformed.sort_index(axis=1).values,\n expected.sort_index(axis=1).values\n )\n\n reversed_data = ht.reverse_transform(transformed)\n\n original_names = data.pop('names')\n reversed_names = reversed_data.pop('names')\n\n pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))\n\n for name in original_names:\n assert name not in reversed_names\n\n\ndef test_hypertransformer_with_transformers_nan_data():\n data = get_input_data_with_nan()\n transformers = get_transformers()\n\n ht = HyperTransformer(transformers)\n ht.fit(data)\n transformed = ht.transform(data)\n\n expected = get_transformed_nan_data()\n\n np.testing.assert_allclose(\n transformed.sort_index(axis=1).values,\n expected.sort_index(axis=1).values\n )\n\n reversed_data = ht.reverse_transform(transformed)\n\n original_names = data.pop('names')\n reversed_names = reversed_data.pop('names')\n\n pd.testing.assert_frame_equal(data.sort_index(axis=1), 
reversed_data.sort_index(axis=1))\n\n for name in original_names:\n assert name not in reversed_names\n\n\ndef test_hypertransformer_without_transformers():\n data = get_input_data_without_nan()\n\n ht = HyperTransformer()\n ht.fit(data)\n transformed = ht.transform(data)\n\n expected = get_transformed_data()\n\n np.testing.assert_allclose(\n transformed.sort_index(axis=1).values,\n expected.sort_index(axis=1).values\n )\n\n reversed_data = ht.reverse_transform(transformed)\n\n original_names = data.pop('names')\n reversed_names = reversed_data.pop('names')\n\n pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))\n\n for name in original_names:\n assert name not in reversed_names\n\n\ndef test_hypertransformer_without_transformers_nan_data():\n data = get_input_data_with_nan()\n\n ht = HyperTransformer()\n ht.fit(data)\n transformed = ht.transform(data)\n\n expected = get_transformed_nan_data()\n\n np.testing.assert_allclose(\n transformed.sort_index(axis=1).values,\n expected.sort_index(axis=1).values\n )\n\n reversed_data = ht.reverse_transform(transformed)\n\n original_names = data.pop('names')\n reversed_names = reversed_data.pop('names')\n\n pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))\n\n for name in original_names:\n assert name not in reversed_names\n\n\ndef test_single_category():\n ht = HyperTransformer(transformers={\n 'a': OneHotEncodingTransformer()\n })\n data = pd.DataFrame({\n 'a': ['a', 'a', 'a']\n })\n\n ht.fit(data)\n transformed = ht.transform(data)\n\n reverse = ht.reverse_transform(transformed)\n\n pd.testing.assert_frame_equal(data, reverse)\n\n\ndef test_dtype_category():\n df = pd.DataFrame({'a': ['a', 'b', 'c']}, dtype='category')\n\n ht = HyperTransformer()\n ht.fit(df)\n\n trans = ht.transform(df)\n\n rever = ht.reverse_transform(trans)\n\n pd.testing.assert_frame_equal(df, rever)\n\n\ndef test_empty_transformers():\n \"\"\"If transformers is an empty dict, do nothing.\"\"\"\n data = get_input_data_without_nan()\n\n ht = HyperTransformer(transformers={})\n ht.fit(data)\n\n transformed = ht.transform(data)\n reverse = ht.reverse_transform(transformed)\n\n pd.testing.assert_frame_equal(data, transformed)\n pd.testing.assert_frame_equal(data, reverse)\n\n\ndef test_empty_transformers_nan_data():\n \"\"\"If transformers is an empty dict, do nothing.\"\"\"\n data = get_input_data_with_nan()\n\n ht = HyperTransformer(transformers={})\n ht.fit(data)\n\n transformed = ht.transform(data)\n reverse = ht.reverse_transform(transformed)\n\n pd.testing.assert_frame_equal(data, transformed)\n pd.testing.assert_frame_equal(data, reverse)\n\n\ndef test_subset_of_columns():\n \"\"\"HyperTransform should be able to transform a subset of the training columns.\n\n See https://github.com/sdv-dev/RDT/issues/152\n \"\"\"\n data = get_input_data_without_nan()\n\n ht = HyperTransformer()\n ht.fit(data)\n\n subset = data[[data.columns[0]]]\n transformed = ht.transform(subset)\n reverse = ht.reverse_transform(transformed)\n\n pd.testing.assert_frame_equal(subset, reverse)\n\n\ndef test_subset_of_columns_nan_data():\n \"\"\"HyperTransform should be able to transform a subset of the training columns.\n\n See https://github.com/sdv-dev/RDT/issues/152\n \"\"\"\n data = get_input_data_with_nan()\n\n ht = HyperTransformer()\n ht.fit(data)\n\n subset = data[[data.columns[0]]]\n transformed = ht.transform(subset)\n reverse = ht.reverse_transform(transformed)\n\n pd.testing.assert_frame_equal(subset, reverse)\n"
] |
[
[
"pandas.to_datetime",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] |
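
Every test above checks one round-trip pattern: fit, transform, reverse_transform, then assert frame equality. A pandas-only sketch of that pattern, with identity copies standing in for the HyperTransformer calls.

import pandas as pd

data = pd.DataFrame({
    'integer': [1, 2, 1, 3],
    'datetime': pd.to_datetime(['2010-02-01', '2010-01-01',
                                '2010-02-01', '2010-01-01']),
})

transformed = data.copy()           # stand-in for ht.transform(data)
reversed_data = transformed.copy()  # stand-in for ht.reverse_transform(...)

pd.testing.assert_frame_equal(data.sort_index(axis=1),
                              reversed_data.sort_index(axis=1))
print("round trip OK")
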
ishansharma/open_cv_feature_detection
|
[
"34f09d6e144d8220cca9295f0a59dba7f9488516"
] |
[
"image_operations/transformations.py"
] |
[
"import cv2\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\nimage = mpimg.imread('../../dataset/Hands/Hand_0000083.jpg')\n\n\ndef resize():\n scaleup = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)\n scaledown = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)\n\n plt.figure(\"Stretch and shrink\")\n\n plt.subplot(131)\n plt.imshow(image)\n plt.title(\"Original Image\")\n\n plt.subplot(132)\n plt.imshow(scaleup)\n plt.title(\"Stretched Image(2x\")\n\n plt.subplot(133)\n plt.imshow(scaledown)\n plt.title(\"Shrinked Image [0.5x]\")\n\n plt.tight_layout()\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.image.imread",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
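
The cv2.resize calls above, runnable without the dataset image: a synthetic array stands in for the .jpg, which may not exist locally.

import cv2
import numpy as np

image = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # synthetic stand-in
scaleup = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
scaledown = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
print(scaleup.shape, scaledown.shape)  # (128, 128, 3) (32, 32, 3)
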
sylvainlapeyrade/LSTM_KDD99_Keras
|
[
"e07c05803dfd4ad454cf5043531bb3e205ec022b"
] |
[
"src/unsw_processing.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import (StandardScaler, OrdinalEncoder,\n LabelEncoder, MinMaxScaler)\npd.options.mode.chained_assignment = None # default='warn' | Disable warnings\n\n# ***** UNSW STRING FEATURES VALUES *****\nproto_values = ['tcp', 'udp', 'arp', 'ospf', 'icmp', 'igmp', 'rtp', 'ddp',\n 'ipv6-frag', 'cftp', 'wsn', 'pvp', 'wb-expak', 'mtp',\n 'pri-enc', 'sat-mon', 'cphb', 'sun-nd', 'iso-ip', 'xtp', 'il',\n 'unas', 'mfe-nsp', '3pc', 'ipv6-route', 'idrp', 'bna', 'swipe',\n 'kryptolan', 'cpnx', 'rsvp', 'wb-mon', 'vmtp', 'ib', 'dgp',\n 'eigrp', 'ax.25', 'gmtp', 'pnni', 'sep', 'pgm', 'idpr-cmtp',\n 'zero', 'rvd', 'mobile', 'narp', 'fc', 'pipe', 'ipcomp',\n 'ipv6-no', 'sat-expak', 'ipv6-opts', 'snp', 'ipcv',\n 'br-sat-mon', 'ttp', 'tcf', 'nsfnet-igp', 'sprite-rpc',\n 'aes-sp3-d', 'sccopmce', 'sctp', 'qnx', 'scps', 'etherip',\n 'aris', 'pim', 'compaq-peer', 'vrrp', 'iatp', 'stp',\n 'l2tp', 'srp', 'sm', 'isis', 'smp', 'fire', 'ptp', 'crtp',\n 'sps', 'merit-inp', 'idpr', 'skip', 'any', 'larp', 'ipip',\n 'micp', 'encap', 'ifmp', 'tp++', 'a/n', 'ipv6', 'i-nlsp',\n 'ipx-n-ip', 'sdrp', 'tlsp', 'gre', 'mhrp', 'ddx', 'ippc',\n 'visa', 'secure-vmtp', 'uti', 'vines', 'crudp', 'iplt',\n 'ggp', 'ip', 'ipnip', 'st2', 'argus', 'bbn-rcc', 'egp',\n 'emcon', 'igp', 'nvp', 'pup', 'xnet', 'chaos', 'mux', 'dcn',\n 'hmp', 'prm', 'trunk-1', 'xns-idp', 'leaf-1', 'leaf-2', 'rdp',\n 'irtp', 'iso-tp4', 'netblt', 'trunk-2', 'cbt']\n\nstate_values = ['FIN', 'INT', 'CON', 'ECO', 'REQ', 'RST', 'PAR', 'URN', 'no',\n 'ACC', 'CLO']\n\nservice_values = ['-', 'ftp', 'smtp', 'snmp', 'http', 'ftp-data',\n 'dns', 'ssh', 'radius', 'pop3', 'dhcp', 'ssl', 'irc']\n\nattack_cat_values = ['Normal', 'Backdoor', 'Analysis', 'Fuzzers', 'Shellcode',\n 'Reconnaissance', 'Exploits', 'DoS', 'Worms', 'Generic']\n\n\ndef unsw_encoding(params):\n # ***** DATA PATHS *****\n data_path = \"./data/\"\n train_data_path = data_path+\"UNSW_NB15_training-set.csv\"\n test_data_path = data_path+\"UNSW_NB15_testing-set.csv\"\n\n # Load csv data into dataframes without 'id' and 'Label'\n train_df = pd.read_csv(train_data_path).drop(['id', 'label'], axis=1)\n test_df = pd.read_csv(test_data_path).drop(['id', 'label'], axis=1)\n\n def process_dataframe(df):\n # Replace attack string with an int\n for i in range(len(attack_cat_values)):\n df['attack_cat'] = df['attack_cat'].replace(\n [attack_cat_values[i]], i)\n\n # Assign x (inputs) and y (outputs) of the network\n y = df['attack_cat']\n x = df.drop(columns='attack_cat')\n\n # ***** MULTIPLE ENCODER CHOICE *****\n # Encode categorical features as an integer array\n if params['encoder'] == 'ordinalencoder':\n x = OrdinalEncoder().fit_transform(x)\n # Encode labels with value between 0 and n_classes-1.\n elif params['encoder'] == 'labelencoder':\n x = x.apply(LabelEncoder().fit_transform)\n else:\n # Replace String features with ints\n for i in range(len(proto_values)):\n x['proto'] = x['proto'].replace(proto_values[i], i)\n\n for i in range(len(state_values)):\n x['state'] = x['state'].replace(state_values[i], i)\n\n for i in range(len(service_values)):\n x['service'] = x['service'].replace(service_values[i], i)\n # Standardize by removing the mean and scaling to unit variance\n if params['encoder'] == \"standardscaler\":\n x = StandardScaler().fit_transform(x)\n # Transforms features by scaling each feature to range [0, 1]\n elif params['encoder'] == \"minmaxscaler01\":\n x = MinMaxScaler(feature_range=(0, 
1)).fit_transform(x)\n # Transforms features by scaling each feature to range [-1, 1]\n elif params['encoder'] == \"minmaxscaler11\":\n x = MinMaxScaler(feature_range=(-1, 1)).fit_transform(x)\n\n return x, y\n\n x_train, Y_train = process_dataframe(train_df)\n x_test, Y_test = process_dataframe(test_df)\n\n # Apply one-hot encoding to outputs\n y_train = to_categorical(Y_train)\n y_test = to_categorical(Y_test)\n\n return x_train, x_test, y_train, y_test\n"
] |
[
[
"pandas.read_csv",
"sklearn.preprocessing.OrdinalEncoder",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.MinMaxScaler"
]
] |
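
The encoder-selection branches of unsw_encoding above, exercised on a toy frame; the columns and values are invented for illustration.

import pandas as pd
from sklearn.preprocessing import (OrdinalEncoder, StandardScaler,
                                   MinMaxScaler, LabelEncoder)

x = pd.DataFrame({'proto': ['tcp', 'udp', 'tcp'], 'dur': [0.1, 2.3, 0.7]})

print(OrdinalEncoder().fit_transform(x))              # 'ordinalencoder' branch
print(x.apply(LabelEncoder().fit_transform))          # 'labelencoder' branch

x_num = x.assign(proto=LabelEncoder().fit_transform(x['proto']))
print(StandardScaler().fit_transform(x_num))          # 'standardscaler' branch
print(MinMaxScaler(feature_range=(-1, 1)).fit_transform(x_num))  # 'minmaxscaler11'
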
mkhorton/mp-dash-components
|
[
"b9af1b59f0120a90897631d9a7f8d9f0ae561de9"
] |
[
"crystal_toolkit/core/legend.py"
] |
[
"from pymatgen.core.periodic_table import Specie, Element\nfrom pymatgen.core.structure import Molecule\nfrom pymatgen.core.structure import SiteCollection, Site\nfrom pymatgen.analysis.molecule_structure_comparator import CovalentRadius\nfrom pymatgen.util.string import unicodeify_species\n\nfrom monty.json import MSONable\nfrom monty.serialization import loadfn\n\nfrom itertools import chain\nfrom collections import defaultdict\n\nfrom palettable.colorbrewer.qualitative import Set1_9, Set2_8\nfrom sklearn.preprocessing import LabelEncoder\nfrom matplotlib.cm import get_cmap\nfrom webcolors import html5_parse_legacy_color, html5_serialize_simple_color\n\nfrom typing import Union, Optional, Tuple, Dict, List, Any\n\nimport warnings\nimport numpy as np\nimport os\n\n# element colors forked from pymatgen\nmodule_dir = os.path.dirname(os.path.abspath(__file__))\nEL_COLORS = loadfn(os.path.join(module_dir, \"ElementColorSchemes.yaml\"))\n\n\nclass Legend(MSONable):\n \"\"\"\n Help generate a legend (colors and radii) for a Structure or Molecule\n such that colors and radii can be displayed for the appropriate species.\n\n Note that species themselves have a color (for example, Oxygen is typically\n red), but that we might also want to color-code by site properties (for example,\n magnetic moment), thus this class has to take into account both the species\n present and its context (the specific site the species is at) to correctly\n generate the legend.\n \"\"\"\n\n default_color_scheme = \"Jmol\"\n default_color = [0, 0, 0]\n default_radius = 1.0\n fallback_radius = 0.5\n uniform_radius = 0.5\n\n def __init__(\n self,\n site_collection: Union[SiteCollection, Site],\n color_scheme: str = \"Jmol\",\n radius_scheme: str = \"uniform\",\n cmap: str = \"coolwarm\",\n cmap_range: Optional[Tuple[float, float]] = None,\n ):\n \"\"\"\n Create a legend for a given SiteCollection to choose how to\n display colors and radii for the given sites and the species\n on those sites.\n\n If a site has a \"display_color\" or \"display_radius\" site\n property defined, this can be used to manually override the\n displayed colors and radii respectively.\n\n Args:\n site_collection: SiteCollection or, for convenience, a\n single site can be provided and this will be converted\n into a SiteCollection\n color_scheme: choose how to color-code species, one of\n \"Jmol\", \"VESTA\", \"accessible\" or a scalar site property\n (e.g. magnetic moment) or a categorical/string site\n property (e.g. 
Wyckoff label)\n radius_scheme: choose the radius for a species, one of\n \"atomic\", \"specified_or_average_ionic\", \"covalent\",\n \"van_der_waals\", \"atomic_calculated\", \"uniform\"\n cmap: only used if color_mode is set to a scalar site\n property, defines the matplotlib color map to use, by\n default is blue-white-red for negative to postive values\n cmap_range: only used if color_mode is set to a scalar site\n property, defines the minimum and maximum values of the\n color scape\n \"\"\"\n\n if isinstance(site_collection, Site):\n site_collection = Molecule.from_sites([site_collection])\n\n site_prop_types = self.analyze_site_props(site_collection)\n\n self.allowed_color_schemes = (\n [\"VESTA\", \"Jmol\", \"accessible\"]\n + site_prop_types.get(\"scalar\", [])\n + site_prop_types.get(\"categorical\", [])\n )\n\n self.allowed_radius_schemes = (\n \"atomic\",\n \"specified_or_average_ionic\",\n \"covalent\",\n \"van_der_waals\",\n \"atomic_calculated\",\n \"uniform\",\n )\n\n if color_scheme not in self.allowed_color_schemes:\n warnings.warn(\n f\"Color scheme {color_scheme} not available, \"\n f\"falling back to {self.default_color_scheme}.\"\n )\n color_scheme = self.default_color_scheme\n\n # if color-coding by a scalar site property, determine minimum and\n # maximum values for color scheme, will default to be symmetric\n # about zero\n if color_scheme in site_prop_types.get(\"scalar\", []) and not cmap_range:\n props = np.array(\n [\n p\n for p in site_collection.site_properties[color_scheme]\n if p is not None\n ]\n )\n prop_max = max([abs(min(props)), max(props)])\n prop_min = -prop_max\n cmap_range = (prop_min, prop_max)\n\n el_colors = EL_COLORS.copy()\n el_colors.update(\n self.generate_accessible_color_scheme_on_the_fly(site_collection)\n )\n\n self.categorical_colors = self.generate_categorical_color_scheme_on_the_fly(\n site_collection, site_prop_types\n )\n\n self.el_colors = el_colors\n self.site_prop_types = site_prop_types\n self.site_collection = site_collection\n self.color_scheme = color_scheme\n self.radius_scheme = radius_scheme\n self.cmap = cmap\n self.cmap_range = cmap_range\n\n @staticmethod\n def generate_accessible_color_scheme_on_the_fly(\n site_collection: SiteCollection,\n ) -> Dict[str, Dict[str, Tuple[int, int, int]]]:\n \"\"\"\n e.g. 
for a color scheme more appropriate for people with color blindness\n\n Args:\n site_collection: SiteCollection\n\n Returns: A dictionary in similar format to EL_COLORS\n\n \"\"\"\n\n color_scheme = {}\n\n all_species = set(\n chain.from_iterable(\n comp.keys() for comp in site_collection.species_and_occu\n )\n )\n all_elements = sorted([sp.as_dict()[\"element\"] for sp in all_species])\n\n # thanks to https://doi.org/10.1038/nmeth.1618\n palette = [\n (0, 0, 0), # 0, black\n (230, 159, 0), # 1, orange\n (86, 180, 233), # 2, sky blue\n (0, 158, 115), # 3, bluish green\n (240, 228, 66), # 4, yellow\n (0, 114, 178), # 5, blue\n (213, 94, 0), # 6, vermillion\n (204, 121, 167), # 7, reddish purple\n (255, 255, 255), # 8, white\n ]\n\n # similar to CPK, mapping element to palette index\n preferred_colors = {\n \"O\": 6,\n \"N\": 2,\n \"C\": 0,\n \"H\": 8,\n \"F\": 3,\n \"Cl\": 3,\n \"Fe\": 1,\n \"Br\": 7,\n \"I\": 7,\n \"P\": 1,\n \"S\": 4,\n }\n\n if len(set(all_elements)) > len(palette):\n warnings.warn(\n \"Too many distinct types of site to use an accessible color scheme, \"\n \"some sites will be given the default color.\"\n )\n\n preferred_elements_present = [\n el for el in all_elements if el in preferred_colors.keys()\n ]\n\n colors_assigned = []\n for el in preferred_elements_present:\n if preferred_colors[el] not in colors_assigned:\n color_scheme[el] = palette[preferred_colors[el]]\n colors_assigned.append(preferred_colors[el])\n\n remaining_elements = [\n el for el in all_elements if el not in color_scheme.keys()\n ]\n remaining_palette = [\n c for idx, c in enumerate(palette) if idx not in colors_assigned\n ]\n\n for el in remaining_elements:\n if remaining_palette:\n color_scheme[el] = remaining_palette.pop()\n\n return {\"accessible\": color_scheme}\n\n @staticmethod\n def generate_categorical_color_scheme_on_the_fly(\n site_collection: SiteCollection, site_prop_types\n ) -> Dict[str, Dict[str, Tuple[int, int, int]]]:\n \"\"\"\n e.g. for Wykcoff\n\n Args:\n site_collection: SiteCollection\n\n Returns: A dictionary in similar format to EL_COLORS\n\n \"\"\"\n\n color_scheme = {}\n\n palette = Set1_9.colors\n\n for site_prop_name in site_prop_types.get(\"categorical\", []):\n\n props = np.array(site_collection.site_properties[site_prop_name])\n props[props == None] = \"None\"\n\n le = LabelEncoder()\n le.fit(props)\n transformed_props = le.transform(props)\n\n # if we have more categories than availiable colors,\n # arbitrarily group some categories together\n if len(set(props)) > len(palette):\n warnings.warn(\n \"Too many categories for a complete categorical color scheme.\"\n )\n transformed_props = [\n p if p < len(palette) else -1 for p in transformed_props\n ]\n\n colors = {name: palette[p] for name, p in zip(props, transformed_props)}\n\n color_scheme[site_prop_name] = colors\n\n return color_scheme\n\n def get_color(self, sp: Union[Specie, Element], site: Optional[Site] = None) -> str:\n \"\"\"\n Get a color to render a specific species. 
Optionally, you can provide\n a site for context, since ...\n\n Args:\n sp: Specie or Element\n site: Site\n\n Returns: Color\n\n \"\"\"\n\n # allow manual override by user\n if site and \"display_color\" in site.properties:\n color = site.properties[\"display_color\"]\n # TODO: next two lines due to change in API, will be removed\n if isinstance(color, list) and isinstance(color[0], str):\n color = color[0]\n if isinstance(color, list):\n return html5_serialize_simple_color(color)\n else:\n return html5_serialize_simple_color(html5_parse_legacy_color(color))\n\n if self.color_scheme in (\"VESTA\", \"Jmol\", \"accessible\"):\n el = sp.as_dict()[\"element\"]\n color = self.el_colors[self.color_scheme].get(\n el, self.el_colors[\"Extras\"].get(el, self.default_color)\n )\n\n elif self.color_scheme in self.site_prop_types.get(\"scalar\", []):\n\n if not site:\n raise ValueError(\n \"Requires a site for context to get the \"\n \"appropriate site property.\"\n )\n\n prop = site.properties[self.color_scheme]\n\n if prop:\n\n cmap = get_cmap(self.cmap)\n\n # normalize in [0, 1] range, as expected by cmap\n prop_min = self.cmap_range[0]\n prop_max = self.cmap_range[1]\n prop_normed = (prop - prop_min) / (prop_max - prop_min)\n\n color = [int(c * 255) for c in cmap(prop_normed)[0:3]]\n\n else:\n\n # fallback if site prop is None\n color = self.default_color\n\n elif self.color_scheme in self.site_prop_types.get(\"categorical\", []):\n\n if not site:\n raise ValueError(\n \"Requires a site for context to get the \"\n \"appropriate site property.\"\n )\n\n prop = site.properties[self.color_scheme]\n\n color = self.categorical_colors[self.color_scheme].get(\n prop, self.default_color\n )\n\n else:\n\n raise ValueError(\n f\"Unknown color for {sp} and color scheme {self.color_scheme}.\"\n )\n\n return html5_serialize_simple_color(color)\n\n def get_radius(\n self, sp: Union[Specie, Element], site: Optional[Site] = None\n ) -> float:\n\n # allow manual override by user\n if site and \"display_radius\" in site.properties:\n return site.properties[\"display_radius\"]\n\n if self.radius_scheme not in self.allowed_radius_schemes:\n raise ValueError(\n f\"Unknown radius scheme {self.radius_scheme}, \"\n f\"choose from: {self.allowed_radius_schemes}.\"\n )\n\n radius = None\n if self.radius_scheme == \"uniform\":\n radius = self.uniform_radius\n elif self.radius_scheme == \"atomic\":\n radius = float(sp.atomic_radius)\n elif (\n self.radius_scheme == \"specified_or_average_ionic\"\n and isinstance(sp, Specie)\n and sp.oxi_state\n ):\n radius = float(sp.ionic_radius)\n elif self.radius_scheme == \"specified_or_average_ionic\":\n radius = float(sp.average_ionic_radius)\n elif self.radius_scheme == \"covalent\":\n el = str(getattr(sp, \"element\", sp))\n radius = float(CovalentRadius.radius[el])\n elif self.radius_scheme == \"van_der_waals\":\n radius = float(sp.van_der_waals_radius)\n elif self.radius_scheme == \"atomic_calculated\":\n radius = float(sp.atomic_radius_calculated)\n\n if (not radius) or (not isinstance(radius, float)):\n warnings.warn(\n \"Radius unknown for {} and strategy {}, \"\n \"setting to 0.5.\".format(sp, self.radius_scheme)\n )\n radius = self.fallback_radius\n\n return radius\n\n @staticmethod\n def analyze_site_props(site_collection: SiteCollection) -> Dict[str, List[str]]:\n \"\"\"\n Returns: A dictionary with keys \"scalar\", \"matrix\", \"vector\", \"categorical\"\n and values of a list of site property names corresponding to each type\n \"\"\"\n # (implicitly assumes all site props 
for a given key are same type)\n site_prop_names = defaultdict(list)\n for name, props in site_collection.site_properties.items():\n if isinstance(props[0], float) or isinstance(props[0], int):\n site_prop_names[\"scalar\"].append(name)\n elif isinstance(props[0], list) and len(props[0]) == 3:\n if isinstance(props[0][0], list) and len(props[0][0]) == 3:\n site_prop_names[\"matrix\"].append(name)\n else:\n site_prop_names[\"vector\"].append(name)\n elif isinstance(props[0], str):\n site_prop_names[\"categorical\"].append(name)\n return dict(site_prop_names)\n\n @staticmethod\n def get_species_str(sp: Union[Specie, Element]) -> str:\n \"\"\"\n Args:\n sp: Specie or Element\n\n Returns: string representation\n \"\"\"\n # TODO: add roman numerals for oxidation state for ease of readability\n # and then move this to pymatgen string utils ...\n return unicodeify_species(str(sp))\n\n def get_legend(self) -> Dict[str, Any]:\n\n # decide what we want the labels to be\n if self.color_scheme in (\"Jmol\", \"VESTA\", \"accessible\"):\n label = lambda site, sp: self.get_species_str(sp)\n elif self.color_scheme in self.site_prop_types.get(\"scalar\", {}):\n label = lambda site, sp: f\"{site.properties[self.color_scheme]:.2f}\"\n elif self.color_scheme in self.site_prop_types.get(\"categorical\", {}):\n label = lambda site, sp: f\"{site.properties[self.color_scheme]}\"\n else:\n raise ValueError(f\"Color scheme {self.color_scheme} not known.\")\n\n legend = defaultdict(list)\n\n # first get all our colors for different species\n for site in self.site_collection:\n for sp, occu in site.species.items():\n legend[self.get_color(sp, site)].append(label(site, sp))\n\n legend = {k: \", \".join(sorted(list(set(v)))) for k, v in legend.items()}\n\n color_options = []\n for site_prop_type in (\"scalar\", \"categorical\"):\n if site_prop_type in self.site_prop_types:\n for prop in self.site_prop_types[site_prop_type]:\n color_options.append(prop)\n\n return {\n \"composition\": self.site_collection.composition.as_dict(),\n \"colors\": legend,\n \"available_color_schemes\": color_options,\n }\n"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"matplotlib.cm.get_cmap"
]
] |
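
How Legend.get_color maps a scalar site property through a colormap, in simplified standalone form; plt.get_cmap stands in for the matplotlib.cm.get_cmap import above, which newer matplotlib releases have dropped.

import matplotlib.pyplot as plt

cmap = plt.get_cmap("coolwarm")
prop, cmap_range = 0.7, (-1.0, 1.0)   # e.g. a magnetic moment and its range
prop_normed = (prop - cmap_range[0]) / (cmap_range[1] - cmap_range[0])
color = [int(c * 255) for c in cmap(prop_normed)[0:3]]
print(color)  # an [R, G, B] list, as passed to html5_serialize_simple_color above
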
Russell-Ryan/pyLINEAR
|
[
"d68e44bc64d302b816db69d2becc4de3b15059f9"
] |
[
"pylinear/modules/extract/groupcollection.py"
] |
[
"import numpy as np\nfrom shapely import geometry\nimport h5py\n\n\nfrom ... import h5table\nfrom ...utilities import pool\nfrom ...constants import COMPARGS,SEGTYPE\n\n\n\nclass GroupCollection(list):\n def __init__(self,minarea=0.1,ncpu=0,path='tables'):\n \n self.minarea=minarea\n self.path=path\n self.ncpu=ncpu\n\n\n\n\n\n\n\n def group(self,grisms,sources,beams):\n\n print('[info]Starting the group algorithm')\n\n \n p=pool.Pool(self.group_grism,desc='Grouping grisms',ncpu=self.ncpu)\n \n ids=p(grisms.values(),sources,beams)\n #ids=[group_grism(f,sources,beams,path) for f in grisms]\n \n # convert the list of lists a list of sets\n sets=[]\n for i in ids:\n sets.extend(i)\n \n\n \n # group those IDs\n groups=self.group_ids(sets)\n \n #data=group_ids(sets)\n\n\n \n ## now sort them in reverse order\n if len(groups)!=0:\n groups.sort(key=len)\n\n # put them in in reverse order\n for group in reversed(groups):\n self.append(group)\n\n\n \n # print something for something's sake\n print(\"[info]Done grouping, found {} groups.\\n\".format(len(self)))\n\n\n def append(self,data):\n super().append(tuple(data))\n\n \n def write_h5(self,grpfile):\n with h5py.File(grpfile,'w') as h5:\n h5.attrs['ngroup']=np.uint32(len(self))\n for grpid,segids in enumerate(self):\n name=str(grpid)\n hd=h5.create_dataset(name,data=np.array(segids,dtype=SEGTYPE),\n **COMPARGS)\n \n hd.attrs['nobj']=np.uint32(len(segids))\n \n\n @classmethod\n def load_h5(cls,grpfile,**kwargs):\n obj=cls(**kwargs)\n with h5py.File(grpfile,'r') as h5:\n for v in h5.values():\n obj.append(list(v[:]))\n return obj\n \n def write_ascii(self,grpfile):\n with open(grpfile,'w') as fp:\n for grpid,grp in enumerate(self):\n for segid in grp:\n print(grpid,segid,file=fp)\n \n\n @classmethod\n def load_ascii(cls,grpfile,**kwargs):\n obj=cls(**kwargs)\n\n grpids,segids=np.loadtxt(grpfile,unpack=True,dtype=(np.uint,SEGTYPE))\n\n grpids,grpnums=np.unique(grpids,return_counts=True)\n \n # this is basically what np.split does, but i want it liek this\n # since i will caste it as a list anyway\n i=0\n for num in grpnums:\n j=i+num\n obj.append(segids[i:j])\n i=j\n \n \n return obj\n\n\n\n \n def group_polygons(self,data):\n nnew=ndata=len(data)\n\n while nnew!=0:\n groups=[]\n\n while len(data)!=0:\n thisid,thispoly=data.pop(0)\n \n for i,(testid,testpoly) in enumerate(data):\n inter=thispoly.intersection(testpoly)\n\n if (inter.area > self.minarea*testpoly.area) and \\\n (inter.area > self.minarea*thispoly.area):\n data.pop(i)\n thispoly=thispoly.union(testpoly)\n thisid.extend(testid)\n\n groups.append((thisid,thispoly))\n\n N=len(data)\n data=groups\n nnew=ndata-N\n ndata=N\n\n return groups\n\n def group_ids(self,data):\n nnew=ndata=len(data)\n while nnew!=0:\n new=[]\n while len(data)!=0:\n this=data.pop(0)\n for i,test in enumerate(data):\n if this.intersection(test):\n this=this.union(test)\n data.pop(i)\n new.append(this)\n data=new\n n=len(data)\n nnew=ndata-n\n ndata=n\n return data\n\n def group_grism(self,grism,sources,beams):\n \n # open the file for a given grism\n with h5table.H5Table(grism.dataset,path=self.path,mode='r') as h5tab:\n groups=[]\n for device in grism:\n\n # first convert each source to a polygon\n polys=[]\n ids=[]\n for i,source in enumerate(sources):\n \n # make a Multipolygon for each source\n poly=[]\n for beam in beams:\n\n # open the files\n if i ==0:\n h5tab.open_table(device.name,beam,'pdt')\n \n # read the data\n odt=h5tab.load_from_file(source,beam,'odt')\n ovt=odt.compute_vertices()\n 
this_poly=ovt.as_polygon()\n \n ## read the data \n #ovt=h5tab.load_from_file(source.name,'ovt',beam)\n #\n ## get the polygon representation\n #this_poly=ovt.as_polygon()\n \n\n # record it in the list\n poly.append(this_poly)\n\n\n # make a multipolygon over beams\n poly=geometry.MultiPolygon(poly)\n\n # make a multipolgon\n polys.append(poly) #geometry.MultiPolygon(poly))\n ids.append([source.segid])\n \n data=list(zip(ids,polys))\n\n # now group these polygons for a given device\n grouped=self.group_polygons(data)\n\n # update the groups\n groups.append(grouped)\n\n\n # get a list of the segids as a list of lists\n segids=list(list(zip(*groups[0]))[0])\n\n #groups=list(zip(*groups))[0]\n #segids=list(zip(*groups))[0]\n\n \n #ids=[set(group) for group in groups]\n\n # convert the list of lists into a list of sets\n ids=[set(i) for i in segids]\n \n \n # now group these IDs for the different devices\n ids=self.group_ids(ids)\n \n # return the SEGIDs that collide\n return ids\n\n \n"
] |
[
[
"numpy.array",
"numpy.loadtxt",
"numpy.unique"
]
] |
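
GroupCollection.group_ids above is self-contained; copied here as a standalone function and run on toy ID sets to show the transitive grouping of overlapping sets.

def group_ids(data):
    # repeatedly merge any two sets that intersect, until stable
    nnew = ndata = len(data)
    while nnew != 0:
        new = []
        while len(data) != 0:
            this = data.pop(0)
            for i, test in enumerate(data):
                if this.intersection(test):
                    this = this.union(test)
                    data.pop(i)
            new.append(this)
        data = new
        n = len(data)
        nnew = ndata - n
        ndata = n
    return data

print(group_ids([{1, 2}, {2, 3}, {7}, {7, 8}]))  # -> [{1, 2, 3}, {7, 8}]
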
benjonesbenjones/silver
|
[
"4c037577851682aed07e3bd1daf05403cc00c5f0"
] |
[
"app.py"
] |
[
"# by ben with <3\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport dash_colorscales\nimport pandas as pd\nimport cufflinks as cf\nimport numpy as np\n\napp = dash.Dash(__name__)\nserver = app.server\n\ndf_lat_lon = pd.read_csv('lib/lat_lon_counties.csv')\ndf_lat_lon['FIPS '] = df_lat_lon['FIPS '].apply(lambda x: str(x).zfill(5))\n\ndf_full_data = pd.read_csv('lib/votes.csv')\nper_point_diff_2016 = df_full_data['per_point_diff_2016']\nper_point_diff_2012 = df_full_data['per_point_diff_2012']\n\nprint(per_point_diff_2016)\nprint(per_point_diff_2016)\nprint(df_lat_lon['FIPS '])\n\nYEARS = [2012, 2016]\n\nDEFAULT_COLORSCALE = [\"#EF8A62\", \"#F7F7F7\", \"#67A9CF\"]\n\nDEFAULT_OPACITY = 0.8\n\n\nmapbox_access_token = \"pk.eyJ1IjoiYmVuam9uZXM0NzQ3IiwiYSI6ImNqczBoZWo1MzFlZ3Q0YW81YTFtcDVrN3AifQ.anJanZY5dXkl6JIC4P8RRQ\"\n\n\n'''\n~~~~~~~~~~~~~~~~\n~~ APP LAYOUT ~~\n~~~~~~~~~~~~~~~~\n'''\n\napp.layout = html.Div(children=[\n\n\thtml.Div([\n\t\thtml.Div([\n\t\t\thtml.Div([\n\t\t\t\thtml.H4(children='SILVER'),\n\t\t\t\thtml.P('RESULTS Select year:'),\n\t\t\t]),\n\n\t\t\thtml.Div([\n\t\t\t\tdcc.Slider(\n\t\t\t\t\tid='years-slider',\n\t\t\t\t\tmin=min(YEARS),\n\t\t\t\t\tmax=max(YEARS),\n\t\t\t\t\tvalue=min(YEARS),\n\t\t\t\t\tmarks={str(year): str(year) for year in YEARS},\n\t\t\t\t),\n\t\t\t], style={'width':400, 'margin':25}),\n\n\t\t\thtml.Br(),\n\n\t\t\thtml.P('Map transparency:',\n\t\t\t\tstyle={\n\t\t\t\t\t'display':'inline-block',\n\t\t\t\t\t'verticalAlign': 'top',\n\t\t\t\t\t'marginRight': '10px'\n\t\t\t\t}\n\t\t\t),\n\n\t\t\thtml.Div([\n\t\t\t\tdcc.Slider(\n\t\t\t\t\tid='opacity-slider',\n\t\t\t\t\tmin=0, max=1, value=DEFAULT_OPACITY, step=0.1,\n\t\t\t\t\tmarks={tick: str(tick)[0:3] for tick in np.linspace(0,1,11)},\n\t\t\t\t),\n\t\t\t], style={'width':300, 'display':'inline-block', 'marginBottom':10}),\n\n\t\t\thtml.Div([\n\t\t\t\tdash_colorscales.DashColorscales(\n\t\t\t\t\tid='colorscale-picker',\n\t\t\t\t\tcolorscale=DEFAULT_COLORSCALE,\n\t\t\t\t\tnSwatches=16,\n\t\t\t\t\tfixSwatches=True\n\t\t\t\t)\n\t\t\t], style={'display':'inline-block'}),\n\n\t\t\thtml.Div([\n\t\t\t\tdcc.Checklist(\n\t\t\t\t options=[{'label': 'Hide legend', 'value': 'hide_legend'}],\n\t\t\t\t\tvalue=[],\n\t\t\t\t\tlabelStyle={'display': 'inline-block'},\n\t\t\t\t\tid='hide-map-legend',\n\t\t\t\t)\n\t\t\t], style={'display':'inline-block'}),\n\n\t\t], style={'margin':20} ),\n\n\t\tdcc.Graph(\n\t\t\tid = 'county-choropleth',\n\t\t\tfigure = dict(\n\t\t\t\tdata=dict(\n\t\t\t\t\tlat = df_lat_lon['Latitude '],\n\t\t\t\t\tlon = df_lat_lon['Longitude'],\n\t\t\t\t\ttext = df_lat_lon['Hover'],\n\t\t\t\t\ttype = 'scattermapbox'\n\t\t\t\t),\n\t\t\t\tlayout = dict(\n\t\t\t\t\tmapbox = dict(\n\t\t\t\t\t\tlayers = [],\n\t\t\t\t\t\taccesstoken = mapbox_access_token,\n\t\t\t\t\t\tstyle = 'light',\n\t\t\t\t\t\tcenter=dict(\n\t\t\t\t\t\t\tlat=38.72490,\n\t\t\t\t\t\t\tlon=-95.61446,\n\t\t\t\t\t\t),\n\t\t\t\t\t\tpitch=0,\n\t\t\t\t\t\tzoom=2.5\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t)\n\t\t),\n\n\t\thtml.Div([\n\t\t\thtml.P('† Error ~= 0.0241.'\n\t\t\t)\n\t\t], style={'margin':20})\n\n\t], className='six columns', style={'margin':0}),\n\n\thtml.Div([\n\t\tdcc.Checklist(\n\t\t options=[{'label': 'Log scale', 'value': 'log'},\n\t\t\t\t\t{'label': 'Hide legend', 'value': 'hide_legend'}],\n\t\t\tvalue=[],\n\t\t\tlabelStyle={'display': 'inline-block'},\n\t\t\tid='log-scale',\n\t\t\tstyle={'position': 'absolute', 'right': 80, 'top': 
10}\n\t\t),\n\t\thtml.Br(),\n\t\thtml.P('Select chart:', style={'display': 'inline-block'}),\n\t\tdcc.Dropdown(\n\t\t options=[\n\n ],\n\t\t\tvalue='show_death_rate_single_year',\n\t\t\tid='chart-dropdown'\n\t\t),\n\t\tdcc.Graph(\n\t\t\tid = 'selected-data',\n\t\t\tfigure = dict(\n\t\t\t\tdata = [dict(x=0, y=0)],\n\t\t\t\tlayout = dict(\n\t\t\t\t\tpaper_bgcolor = '#F4F4F8',\n\t\t\t\t\tplot_bgcolor = '#F4F4F8',\n\t\t\t\t\theight = 700\n\t\t\t\t)\n\t\t\t),\n\t\t\tanimate = True\n\t\t)\n\t], className='six columns', style={'margin':0}),\n])\n\napp.css.append_css({'external_url': 'https://codepen.io/plotly/pen/EQZeaW.css'})\n\napp.run_server(debug=True)\n"
] |
[
[
"pandas.read_csv",
"numpy.linspace"
]
] |
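
Two small data-prep patterns from app.py above, standalone with toy values: zero-padding FIPS codes and building slider marks from np.linspace ticks.

import numpy as np
import pandas as pd

df = pd.DataFrame({'FIPS ': [1001, 56045]})        # toy county codes
df['FIPS '] = df['FIPS '].apply(lambda x: str(x).zfill(5))
print(df['FIPS '].tolist())                        # ['01001', '56045']

marks = {tick: str(tick)[0:3] for tick in np.linspace(0, 1, 11)}
print(marks)                                       # e.g. 0.5 -> '0.5'
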
sroet/regreg
|
[
"299ff18b8680872d4d85447953793bf438f78bba"
] |
[
"regreg/problems/dual_problem.py"
] |
[
"import numpy as np\nfrom scipy import sparse\nfrom warnings import warn\n\nfrom ..algorithms import FISTA\nfrom ..problems.composite import (composite, nonsmooth as nonsmooth_composite,\n smooth as smooth_composite)\nfrom ..affine import (vstack as afvstack, identity as afidentity, power_L,\n selector as afselector,\n scalar_multiply, adjoint)\nfrom ..problems.separable import separable\nfrom ..smooth import smooth_atom, affine_smooth\nfrom ..atoms import affine_atom as nonsmooth_affine_atom\nfrom ..atoms.cones import zero_constraint, zero as zero_nonsmooth, affine_cone\nfrom ..identity_quadratic import identity_quadratic\n\nclass dual_problem(composite):\n r\"\"\"\n A class for specifying a problem of the form\n\n .. math::\n\n \\text{minimize}_{x} f(x) + g(Dx)\n\n which will be solved by a dual problem\n\n .. math::\n\n \\text{minimize}_{u_i} f^*(-D^Tu) + g^*(u)\n\n while the primal variable is stored in the computation of the gradient of\n :math:`f^*`.\n \"\"\"\n\n def __init__(self, f_conjugate, transform, atom):\n self.offset = None\n self.f_conjugate = f_conjugate\n if not isinstance(self.f_conjugate, smooth_composite):\n warn('the conjugate of f should be a smooth_composite to solve with generalized gradient')\n\n self.transform = transform\n self.atom = atom\n\n # the dual problem has f^*(-D^Tu) as objective\n self.affine_fc = affine_smooth(self.f_conjugate, scalar_multiply(adjoint(self.transform), -1))\n self.coefs = np.zeros(self.affine_fc.shape)\n\n # the quadratic is delegated to \n @property\n def quadratic(self):\n return identity_quadratic(None,None,None,None)\n\n @staticmethod\n def fromprimal(f, *g):\n transform, separable_dual_atom = stacked_dual(f.shape, *g)\n return dual_problem(f.conjugate, transform, separable_dual_atom)\n\n def smooth_objective(self, x, mode='both', check_feasibility=False):\n \"\"\"\n The smooth_objective DOES NOT INCLUDE the identity\n quadratic of all the smooth atoms.\n \"\"\"\n v = self.affine_fc.smooth_objective(x, mode=mode, check_feasibility=check_feasibility)\n # retain a reference\n if mode in ['both', 'grad']:\n self.primal = self.affine_fc.grad\n return v\n\n def nonsmooth_objective(self, x, check_feasibility=False):\n out = self.atom.nonsmooth_objective(x, \n check_feasibility=check_feasibility)\n return out + self.affine_fc.nonsmooth_objective(x, \n check_feasibility=check_feasibility)\n\n def proximal(self, proxq, prox_control=None):\n \"\"\"\n The proximal function for the dual problem\n \"\"\"\n return self.atom.proximal(proxq, prox_control=prox_control)\n\n def solve(self, quadratic=None, return_optimum=False, **fit_args):\n solver = FISTA(self)\n solver.fit(**fit_args)\n if return_optimum:\n return self.objective(self.primal), self.primal\n return self.primal\n\ndef stacked_dual(shape, *primary_atoms):\n r'''\n Computes a dual of\n\n .. math::\n\n \\sum_i g_i(D_i\\beta)\n\n under the substitutions :math:`v_i=D_i\\beta`.\n\n That is, it returns the following dual function after minimizing\n over :math:`(v_i,\\beta_i)`:\n\n .. 
math::\n\n -\\sum_i g_i^*(u_i)\n\n as well as the transform :math:`D \\mapsto \\mathbb{R}^p \\prod_i\n \\mathbb{R}^{m_i}` where :math:`p` is the primal shape and :math:`m_i` are\n the corresponding dual shapes.\n\n Parameters\n ----------\n primary_atoms : [atoms]\n Objects that have dual attributes, which is a pair\n (ltransform, conjugate).\n '''\n if len(primary_atoms) == 0:\n primary_atoms = [zero_nonsmooth(shape)]\n\n duals = [atom.dual for atom in primary_atoms]\n transforms = [d[0] for d in duals]\n dual_atoms = [d[1] for d in duals]\n\n if len(transforms) > 1:\n transform = afvstack(transforms)\n separable_atom = separable(transform.output_shape, dual_atoms,\n transform.dual_slices)\n _dual = transform, separable_atom\n else:\n _dual = (transforms[0], dual_atoms[0])\n return _dual\n\n"
] |
[
[
"numpy.zeros"
]
] |
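
A toy numerical check of the conjugacy the dual_problem docstring relies on, without regreg itself: for f(x) = x^2/2, the Fenchel conjugate sup_x <u, x> - f(x) equals u^2/2.

import numpy as np

x = np.linspace(-10, 10, 20001)
f = 0.5 * x ** 2
for u in (-2.0, 0.0, 1.5):
    f_star = np.max(u * x - f)   # sup_x <u, x> - f(x), approximated on a grid
    print(u, f_star, 0.5 * u ** 2)
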
hongfz16/Garment4D
|
[
"9317dc262f3d35eb9e6cd6a7bfbb29f04560ca35"
] |
[
"modules/mesh_encoder.py"
] |
[
"import numpy as np\nimport torch\nfrom torch import nn\nimport pickle\nimport torch.nn.functional as F\nfrom .pointnet2encoder import Pointnet2MSGSEG\nimport sys\nsys.path.append('../')\nfrom utils import mesh_utils\nfrom smplx import batch_rodrigues\nfrom .pygcn import layers\nfrom .pygcn import utils as gcn_utils\nimport scipy.sparse as sp\nfrom .pointnet2.pointnet2.pointnet2_utils import three_interpolate, three_nn, ball_query, grouping_operation, QueryAndGroup\nfrom .pointnet2.pointnet2.pointnet2_modules import PointnetSAModuleMSG, PointnetSAModule\n\nfrom smplx.smplx.lbs import batch_rigid_transform, vertices2joints, vertices2jointsB\nfrom chamferdist import knn_points\n\nimport time\n\nfrom utils.dataloader import label_dict, class_num\n\ndef quads2tris(F):\n out = []\n for f in F:\n if len(f) == 3: out += [f]\n elif len(f) == 4: out += [[f[0],f[1],f[2]],\n [f[0],f[2],f[3]]]\n else: print(\"This should not happen...\")\n return np.array(out, np.int32)\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\nclass PCAGarmentEncoderSeg(nn.Module):\n def __init__(self, cfg=None, args=None):\n super(PCAGarmentEncoderSeg, self).__init__()\n self.cfg = cfg\n self.args = args\n\n self.pointnet = Pointnet2MSGSEG(input_channels=0, bn=True, global_feat=False)\n\n if self.args.only_seg:\n return\n\n self.GarmentEncoder = nn.ModuleList()\n self.GarmentEncoder.append(\n PointnetSAModuleMSG(\n npoint=512,\n radii=[0.05, 0.1],\n nsamples=[16, 32],\n mlps=[[self.pointnet.feat_channels_list[0], 32, 32], [self.pointnet.feat_channels_list[0], 64, 64]],\n use_xyz=True,\n bn=True\n )\n )\n self.GarmentEncoder.append(\n PointnetSAModuleMSG(\n npoint=64,\n radii=[0.2, 0.4],\n nsamples=[32, 64],\n mlps=[[32+64, 128, 128], [32+64, 256, 256]],\n use_xyz=True,\n bn=True\n )\n )\n self.GarmentSummarize = PointnetSAModule(\n mlp=[128+256, 512, 512], use_xyz=True,\n bn=True\n )\n self.PCAEncoder = nn.Sequential(\n nn.Conv1d(512, 128, 1),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n nn.Conv1d(128, 64, 1),\n nn.BatchNorm1d(64),\n nn.ReLU(),\n nn.Conv1d(64, 64, 1)\n )\n\n with open(self.cfg.GARMENT.PCACOMPONENTSFILE, 'rb') as fd:\n PCA_pkl = pickle.load(fd)\n self.PCA_comp = torch.from_numpy(PCA_pkl['components'][:self.cfg.GARMENT.PCADIM])\n self.PCA_mean = torch.from_numpy(PCA_pkl['mean'])\n self.PCA_expl = torch.from_numpy(PCA_pkl['explained'][:self.cfg.GARMENT.PCADIM])\n self.PCA_scale = torch.from_numpy(PCA_pkl['ss_scale'].astype(np.float32))\n\n self.remesh_cylinder_v, self.remesh_cylinder_f, _, _ = mesh_utils.readOBJ(self.cfg.GARMENT.TEMPLATE)\n self.remesh_cylinder_f = np.array(list(self.remesh_cylinder_f))\n self.garment_f_3 = quads2tris(self.remesh_cylinder_f).astype(np.int32)\n self.garment_v_num = self.remesh_cylinder_v.shape[0]\n\n def PCA_inverse_transform(self, coeff):\n assert coeff.shape[1] == self.cfg.GARMENT.PCADIM\n self.PCA_comp = self.PCA_comp.cuda()\n self.PCA_mean = self.PCA_mean.cuda()\n self.PCA_expl = self.PCA_expl.cuda()\n self.PCA_scale = self.PCA_scale.cuda()\n return ((torch.mm(coeff, self.PCA_comp) + self.PCA_mean) * self.PCA_scale).reshape(coeff.shape[0], -1, 3)\n\n def calc_segmentation_results(self, x, sem_logits, n, nbatch, T, feature):\n x = x.reshape(nbatch * T, -1, 3)\n feature = 
feature.transpose(1, 2).reshape(nbatch * T, 6890, feature.shape[-2])\n sem_logits = sem_logits.reshape(nbatch * T, -1, class_num)\n labels = torch.argmax(sem_logits, dim=2).detach()\n garment_v = []\n feat = []\n for i in range(nbatch * T):\n cur_x = x[i][labels[i]==(label_dict[self.cfg.GARMENT.NAME]-1), :]\n cur_f = feature[i][labels[i]==(label_dict[self.cfg.GARMENT.NAME]-1), :]\n if cur_x.shape[0] >= n:\n garment_v.append(cur_x[:n, :])\n feat.append(cur_f[:n, :])\n else:\n garment_v.append(torch.cat([cur_x, torch.zeros(n-cur_x.shape[0], 3, dtype=torch.float32).cuda()]))\n feat.append(torch.cat([cur_f, torch.zeros(n-cur_x.shape[0], feature.shape[-1], dtype=torch.float32).cuda()]))\n return torch.stack(garment_v), torch.stack(feat)\n\n def forward(self, x, body_model=None, batch=None):\n assert(x.size()[-1] >= 3)\n nbatch= x.size()[0]\n T = x.size()[1]\n N = x.size()[2]\n JN = 24\n x = x.view(nbatch * T, N, -1)\n output_dict = {}\n output_dict['middle_results'] = {}\n\n assert body_model is not None\n\n feat_global, sem_logits, feature_list, xyz_list = self.pointnet(x)\n\n output_dict['feat_global'] = feat_global\n output_dict['feature_list'] = feature_list\n output_dict['xyz_list'] = xyz_list\n output_dict['sem_logits'] = sem_logits\n\n if self.args.only_seg:\n return output_dict\n\n garment_v, garment_f = self.calc_segmentation_results(xyz_list[0], sem_logits, N // 4, nbatch, T, feature_list[0])\n garment_v = garment_v.reshape(nbatch * T, -1, 3)\n garment_f = garment_f.reshape(nbatch * T, -1, garment_f.shape[-1]).transpose(1, 2).contiguous()\n l_xyz, l_features = [garment_v], [garment_f]\n for i in range(len(self.GarmentEncoder)):\n li_xyz, li_features = self.GarmentEncoder[i](l_xyz[i], l_features[i])\n l_xyz.append(li_xyz)\n l_features.append(li_features)\n output_dict['garment_v_list'] = l_xyz\n output_dict['garment_f_list'] = l_features\n garment_summary = self.GarmentSummarize(l_xyz[-1], l_features[-1])[1].reshape(nbatch, T, 512)\n output_dict['garment_summary'] = garment_summary\n output_dict['garment_PCA_coeff'] = self.PCAEncoder(garment_summary.max(1)[0].reshape(nbatch, -1, 1)).reshape(nbatch, self.cfg.GARMENT.PCADIM)\n output_dict['tpose_garment'] = self.PCA_inverse_transform(output_dict['garment_PCA_coeff'])\n output_dict['garment_f_3'] = self.garment_f_3\n output_dict['PCABase'] = {\n 'components': self.PCA_comp,\n 'mean': self.PCA_mean,\n 'explained': self.PCA_expl,\n }\n return output_dict\n\n\nclass PCALBSGarmentUseSegEncoderSeg(nn.Module):\n def __init__(self, cfg=None, args=None):\n super(PCALBSGarmentUseSegEncoderSeg, self).__init__()\n self.cfg = cfg\n self.args = args\n self.PCA_garment_encoder = PCAGarmentEncoderSeg(self.cfg, self.args)\n\n self.garment_radius_list = [0.1, 0.2, 0.4]\n self.garment_sample_num_list = [32, 16, 8]\n self.body_radius_list = [0.1, 0.2, 0.4]\n self.body_sample_num_list = [8, 16, 32]\n\n if self.cfg.GARMENT.NAME == 'Trousers':\n self.garment_radius_list = [0.1, 0.2, 0.4]\n self.garment_sample_num_list = [32, 8, 4]\n self.body_radius_list = [0.1, 0.2, 0.4]\n self.body_sample_num_list = [8, 16, 32]\n\n self.lbs_positional_encoding_dim = 3\n\n self.feat_num = 32 # positional encoding\n self.hidden_dim = 128 # GCN hidden dim\n self.graph_start_feature_dim = self.feat_num * 6 + self.lbs_positional_encoding_dim\n self.feat_num_output = self.feat_num\n\n self.body_query_group0 = QueryAndGroup(radius=self.body_radius_list[0], nsample=self.body_sample_num_list[0], use_xyz=True)\n self.body_query_group1 = QueryAndGroup(radius=self.body_radius_list[1], 
nsample=self.body_sample_num_list[1], use_xyz=True)\n self.body_query_group2 = QueryAndGroup(radius=self.body_radius_list[2], nsample=self.body_sample_num_list[2], use_xyz=True)\n self.body_query_group_list = [\n self.body_query_group0,\n self.body_query_group1,\n self.body_query_group2,\n ]\n self.body_positional_encoding0 = nn.Sequential(\n nn.Linear(6, self.feat_num),\n nn.ReLU(),\n nn.Linear(self.feat_num, self.feat_num_output),\n )\n self.body_positional_encoding1 = nn.Sequential(\n nn.Linear(6, self.feat_num),\n nn.ReLU(),\n nn.Linear(self.feat_num, self.feat_num_output),\n )\n self.body_positional_encoding2 = nn.Sequential(\n nn.Linear(6, self.feat_num),\n nn.ReLU(),\n nn.Linear(self.feat_num, self.feat_num_output),\n )\n self.body_positional_encoding_list = [\n self.body_positional_encoding0,\n self.body_positional_encoding1,\n self.body_positional_encoding2,\n ]\n\n self.garment_query_group0 = QueryAndGroup(radius=self.garment_radius_list[0], nsample=self.garment_sample_num_list[0], use_xyz=True)\n self.garment_query_group1 = QueryAndGroup(radius=self.garment_radius_list[1], nsample=self.garment_sample_num_list[1], use_xyz=True)\n self.garment_query_group2 = QueryAndGroup(radius=self.garment_radius_list[2], nsample=self.garment_sample_num_list[2], use_xyz=True)\n self.garment_query_group_list = [\n self.garment_query_group0,\n self.garment_query_group1,\n self.garment_query_group2,\n ]\n self.garment_positional_encoding_input_dim = [\n 3 + 64,\n 3 + 32 + 64,\n 3 + 128 + 256,\n ]\n self.garment_positional_encoding0 = nn.Sequential(\n nn.Linear(self.garment_positional_encoding_input_dim[0], self.feat_num),\n nn.ReLU(),\n nn.Linear(self.feat_num, self.feat_num_output),\n )\n self.garment_positional_encoding1 = nn.Sequential(\n nn.Linear(self.garment_positional_encoding_input_dim[1], self.feat_num),\n nn.ReLU(),\n nn.Linear(self.feat_num, self.feat_num_output),\n )\n self.garment_positional_encoding2 = nn.Sequential(\n nn.Linear(self.garment_positional_encoding_input_dim[2], self.feat_num),\n nn.ReLU(),\n nn.Linear(self.feat_num, self.feat_num_output),\n )\n self.garment_positional_encoding_list = [\n self.garment_positional_encoding0,\n self.garment_positional_encoding1,\n self.garment_positional_encoding2,\n ]\n \n self.temporal_qkv_1 = nn.Linear(self.hidden_dim, self.hidden_dim * 3, bias=False)\n self.temporal_qkv_2 = nn.Linear(self.hidden_dim, self.hidden_dim * 3, bias=False)\n self.temporal_qkv_list = [\n self.temporal_qkv_1,\n self.temporal_qkv_2,\n ]\n\n self.lbs_graph_regress1 = nn.ModuleList([\n layers.GraphConvolution(self.graph_start_feature_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, 3),\n ])\n self.lbs_graph_regress2 = nn.ModuleList([\n layers.GraphConvolution(self.graph_start_feature_dim+self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, 3),\n ])\n self.lbs_graph_regress3 = nn.ModuleList([\n layers.GraphConvolution(self.graph_start_feature_dim+self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, self.hidden_dim),\n layers.GraphConvolution(self.hidden_dim, 3),\n ])\n\n self.remesh_cylinder_f = self.PCA_garment_encoder.remesh_cylinder_f\n self.lbs_graph_regress = [self.lbs_graph_regress1, 
self.lbs_graph_regress2, self.lbs_graph_regress3]\n edges = np.zeros([2, self.remesh_cylinder_f.shape[0] * 4], dtype=np.int32)\n for i, f in enumerate(self.remesh_cylinder_f):\n if len(f) == 4:\n edges[:, i * 4 + 0] = np.array([f[0], f[1]], dtype=np.int32)\n edges[:, i * 4 + 1] = np.array([f[1], f[2]], dtype=np.int32)\n edges[:, i * 4 + 2] = np.array([f[2], f[3]], dtype=np.int32)\n edges[:, i * 4 + 3] = np.array([f[3], f[0]], dtype=np.int32)\n elif len(f) == 3:\n edges[:, i * 4 + 0] = np.array([f[0], f[1]], dtype=np.int32)\n edges[:, i * 4 + 1] = np.array([f[1], f[2]], dtype=np.int32)\n edges[:, i * 4 + 3] = np.array([f[2], f[0]], dtype=np.int32)\n else:\n raise NotImplementedError\n self.adj = sp.coo_matrix((np.ones(edges.shape[1]), (edges[0, :], edges[1, :])),\n shape=(self.PCA_garment_encoder.garment_v_num, self.PCA_garment_encoder.garment_v_num),\n dtype=np.float32)\n self.adj = self.adj + self.adj.T.multiply(self.adj.T > self.adj) - self.adj.multiply(self.adj.T > self.adj)\n self.adj_old = self.adj.copy()\n self.adj = gcn_utils.normalize(self.adj + sp.eye(self.adj.shape[0]))\n self.adj = gcn_utils.sparse_mx_to_torch_sparse_tensor(self.adj).cuda()\n\n self.vf_fid = None\n self.vf_vid = None\n\n def lbs_garment_interpolation(self, pred_template_garment_v, Tpose_vertices, Tpose_root_joints, zeropose_vertices, body_model, gt_pose, T_J_regressor, T_lbs_weights, K=3):\n assert len(pred_template_garment_v.shape) == 3 and pred_template_garment_v.shape[2] == 3\n assert len(gt_pose.shape) == 3 and gt_pose.shape[2] == 72\n \n batch_size = pred_template_garment_v.shape[0]\n seq_length = gt_pose.shape[1]\n gt_pose_mat = batch_rodrigues(gt_pose.reshape(-1, 3)).reshape(batch_size * seq_length, 24, 3, 3)\n\n root_joint_pred_template_garment_v = pred_template_garment_v + Tpose_root_joints.reshape(batch_size, 3).unsqueeze(1)\n nnk = knn_points(root_joint_pred_template_garment_v, Tpose_vertices.reshape(batch_size, -1, 3), K=K)\n K64 = min(64, K)\n nn64 = knn_points(root_joint_pred_template_garment_v, Tpose_vertices.reshape(batch_size, -1, 3), K=K64)\n nn = knn_points(root_joint_pred_template_garment_v, Tpose_vertices.reshape(batch_size, -1, 3))\n\n inv_template_pose = torch.zeros([batch_size, 24, 3]).cuda()\n inv_template_pose[:, 0, 0] = -np.pi / 2\n inv_template_pose[:, 1, 1] = 0.15\n inv_template_pose[:, 2, 1] = -0.15\n inv_template_pose_mat = batch_rodrigues(inv_template_pose.reshape(-1, 3)).reshape(batch_size, 24, 3, 3)\n device, dtype = inv_template_pose.device, inv_template_pose.dtype\n \n inv_J = vertices2jointsB(T_J_regressor[:, 0, :, :].reshape(batch_size, T_J_regressor.shape[2], T_J_regressor.shape[3]),\n Tpose_vertices.reshape(batch_size, -1, 3))\n _, inv_A = batch_rigid_transform(inv_template_pose_mat, inv_J, body_model.parents, dtype=dtype)\n\n ##### INTERPOLATION\n num_joints = body_model.J_regressor.shape[0]\n inv_W = T_lbs_weights[:, 0, :, :].reshape(batch_size, -1, 1, num_joints).repeat(1, 1, K64, 1) # batch_size x num_body_v x K x num_joints\n inv_nn_W = torch.gather(inv_W, 1, nn64.idx.reshape(batch_size, -1, K64, 1).repeat(1, 1, 1, num_joints)) # batch_size x num_garment_v x K x num_joints\n interp_weights64 = 1 / nn64.dists.reshape(batch_size, -1, K64, 1)\n # interp_weights64 = torch.zeros_like(nnk.dists.reshape(batch_size, -1, K, 1)) + 1\n interp_weights64[torch.where(torch.isinf(interp_weights64))] = 0\n interp_weights64 = interp_weights64 / interp_weights64.sum(-2, keepdim=True)\n interp_weights64[torch.where(torch.isinf(interp_weights64))] = 0 # batch_size x num_garment_v x K x 
1\n inv_nn_W = (inv_nn_W * interp_weights64).sum(-2) # batch_size x num_garment_v x num_joints\n inv_nn_T = torch.matmul(inv_nn_W, inv_A.view(batch_size, num_joints, 16)).view(batch_size, -1, 4, 4)\n del inv_W\n del interp_weights64\n del inv_nn_W\n ######\n\n ###### ORIGINAL\n # inv_W = T_lbs_weights[:, 0, :, :]\n # num_joints = body_model.J_regressor.shape[0]\n # inv_T = torch.matmul(inv_W, inv_A.view(batch_size, num_joints, 16)).view(batch_size, -1, 4, 4)\n # inv_nn_T = torch.gather(inv_T, 1, nn.idx.reshape(batch_size, -1, 1, 1).repeat(1, 1, inv_T.shape[2], inv_T.shape[3]))\n ######\n \n inv_homogen_coord = torch.ones([batch_size, root_joint_pred_template_garment_v.shape[1], 1], dtype=dtype, device=device)\n inv_v_posed_homo = torch.cat([root_joint_pred_template_garment_v, inv_homogen_coord], dim=2)\n inv_v_homo = torch.matmul(inv_nn_T, torch.unsqueeze(inv_v_posed_homo, dim=-1))\n inv_template_garment_v = inv_v_homo[:, :, :3, 0].reshape(batch_size, 1, -1, 3).repeat(1, seq_length, 1, 1).reshape(batch_size * seq_length, -1, 3)\n\n if torch.any(torch.isnan(inv_template_garment_v)):\n import pdb; pdb.set_trace()\n\n zero_pose_pred_shape_v = zeropose_vertices.reshape(batch_size * seq_length, -1, 3)\n J = vertices2jointsB(T_J_regressor.reshape(batch_size * seq_length, T_J_regressor.shape[2], T_J_regressor.shape[3]),\n zero_pose_pred_shape_v)\n _, A = batch_rigid_transform(gt_pose_mat, J, body_model.parents, dtype=dtype)\n\n ###### INTERPOLATE\n interp_weights = 1 / nnk.dists.reshape(batch_size, -1, K, 1)\n # interp_weights = torch.zeros_like(nnk.dists.reshape(batch_size, -1, K, 1)) + 1\n interp_weights[torch.where(torch.isinf(interp_weights))] = 0\n interp_weights = interp_weights / interp_weights.sum(-2, keepdim=True)\n interp_weights[torch.where(torch.isinf(interp_weights))] = 0 # batch_size x num_garment_v x K x 1\n\n W = T_lbs_weights.reshape(batch_size * seq_length, -1, 1, num_joints).repeat(1, 1, K, 1) # batch_size*seq_length x num_body_v x K x num_joints\n nn_W = torch.gather(W, 1, nnk.idx.reshape(batch_size, 1, -1, K, 1).repeat(1, seq_length, 1, 1, num_joints).reshape(batch_size * seq_length, -1, K, num_joints)) # batch_size*seq_length x num_garment_v x K x num_joints\n nn_W = (nn_W * interp_weights.reshape(batch_size, 1, -1, K , 1).repeat(1, seq_length, 1, 1, 1).reshape(batch_size * seq_length, -1, K, 1)).sum(-2) # batch_size x num_garment_v x num_joints\n \n #### SMOOTH THE WEIGHTS\n if K > 1:\n adj = gcn_utils.normalize(self.adj_old) - sp.eye(self.adj_old.shape[0])\n adj = gcn_utils.sparse_mx_to_torch_sparse_tensor(adj).cuda()\n coeff = 0.1\n for it in range(100):\n nn_W = nn_W + coeff * torch.spmm(adj, nn_W.transpose(0, 1).reshape(-1, batch_size * seq_length * num_joints)).reshape(-1, batch_size * seq_length, num_joints).transpose(0, 1)\n ####\n\n nn_T = torch.matmul(nn_W, A.view(batch_size * seq_length, num_joints, 16)).view(batch_size * seq_length, -1, 4, 4)\n del nn_W\n del W\n del interp_weights\n ######\n\n ###### ORIGINAL\n # W = T_lbs_weights.reshape(batch_size * seq_length, T_lbs_weights.shape[2], T_lbs_weights.shape[3])\n # T = torch.matmul(W, A.view(batch_size * seq_length, num_joints, 16)).view(batch_size * seq_length, -1, 4, 4)\n # repeated_nn_idx = nn.idx.reshape(batch_size, 1, -1, 1, 1).repeat(1, seq_length, 1, T.shape[2], T.shape[3]).reshape(batch_size * seq_length, -1, T.shape[2], T.shape[3])\n # nn_T = torch.gather(T, 1, repeated_nn_idx)\n ######\n \n homogen_coord = torch.ones([batch_size * seq_length, inv_template_garment_v.shape[1], 1], dtype=dtype, 
device=device)\n v_posed_homo = torch.cat([inv_template_garment_v, homogen_coord], dim=2)\n v_homo = torch.matmul(nn_T, torch.unsqueeze(v_posed_homo, dim=-1))\n\n return v_homo[:, :, :3, 0].reshape(batch_size, seq_length, -1, 3), nn, inv_template_garment_v.reshape(batch_size, seq_length, -1, 3)\n\n def forward(self, x, body_model, batch):\n nbatch= x.size()[0]\n T = x.size()[1]\n N = x.size()[2]\n with torch.no_grad():\n output_dict = self.PCA_garment_encoder(x, body_model)\n lap_adj = sp.eye(self.adj_old.shape[0]) - gcn_utils.normalize(self.adj_old)\n output_dict['lap_adj'] = gcn_utils.sparse_mx_to_torch_sparse_tensor(lap_adj).cuda()\n\n garment_v_list = output_dict['garment_v_list']\n garment_f_list = output_dict['garment_f_list']\n\n body_v = batch['smpl_vertices_torch'].cuda().reshape(nbatch * T, -1, 3).contiguous()\n if self.vf_fid is None or self.vf_vid is None:\n self.vf_fid, self.vf_vid = mesh_utils.calc_body_mesh_info(body_model)\n self.vf_fid = self.vf_fid.cuda()\n self.vf_vid = self.vf_vid.cuda()\n body_vn = mesh_utils.compute_vnorms(body_v, torch.from_numpy(body_model.faces.astype(np.int64)).cuda(), self.vf_vid, self.vf_fid)\n body_vn = body_vn.float()\n\n regressed_garment_v = output_dict['tpose_garment'].reshape(nbatch, -1, 3)\n\n start_time = time.time()\n output_dict['lbs_pred_garment_v'], output_dict['lbs_nn'], output_dict['lbs_stage1_pred_garment_v'] = \\\n self.lbs_garment_interpolation(regressed_garment_v, batch['Tpose_smpl_vertices_torch'].cuda(),\n batch['Tpose_smpl_root_joints_torch'].cuda(), batch['zeropose_smpl_vertices_torch'].cuda(),\n body_model, batch['pose_torch'].cuda(), batch['T_J_regressor'].cuda(),\n batch['T_lbs_weights'].cuda(), K=self.cfg.NETWORK.LBSK)\n end_time = time.time()\n output_dict['lbs_time'] = end_time - start_time\n\n # output_dict['middle_results']['offsets'] = []\n # output_dict['middle_results']['gcn_inputs'] = []\n iter_regressed_lbs_garment_v = []\n lbs_pred_garment_v = output_dict['lbs_pred_garment_v'].reshape(nbatch * T, -1, 3).contiguous()\n cur_garment_v = lbs_pred_garment_v\n garment_v_num = cur_garment_v.shape[1]\n lbs_iter_feat = []\n for regress_iter in range(self.cfg.NETWORK.ITERATION):\n body_pe_list = []\n for i in range(len(self.body_radius_list)):\n cur_body_qg = self.body_query_group_list[i](xyz=body_v, new_xyz=cur_garment_v, features=body_vn.transpose(1, 2).contiguous()) \\\n .reshape(nbatch * T, 6, garment_v_num, self.body_sample_num_list[i])\\\n .permute(0, 2, 3, 1)\n cur_body_pe = self.body_positional_encoding_list[i](cur_body_qg).max(-2)[0].reshape(nbatch * T, garment_v_num, self.feat_num)\n body_pe_list.append(cur_body_pe)\n garment_pe_list = []\n for i in range(len(self.garment_radius_list)):\n cur_garment_qg = self.garment_query_group_list[i](xyz=garment_v_list[i], new_xyz=cur_garment_v, features=garment_f_list[i])\\\n .reshape(nbatch * T, self.garment_positional_encoding_input_dim[i], garment_v_num, self.garment_sample_num_list[i])\\\n .permute(0, 2, 3, 1)\n cur_garment_pe = self.garment_positional_encoding_list[i](cur_garment_qg).max(-2)[0].reshape(nbatch * T, garment_v_num, self.feat_num)\n garment_pe_list.append(cur_garment_pe)\n cur_positional_encoding = cur_garment_v\n templates_feat = torch.cat([cur_positional_encoding] + body_pe_list + garment_pe_list, 2)\n if regress_iter > 0:\n last_feat = lbs_iter_feat[-2].reshape(nbatch, T, garment_v_num, self.hidden_dim)\n q, k, v = self.temporal_qkv_list[regress_iter - 1](last_feat).chunk(3, dim=-1)\n q = q.reshape(nbatch, T, garment_v_num * self.hidden_dim)\n k = 
k.reshape(nbatch, T, garment_v_num * self.hidden_dim)\n v = v.reshape(nbatch, T, garment_v_num * self.hidden_dim)\n qk = torch.matmul(q, k.transpose(1, 2)).reshape(nbatch, T, T) / np.sqrt(T)\n qk = torch.softmax(qk, dim=-1)\n v = torch.matmul(qk, v).reshape(nbatch * T, garment_v_num, self.hidden_dim)\n templates_feat = torch.cat([templates_feat, v], -1)\n for i, m in enumerate(self.lbs_graph_regress[regress_iter]):\n templates_feat = m(templates_feat, self.adj, False)\n if i != len(self.lbs_graph_regress[regress_iter]) - 1:\n templates_feat = F.relu(templates_feat)\n lbs_iter_feat.append(templates_feat)\n regressed_garment_v = cur_garment_v + templates_feat\n cur_garment_v = regressed_garment_v\n iter_regressed_lbs_garment_v.append(cur_garment_v)\n output_dict['iter_regressed_lbs_garment_v'] = iter_regressed_lbs_garment_v\n\n return output_dict\n\nclass PCALBSGarmentUseSegEncoderSegMGN(nn.Module):\n def __init__(self, cfg=None, args=None):\n super(PCALBSGarmentUseSegEncoderSegMGN, self).__init__()\n self.cfg = cfg\n self.args = args\n self.PCA_garment_encoder = PCAGarmentEncoderSeg(self.cfg, self.args)\n\n self.remesh_cylinder_f = self.PCA_garment_encoder.remesh_cylinder_f\n edges = np.zeros([2, self.remesh_cylinder_f.shape[0] * 4], dtype=np.int32)\n for i, f in enumerate(self.remesh_cylinder_f):\n if len(f) == 4:\n edges[:, i * 4 + 0] = np.array([f[0], f[1]], dtype=np.int32)\n edges[:, i * 4 + 1] = np.array([f[1], f[2]], dtype=np.int32)\n edges[:, i * 4 + 2] = np.array([f[2], f[3]], dtype=np.int32)\n edges[:, i * 4 + 3] = np.array([f[3], f[0]], dtype=np.int32)\n elif len(f) == 3:\n edges[:, i * 4 + 0] = np.array([f[0], f[1]], dtype=np.int32)\n edges[:, i * 4 + 1] = np.array([f[1], f[2]], dtype=np.int32)\n edges[:, i * 4 + 3] = np.array([f[2], f[0]], dtype=np.int32)\n else:\n raise NotImplementedError\n self.adj = sp.coo_matrix((np.ones(edges.shape[1]), (edges[0, :], edges[1, :])),\n shape=(self.PCA_garment_encoder.garment_v_num, self.PCA_garment_encoder.garment_v_num),\n dtype=np.float32)\n self.adj = self.adj + self.adj.T.multiply(self.adj.T > self.adj) - self.adj.multiply(self.adj.T > self.adj)\n self.adj_old = self.adj.copy()\n self.adj = gcn_utils.normalize(self.adj + sp.eye(self.adj.shape[0]))\n self.adj = gcn_utils.sparse_mx_to_torch_sparse_tensor(self.adj).cuda()\n\n self.vf_fid = None\n self.vf_vid = None\n\n self.displacement_encoder = nn.Sequential(\n nn.Linear(512, 1024),\n nn.ReLU(),\n nn.Linear(1024, 2048),\n nn.ReLU(),\n nn.Linear(2048, self.PCA_garment_encoder.garment_v_num * 3)\n )\n\n def lbs_garment_MGN(self, pred_template_garment_v, Tpose_vertices, Tpose_root_joints, zeropose_vertices, body_model, gt_pose, T_J_regressor, T_lbs_weights, K=3):\n assert len(pred_template_garment_v.shape) == 4 and pred_template_garment_v.shape[-1] == 3\n assert len(gt_pose.shape) == 3 and gt_pose.shape[2] == 72\n assert K == 1\n \n batch_size = pred_template_garment_v.shape[0]\n seq_length = gt_pose.shape[1]\n gt_pose_mat = batch_rodrigues(gt_pose.reshape(-1, 3)).reshape(batch_size * seq_length, 24, 3, 3)\n\n root_joint_pred_template_garment_v = pred_template_garment_v + Tpose_root_joints.reshape(batch_size, 3).reshape(batch_size, 1, 1, 3).repeat(1, seq_length, 1, 1)\n root_joint_pred_template_garment_v = root_joint_pred_template_garment_v.reshape(batch_size * seq_length, -1, 3)\n new_Tpose_vertices = Tpose_vertices.reshape(batch_size, 1, -1, 3).repeat(1, seq_length, 1, 1).reshape(batch_size * seq_length, -1, 3)\n nn = knn_points(root_joint_pred_template_garment_v, 
new_Tpose_vertices.reshape(batch_size * seq_length, -1, 3), K=1)\n\n inv_template_pose = torch.zeros([batch_size * seq_length, 24, 3]).cuda()\n inv_template_pose[:, 0, 0] = -np.pi / 2\n inv_template_pose[:, 1, 1] = 0.15\n inv_template_pose[:, 2, 1] = -0.15\n inv_template_pose_mat = batch_rodrigues(inv_template_pose.reshape(-1, 3)).reshape(batch_size * seq_length, 24, 3, 3)\n device, dtype = inv_template_pose.device, inv_template_pose.dtype\n \n inv_J = vertices2jointsB(T_J_regressor.reshape(batch_size * seq_length, T_J_regressor.shape[2], T_J_regressor.shape[3]),\n new_Tpose_vertices.reshape(batch_size * seq_length, -1, 3))\n _, inv_A = batch_rigid_transform(inv_template_pose_mat, inv_J, body_model.parents, dtype=dtype)\n\n ##### ORIGINAL\n inv_W = T_lbs_weights.reshape(batch_size * seq_length, T_lbs_weights.shape[2], T_lbs_weights.shape[3])\n num_joints = body_model.J_regressor.shape[0]\n inv_T = torch.matmul(inv_W, inv_A.view(batch_size * seq_length, num_joints, 16)).view(batch_size * seq_length, -1, 4, 4)\n inv_nn_T = torch.gather(inv_T, 1, nn.idx.reshape(batch_size * seq_length, -1, 1, 1).repeat(1, 1, inv_T.shape[2], inv_T.shape[3]))\n #####\n \n inv_homogen_coord = torch.ones([batch_size * seq_length, root_joint_pred_template_garment_v.shape[1], 1], dtype=dtype, device=device)\n inv_v_posed_homo = torch.cat([root_joint_pred_template_garment_v, inv_homogen_coord], dim=2)\n inv_v_homo = torch.matmul(inv_nn_T, torch.unsqueeze(inv_v_posed_homo, dim=-1))\n inv_template_garment_v = inv_v_homo[:, :, :3, 0].reshape(batch_size * seq_length, -1, 3)\n\n if torch.any(torch.isnan(inv_template_garment_v)):\n import pdb; pdb.set_trace()\n\n zero_pose_pred_shape_v = zeropose_vertices.reshape(batch_size * seq_length, -1, 3)\n J = vertices2jointsB(T_J_regressor.reshape(batch_size * seq_length, T_J_regressor.shape[2], T_J_regressor.shape[3]),\n zero_pose_pred_shape_v)\n _, A = batch_rigid_transform(gt_pose_mat, J, body_model.parents, dtype=dtype)\n\n ##### ORIGINAL\n W = T_lbs_weights.reshape(batch_size * seq_length, T_lbs_weights.shape[2], T_lbs_weights.shape[3])\n T = torch.matmul(W, A.view(batch_size * seq_length, num_joints, 16)).view(batch_size * seq_length, -1, 4, 4)\n repeated_nn_idx = nn.idx.reshape(batch_size * seq_length, -1, 1, 1).repeat(1, 1, T.shape[2], T.shape[3]).reshape(batch_size * seq_length, -1, T.shape[2], T.shape[3])\n nn_T = torch.gather(T, 1, repeated_nn_idx)\n #####\n \n homogen_coord = torch.ones([batch_size * seq_length, inv_template_garment_v.shape[1], 1], dtype=dtype, device=device)\n v_posed_homo = torch.cat([inv_template_garment_v, homogen_coord], dim=2)\n v_homo = torch.matmul(nn_T, torch.unsqueeze(v_posed_homo, dim=-1))\n\n return v_homo[:, :, :3, 0].reshape(batch_size, seq_length, -1, 3), nn, inv_template_garment_v.reshape(batch_size, seq_length, -1, 3)\n\n def forward(self, x, body_model, batch):\n nbatch= x.size()[0]\n T = x.size()[1]\n N = x.size()[2]\n with torch.no_grad():\n output_dict = self.PCA_garment_encoder(x, body_model)\n lap_adj = sp.eye(self.adj_old.shape[0]) - gcn_utils.normalize(self.adj_old)\n output_dict['lap_adj'] = gcn_utils.sparse_mx_to_torch_sparse_tensor(lap_adj).cuda()\n regressed_garment_v = output_dict['tpose_garment'].reshape(nbatch, -1, 3)\n\n garment_summary = output_dict['garment_summary']\n displacements = self.displacement_encoder(garment_summary).reshape(nbatch, T, self.PCA_garment_encoder.garment_v_num, 3)\n displacements = displacements * 0.05\n if torch.any(torch.isnan(displacements)):\n 
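# replace NaN displacements from the encoder with zeros so the LBS step below stays finite\n            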
displacements[torch.where(torch.isnan(displacements))] = 0\n t_garment_displacement = regressed_garment_v.reshape(nbatch, 1, -1, 3).repeat(1, T, 1, 1) + displacements\n\n output_dict['lbs_pred_garment_v'], output_dict['lbs_nn'], output_dict['lbs_stage1_pred_garment_v'] = \\\n self.lbs_garment_MGN(t_garment_displacement, batch['Tpose_smpl_vertices_torch'].cuda(),\n batch['Tpose_smpl_root_joints_torch'].cuda(), batch['zeropose_smpl_vertices_torch'].cuda(),\n body_model, batch['pose_torch'].cuda(), batch['T_J_regressor'].cuda(),\n batch['T_lbs_weights'].cuda(), K=1)\n\n lbs_pred_garment_v = output_dict['lbs_pred_garment_v'].reshape(nbatch * T, -1, 3).contiguous()\n iter_regressed_lbs_garment_v = [lbs_pred_garment_v]\n output_dict['iter_regressed_lbs_garment_v'] = iter_regressed_lbs_garment_v\n\n return output_dict\n"
] |
[
[
"numpy.sqrt",
"torch.cat",
"torch.zeros",
"torch.no_grad",
"scipy.sparse.coo_matrix",
"torch.softmax",
"torch.ones",
"torch.mm",
"scipy.sparse.diags",
"torch.from_numpy",
"torch.nn.functional.relu",
"numpy.zeros",
"torch.isinf",
"numpy.power",
"torch.unsqueeze",
"torch.stack",
"numpy.array",
"torch.isnan",
"scipy.sparse.eye",
"numpy.ones",
"torch.matmul",
"torch.gather",
"numpy.isinf",
"torch.argmax"
]
] |
rayyang29/pygaggle
|
[
"6a0a1261293428e8df4817ef835c558ba5fd7b01"
] |
[
"pygaggle/run/evaluate_passage_ranker.py"
] |
[
"from typing import Optional, List\nfrom pathlib import Path\nimport logging\n\nfrom pydantic import BaseModel, validator\nfrom transformers import (AutoModel,\n AutoTokenizer,\n AutoModelForSequenceClassification,\n BertForSequenceClassification,\n T5ForConditionalGeneration)\nimport torch\n\nfrom .args import ArgumentParserBuilder, opt\nfrom pygaggle.rerank.base import Reranker\nfrom pygaggle.rerank.bm25 import Bm25Reranker\nfrom pygaggle.rerank.transformer import (\n UnsupervisedTransformerReranker,\n MonoT5,\n MonoBERT\n)\nfrom pygaggle.rerank.random import RandomReranker\nfrom pygaggle.rerank.similarity import CosineSimilarityMatrixProvider\nfrom pygaggle.model import (SimpleBatchTokenizer,\n T5BatchTokenizer,\n RerankerEvaluator,\n metric_names,\n MsMarcoWriter)\nfrom pygaggle.data import MsMarcoDataset\nfrom pygaggle.settings import MsMarcoSettings\n\n\nSETTINGS = MsMarcoSettings()\nMETHOD_CHOICES = ('transformer', 'bm25', 't5', 'seq_class_transformer',\n 'random')\n\n\nclass PassageRankingEvaluationOptions(BaseModel):\n task: str\n dataset: Path\n index_dir: Path\n method: str\n model: str\n split: str\n batch_size: int\n device: str\n is_duo: bool\n from_tf: bool\n metrics: List[str]\n model_type: Optional[str]\n tokenizer_name: Optional[str]\n\n @validator('task')\n def task_exists(cls, v: str):\n assert v in ['msmarco', 'treccar']\n\n @validator('dataset')\n def dataset_exists(cls, v: Path):\n assert v.exists(), 'data directory must exist'\n return v\n\n @validator('index_dir')\n def index_dir_exists(cls, v: Path):\n assert v.exists(), 'index directory must exist'\n return v\n\n @validator('model')\n def model_sane(cls, v: str, values, **kwargs):\n method = values['method']\n if method == 'transformer' and v is None:\n raise ValueError('transformer name or path must be specified')\n return v\n\n @validator('tokenizer_name')\n def tokenizer_sane(cls, v: str, values, **kwargs):\n if v is None:\n return values['model']\n return v\n\n\ndef construct_t5(options: PassageRankingEvaluationOptions) -> Reranker:\n model = MonoT5.get_model(options.model,\n from_tf=options.from_tf,\n device=options.device)\n tokenizer = MonoT5.get_tokenizer(options.model_type, batch_size=options.batch_size)\n return MonoT5(model, tokenizer)\n\n\ndef construct_transformer(options:\n PassageRankingEvaluationOptions) -> Reranker:\n device = torch.device(options.device)\n model = AutoModel.from_pretrained(options.model,\n from_tf=options.from_tf).to(device).eval()\n tokenizer = SimpleBatchTokenizer(AutoTokenizer.from_pretrained(\n options.tokenizer_name),\n options.batch_size)\n provider = CosineSimilarityMatrixProvider()\n return UnsupervisedTransformerReranker(model, tokenizer, provider)\n\n\ndef construct_seq_class_transformer(options: PassageRankingEvaluationOptions\n ) -> Reranker:\n try:\n model = MonoBERT.get_model(\n options.model, from_tf=options.from_tf, device=options.device)\n except AttributeError:\n # Hotfix for BioBERT MS MARCO. 
Refactor.\n BertForSequenceClassification.bias = torch.nn.Parameter(\n torch.zeros(2))\n BertForSequenceClassification.weight = torch.nn.Parameter(\n torch.zeros(2, 768))\n model = BertForSequenceClassification.from_pretrained(\n options.model, from_tf=options.from_tf)\n model.classifier.weight = BertForSequenceClassification.weight\n model.classifier.bias = BertForSequenceClassification.bias\n device = torch.device(options.device)\n model = model.to(device).eval()\n tokenizer = MonoBERT.get_tokenizer(options.tokenizer_name)\n return MonoBERT(model, tokenizer)\n\n\ndef construct_bm25(options: PassageRankingEvaluationOptions) -> Reranker:\n return Bm25Reranker(index_path=str(options.index_dir))\n\n\ndef main():\n apb = ArgumentParserBuilder()\n apb.add_opts(opt('--task',\n type=str,\n default='msmarco'),\n opt('--dataset', type=Path, required=True),\n opt('--index-dir', type=Path, required=True),\n opt('--method',\n required=True,\n type=str,\n choices=METHOD_CHOICES),\n opt('--model',\n required=True,\n type=str,\n help='Path to pre-trained model or huggingface model name'),\n opt('--output-file', type=Path, default='.'),\n opt('--overwrite-output', action='store_true'),\n opt('--split',\n type=str,\n default='dev',\n choices=('dev', 'eval')),\n opt('--batch-size', '-bsz', type=int, default=96),\n opt('--device', type=str, default='cuda:0'),\n opt('--is-duo', action='store_true'),\n opt('--from-tf', action='store_true'),\n opt('--metrics',\n type=str,\n nargs='+',\n default=metric_names(),\n choices=metric_names()),\n opt('--model-type', type=str),\n opt('--tokenizer-name', type=str))\n args = apb.parser.parse_args()\n options = PassageRankingEvaluationOptions(**vars(args))\n logging.info(\"Preprocessing Queries & Passages:\")\n ds = MsMarcoDataset.from_folder(str(options.dataset), split=options.split,\n is_duo=options.is_duo)\n examples = ds.to_relevance_examples(str(options.index_dir),\n is_duo=options.is_duo)\n logging.info(\"Loading Ranker & Tokenizer:\")\n construct_map = dict(transformer=construct_transformer,\n bm25=construct_bm25,\n t5=construct_t5,\n seq_class_transformer=construct_seq_class_transformer,\n random=lambda _: RandomReranker())\n reranker = construct_map[options.method](options)\n writer = MsMarcoWriter(args.output_file, args.overwrite_output)\n evaluator = RerankerEvaluator(reranker, options.metrics, writer=writer)\n width = max(map(len, args.metrics)) + 1\n logging.info(\"Reranking:\")\n for metric in evaluator.evaluate(examples):\n logging.info(f'{metric.name:<{width}}{metric.value:.5}')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.device",
"torch.zeros"
]
] |
wenxichen/tensorflow_yolo2
|
[
"f040d9932816d8b2f8d7a67231060f0beea821d4"
] |
[
"yolo1-resnet-adv.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import control_flow_ops\n# from datasets import dataset_factory\nfrom deployment import model_deploy\nfrom nets import nets_factory\n# from preprocessing import preprocessing_factory\n\nslim = tf.contrib.slim\n\ntf.app.flags.DEFINE_string(\n 'master', '', 'The address of the TensorFlow master to use.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_dir', None, 'The directory where the dataset files are stored.')\n\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.')\n\ntf.app.flags.DEFINE_float(\n 'weight_decay', 0.00004, 'The weight decay on the model weights.')\n\ntf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\n\ntf.app.flags.DEFINE_string(\n 'learning_rate_decay_type',\n 'exponential',\n 'Specifies how the learning rate is decayed. One of \"fixed\", \"exponential\",'\n ' or \"polynomial\"')\n\ntf.app.flags.DEFINE_float(\n 'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')\n\ntf.app.flags.DEFINE_float(\n 'end_learning_rate', 0.0001,\n 'The minimal end learning rate used by a polynomial decay learning rate.')\n\ntf.app.flags.DEFINE_string(\n 'trainable_scopes', None,\n 'Comma-separated list of scopes to filter the set of variables to train.'\n 'By default, None would train all the variables.')\n\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', None,\n 'The path to a checkpoint from which to fine-tune.')\n\ntf.app.flags.DEFINE_string(\n 'train_dir', './train_dir/yolo1-resnet/',\n 'Directory where checkpoints and event logs are written to.')\n\ntf.app.flags.DEFINE_integer(\n 'batch_size', 32, 'The number of samples in each batch.')\n\ntf.app.flags.DEFINE_float(\n 'num_epochs_per_decay', 2.0,\n 'Number of epochs after which learning rate decays.')\n\ntf.app.flags.DEFINE_integer(\n 'max_number_of_steps', None,\n 'The maximum number of training steps.')\n\ntf.app.flags.DEFINE_integer(\n 'log_every_n_steps', 10,\n 'The frequency with which logs are print.')\n\ntf.app.flags.DEFINE_integer(\n 'save_summaries_secs', 600,\n 'The frequency with which summaries are saved, in seconds.')\n\ntf.app.flags.DEFINE_integer(\n 'save_interval_secs', 600,\n 'The frequency with which the model is saved, in seconds.')\n\n\ndef _configure_learning_rate(num_samples_per_epoch, global_step):\n \"\"\"Configures the learning rate.\n\n Args:\n num_samples_per_epoch: The number of samples in each epoch of training.\n global_step: The global_step tensor.\n\n Returns:\n A `Tensor` representing the learning rate.\n\n Raises:\n ValueError: if\n \"\"\"\n decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *\n FLAGS.num_epochs_per_decay)\n\n if FLAGS.learning_rate_decay_type == 'exponential':\n return tf.train.exponential_decay(FLAGS.learning_rate,\n global_step,\n decay_steps,\n FLAGS.learning_rate_decay_factor,\n staircase=True,\n name='exponential_decay_learning_rate')\n elif FLAGS.learning_rate_decay_type == 'fixed':\n return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')\n elif FLAGS.learning_rate_decay_type == 'polynomial':\n return tf.train.polynomial_decay(FLAGS.learning_rate,\n global_step,\n decay_steps,\n FLAGS.end_learning_rate,\n power=1.0,\n cycle=False,\n name='polynomial_decay_learning_rate')\n 
else:\n raise ValueError('learning_rate_decay_type [%s] was not recognized',\n FLAGS.learning_rate_decay_type)\n\n\ndef _add_variables_summaries(learning_rate):\n summaries = []\n for variable in slim.get_model_variables():\n summaries.append(tf.summary.histogram(variable.op.name, variable))\n summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))\n return summaries\n\n\ndef _get_variables_to_train():\n \"\"\"Returns a list of variables to train.\n\n Returns:\n A list of variables to train by the optimizer.\n \"\"\"\n if FLAGS.trainable_scopes is None:\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]\n\n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train\n\n\ndef _get_init_fn():\n \"\"\"Returns a function run by the chief worker to warm-start the training.\n\n Note that the init_fn is only run when initializing the model during the very\n first global step.\n\n Returns:\n An init function run by the supervisor.\n \"\"\"\n if FLAGS.checkpoint_path is None:\n return None\n\n # Warn the user if a checkpoint exists in the train_dir. Then we'll be\n # ignoring the checkpoint anyway.\n if tf.train.latest_checkpoint(FLAGS.train_dir):\n tf.logging.info(\n 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n % FLAGS.train_dir)\n return None\n\n exclusions = ['resnet_v1_50/logits']\n\n # TODO(sguada) variables.filter_variables()\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Fine-tuning from %s' % checkpoint_path)\n\n return slim.assign_from_checkpoint_fn(\n checkpoint_path,\n variables_to_restore,\n ignore_missing_vars=False)\n\n\ndef main(_):\n if not FLAGS.dataset_dir:\n raise ValueError('You must supply the dataset directory with --dataset_dir')\n\n tf.logging.set_verbosity(tf.logging.INFO)\n with tf.Graph().as_default():\n #######################\n # Config model_deploy #\n #######################\n deploy_config = model_deploy.DeploymentConfig(\n num_clones=1,\n clone_on_cpu=False,\n replica_id=0,\n num_replicas=1,\n num_ps_tasks=0)\n\n # Create global_step\n with tf.device(deploy_config.variables_device()):\n global_step = slim.create_global_step()\n\n\n # TODO: integrate data\n ######################\n # Select the dataset #\n ######################\n dataset = dataset_factory.get_dataset(\n FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)\n\n ######################\n # Select the network #\n ######################\n network_fn = nets_factory.get_network_fn(\n 'resnet_v1_50',\n num_classes=(dataset.num_classes - FLAGS.labels_offset),\n weight_decay=FLAGS.weight_decay,\n is_training=True)\n\n # TODO: should write own preprocessing\n #####################################\n # Select the preprocessing function #\n #####################################\n preprocessing_name = 'resnet_v1_50'\n image_preprocessing_fn = preprocessing_factory.get_preprocessing(\n preprocessing_name,\n is_training=True)\n\n # TODO: data provider needed\n 
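# NOTE: assumes the dataset_factory / preprocessing_factory imports (commented\n    # out above) and a FLAGS = tf.app.flags.FLAGS binding are restored first.\n    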
##############################################################\n # Create a dataset provider that loads data from the dataset #\n ##############################################################\n with tf.device(deploy_config.inputs_device()):\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers=FLAGS.num_readers,\n common_queue_capacity=20 * FLAGS.batch_size,\n common_queue_min=10 * FLAGS.batch_size)\n [image, label] = provider.get(['image', 'label'])\n label -= FLAGS.labels_offset\n\n train_image_size = FLAGS.train_image_size or network_fn.default_image_size\n\n image = image_preprocessing_fn(image, train_image_size, train_image_size)\n\n images, labels = tf.train.batch(\n [image, label],\n batch_size=FLAGS.batch_size,\n num_threads=FLAGS.num_preprocessing_threads,\n capacity=5 * FLAGS.batch_size)\n labels = slim.one_hot_encoding(\n labels, dataset.num_classes - FLAGS.labels_offset)\n batch_queue = slim.prefetch_queue.prefetch_queue(\n [images, labels], capacity=2 * deploy_config.num_clones)\n\n ####################\n # Define the model #\n ####################\n def clone_fn(batch_queue):\n \"\"\"Allows data parallelism by creating multiple clones of network_fn.\"\"\"\n images, labels = batch_queue.dequeue()\n logits, end_points = network_fn(images)\n\n #############################\n # Specify the loss function #\n #############################\n if 'AuxLogits' in end_points:\n tf.losses.softmax_cross_entropy(\n logits=end_points['AuxLogits'], onehot_labels=labels,\n label_smoothing=0, weights=0.4, scope='aux_loss')\n tf.losses.softmax_cross_entropy(\n logits=logits, onehot_labels=labels,\n label_smoothing=0, weights=1.0)\n return end_points\n\n # Gather initial summaries.\n summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))\n\n clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])\n first_clone_scope = deploy_config.clone_scope(0)\n # Gather update_ops from the first clone. These contain, for example,\n # the updates for the batch_norm variables created by network_fn.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)\n\n # Add summaries for end_points.\n end_points = clones[0].outputs\n for end_point in end_points:\n x = end_points[end_point]\n summaries.add(tf.summary.histogram('activations/' + end_point, x))\n summaries.add(tf.summary.scalar('sparsity/' + end_point,\n tf.nn.zero_fraction(x)))\n\n # Add summaries for losses.\n for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):\n summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))\n\n # Add summaries for variables.\n for variable in slim.get_model_variables():\n summaries.add(tf.summary.histogram(variable.op.name, variable))\n\n moving_average_variables, variable_averages = None, None\n\n #########################################\n # Configure the optimization procedure. 
#\n #########################################\n with tf.device(deploy_config.optimizer_device()):\n learning_rate = _configure_learning_rate(dataset.num_samples, global_step)\n # TODO: may need to add flexibility in optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate)\n summaries.add(tf.summary.scalar('learning_rate', learning_rate))\n\n # Variables to train.\n variables_to_train = _get_variables_to_train()\n\n # and returns a train_tensor and summary_op\n total_loss, clones_gradients = model_deploy.optimize_clones(\n clones,\n optimizer,\n var_list=variables_to_train)\n # Add total_loss to summary.\n summaries.add(tf.summary.scalar('total_loss', total_loss))\n\n # Create gradient updates.\n grad_updates = optimizer.apply_gradients(clones_gradients,\n global_step=global_step)\n update_ops.append(grad_updates)\n\n update_op = tf.group(*update_ops)\n train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,\n name='train_op')\n\n # Add the summaries from the first clone. These contain the summaries\n # created by model_fn and either optimize_clones() or _gather_clone_loss().\n summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,\n first_clone_scope))\n\n # Merge all summaries together.\n summary_op = tf.summary.merge(list(summaries), name='summary_op')\n\n\n ###########################\n # Kicks off the training. #\n ###########################\n slim.learning.train(\n train_tensor,\n logdir=FLAGS.train_dir,\n master=FLAGS.master,\n is_chief=(FLAGS.task == 0),\n init_fn=_get_init_fn(),\n summary_op=summary_op,\n number_of_steps=FLAGS.max_number_of_steps,\n log_every_n_steps=FLAGS.log_every_n_steps,\n save_summaries_secs=FLAGS.save_summaries_secs,\n save_interval_secs=FLAGS.save_interval_secs,\n sync_optimizer=None)"
] |
[
[
"tensorflow.app.flags.DEFINE_string",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.batch",
"tensorflow.gfile.IsDirectory",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.get_collection",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.exponential_decay",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.logging.info",
"tensorflow.summary.histogram",
"tensorflow.train.polynomial_decay",
"tensorflow.train.latest_checkpoint",
"tensorflow.constant",
"tensorflow.nn.zero_fraction",
"tensorflow.app.flags.DEFINE_float"
]
] |
rickatx/message_response_pipeline
|
[
"7433c83e3b1cfd201da448c7545117fd79a13cea"
] |
[
"udacity_root/data/process_data.py"
] |
[
"# AUTOGENERATED! DO NOT EDIT! File to edit: ETL_Pipeline_Preparation.ipynb (unless otherwise specified).\n\n__all__ = ['load_data', 'clean_data', 'get_engine', 'save_data']\n\n# Cell\n\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport sys\n\n# Cell\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"Load message and category data from the specified filepaths.\n\n Args:\n - messages_filepath (str): path to messages csv file\n - categories_filepath (str): path to categories csv file\n\n Returns:\n - DataFrame of the two datasets, joined on 'id'.\n \"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n return messages.merge(categories, on='id')\n\n# Cell\n\ndef clean_data(df):\n \"\"\"Clean the loaded messages and categories data.\n Convert 'categories' column string into a set of numeric columns, and remove duplicate rows.\n\n Arg:\n - df: DataFrame containing data loaded by load_data()\n\n Returns:\n - DataFrame of cleaned data.\n \"\"\"\n # Categories are in a single string column; create a dataframe with a column for each category\n categories = df.categories.str.split(';', expand=True)\n\n # select the first row of the categories dataframe\n row = categories.iloc[0]\n # use this row to extract a list of new column names for categories.\n category_colnames = [s[:-2] for s in row]\n # rename the columns of `categories`\n categories.columns = category_colnames\n\n # convert category values to numbers\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n\n # convert column from string to numeric\n categories[column] = categories[column].astype(int)\n\n # related column has values (0, 1, 2)\n # encode all non-related entries as 0, so we have 1 for related, 0 for non-related\n categories.related = categories.related.replace(2, 0)\n\n # replace original category string values with numeric columns\n df = df.drop('categories', axis=1)\n\n # concatenate the original dataframe with the new `categories` dataframe\n df = df.join(categories)\n\n # remove duplicates\n return df.drop_duplicates()\n\n# Cell\n\ndef get_engine(database_filename):\n \"\"\"Return database engine given database filename.\"\"\"\n return create_engine(f'sqlite:///{database_filename}')\n\ndef save_data(df, engine):\n \"\"\"Save the specified DataFrame as table 'CategorizedMessages' to specified database `engine`.\"\"\"\n with engine.connect() as connection:\n df.to_sql('CategorizedMessages', connection, index=False, if_exists='replace')\n\n# Internal Cell\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n\n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n engine = get_engine(database_filepath)\n save_data(df, engine)\n\n print('Cleaned data saved to database!')\n\n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n              'disaster_messages.csv disaster_categories.csv '\\\n              'DisasterResponse.db')\n\n\n# Don't run main() when this cell is run in a notebook; use try so exported module\n# has no dependency on nbdev\ntry: from nbdev.imports import IN_NOTEBOOK\nexcept ImportError: IN_NOTEBOOK = False\n\nif __name__ == '__main__' and not IN_NOTEBOOK:\n    main()"
] |
[
[
"pandas.read_csv"
]
] |
hanaseleb/pycaret
|
[
"1fe6e1a6bee642351c4b6064d769f97294713f48"
] |
[
"pycaret/tests/test_preprocess.py"
] |
[
"import os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport pytest\nimport pycaret.datasets\nimport pycaret.internal.preprocess\nimport pycaret.classification\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n\ndef test_auto_infer_label():\n # loading dataset\n data = pycaret.datasets.get_data(\"juice\")\n data.loc[:, 'test_target'] = np.random.randint(5, 8, data.shape[0])\n data.loc[:, 'test_target'] = data.loc[:, 'test_target'].astype(np.int64) # should not encode\n target = 'test_target'\n\n # init setup\n _ = pycaret.classification.setup(\n data,\n target=target,\n log_experiment=True,\n silent=True,\n html=False,\n session_id=123,\n n_jobs=1\n )\n\n with pytest.raises(AttributeError):\n _ = pycaret.classification.get_config('prep_pipe').named_steps[\"dtypes\"].replacement\n\n\ndef test():\n # loading dataset\n data = pycaret.datasets.get_data(\"juice\")\n target = \"Purchase\"\n\n # preprocess all in one\n pipe = pycaret.internal.preprocess.Preprocess_Path_One(\n train_data=data, target_variable=target, display_types=False\n )\n X = pipe.fit_transform(data)\n assert isinstance(X, pd.core.frame.DataFrame)\n\n assert 1 == 1\n\n\nif __name__ == \"__main__\":\n test()\n"
] |
[
[
"numpy.random.randint"
]
] |
NetVoobrazhenia/pandas_task
|
[
"0289391e47bb136dacce1cec700ef0f52a028801"
] |
[
"lec/5.py"
] |
[
"import pandas as pd\n\nworks = pd.read_csv(\"./works.csv\")\n\nnNa = works['skills'].notna()\ndf = works['skills'].dropna().str.lower().str.contains('питон|python')\n\nprint(f\"зарплата тех, у кого в скиллах есть python (питон):\\n\",\n ', '.join(f\"{i}\" for i in works[nNa][df]['salary'].values))\n"
] |
[
[
"pandas.read_csv"
]
] |
guialfredo/mmtracking
|
[
"05926b4d824a9f7b05e78a6375e6e63530a55df9"
] |
[
"mmtrack/models/sot/base.py"
] |
[
"from abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom mmcv.runner import auto_fp16, load_checkpoint\nfrom mmcv.utils import print_log\n\nfrom mmtrack.utils import get_root_logger\n\n\nclass BaseSingleObjectTracker(nn.Module, metaclass=ABCMeta):\n \"\"\"Base class for single object tracker.\"\"\"\n\n def __init__(self):\n super(BaseSingleObjectTracker, self).__init__()\n self.logger = get_root_logger()\n\n def init_module(self, module, pretrain=None):\n \"\"\"Initialize the weights in video detector.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n if pretrain is not None:\n print_log(f'load {module} from: {pretrain}', logger=self.logger)\n load_checkpoint(\n getattr(self, module),\n pretrain,\n strict=False,\n logger=self.logger)\n else:\n getattr(self, module).init_weights()\n\n def freeze_module(self, module):\n \"\"\"Freeze module during training.\"\"\"\n if isinstance(module, str):\n modules = [module]\n else:\n if not (isinstance(module, list) or isinstance(module, tuple)):\n raise TypeError('module must be a str or a list.')\n else:\n modules = module\n for module in modules:\n m = getattr(self, module)\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n @property\n def with_backbone(self):\n \"\"\"bool: whether the framework has a backbone\"\"\"\n return hasattr(self, 'backbone') and self.backbone is not None\n\n @property\n def with_neck(self):\n \"\"\"bool: whether the framework has a neck\"\"\"\n return hasattr(self, 'neck') and self.neck is not None\n\n @property\n def with_head(self):\n \"\"\"bool: whether the framework has a head\"\"\"\n return hasattr(self, 'head') and self.head is not None\n\n @abstractmethod\n def forward_train(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image information dict where each\n dict has: 'img_shape', 'scale_factor', 'flip', and may also\n contain 'filename', 'ori_shape', 'pad_shape', and\n 'img_norm_cfg'. For details on the values of these keys see\n `mmtrack/datasets/pipelines/formatting.py:VideoCollect`.\n \"\"\"\n pass\n\n @abstractmethod\n def simple_test(self, img, img_metas, **kwargs):\n pass\n\n def aug_test(self, imgs, img_metas, **kwargs):\n \"\"\"Test function with test time augmentation.\"\"\"\n pass\n\n def forward_test(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n imgs (List[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains all images in the batch.\n img_metas (List[List[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) 
and the inner list indicates\n images in a batch.\n \"\"\"\n if isinstance(imgs, torch.Tensor):\n imgs = [imgs]\n elif not isinstance(imgs, list):\n raise TypeError(\n f'imgs must be a list or tensor, but got {type(imgs)}')\n\n assert isinstance(img_metas, list)\n if isinstance(img_metas[0], dict):\n img_metas = [img_metas]\n elif not isinstance(img_metas[0], list):\n raise TypeError(\n 'img_metas must be a List[List[dict]] or List[dict]')\n\n num_augs = len(imgs)\n if num_augs != len(img_metas):\n raise ValueError(f'num of augmentations ({len(imgs)}) '\n f'!= num of image meta ({len(img_metas)})')\n\n if num_augs == 1:\n # proposals (List[List[Tensor]]): the outer list indicates\n # test-time augs (multiscale, flip, etc.) and the inner list\n # indicates images in a batch.\n # The Tensor should have a shape Px4, where P is the number of\n # proposals.\n if 'proposals' in kwargs:\n kwargs['proposals'] = kwargs['proposals'][0]\n return self.simple_test(imgs[0], img_metas[0], **kwargs)\n else:\n assert imgs[0].size(0) == 1, 'aug test does not support ' \\\n 'inference with batch size ' \\\n f'{imgs[0].size(0)}'\n # TODO: support test augmentation for predefined proposals\n assert 'proposals' not in kwargs\n return self.aug_test(imgs, img_metas, **kwargs)\n\n @auto_fp16(apply_to=('img', ))\n def forward(self, img, img_metas, return_loss=True, **kwargs):\n \"\"\"Calls either :func:`forward_train` or :func:`forward_test` depending\n on whether ``return_loss`` is ``True``.\n\n Note this setting will change the expected inputs. When\n ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor\n and List[dict]), and when ``resturn_loss=False``, img and img_meta\n should be double nested (i.e. List[Tensor], List[List[dict]]), with\n the outer list indicating test time augmentations.\n \"\"\"\n if return_loss:\n return self.forward_train(img, img_metas, **kwargs)\n else:\n return self.forward_test(img, img_metas, **kwargs)\n\n def _parse_losses(self, losses):\n \"\"\"Parse the raw outputs (losses) of the network.\n\n Args:\n losses (dict): Raw output of the network, which usually contain\n losses and other necessary infomation.\n\n Returns:\n tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \\\n which may be a weighted sum of all losses, log_vars contains \\\n all the variables to be sent to the logger.\n \"\"\"\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars\n\n def train_step(self, data, optimizer):\n \"\"\"The iteration step during training.\n\n This method defines an iteration step during training, except for the\n back propagation and optimizer updating, which are done in an optimizer\n hook. 
Note that in some complicated cases or models, the whole process\n including back propagation and optimizer updating is also defined in\n this method, such as GAN.\n\n Args:\n data (dict): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n runner is passed to ``train_step()``. This argument is unused\n and reserved.\n\n Returns:\n dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \\\n ``num_samples``.\n\n - ``loss`` is a tensor for back propagation, which can be a \\\n weighted sum of multiple losses.\n - ``log_vars`` contains all the variables to be sent to the\n logger.\n - ``num_samples`` indicates the batch size (when the model is \\\n DDP, it means the batch size on each GPU), which is used for \\\n averaging the logs.\n \"\"\"\n losses = self(**data)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))\n\n return outputs\n\n def val_step(self, data, optimizer):\n \"\"\"The iteration step during validation.\n\n This method shares the same signature as :func:`train_step`, but used\n during val epochs. Note that the evaluation after training epochs is\n not implemented with this method, but an evaluation hook.\n \"\"\"\n losses = self(**data)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))\n\n return outputs\n\n def show_result(self,\n img,\n result,\n color='green',\n thickness=1,\n show=False,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Visualize tracking results.\n\n Args:\n img (str or ndarray): The image to be displayed.\n result (ndarray): ndarray of shape (4, ).\n color (str or tuple or Color, optional): color of bbox.\n Defaults to green.\n thickness (int, optional): Thickness of lines.\n Defaults to 1.\n show (bool, optional): Whether to show the image.\n Defaults to False.\n win_name (str, optional): The window name.\n Defaults to ''.\n wait_time (int, optional): Value of waitKey param.\n Defaults to 0.\n out_file (str, optional): The filename to write the image.\n Defaults to None.\n\n Returns:\n ndarray: Visualized image.\n \"\"\"\n assert result.ndim == 1\n assert result.shape[0] == 4\n mmcv.imshow_bboxes(\n img,\n result[np.newaxis, :],\n colors=color,\n thickness=thickness,\n show=show,\n win_name=win_name,\n wait_time=wait_time,\n out_file=out_file)\n return img\n"
] |
[
[
"torch.distributed.get_world_size",
"torch.distributed.is_available",
"torch.distributed.is_initialized"
]
] |
jamesmtuck/DNA_stability
|
[
"52b8561807969c09db8ba81e1198f6cbcee28857"
] |
[
"stability.py"
] |
[
"# Contributed by James Tuck ([email protected]) \n\nimport sys\nimport csv\nimport math\nfrom math import factorial\n\nimport matplotlib.pyplot as plt\nimport gmpy2 as g\nfrom gmpy2 import mpfr\n\n# Tune precision of gmpy2 module\ng.get_context().precision = 1000\n\ndef dumpXY(name, XY, labels):\n \"\"\" Write a csv file called name+\".csv\" using\n data in XY organized as tuples (X0,Y0), (X1,Y1), \n ... (Xi, Yi), where each Xi and Yi are vectors of a \n common length, n.\n Each line of the file expands these sets:\n X0[0], Y0[1], X0[1], Y0[1],...,X0[j],Y0[j],...X0[n-1],Y0[n-1]\n X1[0], Y1[1], X1[1], Y1[1],...,X1[j],Y1[j],...X1[n-1],Y1[n-1]\n ...\n Xi[0], Yi[1], Xi[1], Yi[1],...,Xi[j],Yi[j],...Xi[n-1],Yi[n-1]\n ...\n \"\"\"\n with open(name+\".csv\",\"w\") as csvfile:\n wr = csv.writer(csvfile)\n row = []\n for l in labels:\n row.append(\"x\")\n row.append(l)\n wr.writerow(row)\n row = []\n l = 0\n for x,y in XY:\n l = max(l, len(x))\n for i in range(l):\n row = []\n for x,y in XY:\n if i < len(x):\n row.append(x[i])\n row.append(y[i])\n else:\n row.append(' ')\n row.append(' ')\n wr.writerow(row)\n\n \ndef dump(name,X,X_label,Ys,Ys_labels):\n \"\"\" Write a csv file called name+\".csv\" using\n X as independent variable and Ys as a set of dependent variables.\n xlabel is the label for X, and ylabels for Y.\n CSV file contains lines like this, where m is length of X and Y and n is length\n of each member of Y:\n X[0],Y[0][0],Y[0][1],Y[0][2],...,Y[0][n-1]\n X[1],Y[1][0],Y[1][1],Y[1][2],...,Y[1][n-1]\n ...\n X[i],Y[i][0],Y[i][1],Y[i][2],...,Y[i][n-1]\n ...\n X[m-1],Y[m-1][0],Y[m-1][1],Y[m-1][2],...,Y[m-1][n-1]\n\n \"\"\"\n with open(name+'.csv', 'w') as csvfile:\n wr = csv.writer(csvfile)\n wr.writerow([X_label]+Ys_labels)\n for i,x in enumerate(X):\n row = [x]\n for y in Ys:\n row.append(\"{:1.5e}\".format(y[i]))\n wr.writerow(row) \n\n\ndef plot(X,Ys,labels,xlabel=\"\",ylabel=\"\",title=\"\"):\n \"\"\" Helper function to quickly plot Y vs X with corresponding labels. \"\"\"\n for Y,label in zip(Ys,labels):\n plt.plot(X,Y,label=label)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.suptitle(title)\n plt.legend()\n plt.show()\n\ndef g_pow(base,e):\n \"\"\" g_pow(base,e): gmpy2 helper function for exponentiation\n with proper special case handling for base==0.\n \"\"\"\n #print (base,e)\n if base==mpfr(\"0.0\"):\n if e==mpfr(0):\n return mpfr(\"1\")\n else:\n return mpfr(\"0\")\n return g.exp(g.mul(g.log(mpfr(base)),mpfr(e)))\n\n\ndef binomial_pmf(q, N, k):\n \"\"\" Calculate binomial probability mass function. \"\"\"\n # Useful as sanity check that gmpy2 is providing sufficient precision.\n # g.bincoef is essential for ensuring precision.\n tmp = g.mul(g_pow(q,k),g.mul(g.bincoef(N,k),g_pow(1-q,N-k)))\n return tmp\n\ndef binomial_cdf(q, N, k):\n \"\"\" Calculate binomial cumulative distribution function. 
\"\"\"\n # Useful as sanity check that gmpy2 is providing sufficient precision.\n # g.bincoef is essential for ensuring precision.\n tmp_list = [mpfr(\"0\")]\n for i in range(0,k+1):\n tt1 = g.mul(g_pow(q,i),g.mul(g.bincoef(N,i),g_pow(1-q,N-i)))\n tmp_list.append( tt1 ) \n tmp1 = g.fsum(tmp_list)\n return tmp1\n\ndef plot_binomial_pmf(N):\n X = [ _ for _ in range(0,N+1,1) ]\n Y = [\n [ binomial_pmf(0.01,N,_) for _ in X ],\n [ binomial_pmf(0.25,N,_) for _ in X ],\n [ binomial_pmf(0.5,N,_) for _ in X ],\n [ binomial_pmf(0.7,N,_) for _ in X ],\n [ binomial_pmf(0.9,N,_) for _ in X ]\n ]\n #print (Y)\n plot(X,Y,[ \"q={} N={}\".format(qi,N) for qi in [0.01,0.25,0.5,0.7,0.9] ],\n \"Random Variable\", \"Probability\", \"Binomial PMF with N={} and varying q\".format(N))\n\n\ndef plot_binomial_cdf(N):\n X = [ _ for _ in range(0,N+1,1) ]\n Y = [\n [ binomial_cdf(0.01,N,_) for _ in X ],\n [ binomial_cdf(0.25,N,_) for _ in X ],\n [ binomial_cdf(0.5,N,_) for _ in X ],\n [ binomial_cdf(0.7,N,_) for _ in X ],\n [ binomial_cdf(0.9,N,_) for _ in X ]\n ]\n #print (Y)\n plot(X,Y,[ \"q={} N={}\".format(qi,N) for qi in [0.01,0.25,0.5,0.7,0.9] ], \"Random Variable\", \"Probability\",\n \"Binomial CDF with N={} and varying q\".format(N))\n\n\ndef plot_binomials(N):\n \"\"\" Plot binomial PMF and CDF for value of N \"\"\"\n plot_binomial_cdf(N)\n plot_binomial_pmf(N)\n\nclass Codeword:\n \"\"\" This class models a codeword very abstractly. \"\"\"\n def __init__(self,length,num,es=1e-3):\n \"\"\"\n Its members are:\n num = number of codewords\n length = length of codeword in nucleotides, assumes a block code\n es = probability of single nucleotide in error\n \"\"\"\n self.length = length\n self.num = num\n self.es = es\n\n def P_error(self,k=1):\n \"\"\" Compute the likelihood of a codeword being in error based on es and length \"\"\"\n return g.sub(mpfr(\"1\"),binomial_cdf(self.es,self.length,k-1))\n\nclass RSCode:\n \"\"\" RSCode models a single Reed-Solomon Code, either inner or outer. \"\"\"\n \n \"\"\" memoize RSCode objects so that we avoid unnecessary repeated calculations \"\"\"\n rs_code_objects = {}\n\n @staticmethod\n def create(n,k,d,es=1e-3,ee=1e-3):\n \"\"\" Build an object that represents an RS[n,k,d] code, and specify the probability\n of a symbol error (es) or erasure (ee). 
\n        \"\"\"\n        if (n,k,d,es,ee) in RSCode.rs_code_objects:\n            return RSCode.rs_code_objects[(n,k,d,es,ee)]\n        else:\n            rs = RSCode(n,k,d,es,ee)\n            RSCode.rs_code_objects[(n,k,d,es,ee)] = rs\n            return rs\n\n    def __init__(self,n,k,d,es=1e-3,ee=1e-3):\n        \"\"\" Constructor for an [n,k,d] RSCode with symbol error rate (es) and erasure\n        error rate (ee).\n        \"\"\"\n        self.q = 4\n        self.n = n\n        self.k = k\n        self.d = d \n        self.t = int((d-1)/2)\n        self.symbol_err_rate = es\n        self.erasure_err_rate = ee\n        self.result = mpfr(\"0\")\n        self.has_result = False\n        #print (n,k,d,es,ee)\n\n    def label(self):\n        return \"RS[{},{},{}] psym={:1.2e} perasure={:1.2e}\".format(self.n,self.k,self.d,self.symbol_err_rate, self.erasure_err_rate)\n    \n    def R(self):\n        return self.k / self.n\n\n    def R_index(self, M):\n        return (self.k - math.log(M,256))/float(self.n)\n    \n    def P_symbol_error(self):\n        return g.sub(mpfr(\"1\"),binomial_cdf(self.symbol_err_rate,self.n,self.t))\n    \n    def P_random_errors(self,i):\n        return binomial_pmf(self.symbol_err_rate,self.n,i)\n    \n    def P_random_erasures(self,x):\n        #print (\"random_erasures\",x,g.sub(1,binomial_cdf(self.erasure_err_rate,self.n,x)))\n        return g.sub(mpfr(\"1\"),binomial_cdf(self.erasure_err_rate,self.n,x))\n    \n    def P_erasure_error(self):\n        tmp = mpfr('0')\n        for i in range(self.t+1):\n            tmp = g.add(tmp, g.mul(self.P_random_errors(i),self.P_random_erasures(2*(self.t-i))))\n        return tmp\n    \n    def P_result(self):\n        if not self.has_result:\n            self.result = g.add(self.P_symbol_error(),self.P_erasure_error())\n            self.has_result = True  # memoize so repeated calls reuse the computed value\n        return self.result\n\n    #def P_erasure_res(self):\n    #    return self.P_random_erasures(self.d-1)\n    \n    #def P_ue(self):\n    #    return g.sub(mpfr(\"1\"),binomial_cdf(self.symbol_err_rate,self.n,self.t))\n    \n\nclass RSInnerOuter:\n    \"\"\" RSInnerOuter contains an inner and outer model RSCode object and is able\n    to analyze their combined properties, namely code Rate and decoding error\n    probability.\n    \"\"\"\n    def __init__(self,n_inner,k_inner,d_inner,n_outer,k_outer,d_outer,p_sub,p_strand_loss):\n        self.rs_inner = RSCode.create(n_inner,k_inner,d_inner,p_sub,mpfr(\"1e-15\"))\n        #print (\"[{},{},{}] Error probability inner={:1.2e}\".format(n_inner,k_inner,d_inner,self.rs_inner.P_result()))\n        self.rs_outer = RSCode.create(n_outer,k_outer,d_outer,self.rs_inner.P_result(),mpfr(p_strand_loss))\n        #print (\"[{},{},{}] Error probability outer={:1.2e}\".format(n_outer,k_outer,d_outer,self.rs_outer.P_result()))\n    \n    def P_result(self):\n        return self.rs_outer.P_result()\n\n    def R_index(self, M):\n        #print (\"outer_k={} inner_k={} logM={}\".format(self.rs_outer.k,self.rs_inner.k,math.log(M,256)))\n        return (self.rs_outer.k)*(self.rs_inner.k - math.ceil(math.log(M, 256))) / (self.rs_inner.n*self.rs_outer.n)\n    \n    def R_raw(self):\n        return (self.rs_inner.k * self.rs_outer.k)/(self.rs_inner.n*self.rs_outer.n)\n\n    def getLabel(self):\n        return \"[{}*{},{}*{}]\".format(self.rs_outer.n,self.rs_inner.n,self.rs_outer.k,self.rs_inner.k)\n\n    # @staticmethod\n    # def get(L,d_inner,d_outer,p_sym,p_strand):\n    #     print (\"get args:\",L,d_inner,d_outer,p_sym,p_strand)\n    #     cw = Codeword(4,256,p_sym)\n    #     n_inner = int(L/4)\n    #     k_inner = n_inner - d_inner\n    #     n_outer = 255\n    #     k_outer = n_outer - d_outer\n    #     return RSInnerOuter(n_inner,k_inner,d_inner,n_outer,k_outer,d_outer,p_sym,p_strand) \n    \n    # @staticmethod\n    # def get50(p_sub,p_strand_loss):\n    #     expected_strand_loss = int(255*p_strand_loss*(1-p_strand_loss)*2+1)\n    #     cw = Codeword(4,256,p_sub)\n    #     L = 50\n    #     n_inner = int(L/4)\n    #     d_inner = 
2*int(n_inner*cw.P_error()*(1-cw.P_error))+1\n # k_inner = n_inner - d_inner\n # n_outer = 255\n # d_outer = expected_strand_loss\n # k_outer = n_outer - d_outer\n # return RSInnerOuter(n_inner,k_inner,d_inner,n_outer,k_outer,d_outer,p_sub,p_strand_loss)\n\n # @staticmethod\n # def get200(p_sub,p_strand_loss):\n # expected_strand_loss = int(255*p_strand_loss*(1-p_strand_loss)*2+1)\n # cw = Codeword(4,256,p_sub)\n # L = 200\n # n_inner = int(L/4)\n # d_inner = 2*int(n_inner*cw.P_error()*(1-cw.P_error))+1\n # k_inner = n_inner - d_inner\n # n_outer = 255\n # d_outer = expected_strand_loss\n # k_outer = n_outer - d_outer\n # return RSInnerOuter(n_inner,k_inner,d_inner,n_outer,k_outer,d_outer,p_sub,p_strand_loss)\n\n # @staticmethod\n # def get1000(p_sub,p_strand_loss):\n # expected_strand_loss = int(255*p_strand_loss*(1-p_strand_loss)*2+1)\n # cw = Codeword(4,256,p_sub)\n # L = 1000\n # n_inner = int(L/4)\n # d_inner = 2*int(n_inner*cw.P_error()*(1-cw.P_error))+1\n # k_inner = n_inner - d_inner\n # n_outer = 255\n # d_outer = expected_strand_loss\n # k_outer = n_outer - d_outer\n # return RSInnerOuter(n_inner,k_inner,d_inner,n_outer,k_outer,d_outer,p_sub,p_strand_loss)\n\n \ndef result(L,cw_size=4,cw_er=1e-3,cw_es=1e-3,dropout=1e-3):\n n = int(L/cw_size)\n errs = []\n D = []\n for d in range(3,n,2):\n k = n - d\n D.append(d)\n rs_inner = RSCode(n*cw_size,k*cw_size,d,cw_er,cw_es)\n rs_outer = RSCode(255,255-31,31,rs_inner.P_result(),dropout)\n errs.append(g.log(rs_outer.P_result()))\n #print (zip(D,errs))\n plot (D,[errs],[\"p\"]) \n return\n\n\ndef rs_sweep_length(n=255,k=[235],Length=[_ for _ in range(50,201,25)],p_se=[g_pow(10,_/10.0) for _ in range(-80,-10,1)],copies=[1],PL=lambda L, c: g_pow(g.mul(mpfr(1e-3),mpfr(L)),mpfr(c)), filename=\"sweep_length\" ):\n\n #print (n,k,Length,p_se) \n #X = [ math.log(x,10) for x in p_se ]\n X = Length\n #print (X)\n Y = []\n Label = []\n for ki in k:\n #nol = []\n for c in copies:\n for p in p_se:\n y = []\n Label.append( \"RS(N={},k={}) perr={:1.1e} c={}\".format(n,ki,float(p),c) )\n for l in Length:\n #cw_size = 4\n #cw_strand = int(l/cw_size)\n #d = 3 \n #rs_inner = RSCode(cw_strand,cw_strand-d,d,mpfr(p),mpfr(0.001))\n rs_code = RSCode(n,ki,n-ki,mpfr(p),PL(mpfr(l),c))\n y.append(g.log10(rs_code.P_result()))\n Y.append(y)\n #print (y) \n\n fig, ax = plt.subplots()\n #ax.set_xlim(-1,-8)\n #ax.set_ylim(-20,0)\n plt.ylabel(\"Probabilty Decoding Error - Log Scale (10^y)\")\n plt.xlabel(\"Strand Length (nt)\")\n #ax.grid(True)\n for y,label in zip(Y,Label):\n plt.plot(X,y,label=label)\n plt.legend()\n plt.show()\n dump(filename,X,\"Length\",Y,Label)\n\n\ndef get_inner_at_target(L,index,P_target,p_sym):\n \"\"\" Search for an inner RSCode that is at least as good as P_target\n assuming L and symbol error rate of p_sym. 
\n \"\"\"\n cw = Codeword(4,256,p_sym)\n n = int(L/4)\n inner = None\n guess = int(n*p_sym)\n if guess % 2 == 0:\n guess += 1\n for d in range(guess,n-3,2):\n if n-d-index < 1:\n continue\n inner = RSCode.create(n,n-d,d,es=mpfr(cw.P_error()),ee=mpfr(1e-15))\n if inner.P_result() <= P_target:\n return inner\n return inner\n\ndef get_outer_at_target(P_target,p_sym,p_loss):\n \"\"\" Search for an outer RSCode by varying distance that is at least \n as good as P_target assuming L=255 and symbol error rate of p_sym \n and strand loss rate of p_loss \n \"\"\"\n n=255\n outer = None\n guess = int(n*p_loss)\n if guess % 2 == 0:\n guess += 1\n for d in range(guess,n-3,2):\n outer = RSCode.create(n,n-d,d,p_sym,p_loss)\n if outer.P_result() < P_target:\n return outer\n return outer\n \n \ndef get_rs_at_res(M=1e9,L=200,P_res=mpfr(1e-11),p_sym=mpfr(1e-3),p_loss=mpfr(1e-3)):\n cw = Codeword(4,256,p_sym)\n index = math.log(M,256)\n inner = get_inner_at_target(L,index,P_res,p_sym)\n if inner==None:\n return None\n frac = g.factorial(int((inner.d-1)/2))\n outer = get_outer_at_target(P_res,g.div(inner.P_result(),frac),\n g.mul(inner.P_result(),g.sub(mpfr(1),g.div(mpfr(1),mpfr(frac))))+p_loss)\n if outer==None:\n return None\n return RSInnerOuter(inner.n,inner.k,inner.d,outer.n,outer.k,outer.d,cw.P_error(),p_loss)\n \ndef compare_rs_rate(L=[l for l in range(50,1000,20)],\n pse=[1e-3], slope=[1e-3],\n PL=lambda L, c, slope: g_pow(g.mul(mpfr(slope),mpfr(L)),mpfr(c))):\n\n XY = []\n XRE = []\n Label = []\n for s in slope:\n for p in pse:\n y = []\n re = []\n x = []\n Label.append( \"RS p_break/nt={:1.1e} p_sym={:1.1e}\".format(float(s),float(p)) )\n for l in L:\n rs = get_rs_at_res(1e9,l,mpfr(\"1e-14\"),p,PL(l,1,s))\n if rs == None or rs.P_result() > mpfr(\"1e-14\"):\n continue\n x.append(l)\n y.append(rs.R_index(1e9))\n re.append(g.log10(rs.P_result()))\n \n XY.append( [x,y] )\n XRE.append( [x,re] )\n \n fig, ax = plt.subplots()\n plt.ylabel(\"Information Density\")\n plt.xlabel(\"Strand Length (nt)\")\n #ax.grid(True)\n for [x,y],label in zip(XY,Label):\n plt.plot(x,y,label=label)\n plt.legend()\n plt.show()\n dumpXY(\"compare_rs_rate\",XY,Label)\n\n # fig, ax = plt.subplots()\n # plt.ylabel(\"Decoding Error Probability\")\n # plt.xlabel(\"Strand Length (nt)\")\n # #ax.grid(True)\n # for [x,y],label in zip(XRE,Label):\n # plt.plot(x,y,label=label)\n # plt.legend()\n # plt.show()\n\n #dump(filename,X,\"Length\",Y,Label)\n\ndef compare_density_vs_loss(L=[l for l in range(50,1000,20)],\n pse=[1e-3], slope=[1e-3],\n PL=lambda L, c, slope: g_pow(g.mul(mpfr(slope),mpfr(L)),mpfr(c))):\n\n XY = []\n XRE = []\n Label = []\n for l in L:\n for p in pse:\n y = []\n re = []\n x = []\n Label.append( \"RS L={} p_sym={:1.1e}\".format(l,float(p)) )\n for s in slope:\n rs = get_rs_at_res(1e9,l,mpfr(\"1e-14\"),p,PL(l,1,s))\n if rs == None or rs.P_result() > mpfr(\"1e-14\"):\n continue\n x.append(g.log10(s))\n y.append(rs.R_index(1e9))\n re.append(g.log10(rs.P_result()))\n \n XY.append( [x,y] )\n XRE.append( [x,re] )\n \n fig, ax = plt.subplots()\n plt.ylabel(\"Information Density\")\n plt.xlabel(\"Log Probability of Strand Loss\")\n #ax.grid(True)\n ax.set_xlim(-2.5,-8.5)\n for [x,y],label in zip(XY,Label):\n plt.plot(x,y,label=label)\n plt.legend()\n plt.show()\n dumpXY(\"compare_density_loss\",XY,Label)\n\n # fig, ax = plt.subplots()\n # plt.ylabel(\"Decoder Error Probability\")\n # plt.xlabel(\"Strand Length (nt)\")\n # ax.set_xlim(-1,-8)\n # #ax.grid(True)\n # for [x,y],label in zip(XRE,Label):\n # 
plt.plot(x,y,label=label)\n # plt.legend()\n # plt.show()\n \n\ndef rs_code_with_copy(n,d,copy,p_sym,p_erasure):\n loss = g_pow(p_erasure,copy)\n rs = RSCode(n,n-d+1,d,p_sym,loss)\n return rs\n \n \ndef compare_copy_to_code(p_err):\n label = []\n error = []\n rate = []\n rs = rs_code_with_copy(255,3,4,mpfr(\"0\"),mpfr(p_err))\n \n label.append( rs.label() + \" copies=4 \" )\n error.append( g.log10(rs.P_result()) )\n rate.append( rs.R() / 4 )\n\n rs =get_outer_at_target(rs.P_result(),mpfr(\"0\"),mpfr(p_err))\n label.append( rs.label() + \" copies=1\" )\n error.append( g.log10(rs.P_result()) )\n rate.append( rs.R() )\n\n for l,e,r in zip(label,error,rate):\n print (\"{}: {} {}\".format(l,e,r))\n \ndef make_plots():\n rs_sweep_length(n=255,\n k=[223],Length=[_ for _ in range(50,1025,25)],\n p_se=[mpfr(1e-2),mpfr(1e-3)],\n copies = [1,3,10],\n PL=lambda l,c: g_pow(g.mul(mpfr(\"0.0005\"),mpfr(l)),mpfr(c)))\n\n compare_rs_rate(slope=[1e-3,5e-4,1e-4],L=[l for l in range(50,1020,20)])\n compare_density_vs_loss(slope=[ 10**(_/10.0) for _ in range(-80,-9, 10) ],L=[l for l in range(200,1100,200)])\n \n#test:\n#plot_binomials(256) \n\nif __name__ == \"__main__\": \n make_plots()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
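The script above reduces every estimate to a high-precision binomial tail; a minimal self-contained sketch of that core computation using the same gmpy2 calls and 1000-bit precision (the RS[255,223,33] parameters are illustrative, not taken from the file):

import gmpy2 as g
from gmpy2 import mpfr

g.get_context().precision = 1000  # same precision setting as the script

def p_uncorrectable(p_sym, n, t):
    # P(more than t of n symbols are in error) = 1 - BinomialCDF(t; n, p_sym)
    p = mpfr(p_sym)
    cdf = g.fsum([g.bincoef(n, i) * p**i * (1 - p)**(n - i) for i in range(t + 1)])
    return 1 - cdf

print(p_uncorrectable(1e-3, 255, 16))  # RS[255,223,33] corrects t = 16 symbol errors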
dsj96/TITS
|
[
"ea9c4dc812ff871c7ccb2e3748e35d3b634920d0"
] |
[
"FNN.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nfrom torch.nn import Module\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nimport math\r\nimport numpy as np\r\nimport argparse\r\n\r\nfrom metrics import RMSE, MAPE_y, MAPE_y_head\r\nfrom utils import *\r\n\r\n\r\nclass FNNModel(nn.Module):\r\n def __init__(self, n_input, n_hidden, n_output, dropout):\r\n super(FNNModel, self).__init__()\r\n self.n_input = n_input\r\n self.n_output = n_output\r\n self.dropout = dropout\r\n self.layer1 = nn.Linear(n_input, n_hidden, bias=True)\r\n\r\n self.layer2 = nn.Linear(n_hidden, n_hidden, bias=True)\r\n self.layer3 = nn.Linear(n_hidden, n_output, bias=True)\r\n\r\n def forward(self, x):\r\n out1 = self.layer1(x)\r\n out1 = F.sigmoid(out1)\r\n out1 = F.dropout(out1, self.dropout, training=True)\r\n out2 = self.layer2(out1)\r\n out2 = F.sigmoid(out2)\r\n out2 = F.dropout(out2, self.dropout, training=True)\r\n out3 = self.layer3(out2)\r\n\r\n return out3\r\n\r\n\r\ndef get_args():\r\n\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--cuda', type=bool , default=False,\r\n help='if use CUDA training.')\r\n\r\n parser.add_argument('--dataset', type=str , default='jinan', # hangzhou or jinan\r\n help='select the dataset.')\r\n\r\n parser.add_argument('--seed', type=int, default=0, help='Random seed.') # jinan:2 y=0.55 y_head=0.497 hangzhou:0\r\n\r\n parser.add_argument('--epochs', type=int, default=100,\r\n help='Number of epochs to train.')\r\n\r\n parser.add_argument('--lr', type=float, default=0.05,\r\n help='Initial learning rate.')\r\n\r\n parser.add_argument('--weight_decay', type=float, default=1e-3,\r\n help='Weight decay (L2 loss on parameters).')\r\n\r\n parser.add_argument('--n_hidden', type=int, default=128,\r\n help='Number of hidden units.')\r\n\r\n parser.add_argument('--n_output', type=int, default=1,\r\n help='Number of output units.')\r\n\r\n parser.add_argument('--percent', type=float, default=0.2,\r\n help='Number of percent to test.')\r\n\r\n parser.add_argument('--dropout', type=float, default=0.1,\r\n help='Dropout rate (1 - keep probability).')\r\n\r\n parser.add_argument('--num_slice', type=int, default=12,\r\n help='take the num_slice G into consideration.')\r\n\r\n parser.add_argument('--matual_split', type=bool, default=False,\r\n help='')\r\n\r\n args, _ = parser.parse_known_args()\r\n\r\n return args\r\n\r\n\r\ndef matual_split_data_hangzhou(normed_ways_segment_volume_dict):\r\n test_ways_list = [0,24,59,201,289]\r\n train_ways_segment_volume_dict = {}\r\n test_ways_segment_volume_dict = {}\r\n train_ways_set = set(normed_ways_segment_volume_dict.keys()) - set(test_ways_list)\r\n for test_way in test_ways_list:\r\n test_ways_segment_volume_dict[test_way] = normed_ways_segment_volume_dict[test_way]\r\n for train_way in train_ways_set:\r\n train_ways_segment_volume_dict[train_way] = normed_ways_segment_volume_dict[train_way]\r\n return train_ways_segment_volume_dict, test_ways_segment_volume_dict\r\n\r\n\r\ndef evalueat_model(model, args, features, test_ways_segment_volume_dict, cur_slice):\r\n model.eval()\r\n pre_list, true_list = [], []\r\n result = model(features)\r\n for cur_way, volume_list in test_ways_segment_volume_dict.items():\r\n pre_list.append(result[cur_way])\r\n true_list.append(volume_list[cur_slice])\r\n\r\n mape_y = MAPE_y(pre_list, true_list)\r\n mape_y_head = MAPE_y_head(pre_list, true_list)\r\n rmse = RMSE(pre_list, 
true_list)\r\n model.train()\r\n return mape_y, mape_y_head, rmse\r\n\r\n\r\n\r\ndef train_model(args, features, ways_segment_volume_dict):\r\n\r\n if args.matual_split:\r\n train_ways_segment_volume_dict, test_ways_segment_volume_dict = matual_split_data_hangzhou(ways_segment_volume_dict)\r\n else:\r\n data_feature, data_target = preprocess_split_data(ways_segment_volume_dict)\r\n train_volume_arr, test_volume_arr, train_leida_id_arr, test_leida_id_arr = \\\r\n train_test_split(data_feature, data_target, test_size=args.percent, random_state=args.seed)\r\n train_ways_segment_volume_dict = combine_ways_segment_volume_dict(train_leida_id_arr, train_volume_arr)\r\n test_ways_segment_volume_dict = combine_ways_segment_volume_dict(test_leida_id_arr, test_volume_arr)\r\n\r\n mape_y_list, mape_y_head_list, rmse_list = [], [], []\r\n\r\n\r\n model = FNNModel(n_input=features.shape[1], n_hidden=args.n_hidden, n_output=args.n_output, dropout = args.dropout)\r\n optimizer = optim.Adam(model.parameters() , lr=args.lr, weight_decay=args.weight_decay)\r\n\r\n for name, param in model.named_parameters():\r\n if param.requires_grad:\r\n print(name)\r\n for cur_slice in range(args.num_slice):\r\n for i in range(args.epochs):\r\n model.train()\r\n optimizer.zero_grad()\r\n result = model(features)\r\n train_loss = 0.\r\n for cur_way, volume_list in train_ways_segment_volume_dict.items():\r\n train_loss = train_loss + (result[cur_way] - volume_list[cur_slice])**2\r\n\r\n print(\"train_loss:\", train_loss)\r\n train_loss.backward()\r\n optimizer.step()\r\n\r\n\r\n\r\n cur_mape_y, cur_mape_y_head, cur_rmse = evalueat_model(model, args, features, test_ways_segment_volume_dict, cur_slice)\r\n mape_y_list.append(cur_mape_y)\r\n mape_y_head_list.append(cur_mape_y_head)\r\n rmse_list.append(cur_rmse)\r\n\r\n\r\n model = FNNModel(n_input=features.shape[1], n_hidden=args.n_hidden, n_output=args.n_output, dropout=args.dropout)\r\n optimizer = optim.Adam(model.parameters() , lr=args.lr, weight_decay=args.weight_decay)\r\n\r\n return mape_y_list, mape_y_head_list, rmse_list\r\n\r\n\r\nif __name__ == '__main__':\r\n '''0. preprocess data'''\r\n args = get_args()\r\n features = read_pkl('FNN_data/{}/features.pkl'.format(args.dataset))\r\n ways_segment_volume_dict = read_pkl('FNN_data/{}/ways_segment_volume_dict.pkl'.format(args.dataset))\r\n\r\n '''1.train\\evaluate'''\r\n mape_y_list, mape_y_head_list, rmse_list = train_model(args, features, ways_segment_volume_dict)\r\n\r\n print( 'mape_y_list:{}, mape_y_head_list:{}, rmse_list:{}'.format(np.mean(mape_y_list), np.mean(mape_y_head_list), np.mean(rmse_list)))\r\n print('over')\r\n"
] |
[
[
"torch.nn.functional.dropout",
"sklearn.model_selection.train_test_split",
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"numpy.mean"
]
] |
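The FNNModel above gates F.dropout on self.training so that evaluation disables dropout; an idiomatic sketch of the same three-layer network with nn.Dropout, which tracks train()/eval() automatically (layer sizes follow the get_args defaults):

import torch
import torch.nn as nn

class TinyFNN(nn.Module):
    def __init__(self, n_input, n_hidden=128, n_output=1, dropout=0.1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_input, n_hidden), nn.Sigmoid(), nn.Dropout(dropout),
            nn.Linear(n_hidden, n_hidden), nn.Sigmoid(), nn.Dropout(dropout),
            nn.Linear(n_hidden, n_output))

    def forward(self, x):
        return self.net(x)

model = TinyFNN(n_input=8)
model.eval()  # nn.Dropout becomes a no-op in eval mode
print(model(torch.randn(4, 8)).shape)  # torch.Size([4, 1])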
prakhargurawa/Machine-Learning-A-Z
|
[
"f1b2dfbdfe67525c2d5061e66c3d31d612d35309",
"f1b2dfbdfe67525c2d5061e66c3d31d612d35309"
] |
[
"3_Classification/Model_Selection_Classification/random_forest_classification.py",
"2_Regression/Model_Selection_Regression/polynomial_regression.py"
] |
[
"# Random Forest Classification\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('ENTER_THE_NAME_OF_YOUR_DATASET_HERE.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Training the Random Forest Classification model on the Training set\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)\nclassifier.fit(X_train, y_train)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix, accuracy_score\ny_pred = classifier.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\naccuracy_score(y_test, y_pred)",
"# Polynomial Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('ENTER_THE_NAME_OF_YOUR_DATASET_HERE.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Training the Polynomial Regression model on the Training set\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\npoly_reg = PolynomialFeatures(degree = 4)\nX_poly = poly_reg.fit_transform(X_train)\nregressor = LinearRegression()\nregressor.fit(X_poly, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(poly_reg.transform(X_test))\nnp.set_printoptions(precision=2)\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\n\n# Evaluating the Model Performance\nfrom sklearn.metrics import r2_score\nr2_score(y_test, y_pred)"
] |
[
[
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.accuracy_score"
],
[
"pandas.read_csv",
"sklearn.metrics.r2_score",
"numpy.set_printoptions",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LinearRegression"
]
] |
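Both templates above read a placeholder CSV; a self-contained smoke test of the random-forest variant on synthetic data (the two-feature rule generating y is invented for illustration):

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
X = rng.randn(200, 4)
y = (X[:, 0] + X[:, 1] > 0).astype(int)  # made-up separable target
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
clf = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
clf.fit(X_tr, y_tr)
print(accuracy_score(y_te, clf.predict(X_te)))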
baptistesoulard/Production-plan-optimization
|
[
"38cd0501315c11fd0635d09e2869c54e2336f0bf"
] |
[
"temp/Model5.py"
] |
[
"# Import required packages\nimport pandas as pd\nimport gurobipy\nfrom matplotlib import pyplot as plt\nimport datetime\nfrom typing import List, Dict\n\n\ndef optimize_planning(\n timeline: List[str],\n workcenters: List[str],\n needs: Dict[str, int],\n wc_cost_reg: Dict[str, int],\n wc_cost_ot: Dict[str, int],\n wc_cost_we: Dict[str, int],\n inventory_cost: int,\n delay_cost: int,\n) -> pd.DataFrame:\n\n # Weekdays / Weekends\n weekdays = []\n weekend = []\n for i in timeline:\n date = datetime.datetime.strptime(i, \"%Y/%m/%d\")\n if date.weekday() < 5:\n weekdays.append(i)\n else:\n weekend.append(i)\n\n # Initiate optimization model\n model = gurobipy.Model(\"Optimize production planning\")\n\n # DEFINE VARIABLES\n # Load variables (hours) - regular and overtime\n reg_hours = model.addVars(\n timeline,\n workcenters,\n lb=7,\n ub=8,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"Regular hours\",\n )\n ot_hours = model.addVars(\n timeline,\n workcenters,\n lb=0,\n ub=4,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"OT hours\",\n )\n # Status of the line ( 0 = closed, 1 = opened)\n line_opening = model.addVars(\n timeline, workcenters, vtype=gurobipy.GRB.BINARY, name=\"Open\"\n )\n # Variable total load (hours)\n total_hours = model.addVars(\n timeline,\n workcenters,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"Total hours\",\n )\n # Variable cost\n cost = model.addVars(\n timeline, workcenters, vtype=gurobipy.GRB.CONTINUOUS, name=\"Cost\"\n )\n\n # Set the value of cost (hours * hourly cost)\n model.addConstrs(\n (\n cost[(date, wc)]\n == reg_hours[(date, wc)] * wc_cost_reg[wc] * line_opening[(date, wc)]\n + ot_hours[(date, wc)] * wc_cost_ot[wc] * line_opening[(date, wc)]\n for date in weekdays\n for wc in workcenters\n ),\n name=\"Cost weekdays\",\n )\n model.addConstrs(\n (\n cost[(date, wc)]\n == (reg_hours[(date, wc)] + ot_hours[(date, wc)])\n * wc_cost_we[wc]\n * line_opening[(date, wc)]\n for date in weekend\n for wc in workcenters\n ),\n name=\"Cost weekend\",\n )\n\n # Set the value of total load (regular + overtime)\n model.addConstrs(\n (\n total_hours[(date, wc)]\n == (reg_hours[(date, wc)] + ot_hours[(date, wc)]) * line_opening[(date, wc)]\n for date in timeline\n for wc in workcenters\n ),\n name=\"Total hours = reg + OT\",\n )\n\n # Constraint: Total hours of production = required production time\n model.addConstr(\n (\n gurobipy.quicksum(\n total_hours[(date, wc)] for date in timeline for wc in workcenters\n )\n == gurobipy.quicksum(needs[date] for date in timeline)\n ),\n name=\"Total hours = need\",\n )\n\n # Create variable \"early production\", \"late production\" and \"inventory costs\"\n # Gap early/late production\n gap_prod = model.addVars(\n timeline,\n lb=-10000,\n ub=10000,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"gapProd\",\n )\n abs_gap_prod = model.addVars(\n timeline,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"absGapProd\",\n )\n\n # Set the value of gap production vs need\n model.addConstrs(\n (\n (\n gap_prod[timeline[k]]\n == gurobipy.quicksum(\n total_hours[(date, wc)]\n for date in timeline[: k + 1]\n for wc in workcenters\n )\n - (gurobipy.quicksum(needs[date] for date in timeline[: k + 1]))\n )\n for k in range(len(timeline))\n ),\n name=\"gap prod\",\n )\n\n model.addConstrs(\n ((abs_gap_prod[date] == gurobipy.abs_(gap_prod[date])) for date in timeline),\n name=\"abs gap prod\",\n )\n\n # Create variable \"early production\" and \"inventory costs\"\n early_prod = model.addVars(\n timeline,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"early prod\",\n )\n 
inventory_costs = model.addVars(\n timeline,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"inventory costs\",\n )\n\n # Set the value of early production\n model.addConstrs(\n (\n (early_prod[date] == (gap_prod[date] + abs_gap_prod[date]) / 2)\n for date in timeline\n ),\n name=\"early prod\",\n )\n\n # Set the value of inventory costs\n model.addConstrs(\n (\n (inventory_costs[date] == early_prod[date] * inventory_cost)\n for date in timeline\n ),\n name=\"inventory costs\",\n )\n\n # Create variable \"late production\" and \"delay costs\"\n late_prod = model.addVars(\n timeline,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"early prod\",\n )\n delay_costs = model.addVars(\n timeline,\n vtype=gurobipy.GRB.CONTINUOUS,\n name=\"inventory costs\",\n )\n\n # Set the value of late production\n model.addConstrs(\n (\n (late_prod[date] == (abs_gap_prod[date] - gap_prod[date]) / 2)\n for date in timeline\n ),\n name=\"late prod\",\n )\n\n # Set the value of delay costs\n model.addConstrs(\n ((delay_costs[date] == late_prod[date] * delay_cost) for date in timeline),\n name=\"delay costs\",\n )\n\n # DEFINE MODEL\n # Objective : minimize a function\n model.ModelSense = gurobipy.GRB.MINIMIZE\n # Function to minimize\n optimization_var = (\n gurobipy.quicksum(cost[(date, wc)] for date in timeline for wc in workcenters)\n + gurobipy.quicksum(inventory_costs[date] for date in timeline)\n + gurobipy.quicksum(delay_costs[date] for date in timeline)\n )\n\n objective = 0\n objective += optimization_var\n\n # SOLVE MODEL\n model.setObjective(objective)\n model.optimize()\n\n sol = pd.DataFrame(data={\"Solution\": model.X}, index=model.VarName)\n sol = sol.filter(like=\"Total hours\", axis=0)\n\n print(\"Total cost = $\" + str(model.ObjVal))\n\n # FORMAT THE RESULT\n planning = sol\n planning[\"Date\"] = list(planning.index.values)\n planning[[\"Date\", \"Line\"]] = planning[\"Date\"].str.split(\",\", expand=True)\n planning[\"Date\"] = planning[\"Date\"].str.split(\"[\").str[1]\n planning[\"Line\"] = planning[\"Line\"].str.split(\"]\").str[0]\n planning = planning.pivot(index=\"Line\", columns=\"Date\", values=\"Solution\")\n\n return planning\n\n\ndef plot_planning(plan, need):\n plan = plan.T\n plan[\"Min capacity\"] = 7\n plan[\"Max capacity\"] = 12\n\n my_colors = [\"skyblue\", \"salmon\", \"lightgreen\"]\n\n fig, axs = plt.subplots(2)\n need.plot(\n kind=\"bar\",\n width=0.2,\n title=\"Need in h per day\",\n ax=axs[0],\n color=\"midnightblue\",\n )\n\n plan[[\"Min capacity\", \"Max capacity\"]].plot(\n rot=90, ax=axs[1], style=[\"b\", \"b--\"], linewidth=1\n )\n\n plan.drop([\"Min capacity\", \"Max capacity\"], axis=1).plot(\n kind=\"bar\", title=\"Load in h per line\", ax=axs[1], color=my_colors\n )\n\n axs[0].tick_params(axis=\"x\", labelsize=7)\n axs[0].tick_params(axis=\"y\", labelsize=7)\n axs[0].get_legend().remove()\n axs[0].set_xticklabels([])\n axs[1].tick_params(axis=\"x\", labelsize=7)\n axs[1].tick_params(axis=\"y\", labelsize=7)\n axs[1].get_legend().remove()\n\n plt.savefig(\"Result_Model5.png\", bbox_inches=\"tight\", dpi=1200)\n axe = plt.show()\n return axe\n\n\n# Define the daily requirement\ndaily_requirements: Dict[str, int] = {\n \"2020/7/13\": 30,\n \"2020/7/14\": 10,\n \"2020/7/15\": 34,\n \"2020/7/16\": 23,\n \"2020/7/17\": 23,\n \"2020/7/18\": 24,\n \"2020/7/19\": 25,\n}\n\ncalendar: List[str] = list(daily_requirements.keys())\ndaily_requirements_df = pd.DataFrame.from_dict(daily_requirements, orient=\"index\")\n\n# Define the hourly cost per line - regular, overtime and 
weekend\nreg_costs_per_line = {\"Curtain_C1\": 350, \"Curtain_C2\": 300, \"Curtain_C3\": 350}\not_costs_per_line = {\n k: 1.5 * reg_costs_per_line[k] for k, v in reg_costs_per_line.items()\n}\nwe_costs_per_line = {\n k: 2 * reg_costs_per_line[k] for k, w in reg_costs_per_line.items()\n}\n\nearly_prod_cost = 17\nlate_prod_cost = 100\n\nlines: List[str] = list(reg_costs_per_line.keys())\n\n# Optimize the planning\nsolution = optimize_planning(\n calendar,\n lines,\n daily_requirements,\n reg_costs_per_line,\n ot_costs_per_line,\n we_costs_per_line,\n early_prod_cost,\n late_prod_cost,\n)\n\n# Plot the new planning\nplot_planning(solution, daily_requirements_df)\n"
] |
[
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.show"
]
] |
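A reduced sketch of the addVars / addConstr / quicksum pattern the planner above is built from; it needs a gurobipy install with a license, and the two-day demand and cost numbers are invented:

import gurobipy as gp

m = gp.Model("mini_plan")
days = ["d1", "d2"]
hours = m.addVars(days, lb=0, ub=12, vtype=gp.GRB.CONTINUOUS, name="hours")
m.addConstr(gp.quicksum(hours[d] for d in days) == 15, name="demand")
m.setObjective(350 * hours["d1"] + 300 * hours["d2"], gp.GRB.MINIMIZE)
m.optimize()
print({d: hours[d].X for d in days})  # expect the cheaper day maxed out first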
bensondaled/puffsopto
|
[
"fdcf2fabb50c2bd2521a3d2702ad2e41217c0032"
] |
[
"figs/figure_s7.py"
] |
[
"\"\"\"\nFigure S7: logistic regression model, by subj\n\"\"\"\nimport matplotlib.pyplot as pl\nfrom figure_panels import *\n\n## Setup figure ##\nfig_id = 'sfig7'\nfigw,figh = 7.2,5.\nfig = pl.figure(fig_id, figsize=(figw,figh))\n\nrow_bottoms = [.85,.7,.55,.4,.25,.1]\nletter_ys = [.8, .6, 0, 0, 0, 0, 0, 0]\nletter_xs = [.01, .21, .01, .01, .8,.01,.01,.01,.01]\nletters = ['','','', '', '','','','','']\n#letters = [l.upper() for l in letters]\n\nlet_kw = dict(fontsize=9, fontname='Arial', weight='bold')\n\n# boxes: row_id, x, w, h (w/h in inches)\nboxes = [ \n \n [ 0, # 1\n 0.07,\n 6. ,\n .5 ],\n \n [ 1, # 2\n 0.07,\n 6.,\n .5 ],\n \n [ 2, # 3\n 0.07,\n 6.,\n .5 ],\n \n [ 3, # 4\n 0.07,\n 6.,\n .5 ],\n \n [ 4, # 5\n 0.07,\n 6.,\n .5 ],\n \n [ 5, # 6\n 0.07,\n 6.,\n .5 ],\n \n ]\n\n# draw letters\nfor lx,letter,(row_id,*_) in zip(letter_xs, letters, boxes):\n fig.text(lx, letter_ys[row_id], letter, **let_kw)\n# convert panel w/h to fractions\nboxes = [[b[0], b[1], b[2]/figw, b[3]/figh] for b in boxes]\n# convert row_ids to y positions\nboxes = [[b[1], row_bottoms[b[0]], b[2], b[3]] for b in boxes]\n# draw axes\naxs = [fig.add_axes(box) for box in boxes]\n\n## Draw panels\n#axs = regs_xval(axs, panel_id=0)\naxs = reg_by_subj(axs, panel_id=0, manip=2, title=True)\naxs = reg_by_subj(axs, panel_id=1, manip=3)\naxs = reg_by_subj(axs, panel_id=2, manip=4)\naxs = reg_by_subj(axs, panel_id=3, manip=5, ylab=True)\naxs = reg_by_subj(axs, panel_id=4, manip=6)\naxs = reg_by_subj(axs, panel_id=5, manip=7, xticklabs=True)\n\nprettify_axes(axs)\n\npl.savefig('/Users/ben/Desktop/{}.pdf'.format(fig_id), dpi=500)\npl.close('all')\n"
] |
[
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
] |
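The script above positions panels with fig.add_axes([left, bottom, width, height]) in figure fractions, dividing inch sizes by the figure dimensions; the conversion in isolation:

import matplotlib.pyplot as plt

figw, figh = 7.2, 5.0
fig = plt.figure(figsize=(figw, figh))
# a panel 6 in wide and 0.5 in tall, anchored at left=0.07, bottom=0.55 (fractions)
ax = fig.add_axes([0.07, 0.55, 6.0 / figw, 0.5 / figh])
ax.plot([0, 1], [0, 1])
plt.close('all')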
Velocities/stocker
|
[
"a163971c60dd70a2c20a6d23add33c25036efe46"
] |
[
"stocker/get_data.py"
] |
[
"import pandas as pd\r\nimport yfinance as yf\r\nimport requests\r\nimport datetime as dt\r\nfrom pytrends.request import TrendReq\r\n\r\n\r\ndef main(stock, years=1): # function to get data from Yahoo Finance\r\n end = dt.datetime.today().strftime('%Y-%m-%d') # today as the end date\r\n start = (dt.datetime.today() - dt.timedelta(days=365*years)).strftime('%Y-%m-%d') # 1 year ago as start\r\n df = yf.download(stock, start, end)\r\n\r\n return df, start, end\r\n\r\n\r\ndef company_name(stock): # function to get the company's name from the stock\r\n url = \"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={}®ion=1&lang=en\".format(stock) # source\r\n company = requests.get(url).json()['ResultSet']['Result'][0]['name'] # saving the name as 'company'\r\n\r\n return company\r\n\r\n\r\ndef get_interest(company, timeframe): # base function to get 'interest' from Google Trends\r\n pytrend = TrendReq() # accessing to Google Trends using pytrends package\r\n pytrend.build_payload(kw_list=[company], timeframe=timeframe) # finding interest for 'company' during 'timeframe'\r\n result = pytrend.interest_over_time().drop('isPartial', axis=1) # saving the 'interest' values\r\n\r\n return result\r\n\r\n\r\ndef add_interest(df, company, years=1): # main function to get 'interest' from Google Trends\r\n delta = int((365 * years / 73) - 1) # dividing the year in groups of 73 days\r\n since = (dt.datetime.today() - dt.timedelta(days=365 * years)).strftime('%Y-%m-%d')\r\n until = (dt.datetime.today() - dt.timedelta(days=73 * delta)).strftime('%Y-%m-%d')\r\n timeframe = since + ' ' + until # setting the required format\r\n trends = get_interest(company, timeframe) # get the values for the first 73 days\r\n for x in range(delta): # get the values for the rest of the year\r\n since = (dt.datetime.today() - dt.timedelta(days=73 * (delta - x))).strftime('%Y-%m-%d')\r\n until = (dt.datetime.today() - dt.timedelta(days=73 * (delta - 1 - x))).strftime('%Y-%m-%d')\r\n timeframe = since + ' ' + until\r\n trends.append(get_interest(company, timeframe))\r\n\r\n trends.rename(columns={company: 'Interest'}, inplace=True) # changing title to 'Interest'\r\n trends.index.names = ['Date']\r\n df = df.merge(trends, how='left', on='Date') # Add Interest column from Google Trends API - pytrends\r\n df.Interest.interpolate(inplace=True) # interpolation for missing values\r\n\r\n return df\r\n\r\n\r\ndef add_wiki_views(df, company, start, end): # function to get number of page views from Wikipedia\r\n start = start.replace('-', '')\r\n end = end.replace('-', '')\r\n link = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents' \\\r\n '/{company}/daily/{st}/{end}'.format(company=company, st=start, end=end)\r\n r = requests.Session()\r\n r.headers = {\"User-Agent\": \"stocker/0.1.7 (https://github.com/jcamiloangarita/stocker; [email protected])\"}\r\n response = r.get(link)\r\n wiki_data = response.json() # get the data from Wikipedia API\r\n views = [i['views'] for i in wiki_data['items']] # saving views values\r\n date = [i['timestamp'] for i in wiki_data['items']] # saving dates\r\n date = [dt.datetime.strptime(date[:-2], '%Y%m%d').date().strftime('%Y-%m-%d') for date in date] # change format\r\n wiki_views = pd.DataFrame(views, index=date, columns=['Wiki_views'])\r\n wiki_views.index.name = 'Date'\r\n wiki_views.index = pd.to_datetime(wiki_views.index)\r\n\r\n df = df.merge(wiki_views, how='left', on='Date') # Add Wiki_views column from Wikipedia API\r\n 
df.Wiki_views.ffill(inplace=True)\r\n\r\n return df\r\n\r\n\r\ndef add_rsi(df, period): # function to Calculate RSI values\r\n df['Change'] = df.Close - df.Open # calculating gains and losses in a new column\r\n df['Gain'] = df.Change[df.Change > 0] # new column of gains\r\n df['Loss'] = df.Change[df.Change < 0] * (-1) # new column of losses\r\n df.drop(columns=['Change'], inplace=True) # remove the column change\r\n\r\n # Filling missing values with 0\r\n df.Gain.fillna(0, inplace=True)\r\n df.Loss.fillna(0, inplace=True)\r\n\r\n df['Again'] = df.Gain.rolling(period).mean() # calculate the average gain in the last 14 periods\r\n df['Aloss'] = df.Loss.rolling(period).mean() # calculate the average loss in the last 14 periods\r\n\r\n df['RS'] = df.Again / df.Aloss # calculating RS\r\n df['RSI'] = 100 - (100 / (1 + (df.Again / df.Aloss))) # calculating RSI\r\n df.drop(columns=['Gain', 'Loss', 'Again', 'Aloss', 'RS'], inplace=True) # remove undesired columns\r\n\r\n return df\r\n\r\n\r\ndef add_k(df, period): # Calculate Stochastic Oscillator (%K)\r\n df['L14'] = df.Low.rolling(period).min() # find the lowest price in the last 14 periods\r\n df['H14'] = df.High.rolling(period).max() # find the highest price in the last 14 periods\r\n df['%K'] = ((df.Close - df.L14) / (df.H14 - df.L14)) * 100\r\n df.drop(columns=['L14', 'H14'], inplace=True) # remove columns L14 and H14\r\n\r\n return df\r\n\r\n\r\ndef add_r(df, period): # Calculate Larry William indicator (%R)\r\n df['HH'] = df.High.rolling(period).max() # find the highest high price in the last 14 periods\r\n df['LL'] = df.Low.rolling(period).min() # find the lowest low price in the last 14 periods\r\n df['%R'] = ((df.HH - df.Close) / (df.HH - df.LL)) * (-100)\r\n df.drop(columns=['HH', 'LL'], inplace=True) # remove columns HH and LL\r\n\r\n return df\r\n\r\n\r\ndef total(stock, years=1, interest=False, wiki_views=False, indicators=False, period=14):\r\n # main function to combine data from Yahoo Finance, Google Trends, Wikipedia and calculated indicators.\r\n df, start, end = main(stock, years=years) # get data from Yahoo Finance and define star and end\r\n company = company_name(stock) # get the name of the company\r\n\r\n if interest:\r\n df = add_interest(df, company, years=years) # adding Interest from Google Trends.\r\n\r\n if wiki_views:\r\n df = add_wiki_views(df, company, start, end) # adding Wiki Views\r\n\r\n if indicators: # Adding indicators\r\n df = add_k(df, period) # generating %K column.\r\n df = add_r(df, period) # generating %R column.\r\n df = add_rsi(df, period) # generating RSI column.\r\n\r\n df = df.dropna() # drop rows with missing data\r\n\r\n return df\r\n\r\n\r\ndef correlation(stock, years=1, interest=False, wiki_views=False, indicators=False, period=14, complete=True, limit=0.5):\r\n # function to get the Pearson correlation coefficients for all the features\r\n\r\n df = total(stock, years, interest, wiki_views, indicators, period)\r\n\r\n if complete:\r\n features = df.corr().Close\r\n else: # only the coefficients against the close prices\r\n features = df.corr().Close[df.corr().Close > limit].index.tolist()\r\n\r\n return features\r\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
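A self-contained check of the rolling RSI arithmetic in add_rsi above, on synthetic prices; note it uses close-to-close changes (a common variant) where the file uses Close - Open:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
close = pd.Series(100 + rng.randn(100).cumsum())  # synthetic price path
delta = close.diff()
gain = delta.clip(lower=0).rolling(14).mean()
loss = (-delta.clip(upper=0)).rolling(14).mean()
rsi = 100 - 100 / (1 + gain / loss)
print(rsi.dropna().head())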
aryaman4/ludwig
|
[
"76ddea4634e4dcc1c0f956f2e61d80b0c3621a81"
] |
[
"ludwig/utils/math_utils.py"
] |
[
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport math\n\nimport numpy as np\n\n\ndef softmax(x, temperature=1.0):\n e_x = np.exp((x - np.max(x)) / temperature)\n return e_x / e_x.sum()\n\n\ndef int_type(number):\n if number <= np.iinfo(np.int8).max:\n return np.int8\n elif number <= np.iinfo(np.int16).max:\n return np.int16\n elif number <= np.iinfo(np.int32).max:\n return np.int32\n else: # if number <= np.iinfo(np.int64).max:\n return np.int64\n\n\ndef convert_size(size_bytes):\n if size_bytes == 0:\n return '0B'\n size_name = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return '{} {}'.format(s, size_name[i])\n\n\ndef learning_rate_warmup_distributed(\n learning_rate,\n epoch,\n warmup_epochs,\n num_workers,\n curr_step,\n steps_per_epoch\n):\n \"\"\"Implements gradual learning rate warmup:\n `lr = initial_lr / hvd.size()` ---> `lr = initial_lr`\n `initial_lr` is the learning rate of the model optimizer at the start\n of the training. This technique was described in the paper\n \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\".\n See https://arxiv.org/pdf/1706.02677.pdf for details.\n\n Inspired by Horovod's implementation:\n https://github.com/uber/horovod/blob/master/horovod/keras/callbacks.py#L202\n Math recap:\n curr_step\n epoch = full_epochs + ---------------\n steps_per_epoch\n lr size - 1\n lr'(epoch) = ---- * (-------- * epoch + 1)\n size warmup\n lr\n lr'(epoch = 0) = ----\n size\n lr'(epoch = warmup) = lr\n \"\"\"\n if epoch > warmup_epochs:\n return learning_rate\n else:\n epoch_adjusted = float(epoch) + (curr_step / steps_per_epoch)\n return learning_rate / num_workers * \\\n (epoch_adjusted * (num_workers - 1) / warmup_epochs + 1)\n\n\ndef learning_rate_warmup(\n learning_rate,\n epoch,\n warmup_epochs,\n curr_step,\n steps_per_epoch\n):\n global_curr_step = 1 + curr_step + epoch * steps_per_epoch\n warmup_steps = warmup_epochs * steps_per_epoch\n\n warmup_percent_done = global_curr_step / warmup_steps\n warmup_learning_rate = learning_rate * warmup_percent_done\n\n is_warmup = int(global_curr_step < warmup_steps)\n interpolated_learning_rate = (\n (1.0 - is_warmup) * learning_rate +\n is_warmup * warmup_learning_rate\n )\n\n return interpolated_learning_rate\n\n\ndef round2precision(val, precision: int = 0, which: str = ''):\n assert precision >= 0\n val *= 10 ** precision\n round_callback = round\n if which.lower() == 'up':\n round_callback = math.ceil\n if which.lower() == 'down':\n round_callback = math.floor\n return '{1:.{0}f}'.format(precision, round_callback(val) / 10 ** precision)\n"
] |
[
[
"numpy.max",
"numpy.iinfo"
]
] |
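A quick numeric trace of learning_rate_warmup above with curr_step fixed at 0 (values chosen arbitrarily); the interpolated rate climbs linearly and reaches the base rate once warmup ends:

lr, warmup_epochs, steps_per_epoch = 0.1, 5, 100
for epoch in (0, 2, 5):
    global_step = 1 + 0 + epoch * steps_per_epoch
    warmup_steps = warmup_epochs * steps_per_epoch
    pct = global_step / warmup_steps
    is_warmup = int(global_step < warmup_steps)
    print(epoch, (1.0 - is_warmup) * lr + is_warmup * lr * pct)
# epoch 0 -> 0.0002, epoch 2 -> ~0.0402, epoch 5 -> 0.1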
denizetkar/lstms.pth
|
[
"c1d6af1e106e17c51604ae8acdb5114828adff19"
] |
[
"test/test_correctness.py"
] |
[
"#!/usr/bin/env python\n\nimport torch as th\nimport torch.nn as nn\nfrom torch.autograd import Variable as V\n\nfrom lstms import SlowLSTM, LSTM, GalLSTM, MoonLSTM, SemeniutaLSTM\n\n\nif __name__ == '__main__':\n lstms = [\n (SlowLSTM, 'SlowLSTM'),\n (LSTM, 'LSTM'),\n ]\n for lstm, name in lstms:\n th.manual_seed(1234)\n x = V(th.rand(1, 1, 256))\n hiddens = (V(th.rand(1, 1, 256)), V(th.rand(1, 1, 256)))\n ref = nn.LSTM(256, 256, bias=False, dropout=0.0)\n cus = lstm(256, 256, bias=False, dropout=0.0)\n\n # Make sure they have the same parameters:\n val = th.rand(1)[0]\n for c in cus.parameters():\n c.data.fill_(val)\n for r in ref.parameters():\n r.data.fill_(val)\n\n objective = V(th.zeros(1, 256))\n\n i, j = x.clone(), [h.clone() for h in hiddens]\n g, h = x.clone(), [h.clone() for h in hiddens]\n for _ in range(10):\n i, j = ref(i, j)\n g, h = cus(g, h)\n assert(th.equal(g.data, i.data))\n assert(th.equal(j[0].data, h[0].data))\n assert(th.equal(j[1].data, h[1].data))\n ref_loss = th.sum((i - objective)**2)\n cus_loss = th.sum((g - objective)**2)\n ref_loss.backward(retain_graph=True)\n cus_loss.backward(retain_graph=True)\n print('Correct: ', name)\n print('Test passed')\n"
] |
[
[
"torch.zeros",
"torch.nn.LSTM",
"torch.manual_seed",
"torch.sum",
"torch.equal",
"torch.rand"
]
] |
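The equivalence test above hinges on filling both modules with identical weights before comparing outputs; that trick in isolation, at a toy size:

import torch as th
import torch.nn as nn

th.manual_seed(1234)
a = nn.LSTM(8, 8, bias=False)
b = nn.LSTM(8, 8, bias=False)
val = th.rand(1).item()
for p in list(a.parameters()) + list(b.parameters()):
    p.data.fill_(val)  # identical constant weights in both modules
x = th.rand(1, 1, 8)
ya, _ = a(x)
yb, _ = b(x)
assert th.equal(ya, yb)  # same weights, same input -> same output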
lisiyuan656/datasets
|
[
"b097e0985eaaadc6b0c1f4dfa3b3cf88d116c607"
] |
[
"tensorflow_datasets/core/deprecated/text/subword_text_encoder_test.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=utf-8\n\"\"\"Tests for tensorflow_datasets.core.deprecated.text.subword_text_encoder.\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom absl.testing import parameterized\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets import testing\nfrom tensorflow_datasets.core import utils\nfrom tensorflow_datasets.core.deprecated.text import subword_text_encoder\nfrom tensorflow_datasets.core.deprecated.text import text_encoder\n\nTEST_DATA_DIR = os.path.join(utils.tfds_write_path(), 'testing', 'test_data')\n\n\nclass SubwordTextEncoderTest(parameterized.TestCase, testing.TestCase):\n\n def setUp(self):\n super(SubwordTextEncoderTest, self).setUp()\n # Vocab ids will be (offset for pad=0):\n # 1 2 3 4 5\n self.vocab_list = ['foo_', 'bar_', 'foo', 'bar', '<EOS>']\n self.encoder = subword_text_encoder.SubwordTextEncoder(\n vocab_list=self.vocab_list)\n\n def test_vocab_size(self):\n # Bytes + pad + subwords\n self.assertEqual((256 + 1 + len(self.vocab_list)), self.encoder.vocab_size)\n\n @parameterized.parameters(\n ('foo bar', [1, 4]),\n ('foobar foo bar<EOS>bar', [3, 2, 1, 4, 5, 4]),\n # Respects whitespace\n ('bar <EOS>bar', [2, 5, 4]),\n ('bar <EOS> bar', [2, 5, 38, 4]),\n ('bar<EOS> bar', [4, 5, 38, 4]),\n # Invertible even with oov, respecting underscores and backslashes\n ('a_b!', [103, 101, 104, 39]),\n ('foo \\\\bar_!', [3, 38, 98, 4, 101, 39]),\n ('foo \\\\\\\\bar_!', [3, 38, 98, 98, 4, 101, 39]),\n ('hello world!', None),\n ('foo_ bar', None),\n ('foo _ bar', None),\n ('foo _bar', None),\n ('hello_world', None),\n ('hello_ world', None),\n ('hello _ world', None),\n ('hello _world', None),\n ('_', None),\n # Test that the underscore replacement string is unharmed\n ('\\\\&undsc', None),\n # Unicode encoded as bytes but decoded back to unicode character\n ('你', [234, 195, 166]),\n )\n def test_encode_decode(self, text, expected_ids):\n ids = self.encoder.encode(text)\n # Test ids match if ids provided\n if expected_ids:\n self.assertEqual(expected_ids, ids)\n # Test invertibility\n self.assertEqual(tf.compat.as_text(text), self.encoder.decode(ids))\n\n def test_bad_bytes(self):\n valid_unicode = '你'\n bad_bytes = [220 + len(self.vocab_list) + 1]\n bad_ids = self.encoder.encode('你') + bad_bytes\n text = self.encoder.decode(bad_ids)\n # Valid unicode character preserved\n self.assertEqual(valid_unicode, text[0])\n # Invalid byte converted to unknown character\n self.assertEqual('\\uFFFD', text[1])\n\n def test_vocab_file(self):\n vocab_file = os.path.join(self.get_temp_dir(), 'vocab')\n self.encoder.save_to_file(vocab_file)\n encoder = subword_text_encoder.SubwordTextEncoder.load_from_file(vocab_file)\n self.assertEqual(encoder.subwords, self.vocab_list)\n\n\nclass SubwordTextEncoderBuildTest(testing.TestCase):\n\n def test_build(self):\n text_gen = lorem_ipsum_generator\n build_fn = 
subword_text_encoder.SubwordTextEncoder.build_from_corpus\n encoder = build_fn(text_gen(), 300)\n # Created some subwords\n self.assertGreater(encoder.vocab_size, text_encoder.NUM_BYTES + 1)\n\n base_encoder = subword_text_encoder.SubwordTextEncoder(vocab_list=[])\n for line in text_gen():\n # Invertible\n encoded = encoder.encode(line)\n self.assertEqual(line, encoder.decode(encoded))\n # Shorter than base\n if len(line) > 2:\n self.assertLess(len(encoded), len(base_encoder.encode(line)))\n\n def test_build_with_unicode(self):\n text_gen = lorem_ipsum_zh_generator\n build_fn = subword_text_encoder.SubwordTextEncoder.build_from_corpus\n encoder = build_fn(text_gen(), 300)\n # Created some subwords\n self.assertGreater(encoder.vocab_size, text_encoder.NUM_BYTES + 1)\n\n base_encoder = subword_text_encoder.SubwordTextEncoder(vocab_list=[])\n for line in text_gen():\n # Invertible\n encoded = encoder.encode(line)\n self.assertEqual(line, encoder.decode(encoded))\n # Shorter than base\n if len(line) > 2:\n self.assertLess(len(encoded), len(base_encoder.encode(line)))\n\n def test_max_subword_length(self):\n text_gen = lorem_ipsum_generator\n build_fn = subword_text_encoder.SubwordTextEncoder.build_from_corpus\n encoder = build_fn(text_gen(), 300, max_subword_length=1)\n # Created no subwords because there are no unicode characters in lorem ipsum\n # and single byte subwords are skipped because all bytes are in the vocab by\n # default.\n self.assertEqual(encoder.vocab_size, text_encoder.NUM_BYTES + 1)\n self.assertEqual(len(encoder.subwords), 0)\n\n # Not the case when there are unicode characters\n text_gen = lorem_ipsum_zh_generator\n build_fn = subword_text_encoder.SubwordTextEncoder.build_from_corpus\n encoder = build_fn(text_gen(), 300, max_subword_length=1)\n self.assertGreater(encoder.vocab_size, text_encoder.NUM_BYTES + 1)\n self.assertGreater(len(encoder.subwords), 0)\n\n def test_max_chars(self):\n text_gen = lorem_ipsum_zh_generator\n build_fn = subword_text_encoder.SubwordTextEncoder.build_from_corpus\n encoder = build_fn(text_gen(), 300, max_corpus_chars=1)\n self.assertGreater(encoder.vocab_size, text_encoder.NUM_BYTES + 1)\n self.assertEqual(1, len(encoder.subwords))\n first_letter = next(lorem_ipsum_zh_generator())[0]\n self.assertEqual(first_letter, encoder.subwords[0])\n\n def test_reserved_tokens(self):\n text_gen = lorem_ipsum_generator\n build_fn = subword_text_encoder.SubwordTextEncoder.build_from_corpus\n encoder = build_fn(text_gen(), 300, reserved_tokens=['<EOS>', '<EOD>'])\n self.assertEqual(2, encoder.encode('Lorem<EOD>')[-1])\n self.assertEqual(2, encoder.encode('Lorem<EOD>a')[-2])\n self.assertEqual(2, encoder.encode('Lorem<EOD>{')[-2])\n self.assertEqual(2, encoder.encode('Lorem<EOD> ')[-2])\n self.assertEqual('<EOS> <EOD>', encoder.decode([1, 78, 2]))\n self.assertEqual(['<EOS>', '<EOD>'], encoder.subwords[:2])\n\n\ndef _yield_lines_from_file(txt_file):\n with tf.io.gfile.GFile(txt_file, 'rb') as f:\n for line in f:\n yield tf.compat.as_text(line)\n\n\ndef lorem_ipsum_generator():\n txt_file = os.path.join(TEST_DATA_DIR, 'lorem_ipsum.txt')\n return _yield_lines_from_file(txt_file)\n\n\ndef lorem_ipsum_zh_generator():\n txt_file = os.path.join(TEST_DATA_DIR, 'lorem_ipsum_zh.txt')\n return _yield_lines_from_file(txt_file)\n\n\nif __name__ == '__main__':\n testing.test_main()\n"
] |
[
[
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.compat.as_text"
]
] |
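A minimal round-trip with the encoder under test, using only the constructor and encode/decode calls exercised above (assumes tensorflow_datasets is installed; the vocab and the expected [1, 4] ids come from the test itself):

from tensorflow_datasets.core.deprecated.text import subword_text_encoder

enc = subword_text_encoder.SubwordTextEncoder(
    vocab_list=['foo_', 'bar_', 'foo', 'bar', '<EOS>'])
ids = enc.encode('foo bar')
print(ids)  # [1, 4] per the parameterized case in the test
assert enc.decode(ids) == 'foo bar'  # invertibility, as the test checks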
ConnorJL/ProgGAN-PyTorch
|
[
"a64aec9640a094f5dc09184677c13236574f69a2"
] |
[
"ProgGAN.py"
] |
[
"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom scipy.misc import imsave\nfrom torch.autograd import Variable\nfrom torch.nn.init import kaiming_normal, calculate_gain\n\n\nclass ProgGAN(object):\n def __init__(self, nz=512, lr=0.0010):\n self.nz = nz # Dimension of noise vector\n self.lr = lr\n\n self.current_size = 4\n self.batch_size = 16\n\n # Create Networks\n self.disc = Discriminator()\n self.gen = Generator()\n\n # Create Optimizers\n self.optimizerD = torch.optim.Adam(self.disc.parameters(), lr=self.lr, betas=(0, 0.99))\n self.optimizerG = torch.optim.Adam(self.gen.parameters(), lr=self.lr, betas=(0, 0.99))\n self.criterion = torch.nn.MSELoss()\n\n # Helper Tensors\n self.input = torch.FloatTensor(self.batch_size, 3, self.current_size, self.current_size)\n self.noise = torch.FloatTensor(self.batch_size, self.nz, 1, 1)\n self.label = torch.FloatTensor(self.batch_size)\n\n def cuda(self):\n self.disc.cuda()\n self.gen.cuda()\n self.criterion.cuda()\n self.input = self.input.cuda()\n self.noise = self.noise.cuda()\n self.label = self.label.cuda()\n\n def state_dict(self):\n state = {\n \"disc\": self.disc.state_dict(),\n \"gen\": self.gen.state_dict(),\n \"optD\": self.optimizerD.state_dict(),\n \"optG\": self.optimizerG.state_dict(),\n \"alpha\": self.disc.alpha,\n \"current_size\": self.current_size,\n }\n\n return state\n\n def load(self, state):\n while self.current_size < state[\"current_size\"]:\n self.grow()\n self.disc.load_state_dict(state[\"disc\"])\n self.gen.load_state_dict(state[\"gen\"])\n self.optimizerD.load_state_dict(state[\"optD\"])\n self.optimizerG.load_state_dict(state[\"optG\"])\n self.disc.alpha = state[\"alpha\"]\n self.gen.alpha = state[\"alpha\"]\n\n def sample(self, path=None, size=9, torch_style=False):\n imgs = []\n while len(imgs) < size*size:\n self.noise.resize_(self.batch_size, self.nz, 1, 1).normal_(0, 1)\n noisev = Variable(self.noise)\n imgb = self.gen(noisev).data.cpu()\n if not torch_style:\n imgb = torch.transpose(imgb, 1, 3).numpy()\n for i in range(self.batch_size):\n imgs.append(imgb[i])\n if len(imgs) == size*size:\n if path is not None and not torch_style:\n self.color_grid_vis(imgs, size, size, save_path=path)\n return imgs\n\n def color_grid_vis(self, X, nh, nw, save_path=None):\n h, w = X[0].shape[:2]\n img = np.zeros((h*nh, w*nw, 3))\n for n, x in enumerate(X):\n j = int(n/nw)\n i = int(n%nw)\n img[j*h:j*h+h, i*w:i*w+w, :] = x\n if save_path is not None:\n imsave(save_path, img)\n return img\n\n def grow(self):\n self.disc.grow()\n self.gen.grow()\n self.current_size *= 2\n if self.current_size == 256:\n self.batch_size = 14\n elif self.current_size == 512:\n self.batch_size = 6\n elif self.current_size == 1024:\n self.batch_size = 3\n\n self.optimizerD = torch.optim.Adam(self.disc.parameters(), lr=self.lr, betas=(0, 0.99))\n self.optimizerG = torch.optim.Adam(self.gen.parameters(), lr=self.lr, betas=(0, 0.99))\n\n self.input = torch.FloatTensor(self.batch_size, 3, self.current_size, self.current_size)\n\n self.cuda()\n\n def increase_alpha(self, amount):\n self.disc.alpha += amount\n self.gen.alpha += amount\n\n def set_lr(self, lr):\n for param_group in self.optimizerD.param_groups:\n param_group['lr'] = lr\n for param_group in self.optimizerG.param_groups:\n param_group['lr'] = lr\n\n self.lr = lr\n\n # Adapted from https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py\n def calc_gradient_penalty(self, netD, real_data, fake_data):\n alpha = 
torch.rand(self.batch_size, 1)\n alpha = alpha.expand(self.batch_size, int(real_data.nelement()/self.batch_size)).contiguous().view(self.batch_size, 3, self.current_size, self.current_size)\n alpha = alpha.cuda()\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n interpolates = interpolates.cuda()\n interpolates = Variable(interpolates, requires_grad=True)\n\n disc_interpolates = netD(interpolates)\n\n gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size()).cuda(),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n gradients = gradients.view(gradients.size(0), -1)\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10\n return gradient_penalty\n\n def train(self, data):\n # Train with real\n self.disc.zero_grad()\n data = data.cuda()\n self.input.resize_as_(data).copy_(data)\n self.label.resize_(self.batch_size).fill_(1)\n inputv = Variable(self.input)\n labelv = Variable(self.label)\n\n output = self.disc(inputv)\n real_output = output\n errD_real = self.criterion(output, labelv)\n errD_real.backward(retain_graph=True)\n\n errD_drift = torch.mean((real_output**2)) * 0.001\n errD_drift.backward()\n\n # Train with fake\n self.noise.resize_(self.batch_size, self.nz, 1, 1).normal_(0, 1)\n noisev = Variable(self.noise)\n fake = self.gen(noisev)\n self.label.resize_(self.batch_size)\n labelv = Variable(self.label.fill_(0))\n output = self.disc(fake.detach())\n errD_fake = self.criterion(output, labelv)\n errD_fake.backward()\n\n # Train with gradient penalty\n errD_grad = self.calc_gradient_penalty(self.disc, inputv.data, fake.data)\n errD_grad.backward()\n\n errD = errD_real + errD_fake + errD_grad + errD_drift\n self.optimizerD.step()\n\n # Train G\n self.gen.zero_grad()\n labelv = Variable(self.label.fill_(1))\n output = self.disc(fake)\n errG = self.criterion(output, labelv)\n errG.backward()\n self.optimizerG.step()\n\n return errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], errD_grad.data[0], errD_drift.data[0]\n\n\nclass Generator(nn.Module):\n def __init__(self, start_size=4):\n super(Generator, self).__init__()\n self.blocks = []\n self.size = start_size\n self.alpha = 1.0\n\n block = []\n block.append(nn.ConvTranspose2d(512, 512, 4))\n block.append(nn.Conv2d(512, 512, 3, padding=1))\n he_init(block[0])\n he_init(block[1])\n self.blocks.append(block)\n self.add_module(\"block0/0\", block[0])\n self.add_module(\"block0/1\", block[1])\n\n self.toRGB = nn.Conv2d(512, 3, 1)\n he_init(self.toRGB)\n self.add_module(\"toRGBm\", self.toRGB)\n self.toRGB2 = None\n\n if not start_size == 4:\n for i in range(int(math.log(start_size, 2)-2)):\n self.grow()\n\n def grow(self):\n self.size = self.size*2\n step = int(math.log(self.size, 2)-1)\n block = []\n if step < 5:\n filters = 512\n block.append(nn.Conv2d(filters, filters, 3, padding=1))\n block.append(nn.Conv2d(filters, filters, 3, padding=1))\n else:\n filters = 512 / (2**(step-4))\n block.append(nn.Conv2d(filters*2, filters, 3, padding=1))\n block.append(nn.Conv2d(filters, filters, 3, padding=1))\n\n he_init(block[0])\n he_init(block[1])\n self.blocks.append(block)\n self.add_module(\"block\" + str(self.size) + \"/0\", block[0])\n self.add_module(\"block0\" + str(self.size) + \"/1\", block[1])\n\n self.alpha = 1e-6\n self.toRGB2 = self.toRGB\n self.toRGB = nn.Conv2d(filters, 3, 1)\n self.add_module(\"toRGB\" + str(self.size), self.toRGB)\n he_init(self.toRGB)\n\n def normalize(self, x):\n norm = (x ** 2) + 
1e-8\n norm = torch.sum(norm, 1)\n norm = norm / x.size()[1]\n norm = torch.sqrt(norm)\n\n stack = []\n for i in range(x.size()[1]):\n stack.append(norm)\n norm = torch.stack(stack)\n norm = torch.transpose(norm, 0, 1)\n\n return x / norm\n\n def forward(self, x):\n for i in range(len(self.blocks)-1):\n for e in self.blocks[i]:\n x = e(x)\n x = F.leaky_relu(x, negative_slope=0.2)\n if not i == 0:\n x = self.normalize(x)\n if i == 0:\n x = self.normalize(x)\n if not i == len(self.blocks)-2:\n x = F.upsample(x, scale_factor=2)\n\n if self.alpha < 1.0:\n x = F.upsample(x, scale_factor=2)\n x1 = self.toRGB2(x) * (1-self.alpha)\n\n x2 = self.blocks[-1][0](x)\n x2 = F.leaky_relu(x2, negative_slope=0.2)\n x2 = self.normalize(x2)\n x2 = self.blocks[-1][1](x2)\n x2 = F.leaky_relu(x2, negative_slope=0.2)\n x2 = self.normalize(x2)\n x2 = self.toRGB(x2) * self.alpha\n return x1+x2\n\n else:\n if not len(self.blocks) == 1:\n x = F.upsample(x, scale_factor=2)\n for e in self.blocks[-1]:\n x = e(x)\n x = F.leaky_relu(x, negative_slope=0.2)\n self.normalize(x)\n x = self.toRGB(x)\n return x\n\n\nclass Discriminator(nn.Module):\n def __init__(self, start_size=4):\n super(Discriminator, self).__init__()\n\n self.blocks = []\n self.size = start_size\n self.alpha = 1.0\n\n block = []\n block.append(nn.Conv2d(513, 512, 3, padding=1))\n block.append(nn.Conv2d(512, 512, 4))\n he_init(block[0])\n he_init(block[1])\n self.blocks.append(block)\n self.add_module(\"block0/0\", block[0])\n self.add_module(\"block0/1\", block[1])\n\n self.lin = nn.Linear(512, 1)\n he_init(self.lin, nonlinearity=\"linear\", param=None)\n self.add_module(\"linear\", self.lin)\n\n self.fromRGB = nn.Conv2d(3, 512, 1)\n self.add_module(\"fromRGBm\", self.fromRGB)\n self.fromRGB2 = None\n he_init(self.fromRGB)\n\n for i in range(int(math.log(start_size, 2)-2)):\n self.grow()\n\n def grow(self):\n self.size = self.size*2\n step = int(math.log(self.size, 2)-1)\n block = []\n if step < 5:\n filters = 512\n block.append(nn.Conv2d(filters, filters, 3, padding=1))\n block.append(nn.Conv2d(filters, filters, 3, padding=1))\n else:\n filters = 512 / (2**(step-4))\n block.append(nn.Conv2d(filters, filters*2, 3, padding=1))\n block.append(nn.Conv2d(filters*2, filters*2, 3, padding=1))\n he_init(block[0])\n he_init(block[1])\n self.blocks.append(block)\n self.add_module(\"block\" + str(self.size) + \"/0\", block[0])\n self.add_module(\"block\" + str(self.size) + \"/1\", block[1])\n\n self.alpha = 1e-6\n self.fromRGB2 = self.fromRGB\n self.fromRGB = nn.Conv2d(3, filters, 1)\n self.add_module(\"fromRGB\"+str(self.size), self.fromRGB)\n he_init(self.fromRGB)\n\n def add_stddev(self, x):\n std = torch.std(x, 0)\n std = torch.mean(std)\n stack = []\n for i in range(x.size()[0]):\n stack.append(std)\n std = torch.stack(stack) # batch_size x 1\n\n stack = []\n for i in range(x.size()[2]):\n stack.append(std)\n std = torch.stack(stack) # WH x batch_size x 1\n\n stack = []\n for i in range(x.size()[2]):\n stack.append(std)\n std = torch.stack(stack) # WH x WH x batch_size x 1\n\n std = torch.transpose(std, 0, 2)\n std = torch.transpose(std, 1, 3)\n\n return torch.cat([x, std], dim=1)\n\n def forward(self, x):\n if self.alpha < 1.0:\n x1 = self.fromRGB(x)\n x1 = F.leaky_relu(x1, negative_slope=0.2)\n for e in self.blocks[-1]:\n x1 = e(x1)\n x1 = F.leaky_relu(x1, negative_slope=0.2)\n x1 = F.avg_pool2d(x1, 3, padding=1, stride=2)\n x1 = x1 * self.alpha\n\n x2 = F.avg_pool2d(x, 3, padding=1, stride=2)\n x2 = self.fromRGB2(x2)\n x2 = F.leaky_relu(x2, 
negative_slope=0.2)\n x2 = x2 * (1-self.alpha)\n\n x = x1 + x2\n\n else:\n x = self.fromRGB(x)\n x = F.leaky_relu(x, negative_slope=0.2)\n if not len(self.blocks) == 1:\n for e in self.blocks[-1]:\n x = e(x)\n x = F.leaky_relu(x, negative_slope=0.2)\n x = F.avg_pool2d(x, 3, padding=1, stride=2)\n\n for i in range(1, len(self.blocks)-1):\n rev = self.blocks[::-1]\n for e in rev[i]:\n x = e(x)\n x = F.leaky_relu(x, negative_slope=0.2)\n x = F.avg_pool2d(x, 3, padding=1, stride=2)\n x = self.add_stddev(x)\n for e in self.blocks[0]:\n x = e(x)\n x = F.leaky_relu(x, negative_slope=0.2)\n x = torch.squeeze(x, dim=2)\n x = torch.transpose(x, 1, 2)\n x = self.lin(x)\n x = torch.squeeze(x)\n return x\n\n\n# Taken from https://github.com/github-pengge/PyTorch-progressive_growing_of_gans/blob/master/models/base_model.py\ndef he_init(layer, nonlinearity='leaky_relu', param=0.2):\n nonlinearity = nonlinearity.lower()\n if nonlinearity not in ['linear', 'conv1d', 'conv2d', 'conv3d', 'relu', 'leaky_relu', 'sigmoid', 'tanh']:\n if not hasattr(layer, 'gain') or layer.gain is None:\n gain = 0 # default\n else:\n gain = layer.gain\n elif nonlinearity == 'leaky_relu':\n assert param is not None, 'Negative_slope(param) should be given.'\n gain = calculate_gain(nonlinearity, param)\n else:\n gain = calculate_gain(nonlinearity)\n kaiming_normal(layer.weight, a=gain)\n"
] |
[
[
"torch.nn.functional.upsample",
"torch.mean",
"torch.transpose",
"torch.cat",
"torch.sum",
"torch.FloatTensor",
"torch.autograd.Variable",
"torch.nn.init.calculate_gain",
"torch.sqrt",
"torch.std",
"torch.rand",
"numpy.zeros",
"torch.squeeze",
"torch.nn.init.kaiming_normal",
"torch.nn.ConvTranspose2d",
"scipy.misc.imsave",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.leaky_relu",
"torch.stack",
"torch.nn.MSELoss"
]
] |
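The calc_gradient_penalty method in the row above is the WGAN-GP penalty of Gulrajani et al., written with an explicit alpha expand and hard-coded .cuda() calls. A minimal, device-agnostic sketch of the same computation, assuming a critic that maps an image batch to per-sample scalar scores:

import torch

def gradient_penalty(critic, real, fake, lam=10.0):
    # One interpolation coefficient per sample, broadcast over C/H/W.
    alpha = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    scores = critic(interp)
    grads = torch.autograd.grad(outputs=scores, inputs=interp,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True, retain_graph=True)[0]
    grads = grads.view(grads.size(0), -1)
    # Penalise deviation of each sample's gradient norm from 1.
    return lam * ((grads.norm(2, dim=1) - 1) ** 2).mean()

The looped normalize() in Generator is pixelwise feature normalisation; the same result comes from one vectorised line, x / torch.sqrt((x ** 2).mean(dim=1, keepdim=True) + 1e-8), which broadcasts the per-pixel channel mean instead of stacking and transposing.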
An-Dang/clustinator
|
[
"d1737628cd65745eb7034eb4e0d59996f8976f8b",
"d1737628cd65745eb7034eb4e0d59996f8976f8b"
] |
[
"clustinator/clustering.py",
"poc/src/clustering.py"
] |
[
"from sklearn.cluster import DBSCAN\nimport numpy as np\n\n\nstates = [\"INITIAL\",\"login\",\"View_Items\",\"home\",\"logout\",\"View_Items_quantity\",\"Add_to_Cart\",\"shoppingcart\",\n \"remove\",\"deferorder\",\"purchasecart\",\"inventory\",\"sellinventory\",\"clearcart\",\"cancelorder\",\"$\"]\n\n# Data imports\nPATH = \"../data/raw/\"\nsessions_file = (PATH+'sessions.dat')\n\n\nclass Clustering:\n def __init__(self, X, eps, min_samples):\n \"\"\"\n Class for the DBSCAN clustering algorithm with sklearn.\n :param X: Input data for the clustering\n \"\"\"\n self.X = X\n self.eps = eps\n self.min_samples = min_samples\n\n def dbscan(self):\n return DBSCAN(eps=self.eps, min_samples=self.min_samples).fit(self.X)\n\n def unique_labels(self):\n labels = self.dbscan().labels_\n unique, counts = np.unique(labels, return_counts=True)\n return unique, counts, labels\n\n def compare_results(self):\n unique, counts = self.unique_labels()\n # represent the cluster results as dict\n result = dict(zip(unique, counts))\n\n return result\n\n # Dict_Cluster\n @staticmethod\n def cluster_dict(labels, X_):\n cluster_list = []\n\n for label in np.unique(labels):\n points = X_[labels == label].toarray()\n\n for point in points:\n cluster_dict = {}\n cluster_dict[label] = point\n cluster_list.append(cluster_dict)\n\n return cluster_list\n\n @staticmethod\n def list_cluster(cluster_dict_, labels_next, labels_past):\n \"\"\"\n TODO: Check if the clusterlabels are equal, it's because an error can be accurse at the analysis\n :param cluster_dict_: dict of all cluster with clusterlabels e.g. [0,1,2,...]\n :param labels_next: actuall labels\n :param labels_past: older labels\n :return: list of cluster mean markov-chains\n \"\"\"\n cluster_list = []\n result = {}\n\n # Initial list cluster\n if labels_past is None:\n for cluster_index, value in enumerate(np.unique(labels_next)):\n tmp = []\n for item in cluster_dict_:\n for k, v in item.items():\n if k == value:\n tmp.append(v.tolist())\n cluster_list.append(np.mean(tmp, axis=0))\n\n for index, value in enumerate(cluster_list):\n for value1 in np.unique(labels_next):\n result[str(value1)] = value\n\n return result\n\n # From the second pass on\n elif np.unique(labels_next) in labels_past:\n for cluster_index, value in enumerate(np.unique(labels_next)):\n tmp = []\n for item in cluster_dict_:\n for k, v in item.items():\n if k == value:\n tmp.append(v.tolist())\n cluster_list.append(np.mean(tmp, axis=0))\n return cluster_list\n\n else:\n print('Unequally Number of cluster labels. Actual cluster {actualcluster} old cluster {oldcluster}'.format(\n actualcluster=np.unique(labels_next), oldcluster=labels_past))\n for cluster_index, value in enumerate(np.unique(labels_next)):\n tmp = []\n for item in cluster_dict_:\n for k, v in item.items():\n if k == value:\n tmp.append(v.tolist())\n cluster_list.append(np.mean(tmp, axis=0))\n return cluster_list",
"from sklearn.cluster import DBSCAN\nimport numpy as np\n\n\nstates = [\"INITIAL\",\"login\",\"View_Items\",\"home\",\"logout\",\"View_Items_quantity\",\"Add_to_Cart\",\"shoppingcart\",\n \"remove\",\"deferorder\",\"purchasecart\",\"inventory\",\"sellinventory\",\"clearcart\",\"cancelorder\",\"$\"]\n\n# Data imports\nPATH = \"../data/raw/\"\nsessions_file = (PATH+'sessions.dat')\n\n\nclass Clustering:\n def __init__(self, X):\n \"\"\"\n Class for the DBSCAN clustering algorithm with sklearn.\n :param X: Input data for the clustering\n \"\"\"\n self.X = X\n\n def dbscan(self):\n return DBSCAN(eps=1.5, min_samples=10).fit(self.X)\n\n def unique_labels(self):\n labels = self.dbscan().labels_\n unique, counts = np.unique(labels, return_counts=True)\n return unique, counts, labels\n\n def compare_results(self):\n unique, counts = self.unique_labels()\n # represent the cluster results as dict\n result = dict(zip(unique, counts))\n\n return result\n\n # Dict_Cluster\n def cluster_dict(self, labels, X_):\n cluster_list = []\n\n for label in np.unique(labels):\n points = X_[labels == label].toarray()\n\n for point in points:\n cluster_dict = {}\n cluster_dict[label] = point\n cluster_list.append(cluster_dict)\n\n return cluster_list\n\n def list_cluster(self, cluster_dict_, labels_next, labels_past):\n cluster_list = []\n if labels_next in labels_past:\n for cluster_index, value in enumerate(np.unique(labels_next)):\n tmp = []\n for item in cluster_dict_:\n for k, v in item.items():\n if k == cluster_index:\n tmp.append(v.tolist())\n cluster_list.append(np.mean(tmp, axis=0))\n\n return cluster_list\n"
] |
[
[
"numpy.mean",
"sklearn.cluster.DBSCAN",
"numpy.unique"
],
[
"numpy.mean",
"sklearn.cluster.DBSCAN",
"numpy.unique"
]
] |
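The two clustering.py variants above differ mainly in that the newer one takes eps and min_samples as constructor arguments where the PoC hard-codes DBSCAN(eps=1.5, min_samples=10). A self-contained sketch of the label-counting pattern both files use (the data and the eps/min_samples values here are placeholders):

import numpy as np
from sklearn.cluster import DBSCAN

X = np.random.rand(100, 2)                       # stand-in for the session matrix
labels = DBSCAN(eps=0.3, min_samples=5).fit(X).labels_
unique, counts = np.unique(labels, return_counts=True)
print(dict(zip(unique, counts)))                 # label -1 marks noise points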
KelvinKramp/Autotune123
|
[
"f6948e6da650bed0c05577e9565449e7e488ea2a"
] |
[
"get_filtered_data.py"
] |
[
"import numpy as np\nfrom scipy.signal import savgol_filter\n\n\ndef get_filtered_data(df, filter=\"No filter\"):\n # clean lists by removing sensitivity, removing IC ratio, removing empty values and converting strings\n # with ratios to floats.\n\n # x\n l = df[\"Parameter\"].to_list()\n l_time = []\n for string in l[3:]:\n if string == \"\":\n string = np.nan\n l_time.append(string)\n else:\n l_time.append(string)\n\n # y1\n l1 = df[\"Pump\"].to_list()\n l1_new = []\n for string in l1[3:]:\n if string == \"\":\n string = np.nan\n l1_new.append(string)\n else:\n l1_new.append(string)\n l1 = list(map(float, l1_new))\n\n # y2\n l2 = df[\"Autotune\"].to_list()\n l2 = l2[3:]\n l2_new = []\n for string in l2:\n if string == \"\":\n string = np.nan\n l2_new.append(string)\n else:\n l2_new.append(string)\n l2 = list(map(float, l2_new))\n l2 = np.asarray(l2)\n\n # apply filter\n l2_clean = l2[::2] # remove empty values\n if filter == \"No filter\":\n l3 = l2_clean\n else:\n if filter == \"Savitzky-Golay 11.6\":\n l3 = savgol_filter(l2_clean, 11, 6)\n elif filter == \"Savitzky-Golay 17.5\":\n l3 = savgol_filter(l2_clean, 17, 5)\n elif filter == \"Savitzky-Golay 23.3\":\n l3 = savgol_filter(l2_clean, 23, 3)\n\n # update numpy array of recommendations (l2) with filtered values\n n = 0\n for i, j in enumerate(l2):\n if not np.isnan(j):\n l2[i] = l3[n]\n n += 1\n l2 = l2.tolist()\n\n # round numbers\n l2 = [round(num, 2) for num in l2]\n\n # use easy identifiable variable names\n x = l_time\n y1 = l1\n y2 = l2\n\n return x,y1,y2\n"
] |
[
[
"numpy.asarray",
"numpy.isnan",
"scipy.signal.savgol_filter"
]
] |
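The filter names in get_filtered_data above encode the Savitzky-Golay parameters: "Savitzky-Golay 11.6" means window_length=11, polyorder=6. A minimal sketch on synthetic data; note that window_length must exceed polyorder (and must be odd on older SciPy releases):

import numpy as np
from scipy.signal import savgol_filter

y = np.sin(np.linspace(0, 4 * np.pi, 60)) + 0.2 * np.random.randn(60)
smooth = savgol_filter(y, window_length=11, polyorder=6)  # 11 > 6, 11 is odd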
fgkcv25/LMC
|
[
"1c13ac671ef325448fb3f8264398648cbb3d56b7"
] |
[
"Leitura de Arquivo/Leitura de arquivo.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 21 07:51:07 2019\r\n\r\n@author: 05873472955\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\n\r\nwith open('C:\\\\Users\\\\05873472955\\\\Desktop\\\\LEitura de Arquivo\\\\A.txt') as arquivo:\r\n dados = arquivo.readlines()\r\n \r\ndados.remove(dados[1001])\r\ndados.remove(dados[1000])\r\n\r\nA = []\r\nfor linha in dados:\r\n A.append(linha.rstrip('\\n').lstrip(' ').split(' '))\r\n \r\nA = np.asarray(A, dtype=np.float64)\r\n\r\n\r\nwith open('C:\\\\Users\\\\05873472955\\\\Desktop\\\\LEitura de Arquivo\\\\B.txt') as arquivoB:\r\n dadosB = arquivoB.readlines()\r\n \r\ndadosB.remove(dadosB[1507])\r\ndadosB.remove(dadosB[1506])\r\ndadosB.remove(dadosB[1505])\r\ndadosB.remove(dadosB[1504])\r\n\r\nB = []\r\nfor linha in dadosB:\r\n B.append(linha.rstrip('\\n').lstrip(' ').split(' '))\r\n \r\ni = 0\r\nwhile i < len(B):\r\n if len(B[i]) != 1000:\r\n B.pop(i)\r\n else:\r\n i = i+1\r\n \r\nB = np.asarray(B, dtype=np.float64)\r\n \r\n \r\narquivoC = open('C:\\\\Users\\\\05873472955\\\\Desktop\\\\LEitura de Arquivo\\\\C.txt','w+')\r\n(m,n) = A.shape\r\n(p,q) = B.shape\r\n\r\n\r\nlistadematrizes = [1*B,2*B,3*B,4*B,5*B,6*B,7*B,8*B,9*B,10*B]\r\n \r\nfor Ai in range(m):\r\n C = []\r\n for Aj in range(n):\r\n C.append(listadematrizes[int(A[Ai][Aj])-1])\r\n for i in range(len(C)):\r\n C[i] = list(C[i])\r\n C = str(C)\r\n C = C.rstrip(']').replace(']','\\n').replace('.0','').replace(',','').replace('[','')\r\n arquivoC.write(C)\r\n \r\narquivoC.close()\r\n\r\n\r\n \r\n \r\n"
] |
[
[
"numpy.asarray"
]
] |
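The manual readlines/strip/split/asarray pipeline in the row above can usually be collapsed into one call. A sketch assuming, as the index arithmetic implies, that A.txt holds 1000 valid whitespace-separated rows followed by two junk lines:

import numpy as np

# np.loadtxt performs the strip/split/float-conversion steps in one pass.
A = np.loadtxt("A.txt", dtype=np.float64, max_rows=1000)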
njaupan/SASAR
|
[
"b32e7eef9c85663656fdf19a58f7f39a768a8116"
] |
[
"SASAR.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"\n# June 2021\n# If using this pipeline please cite : XXXXXXXXXX\n#--------------------------------------------------------------------------+ \n# \n#\tSASAR is a meta-assembly tool \n# to reconcile different long read assemblies without a reference guide. \n#\tExample commands: \n#\tpython SASAR.py in_dir (option -t / -i/ -c/ -m/ -r / -o) \n# \n#--------------------------------------------------------------------------+\n# \n#\tAUTHOR: panpan ZHANG \n#\tCONTACT: [email protected] \n# \n#\tLICENSE: \n# \tGNU General Public License, Version 3 \n#\thttp://www.gnu.org/licenses/gpl.html \n# \n#\tVERSION: V.1 \n# \n#--------------------------------------------------------------------------+\n\"\"\"\n\nimport argparse\nimport re\nimport sys\nimport os\nimport pathlib\nimport gzip\nimport subprocess\nimport itertools\nimport multiprocessing\nimport pybedtools\nimport pandas as pd\nfrom pybedtools import BedTool\nfrom functools import reduce\n#import tempfile\n#import collections\n#from operator import index\n#import random\n#import shutil\n\n__version__ = '1.0'\n\nif str(pd.__version__) == \"\":\n print(\"Please install pandas\") \n\nif str(pybedtools.__version__)== \"\":\n print(\"Please install pybedtools\") \n\n\ndef get_arguments(args):\n parser = argparse.ArgumentParser(description='Long read assembly reconciliation', add_help=False, \n usage=\"python SASAR.py in_dir\")\n\n required_args = parser.add_argument_group('Positional arguments')\n required_args.add_argument('in_dir', type=str,\n help='input directory containing all assemblies')\n #required_args.add_argument('out_dir', type=str,\n # help='output directory for SASAR-assembly')\n\n setting_args = parser.add_argument_group('Settings')\n setting_args.add_argument('-t', metavar=\"INT\",type=int, default=get_default_thread(),\n help='number of CPU threads for whole genome alignment')\n setting_args.add_argument('-i', metavar=\"INT\",type=int,default=95,\n help=\"minimum identity confidence score [95]\")\n setting_args.add_argument('-c', metavar=\"INT\",type=int,default=95,\n help=\"minimum coverage confidence score [95]\")\n setting_args.add_argument('-m',metavar=\"INT\", type=int,default=50000,\n help=\"minium overlap length [50000]\")\n setting_args.add_argument('--repeat_size',metavar=\"INT\", type=int,default=50000,\n help=\"repeat size [50000]\")\n\n output_args = parser.add_argument_group(\"Output options\")\n output_args.add_argument(\"-o\", metavar=\"PATH\", type=str, default=\"SASAR_output\", \n help=\"output directory for SASAR-assembly [./SASAR_output]\")\n\n other_args = parser.add_argument_group('Other')\n other_args.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,\n help='show this help message and exit')\n other_args.add_argument('-v','--version', action='version',\n version='SASAR v' + __version__,\n help=\"show program's version number and exit\")\n\n args = parser.parse_args(args)\n return args\n\ndef main(args=None):\n args = get_arguments(args)\n #parser = get_parser()\n #random.seed(0)\n out_dir = args.o\n if not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n out_dir = os.path.abspath(out_dir) + \"/\"\n\n\n wga=process_wga(args.in_dir,args.o,args.t)\n filter=id_filter(args.i,args.c,args.m,args.repeat_size,args.o)\n #pans=Pan_contig(args.o)\n #ends=Extend_ends(args.i,args.c,args.m,args.o)\n #os.makedirs(args.o, exist_ok=True)\n print('SASAR to {}'.format(args.o))\n print()\n\ndef process_wga(in_dir,o,t):\n l = [str(x) for x in sorted(pathlib.Path(in_dir).glob('*'))\n 
if x.is_file()]\n #print(l)\n l = [x for x in l if\n x.endswith('.fasta') or x.endswith('.fasta.gz') or\n x.endswith('.fna') or x.endswith('.fna.gz') or\n x.endswith('.fa') or x.endswith('.fa.gz')] \n #print(l)\n print(\"---------------------------------------\"+'Total {:,} assemblies as input'.format(len(l))+\"----------------------------------------------------\")\n #cmd =\"mkdir \" +out_dir \n #subprocess.call(cmd, shell=True) \n for x in l:\n if x.endswith('.fasta') or x.endswith('.fna') or x.endswith('.fa'):\n fname=x.split('/')[-1]\n outfa=o+\"/NEW_\"+str(fname)\n print(\"---------------------------------------Reheader assembly : \"+str(fname)+\" --------------------------------------------------------------\")\n cmd =\"bioawk -cfastx '{ print \\\">\\\"$name\\\"_len\\\"length($seq);print $seq}' \" + x +\" >\" +outfa\n #subprocess.call(cmd, shell=True) \n if x.endswith('.fasta.gz') or x.endswith('.fna.gz') or x.endswith('.fa.gz'):\n fname=x.split('/')[-1].split('.gz')[0]\n outfa=o+\"/NEW_\"+str(fname)\n print(\"---------------------------------------Reheader assembly : \"+str(fname)+\" --------------------------------------------------------------\")\n cmd =\"bioawk -cfastx '{ print \\\">\\\"$name\\\"_len\\\"length($seq);print $seq}' \" + x +\" >\" +outfa\n #subprocess.call(cmd, shell=True) \n l = [str(x) for x in sorted(pathlib.Path(o).glob('NEW_*'))\n if x.is_file()]\n print(\"---------------------------------------Whole genome alignment of every two assemblies----------------------------------------------------\")\n cmd='cat ' +' '.join([str(i) for i in l]) + ' > ' +o+'/WGA.fa'\n print(cmd)\n subprocess.call(cmd, shell=True)\n os.chdir(o)\n length = len(l)\n i = 0\n # Iterating \n while i < length:\n r = l[i].split('/')[-1]\n i += 1\n j=i\n while j < length:\n q = l[j].split('/')[-1]\n j += 1\n print(r,q)\n outpaf=\"Q\"+str(r)+\"_\"+str(q)+\".paf\" \n minimap2_command = [\"minimap2\", \"--cs\", \"-cxasm20\", \"-t\", str(t),r,q,\"-o\",outpaf]\n print(minimap2_command)\n #minimap2_out = subprocess.run(minimap2_command, stdout=subprocess.PIPE).stdout.decode()\n cmd = \"awk 1 Q*.paf >WGA.paf\" \n #subprocess.call(cmd, shell=True) \n\n\n print(\"---------------------------------------Get metrics from alignment---------------------------------------------------------------------------\")\n paf=open('WGA.paf') \n outfile=open('WGA.statsF','w') \n #--------------------------------------------------------------------------------------------------------------------------------------------------+\n # This script is to get the statistics from paf file, such as Insertion, Deletion, Subsititution \n # and Gap-compressed Identity by the same definition from minimap2 developer \n # Heng Li's blog: http://lh3.github.io/2018/11/25/on-the-definition-of-sequence-identity\n #\t\t \n #\tTag Type Description \n #\n #\ttp A Type of aln: P/primary, S/secondary and I,i/inversion \n #\tcm i Number of minimizers on the chain \n # s1 i Chaining score\n # s2 i Chaining score of the best secondary chain\n # NM i Total number of mismatches and gaps in the alignment\n # MD Z To generate the ref sequence in the alignment\n # AS i DP alignment score\n # ms i DP score of the max scoring segment in the alignment\n # nn i Number of ambiguous bases in the alignment\n # ts A Transcript strand (splice mode only)\n # cg Z CIGAR string (only in PAF) M:MATCH; I:iNSERTION; D:DELETION\n # cs Z Difference string\n 
#-------------------------------------------------------------------------------------------------------------------------------------------------+\n #headers = ['refID','rstart', 'rend', 'rlen','direction','queryID', 'qlen','qstart', 'qend' ,'match','block',\n #'blast_iden','gap_compress_iden','divergence','PS',\n #'rcov', 'qcov','ins_max','delt_max','sub','gap_compress'] \n headers = ['queryID','qstart','qend','qlen', 'refID','direction','rstart','rend', 'rlen','gap_compress_iden','PS']\n outfile.write('\\t'.join(headers))\n outfile.write('\\n')\n total=0\n n_primary=0\n #parts = pd.DataFrame([i.split('\\t') for i in pafFile]), if install pandas module\n for line in paf: \n parts = line.strip().split(\"\\t\")\n total=total+1\n #get tag \"cg\" for cigar\n cg=line.strip().split(\"cg:Z:\")[1].split(\"\\t\")[0]\n PS=line.strip().split(\"tp:A:\")[1].split(\"\\t\")[0]\n DE=line.strip().split(\"de:f:\")[1].split(\"\\t\")[0]\n #value=re.findall(r'(\\d+)(\\w)', cg)\n M= re.compile(r'(\\d+)[M]').findall(cg)\n I= re.compile(r'(\\d+)[I]').findall(cg)\n D= re.compile(r'(\\d+)[D]').findall(cg)\n match_list =list(map(int, M))\n ins_list = list(map(int, I))\n ins_max=max(ins_list,default=0)\n delt_list = list(map(int, D))\n delt_max=max(delt_list,default=0)\n ins_compress = cg.count(\"I\")\n delt_compress = cg.count(\"D\")\n # NM is edit distance: NM = INS + DEL + SUB,\n # but for minimap2 NM = #mismatches + #I + #D + #ambiguous_bases, NM does not count reference skip `N`.\n NM= int(parts[10])- int(parts[9])\n sub=NM-ins_compress-delt_compress\n ins_compress = cg.count(\"I\")\n delt_compress = cg.count(\"D\")\n blast_iden = 100.0 *int((parts[9]))/ int((parts[10]))\n gap_compress = ins_compress +delt_compress\n gap_compress_iden = 100.0 *int((parts[9]))/(int(parts[10])-gap_compress)\n ql = int(parts[3])-int(parts[2])\n qcov = 100.0 *int(ql)/int((parts[1]))\n rl = int(parts[8])-int(parts[7])\n rcov = 100.0 *int(rl)/int((parts[6]))\n resultss = {\n\t\t \"queryID\": parts[0],\n\t\t \"qlen\": int(parts[1]),\n\t\t \"qstart\": int(parts[2]),\n\t\t \"qend\": int(parts[3]),\n\t\t \"direction\": parts[4],\n\t\t \"refID\": parts[5],\n\t\t \"rlen\": int(parts[6]),\n\t\t \"rstart\": int(parts[7]),\n\t\t \"rend\": int(parts[8]),\n\t\t \"match\": int(parts[9]),\n\t\t \"block\": int(parts[10]),\n\t\t \"qcov\" : qcov,\n\t\t \"rcov\" : rcov,\n \"PS\":PS,\n \"divergence\":DE,\n\t\t \"ins_max\": ins_max,\n\t\t \"delt_max\": delt_max,\n\t\t \"sub\": sub,\n\t\t \"gap_compress\":gap_compress,\n\t\t \"blast_iden\": blast_iden,\n\t\t \"gap_compress_iden\": gap_compress_iden,\n }\n out_row = (str(resultss[x]) for x in headers)\n outfile.write('\\t'.join(out_row))\n outfile.write('\\n')\n cmd = \"awk -v OFS='\\t' 'NR>1{print $5,$7,$8,$9,$1,$6,$2,$3,$4,$10,$11}' WGA.statsF >WGA.statsR;awk 1 WGA.statsF WGA.statsR > WGA.stats;rm WGA.statsF WGA.statsR\"\n subprocess.call(cmd, shell=True)\n #filter for primary alignment\n #subprocess.call(\"awk '$11 ~ /P/' WGA.stats > WGA.statsP\", shell=True)\n #cat WGA.stats3 |awk '$15P ~ /P/' |awk '{l[$1\"\\t\"$6]++}END{for (x in l) print x,l[x]}' > WGA.stats3.txt\n #awk -v FS='\\t' -v OFS='\\t' 'FNR==NR{A[$1 FS $2]=$3;next}{print ($1 FS $6 in A ) ? 
$0 OFS A[$1 FS $6] : $0 OFS 0}' WGA.stats3.txt WGA.stats3 > WGA.stats\n cmd = \"cat WGA.statsP |awk -v OFS='\\t' 'NR>1{print $1\\\"__\\\"$5,$0}' |cut -f1,3-12|bedtools sort |bedtools merge -c 4,5,6,9,10 -o distinct,distinct,distinct,distinct,max -d 10000 -s |sed 's/__/\\t/g' |cut -f1,3-9 > WGA.stats.sum\"\n #subprocess.call(cmd, shell=True)\n\nclass dd_list(dict):\n def __missing__(self,k):\n r = self[k] = []\n return r\nD = dd_list()\n\ndef id_filter(i,c,m,repeat_size,o):\n print(\"---------------------------------------Contig identity and coverage filter----------------------------------------------------------------------\")\n df=pd.read_csv('WGA.stats', skiprows=1,sep='\\t',\n names = ['queryID','qstart','qend','qlen', 'refID','direction','rstart','rend', 'rlen','identity','PS'])\n if df.empty:\n print('DataFrame is empty!')\n else:\n #filter for primary alignment\n df=df[df.PS == \"P\"]\n #df=df[(df.queryID == \"at.sd_utg668_len3242322\") | (df.queryID == \"at.wt_ctg107_len67542\") ]\n #df=df[(df.queryID == \"tig00004409_len418920\") | (df.queryID == \"at.sd_utg3040_len265040 \") ]\n df['queryID'] = df[['queryID','refID']].apply(lambda x : '{}__{}'.format(x[0],x[1]), axis=1)\n #merge overlap and keep strandedness\n x =BedTool.sort(BedTool.from_dataframe(df))\n y =BedTool.merge(x,s=True, c = '4,5,6,9,10',\n o = 'distinct,distinct,distinct,distinct,max',d=10000)\n d=BedTool.to_dataframe(y,disable_auto_names=True, header=None,names = ['queryID','qstart','qend','qlen','refID','direction','rlen','identity'])\n d['queryID']=d['queryID'].str.split('__', expand = True)[0]\n #sum up query contig coverage and keep strandedness\n d['qcov'] =((d.qend-d.qstart)/d.qlen*100).round(3)\n dcov=d.groupby(['queryID','refID','direction'])['qcov'].sum().reset_index(name='coverage').round(3)\n d=pd.merge(d, dcov, on=['refID','queryID','direction']) \n #d=d[(d.queryID == \"tig00004409_len418920\") | (d.queryID == \"at.sd_utg3040_len265040\") ]\n d= d[(d.qlen * d.coverage/100>10000)].sort_values(['coverage'])\n #print(d.sort_values(['coverage']))\n #add ref start and end information\n df=d[['queryID','qstart', 'qend','refID','direction','coverage']]\n df.columns=['refID','rstart', 'rend','queryID','direction','rcoverage']\n d=pd.merge(d, df, on=['queryID','refID','direction']) \n #reference contig coverage\n #d['rcov'] =(d.rend -d.rstart)/d.rlen*100\n d.to_csv('WGA.merge', header=False, index = False, sep='\\t') \n \n df0=d.copy()\n #print(df)\n #df1=df[['queryID','qstart','qend','qlen','refID','direction','rlen','identity','qcov','coverage','rstart','rend','rcoverage']]\n \n #print(df0)\n #df0.columns=['refID','rstart','rend','rlen','queryID','direction','qlen','identity','qcov','rcoverage','qstart','qend','coverage']\n #dfr=df0[['queryID','qstart','qend','qlen','refID','direction','rlen','identity','qcov','coverage','rstart','rend','rcoverage']]\n #print(dfr)\n #df=pd.concat([df0,dfr])\n #df=dfr.append(df0)\n #print(df)\n print(\"---------------------------------------Remove Redundancy of contigs---------------------------------------------------------------------------\")\n if c: \n d = d[ (d.qlen < d.rlen ) & (round(d.coverage,0) >= c)].sort_values(['coverage'],ascending=False)\n if i: \n d = d[ (d.qlen < d.rlen) & (round(d.identity,0) >= i)]\n #print(d)\n d1=d.groupby(['rlen','refID'])['queryID'].agg(list).reset_index().sort_values(['rlen'],ascending=False)\n #print(d1.dtypes)\n D=dict(zip(d1['refID'],d1['queryID']))\n #print(D)\n count=0\n newDic = dict()\n limit = len(list(D))\n lists=[k for k in 
D.keys()]\n while count < limit:\n v_num=[v for v in D.values()][int(count)]\n k_num=[k for k in D.keys()][int(count)]\n newDic[k_num] = v_num \n newDic_v=[v for v in newDic.values()] \n newDic_v_merge=list(itertools.chain.from_iterable(newDic_v))\n c=count\n while c < limit:\n k_num_next=[k for k in D.keys()][int(c)]\n #print(k_num_next)\n if k_num_next in newDic_v_merge:\n #print(k_num_next+\" is in the last round\") \n lists.remove( k_num_next)\n break\n count+=1\n f_out=open('WGA.r1.txt','w')\n f_out.write(\"\\n\".join(str(item) for item in lists))\n #subprocess.call(\"cat WGA.list.txt |tr ' ' '\\n' |sort|uniq > WGA.r1.txt\", shell=True) \n\n#def Extend_ends(i,c,m,o):\n cmd = \"seqtk subseq WGA.fa WGA.r1.txt > WGA.r1.fa\" \n subprocess.call(cmd, shell=True) \n #cmd = \"grep -wFf WGA.r1.txt WGA.stats.merge > WGA.stats.r1\" \n #subprocess.call(cmd, shell=True) \n #print(\"---------------------------------------Extend two ends of contigs---------------------------------------------------------\")\n #names = ['queryID','qstart','qend','qlen','refID','direction','rlen',\n #'identity','qcov', 'coverage', 'rstart', 'rend', 'rcoverage']\n #df0 = pd.read_csv('WGA.stats.merge', sep='\\t',names=names,header=None)\n #df=df0[(df0.queryID == \"tig00004409_len418920\") | (df0.queryID == \"at.sd_utg3040_len265040\") ]\n \n #print(df)\n #if df.empty:\n # print('DataFrame stats is empty!')\n #else:\n #print(df)\n \n f=open('WGA.r1.txt', 'r')\n selection = [line.strip() for line in f]\n df=df0[(pd.DataFrame(df0.refID.tolist()).isin(selection).any(1)) | (pd.DataFrame(df0.queryID.tolist()).isin(selection).any(1)) ]\n print(df)\n #df=d0\n #print(df)\n #df_sels=df[~df.refID.isin(dfE_ref.refID.tolist())]\n #dsearch=pd.read_csv('WGA.r1.txt',skiprows=0,sep='\\t',names = ['refID'])\n #df=pd.merge(df, dsearch, on=['refID']) \n\n #print(df.head(50))\n df=df[(df.queryID.astype(str) == \"tig00000133_len7134675\") |(df.refID.astype(str) == \"tig00000133_len7134675\") ]\n #df=df[(df.refID.astype(str) == \"tig00000133_len7134675\") ]\n df['Eq_end']=df.qlen-df.qend\n df['Er_end']=df.rlen-df.rend\n #print(df)\n\n df1=df.groupby(['refID','queryID','direction'])['Eq_end'].min().reset_index(name='Eq')\n df2=df.groupby(['refID','queryID','direction'])['Er_end'].min().reset_index(name='Er')\n df3=df.groupby(['refID','queryID','direction'])['qstart'].min().reset_index(name='Sq')\n df4=df.groupby(['refID','queryID','direction'])['rstart'].min().reset_index(name='Sr')\n data_frames=[df1,df2,df3,df4]\n df0 = reduce(lambda left,right: pd.merge(left,right,on=['refID','queryID','direction'],\n how='outer'), data_frames)\n df=pd.merge(df, df0, on=['refID','queryID','direction']) \n #print(df.sort_values(['coverage']))\n\n def add_end(df): \n if (df.direction == '+'):\n if repeat_size:\n if (df.Sq>=df.Sr):\n if (df.Er<df.Eq) or (df.Eq< df.Er <repeat_size):\n return \"inside\"\n if (df.Sq<=df.Sr) and (df.Er< repeat_size <df.Eq) :\n s=int(df.Eq - df.Er)\n return 'END:{}'.format(s) \n if (df.Eq < df.Er) and (df.Sr< repeat_size <df.Sq) :\n s=int(df.Sq - df.Sr)\n return 'START:{}'.format(s)\n elif (df.direction == '-'): \n if repeat_size:\n if (df.Eq>=df.Sr): \n if (df.Sq>df.Er) or (df.Sq< df.Er <repeat_size):\n return \"inside\" \n if (df.Eq<=df.Sr)and (df.Eq <repeat_size)and (df.Er< repeat_size <df.Sq) : \n s=int(df.Sq - df.Er) \n return 'END:{}'.format(s) \n if (df.Sq < df.Er) and (df.Sq <repeat_size) and (df.Sr< repeat_size <df.Eq):\n s=int(df.Eq - df.Sr)\n return 'START:{}'.format(s) \n return \"NONE\"\n \n df['Ef_len'] = 
df.apply(add_end, axis=1)\n #print(df)\n df=df[df.Ef_len.str.contains(\"START|END\")]\n df['INFO']=df.Ef_len.str.split(\":\").str[0]\n df['Ef_len']=df.Ef_len.str.split(\":\").str[1]\n #print(df)\n #dsearch.columns=dsearch.columns.str.replace('refID', 'queryID')\n #df=pd.merge(df, dsearch, on=['queryID']).sort_values(['rlen'])\n #print(df.head(50))\n #print(df.sort_values(['queryID']))\n #df['diff1']=(df.qend-df.qstart)/(df.rend-df.rstart)\n #df['diff2']=(df.qend-df.qstart)-(df.rend-df.rstart)\n if m:\n df=df[df.qend-df.qstart > m]\n df= df[df.groupby(['refID','INFO'])['Ef_len'].transform(max) == df['Ef_len']]\n print(df)\n\n\ndef get_default_thread():\n return min(multiprocessing.cpu_count(), 16)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.merge",
"pandas.read_csv"
]
] |
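The comment block inside SASAR.py above defines gap-compressed identity following Heng Li's post: each insertion or deletion run in the CIGAR counts once, so the denominator is the alignment block length minus the number of I/D runs (the file counts them with cg.count("I") + cg.count("D")). A worked instance with a hypothetical CIGAR string; match and block correspond to PAF columns 10 and 11:

def identities(cigar, match, block):
    gaps = cigar.count("I") + cigar.count("D")   # number of indel runs
    return 100.0 * match / block, 100.0 * match / (block - gaps)

blast, gap_compressed = identities("50M3I30M2D20M", match=95, block=105)
# blast ~ 90.48, gap_compressed ~ 92.23: compressing the two indel runs
# removes 2 from the denominator, raising the identity estimate.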
dingmyu/psa
|
[
"e27539bbd569cd1100a339336b9e3a2b0dad67fc"
] |
[
"fail_wok/voc_deeplab/resnet.py"
] |
[
"import torch.nn as nn\nimport torch\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom torchE.nn import SyncBatchNorm2d\n\nBatchNorm = nn.BatchNorm2d\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = BatchNorm(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = BatchNorm(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, SyncBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n weight = torch.load('/mnt/lustre/share/DSK/model_zoo/pytorch/imagenet/resnet101-5d3b4d8f.pth')\n model.load_state_dict(weight, strict=False) \n #model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)\n return model\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)\n return model\n"
] |
[
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] |
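The weight-initialisation loop in ResNet.__init__ above is the fan-out variant of He et al. (2015): each convolution weight is drawn from N(0, sqrt(2/n)) with n = k*k*out_channels. The same step in isolation:

import math
import torch.nn as nn

conv = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False)
n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels
conv.weight.data.normal_(0, math.sqrt(2.0 / n))  # He fan-out initialisation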
BrunoKrinski/segtool
|
[
"cb604b5f38104c43a76450136e37c3d1c4b6d275"
] |
[
"final_plot.py"
] |
[
"import os\nimport cv2\nimport glob\nimport json\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\n\ndef getminmax(x1, x2):\n aux1 = sorted(x1)\n aux2 = sorted(x2)\n\n #print(aux1[0], aux2[0])\n if aux1[0] < aux2[0]:\n p = aux1[0]\n else:\n p = aux2[0]\n \n #print(p)\n p = p * 10\n #print(p)\n if p < 0:\n p = 0\n #print('----------')\n if aux1[len(aux1)-1] > aux2[len(aux2)-1]:\n e = aux1[len(aux1)-1]\n else:\n e = aux2[len(aux2)-1]\n #print(e)\n e = e * 10 + 1\n #print(e)\n if e > 10:\n e = 10\n #print(math.floor(p)*10, int(round(e,0))*10+5)\n return math.floor(p)*10, int(round(e,0))*10+5\n\ndef plot(label, train, valid, save_path):\n y = list(range(len(train)))\n #p, e = getminmax(train, valid)\n p, e = 0, 100\n yticks = [p/100 for p in range(p, e, 5)]\n xticks = [p for p in range(0, len(train)+5, 5)]\n plt.plot(y, train, label='Train')\n plt.plot(y, valid, label='Valid')\n plt.xticks(xticks)\n plt.yticks(yticks)\n plt.title(label)\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(save_path + label.lower().replace(' ','_').replace(':','_') + '.pdf')\n plt.clf()\n\nif __name__ == '__main__':\n\n root = 'RUNS/'\n\n super_runs = ['0p1/', '0p2/']\n super_runs = ['50e/']\n \n experiments = ['Clahe/', 'CoarseDropout/', 'ElasticTransform/', 'Emboss/', 'Flip/', \n 'GaussianBlur/', 'GridDistortion/', 'GridDropout/', 'ImageCompression/', 'MedianBlur/',\n 'OpticalDistortion/', 'PiecewiseAffine/', 'Posterize/', 'RandomBrightnessContrast/', 'RandomCrop/',\n 'RandomGamma/', 'RandomSnow/', 'Rotate/', 'Sharpen/', 'ShiftScaleRotate/']\n experiments = ['noda/']\n #encoders = ['resnet50/','resnet101/','resnext50_32x4d/','resnext101_32x8d/',\n # 'timm-res2net50_26w_4s/','timm-res2net101_26w_4s/','vgg16/','densenet121/',\n # 'densenet169/','densenet201/', 'se_resnext50_32x4d/', 'se_resnext101_32x4d/',\n # 'se_resnet50/', 'se_resnet101/',\n # 'timm-regnetx_002/','timm-regnetx_004/','timm-regnetx_006/',\n # 'timm-regnety_002/','timm-regnety_004/','timm-regnety_006/']\n encoders = ['timm-regnetx_002/']\n\n #decoders = ['unetplusplus/', 'unet/','fpn/','pspnet/','linknet/', 'manet/']\n decoders = ['unetplusplus/']\n\n datasets = ['covid19china/', 'medseg/', 'mosmed/', 'ricord1a/', 'covid20cases/',]\n\n for super_run in super_runs:\n\n for experiment in experiments:\n\n runs_path = root + super_run + experiment\n\n for dataset in datasets:\n\n # MAKE A DATASET PLOT\n\n dataset_path = runs_path + dataset\n decoders_train = []\n decoders_valid = []\n #colors = ['red', 'purple', 'blue','orange','green', 'grey']\n colors = [[0, 0, 166], [255, 74, 70], [0, 137, 65], [153, 0, 153], [96, 96, 96], [255, 128, 0]]\n colors = np.array(colors)/255\n #colors = ['mediumblue', 'darkred', 'green', 'saddlebrown', 'dodgerblue', 'saddlebrown']\n\n decoders_train = []\n decoders_valid = []\n for decoder in decoders:\n\n decoder_path = dataset_path + decoder \n\n for e, encoder in enumerate(encoders):\n\n encoder_path = decoder_path + encoder\n\n # MAKE THE MEAN CURVE OF 5 FOLDS\n\n graphics_path = encoder_path + 'graphics/'\n if os.path.isdir(graphics_path):\n os.system('rm -rf {}'.format(graphics_path))\n\n runs = glob.glob(encoder_path + '*')\n for r, run in enumerate(runs):\n print(run)\n\n # READ RESULTS\n train_logs_path = run + '/train_logs.json'\n with open(train_logs_path) as train_logs_file:\n train_logs = json.load(train_logs_file)\n\n train_results = train_logs['train']\n valid_results = train_logs['valid']\n\n 
list_keys = list(train_results[0].keys())\n\n train_list = []\n valid_list = []\n for key in list_keys:\n train_list.append([])\n valid_list.append([])\n \n for train_result, valid_result in zip(train_results, valid_results):\n\n for i, key in enumerate(list_keys):\n train_list[i].append(train_result[key])\n valid_list[i].append(valid_result[key])\n \n # PLOT RESULTS\n run_graphics_path = run + '/graphics/'\n if os.path.isdir(run_graphics_path):\n os.system('rm -rf {}'.format(run_graphics_path))\n os.mkdir(run_graphics_path)\n\n for key, t_item, v_item in zip(list_keys, train_list, valid_list):\n plot(key, t_item, v_item, run_graphics_path)\n\n #print('Fold size: ' + str(len(train_list)))\n # GET THE MEAN RESULTS OF RUNS\n if r == 0:\n train_list_runs = train_list\n valid_list_runs = valid_list\n else:\n for c in range(len(train_list_runs)):\n train_list_runs[c] = [x + y for x,y in zip(train_list_runs[c], train_list[c])]\n valid_list_runs[c] = [x + y for x,y in zip(valid_list_runs[c], valid_list[c])]\n \n for t in range(len(train_list_runs)):\n train_list_runs[t] = [x / len(runs) for x in train_list_runs[t]]\n valid_list_runs[t] = [x / len(runs) for x in valid_list_runs[t]]\n\n #print('Mean Fold Size: ' + str(len(train_list_runs)))\n \n # PLOT THE MEAN RESULT\n os.mkdir(graphics_path)\n for key, t_item, v_item in zip(list_keys, train_list_runs, valid_list_runs):\n plot(key, t_item, v_item, graphics_path)\n\n if e == 0:\n encoder_train = train_list_runs\n encoder_valid = valid_list_runs\n else:\n for c in range(len(encoder_train)):\n encoder_train[c] = [x + y for x,y in zip(encoder_train[c], train_list_runs[c])]\n encoder_valid[c] = [x + y for x,y in zip(encoder_valid[c], valid_list_runs[c])]\n \n for t in range(len(encoder_train)):\n encoder_train[t] = [x / len(encoders) for x in encoder_train[t]]\n encoder_valid[t] = [x / len(encoders) for x in encoder_valid[t]]\n\n decoders_train.append(encoder_train)\n decoders_valid.append(encoder_valid)\n\n save_path = dataset_path + 'graphics/'\n if os.path.isdir(save_path):\n os.system('rm -rf {}'.format(save_path))\n os.mkdir(save_path)\n\n for k, key in enumerate(list_keys):\n for dn, (dtrain, dvalid, decoder) in enumerate(zip(decoders_train, decoders_valid, decoders)):\n train = dtrain[k]\n valid = dvalid[k]\n y = list(range(len(train)))\n\n #p, e = getminmax(train, valid)\n p, e = 0, 0\n title = ''\n if dataset == 'covid19china/':\n title = 'CC-CCII'\n p, e = 30, 80\n elif dataset == 'covid20cases/':\n p, e = 60, 100\n title = 'Zenodo'\n elif dataset == 'medseg/':\n title = 'MedSeg'\n p, e = 10, 70\n elif dataset == 'mosmed/':\n title = 'MosMed'\n p, e = 40, 90\n elif dataset == 'ricord1a/':\n title = 'Ricord1a'\n p, e = 70, 100\n #print(p, e)\n\n if decoder == 'unetplusplus/':\n dec = 'U-net++'\n elif decoder == 'unet/':\n dec = 'U-net'\n elif decoder == 'fpn/':\n dec = 'FPN'\n elif decoder == 'pspnet/':\n dec = 'PSPNet'\n elif decoder == 'linknet/':\n dec = 'LinkNet'\n elif decoder == 'manet/':\n dec = 'MA-Net'\n\n ylabel = ''\n if key == 'Fscore':\n ylabel = 'F-score'\n else:\n ylabel = key\n\n yticks = [p/100 for p in range(p, e, 5)]\n xticks = [p for p in range(0, len(train)+5, 5)]\n #plt.figure(figsize=[10.4, 6.8])\n #plt.rc('font', size=20)\n plt.plot(y, train, '-', lw=2, color=colors[dn], label='Train of ' + dec)\n plt.plot(y, valid, '--', lw=2, color=colors[dn], label='Valid of ' + dec)\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.xlabel('Epochs')\n plt.ylabel(ylabel)\n plt.xticks(xticks)\n plt.yticks(yticks)\n 
plt.title(title)\n plt.tight_layout()\n plt.grid()\n plt.savefig(save_path + key.lower().replace(' ','_').replace(':','_') + '.pdf')\n plt.clf()\n \n '''\n if e == 0:\n encoders_results_train = []\n encoders_results_valid = []\n for key in list_keys:\n encoders_results_train.append([])\n encoders_results_valid.append([])\n \n for t, (t_item, v_item) in enumerate(zip(train_list_runs, valid_list_runs)):\n encoders_results_train[t].append(t_item)\n encoders_results_valid[t].append(v_item)\n\n print('Encoder size:' + str(len(encoders_results_train)))\n\n decoders_train.append(encoders_results_train)\n decoders_valid.append(encoders_results_valid)\n \n print('Decoder size:' + str(len(decoders_train)))\n\n for k, key in enumerate(list_keys):\n for dn, (dtrain, dvalid, decoder) in enumerate(zip(decoders_train, decoders_valid, decoders)):\n train = dtrain[k]\n valid = dvalid[k]\n \n print(len(train)) \n t = np.transpose(np.array(train))\n print(t.shape)\n train = t.cumsum(axis=0)\n print(train.shape)\n t_mean = t.mean(axis=1)\n print(t_mean.shape)\n t_std = t.std(axis=1)\n\n v = np.transpose(np.array(valid))\n valid = v.cumsum(axis=0)\n v_mean = v.mean(axis=1)\n v_std = v.std(axis=1)\n\n y = list(range(50))\n p, e = getminmax(t_mean, v_mean)\n yticks = [p/100 for p in range(p, e, 5)]\n xticks = [p for p in range(0, len(train)+5, 5)]\n plt.plot(y, t_mean, '-', lw=2, label='Train ' + decoder.replace('/',''), color=colors[dn])\n plt.plot(y, v_mean, '--', lw=2, label='Valid ' + decoder.replace('/',''), color=colors[dn])\n plt.fill_between(y, t_mean+t_std, t_mean-t_std, facecolor=colors[dn], alpha=0.2)\n plt.fill_between(y, v_mean+v_std, v_mean-v_std, facecolor=colors[dn], alpha=0.2)\n #plt.set_title(r'random walkers empirical $\\mu$ and $\\pm \\sigma$ interval')\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n #plt.set_xlabel('num steps')\n #plt.set_ylabel('position')\n plt.yticks(yticks)\n plt.tight_layout()\n plt.grid()\n plt.savefig('{}.png'.format(list_keys[k]))\n plt.clf() \n '''\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
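The run-averaging loops in final_plot.py above (accumulate with zip, then divide by len(runs)) reduce to a stacked mean when every fold logs the same number of epochs. A sketch with made-up scores:

import numpy as np

runs = [[0.61, 0.70, 0.74], [0.59, 0.68, 0.75], [0.63, 0.71, 0.73]]
mean_curve = np.stack(runs).mean(axis=0)   # per-epoch mean across folds
std_curve = np.stack(runs).std(axis=0)     # usable for shaded error bands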
telnoratti/burningwheel-tools
|
[
"78277752ddcd6f5b257603920385b61b113effac"
] |
[
"gen_function.py"
] |
[
"import sympy\nfrom sympy import rsolve, Function, expand, Rational, Sum, Poly\nfrom sympy.abc import *\n\nfrom sympy import O\nimport numpy as np\nfrom numpy.polynomial import polynomial as np_poly\n\n### Recurrence relations precomputed, needed for exploding cases\n# These are kept as recurrence relations because I get an error when trying to\n# find the recurrence relation before substituting in values. If I were a math\n# wiz I'd just solve it myself, but I get tripped up tracking all the variables\n## exploding\n#D_e = Function('D_e')\n#R_e = D_e(n+1) - s*e*D_e(n)\nD_e_1 = s*e*(d/e+f)\nD_e_0 = f\n# Solve for D_e_n after substitution with \n# D_e_n = rsolve(R_e, D_e(n), {D_e(1): D_e_1})\n# P_r_e_n = D_e_0 + Sum(D_e_n*x**n, (n, 1, N)) + O(x**(N+1))\n# Calculated with Mathematica which has a more robust recurrence solver\nP_e = (d+e*f)*(e*s)**n/e\n\n# These initial conditions ignore all rolls with at least one failure used for rerolling one die\nD_e_s_1 = s*d\nD_e_s_0 = 0\n# Calculated with Mathematica which has a more robust recurrence solver\nP_e_s = d*(e*s)**n/e\n\n## reroll failures, exploding\n#D_r_e = Function('D_r_e')\n# The second term is derived from the exploding non-reroll case\n#R_r_e = D_r_e(n+1) - f*(s*e)**(n+1)*(d/e+f) - s*e*D_r_e(n)\nD_r_e_1 = f*s*(d+e*f) + s*(d+e*f*f)\nD_r_e_0 = f**2\n# Calculated with Mathematica which has a more robust recurrence solver\nP_r_e = (e*s)**n*(e*f**2*n + d*f*n + e*f**2 + d)/e\n# Solve for P_r_e after substitution with \n# D_r_e_n = rsolve(R_r_e, D_r_e(n), {D_r_e(1): D_r_e_1})\n# P_r_e = D_r_e_0 + Sum(D_r_e_n*x**n, (n, 1, N)) + O(x**(N+1))\n# The case of only successes\n#D_r_e_s = Function('D_r_e_s')\n#R_r_e_s = D_r_e_s(n+1) - f*d*(e*s)**n/e - s*e*D_r_e_s(n)\nD_r_e_s_1 = f*s*d+s*d\nD_r_e_s_0 = 0\n# Calculated with Mathematica which has a more robust recurrence solver\n# RSolve[{p[n+1] == f*d*(s*e)^(n+1)/e + s*e*p[n], p[1] == f*s*d + s*d}, p, n]\nP_r_e_s = (e*s)**n * d*(1+f*n)/e\n\n### Polynomials precomputed, for use in non-exploding cases\n## Simple die roll\nP_x = f + s*x\nP_0 = f\nP_1 = s\n# P is just P_x after substitution\n# only success\nP_s_0 = 0\nP_s_1 = s\n\n## Die roll and reroll one failure\nP_r_x = f**2 + (1 - f**2)*x\nP_r_0 = f**2\nP_r_1 = (1-f**2)\n\nP_r_s_0 = 0\nP_r_s_1 = f*s+s\n\n# This is a convienence function for rolling some dice for burning wheel\ndef roll_dice(num_dice=1, shade='black', open_ended=False, luck=False, boon=0, divine_inspiration=False, saving_grace=False, high_ob=10, cum_sum=False, log=False):\n # What counts as a success\n shade_to_faces = {\n 'black': 3,\n 'grey': 4,\n 'white': 5,\n }\n success_count = shade_to_faces[shade]\n\n # Check the open-ended/luck behaviour\n explode_count = 0\n reroll_one = False\n if open_ended:\n explode_count = 1\n if luck:\n reroll_one = True\n elif luck:\n explode_count = 1\n\n # Boon and divine_inspiration impact number of dice rolled\n if divine_inspiration:\n num_dice = num_dice * 2\n\n num_dice += boon\n\n # Numpy method is most effecient\n p_1 = die_poly(die_faces=6, success_count=success_count, explode_count=explode_count, reroll=saving_grace, order=high_ob)\n p_n = p_1\n for i in range(1,num_dice):\n p_n = np_poly.polymul(p_n, p_1)[:high_ob+1]\n\n # Now p_n is an array of polynomial coeffecients from low to high which is the generating function for our die\n if reroll_one:\n # I needs the odds of at least one failure, \n # For two dice this should be 1, 1, 8/9, ...\n\n # s_n's mth term is the odds of rolling m successes without any dice failing\n s_1 = 
die_poly(die_faces=6, success_count=success_count, explode_count=explode_count, reroll=saving_grace, only_success=True, order=high_ob)\n s_n = s_1\n for i in range(1,num_dice):\n s_n = np_poly.polymul(s_n, s_1)[:high_ob+1]\n\n s_n = s_n\n # f_n is the chance of getting \n f_n = (1 - s_n)\n # f_n is the odds of getting to roll an additional die from luck as is a mask for our luck die\n #print(list(map(float,p_n)))\n #print(sum(list(map(float,p_n))))\n #print(list(map(float,p_n)))\n #print(sum(list(map(float,p_n))))\n fl = lambda x: list(map(float,x))\n p_nr_1 = die_poly(die_faces=6, success_count=success_count, explode_count=explode_count, reroll=False, order=high_ob)\n p_n_r = np_poly.polymul((p_n - s_n), p_nr_1)[:high_ob+1]\n p_n = s_n + p_n_r\n #print(list(map(float,p_n)))\n #print(sum(list(map(float,p_n))))\n if log:\n print(\"Odds of n successes including a failure: \", fl(f_n))\n print(\"Odds of n successes having no failures : \", fl(s_n))\n print(\"Odds of only success (1d): \", fl(s_1))\n\n results = np.zeros(high_ob+1, dtype=object)\n results[:p_n.shape[0]] = p_n\n\n if cum_sum:\n results = results[::-1].cumsum()[::-1] + (1 - results.sum())\n\n return results\n\ndef pad_cut_probs(probs, length):\n probs = probs[:length]\n probs = np.pad(probs, (0, length - len(probs)), mode='constant', constant_values=(0,0)).astype(int)\n return probs\n\n# Yields a numpy array length order+1 of the coeffecients of the generating function from low to high\n# only_success calculates the probability of a value with no terminating failures\ndef die_poly(die_faces=6, success_count=3, explode_count=0, reroll=False, only_success=False, order=10):\n # Probability of a die roll being a success\n success_chance = Rational(str(success_count) + \"/\" + str(die_faces))\n # Probability of a *success* exploding\n explode_chance = Rational(str(explode_count) + \"/\" + str(success_count))\n\n # Have our substitutions as a short hand\n def sp(x):\n if isinstance(x, sympy.core.expr.Expr):\n return x.subs(f, 1 - s).subs(s, success_chance).subs(d, 1 - e).subs(e, explode_chance)\n return x\n\n coeffs = np.zeros(order+1, dtype=object)\n if explode_count > 0:\n # We have an exploding die\n if reroll:\n # We are rerolling each failure once\n F = None\n if only_success:\n #F = rsolve(sp(R_r_e_s), D_r_e_s(n), {D_r_e_s(1): sp(D_r_e_s_1)})\n F = sp(P_r_e_s)\n coeffs[0] = sp(D_r_e_s_0)\n else:\n #F = rsolve(sp(R_r_e), D_r_e(n), {D_r_e(1): sp(D_r_e_1)})\n F = sp(P_r_e)\n coeffs[0] = sp(D_r_e_0)\n\n for i in range(1, order + 1):\n coeffs[i] = F.subs(n,i)\n else:\n # We have the simple exploding die formula\n F = None\n if only_success:\n F = sp(P_e_s)\n coeffs[0] = sp(D_e_s_0)\n else:\n F = sp(P_e)\n coeffs[0] = sp(D_e_0)\n for i in range(1, order + 1):\n coeffs[i] = F.subs(n,i)\n else:\n # We have a non-exploding die\n if reroll:\n # We must reroll the first failure\n if only_success:\n coeffs[0] = sp(P_r_s_0)\n coeffs[1] = sp(P_r_s_1)\n else:\n coeffs[0] = sp(P_r_0)\n coeffs[1] = sp(P_r_1)\n\n else:\n if only_success:\n coeffs[0] = sp(P_s_0)\n coeffs[1] = sp(P_s_1)\n else:\n coeffs[0] = sp(P_0)\n coeffs[1] = sp(P_1)\n return coeffs\n"
] |
[
[
"numpy.zeros",
"numpy.polynomial.polynomial.polymul"
]
] |
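gen_function.py above treats each die as a probability generating function f + s*x and combines dice by multiplying polynomials, which is what the np_poly.polymul loop does. The smallest instance, two fair coins (s = f = 1/2):

from numpy.polynomial import polynomial as np_poly

p1 = [0.5, 0.5]                # coefficients low->high: P(0 successes), P(1)
p2 = np_poly.polymul(p1, p1)   # array([0.25, 0.5, 0.25])
# p2[k] is the probability of exactly k successes on two dice.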
wdecay/ShapeClassification
|
[
"0592a837f272c709322a1d7e74948268e8c82cce"
] |
[
"layers/Output.py"
] |
[
"import tensorflow as tf\n\nclass Output(tf.keras.layers.Layer):\n def __init__(self, num_classes, **kwargs):\n self.num_classes = num_classes\n super(Output, self).__init__(**kwargs)\n\n def build(self, input_shape):\n tfn_output_shape = input_shape[0][0].as_list()\n\n self.fully_connected_layer = self.add_weight(\n name = \"fcl\",\n shape = [tfn_output_shape[-2], self.num_classes], \n dtype=tf.float32)\n self.output_biases = self.add_weight(\n name = \"biases\",\n shape = [self.num_classes], dtype=tf.float32)\n\n @tf.function\n def call(self, inputs):\n def process_row(row):\n tfn_scalars = row\n tfn_output = tf.reduce_mean(tf.squeeze(tfn_scalars), axis=0)\n # output : [num_classes]\n output = tf.einsum('xy,x->y', self.fully_connected_layer, tfn_output) + self.output_biases\n return output\n if True:\n return tf.map_fn(process_row, inputs[0][0])\n else:\n return process_row(inputs[0][0])\n\n def get_config(self):\n return {\"num_classes\": self.num_classes}\n\n"
] |
[
[
"tensorflow.map_fn",
"tensorflow.squeeze",
"tensorflow.einsum"
]
] |
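Output.call above reduces each row with tf.map_fn and projects through the fully connected weights with tf.einsum('xy,x->y', ...). For this particular projection the map is equivalent to a single batched contraction, shown side by side with placeholder shapes:

import tensorflow as tf

W = tf.random.normal([4, 3])        # [features, num_classes]
batch = tf.random.normal([8, 4])    # 8 feature vectors
out_mapped = tf.map_fn(lambda v: tf.einsum('xy,x->y', W, v), batch)
out_batched = tf.einsum('xy,bx->by', W, batch)   # same values, one op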
sylvainletourneau/env_canada
|
[
"ec146f98b6b556a483358789f2a963b9fc421478"
] |
[
"env_canada/ec_radar.py"
] |
[
"from concurrent.futures import as_completed\nimport datetime\nfrom io import BytesIO\nimport json\nimport os\nfrom PIL import Image\nimport xml.etree.ElementTree as et\n\nimport cv2\nimport dateutil.parser\nimport imageio\nimport numpy as np\nimport requests\nfrom requests_futures.sessions import FuturesSession\n\n# Natural Resources Canada\n\nbasemap_url = \"http://maps.geogratis.gc.ca/wms/CBMT?service=wms&version=1.3.0&request=GetMap&layers=CBMT&styles=&CRS=epsg:4326&BBOX={south},{west},{north},{east}&width={width}&height={height}&format=image/png\"\n\n# Environment Canada\n\nlayer = {\"rain\": \"RADAR_1KM_RRAI\", \"snow\": \"RADAR_1KM_RSNO\"}\n\nlegend_style = {\"rain\": \"RADARURPPRECIPR\", \"snow\": \"RADARURPPRECIPS14\"}\n\ncapabilities_path = \"https://geo.weather.gc.ca/geomet/?lang=en&service=WMS&version=1.3.0&request=GetCapabilities&LAYER={layer}\"\nwms_namespace = {\"wms\": \"http://www.opengis.net/wms\"}\ndimension_xpath = './/wms:Layer[wms:Name=\"{layer}\"]/wms:Dimension'\n\nradar_path = \"https://geo.weather.gc.ca/geomet?SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&BBOX={south},{west},{north},{east}&CRS=EPSG:4326&WIDTH={width}&HEIGHT={height}&LAYERS={layer}&FORMAT=image/png&TIME={time}\"\nlegend_path = \"https://geo.weather.gc.ca/geomet?version=1.3.0&service=WMS&request=GetLegendGraphic&sld_version=1.1.0&layer={layer}&format=image/png&STYLE={style}\"\n\n\ndef get_station_coords(station_id):\n with open(\n os.path.join(os.path.dirname(__file__), \"radar_sites.json\")\n ) as sites_file:\n site_dict = json.loads(sites_file.read())\n return site_dict[station_id][\"lat\"], site_dict[station_id][\"lon\"]\n\n\ndef get_bounding_box(distance, latittude, longitude):\n \"\"\"\n Modified from https://gist.github.com/alexcpn/f95ae83a7ee0293a5225\n \"\"\"\n latittude = np.radians(latittude)\n longitude = np.radians(longitude)\n\n distance_from_point_km = distance\n angular_distance = distance_from_point_km / 6371.01\n\n lat_min = latittude - angular_distance\n lat_max = latittude + angular_distance\n\n delta_longitude = np.arcsin(np.sin(angular_distance) / np.cos(latittude))\n\n lon_min = longitude - delta_longitude\n lon_max = longitude + delta_longitude\n lon_min = np.degrees(lon_min)\n lat_max = np.degrees(lat_max)\n lon_max = np.degrees(lon_max)\n lat_min = np.degrees(lat_min)\n\n return lat_min, lon_min, lat_max, lon_max\n\n\nclass ECRadar(object):\n def __init__(\n self,\n station_id=None,\n coordinates=None,\n radius=200,\n precip_type=None,\n width=800,\n height=800,\n ):\n \"\"\"Initialize the data object.\"\"\"\n\n # Set precipitation type\n\n if precip_type:\n self.precip_type = precip_type.lower()\n elif datetime.date.today().month in range(4, 11):\n self.precip_type = \"rain\"\n else:\n self.precip_type = \"snow\"\n\n self.layer = layer[self.precip_type]\n\n # Get legend\n\n legend_url = legend_path.format(\n layer=self.layer, style=legend_style[self.precip_type]\n )\n legend_bytes = requests.get(url=legend_url).content\n self.legend_image = Image.open(BytesIO(legend_bytes)).convert(\"RGB\")\n legend_width, legend_height = self.legend_image.size\n self.legend_position = (width - legend_width, 0)\n\n # Get coordinates\n\n if station_id:\n coordinates = get_station_coords(station_id)\n\n self.bbox = get_bounding_box(radius, coordinates[0], coordinates[1])\n self.width = width\n self.height = height\n\n # Get basemap\n\n url = basemap_url.format(\n south=self.bbox[0],\n west=self.bbox[1],\n north=self.bbox[2],\n east=self.bbox[3],\n width=self.width,\n height=self.height,\n )\n 
self.base_bytes = requests.get(url).content\n\n self.timestamp = datetime.datetime.now()\n\n def get_dimensions(self):\n \"\"\"Get time range of available data.\"\"\"\n capabilities_xml = requests.get(capabilities_path.format(layer=self.layer)).text\n capabilities_tree = et.fromstring(\n capabilities_xml, parser=et.XMLParser(encoding=\"utf-8\")\n )\n dimension_string = capabilities_tree.find(\n dimension_xpath.format(layer=self.layer), namespaces=wms_namespace\n ).text\n start, end = [\n dateutil.parser.isoparse(t) for t in dimension_string.split(\"/\")[:2]\n ]\n self.timestamp = end.isoformat()\n return start, end\n\n def assemble_url(self, url_time):\n \"\"\"Construct WMS query URL.\"\"\"\n url = radar_path.format(\n south=self.bbox[0],\n west=self.bbox[1],\n north=self.bbox[2],\n east=self.bbox[3],\n width=self.width,\n height=self.height,\n layer=self.layer,\n time=url_time.strftime(\"%Y-%m-%dT%H:%M:00Z\"),\n )\n return url\n\n def combine_layers(self, radar_bytes, frame_time):\n \"\"\"Add radar overlay to base layer and add timestamp.\"\"\"\n\n base = Image.open(BytesIO(self.base_bytes)).convert(\"RGBA\")\n radar = Image.open(BytesIO(radar_bytes)).convert(\"RGBA\")\n frame = Image.alpha_composite(base, radar)\n frame.paste(self.legend_image, self.legend_position)\n\n # Add timestamp\n\n font_face = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 1.5\n font_thickness = 2\n\n timestamp = (\n self.precip_type.title() + \" @ \" + frame_time.astimezone().strftime(\"%H:%M\")\n )\n text_size = cv2.getTextSize(\n text=timestamp,\n fontFace=font_face,\n fontScale=font_scale,\n thickness=font_thickness,\n )[0]\n\n cv_image = cv2.cvtColor(np.array(frame), cv2.COLOR_RGBA2BGR)\n cv2.rectangle(\n img=cv_image,\n pt1=(0, 0),\n pt2=(text_size[0] + 10, text_size[1] + 10),\n color=(255, 255, 255),\n thickness=-1,\n )\n cv2.putText(\n img=cv_image,\n text=timestamp,\n org=(5, text_size[1] + 5),\n fontFace=font_face,\n fontScale=font_scale,\n color=(0, 0, 0),\n thickness=font_thickness,\n )\n\n frame_bytes = cv2.imencode(\".png\", cv_image)[1].tobytes()\n\n return frame_bytes\n\n def get_latest_frame(self):\n \"\"\"Get the latest image from Environment Canada.\"\"\"\n start, end = self.get_dimensions()\n radar = requests.get(self.assemble_url(end)).content\n return self.combine_layers(radar, end)\n\n def get_loop(self):\n \"\"\"Build an animated GIF of recent radar images.\"\"\"\n\n \"\"\"Build list of frame timestamps.\"\"\"\n start, end = self.get_dimensions()\n frame_times = [start]\n\n while True:\n next_frame = frame_times[-1] + datetime.timedelta(minutes=10)\n if next_frame > end:\n break\n else:\n frame_times.append(next_frame)\n\n \"\"\"Fetch frames.\"\"\"\n responses = []\n\n with FuturesSession(max_workers=len(frame_times)) as session:\n futures = [session.get(self.assemble_url(t)) for t in frame_times]\n for future in as_completed(futures):\n responses.append(future.result())\n\n responses = sorted(responses, key=lambda r: r.url)\n\n frames = []\n\n for i, f in enumerate(responses):\n frames.append(self.combine_layers(f.content, frame_times[i]))\n\n for f in range(3):\n frames.append(frames[-1])\n\n \"\"\"Assemble animated GIF.\"\"\"\n gif_frames = [imageio.imread(f) for f in frames]\n gif_bytes = imageio.mimwrite(\n imageio.RETURN_BYTES, gif_frames, format=\"GIF\", fps=5\n )\n return gif_bytes\n"
] |
[
[
"numpy.radians",
"numpy.degrees",
"numpy.cos",
"numpy.sin",
"numpy.array"
]
] |
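The `get_bounding_box` helper in the `code` cell above converts a radius in kilometres into a latitude/longitude box using small spherical-geometry steps (the identifier `latittude` is consistently misspelled there, which is harmless since it never leaves the function). A minimal standalone sketch of the same computation, with made-up station coordinates:

```python
import numpy as np

def get_bounding_box(distance_km, lat_deg, lon_deg):
    # Angular distance subtended by distance_km on a sphere of
    # Earth's mean radius (6371.01 km), as in the ECRadar code above.
    lat, lon = np.radians(lat_deg), np.radians(lon_deg)
    ang = distance_km / 6371.01
    dlon = np.arcsin(np.sin(ang) / np.cos(lat))
    # Same return order as the original: lat_min, lon_min, lat_max, lon_max.
    return (np.degrees(lat - ang), np.degrees(lon - dlon),
            np.degrees(lat + ang), np.degrees(lon + dlon))

# Hypothetical coordinates (roughly Ottawa) and the class's default 200 km radius.
print(get_bounding_box(200, 45.4, -75.7))
```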
PaipaPsyche/SuperClusterCharacterization
|
[
"8d86ad31aa34dfa6a44592b94c42cba38487216d"
] |
[
"Densidades (test)/plot_divergence.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 6 13:06:41 2018\n\n@author: David\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.convolution import Gaussian2DKernel,convolve\n\n\n\n\n#Data Loading========\ndata1 = np.loadtxt(\"Halo.txt\")\ndata1=np.transpose(data1)\n\n\nprints=0#Define si se hacen los prints de calculo y plot\n\ndataX=data1[0]\ndataY=data1[1]\ndataZ=data1[2]\ndataVx=data1[3]\ndataVy=data1[4]\ndataVz=data1[5]\ndataM=data1[6]\nMseg=[]\n\n\ndef segregar_corte_z(x, y, z, vx, vy, vz, mass, minz, dz):\n\tii = (z>minz) & (z<(minz+dz))\n\treturn x[ii], y[ii], z[ii], vx[ii], vy[ii], vz[ii], mass[ii]\n\nX, Y, Z,VX, VY, VZ, MASS = segregar_corte_z(dataX, dataY, dataZ, dataVx, dataVy, dataVz, dataM, 80,10)\n\nL_box = 1200\nn_side = 100\nl_side = L_box/n_side\nvx_grid = np.zeros([n_side, n_side])\nvy_grid = np.zeros([n_side, n_side])\n\n\n\nfor i in range (n_side):\n print('calculo ',i)\n for j in range (n_side):\n\n min_x = i * l_side\n min_y = j * l_side\n\n ii = (X>min_x) & (X<min_x + l_side) & (Y>min_y) & (Y<min_y+l_side)\n \n tmp_vx = VX[ii]\n tmp_vy = VY[ii]\n tmp_m = MASS[ii] \n masa_total = np.sum(tmp_m) + 1E-10\n vx_grid[i,j] = np.sum(tmp_m * tmp_vx) / masa_total\n vy_grid[i,j] = np.sum(tmp_m * tmp_vy) / masa_total\n\n \n#==========================================\ndef definir_divergencia(vxg,vyg):\n div=np.zeros([n_side,n_side])\n div[1:-1,1:-1] = (vxg[:-2,1:-1] - vxg[2:,1:-1]) + (vyg[1:-1,:-2] - vyg[1:-1,2:])\n return div\n\ndef puntos_criticos(div,Vmax,Vmin):\n PuntosC=np.zeros([n_side,n_side])\n ii=(div<=Vmin)\n jj=(div>Vmax)\n \n PuntosC[ii]=-1\n PuntosC[jj]=1\n return PuntosC\n\n\n \n#========================================================00\nDivergencia=definir_divergencia(vx_grid,vy_grid)\ndivplot=plt.figure(figsize=(10,10))\nplt.imshow(Divergencia.T)\n\naxARR=plt.axes()\nDlim=220\nDmin=Dlim\nDmax=Dlim\nPC=puntos_criticos(Divergencia,Dmax,Dmin)\n\nfor i in range(n_side):\n print('plot ',i)\n for j in range(n_side):\n \n xi = i \n yi = j \n v_div=200\n xf = vx_grid[i,j]/v_div\n yf = vy_grid[i,j]/v_div\n axARR.arrow(xi,yi,xf,yf,head_width=0.5,head_length=0.1,fc='k',ec='k', alpha=0.5 )\n \n\ndivplot.savefig('divergenciaOD.png')\n\n\nCPplot=plt.figure(figsize=(10,10))\naxPC=plt.axes()\nplt.imshow(PC.T)\nfor i in range(n_side):\n for j in range(n_side):\n xi = i \n yi = j \n v_div=200\n xf = vx_grid[i,j]/v_div\n yf = vy_grid[i,j]/v_div\n axPC.arrow(xi,yi,xf,yf,head_width=0.5,head_length=0.1,fc='k',ec='k', alpha=0.5 )\n\n\n\nCPplot.savefig(\"PuntosCriticos.png\")\ngauss=Gaussian2DKernel(3)\n \n \n\n\n\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axes",
"numpy.transpose",
"numpy.zeros",
"numpy.sum",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
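The divergence in the `code` cell above is an unnormalized central-difference stencil applied to the interior cells of the velocity grid. A quick sanity check of the same slicing on a toy 5×5 outflow field (grid size and values are illustrative; `np.zeros_like` stands in for the script's fixed `n_side` grid):

```python
import numpy as np

def divergence(vx, vy):
    # Central differences on interior cells, matching the slicing in
    # definir_divergencia above. Note the (i-1) minus (i+1) ordering,
    # which flips the sign relative to the usual convention.
    div = np.zeros_like(vx)
    div[1:-1, 1:-1] = (vx[:-2, 1:-1] - vx[2:, 1:-1]) + (vy[1:-1, :-2] - vy[1:-1, 2:])
    return div

# Outflow field: vx grows along axis 0, vy along axis 1.
i_grid, j_grid = np.mgrid[-2:3, -2:3].astype(float)
print(divergence(i_grid, j_grid))  # interior entries are -4.0 under this sign convention
```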
Saiprasad16/agents
|
[
"9e0972fc0878b29925ae496e883d80e7da3928aa"
] |
[
"tf_agents/utils/nest_utils.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for handling nested tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nimport collections\nimport numbers\n\nfrom typing import Optional, Text\n\nfrom absl import logging\nimport numpy as np\nfrom six.moves import zip\nimport tensorflow as tf\nfrom tf_agents.typing import types\nfrom tf_agents.utils import composite\nimport wrapt\n\n\n# TODO(b/128613858): Update to a public facing API.\nfrom tensorflow.python.util import nest # pylint:disable=g-direct-tensorflow-import # TF internal\n\n\ntry:\n # Python 3.3 and above.\n collections_abc = collections.abc\nexcept AttributeError:\n collections_abc = collections\n\n\nflatten_up_to = nest.flatten_up_to\nflatten_with_tuple_paths = nest.flatten_with_tuple_paths\nmap_structure_up_to = nest.map_structure_up_to\nmap_structure_with_paths = nest.map_structure_with_paths\n\n\nclass _Dot(object):\n \"\"\"An object whose representation is a simple '.'.\"\"\"\n\n def __repr__(self):\n return '.'\n\n def __str__(self):\n return '.'\n\n\n_DOT = _Dot()\n\n\ndef assert_same_structure(nest1,\n nest2,\n check_types: bool = True,\n expand_composites: bool = False,\n allow_shallow_nest1: bool = False,\n message: Optional[Text] = None) -> None:\n \"\"\"Same as tf.nest.assert_same_structure but with cleaner error messages.\n\n Args:\n nest1: an arbitrarily nested structure.\n nest2: an arbitrarily nested structure.\n check_types: if `True` (default) types of sequences are checked as well,\n including the keys of dictionaries. If set to `False`, for example a list\n and a tuple of objects will look the same if they have the same size. Note\n that namedtuples with identical name and fields are always considered to\n have the same shallow structure. Two types will also be considered the\n same if they are both list subtypes (which allows \"list\" and\n \"_ListWrapper\" from trackable dependency tracking to compare equal).\n expand_composites: If true, then composite tensors such as `tf.SparseTensor`\n and `tf.RaggedTensor` are expanded into their component tensors.\n allow_shallow_nest1: If `True`, `nest1` is allowed to be more shallow\n than `nest2`.\n message: Optional error message to provide in case of failure.\n\n Raises:\n ValueError: If the two structures do not have the same number of elements\n or if the two structures are not nested in the same way.\n TypeError: If the two structures differ in the type of sequence in any\n of their substructures. 
Only possible if `check_types is True`.\n \"\"\"\n if not isinstance(check_types, bool):\n raise TypeError(\n 'check_types must be a bool but saw: \\'{}\\''.format(check_types))\n if not isinstance(expand_composites, bool):\n raise TypeError('expand_composites must be a bool but saw: \\'{}\\''.format(\n expand_composites))\n message = message or 'The two structures do not match'\n exception = None\n\n if allow_shallow_nest1:\n check_fn = nest.assert_shallow_structure\n else:\n check_fn = tf.nest.assert_same_structure\n\n try:\n check_fn(\n nest1,\n nest2,\n check_types=check_types,\n expand_composites=expand_composites)\n except (TypeError, ValueError) as e:\n exception = type(e)\n\n if exception:\n str1 = tf.nest.map_structure(\n lambda _: _DOT, nest1, expand_composites=expand_composites)\n str2 = tf.nest.map_structure(\n lambda _: _DOT, nest2, expand_composites=expand_composites)\n raise exception('{}:\\n {}\\nvs.\\n {}\\nValues:\\n {}\\nvs.\\n {}.'\n .format(message, str1, str2, nest1, nest2))\n\n\ndef flatten_with_joined_paths(structure, expand_composites=False):\n flattened = flatten_with_tuple_paths(\n structure, expand_composites=expand_composites)\n\n def stringify_and_join(path_elements):\n return '/'.join(str(path_element) for path_element in path_elements)\n\n return [(stringify_and_join(path), value) for (path, value) in flattened]\n\n\ndef fast_map_structure_flatten(func, structure, *flat_structure, **kwargs):\n expand_composites = kwargs.get('expand_composites', False)\n entries = zip(*flat_structure)\n return tf.nest.pack_sequence_as(\n structure, [func(*x) for x in entries],\n expand_composites=expand_composites)\n\n\ndef fast_map_structure(func, *structure, **kwargs):\n expand_composites = kwargs.get('expand_composites', False)\n flat_structure = [\n tf.nest.flatten(s, expand_composites=expand_composites) for s in structure\n ]\n entries = zip(*flat_structure)\n\n return tf.nest.pack_sequence_as(\n structure[0], [func(*x) for x in entries],\n expand_composites=expand_composites)\n\n\ndef has_tensors(*x):\n return np.any(\n [tf.is_tensor(t) for t in tf.nest.flatten(x, expand_composites=True)])\n\n\ndef _is_namedtuple(x):\n return (isinstance(x, tuple)\n and isinstance(getattr(x, '_fields', None), collections_abc.Sequence))\n\n\ndef _is_attrs(x):\n return getattr(type(x), '__attrs_attrs__', None) is not None\n\n\ndef _attr_items(x):\n attrs = getattr(type(x), '__attrs_attrs__')\n attr_names = [a.name for a in attrs]\n return [(attr_name, getattr(x, attr_name)) for attr_name in attr_names]\n\n\ndef prune_extra_keys(narrow, wide):\n \"\"\"Recursively prunes keys from `wide` if they don't appear in `narrow`.\n\n Often used as preprocessing prior to calling `tf.nest.flatten`\n or `tf.nest.map_structure`.\n\n This function is more forgiving than the ones in `nest`; if two substructures'\n types or structures don't agree, we consider it invalid and `prune_extra_keys`\n will return the `wide` substructure as is. 
Typically, additional checking is\n needed: you will also want to use\n `nest.assert_same_structure(narrow, prune_extra_keys(narrow, wide))`\n to ensure the result of pruning is still a correct structure.\n\n Examples:\n ```python\n wide = [{\"a\": \"a\", \"b\": \"b\"}]\n # Narrows 'wide'\n assert prune_extra_keys([{\"a\": 1}], wide) == [{\"a\": \"a\"}]\n # 'wide' lacks \"c\", is considered invalid.\n assert prune_extra_keys([{\"c\": 1}], wide) == wide\n # 'wide' contains a different type from 'narrow', is considered invalid\n assert prune_extra_keys(\"scalar\", wide) == wide\n # 'wide' substructure for key \"d\" does not match the one in 'narrow' and\n # therefore is returned unmodified.\n assert (prune_extra_keys({\"a\": {\"b\": 1}, \"d\": None},\n {\"a\": {\"b\": \"b\", \"c\": \"c\"}, \"d\": [1, 2]})\n == {\"a\": {\"b\": \"b\"}, \"d\": [1, 2]})\n # assert prune_extra_keys((), wide) == ()\n # assert prune_extra_keys({\"a\": ()}, wide) == {\"a\": ()}\n ```\n\n Args:\n narrow: A nested structure.\n wide: A nested structure that may contain dicts with more fields than\n `narrow`.\n\n Returns:\n A structure with the same nested substructures as `wide`, but with\n dicts whose entries are limited to the keys found in the associated\n substructures of `narrow`.\n\n In case of substructure or size mismatches, the returned substructures\n will be returned as is. Note that ObjectProxy-wrapped objects are\n considered equivalent to their non-ObjectProxy types.\n \"\"\"\n # If `narrow` is `()`, then `()` is returned. That is, we narrow any\n # object w.r.t. an empty tuple to to an empty tuple. We use `id()`\n # here because the emtpy tuple is a singleton in cpython and\n # because using \"x is ()\" or \"x == ()\" gives syntax warnings for\n # numpy arrays.\n narrow_raw = (narrow.__wrapped__ if isinstance(narrow, wrapt.ObjectProxy)\n else narrow)\n\n if id(narrow_raw) == id(()):\n return narrow\n\n if isinstance(wide, wrapt.ObjectProxy):\n return type(wide)(prune_extra_keys(narrow, wide.__wrapped__))\n\n wide_raw = (wide.__wrapped__ if isinstance(wide, wrapt.ObjectProxy) else wide)\n\n if ((type(narrow_raw) != type(wide_raw)) # pylint: disable=unidiomatic-typecheck\n and not (isinstance(narrow_raw, list) and isinstance(wide_raw, list))\n and not (isinstance(narrow_raw, collections_abc.Mapping)\n and isinstance(wide_raw, collections_abc.Mapping))):\n # We return early if the types are different; but we make some exceptions:\n # list subtypes are considered the same (e.g. ListWrapper and list())\n # Mapping subtypes are considered the same (e.g. 
DictWrapper and dict())\n # (TupleWrapper subtypes are handled by unwrapping ObjectProxy above).\n return wide\n\n if isinstance(narrow, collections_abc.Mapping):\n if len(narrow) > len(wide):\n # wide lacks a required key from narrow; return early.\n return wide\n\n narrow_keys = set(narrow.keys())\n wide_keys = set(wide.keys())\n if not wide_keys.issuperset(narrow_keys):\n # wide lacks a required key from narrow; return early.\n return wide\n ordered_items = [\n (k, prune_extra_keys(v, wide[k]))\n for k, v in narrow.items()]\n if isinstance(wide, collections.defaultdict):\n subset = type(wide)(wide.default_factory, ordered_items)\n else:\n subset = type(wide)(ordered_items)\n return subset\n\n if nest.is_sequence(narrow):\n if _is_attrs(wide):\n items = [prune_extra_keys(n, w)\n for n, w in zip(_attr_items(narrow), _attr_items(wide))]\n return type(wide)(*items)\n\n # Not an attrs, so can treat as lists or tuples from here on.\n if len(narrow) != len(wide):\n # wide's size is different than narrow; return early.\n return wide\n\n items = [prune_extra_keys(n, w) for n, w in zip(narrow, wide)]\n if _is_namedtuple(wide):\n return type(wide)(*items)\n elif _is_attrs(wide):\n return type(wide)\n return type(wide)(items)\n\n # narrow is a leaf, just return wide\n return wide\n\n\ndef assert_tensors_matching_dtypes_and_shapes(tensors_1, tensors_2, caller,\n tensors_1_name, tensors_2_name):\n \"\"\"Checks if tensors have matching dtypes and shapes.\n\n Args:\n tensors_1: A nest of tensor objects.\n tensors_2: A nest of tensor objects.\n caller: The object calling `assert...`.\n tensors_1_name: (str) Name to use for tensors_1 in case of an error.\n tensors_2_name: (str) Name to use for tensors_2 in case of an error.\n\n Raises:\n ValueError: If the tensors do not match dtypes or shapes.\n \"\"\"\n assert_same_structure(\n tensors_1,\n tensors_2,\n message=('{}: {} and {} do not have matching structures'.format(\n caller, tensors_1_name, tensors_2_name)))\n\n def convert_to_tensor(t):\n return tf.convert_to_tensor(t) if not tf.is_tensor(t) else t\n\n flat_t1 = tf.nest.map_structure(convert_to_tensor, tf.nest.flatten(tensors_1))\n flat_t2 = tf.nest.map_structure(convert_to_tensor, tf.nest.flatten(tensors_2))\n\n t1_shapes = [t.shape for t in flat_t1]\n t1_dtypes = [t.dtype for t in flat_t1]\n t2_shapes = [t.shape for t in flat_t2]\n t2_dtypes = [t.dtype for t in flat_t2]\n\n compatible = True\n\n if any(\n t1_dtype != t2_dtype for t1_dtype, t2_dtype in zip(t1_dtypes, t2_dtypes)):\n compatible = False\n else:\n for t1_shape, t2_shape in zip(t1_shapes, t2_shapes):\n if t1_shape.ndims != t2_shape.ndims:\n compatible = False\n break\n\n if not compatible:\n get_dtypes = lambda v: tf.nest.map_structure(lambda x: x.dtype, v)\n get_shapes = lambda v: tf.nest.map_structure(lambda x: x.shape, v)\n raise ValueError('{}: Inconsistent dtypes or shapes between {} and {}.\\n'\n 'dtypes:\\n{}\\nvs.\\n{}.\\n'\n 'shapes:\\n{}\\nvs.\\n{}.'.format(caller, tensors_1_name,\n tensors_2_name,\n get_dtypes(tensors_1),\n get_dtypes(tensors_2),\n get_shapes(tensors_1),\n get_shapes(tensors_2)))\n\n\ndef assert_matching_dtypes_and_inner_shapes(tensors,\n specs,\n caller,\n tensors_name,\n specs_name,\n allow_extra_fields=False):\n \"\"\"Returns `True` if tensors and specs have matching dtypes and inner shapes.\n\n Args:\n tensors: A nest of tensor objects.\n specs: A nest of `tf.TypeSpec` objects.\n caller: The object calling `assert...`.\n tensors_name: (str) Name to use for the tensors in case of an error.\n specs_name: 
(str) Name to use for the specs in case of an error.\n allow_extra_fields: If `True`, then `tensors` may contain more keys or list\n fields than strictly required by `specs`.\n\n Raises:\n ValueError: If the tensors do not match the specs' dtypes or their inner\n shapes do not match the specs' shapes.\n \"\"\"\n if allow_extra_fields:\n tensors = prune_extra_keys(specs, tensors)\n assert_same_structure(\n tensors,\n specs,\n message=('{}: {} and {} do not have matching structures'.format(\n caller, tensors_name, specs_name)))\n\n flat_tensors = nest.flatten(tensors)\n flat_specs = tf.nest.flatten(specs)\n flat_tensors = [\n tf.convert_to_tensor(t, dtype_hint=s.dtype) if not tf.is_tensor(t) else t\n for (t, s) in zip(flat_tensors, flat_specs)\n ]\n\n tensor_shapes = [t.shape for t in flat_tensors]\n tensor_dtypes = [t.dtype for t in flat_tensors]\n spec_shapes = [spec_shape(s) for s in flat_specs]\n spec_dtypes = [t.dtype for t in flat_specs]\n\n compatible = True\n\n if any(s_dtype != t_dtype\n for s_dtype, t_dtype in zip(spec_dtypes, tensor_dtypes)):\n compatible = False\n else:\n for s_shape, t_shape in zip(spec_shapes, tensor_shapes):\n if s_shape.ndims in (0, None) or t_shape.ndims is None:\n continue\n if s_shape.ndims > t_shape.ndims:\n compatible = False\n break\n if not s_shape.is_compatible_with(t_shape[-s_shape.ndims:]):\n compatible = False\n break\n\n if not compatible:\n get_dtypes = lambda v: tf.nest.map_structure(lambda x: x.dtype, v)\n get_shapes = lambda v: tf.nest.map_structure(spec_shape, v)\n raise ValueError('{}: Inconsistent dtypes or shapes between {} and {}.\\n'\n 'dtypes:\\n{}\\nvs.\\n{}.\\n'\n 'shapes:\\n{}\\nvs.\\n{}.'.format(\n caller,\n tensors_name,\n specs_name,\n get_dtypes(tensors),\n get_dtypes(specs),\n get_shapes(tensors),\n get_shapes(specs)))\n\n\ndef is_batched_nested_tensors(tensors,\n specs,\n num_outer_dims=1,\n allow_extra_fields=False,\n check_dtypes=True):\n \"\"\"Compares tensors to specs to determine if all tensors are batched or not.\n\n For each tensor, it checks the dimensions and dtypes with respect to specs.\n\n Returns `True` if all tensors are batched and `False` if all tensors are\n unbatched.\n\n Raises a `ValueError` if the shapes are incompatible or a mix of batched and\n unbatched tensors are provided.\n\n Raises a `TypeError` if tensors' dtypes do not match specs.\n\n Args:\n tensors: Nested list/tuple/dict of Tensors.\n specs: Nested list/tuple/dict of Tensors or CompositeTensors describing the\n shape of unbatched tensors.\n num_outer_dims: The integer number of dimensions that are considered batch\n dimensions. Default 1.\n allow_extra_fields: If `True`, then `tensors` may have extra subfields which\n are not in specs. In this case, the extra subfields\n will not be checked. For example: ```python\n tensors = {\"a\": tf.zeros((3, 4), dtype=tf.float32),\n \"b\": tf.zeros((5, 6), dtype=tf.float32)}\n specs = {\"a\": tf.TensorSpec(shape=(4,), dtype=tf.float32)} assert\n is_batched_nested_tensors(tensors, specs, allow_extra_fields=True) ```\n The above example would raise a ValueError if `allow_extra_fields` was\n False.\n check_dtypes: If `True` will validate that tensors and specs have the same\n dtypes.\n\n Returns:\n True if all Tensors are batched and False if all Tensors are unbatched.\n\n Raises:\n ValueError: If\n 1. Any of the tensors or specs have shapes with ndims == None, or\n 2. The shape of Tensors are not compatible with specs, or\n 3. A mix of batched and unbatched tensors are provided.\n 4. 
The tensors are batched but have an incorrect number of outer dims.\n TypeError: If `dtypes` between tensors and specs are not compatible.\n \"\"\"\n if allow_extra_fields:\n tensors = prune_extra_keys(specs, tensors)\n\n assert_same_structure(\n tensors,\n specs,\n message='Tensors and specs do not have matching structures')\n flat_tensors = nest.flatten(tensors)\n flat_specs = tf.nest.flatten(specs)\n\n tensor_shapes = [t.shape for t in flat_tensors]\n tensor_dtypes = [t.dtype for t in flat_tensors]\n spec_shapes = [spec_shape(s) for s in flat_specs]\n spec_dtypes = [t.dtype for t in flat_specs]\n\n if any(s_shape.rank is None for s_shape in spec_shapes):\n raise ValueError('All specs should have ndims defined. Saw shapes: %s' %\n (tf.nest.pack_sequence_as(specs, spec_shapes),))\n\n if any(t_shape.rank is None for t_shape in tensor_shapes):\n raise ValueError('All tensors should have ndims defined. Saw shapes: %s' %\n (tf.nest.pack_sequence_as(specs, tensor_shapes),))\n\n if (check_dtypes and\n any(s_dtype != t_dtype\n for s_dtype, t_dtype in zip(spec_dtypes, tensor_dtypes))):\n raise TypeError('Tensor dtypes do not match spec dtypes:\\n{}\\nvs.\\n{}'\n .format(tf.nest.pack_sequence_as(specs, tensor_dtypes),\n tf.nest.pack_sequence_as(specs, spec_dtypes)))\n is_unbatched = [\n s_shape.is_compatible_with(t_shape)\n for s_shape, t_shape in zip(spec_shapes, tensor_shapes)\n ]\n\n if all(is_unbatched):\n return False\n\n tensor_ndims_discrepancy = [\n t_shape.rank - s_shape.rank\n for s_shape, t_shape in zip(spec_shapes, tensor_shapes)\n ]\n\n tensor_matches_spec = [\n s_shape.is_compatible_with(t_shape[discrepancy:])\n for discrepancy, s_shape, t_shape in zip(\n tensor_ndims_discrepancy, spec_shapes, tensor_shapes)\n ]\n\n # Check if all tensors match and have correct number of outer_dims.\n is_batched = (\n all(discrepancy == num_outer_dims\n for discrepancy in tensor_ndims_discrepancy) and\n all(tensor_matches_spec))\n\n if is_batched:\n return True\n\n # Check if tensors match but have incorrect number of batch dimensions.\n if all(\n discrepancy == tensor_ndims_discrepancy[0]\n for discrepancy in tensor_ndims_discrepancy) and all(tensor_matches_spec):\n return False\n\n raise ValueError(\n 'Received a mix of batched and unbatched Tensors, or Tensors'\n ' are not compatible with Specs. 
num_outer_dims: %d.\\n'\n 'Saw tensor_shapes:\\n %s\\n'\n 'And spec_shapes:\\n %s' %\n (num_outer_dims, tf.nest.pack_sequence_as(specs, tensor_shapes),\n tf.nest.pack_sequence_as(specs, spec_shapes)))\n\n\ndef spec_shape(t):\n if isinstance(t, tf.SparseTensor):\n rank = tf.dimension_value(t.dense_shape.shape[0])\n return tf.TensorShape([None] * rank)\n else:\n return t.shape\n\n\ndef batch_nested_tensors(tensors, specs=None):\n \"\"\"Add batch dimension if needed to nested tensors while checking their specs.\n\n If specs is None, a batch dimension is added to each tensor.\n If specs are provided, each tensor is compared to the corresponding spec,\n and a batch dimension is added only if the tensor doesn't already have it.\n\n For each tensor, it checks the dimensions with respect to specs, and adds an\n extra batch dimension if it doesn't already have it.\n\n Args:\n tensors: Nested list/tuple or dict of Tensors.\n specs: Nested list/tuple or dict of TensorSpecs, describing the shape of the\n non-batched Tensors.\n\n Returns:\n A nested batched version of each tensor.\n Raises:\n ValueError: if the tensors and specs have incompatible dimensions or shapes.\n \"\"\"\n if specs is None:\n return tf.nest.map_structure(lambda x: composite.expand_dims(x, 0), tensors)\n\n assert_same_structure(\n tensors,\n specs,\n message='Tensors and specs do not have matching structures')\n\n flat_tensors = tf.nest.flatten(tensors)\n flat_shapes = [spec_shape(s) for s in tf.nest.flatten(specs)]\n batched_tensors = []\n\n tensor_rank = lambda tensor: tensor.shape.rank\n for tensor, shape in zip(flat_tensors, flat_shapes):\n if tensor_rank(tensor) == shape.rank:\n tensor.shape.assert_is_compatible_with(shape)\n tensor = composite.expand_dims(tensor, 0)\n elif tensor_rank(tensor) == shape.rank + 1:\n tensor.shape[1:].assert_is_compatible_with(shape)\n else:\n raise ValueError('Tensor does not have the correct dimensions. '\n 'tensor.shape {} expected shape {}'.format(\n tensor.shape, shape))\n batched_tensors.append(tensor)\n return tf.nest.pack_sequence_as(tensors, batched_tensors)\n\n\ndef _flatten_and_check_shape_nested_tensors(tensors, specs, num_outer_dims=1):\n \"\"\"Flatten nested tensors and check their shape for use in other functions.\"\"\"\n assert_same_structure(\n tensors,\n specs,\n message='Tensors and specs do not have matching structures')\n flat_tensors = tf.nest.flatten(tensors)\n flat_shapes = [spec_shape(s) for s in tf.nest.flatten(specs)]\n for tensor, shape in zip(flat_tensors, flat_shapes):\n if tensor.shape.rank == shape.rank:\n tensor.shape.assert_is_compatible_with(shape)\n elif tensor.shape.rank == shape.rank + num_outer_dims:\n tensor.shape[num_outer_dims:].assert_is_compatible_with(shape)\n else:\n raise ValueError('Tensor does not have the correct dimensions. 
'\n 'tensor.shape {} expected shape {}'.format(\n tensor.shape, [None] + shape.as_list()))\n return flat_tensors, flat_shapes\n\n\ndef flatten_and_check_shape_nested_specs(specs, reference_specs):\n \"\"\"Flatten nested specs and check their shape for use in other functions.\"\"\"\n try:\n flat_specs, flat_shapes = _flatten_and_check_shape_nested_tensors(\n specs, reference_specs, num_outer_dims=0)\n except ValueError:\n raise ValueError('specs must be compatible with reference_specs'\n '; instead got specs=%s, reference_specs=%s' %\n (specs, reference_specs))\n return flat_specs, flat_shapes\n\n\ndef unbatch_nested_tensors(tensors, specs=None):\n \"\"\"Remove the batch dimension if needed from nested tensors using their specs.\n\n If specs is None, the first dimension of each tensor will be removed.\n If specs are provided, each tensor is compared to the corresponding spec,\n and the first dimension is removed only if the tensor was batched.\n\n Args:\n tensors: Nested list/tuple or dict of batched Tensors.\n specs: Nested list/tuple or dict of TensorSpecs, describing the shape of the\n non-batched Tensors.\n\n Returns:\n A nested non-batched version of each tensor.\n Raises:\n ValueError: if the tensors and specs have incompatible dimensions or shapes.\n \"\"\"\n if specs is None:\n return tf.nest.map_structure(lambda x: composite.squeeze(x, 0), tensors)\n\n unbatched_tensors = []\n flat_tensors, flat_shapes = _flatten_and_check_shape_nested_tensors(\n tensors, specs)\n for tensor, shape in zip(flat_tensors, flat_shapes):\n if tensor.shape.rank == shape.rank + 1:\n tensor = composite.squeeze(tensor, 0)\n unbatched_tensors.append(tensor)\n return tf.nest.pack_sequence_as(tensors, unbatched_tensors)\n\n\ndef split_nested_tensors(tensors, specs, num_or_size_splits):\n \"\"\"Split batched nested tensors, on batch dim (outer dim), into a list.\n\n Args:\n tensors: Nested list/tuple or dict of batched Tensors.\n specs: Nested list/tuple or dict of TensorSpecs, describing the shape of the\n non-batched Tensors.\n num_or_size_splits: Same as argument for tf.split. Either a python integer\n indicating the number of splits along batch_dim or a list of integer\n Tensors containing the sizes of each output tensor along batch_dim. If a\n scalar then it must evenly divide value.shape[axis]; otherwise the sum of\n sizes along the split dimension must match that of the value. For\n `SparseTensor` inputs, `num_or_size_splits` must be the scalar `num_split`\n (see documentation of `tf.sparse.split` for more details).\n\n Returns:\n A list of nested non-batched version of each tensor, where each list item\n corresponds to one batch item.\n Raises:\n ValueError: if the tensors and specs have incompatible dimensions or shapes.\n ValueError: if a non-scalar is passed and there are SparseTensors in the\n structure.\n \"\"\"\n split_tensor_lists = []\n flat_tensors, flat_shapes = _flatten_and_check_shape_nested_tensors(\n tensors, specs)\n for tensor, shape in zip(flat_tensors, flat_shapes):\n if tensor.shape.rank == shape.rank:\n raise ValueError('Can only split tensors with a batch dimension.')\n if tensor.shape.rank == shape.rank + 1:\n if isinstance(tensor, tf.SparseTensor):\n if not isinstance(num_or_size_splits, numbers.Number):\n raise ValueError(\n 'Saw a SparseTensor, for which num_or_size_splits must be a '\n 'scalar. 
But it is not: {}'.format(num_or_size_splits))\n split_tensors = tf.sparse.split(\n sp_input=tensor, num_split=num_or_size_splits, axis=0)\n else:\n split_tensors = tf.split(tensor, num_or_size_splits)\n split_tensor_lists.append(split_tensors)\n split_tensors_zipped = zip(*split_tensor_lists)\n return [\n tf.nest.pack_sequence_as(tensors, zipped)\n for zipped in split_tensors_zipped\n ]\n\n\ndef unstack_nested_tensors(tensors, specs):\n \"\"\"Make list of unstacked nested tensors.\n\n Args:\n tensors: Nested tensors whose first dimension is to be unstacked.\n specs: Tensor specs for tensors.\n\n Returns:\n A list of the unstacked nested tensors.\n Raises:\n ValueError: if the tensors and specs have incompatible dimensions or shapes.\n \"\"\"\n unstacked_tensor_lists = []\n flat_tensors, flat_shapes = _flatten_and_check_shape_nested_tensors(\n tensors, specs)\n for tensor, shape in zip(flat_tensors, flat_shapes):\n if tensor.shape.rank == shape.rank:\n raise ValueError('Can only unstack tensors with a batch dimension.')\n if tensor.shape.rank == shape.rank + 1:\n unstacked_tensors = tf.unstack(tensor)\n unstacked_tensor_lists.append(unstacked_tensors)\n unstacked_tensors_zipped = zip(*unstacked_tensor_lists)\n return [\n tf.nest.pack_sequence_as(tensors, zipped)\n for zipped in unstacked_tensors_zipped\n ]\n\n\ndef stack_nested_tensors(tensors, axis=0):\n \"\"\"Stacks a list of nested tensors along the dimension specified.\n\n Args:\n tensors: A list of nested tensors to be stacked.\n axis: the axis along which the stack operation is applied.\n\n Returns:\n A stacked nested tensor.\n \"\"\"\n return tf.nest.map_structure(lambda *tensors: tf.stack(tensors, axis=axis),\n *tensors)\n\n\ndef flatten_multi_batched_nested_tensors(tensors, specs):\n \"\"\"Reshape tensors to contain only one batch dimension.\n\n For each tensor, it checks the number of extra dimensions beyond those in\n the spec, and reshapes tensor to have only one batch dimension.\n NOTE: Each tensor's batch dimensions must be the same.\n\n Args:\n tensors: Nested list/tuple or dict of batched Tensors or SparseTensors.\n specs: Nested list/tuple or dict of TensorSpecs, describing the shape of the\n non-batched Tensors.\n\n Returns:\n A nested version of each tensor with a single batch dimension.\n A list of the batch dimensions which were flattened.\n Raises:\n ValueError: if the tensors and specs have incompatible dimensions or shapes.\n \"\"\"\n assert_same_structure(\n tensors,\n specs,\n message='Tensors and specs do not have matching structures')\n flat_tensors = tf.nest.flatten(tensors)\n flat_shapes = [spec_shape(s) for s in tf.nest.flatten(specs)]\n out_tensors = []\n batch_dims = []\n for i, (tensor, shape) in enumerate(zip(flat_tensors, flat_shapes)):\n if i == 0: # Set batch_dims based on first tensor.\n batch_dims = tensor.shape[:tensor.shape.rank - shape.rank]\n if batch_dims.is_fully_defined():\n batch_dims = batch_dims.as_list()\n batch_prod = np.prod(batch_dims)\n batch_dims = tf.constant(batch_dims, dtype=tf.int64)\n else:\n batch_dims = tf.shape(tensor)[:tensor.shape.rank - shape.rank]\n batch_prod = tf.reduce_prod(batch_dims)\n reshaped_dims = [batch_prod] + shape.as_list()\n out_tensors.append(composite.reshape(tensor, reshaped_dims))\n return tf.nest.pack_sequence_as(tensors, out_tensors), batch_dims\n\n\ndef get_outer_shape(nested_tensor, spec):\n \"\"\"Runtime batch dims of tensor's batch dimension `dim`.\n\n Args:\n nested_tensor: Nest of tensors.\n spec: The nested spec.\n\n Returns:\n A `Tensor` 
containing the outer shape.\n\n Raises:\n ValueError: If `nested_tensor` and `spec` have different structures.\n TypeError: If `nested_tensor` and `spec` structures have differing types.\n \"\"\"\n assert_same_structure(\n nested_tensor,\n spec,\n message='Tensors and specs do not have matching structures')\n first_tensor = tf.nest.flatten(nested_tensor)[0]\n first_spec = tf.nest.flatten(spec)[0]\n\n # Check tensors have same batch shape.\n num_outer_dims = (len(first_tensor.shape) - len(first_spec.shape))\n if not is_batched_nested_tensors(\n nested_tensor, spec, num_outer_dims=num_outer_dims, check_dtypes=False):\n return tf.constant([], dtype=tf.int32)\n\n return tf.shape(input=first_tensor)[:num_outer_dims]\n\n\ndef get_outer_rank(tensors, specs):\n \"\"\"Compares tensors to specs to determine the number of batch dimensions.\n\n For each tensor, it checks the dimensions with respect to specs and\n returns the number of batch dimensions if all nested tensors and\n specs agree with each other.\n\n Args:\n tensors: Nested list/tuple/dict of Tensors or SparseTensors.\n specs: Nested list/tuple/dict of TensorSpecs, describing the shape of\n unbatched tensors.\n\n Returns:\n The number of outer dimensions for all Tensors (zero if all are\n unbatched or empty).\n Raises:\n ValueError: If\n 1. Any of the tensors or specs have shapes with ndims == None, or\n 2. The shape of Tensors are not compatible with specs, or\n 3. A mix of batched and unbatched tensors are provided.\n 4. The tensors are batched but have an incorrect number of outer dims.\n \"\"\"\n assert_same_structure(\n tensors,\n specs,\n message='Tensors and specs do not have matching structures')\n tensor_shapes = [t.shape for t in tf.nest.flatten(tensors)]\n spec_shapes = [spec_shape(s) for s in tf.nest.flatten(specs)]\n\n if any(s_shape.rank is None for s_shape in spec_shapes):\n raise ValueError('All specs should have ndims defined. Saw shapes: %s' %\n spec_shapes)\n\n if any(t_shape.rank is None for t_shape in tensor_shapes):\n raise ValueError('All tensors should have ndims defined. Saw shapes: %s' %\n tensor_shapes)\n\n is_unbatched = [\n s_shape.is_compatible_with(t_shape)\n for s_shape, t_shape in zip(spec_shapes, tensor_shapes)\n ]\n if all(is_unbatched):\n return 0\n\n tensor_ndims_discrepancy = [\n t_shape.rank - s_shape.rank\n for s_shape, t_shape in zip(spec_shapes, tensor_shapes)\n ]\n\n tensor_matches_spec = [\n s_shape.is_compatible_with(t_shape[discrepancy:])\n for discrepancy, s_shape, t_shape in zip(\n tensor_ndims_discrepancy, spec_shapes, tensor_shapes)\n ]\n\n # At this point we are guaranteed to have at least one tensor/spec.\n num_outer_dims = tensor_ndims_discrepancy[0]\n\n # Check if all tensors match and have correct number of batch dimensions.\n is_batched = (\n all(discrepancy == num_outer_dims\n for discrepancy in tensor_ndims_discrepancy) and\n all(tensor_matches_spec))\n\n if is_batched:\n return num_outer_dims\n\n # Check if tensors match but have incorrect number of batch dimensions.\n incorrect_batch_dims = (\n tensor_ndims_discrepancy and\n all(discrepancy == tensor_ndims_discrepancy[0] and discrepancy >= 0\n for discrepancy in tensor_ndims_discrepancy) and\n all(tensor_matches_spec))\n\n if incorrect_batch_dims:\n raise ValueError('Received tensors with %d outer dimensions. '\n 'Expected %d.' %\n (tensor_ndims_discrepancy[0], num_outer_dims))\n\n raise ValueError('Received a mix of batched and unbatched Tensors, or Tensors'\n ' are not compatible with Specs. 
num_outer_dims: %d.\\n'\n 'Saw tensor_shapes:\\n %s\\n'\n 'And spec_shapes:\\n %s' %\n (num_outer_dims, tensor_shapes, spec_shapes))\n\n\ndef batch_nested_array(nested_array):\n return tf.nest.map_structure(lambda x: np.expand_dims(x, 0), nested_array)\n\n\ndef unbatch_nested_array(nested_array):\n return tf.nest.map_structure(lambda x: np.squeeze(x, 0), nested_array)\n\n\ndef unbatch_nested_tensors_to_arrays(nested_tensors):\n\n def _to_unbatched_numpy(tensor):\n return np.squeeze(tensor.numpy(), 0)\n\n return tf.nest.map_structure(_to_unbatched_numpy, nested_tensors)\n\n\ndef _unstack_nested_arrays_into_flat_item_iterator(nested_array):\n\n def _unstack(array):\n # Use numpy views instead of np.split, it's 5x+ faster.\n return [array[i] for i in range(len(array))]\n\n return zip(*[_unstack(array) for array in tf.nest.flatten(nested_array)])\n\n\ndef unstack_nested_arrays(nested_array):\n \"\"\"Unstack/unbatch a nest of numpy arrays.\n\n Args:\n nested_array: Nest of numpy arrays where each array has shape [batch_size,\n ...].\n\n Returns:\n A list of length batch_size where each item in the list is a nest\n having the same structure as `nested_array`.\n \"\"\"\n\n return [\n tf.nest.pack_sequence_as(nested_array, zipped)\n for zipped in _unstack_nested_arrays_into_flat_item_iterator(nested_array)\n ]\n\n\ndef unstack_nested_arrays_into_flat_items(nested_array):\n \"\"\"Unstack/unbatch a nest of numpy arrays into flat items.\n\n Rebuild the nested structure of the unbatched elements is expensive. On the\n other hand it is sometimes unnecessary (e.g. if the downstream processing\n requires flattened structure, e.g. some replay buffer writers which flattens\n the items anyway).\n\n Args:\n nested_array: Nest of numpy arrays where each array has shape [batch_size,\n ...].\n\n Returns:\n A list of length batch_size where each item in the list is the flattened\n version of the corresponding item of the input.\n \"\"\"\n\n return list(_unstack_nested_arrays_into_flat_item_iterator(nested_array))\n\n\ndef stack_nested_arrays(nested_arrays):\n \"\"\"Stack/batch a list of nested numpy arrays.\n\n Args:\n nested_arrays: A list of nested numpy arrays of the same shape/structure.\n\n Returns:\n A nested array containing batched items, where each batched item is obtained\n by stacking corresponding items from the list of nested_arrays.\n \"\"\"\n nested_arrays_flattened = [tf.nest.flatten(a) for a in nested_arrays]\n batched_nested_array_flattened = [\n np.stack(a) for a in zip(*nested_arrays_flattened)\n ]\n return tf.nest.pack_sequence_as(nested_arrays[0],\n batched_nested_array_flattened)\n\n\ndef get_outer_array_shape(nested_array, spec):\n \"\"\"Batch dims of array's batch dimension `dim`.\"\"\"\n first_array = tf.nest.flatten(nested_array)[0]\n first_spec = tf.nest.flatten(spec)[0]\n num_outer_dims = len(first_array.shape) - len(first_spec.shape)\n return first_array.shape[:num_outer_dims]\n\n\ndef where(condition, true_outputs, false_outputs):\n \"\"\"Generalization of tf.where for nested structures.\n\n This generalization handles applying where across nested structures and the\n special case where the rank of the condition is smaller than the rank of the\n true and false cases.\n\n Args:\n condition: A boolean Tensor of shape [B, ...]. The shape of condition must\n be equal to or a prefix of the shape of true_outputs and false_outputs. 
If\n condition's rank is smaller than the rank of true_outputs and\n false_outputs, dimensions of size 1 are added to condition to make its\n rank match that of true_outputs and false_outputs in order to satisfy the\n requirements of tf.where.\n true_outputs: Tensor or nested tuple of Tensors of any dtype, each with\n shape [B, ...], to be split based on `condition`.\n false_outputs: Tensor or nested tuple of Tensors of any dtype, each with\n shape [B, ...], to be split based on `condition`.\n\n Returns:\n Interleaved output from `true_outputs` and `false_outputs` based on\n `condition`.\n \"\"\"\n assert_same_structure(\n true_outputs,\n false_outputs,\n message='\"true_outputs\" and \"false_outputs\" structures do not match')\n if tf.nest.flatten(true_outputs):\n case_rank = tf.rank(tf.nest.flatten(true_outputs)[0])\n rank_difference = case_rank - tf.rank(condition)\n condition_shape = tf.concat(\n [tf.shape(condition),\n tf.ones(rank_difference, dtype=tf.int32)], axis=0)\n condition = tf.reshape(condition, condition_shape)\n\n return tf.nest.map_structure(\n lambda t, f: tf.compat.v2.where(condition, t, f), true_outputs,\n false_outputs)\n\n\ndef remove_singleton_batch_spec_dim(spec: tf.TypeSpec,\n outer_ndim: int) -> tf.TypeSpec:\n \"\"\"Look for `spec`'s shape, check that outer dim is 1, and remove it.\n\n If `spec.shape[i] != 1` for any `i in range(outer_ndim)`, we stop removing\n singleton batch dimensions at `i` and return what's left. This is necessary\n to handle the outputs of inconsistent layers like `tf.keras.layers.LSTM()`\n which may take as input `(batch, time, dim) = (1, 1, Nin)` and emits only the\n batch entry if `time == 1`: output shape is `(1, Nout)`. We log an error\n in these cases.\n\n Args:\n spec: A `tf.TypeSpec`.\n outer_ndim: The maximum number of outer singleton dims to remove.\n\n Returns:\n A `tf.TypeSpec`, the spec without its outer batch dimension(s).\n\n Raises:\n ValueError: If `spec` lacks a `shape` property.\n \"\"\"\n shape = getattr(spec, 'shape', None)\n if shape is None:\n shape = getattr(spec, '_shape', None)\n if shape is None:\n raise ValueError(\n 'Could not remove singleton batch dim from spec; it lacks a shape: {}'\n .format(spec))\n for i in range(outer_ndim):\n if len(shape) <= i:\n logging.error(\n 'Could not remove singleton batch dim from spec; len(shape) < %d. '\n 'Shape: %s. Skipping.', i + 1, shape)\n break\n if tf.compat.dimension_value(shape[i]) != 1:\n logging.error(\n 'Could not remove singleton batch dim from spec; shape[%d] != 1: %s '\n '(shape: %s). 
Skipping.', i, spec, shape)\n break\n spec = spec._unbatch() # pylint: disable=protected-access\n return spec\n\n\ndef _tile_batch(t, multiplier, ensure_shape=True):\n \"\"\"Core single-tensor implementation of tile_batch.\"\"\"\n t = tf.convert_to_tensor(t, name='t')\n shape_t = tf.shape(t)\n if t.shape.ndims is None or t.shape.ndims < 1:\n raise ValueError('t must have statically known rank')\n tiling = [1] * (t.shape.ndims + 1)\n tiling[1] = multiplier\n num_batch_dims = tf.compat.dimension_value(t.shape.dims[0])\n tiled_static_batch_size = (\n num_batch_dims * multiplier if num_batch_dims is not None else None)\n tiled = tf.tile(tf.expand_dims(t, 1), tiling)\n tiled = tf.reshape(tiled,\n tf.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))\n\n if ensure_shape:\n tiled = tf.ensure_shape(\n tiled,\n tf.TensorShape([tiled_static_batch_size]).concatenate(t.shape[1:]))\n return tiled\n\n\ndef tile_batch(tensors: types.NestedTensor, multiplier: types.Int):\n \"\"\"Tile the batch dimension of a (possibly nested structure of) tensor(s).\n\n Copied from tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py\n\n For each tensor t in a (possibly nested structure) of tensors,\n this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of\n minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape\n `[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries\n `t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated\n `multiplier` times.\n\n Args:\n tensors: A nested structure of `Tensor` shaped `[batch_size, ...]`.\n multiplier: Python int or a Tensor. Note that if the multiplier is a tensor\n the shape can not be ensured.\n\n Returns:\n A (possibly nested structure of) `Tensor` shaped\n `[batch_size * multiplier, ...]`.\n\n Raises:\n ValueError: if tensor(s) `t` do not have a statically known rank or\n the rank is < 1.\n \"\"\"\n ensure_shape = False if tf.is_tensor(multiplier) else True\n return tf.nest.map_structure(\n lambda t: _tile_batch(t, multiplier, ensure_shape=ensure_shape), tensors)\n\n\ndef assert_value_spec(\n output_spec: types.NestedTensorSpec,\n network_name: str):\n \"\"\"Checks that `output_spec` is a nest of \"value\" type values.\n\n \"value\" type values correspond to floating point tensors with spec shape\n `()` or `(1,)`.\n\n Args:\n output_spec: The output spec returned by `network.create_variables`.\n network_name: The string name of the network for error messages.\n\n Raises:\n ValueError: If `output_spec` is not a nest of value-type tensors.\n \"\"\"\n def check_value_spec(v):\n if not isinstance(v, tf.TensorSpec):\n raise ValueError(\n '{} emits outputs that are not tensors; spec: {}'\n .format(network_name, output_spec))\n if v.shape not in ((), (1,)):\n raise ValueError(\n '{} emits multiple values; spec: {}'\n .format(network_name, output_spec))\n if not v.dtype.is_floating:\n raise ValueError(\n '{} emits outputs that are not real numbers; spec: {}'\n .format(network_name, output_spec))\n\n tf.nest.map_structure(check_value_spec, output_spec)\n"
] |
[
[
"tensorflow.convert_to_tensor",
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.stack",
"numpy.squeeze",
"tensorflow.rank",
"tensorflow.nest.flatten",
"tensorflow.nest.pack_sequence_as",
"numpy.stack",
"tensorflow.compat.v2.where",
"tensorflow.TensorShape",
"tensorflow.is_tensor",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.shape",
"tensorflow.unstack",
"tensorflow.compat.dimension_value",
"tensorflow.dimension_value",
"tensorflow.reduce_prod",
"tensorflow.split",
"tensorflow.nest.map_structure",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"numpy.prod",
"tensorflow.python.util.nest.flatten",
"tensorflow.sparse.split"
]
] |
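Much of `nest_utils` above reduces to one comparison: the rank of each tensor against the rank of its spec, checked for consistency across the flattened nest. A small sketch of that core idea using plain `tf.nest` rather than the tf_agents helpers themselves (the spec shapes here are made up):

```python
import tensorflow as tf

spec = {"obs": tf.TensorSpec(shape=(4,), dtype=tf.float32),
        "act": tf.TensorSpec(shape=(), dtype=tf.int32)}
batched = {"obs": tf.zeros([32, 4]), "act": tf.zeros([32], dtype=tf.int32)}

# Outer rank = tensor rank minus spec rank; get_outer_rank above additionally
# verifies shape compatibility and raises on mixed batched/unbatched input.
discrepancies = [t.shape.rank - s.shape.rank
                 for t, s in zip(tf.nest.flatten(batched), tf.nest.flatten(spec))]
assert len(set(discrepancies)) == 1, "mixed batched/unbatched nest"
print("outer rank:", discrepancies[0])  # -> 1
```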
Feverdreams/BMI
|
[
"53d59f996f21ad29bf2e8961eb9b45bfe1776252",
"53d59f996f21ad29bf2e8961eb9b45bfe1776252"
] |
[
"biosppy/plotting.py",
"biosppy/signals/eeg.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nbiosppy.plotting\n----------------\n\nThis module provides utilities to plot data.\n\n:copyright: (c) 2015-2018 by Instituto de Telecomunicacoes\n:license: BSD 3-clause, see LICENSE for more details.\n\"\"\"\n\n# Imports\n# compat\nfrom __future__ import absolute_import, division, print_function\nfrom six.moves import range, zip\nimport six\n\n# built-in\nimport os\n\n# 3rd party\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\n\n# local\nfrom . import utils\nfrom biosppy.signals import tools as st\n\n# Globals\nMAJOR_LW = 2.5\nMINOR_LW = 1.5\nMAX_ROWS = 10\n\n\ndef _plot_filter(b, a, sampling_rate=1000., nfreqs=4096, ax=None):\n \"\"\"Compute and plot the frequency response of a digital filter.\n\n Parameters\n ----------\n b : array\n Numerator coefficients.\n a : array\n Denominator coefficients.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n nfreqs : int, optional\n Number of frequency points to compute.\n ax : axis, optional\n Plot Axis to use.\n\n Returns\n -------\n fig : Figure\n Figure object.\n\n \"\"\"\n\n # compute frequency response\n freqs, resp = st._filter_resp(b, a,\n sampling_rate=sampling_rate,\n nfreqs=nfreqs)\n\n # plot\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = ax.figure\n\n # amplitude\n pwr = 20. * np.log10(np.abs(resp))\n ax.semilogx(freqs, pwr, 'b', linewidth=MAJOR_LW)\n ax.set_ylabel('Amplitude (dB)', color='b')\n ax.set_xlabel('Frequency (Hz)')\n\n # phase\n angles = np.unwrap(np.angle(resp))\n ax2 = ax.twinx()\n ax2.semilogx(freqs, angles, 'g', linewidth=MAJOR_LW)\n ax2.set_ylabel('Angle (radians)', color='g')\n\n ax.grid()\n\n return fig\n\n\ndef plot_filter(ftype='FIR',\n band='lowpass',\n order=None,\n frequency=None,\n sampling_rate=1000.,\n path=None,\n show=True, **kwargs):\n \"\"\"Plot the frequency response of the filter specified with the given\n parameters.\n\n Parameters\n ----------\n ftype : str\n Filter type:\n * Finite Impulse Response filter ('FIR');\n * Butterworth filter ('butter');\n * Chebyshev filters ('cheby1', 'cheby2');\n * Elliptic filter ('ellip');\n * Bessel filter ('bessel').\n band : str\n Band type:\n * Low-pass filter ('lowpass');\n * High-pass filter ('highpass');\n * Band-pass filter ('bandpass');\n * Band-stop filter ('bandstop').\n order : int\n Order of the filter.\n frequency : int, float, list, array\n Cutoff frequencies; format depends on type of band:\n * 'lowpass' or 'bandpass': single frequency;\n * 'bandpass' or 'bandstop': pair of frequencies.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n ``**kwargs`` : dict, optional\n Additional keyword arguments are passed to the underlying\n scipy.signal function.\n\n \"\"\"\n\n # get filter\n b, a = st.get_filter(ftype=ftype,\n band=band,\n order=order,\n frequency=frequency,\n sampling_rate=sampling_rate, **kwargs)\n\n # plot\n fig = _plot_filter(b, a, sampling_rate)\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_spectrum(signal=None, sampling_rate=1000., path=None, 
show=True):\n \"\"\"Plot the power spectrum of a signal (one-sided).\n\n Parameters\n ----------\n signal : array\n Input signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n freqs, power = st.power_spectrum(signal, sampling_rate,\n pad=0,\n pow2=False,\n decibel=True)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.plot(freqs, power, linewidth=MAJOR_LW)\n ax.set_xlabel('Frequency (Hz)')\n ax.set_ylabel('Power (dB)')\n ax.grid()\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_bvp(ts=None,\n raw=None,\n filtered=None,\n onsets=None,\n heart_rate_ts=None,\n heart_rate=None,\n path=None,\n show=False):\n \"\"\"Create a summary plot from the output of signals.bvp.bvp.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n raw : array\n Raw BVP signal.\n filtered : array\n Filtered BVP signal.\n onsets : array\n Indices of BVP pulse onsets.\n heart_rate_ts : array\n Heart rate time axis reference (seconds).\n heart_rate : array\n Instantaneous heart rate (bpm).\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n fig.suptitle('BVP Summary')\n\n # raw signal\n ax1 = fig.add_subplot(311)\n\n ax1.plot(ts, raw, linewidth=MAJOR_LW, label='Raw')\n\n ax1.set_ylabel('Amplitude')\n ax1.legend()\n ax1.grid()\n\n # filtered signal with onsets\n ax2 = fig.add_subplot(312, sharex=ax1)\n\n ymin = np.min(filtered)\n ymax = np.max(filtered)\n alpha = 0.1 * (ymax - ymin)\n ymax += alpha\n ymin -= alpha\n\n ax2.plot(ts, filtered, linewidth=MAJOR_LW, label='Filtered')\n ax2.vlines(ts[onsets], ymin, ymax,\n color='m',\n linewidth=MINOR_LW,\n label='Onsets')\n\n ax2.set_ylabel('Amplitude')\n ax2.legend()\n ax2.grid()\n\n # heart rate\n ax3 = fig.add_subplot(313, sharex=ax1)\n\n ax3.plot(heart_rate_ts, heart_rate, linewidth=MAJOR_LW, label='Heart Rate')\n\n ax3.set_xlabel('Time (s)')\n ax3.set_ylabel('Heart Rate (bpm)')\n ax3.legend()\n ax3.grid()\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_eda(ts=None,\n raw=None,\n filtered=None,\n onsets=None,\n peaks=None,\n amplitudes=None,\n path=None,\n show=False):\n \"\"\"Create a summary plot from the output of signals.eda.eda.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n raw : array\n Raw EDA signal.\n filtered : array\n Filtered EDA signal.\n onsets : array\n Indices of SCR pulse onsets.\n peaks : array\n Indices of the SCR peaks.\n amplitudes : array\n SCR pulse amplitudes.\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n 
fig.suptitle('EDA Summary')\n\n # raw signal\n ax1 = fig.add_subplot(311)\n\n ax1.plot(ts, raw, linewidth=MAJOR_LW, label='raw')\n\n ax1.set_ylabel('Amplitude')\n ax1.legend()\n ax1.grid()\n\n # filtered signal with onsets, peaks\n ax2 = fig.add_subplot(312, sharex=ax1)\n\n ymin = np.min(filtered)\n ymax = np.max(filtered)\n alpha = 0.1 * (ymax - ymin)\n ymax += alpha\n ymin -= alpha\n\n ax2.plot(ts, filtered, linewidth=MAJOR_LW, label='Filtered')\n ax2.vlines(ts[onsets], ymin, ymax,\n color='m',\n linewidth=MINOR_LW,\n label='Onsets')\n ax2.vlines(ts[peaks], ymin, ymax,\n color='g',\n linewidth=MINOR_LW,\n label='Peaks')\n\n ax2.set_ylabel('Amplitude')\n ax2.legend()\n ax2.grid()\n\n # amplitudes\n ax3 = fig.add_subplot(313, sharex=ax1)\n\n ax3.plot(ts[onsets], amplitudes, linewidth=MAJOR_LW, label='Amplitudes')\n\n ax3.set_xlabel('Time (s)')\n ax3.set_ylabel('Amplitude')\n ax3.legend()\n ax3.grid()\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_emg(ts=None,\n sampling_rate=None,\n raw=None,\n filtered=None,\n onsets=None,\n processed=None,\n path=None,\n show=False):\n \"\"\"Create a summary plot from the output of signals.emg.emg.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n sampling_rate : int, float\n Sampling frequency (Hz).\n raw : array\n Raw EMG signal.\n filtered : array\n Filtered EMG signal.\n onsets : array\n Indices of EMG pulse onsets.\n processed : array, optional\n Processed EMG signal according to the chosen onset detector.\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n fig.suptitle('EMG Summary')\n\n if processed is not None:\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312, sharex=ax1)\n ax3 = fig.add_subplot(313)\n\n # processed signal\n L = len(processed)\n T = (L - 1) / sampling_rate\n ts_processed = np.linspace(0, T, L, endpoint=False)\n ax3.plot(ts_processed, processed,\n linewidth=MAJOR_LW,\n label='Processed')\n ax3.set_xlabel('Time (s)')\n ax3.set_ylabel('Amplitude')\n ax3.legend()\n ax3.grid()\n else:\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212, sharex=ax1)\n\n # raw signal\n ax1.plot(ts, raw, linewidth=MAJOR_LW, label='Raw')\n\n ax1.set_ylabel('Amplitude')\n ax1.legend()\n ax1.grid()\n\n # filtered signal with onsets\n ymin = np.min(filtered)\n ymax = np.max(filtered)\n alpha = 0.1 * (ymax - ymin)\n ymax += alpha\n ymin -= alpha\n\n ax2.plot(ts, filtered, linewidth=MAJOR_LW, label='Filtered')\n ax2.vlines(ts[onsets], ymin, ymax,\n color='m',\n linewidth=MINOR_LW,\n label='Onsets')\n\n ax2.set_xlabel('Time (s)')\n ax2.set_ylabel('Amplitude')\n ax2.legend()\n ax2.grid()\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_resp(ts=None,\n raw=None,\n filtered=None,\n zeros=None,\n resp_rate_ts=None,\n resp_rate=None,\n path=None,\n show=False):\n 
\"\"\"Create a summary plot from the output of signals.bvp.bvp.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n raw : array\n Raw Resp signal.\n filtered : array\n Filtered Resp signal.\n zeros : array\n Indices of Respiration zero crossings.\n resp_rate_ts : array\n Respiration rate time axis reference (seconds).\n resp_rate : array\n Instantaneous respiration rate (Hz).\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n fig.suptitle('Respiration Summary')\n\n # raw signal\n ax1 = fig.add_subplot(311)\n\n ax1.plot(ts, raw, linewidth=MAJOR_LW, label='Raw')\n\n ax1.set_ylabel('Amplitude')\n ax1.legend()\n ax1.grid()\n\n # filtered signal with zeros\n ax2 = fig.add_subplot(312, sharex=ax1)\n\n ymin = np.min(filtered)\n ymax = np.max(filtered)\n alpha = 0.1 * (ymax - ymin)\n ymax += alpha\n ymin -= alpha\n\n ax2.plot(ts, filtered, linewidth=MAJOR_LW, label='Filtered')\n ax2.vlines(ts[zeros], ymin, ymax,\n color='m',\n linewidth=MINOR_LW,\n label='Zero crossings')\n\n ax2.set_ylabel('Amplitude')\n ax2.legend()\n ax2.grid()\n\n # heart rate\n ax3 = fig.add_subplot(313, sharex=ax1)\n\n ax3.plot(resp_rate_ts, resp_rate,\n linewidth=MAJOR_LW,\n label='Respiration Rate')\n\n ax3.set_xlabel('Time (s)')\n ax3.set_ylabel('Respiration Rate (Hz)')\n ax3.legend()\n ax3.grid()\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_eeg(ts=None,\n raw=None,\n filtered=None,\n labels=None,\n features_ts=None,\n theta=None,\n alpha_low=None,\n alpha_high=None,\n beta=None,\n gamma=None,\n plf_pairs=None,\n plf=None,\n path=None,\n show=False):\n \"\"\"Create a summary plot from the output of signals.eeg.eeg.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n raw : array\n Raw EEG signal.\n filtered : array\n Filtered EEG signal.\n labels : list\n Channel labels.\n features_ts : array\n Features time axis reference (seconds).\n theta : array\n Average power in the 4 to 8 Hz frequency band; each column is one\n EEG channel.\n alpha_low : array\n Average power in the 8 to 10 Hz frequency band; each column is one\n EEG channel.\n alpha_high : array\n Average power in the 10 to 13 Hz frequency band; each column is one\n EEG channel.\n beta : array\n Average power in the 13 to 25 Hz frequency band; each column is one\n EEG channel.\n gamma : array\n Average power in the 25 to 40 Hz frequency band; each column is one\n EEG channel.\n plf_pairs : list\n PLF pair indices.\n plf : array\n PLF matrix; each column is a channel pair.\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n nrows = MAX_ROWS\n alpha = 2.\n\n figs = []\n\n # raw\n fig = _plot_multichannel(ts=ts,\n signal=raw,\n labels=labels,\n nrows=nrows,\n alpha=alpha,\n title='EEG Summary - Raw',\n xlabel='Time (s)',\n ylabel='Amplitude')\n figs.append(('_Raw', fig))\n\n # filtered\n fig = _plot_multichannel(ts=ts,\n signal=filtered,\n labels=labels,\n nrows=nrows,\n alpha=alpha,\n title='EEG Summary - Filtered',\n xlabel='Time (s)',\n 
ylabel='Amplitude')\n figs.append(('_Filtered', fig))\n\n # band-power\n names = ('Theta Band', 'Lower Alpha Band', 'Higher Alpha Band',\n 'Beta Band', 'Gamma Band')\n args = (theta, alpha_low, alpha_high, beta, gamma)\n for n, a in zip(names, args):\n fig = _plot_multichannel(ts=features_ts,\n signal=a,\n labels=labels,\n nrows=nrows,\n alpha=alpha,\n title='EEG Summary - %s' % n,\n xlabel='Time (s)',\n ylabel='Power')\n figs.append(('_' + n.replace(' ', '_'), fig))\n\n # PLF\n plf_labels = ['%s vs %s' % (labels[p[0]], labels[p[1]]) for p in plf_pairs]\n fig = _plot_multichannel(ts=features_ts,\n signal=plf,\n labels=plf_labels,\n nrows=nrows,\n alpha=alpha,\n title='EEG Summary - Phase-Locking Factor',\n xlabel='Time (s)',\n ylabel='PLF')\n figs.append(('_PLF', fig))\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n ext = '.png'\n\n for n, fig in figs:\n path = root + n + ext\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n for _, fig in figs:\n plt.close(fig)\n\n\ndef _yscaling(signal=None, alpha=1.5):\n \"\"\"Get y axis limits for a signal with scaling.\n\n Parameters\n ----------\n signal : array\n Input signal.\n alpha : float, optional\n Scaling factor.\n\n Returns\n -------\n ymin : float\n Minimum y value.\n ymax : float\n Maximum y value.\n\n \"\"\"\n\n mi = np.min(signal)\n m = np.mean(signal)\n mx = np.max(signal)\n\n if mi == mx:\n ymin = m - 1\n ymax = m + 1\n else:\n ymin = m - alpha * (m - mi)\n ymax = m + alpha * (mx - m)\n\n return ymin, ymax\n\n\ndef _plot_multichannel(ts=None,\n signal=None,\n labels=None,\n nrows=10,\n alpha=2.,\n title=None,\n xlabel=None,\n ylabel=None):\n \"\"\"Plot a multi-channel signal.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n signal : array\n Multi-channel signal; each column is one channel.\n labels : list, optional\n Channel labels.\n nrows : int, optional\n Maximum number of rows to use.\n alpha : float, optional\n Scaling factor for y axis.\n title : str, optional\n Plot title.\n xlabel : str, optional\n Label for x axis.\n ylabel : str, optional\n Label for y axis.\n\n Returns\n -------\n fig : Figure\n Figure object.\n\n \"\"\"\n\n # ensure numpy\n signal = np.array(signal)\n nch = signal.shape[1]\n\n # check labels\n if labels is None:\n labels = ['Ch. 
%d' % i for i in range(nch)]\n\n if nch < nrows:\n nrows = nch\n\n ncols = int(np.ceil(nch / float(nrows)))\n\n fig = plt.figure()\n\n # title\n if title is not None:\n fig.suptitle(title)\n\n gs = gridspec.GridSpec(nrows, ncols, hspace=0, wspace=0.2)\n\n # reference axes\n ax0 = fig.add_subplot(gs[0, 0])\n ax0.plot(ts, signal[:, 0], linewidth=MAJOR_LW, label=labels[0])\n ymin, ymax = _yscaling(signal[:, 0], alpha=alpha)\n ax0.set_ylim(ymin, ymax)\n ax0.legend()\n ax0.grid()\n axs = {(0, 0): ax0}\n\n for i in range(1, nch - 1):\n a = i % nrows\n b = int(np.floor(i / float(nrows)))\n ax = fig.add_subplot(gs[a, b], sharex=ax0)\n axs[(a, b)] = ax\n\n ax.plot(ts, signal[:, i], linewidth=MAJOR_LW, label=labels[i])\n ymin, ymax = _yscaling(signal[:, i], alpha=alpha)\n ax.set_ylim(ymin, ymax)\n ax.legend()\n ax.grid()\n\n # last plot\n i = nch - 1\n a = i % nrows\n b = int(np.floor(i / float(nrows)))\n ax = fig.add_subplot(gs[a, b], sharex=ax0)\n axs[(a, b)] = ax\n\n ax.plot(ts, signal[:, -1], linewidth=MAJOR_LW, label=labels[-1])\n ymin, ymax = _yscaling(signal[:, -1], alpha=alpha)\n ax.set_ylim(ymin, ymax)\n ax.legend()\n ax.grid()\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n\n for b in range(0, ncols - 1):\n a = nrows - 1\n ax = axs[(a, b)]\n ax.set_xlabel(xlabel)\n\n if ylabel is not None:\n # middle left\n a = nrows // 2\n ax = axs[(a, 0)]\n ax.set_ylabel(ylabel)\n\n # make layout tight\n gs.tight_layout(fig)\n\n return fig\n\n\ndef plot_ecg(ts=None,\n raw=None,\n filtered=None,\n rpeaks=None,\n templates_ts=None,\n templates=None,\n heart_rate_ts=None,\n heart_rate=None,\n path=None,\n show=False):\n \"\"\"Create a summary plot from the output of signals.ecg.ecg.\n\n Parameters\n ----------\n ts : array\n Signal time axis reference (seconds).\n raw : array\n Raw ECG signal.\n filtered : array\n Filtered ECG signal.\n rpeaks : array\n R-peak location indices.\n templates_ts : array\n Templates time axis reference (seconds).\n templates : array\n Extracted heartbeat templates.\n heart_rate_ts : array\n Heart rate time axis reference (seconds).\n heart_rate : array\n Instantaneous heart rate (bpm).\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n fig.suptitle('ECG Summary')\n gs = gridspec.GridSpec(6, 2)\n\n # raw signal\n ax1 = fig.add_subplot(gs[:2, 0])\n\n ax1.plot(ts, raw, linewidth=MAJOR_LW, label='Raw')\n\n ax1.set_ylabel('Amplitude')\n ax1.legend()\n ax1.grid()\n\n # filtered signal with rpeaks\n ax2 = fig.add_subplot(gs[2:4, 0], sharex=ax1)\n\n ymin = np.min(filtered)\n ymax = np.max(filtered)\n alpha = 0.1 * (ymax - ymin)\n ymax += alpha\n ymin -= alpha\n\n ax2.plot(ts, filtered, linewidth=MAJOR_LW, label='Filtered')\n ax2.vlines(ts[rpeaks], ymin, ymax,\n color='m',\n linewidth=MINOR_LW,\n label='R-peaks')\n\n ax2.set_ylabel('Amplitude')\n ax2.legend()\n ax2.grid()\n\n # heart rate\n ax3 = fig.add_subplot(gs[4:, 0], sharex=ax1)\n\n ax3.plot(heart_rate_ts, heart_rate, linewidth=MAJOR_LW, label='Heart Rate')\n\n ax3.set_xlabel('Time (s)')\n ax3.set_ylabel('Heart Rate (bpm)')\n ax3.legend()\n ax3.grid()\n\n # templates\n ax4 = fig.add_subplot(gs[1:5, 1])\n\n ax4.plot(templates_ts, templates.T, 'm', linewidth=MINOR_LW, alpha=0.7)\n\n ax4.set_xlabel('Time (s)')\n ax4.set_ylabel('Amplitude')\n ax4.set_title('Templates')\n ax4.grid()\n\n # make layout tight\n gs.tight_layout(fig)\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n 
root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef _plot_rates(thresholds, rates, variables,\n lw=1,\n colors=None,\n alpha=1,\n eer_idx=None,\n labels=False,\n ax=None):\n \"\"\"Plot biometric rates.\n\n Parameters\n ----------\n thresholds : array\n Classifier thresholds.\n rates : dict\n Dictionary of rates.\n variables : list\n Keys from 'rates' to plot.\n lw : int, float, optional\n Plot linewidth.\n colors : list, optional\n Plot line color for each variable.\n alpha : float, optional\n Plot line alpha value.\n eer_idx : int, optional\n Classifier reference index for the Equal Error Rate.\n labels : bool, optional\n If True, will show plot labels.\n ax : axis, optional\n Plot Axis to use.\n\n Returns\n -------\n fig : Figure\n Figure object.\n\n \"\"\"\n\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = ax.figure\n\n if colors is None:\n x = np.linspace(0., 1., len(variables))\n colors = plt.get_cmap('rainbow')(x)\n\n if labels:\n for i, v in enumerate(variables):\n ax.plot(thresholds, rates[v], colors[i],\n lw=lw,\n alpha=alpha,\n label=v)\n else:\n for i, v in enumerate(variables):\n ax.plot(thresholds, rates[v], colors[i], lw=lw, alpha=alpha)\n\n if eer_idx is not None:\n x, y = rates['EER'][eer_idx]\n ax.vlines(x, 0, 1, 'r', lw=lw)\n ax.set_title('EER = %0.2f %%' % (100. * y))\n\n return fig\n\n\ndef plot_biometrics(assessment=None, eer_idx=None, path=None, show=False):\n \"\"\"Create a summary plot of a biometrics test run.\n\n Parameters\n ----------\n assessment : dict\n Classification assessment results.\n eer_idx : int, optional\n Classifier reference index for the Equal Error Rate.\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n fig.suptitle('Biometrics Summary')\n\n c_sub = ['#008bff', '#8dd000']\n c_global = ['#0037ff', 'g']\n\n ths = assessment['thresholds']\n\n auth_ax = fig.add_subplot(121)\n id_ax = fig.add_subplot(122)\n\n # subject results\n for sub in six.iterkeys(assessment['subject']):\n auth_rates = assessment['subject'][sub]['authentication']['rates']\n _ = _plot_rates(ths, auth_rates, ['FAR', 'FRR'],\n lw=MINOR_LW,\n colors=c_sub,\n alpha=0.4,\n eer_idx=None,\n labels=False,\n ax=auth_ax)\n\n id_rates = assessment['subject'][sub]['identification']['rates']\n _ = _plot_rates(ths, id_rates, ['MR', 'RR'],\n lw=MINOR_LW,\n colors=c_sub,\n alpha=0.4,\n eer_idx=None,\n labels=False,\n ax=id_ax)\n\n # global results\n auth_rates = assessment['global']['authentication']['rates']\n _ = _plot_rates(ths, auth_rates, ['FAR', 'FRR'],\n lw=MAJOR_LW,\n colors=c_global,\n alpha=1,\n eer_idx=eer_idx,\n labels=True,\n ax=auth_ax)\n\n id_rates = assessment['global']['identification']['rates']\n _ = _plot_rates(ths, id_rates, ['MR', 'RR'],\n lw=MAJOR_LW,\n colors=c_global,\n alpha=1,\n eer_idx=eer_idx,\n labels=True,\n ax=id_ax)\n\n # set labels and grids\n auth_ax.set_xlabel('Threshold')\n auth_ax.set_ylabel('Authentication')\n auth_ax.grid()\n auth_ax.legend()\n\n id_ax.set_xlabel('Threshold')\n id_ax.set_ylabel('Identification')\n id_ax.grid()\n id_ax.legend()\n\n # make layout tight\n fig.tight_layout()\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = 
ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n\n\ndef plot_clustering(data=None, clusters=None, path=None, show=False):\n \"\"\"Create a summary plot of a data clustering.\n\n Parameters\n ----------\n data : array\n An m by n array of m data samples in an n-dimensional space.\n clusters : dict\n Dictionary with the sample indices (rows from `data`) for each cluster.\n path : str, optional\n If provided, the plot will be saved to the specified file.\n show : bool, optional\n If True, show the plot immediately.\n\n \"\"\"\n\n fig = plt.figure()\n fig.suptitle('Clustering Summary')\n\n ymin, ymax = _yscaling(data, alpha=1.2)\n\n # determine number of clusters\n keys = list(clusters)\n nc = len(keys)\n\n if nc <= 4:\n nrows = 2\n ncols = 4\n else:\n area = nc + 4\n\n # try to fit to a square\n nrows = int(np.ceil(np.sqrt(area)))\n\n if nrows > MAX_ROWS:\n # prefer to increase number of columns\n nrows = MAX_ROWS\n\n ncols = int(np.ceil(area / float(nrows)))\n\n # plot grid\n gs = gridspec.GridSpec(nrows, ncols, hspace=0.2, wspace=0.2)\n\n # global axes\n ax_global = fig.add_subplot(gs[:2, :2])\n\n # cluster axes\n c_grid = np.ones((nrows, ncols), dtype='bool')\n c_grid[:2, :2] = False\n c_rows, c_cols = np.nonzero(c_grid)\n\n # generate color map\n x = np.linspace(0., 1., nc)\n cmap = plt.get_cmap('rainbow')\n\n for i, k in enumerate(keys):\n aux = data[clusters[k]]\n color = cmap(x[i])\n label = 'Cluster %s' % k\n ax = fig.add_subplot(gs[c_rows[i], c_cols[i]], sharex=ax_global)\n ax.set_ylim([ymin, ymax])\n ax.set_title(label)\n ax.grid()\n\n if len(aux) > 0:\n ax_global.plot(aux.T, color=color, lw=MINOR_LW, alpha=0.7)\n ax.plot(aux.T, color=color, lw=MAJOR_LW)\n\n ax_global.set_title('All Clusters')\n ax_global.set_ylim([ymin, ymax])\n ax_global.grid()\n\n # make layout tight\n gs.tight_layout(fig)\n\n # save to file\n if path is not None:\n path = utils.normpath(path)\n root, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext not in ['png', 'jpg']:\n path = root + '.png'\n\n fig.savefig(path, dpi=200, bbox_inches='tight')\n\n # show\n if show:\n plt.show()\n else:\n # close\n plt.close(fig)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nbiosppy.signals.eeg\n-------------------\n\nThis module provides methods to process Electroencephalographic (EEG)\nsignals.\n\n:copyright: (c) 2015-2018 by Instituto de Telecomunicacoes\n:license: BSD 3-clause, see LICENSE for more details.\n\"\"\"\n\n# Imports\n# compat\nfrom __future__ import absolute_import, division, print_function\nfrom six.moves import range\n\n# 3rd party\nimport numpy as np\n\n# local\nfrom . import tools as st\nfrom .. import plotting, utils\n\n\ndef eeg(signal=None, sampling_rate=1000., labels=None, show=True):\n \"\"\"Process raw EEG signals and extract relevant signal features using\n default parameters.\n\n Parameters\n ----------\n signal : array\n Raw EEG signal matrix; each column is one EEG channel.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n labels : list, optional\n Channel labels.\n show : bool, optional\n If True, show a summary plot.\n\n Returns\n -------\n ts : array\n Signal time axis reference (seconds).\n filtered : array\n Filtered BVP signal.\n features_ts : array\n Features time axis reference (seconds).\n theta : array\n Average power in the 4 to 8 Hz frequency band; each column is one EEG\n channel.\n alpha_low : array\n Average power in the 8 to 10 Hz frequency band; each column is one EEG\n channel.\n alpha_high : array\n Average power in the 10 to 13 Hz frequency band; each column is one EEG\n channel.\n beta : array\n Average power in the 13 to 25 Hz frequency band; each column is one EEG\n channel.\n gamma : array\n Average power in the 25 to 40 Hz frequency band; each column is one EEG\n channel.\n plf_pairs : list\n PLF pair indices.\n plf : array\n PLF matrix; each column is a channel pair.\n\n \"\"\"\n\n # check inputs\n if signal is None:\n raise TypeError(\"Please specify an input signal.\")\n\n # ensure numpy\n signal = np.array(signal)\n\n sampling_rate = float(sampling_rate)\n nch = signal.shape[1]\n\n if labels is None:\n labels = ['Ch. 
%d' % i for i in range(nch)]\n    else:\n        if len(labels) != nch:\n            raise ValueError(\n                \"Number of channels mismatch between signal matrix and labels.\")\n\n    # high pass filter\n    b, a = st.get_filter(ftype='butter',\n                         band='highpass',\n                         order=8,\n                         frequency=4,\n                         sampling_rate=sampling_rate)\n\n    aux, _ = st._filter_signal(b, a, signal=signal, check_phase=True, axis=0)\n\n    # low pass filter\n    b, a = st.get_filter(ftype='butter',\n                         band='lowpass',\n                         order=16,\n                         frequency=40,\n                         sampling_rate=sampling_rate)\n\n    filtered, _ = st._filter_signal(b, a, signal=aux, check_phase=True, axis=0)\n\n    # band power features\n    out = get_power_features(signal=filtered,\n                             sampling_rate=sampling_rate,\n                             size=0.25,\n                             overlap=0.5)\n    ts_feat = out['ts']\n    theta = out['theta']\n    alpha_low = out['alpha_low']\n    alpha_high = out['alpha_high']\n    beta = out['beta']\n    gamma = out['gamma']\n\n    # PLF features\n    _, plf_pairs, plf = get_plf_features(signal=filtered,\n                                         sampling_rate=sampling_rate,\n                                         size=0.25,\n                                         overlap=0.5)\n\n    # get time vectors\n    length = len(signal)\n    T = (length - 1) / sampling_rate\n    ts = np.linspace(0, T, length, endpoint=False)\n\n    # plot\n    if show:\n        plotting.plot_eeg(ts=ts,\n                          raw=signal,\n                          filtered=filtered,\n                          labels=labels,\n                          features_ts=ts_feat,\n                          theta=theta,\n                          alpha_low=alpha_low,\n                          alpha_high=alpha_high,\n                          beta=beta,\n                          gamma=gamma,\n                          plf_pairs=plf_pairs,\n                          plf=plf,\n                          path=None,\n                          show=True)\n\n    # output\n    args = (ts, filtered, ts_feat, theta, alpha_low, alpha_high, beta, gamma,\n            plf_pairs, plf)\n    names = ('ts', 'filtered', 'features_ts', 'theta', 'alpha_low',\n             'alpha_high', 'beta', 'gamma', 'plf_pairs', 'plf')\n\n    return utils.ReturnTuple(args, names)\n\n\ndef car_reference(signal=None):\n    \"\"\"Change signal reference to the Common Average Reference (CAR).\n\n    Parameters\n    ----------\n    signal : array\n        Input EEG signal matrix; each column is one EEG channel.\n\n    Returns\n    -------\n    signal : array\n        Re-referenced EEG signal matrix; each column is one EEG channel.\n\n    \"\"\"\n\n    # check inputs\n    if signal is None:\n        raise TypeError(\"Please specify an input signal.\")\n\n    length, nch = signal.shape\n    avg = np.mean(signal, axis=1)\n\n    out = signal - np.tile(avg.reshape((length, 1)), nch)\n\n    return utils.ReturnTuple((out,), ('signal',))\n\n\ndef get_power_features(signal=None,\n                       sampling_rate=1000.,\n                       size=0.25,\n                       overlap=0.5):\n    \"\"\"Extract band power features from EEG signals.\n\n    Computes the average signal power, with overlapping windows, in typical\n    EEG frequency bands:\n    * Theta: from 4 to 8 Hz,\n    * Lower Alpha: from 8 to 10 Hz,\n    * Higher Alpha: from 10 to 13 Hz,\n    * Beta: from 13 to 25 Hz,\n    * Gamma: from 25 to 40 Hz.\n\n    Parameters\n    ----------\n    signal : array\n        Filtered EEG signal matrix; each column is one EEG channel.\n    sampling_rate : int, float, optional\n        Sampling frequency (Hz).\n    size : float, optional\n        Window size (seconds).\n    overlap : float, optional\n        Window overlap (0 to 1).\n\n    Returns\n    -------\n    ts : array\n        Features time axis reference (seconds).\n    theta : array\n        Average power in the 4 to 8 Hz frequency band; each column is one EEG\n        channel.\n    alpha_low : array\n        Average power in the 8 to 10 Hz frequency band; each column is one EEG\n        channel.\n    alpha_high : array\n        Average power in the 10 to 13 Hz frequency band; each column is one EEG\n        channel.\n    beta : array\n        Average power in the 13 to 25 Hz frequency band; each column is one EEG\n        channel.\n    gamma : array\n        Average power in the 25 to 40 Hz frequency band; each column is one EEG\n        channel.\n\n    \"\"\"\n\n    # check 
inputs\n if signal is None:\n raise TypeError(\"Please specify an input signal.\")\n\n # ensure numpy\n signal = np.array(signal)\n nch = signal.shape[1]\n\n sampling_rate = float(sampling_rate)\n\n # convert sizes to samples\n size = int(size * sampling_rate)\n step = size - int(overlap * size)\n\n # padding\n min_pad = 1024\n pad = None\n if size < min_pad:\n pad = min_pad - size\n\n # frequency bands\n bands = [[4, 8], [8, 10], [10, 13], [13, 25], [25, 40]]\n nb = len(bands)\n\n # windower\n fcn_kwargs = {'sampling_rate': sampling_rate, 'bands': bands, 'pad': pad}\n index, values = st.windower(signal=signal,\n size=size,\n step=step,\n kernel='hann',\n fcn=_power_features,\n fcn_kwargs=fcn_kwargs)\n\n # median filter\n md_size = int(0.625 * sampling_rate / float(step))\n if md_size % 2 == 0:\n # must be odd\n md_size += 1\n\n for i in range(nb):\n for j in range(nch):\n values[:, i, j], _ = st.smoother(signal=values[:, i, j],\n kernel='median',\n size=md_size)\n\n # extract individual bands\n theta = values[:, 0, :]\n alpha_low = values[:, 1, :]\n alpha_high = values[:, 2, :]\n beta = values[:, 3, :]\n gamma = values[:, 4, :]\n\n # convert indices to seconds\n ts = index.astype('float') / sampling_rate\n\n # output\n args = (ts, theta, alpha_low, alpha_high, beta, gamma)\n names = ('ts', 'theta', 'alpha_low', 'alpha_high', 'beta', 'gamma')\n\n return utils.ReturnTuple(args, names)\n\n\ndef get_plf_features(signal=None, sampling_rate=1000., size=0.25, overlap=0.5):\n \"\"\"Extract Phase-Locking Factor (PLF) features from EEG signals between all\n channel pairs.\n\n Parameters\n ----------\n signal : array\n Filtered EEG signal matrix; each column is one EEG channel.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n size : float, optional\n Window size (seconds).\n overlap : float, optional\n Window overlap (0 to 1).\n\n Returns\n -------\n ts : array\n Features time axis reference (seconds).\n plf_pairs : list\n PLF pair indices.\n plf : array\n PLF matrix; each column is a channel pair.\n\n \"\"\"\n\n # check inputs\n if signal is None:\n raise TypeError(\"Please specify an input signal.\")\n\n # ensure numpy\n signal = np.array(signal)\n nch = signal.shape[1]\n\n sampling_rate = float(sampling_rate)\n\n # convert sizes to samples\n size = int(size * sampling_rate)\n step = size - int(overlap * size)\n\n # padding\n min_pad = 1024\n N = None\n if size < min_pad:\n N = min_pad\n\n # PLF pairs\n pairs = [(i, j) for i in range(nch) for j in range(i + 1, nch)]\n nb = len(pairs)\n\n # windower\n fcn_kwargs = {'pairs': pairs, 'N': N}\n index, values = st.windower(signal=signal,\n size=size,\n step=step,\n kernel='hann',\n fcn=_plf_features,\n fcn_kwargs=fcn_kwargs)\n\n # median filter\n md_size = int(0.625 * sampling_rate / float(step))\n if md_size % 2 == 0:\n # must be odd\n md_size += 1\n\n for i in range(nb):\n values[:, i], _ = st.smoother(signal=values[:, i],\n kernel='median',\n size=md_size)\n\n # convert indices to seconds\n ts = index.astype('float') / sampling_rate\n\n # output\n args = (ts, pairs, values)\n names = ('ts', 'plf_pairs', 'plf')\n\n return utils.ReturnTuple(args, names)\n\n\ndef _power_features(signal=None, sampling_rate=1000., bands=None, pad=0):\n \"\"\"Helper function to compute band power features for each window.\n\n Parameters\n ----------\n signal : array\n Filtered EEG signal matrix; each column is one EEG channel.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n bands : list\n List of frequency pairs defining the bands.\n 
pad : int, optional\n Padding for the Fourier Transform (number of zeros added).\n\n Returns\n -------\n out : array\n Average power for each band and EEG channel; shape is\n (bands, channels).\n\n \"\"\"\n\n nch = signal.shape[1]\n\n out = np.zeros((len(bands), nch), dtype='float')\n for i in range(nch):\n # compute power spectrum\n freqs, power = st.power_spectrum(signal=signal[:, i],\n sampling_rate=sampling_rate,\n pad=pad,\n pow2=False,\n decibel=False)\n\n # compute average band power\n for j, b in enumerate(bands):\n avg, = st.band_power(freqs=freqs,\n power=power,\n frequency=b,\n decibel=False)\n out[j, i] = avg\n\n return out\n\n\ndef _plf_features(signal=None, pairs=None, N=None):\n \"\"\"Helper function to compute PLF features for each window.\n\n Parameters\n ----------\n signal : array\n Filtered EEG signal matrix; each column is one EEG channel.\n pairs : iterable\n List of signal channel pairs.\n N : int, optional\n Number of Fourier components.\n\n Returns\n -------\n out : array\n PLF for each channel pair.\n\n \"\"\"\n\n out = np.zeros(len(pairs), dtype='float')\n for i, p in enumerate(pairs):\n # compute PLF\n s1 = signal[:, p[0]]\n s2 = signal[:, p[1]]\n out[i], = st.phase_locking(signal1=s1, signal2=s2, N=N)\n\n return out\n"
] |
[
[
"numpy.abs",
"numpy.nonzero",
"numpy.min",
"numpy.linspace",
"numpy.sqrt",
"matplotlib.pyplot.get_cmap",
"numpy.ones",
"numpy.max",
"numpy.mean",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"numpy.angle",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.array",
"numpy.mean",
"numpy.linspace"
]
] |
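Note: a minimal usage sketch for the biosppy EEG pipeline in the row above, assuming biosppy is installed; the array shape, sampling rate, and channel labels are placeholders, and key-based access relies on the ReturnTuple indexing the module itself uses (e.g. out['ts']).

import numpy as np
from biosppy.signals import eeg

data = np.random.randn(10000, 4)  # hypothetical 4-channel recording at 1 kHz
out = eeg.eeg(signal=data, sampling_rate=1000.,
              labels=['C3', 'C4', 'O1', 'O2'], show=False)
theta = out['theta']  # (n_windows, n_channels) band power, per get_power_features
plf = out['plf']      # one column per channel pair, per get_plf_features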
shania3322/joeynmt
|
[
"5afe9d00930f19949b2078141771bf4621f6e9ae"
] |
[
"github/joeynmt/transformer_layers.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\r\nimport math\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch import Tensor\r\n\r\n\r\n# pylint: disable=arguments-differ\r\nclass MultiHeadedAttention(nn.Module):\r\n \"\"\"\r\n Multi-Head Attention module from \"Attention is All You Need\"\r\n\r\n Implementation modified from OpenNMT-py.\r\n https://github.com/OpenNMT/OpenNMT-py\r\n \"\"\"\r\n\r\n def __init__(self, num_heads: int, size: int, dropout: float = 0.1):\r\n \"\"\"\r\n Create a multi-headed attention layer.\r\n :param num_heads: the number of heads\r\n :param size: model size (must be divisible by num_heads)\r\n :param dropout: probability of dropping a unit\r\n \"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n\r\n assert size % num_heads == 0\r\n\r\n self.head_size = head_size = size // num_heads\r\n self.model_size = size\r\n self.num_heads = num_heads\r\n\r\n self.k_layer = nn.Linear(size, num_heads * head_size)\r\n self.v_layer = nn.Linear(size, num_heads * head_size)\r\n self.q_layer = nn.Linear(size, num_heads * head_size)\r\n\r\n self.output_layer = nn.Linear(size, size)\r\n self.softmax = nn.Softmax(dim=-1)\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n def forward(self, k: Tensor, v: Tensor, q: Tensor, mask: Tensor = None):\r\n \"\"\"\r\n Computes multi-headed attention.\r\n\r\n :param k: keys [B, M, D] with M being the sentence length.\r\n :param v: values [B, M, D]\r\n :param q: query [B, M, D]\r\n :param mask: optional mask [B, 1, M]\r\n :return:\r\n \"\"\"\r\n batch_size = k.size(0)\r\n num_heads = self.num_heads\r\n\r\n # project the queries (q), keys (k), and values (v)\r\n k = self.k_layer(k)\r\n v = self.v_layer(v)\r\n q = self.q_layer(q)\r\n\r\n # reshape q, k, v for our computation to [batch_size, num_heads, ..]\r\n k = k.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)\r\n v = v.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)\r\n q = q.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)\r\n\r\n # compute scores\r\n q = q / math.sqrt(self.head_size)\r\n\r\n # batch x num_heads x query_len x key_len\r\n scores = torch.matmul(q, k.transpose(2, 3))\r\n\r\n # apply the mask (if we have one)\r\n # we add a dimension for the heads to it below: [B, 1, 1, M]\r\n if mask is not None:\r\n scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf'))\r\n\r\n # apply attention dropout and compute context vectors.\r\n attention = self.softmax(scores)\r\n attention = self.dropout(attention)\r\n\r\n # get context vector (select values with attention) and reshape\r\n # back to [B, M, D]\r\n context = torch.matmul(attention, v)\r\n context = context.transpose(1, 2).contiguous().view(\r\n batch_size, -1, num_heads * self.head_size)\r\n\r\n output = self.output_layer(context)\r\n\r\n return output\r\n\r\n\r\n# pylint: disable=arguments-differ\r\nclass PositionwiseFeedForward(nn.Module):\r\n \"\"\"\r\n Position-wise Feed-forward layer\r\n Projects to ff_size and then back down to input_size.\r\n \"\"\"\r\n\r\n def __init__(self, input_size, ff_size, dropout=0.1):\r\n \"\"\"\r\n Initializes position-wise feed-forward layer.\r\n :param input_size: dimensionality of the input.\r\n :param ff_size: dimensionality of intermediate representation\r\n :param dropout:\r\n \"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.layer_norm = nn.LayerNorm(input_size, eps=1e-6)\r\n self.pwff_layer = nn.Sequential(\r\n nn.Linear(input_size, ff_size),\r\n nn.ReLU(),\r\n nn.Dropout(dropout),\r\n nn.Linear(ff_size, input_size),\r\n 
nn.Dropout(dropout),\r\n        )\r\n\r\n    def forward(self, x):\r\n        x_norm = self.layer_norm(x)\r\n        return self.pwff_layer(x_norm) + x\r\n\r\n\r\n# pylint: disable=arguments-differ\r\nclass PositionalEncoding(nn.Module):\r\n    \"\"\"\r\n    Pre-compute position encodings (PE).\r\n    In forward pass, this adds the position-encodings to the\r\n    input for as many time steps as necessary.\r\n\r\n    Implementation based on OpenNMT-py.\r\n    https://github.com/OpenNMT/OpenNMT-py\r\n    \"\"\"\r\n    def __init__(self,\r\n                 size: int = 0,\r\n                 max_len: int = 5000):\r\n        \"\"\"\r\n        Positional Encoding with maximum length max_len\r\n        :param size:\r\n        :param max_len:\r\n        :param dropout:\r\n        \"\"\"\r\n        if size % 2 != 0:\r\n            raise ValueError(\"Cannot use sin/cos positional encoding with \"\r\n                             \"odd dim (got dim={:d})\".format(size))\r\n        pe = torch.zeros(max_len, size)\r\n        position = torch.arange(0, max_len).unsqueeze(1)\r\n        div_term = torch.exp((torch.arange(0, size, 2, dtype=torch.float) *\r\n                              -(math.log(10000.0) / size)))\r\n        pe[:, 0::2] = torch.sin(position.float() * div_term)\r\n        pe[:, 1::2] = torch.cos(position.float() * div_term)\r\n        pe = pe.unsqueeze(0)  # shape: [1, max_len, size]\r\n        super(PositionalEncoding, self).__init__()\r\n        self.register_buffer('pe', pe)\r\n        self.dim = size\r\n\r\n    def forward(self, emb):\r\n        \"\"\"Embed inputs.\r\n        Args:\r\n            emb (FloatTensor): Sequence of word vectors\r\n                ``(batch_size, seq_len, self.dim)``\r\n        \"\"\"\r\n        # Add position encodings\r\n        return emb + self.pe[:, :emb.size(1)]\r\n\r\n\r\nclass TransformerEncoderLayer(nn.Module):\r\n    \"\"\"\r\n    One Transformer encoder layer has a Multi-head attention layer plus\r\n    a position-wise feed-forward layer.\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 size: int = 0,\r\n                 ff_size: int = 0,\r\n                 num_heads: int = 0,\r\n                 dropout: float = 0.1):\r\n        \"\"\"\r\n        A single Transformer layer.\r\n        :param size:\r\n        :param ff_size:\r\n        :param num_heads:\r\n        :param dropout:\r\n        \"\"\"\r\n        super(TransformerEncoderLayer, self).__init__()\r\n\r\n        self.layer_norm = nn.LayerNorm(size, eps=1e-6)\r\n        self.src_src_att = MultiHeadedAttention(num_heads, size,\r\n                                                dropout=dropout)\r\n        self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,\r\n                                                    dropout=dropout)\r\n        self.dropout = nn.Dropout(dropout)\r\n        self.size = size\r\n\r\n    # pylint: disable=arguments-differ\r\n    def forward(self, x: Tensor, mask: Tensor) -> Tensor:\r\n        \"\"\"\r\n        Forward pass for a single transformer encoder layer.\r\n        First applies layer norm, then self attention,\r\n        then dropout with residual connection (adding the input to the result),\r\n        and then a position-wise feed-forward layer.\r\n\r\n        :param x: layer input\r\n        :param mask: input mask\r\n        :return: output tensor\r\n        \"\"\"\r\n        x_norm = self.layer_norm(x)\r\n        h = self.src_src_att(x_norm, x_norm, x_norm, mask)\r\n        h = self.dropout(h) + x\r\n        o = self.feed_forward(h)\r\n        return o\r\n\r\n\r\nclass TransformerDecoderLayer(nn.Module):\r\n    \"\"\"\r\n    Transformer decoder layer.\r\n\r\n    Consists of self-attention, source-attention, and feed-forward.\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 size: int = 0,\r\n                 ff_size: int = 0,\r\n                 num_heads: int = 0,\r\n                 dropout: float = 0.1):\r\n        \"\"\"\r\n        Represents a single Transformer decoder layer.\r\n\r\n        It attends to the source representation and the previous decoder states.\r\n\r\n        :param size: model dimensionality\r\n        :param ff_size: size of the feed-forward intermediate layer\r\n        :param num_heads: number of heads\r\n        :param dropout: dropout to apply to input\r\n        \"\"\"\r\n        
super(TransformerDecoderLayer, self).__init__()\r\n self.size = size\r\n\r\n self.trg_trg_att = MultiHeadedAttention(num_heads, size,\r\n dropout=dropout)\r\n self.src_trg_att = MultiHeadedAttention(num_heads, size,\r\n dropout=dropout)\r\n\r\n self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,\r\n dropout=dropout)\r\n\r\n self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)\r\n self.dec_layer_norm = nn.LayerNorm(size, eps=1e-6)\r\n\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n # pylint: disable=arguments-differ\r\n def forward(self,\r\n x: Tensor = None,\r\n memory: Tensor = None,\r\n src_mask: Tensor = None,\r\n trg_mask: Tensor = None) -> Tensor:\r\n \"\"\"\r\n Forward pass of a single Transformer decoder layer.\r\n\r\n :param x: inputs\r\n :param memory: source representations\r\n :param src_mask: source mask\r\n :param trg_mask: target mask (so as to not condition on future steps)\r\n :return: output tensor\r\n \"\"\"\r\n # decoder/target self-attention\r\n x_norm = self.x_layer_norm(x)\r\n h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)\r\n h1 = self.dropout(h1) + x\r\n\r\n # source-target attention\r\n h1_norm = self.dec_layer_norm(h1)\r\n h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)\r\n\r\n # final position-wise feed-forward layer\r\n o = self.feed_forward(self.dropout(h2) + h1)\r\n\r\n return o\r\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.zeros",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.matmul",
"torch.arange",
"torch.nn.ReLU"
]
] |
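Note: a small shape-check sketch for the layers in the row above, assuming the file is importable as `transformer_layers` (the import path is an assumption). The mask is boolean with True marking positions to keep, matching the `masked_fill(~mask.unsqueeze(1), ...)` logic in MultiHeadedAttention.

import torch
from transformer_layers import TransformerEncoderLayer, PositionalEncoding

pe = PositionalEncoding(size=512)
layer = TransformerEncoderLayer(size=512, ff_size=2048, num_heads=8, dropout=0.1)

x = torch.rand(2, 7, 512)                     # [batch, src_len, model_size]
mask = torch.ones(2, 1, 7, dtype=torch.bool)  # [batch, 1, src_len]
out = layer(pe(x), mask)                      # -> torch.Size([2, 7, 512])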
c-lai/3D-ResNets-PyTorch
|
[
"488d0d7c4760e60ead4db80fe6f017a8778318ff"
] |
[
"model.py"
] |
[
"import torch\nfrom torch import nn\n\nfrom models import (resnet, resnet2p1d, pre_act_resnet,\n wide_resnet, resnext, densenet, googlenet)\n\n\ndef get_module_name(name):\n name = name.split('.')\n if name[0] == 'module':\n i = 1\n else:\n i = 0\n if name[i] == 'features':\n i += 1\n\n return name[i]\n\n\ndef get_fine_tuning_parameters(model, ft_begin_module):\n if not ft_begin_module:\n return model.parameters()\n\n parameters = []\n add_flag = False\n for k, v in model.named_parameters():\n if ft_begin_module == get_module_name(k):\n add_flag = True\n\n if add_flag:\n parameters.append({'params': v})\n\n return parameters\n\n\ndef generate_model(opt):\n assert opt.model in [\n 'resnet', 'resnet2p1d', 'preresnet', 'wideresnet',\n 'resnext', 'densenet', 'googlenet'\n ]\n\n if opt.model == 'resnet':\n model = resnet.generate_model(model_depth=opt.model_depth,\n n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n shortcut_type=opt.resnet_shortcut,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride,\n no_max_pool=opt.no_max_pool,\n widen_factor=opt.resnet_widen_factor)\n elif opt.model == 'resnet2p1d':\n model = resnet2p1d.generate_model(model_depth=opt.model_depth,\n n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n shortcut_type=opt.resnet_shortcut,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride,\n no_max_pool=opt.no_max_pool,\n widen_factor=opt.resnet_widen_factor)\n elif opt.model == 'wideresnet':\n model = wide_resnet.generate_model(\n model_depth=opt.model_depth,\n k=opt.wide_resnet_k,\n n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n shortcut_type=opt.resnet_shortcut,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride,\n no_max_pool=opt.no_max_pool)\n elif opt.model == 'resnext':\n model = resnext.generate_model(model_depth=opt.model_depth,\n cardinality=opt.resnext_cardinality,\n n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n shortcut_type=opt.resnet_shortcut,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride,\n no_max_pool=opt.no_max_pool)\n elif opt.model == 'preresnet':\n model = pre_act_resnet.generate_model(\n model_depth=opt.model_depth,\n n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n shortcut_type=opt.resnet_shortcut,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride,\n no_max_pool=opt.no_max_pool)\n elif opt.model == 'densenet':\n model = densenet.generate_model(model_depth=opt.model_depth,\n n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride,\n no_max_pool=opt.no_max_pool)\n elif opt.model == 'googlenet':\n model = googlenet.generate_model(n_classes=opt.n_classes,\n n_input_channels=opt.n_input_channels,\n conv1_t_size=opt.conv1_t_size,\n conv1_t_stride=opt.conv1_t_stride)\n\n return model\n\n\ndef load_pretrained_model(model, pretrain_path, model_name, n_finetune_classes):\n if pretrain_path:\n print('loading pretrained model {}'.format(pretrain_path))\n pretrain = torch.load(pretrain_path, map_location='cpu')\n\n model.load_state_dict(pretrain['state_dict'])\n tmp_model = model\n if model_name == 'densenet':\n tmp_model.classifier = nn.Linear(tmp_model.classifier.in_features,\n n_finetune_classes)\n else:\n tmp_model.fc = nn.Linear(tmp_model.fc.in_features,\n n_finetune_classes)\n\n return model\n\n\ndef make_data_parallel(model, is_distributed, device):\n if is_distributed:\n if device.type == 'cuda' 
and device.index is not None:\n torch.cuda.set_device(device)\n model.to(device)\n\n model = nn.parallel.DistributedDataParallel(model,\n device_ids=[device])\n else:\n model.to(device)\n model = nn.parallel.DistributedDataParallel(model)\n elif device.type == 'cuda':\n model = nn.DataParallel(model, device_ids=None).cuda()\n\n return model\n"
] |
[
[
"torch.cuda.set_device",
"torch.load",
"torch.nn.Linear",
"torch.nn.DataParallel",
"torch.nn.parallel.DistributedDataParallel"
]
] |
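Note: a minimal sketch of how `generate_model` and `get_fine_tuning_parameters` above might be driven; the option names mirror the attributes the file reads, but the concrete values and the `model` import path are assumptions, not the repo's documented defaults.

from types import SimpleNamespace
import model  # the module shown above

opt = SimpleNamespace(model='resnet', model_depth=18, n_classes=700,
                      n_input_channels=3, resnet_shortcut='B',
                      conv1_t_size=7, conv1_t_stride=1,
                      no_max_pool=False, resnet_widen_factor=1.0)
net = model.generate_model(opt)
# collect parameters from the final classifier onwards for fine-tuning
params = model.get_fine_tuning_parameters(net, 'fc')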
adit98/google-research
|
[
"0714e9a5a3934d922c0b9dd017943a8e511eb5bc"
] |
[
"jaxnerf/nerf/datasets.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Different datasets implementation plus a general port for all the datasets.\"\"\"\nINTERNAL = False # pylint: disable=g-statement-before-imports\nimport collections\nimport json\nimport os\nfrom os import path\nimport queue\nimport threading\nif not INTERNAL:\n import cv2 # pylint: disable=g-import-not-at-top\nimport jax\nimport numpy as np\nfrom PIL import Image\nfrom jaxnerf.nerf import utils\n\nRays = collections.namedtuple(\"Rays\", [\"origins\", \"directions\", \"viewdirs\"])\n\n\ndef ray_fn(fn, rays):\n \"\"\"Applies `fn` to each element of `rays`, and cast to a `Rays` namedtuple.\"\"\"\n return Rays(*[fn(r) for r in rays])\n\n\ndef get_dataset(split, args):\n return dataset_dict[args.dataset](split, args)\n\n\nclass Dataset(threading.Thread):\n \"\"\"Dataset Base Class.\"\"\"\n\n def __init__(self, split, args):\n super(Dataset, self).__init__()\n self.queue = queue.Queue(3) # Set prefetch buffer to 3 batches.\n self.daemon = True\n self.split = split\n if split == \"train\":\n self._train_init(args)\n elif split == \"test\":\n self._test_init(args)\n else:\n raise ValueError(\n \"the split argument should be either \\\"train\\\" or \\\"test\\\", set\"\n \"to {} here.\".format(split))\n self.batch_size = args.batch_size // jax.host_count()\n self.image_batching = args.image_batching\n self.render_path = args.render_path\n self.start()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"Get the next training batch or test example.\n\n Returns:\n batch: dict, has \"pixels\" and \"rays\".\n \"\"\"\n x = self.queue.get()\n if self.split == \"train\":\n return utils.shard(x)\n else:\n return utils.to_device(x)\n\n def peek(self):\n \"\"\"Peek at the next training batch or test example without dequeuing it.\n\n Returns:\n batch: dict, has \"pixels\" and \"rays\".\n \"\"\"\n x = self.queue.queue[0].copy() # Make a copy of the front of the queue.\n if self.split == \"train\":\n return utils.shard(x)\n else:\n return utils.to_device(x)\n\n def run(self):\n if self.split == \"train\":\n next_func = self._next_train\n else:\n next_func = self._next_test\n while True:\n self.queue.put(next_func())\n\n @property\n def size(self):\n return self.n_examples\n\n def _train_init(self, args):\n \"\"\"Initialize training.\"\"\"\n self._load_renderings(args)\n self._generate_rays()\n\n if args.image_batching:\n # flatten the ray and image dimension together.\n self.images = self.images.reshape([-1, 3])\n self.rays = ray_fn(lambda r: r.reshape([-1, r.shape[-1]]), self.rays)\n else:\n self.images = self.images.reshape([-1, self.resolution, 3])\n self.rays = ray_fn(\n lambda r: r.reshape([-1, self.resolution, r.shape[-1]]), self.rays)\n\n def _test_init(self, args):\n self._load_renderings(args)\n self._generate_rays()\n self.it = 0\n\n def _next_train(self):\n \"\"\"Sample next training batch.\"\"\"\n\n if self.image_batching:\n ray_indices = 
np.random.randint(0, self.rays[0].shape[0],\n (self.batch_size,))\n batch_pixels = self.images[ray_indices]\n batch_rays = ray_fn(lambda r: r[ray_indices], self.rays)\n else:\n image_index = np.random.randint(0, self.n_examples, ())\n ray_indices = np.random.randint(0, self.rays[0][0].shape[0],\n (self.batch_size,))\n batch_pixels = self.images[image_index][ray_indices]\n batch_rays = ray_fn(lambda r: r[image_index][ray_indices], self.rays)\n return {\"pixels\": batch_pixels, \"rays\": batch_rays}\n\n def _next_test(self):\n \"\"\"Sample next test example.\"\"\"\n idx = self.it\n self.it = (self.it + 1) % self.n_examples\n\n if self.render_path:\n return {\"rays\": ray_fn(lambda r: r[idx], self.render_rays)}\n else:\n return {\n \"pixels\": self.images[idx],\n \"rays\": ray_fn(lambda r: r[idx], self.rays)\n }\n\n # TODO(bydeng): Swap this function with a more flexible camera model.\n def _generate_rays(self):\n \"\"\"Generating rays for all images.\"\"\"\n x, y = np.meshgrid( # pylint: disable=unbalanced-tuple-unpacking\n np.arange(self.w, dtype=np.float32), # X-Axis (columns)\n np.arange(self.h, dtype=np.float32), # Y-Axis (rows)\n indexing=\"xy\")\n dirs = np.stack([(x - self.w * 0.5) / self.focal,\n -(y - self.h * 0.5) / self.focal, -np.ones_like(x)],\n axis=-1)\n directions = ((dirs[None, Ellipsis, None, :] *\n self.camtoworlds[:, None, None, :3, :3]).sum(axis=-1))\n origins = np.broadcast_to(self.camtoworlds[:, None, None, :3, -1],\n directions.shape)\n # TODO(barron): Avoid the extra memory overhead wasted here on `viewdirs`.\n self.rays = Rays(\n origins=origins, directions=directions, viewdirs=directions)\n\n\nclass Blender(Dataset):\n \"\"\"Blender Dataset.\"\"\"\n\n def _load_renderings(self, args):\n \"\"\"Load images from disk.\"\"\"\n if args.render_path:\n raise ValueError(\"render_path cannot be used for the blender dataset.\")\n with utils.open_file(\n path.join(args.data_dir, \"transforms_{}.json\".format(self.split)),\n \"r\") as fp:\n meta = json.load(fp)\n images = []\n cams = []\n for i in range(len(meta[\"frames\"])):\n frame = meta[\"frames\"][i]\n fname = os.path.join(args.data_dir, frame[\"file_path\"] + \".png\")\n with utils.open_file(fname, \"rb\") as imgin:\n image = np.array(Image.open(imgin), dtype=np.float32) / 255.\n if args.factor == 2:\n [halfres_h, halfres_w] = [hw // 2 for hw in image.shape[:2]]\n image = cv2.resize(\n image, (halfres_w, halfres_h), interpolation=cv2.INTER_AREA)\n elif args.factor > 0:\n raise ValueError(\"Blender dataset only supports factor=0 or 2, {} \"\n \"set.\".format(args.factor))\n cams.append(frame[\"transform_matrix\"])\n images.append(image)\n self.images = np.stack(images, axis=0)\n if args.white_bkgd:\n self.images = (\n self.images[Ellipsis, :3] * self.images[Ellipsis, -1:] +\n (1. 
- self.images[Ellipsis, -1:]))\n else:\n self.images = self.images[Ellipsis, :3]\n self.h, self.w = self.images.shape[1:3]\n self.resolution = self.h * self.w\n self.camtoworlds = np.stack(cams, axis=0)\n camera_angle_x = float(meta[\"camera_angle_x\"])\n self.focal = .5 * self.w / np.tan(.5 * camera_angle_x)\n self.n_examples = self.images.shape[0]\n\n\nclass LLFF(Dataset):\n \"\"\"LLFF Dataset.\"\"\"\n\n def _load_renderings(self, args):\n \"\"\"Load images from disk.\"\"\"\n # Load images.\n imgdir_suffix = \"\"\n if args.factor > 0:\n imgdir_suffix = \"_{}\".format(args.factor)\n factor = args.factor\n else:\n factor = 1\n imgdir = path.join(args.data_dir, \"images\" + imgdir_suffix)\n if not utils.file_exists(imgdir):\n raise ValueError(\"Image folder {} doesn't exist.\".format(imgdir))\n imgfiles = [\n path.join(imgdir, f)\n for f in sorted(utils.listdir(imgdir))\n if f.endswith(\"JPG\") or f.endswith(\"jpg\") or f.endswith(\"png\")\n ]\n images = []\n for imgfile in imgfiles:\n with utils.open_file(imgfile, \"rb\") as imgin:\n image = np.array(Image.open(imgin), dtype=np.float32) / 255.\n images.append(image)\n images = np.stack(images, axis=-1)\n\n # Load poses and bds.\n with utils.open_file(path.join(args.data_dir, \"poses_bounds.npy\"),\n \"rb\") as fp:\n poses_arr = np.load(fp)\n poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])\n bds = poses_arr[:, -2:].transpose([1, 0])\n if poses.shape[-1] != images.shape[-1]:\n raise RuntimeError(\"Mismatch between imgs {} and poses {}\".format(\n images.shape[-1], poses.shape[-1]))\n\n # Update poses according to downsampling.\n poses[:2, 4, :] = np.array(images.shape[:2]).reshape([2, 1])\n poses[2, 4, :] = poses[2, 4, :] * 1. / factor\n\n # Correct rotation matrix ordering and move variable dim to axis 0.\n poses = np.concatenate(\n [poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)\n poses = np.moveaxis(poses, -1, 0).astype(np.float32)\n images = np.moveaxis(images, -1, 0)\n bds = np.moveaxis(bds, -1, 0).astype(np.float32)\n\n # Rescale according to a default bd factor.\n scale = 1. 
/ (bds.min() * .75)\n poses[:, :3, 3] *= scale\n bds *= scale\n\n # Recenter poses.\n poses = self._recenter_poses(poses)\n\n # Generate a spiral/spherical ray path for rendering videos.\n if args.spherify:\n poses = self._generate_spherical_poses(poses, bds)\n elif self.split == \"test\":\n self._generate_spiral_poses(poses, bds)\n\n # Select the split.\n i_test = np.arange(images.shape[0])[::args.llffhold]\n i_train = np.array(\n [i for i in np.arange(int(images.shape[0])) if i not in i_test])\n if self.split == \"train\":\n indices = i_train\n else:\n indices = i_test\n images = images[indices]\n poses = poses[indices]\n\n self.images = images\n self.camtoworlds = poses[:, :3, :4]\n self.focal = poses[0, -1, -1]\n self.h, self.w = images.shape[1:3]\n self.resolution = self.h * self.w\n if args.render_path:\n self.n_examples = self.render_poses.shape[0]\n else:\n self.n_examples = images.shape[0]\n\n def _generate_rays(self):\n \"\"\"Generate normalized device coordinate rays for llff.\"\"\"\n if self.split == \"test\":\n n_render_poses = self.render_poses.shape[0]\n self.camtoworlds = np.concatenate([self.render_poses, self.camtoworlds],\n axis=0)\n\n super()._generate_rays()\n\n origins = self.rays.origins\n directions = self.rays.directions\n viewdirs = directions\n near = 1.\n\n # Shift ray origins to near plane\n t = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2]\n origins = origins + t[Ellipsis, None] * directions\n\n # Projection\n o0 = -1. * ((2. * self.focal) / self.w) * origins[Ellipsis, 0] / origins[Ellipsis, 2]\n o1 = -1. * ((2. * self.focal) / self.h) * origins[Ellipsis, 1] / origins[Ellipsis, 2]\n o2 = 1. + 2. * near / origins[Ellipsis, 2]\n\n d0 = (-1. * ((2. * self.focal) / self.w) *\n (directions[Ellipsis, 0] / directions[Ellipsis, 2] -\n origins[Ellipsis, 0] / origins[Ellipsis, 2]))\n d1 = (-1. * ((2. * self.focal) / self.h) *\n (directions[Ellipsis, 1] / directions[Ellipsis, 2] -\n origins[Ellipsis, 1] / origins[Ellipsis, 2]))\n d2 = -2. 
* near / origins[Ellipsis, 2]\n\n origins = np.stack([o0, o1, o2], -1)\n directions = np.stack([d0, d1, d2], -1)\n self.rays = Rays(origins=origins, directions=directions, viewdirs=viewdirs)\n\n # Split poses from the dataset and generated poses\n if self.split == \"test\":\n self.camtoworlds = self.camtoworlds[n_render_poses:]\n split = [np.split(r, [n_render_poses], 0) for r in self.rays]\n split0, split1 = zip(*split)\n self.render_rays = Rays(*split0)\n self.rays = Rays(*split1)\n\n def _recenter_poses(self, poses):\n \"\"\"Recenter poses according to the original NeRF code.\"\"\"\n poses_ = poses.copy()\n bottom = np.reshape([0, 0, 0, 1.], [1, 4])\n c2w = self._poses_avg(poses)\n c2w = np.concatenate([c2w[:3, :4], bottom], -2)\n bottom = np.tile(np.reshape(bottom, [1, 1, 4]), [poses.shape[0], 1, 1])\n poses = np.concatenate([poses[:, :3, :4], bottom], -2)\n poses = np.linalg.inv(c2w) @ poses\n poses_[:, :3, :4] = poses[:, :3, :4]\n poses = poses_\n return poses\n\n def _poses_avg(self, poses):\n \"\"\"Average poses according to the original NeRF code.\"\"\"\n hwf = poses[0, :3, -1:]\n center = poses[:, :3, 3].mean(0)\n vec2 = self._normalize(poses[:, :3, 2].sum(0))\n up = poses[:, :3, 1].sum(0)\n c2w = np.concatenate([self._viewmatrix(vec2, up, center), hwf], 1)\n return c2w\n\n def _viewmatrix(self, z, up, pos):\n \"\"\"Construct lookat view matrix.\"\"\"\n vec2 = self._normalize(z)\n vec1_avg = up\n vec0 = self._normalize(np.cross(vec1_avg, vec2))\n vec1 = self._normalize(np.cross(vec2, vec0))\n m = np.stack([vec0, vec1, vec2, pos], 1)\n return m\n\n def _normalize(self, x):\n \"\"\"Normalization helper function.\"\"\"\n return x / np.linalg.norm(x)\n\n def _generate_spiral_poses(self, poses, bds):\n \"\"\"Generate a spiral path for rendering.\"\"\"\n c2w = self._poses_avg(poses)\n # Get average pose.\n up = self._normalize(poses[:, :3, 1].sum(0))\n # Find a reasonable \"focus depth\" for this dataset.\n close_depth, inf_depth = bds.min() * .9, bds.max() * 5.\n dt = .75\n mean_dz = 1. / (((1. - dt) / close_depth + dt / inf_depth))\n focal = mean_dz\n # Get radii for spiral path.\n tt = poses[:, :3, 3]\n rads = np.percentile(np.abs(tt), 90, 0)\n c2w_path = c2w\n n_views = 120\n n_rots = 2\n # Generate poses for spiral path.\n render_poses = []\n rads = np.array(list(rads) + [1.])\n hwf = c2w_path[:, 4:5]\n zrate = .5\n for theta in np.linspace(0., 2. 
* np.pi * n_rots, n_views + 1)[:-1]:\n c = np.dot(c2w[:3, :4], (np.array(\n [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads))\n z = self._normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))\n render_poses.append(np.concatenate([self._viewmatrix(z, up, c), hwf], 1))\n self.render_poses = np.array(render_poses).astype(np.float32)[:, :3, :4]\n\n def _generate_spherical_poses(self, poses, bds):\n \"\"\"Generate a 360 degree spherical path for rendering.\"\"\"\n # pylint: disable=g-long-lambda\n p34_to_44 = lambda p: np.concatenate([\n p,\n np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])\n ], 1)\n rays_d = poses[:, :3, 2:3]\n rays_o = poses[:, :3, 3:4]\n\n def min_line_dist(rays_o, rays_d):\n a_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])\n b_i = -a_i @ rays_o\n pt_mindist = np.squeeze(-np.linalg.inv(\n (np.transpose(a_i, [0, 2, 1]) @ a_i).mean(0)) @ (b_i).mean(0))\n return pt_mindist\n\n pt_mindist = min_line_dist(rays_o, rays_d)\n center = pt_mindist\n up = (poses[:, :3, 3] - center).mean(0)\n vec0 = self._normalize(up)\n vec1 = self._normalize(np.cross([.1, .2, .3], vec0))\n vec2 = self._normalize(np.cross(vec0, vec1))\n pos = center\n c2w = np.stack([vec1, vec2, vec0, pos], 1)\n poses_reset = (\n np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4]))\n rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))\n sc = 1. / rad\n poses_reset[:, :3, 3] *= sc\n bds *= sc\n rad *= sc\n centroid = np.mean(poses_reset[:, :3, 3], 0)\n zh = centroid[2]\n radcircle = np.sqrt(rad**2 - zh**2)\n new_poses = []\n\n for th in np.linspace(0., 2. * np.pi, 120):\n camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])\n up = np.array([0, 0, -1.])\n vec2 = self._normalize(camorigin)\n vec0 = self._normalize(np.cross(vec2, up))\n vec1 = self._normalize(np.cross(vec2, vec0))\n pos = camorigin\n p = np.stack([vec0, vec1, vec2, pos], 1)\n new_poses.append(p)\n\n new_poses = np.stack(new_poses, 0)\n new_poses = np.concatenate([\n new_poses,\n np.broadcast_to(poses[0, :3, -1:], new_poses[:, :3, -1:].shape)\n ], -1)\n poses_reset = np.concatenate([\n poses_reset[:, :3, :4],\n np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape)\n ], -1)\n if self.split == \"test\":\n self.render_poses = new_poses[:, :3, :4]\n return poses_reset\n\n\ndataset_dict = {\n \"blender\": Blender,\n \"llff\": LLFF,\n}\n"
] |
[
[
"numpy.split",
"numpy.sqrt",
"numpy.linspace",
"numpy.concatenate",
"numpy.mean",
"numpy.cross",
"numpy.moveaxis",
"numpy.random.randint",
"numpy.square",
"numpy.ones_like",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.stack",
"numpy.sin",
"numpy.load",
"numpy.linalg.inv",
"numpy.tan",
"numpy.transpose",
"numpy.array",
"numpy.abs",
"numpy.linalg.norm",
"numpy.cos",
"numpy.broadcast_to"
]
] |
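Note: `ray_fn` in the row above maps a function over each field of the `Rays` namedtuple; a tiny sketch of the flattening done in `_train_init`, assuming the import path from the file header and placeholder array shapes.

import numpy as np
from jaxnerf.nerf.datasets import Rays, ray_fn

rays = Rays(origins=np.zeros((2, 4, 3)),      # [n_images, n_rays, 3]
            directions=np.ones((2, 4, 3)),
            viewdirs=np.ones((2, 4, 3)))
flat = ray_fn(lambda r: r.reshape([-1, r.shape[-1]]), rays)
print(flat.origins.shape)  # (8, 3): image and ray dimensions merged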
stacksmashing/TensorKartRealHW
|
[
"bb5e1eb164db0a78738ac6c0c7f07909c4d89dff"
] |
[
"train3.py"
] |
[
"#!/usr/bin/env python\n#model like in https://www.youtube.com/watch?v=tcpmucSLKo8\n\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import backend as K\nimport sklearn\nfrom utils import Sample\n\n# Global variable\nOUT_SHAPE = 1\nINPUT_SHAPE = (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D)\n\n\ndef customized_loss(y_true, y_pred, loss='euclidean'):\n # Simply a mean squared error that penalizes large joystick summed values\n if loss == 'L2':\n L2_norm_cost = 0.001\n val = K.mean(K.square((y_pred - y_true)), axis=-1) \\\n + K.sum(K.square(y_pred), axis=-1)/2 * L2_norm_cost\n # euclidean distance loss\n elif loss == 'euclidean':\n val = K.sqrt(K.sum(K.square(y_pred-y_true), axis=-1))\n return val\n\n\ndef create_model_1(keep_prob = 0.8):\n model = Sequential()\n\n # NVIDIA's model\n model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), activation='relu', input_shape= INPUT_SHAPE))\n model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2), activation='relu'))\n model.add(Conv2D(48, kernel_size=(5, 5), strides=(2, 2), activation='relu'))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\n model.add(Flatten())\n model.add(Dense(1164, activation='relu'))\n drop_out = 1 - keep_prob\n model.add(Dropout(drop_out))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(drop_out))\n model.add(Dense(50, activation='relu'))\n model.add(Dropout(drop_out))\n model.add(Dense(10, activation='relu'))\n model.add(Dropout(drop_out))\n \n model.add(Dense(OUT_SHAPE, activation='softsign'))\n model.summary()\n\n return model\n\ndef create_model_2(keep_prob = 1.0):\n model = Sequential()\n\n # NVIDIA's model\n model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), activation='elu', input_shape= INPUT_SHAPE))\n model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2), activation='elu'))\n model.add(Conv2D(48, kernel_size=(5, 5), strides=(2, 2), activation='elu'))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='elu'))\n model.add(Conv2D(64, kernel_size=(3, 3), activation='elu'))\n model.add(Flatten())\n drop_out = 1 - keep_prob\n model.add(Dropout(drop_out))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(drop_out))\n model.add(Dense(50, activation='elu'))\n model.add(Dropout(drop_out))\n model.add(Dense(10, activation='elu'))\n model.add(Dropout(drop_out))\n model.add(Dense(OUT_SHAPE))\n model.summary()\n\n return model\n\nimport sys\nimport os\nif __name__ == '__main__':\n\n # Load Training Data\n x_train = np.load(os.path.join(sys.argv[2], \"X.npy\"))\n y_train = np.load(os.path.join(sys.argv[2], \"y.npy\"))\n\n x_train, y_train = sklearn.utils.shuffle(x_train, y_train)\n # print(y_train)\n\n print(x_train.shape[0], 'train samples')\n\n\n # Training loop variables\n epochs = 120\n batch_size = 70\n print(\"1\")\n if(sys.argv[1] == \"1\"):\n model = create_model_1(0.8)\n elif(sys.argv[1] == \"2\"):\n model = create_model_2(0.8)\n\n print(\"2\")\n #learning_rate=0.0001\n model.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=0.0001))\n print(\"3\")\n model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, validation_split=0.2)\n print(\"4\")\n model.save_weights('model_weights.h5')\n"
] |
[
[
"tensorflow.keras.layers.Dense",
"sklearn.utils.shuffle",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.backend.square",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
]
] |
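Note: the script above compiles with the built-in 'mse' loss even though it defines `customized_loss`; a sketch of wiring the custom loss in instead, assuming the file is importable as `train3` and the data is loaded as in its __main__ block.

from tensorflow.keras import optimizers
from train3 import create_model_2, customized_loss

model = create_model_2(keep_prob=0.8)
model.compile(loss=customized_loss,  # euclidean variant by default
              optimizer=optimizers.Adam(learning_rate=0.0001))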
epahfk84/shape_as_points
|
[
"bcda2fdaab22c26e33b074d3c993fbb55fd567a1"
] |
[
"generate.py"
] |
[
"import torch\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np; np.set_printoptions(precision=4)\nimport shutil, argparse, time, os\nimport pandas as pd\nfrom collections import defaultdict\nfrom src import config\nfrom src.utils import mc_from_psr, export_mesh, export_pointcloud\nfrom src.dpsr import DPSR\nfrom src.training import Trainer\nfrom src.model import Encode2Points\nfrom src.utils import load_config, load_model_manual, scale2onet, is_url, load_url\nfrom tqdm import tqdm\nfrom pdb import set_trace as st\n\n\ndef main():\n parser = argparse.ArgumentParser(description='MNIST toy experiment')\n parser.add_argument('config', type=str, help='Path to config file.')\n parser.add_argument('--no_cuda', action='store_true', default=False,\n help='disables CUDA training') \n parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\n parser.add_argument('--iter', type=int, metavar='S', help='the training iteration to be evaluated.')\n \n args = parser.parse_args()\n cfg = load_config(args.config, 'configs/default.yaml')\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n data_type = cfg['data']['data_type']\n input_type = cfg['data']['input_type']\n vis_n_outputs = cfg['generation']['vis_n_outputs']\n if vis_n_outputs is None:\n vis_n_outputs = -1\n # Shorthands\n out_dir = cfg['train']['out_dir']\n if not out_dir:\n os.makedirs(out_dir)\n generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])\n out_time_file = os.path.join(generation_dir, 'time_generation_full.pkl')\n out_time_file_class = os.path.join(generation_dir, 'time_generation.pkl')\n\n # PYTORCH VERSION > 1.0.0\n assert(float(torch.__version__.split('.')[-3]) > 0)\n\n dataset = config.get_dataset('test', cfg, return_idx=True)\n test_loader = torch.utils.data.DataLoader(\n dataset, batch_size=1, num_workers=0, shuffle=False)\n\n model = Encode2Points(cfg).to(device)\n \n # load model\n try:\n if is_url(cfg['test']['model_file']):\n state_dict = load_url(cfg['test']['model_file'])\n elif cfg['generation'].get('iter', 0)!=0:\n state_dict = torch.load(os.path.join(out_dir, 'model', '%04d.pt'% cfg['generation']['iter']))\n generation_dir += '_%04d'%cfg['generation']['iter']\n elif args.iter is not None:\n state_dict = torch.load(os.path.join(out_dir, 'model', '%04d.pt'% args.iter))\n else:\n state_dict = torch.load(os.path.join(out_dir, 'model_best.pt'))\n\n load_model_manual(state_dict['state_dict'], model)\n\n except:\n print('Model loading error. 
Exiting.')\n exit()\n \n \n # Generator\n generator = config.get_generator(model, cfg, device=device)\n \n # Determine what to generate\n generate_mesh = cfg['generation']['generate_mesh']\n generate_pointcloud = cfg['generation']['generate_pointcloud']\n \n # Statistics\n time_dicts = []\n\n # Generate\n model.eval()\n dpsr = DPSR(res=(cfg['generation']['psr_resolution'], \n cfg['generation']['psr_resolution'], \n cfg['generation']['psr_resolution']), \n sig= cfg['generation']['psr_sigma']).to(device)\n\n \n\n # Count how many models already created\n model_counter = defaultdict(int)\n\n print('Generating...')\n for it, data in enumerate(tqdm(test_loader)):\n\n # Output folders\n mesh_dir = os.path.join(generation_dir, 'meshes')\n in_dir = os.path.join(generation_dir, 'input')\n pointcloud_dir = os.path.join(generation_dir, 'pointcloud')\n generation_vis_dir = os.path.join(generation_dir, 'vis', )\n\n # Get index etc.\n idx = data['idx'].item()\n \n try:\n model_dict = dataset.get_model_dict(idx)\n except AttributeError:\n model_dict = {'model': str(idx), 'category': 'n/a'}\n\n modelname = model_dict['model']\n category_id = model_dict['category']\n\n try:\n category_name = dataset.metadata[category_id].get('name', 'n/a')\n except AttributeError:\n category_name = 'n/a'\n \n if category_id != 'n/a':\n mesh_dir = os.path.join(mesh_dir, str(category_id))\n pointcloud_dir = os.path.join(pointcloud_dir, str(category_id))\n in_dir = os.path.join(in_dir, str(category_id))\n\n folder_name = str(category_id)\n if category_name != 'n/a':\n folder_name = str(folder_name) + '_' + category_name.split(',')[0]\n\n generation_vis_dir = os.path.join(generation_vis_dir, folder_name)\n\n # Create directories if necessary\n if vis_n_outputs >= 0 and not os.path.exists(generation_vis_dir):\n os.makedirs(generation_vis_dir)\n \n if generate_mesh and not os.path.exists(mesh_dir):\n os.makedirs(mesh_dir)\n \n if generate_pointcloud and not os.path.exists(pointcloud_dir):\n os.makedirs(pointcloud_dir)\n \n if not os.path.exists(in_dir):\n os.makedirs(in_dir)\n\n # Timing dict\n time_dict = {\n 'idx': idx,\n 'class id': category_id,\n 'class name': category_name,\n 'modelname':modelname,\n }\n time_dicts.append(time_dict)\n\n # Generate outputs\n out_file_dict = {}\n \n if generate_mesh:\n #! 
deploy the generator to a separate class\n out = generator.generate_mesh(data)\n\n v, f, points, normals, stats_dict = out\n time_dict.update(stats_dict)\n\n # Write output\n mesh_out_file = os.path.join(mesh_dir, '%s.off' % modelname)\n export_mesh(mesh_out_file, scale2onet(v), f)\n out_file_dict['mesh'] = mesh_out_file\n \n if generate_pointcloud:\n pointcloud_out_file = os.path.join(\n pointcloud_dir, '%s.ply' % modelname) \n export_pointcloud(pointcloud_out_file, scale2onet(points), normals)\n out_file_dict['pointcloud'] = pointcloud_out_file\n \n if cfg['generation']['copy_input']:\n inputs_path = os.path.join(in_dir, '%s.ply' % modelname)\n p = data.get('inputs').to(device)\n export_pointcloud(inputs_path, scale2onet(p))\n out_file_dict['in'] = inputs_path\n \n # Copy to visualization directory for first vis_n_output samples\n c_it = model_counter[category_id]\n if c_it < vis_n_outputs:\n # Save output files\n img_name = '%02d.off' % c_it\n for k, filepath in out_file_dict.items():\n ext = os.path.splitext(filepath)[1]\n out_file = os.path.join(generation_vis_dir, '%02d_%s%s'\n % (c_it, k, ext))\n shutil.copyfile(filepath, out_file)\n \n # Also generate oracle meshes\n if cfg['generation']['exp_oracle']:\n points_gt = data.get('gt_points').to(device)\n normals_gt = data.get('gt_points.normals').to(device)\n psr_gt = dpsr(points_gt, normals_gt)\n v, f, _ = mc_from_psr(psr_gt,\n zero_level=cfg['data']['zero_level'])\n out_file = os.path.join(generation_vis_dir, '%02d_%s%s'\n % (c_it, 'mesh_oracle', '.off'))\n export_mesh(out_file, scale2onet(v), f)\n \n model_counter[category_id] += 1\n\n\n # Create pandas dataframe and save\n time_df = pd.DataFrame(time_dicts)\n time_df.set_index(['idx'], inplace=True)\n time_df.to_pickle(out_time_file)\n\n # Create pickle files with main statistics\n time_df_class = time_df.groupby(by=['class name']).mean()\n time_df_class.loc['mean'] = time_df_class.mean()\n time_df_class.to_pickle(out_time_file_class)\n\n # Print results\n print('Timings [s]:')\n print(time_df_class)\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.__version__.split",
"numpy.set_printoptions",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.cuda.is_available",
"torch.device"
]
] |
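The tail of `generate.py` above aggregates per-model timing dicts into per-class means with pandas, then appends an overall `'mean'` row. A minimal sketch of that aggregation on toy data; the `'time (s)'` column name is hypothetical (the real keys come from the generator's `stats_dict`):

```python
# A minimal sketch of the timing aggregation at the end of generate.py:
# one dict per generated model, grouped by class name. Toy data only;
# the 'time (s)' key is a hypothetical stand-in for the real stats keys.
import pandas as pd

time_dicts = [
    {'idx': 0, 'class name': 'chair', 'time (s)': 0.12},
    {'idx': 1, 'class name': 'chair', 'time (s)': 0.10},
    {'idx': 2, 'class name': 'table', 'time (s)': 0.20},
]
time_df = pd.DataFrame(time_dicts)
time_df.set_index(['idx'], inplace=True)

time_df_class = time_df.groupby(by=['class name']).mean()
time_df_class.loc['mean'] = time_df_class.mean()
print(time_df_class)  # per-class means plus an overall 'mean' row
```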
kevin-michael-cs230/bigbird
|
[
"38ef761f56d89be11f3a64e4aabac554c8454050"
] |
[
"bigbird/core/modeling.py"
] |
[
"# Copyright 2020 The BigBird Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The main BigBird model and related functions.\"\"\"\n\nimport copy\n\nfrom bigbird.core import decoder\nfrom bigbird.core import encoder\nfrom bigbird.core import utils\nimport tensorflow.compat.v2 as tf\n\n\nclass BertModel(tf.compat.v1.layers.Layer):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into SentencePiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n params = utils.BigBirdConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.BertModel(params, train=True)\n\n _, pooled_output = model(input_ids=input_ids, token_type_ids=token_type_ids)\n\n label_embeddings = tf.get_variable(...)\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n def __init__(self, params):\n \"\"\"Constructor for BertModel.\n\n Args:\n params: `BigBirdConfig` dictionary.\n \"\"\"\n self.params = copy.deepcopy(params)\n self.scope = params[\"scope\"]\n\n with tf.compat.v1.variable_scope(\n self.scope, reuse=tf.compat.v1.AUTO_REUSE) as vs:\n self.embeder = utils.EmbeddingLayer(\n vocab_size=self.params[\"vocab_size\"],\n emb_dim=self.params[\"hidden_size\"],\n initializer=utils.create_initializer(\n self.params[\"initializer_range\"]),\n scale_emb=self.params[\"rescale_embedding\"],\n use_token_type=True,\n num_token_types=self.params[\"type_vocab_size\"],\n use_position_embeddings=True,\n max_position_embeddings=self.params[\"max_position_embeddings\"],\n dropout_prob=self.params[\"hidden_dropout_prob\"])\n self.encoder = encoder.EncoderStack(self.params)\n self.pooler = tf.compat.v1.layers.Dense(\n units=self.params[\"hidden_size\"],\n activation=tf.tanh,\n kernel_initializer=utils.create_initializer(\n self.params[\"initializer_range\"]),\n name=\"pooler/dense\")\n super(BertModel, self).__init__(name=self.scope, _scope=vs)\n\n @property\n def trainable_weights(self):\n tvar_list = (self.embeder.trainable_weights +\n self.encoder.trainable_weights +\n self.pooler.trainable_weights)\n self._trainable_weights = list({v.name: v for v in tvar_list}.values())\n return self._trainable_weights\n\n def call(self,\n input_ids,\n token_type_ids=None,\n training=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n training: Boolean indicating whether the call is training or inference.\n\n Returns:\n sequence_output: Tensor of shape [batch_size, seq_length, hidden_size]\n pooled_output: Tensor of shape [batch_size, hidden_size]\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n if token_type_ids is None:\n token_type_ids = tf.zeros_like(input_ids, 
dtype=tf.int32)\n\n # Perform embedding lookup on the word ids.\n embedding_output = self.embeder(input_ids,\n self.params[\"max_encoder_length\"],\n token_type_ids=token_type_ids,\n training=training)\n\n # Generate mask.\n input_mask = tf.where(input_ids > 0,\n tf.ones_like(input_ids), tf.zeros_like(input_ids))\n\n # Run the stacked transformer.\n sequence_output = self.encoder(embedding_output, input_mask, training)\n\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n first_token_tensor = sequence_output[:, 0, :]\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. We assume that this has been pre-trained\n pooled_output = self.pooler(first_token_tensor)\n\n return sequence_output, pooled_output\n\n\nclass TransformerModel(tf.compat.v1.layers.Layer):\n \"\"\"Encoder-Decoder transformer model.\n\n Example usage:\n\n ```python\n # Already been converted into SentencePiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n target_ids = tf.constant([[43, 76, 38], [56, 8, 0]])\n\n params = utils.BigBirdConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.TransformerModel(params, train=True)\n\n predictions, _ = model(input_ids=input_ids, target_ids=target_ids)\n\n log_probs, logits, pred_ids = predictions\n ...\n ```\n \"\"\"\n\n def __init__(self, params):\n \"\"\"Constructor for TransformerModel.\n\n Args:\n params: `BigBirdConfig` dictionary.\n \"\"\"\n self.params = copy.deepcopy(params)\n self.scope = params[\"scope\"]\n\n with tf.compat.v1.variable_scope(\n self.scope, reuse=tf.compat.v1.AUTO_REUSE) as vs:\n self.embeder = utils.EmbeddingLayer(\n vocab_size=self.params[\"vocab_size\"],\n emb_dim=self.params[\"hidden_size\"],\n initializer=utils.create_initializer(\n self.params[\"initializer_range\"]),\n scale_emb=self.params[\"rescale_embedding\"],\n use_token_type=False,\n num_token_types=None,\n use_position_embeddings=True,\n max_position_embeddings=self.params[\"max_position_embeddings\"],\n dropout_prob=self.params[\"hidden_dropout_prob\"])\n self.encoder = encoder.EncoderStack(self.params)\n self.decoder = decoder.DecoderStack(self.params)\n super(TransformerModel, self).__init__(name=self.scope, _scope=vs)\n\n @property\n def trainable_weights(self):\n tvar_list = (self.embeder.trainable_weights +\n self.encoder.trainable_weights +\n self.decoder.trainable_weights)\n self._trainable_weights = list({v.name: v for v in tvar_list}.values())\n return self._trainable_weights\n\n def _encode(self, input_ids, training=None):\n \"\"\"Generate continuous representation for ids.\n\n Args:\n input_ids: Int tensor with shape [batch_size, input_length].\n training: Boolean indicating whether the call is training or inference.\n\n Returns:\n A float tensors of shape\n [batch_size, input_length, hidden_size].\n \"\"\"\n # Perform embedding lookup on the word ids.\n input_embs = self.embeder(\n input_ids, self.params[\"max_encoder_length\"], training=training)\n\n # Generate mask.\n input_mask = tf.where(input_ids > 0,\n tf.ones_like(input_ids), tf.zeros_like(input_ids))\n\n # Run the stacked transformer.\n encoder_output = self.encoder(input_embs, input_mask, training)\n\n return encoder_output, 
input_mask\n\n def _get_start_token_ids(self, tensor_for_shape):\n start_token_id = 2\n batch_size = utils.get_shape_list(tensor_for_shape)[0]\n return tf.ones([batch_size], dtype=tf.int32) * start_token_id\n\n def get_inputs_from_targets(self, targets, start_token_ids):\n \"\"\"Converts target ids to input ids, i.e. adds <s> and removes last.\"\"\"\n length = tf.math.count_nonzero(targets, axis=1, dtype=tf.int32)\n # Add start token ids.\n inputs = tf.concat([tf.expand_dims(start_token_ids, axis=1), targets], 1)\n # Remove </s> from the input.\n mask = tf.sequence_mask(length, self.params[\"max_decoder_length\"]+1,\n dtype=tf.int32)\n inputs = (mask * inputs)[:, :-1]\n return inputs\n\n def _decode(self, target_ids, target_mask, start_token_ids,\n encoder_output, encoder_mask, training):\n \"\"\"Compute likelihood of target tokens under the model.\n\n Args:\n target_ids: tensor with shape [batch_size, target_length, hidden_size]\n target_mask: self-attention bias for decoder attention layer. [batch_size,\n input_length]\n start_token_ids: int32 tensor of shape [batch_size] for first decoder\n input.\n encoder_output: Continuous representation of input sequence. Float tensor\n with shape [batch_size, input_length, hidden_size].\n encoder_mask: Float tensor with shape [batch_size, input_length].\n training: Boolean indicating whether the call is training or inference.\n\n Returns:\n A dict containing the output ids, the output log-probs, the output logits.\n \"\"\"\n\n # Prepare inputs to decoder layers by shifting targets, embedding ids,\n # adding positional encoding and applying dropout.\n input_ids = self.get_inputs_from_targets(target_ids, start_token_ids)\n\n input_embs = self.embeder(input_ids, self.params[\"max_decoder_length\"],\n training=training)\n\n outputs = self.decoder(input_embs, target_mask,\n encoder_output, encoder_mask, training=training)\n\n logits = self.embeder.linear(outputs)\n output_ids = tf.cast(tf.argmax(logits, axis=-1), tf.int32)\n\n log_probs = -tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_ids, logits=logits)\n log_probs = tf.where(target_ids > 0, log_probs,\n tf.zeros_like(log_probs, tf.float32))\n\n return (tf.identity(log_probs, name=\"log_probs\"),\n tf.identity(logits, name=\"logits\"),\n tf.cast(output_ids, tf.int32, name=\"pred_ids\"),)\n\n def _init_cache(self, batch_size):\n \"\"\"Initialize cache for decoding.\"\"\"\n\n max_decode_len = self.params[\"max_decoder_length\"]\n num_heads = self.params[\"num_attention_heads\"]\n head_size = int(self.params[\"hidden_size\"] / num_heads)\n\n cache = {}\n for layer in range(self.params[\"num_hidden_layers\"]):\n cache[\"layer_%d\" % layer] = {\n \"k\": tf.zeros([batch_size, num_heads, max_decode_len, head_size]),\n \"v\": tf.zeros([batch_size, num_heads, max_decode_len, head_size]),\n }\n return cache\n\n def _get_symbols_to_logits_fn(self, decoder_self_attention_mask):\n \"\"\"Returns a decoding function that calculates logits of the next tokens.\"\"\"\n\n max_decode_len = self.params[\"max_decoder_length\"]\n\n def _symbols_to_logits_fn(target_ids, cache, i):\n \"\"\"Generate logits for next candidate IDs.\n\n Args:\n target_ids: Current decoded sequences. 
int tensor with shape\n [batch_size, i + 1]\n cache: dictionary of values storing the encoder output, encoder-decoder\n attention bias, and previous decoder attention values.\n i: Loop index\n\n Returns:\n Tuple of\n (logits with shape [batch_size * beam_size, vocab_size],\n updated cache values)\n \"\"\"\n decoder_input = tf.slice(target_ids,\n [0, tf.maximum(tf.cast(0, i.dtype), i - 1)],\n [target_ids.shape[0], 1])\n self_attention_mask = tf.slice(decoder_self_attention_mask, [0, 0, i, 0],\n [1, 1, 1, max_decode_len])\n\n # Preprocess decoder input by getting embeddings and adding timing signal.\n decoder_input = self.embeder(\n decoder_input, 1, start_pos=i, training=False)\n\n decoder_output = self.decoder(\n decoder_input, self_attention_mask,\n cache.get(\"encoder_output\"), cache.get(\"encoder_mask\"),\n cache=cache, decode_i=i, training=False)\n\n logits = self.embeder.linear(decoder_output)\n logits = tf.squeeze(logits, axis=[1])\n\n return logits\n\n return _symbols_to_logits_fn\n\n def _predict(self, target_ids, target_mask, start_token_ids,\n encoder_output, encoder_mask):\n \"\"\"Beam decode output tokens and probabilities.\n\n Args:\n target_ids: tensor with shape [batch_size, target_length, hidden_size]\n target_mask: self-attention bias for decoder attention layer. [batch_size,\n input_length]\n start_token_ids: int32 tensor of shape [batch_size] for first decoder\n input.\n encoder_output: Continuous representation of input sequence. Float\n tensor with shape [batch_size, target_length, num_hidden_layers,\n hidden_size]\n encoder_mask: bias for encoder-decoder attention layer. [batch_size,\n input_length]\n\n Returns:\n A tuple of:\n `log_probs`: Log-probs of output tokens.\n `logits`: Logits of output tokens.\n `pred_ids`: Predicted output sequence.\n \"\"\"\n batch_size = utils.get_shape_list(start_token_ids)[0]\n end_token_id = 1\n\n # One step logit function.\n symbols_to_logits_fn = self._get_symbols_to_logits_fn(target_mask)\n\n # Create cache storing decoder attention values for each layer.\n cache = self._init_cache(batch_size)\n\n if encoder_output is not None:\n # Add encoder output and attention bias to the cache.\n cache[\"encoder_output\"] = encoder_output\n cache[\"encoder_mask\"] = encoder_mask\n\n decoded_ids = decoder.left2right_decode(\n symbols_to_logits_fn,\n start_token_ids,\n cache,\n batch_size,\n self.params[\"max_decoder_length\"],\n vocab_size=self.params[\"vocab_size\"],\n beam_size=self.params[\"beam_size\"],\n beam_start=5,\n beam_alpha=self.params[\"alpha\"],\n beam_min=0,\n beam_max=-1,\n eos_id=end_token_id)\n\n # Get the top sequence for each batch element\n output_ids = tf.cast(decoded_ids, tf.int32, name=\"pred_ids\")\n\n # Calculate log probs for given sequence if available.\n calc_ids = output_ids if target_ids is None else target_ids\n output_log_probs, output_logits, _ = self._decode(\n calc_ids, target_mask, start_token_ids,\n encoder_output, encoder_mask, training=False)\n\n return (output_log_probs, output_logits, output_ids)\n\n def _decode_and_predict(self, target_ids, encoder_output, encoder_mask,\n training):\n \"\"\"Decodes a sequence given the input and the encoder.\n\n Args:\n target_ids: tensor with shape [batch_size, target_length, hidden_size]\n encoder_output: Continuous representation of input sequence. Float\n tensor with shape [batch_size, target_length, num_hidden_layers,\n hidden_size]\n encoder_mask: bias for encoder-decoder attention layer. 
[batch_size,\n input_length]\n training: Boolean indicating whether the call is training or inference.\n\n Returns:\n A tuple of:\n `log_probs`: Log-probs of output tokens.\n `logits`: Logits of output tokens.\n `pred_ids`: Predicted output sequence.\n \"\"\"\n # Create initial set of IDs that will be passed into symbols_to_logits_fn.\n start_token_ids = self._get_start_token_ids(encoder_output)\n\n # Create causal self-attention mask for decoder.\n target_mask = decoder.create_self_attention_mask(\n self.params[\"max_decoder_length\"])\n\n predictions = {}\n if training:\n predictions = self._decode(target_ids, target_mask, start_token_ids,\n encoder_output, encoder_mask, training=True)\n else:\n predictions = self._predict(target_ids, target_mask, start_token_ids,\n encoder_output, encoder_mask)\n\n return predictions\n\n def call(self,\n input_ids,\n target_ids=None,\n training=None):\n # Run the inputs through the encoder layer to map the symbol\n # representations to continuous representations.\n encoder_output, encoder_mask = self._encode(input_ids, training)\n\n # Decode.\n predictions = self._decode_and_predict(target_ids, encoder_output,\n encoder_mask, training)\n\n return predictions, encoder_output\n"
] |
[
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.math.count_nonzero",
"tensorflow.compat.v2.slice",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.compat.v1.variable_scope",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.squeeze",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.compat.v2.argmax",
"tensorflow.compat.v2.sequence_mask"
]
] |
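`BertModel.call` above "pools" the encoder output by taking the first token's hidden state and passing it through a tanh `Dense` layer, yielding one fixed-size vector per sequence. A minimal sketch, assuming TensorFlow 2.x, with a random tensor standing in for real encoder output:

```python
# A minimal sketch, assuming TensorFlow 2.x: BERT-style pooling takes the
# hidden state of the first token and projects it through a tanh Dense
# layer. Shapes only; the random tensor stands in for encoder output.
import tensorflow as tf

batch_size, seq_length, hidden_size = 2, 8, 16
sequence_output = tf.random.normal([batch_size, seq_length, hidden_size])
pooler = tf.keras.layers.Dense(hidden_size, activation=tf.tanh)

first_token_tensor = sequence_output[:, 0, :]  # [batch_size, hidden_size]
pooled_output = pooler(first_token_tensor)
print(pooled_output.shape)  # (2, 16)
```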
ruslanmv/Machine-Learning-Codes
|
[
"dfc0ce1321c9953800d4238b3f4ab8f164bf26fc"
] |
[
"rl2/cartpole/dqn_theano.py"
] |
[
"# https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python\n# https://www.udemy.com/deep-reinforcement-learning-in-python\nfrom __future__ import print_function, division\nfrom builtins import range\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\nimport gym\nimport os\nimport sys\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport matplotlib.pyplot as plt\nfrom gym import wrappers\nfrom datetime import datetime\nfrom q_learning_bins import plot_running_avg\n\n\n# global counter\nglobal_iters = 0\n\n\n# helper for adam optimizer\n# use tensorflow defaults\ndef adam(cost, params, lr0=1e-2, beta1=0.9, beta2=0.999, eps=1e-8):\n grads = T.grad(cost, params)\n updates = []\n time = theano.shared(0)\n new_time = time + 1\n updates.append((time, new_time))\n lr = lr0*T.sqrt(1 - beta2**new_time) / (1 - beta1**new_time)\n for p, g in zip(params, grads):\n m = theano.shared(p.get_value() * 0.)\n v = theano.shared(p.get_value() * 0.)\n new_m = beta1*m + (1 - beta1)*g\n new_v = beta2*v + (1 - beta2)*g*g\n new_p = p - lr*new_m / (T.sqrt(new_v) + eps)\n updates.append((m, new_m))\n updates.append((v, new_v))\n updates.append((p, new_p))\n return updates\n\n\n# a version of HiddenLayer that keeps track of params\nclass HiddenLayer:\n def __init__(self, M1, M2, f=T.tanh, use_bias=True):\n self.W = theano.shared(np.random.randn(M1, M2) * np.sqrt(2 / M1))\n self.params = [self.W]\n self.use_bias = use_bias\n if use_bias:\n self.b = theano.shared(np.zeros(M2))\n self.params += [self.b]\n self.f = f\n\n def forward(self, X):\n if self.use_bias:\n a = X.dot(self.W) + self.b\n else:\n a = X.dot(self.W)\n return self.f(a)\n\n\nclass DQN:\n def __init__(self, D, K, hidden_layer_sizes, gamma, max_experiences=10000, min_experiences=100, batch_sz=32):\n self.K = K\n lr = 1e-2\n mu = 0.\n decay = 0.99\n\n # create the graph\n self.layers = []\n M1 = D\n for M2 in hidden_layer_sizes:\n layer = HiddenLayer(M1, M2)\n self.layers.append(layer)\n M1 = M2\n\n # final layer\n layer = HiddenLayer(M1, K, lambda x: x)\n self.layers.append(layer)\n\n # collect params for copy\n self.params = []\n for layer in self.layers:\n self.params += layer.params\n\n # inputs and targets\n X = T.matrix('X')\n G = T.vector('G')\n actions = T.ivector('actions')\n\n # calculate output and cost\n Z = X\n for layer in self.layers:\n Z = layer.forward(Z)\n Y_hat = Z\n\n selected_action_values = Y_hat[T.arange(actions.shape[0]), actions]\n cost = T.sum((G - selected_action_values)**2) \n\n # create train function\n updates = adam(cost, self.params)\n\n # compile functions\n self.train_op = theano.function(\n inputs=[X, G, actions],\n updates=updates,\n allow_input_downcast=True\n )\n self.predict_op = theano.function(\n inputs=[X],\n outputs=Y_hat,\n allow_input_downcast=True\n )\n\n # create replay memory\n self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}\n self.max_experiences = max_experiences\n self.min_experiences = min_experiences\n self.batch_sz = batch_sz\n self.gamma = gamma\n\n def copy_from(self, other):\n my_params = self.params\n other_params = other.params\n for p, q in zip(my_params, other_params):\n actual = q.get_value()\n p.set_value(actual)\n\n def predict(self, X):\n X = np.atleast_2d(X)\n return self.predict_op(X)\n\n def train(self, target_network):\n # sample a random batch from buffer, do an iteration of GD\n if len(self.experience['s']) < self.min_experiences:\n # don't do anything if we don't have enough experience\n return\n\n # 
randomly select a batch\n idx = np.random.choice(len(self.experience['s']), size=self.batch_sz, replace=False)\n # print(\"idx:\", idx)\n states = [self.experience['s'][i] for i in idx]\n actions = [self.experience['a'][i] for i in idx]\n rewards = [self.experience['r'][i] for i in idx]\n next_states = [self.experience['s2'][i] for i in idx]\n dones = [self.experience['done'][i] for i in idx]\n next_Q = np.max(target_network.predict(next_states), axis=1)\n targets = [r + self.gamma*next_q if not done else r for r, next_q, done in zip(rewards, next_Q, dones)]\n\n # call optimizer\n self.train_op(states, targets, actions)\n\n def add_experience(self, s, a, r, s2, done):\n if len(self.experience['s']) >= self.max_experiences:\n self.experience['s'].pop(0)\n self.experience['a'].pop(0)\n self.experience['r'].pop(0)\n self.experience['s2'].pop(0)\n self.experience['done'].pop(0)\n self.experience['s'].append(s)\n self.experience['a'].append(a)\n self.experience['r'].append(r)\n self.experience['s2'].append(s2)\n self.experience['done'].append(done)\n\n def sample_action(self, x, eps):\n if np.random.random() < eps:\n return np.random.choice(self.K)\n else:\n X = np.atleast_2d(x)\n return np.argmax(self.predict(X)[0])\n\n\ndef play_one(env, model, tmodel, eps, gamma, copy_period):\n global global_iters\n observation = env.reset()\n done = False\n totalreward = 0\n iters = 0\n while not done and iters < 2000:\n # if we reach 2000, just quit, don't want this going forever\n # the 200 limit seems a bit early\n action = model.sample_action(observation, eps)\n prev_observation = observation\n observation, reward, done, info = env.step(action)\n\n totalreward += reward\n if done:\n reward = -200\n\n # update the model\n model.add_experience(prev_observation, action, reward, observation, done)\n model.train(tmodel)\n\n iters += 1\n global_iters += 1\n\n if global_iters % copy_period == 0:\n tmodel.copy_from(model)\n\n return totalreward\n\n\ndef main():\n env = gym.make('CartPole-v0')\n gamma = 0.99\n copy_period = 50\n\n D = len(env.observation_space.sample())\n K = env.action_space.n\n sizes = [200,200]\n model = DQN(D, K, sizes, gamma)\n tmodel = DQN(D, K, sizes, gamma)\n\n if 'monitor' in sys.argv:\n filename = os.path.basename(__file__).split('.')[0]\n monitor_dir = './' + filename + '_' + str(datetime.now())\n env = wrappers.Monitor(env, monitor_dir)\n\n N = 500\n totalrewards = np.empty(N)\n costs = np.empty(N)\n for n in range(N):\n eps = 1.0/np.sqrt(n+1)\n totalreward = play_one(env, model, tmodel, eps, gamma, copy_period)\n totalrewards[n] = totalreward\n if n % 100 == 0:\n print(\"episode:\", n, \"total reward:\", totalreward, \"eps:\", eps, \"avg reward (last 100):\", totalrewards[max(0, n-100):(n+1)].mean())\n\n print(\"avg reward for last 100 episodes:\", totalrewards[-100:].mean())\n print(\"total steps:\", totalrewards.sum())\n\n plt.plot(totalrewards)\n plt.title(\"Rewards\")\n plt.show()\n\n plot_running_avg(totalrewards)\n\n\nif __name__ == '__main__':\n main()\n\n\n"
] |
[
[
"numpy.random.random",
"numpy.sqrt",
"matplotlib.pyplot.title",
"numpy.random.choice",
"matplotlib.pyplot.plot",
"numpy.atleast_2d",
"numpy.random.randn",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.empty"
]
] |
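`DQN.train` above builds Bellman targets from a sampled batch: `r + gamma * max_a' Q_target(s', a')` for non-terminal transitions, the bare reward for terminal ones. A minimal NumPy sketch of just that target computation, on toy values:

```python
# A minimal NumPy sketch of the Bellman targets built in DQN.train():
# non-terminal transitions use r + gamma * max_a' Q_target(s', a'),
# terminal ones use the reward alone. Values below are toy data.
import numpy as np

gamma = 0.99
rewards = np.array([1.0, 1.0, -200.0])
next_Q = np.array([10.0, 12.0, 3.0])   # max over actions, per sample
dones = np.array([False, False, True])

targets = [r + gamma * nq if not done else r
           for r, nq, done in zip(rewards, next_Q, dones)]
print(targets)  # [10.9, 12.88, -200.0]
```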
JAAlvarado-Montes/huntsman-pocs
|
[
"eb5dfdc07e3084cb86b8f02373e83b7b27ecfe5a"
] |
[
"src/huntsman/pocs/utils/dither.py"
] |
[
"import numpy as np\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord, SkyOffsetFrame, ICRS\nfrom astropy.wcs import WCS\n\nimport matplotlib.pyplot as plt\n\n# Pattern for dice 9 3x3 grid (sequence of (RA offset, dec offset) pairs)\ndice9 = ((0, 0),\n (0, 1),\n (1, 1),\n (1, 0),\n (1, -1),\n (0, -1),\n (-1, -1),\n (-1, 0),\n (-1, 1))\n\n# Pattern for dice 5 grid (sequence of (RA offset, dec offset) pairs)\ndice5 = ((0, 0),\n (1, 1),\n (1, -1),\n (-1, -1),\n (-1, 1))\n\n\ndef get_dither_positions(base_position, n_positions, pattern=None, pattern_offset=None, random_offset=None, plot=False):\n \"\"\"\n Given a base position creates a SkyCoord list of dithered sky positions, applying a dither pattern and/or\n random dither offsets.\n\n Args:\n base_position (SkyCoord or compatible): base position for the dither pattern, either a SkyCoord or an object\n that can be converted to one by the SkyCoord constructor (e.g. string)\n n_positions (int): number of dithered sky positions to generate\n pattern (sequence of 2-tuples, optional): sequence of (RA offset, dec offset) tuples, in units of the\n pattern_offset. If given pattern_offset must also be specified.\n pattern_offset (Quantity, optional): scale for the dither pattern. Should be a Quantity with angular\n units, if a numeric type is passed instead it will be assumed to be in arceconds. If pattern offset is\n given pattern must be given too.\n random_offset (Quantity, optional): scale of the random offset to apply to both RA and dec. Should be a\n Quantity with angular units, if numeric type passed instead it will be assumed to be in arcseconds.\n plots (optional, default False): If False no plots will be created, otherwise plots will be generated and\n written to filename `plots`.\n\n Returns:\n SkyCoord: list of n_positions dithered sky positions\n \"\"\"\n if not isinstance(base_position, SkyCoord):\n try:\n base_position = SkyCoord(base_position)\n except ValueError:\n raise ValueError(\n \"Base position '{}' could not be converted to a SkyCoord object!\".format(base_position))\n\n if pattern:\n if pattern_offset is None:\n raise ValueError(\"`pattern` specified but no `pattern_offset` given!\")\n\n if not isinstance(pattern_offset, u.Quantity):\n pattern_offset = pattern_offset * u.arcsec\n\n pattern_length = len(pattern)\n\n RA_offsets = [pattern[count % pattern_length][0]\n for count in range(n_positions)] * pattern_offset\n dec_offsets = [pattern[count % pattern_length][1]\n for count in range(n_positions)] * pattern_offset\n\n else:\n RA_offsets = np.zeros(n_positions) * u.arcsec\n dec_offsets = np.zeros(n_positions) * u.arcsec\n\n if random_offset is not None:\n if not isinstance(random_offset, u.Quantity):\n random_offset = random_offset * u.arcsec\n\n RA_offsets += np.random.uniform(low=-1, high=+1, size=RA_offsets.shape) * random_offset\n dec_offsets += np.random.uniform(low=-1, high=+1, size=dec_offsets.shape) * random_offset\n\n offsets = SkyOffsetFrame(lon=RA_offsets, lat=dec_offsets, origin=base_position)\n positions = offsets.transform_to(ICRS)\n\n if plot:\n dummy_wcs = WCS(naxis=2)\n dummy_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n dummy_wcs.wcs.crval = [base_position.ra.value, base_position.dec.value]\n\n ax = plt.subplot(projection=dummy_wcs)\n ax.plot(positions.ra, positions.dec, 'b*-', transform=ax.get_transform('world'))\n ax.plot([base_position.ra.value], [base_position.dec.value],\n 'rx', transform=ax.get_transform('world'))\n ax.set_aspect('equal', adjustable='datalim')\n 
ax.coords[0].set_axislabel('Right Ascension')\n ax.coords[0].set_major_formatter('hh:mm')\n ax.coords[1].set_axislabel('declination')\n ax.coords[1].set_major_formatter('dd:mm')\n ax.grid()\n plt.title('base position: {},\\nnumber of positions: {}\\npattern offset: {},\\nrandom offset: {}'.format(\n base_position.to_string('hmsdms'),\n n_positions,\n pattern_offset,\n random_offset))\n plt.gcf().set_size_inches(8, 8.5)\n plt.savefig(plot)\n\n return SkyCoord(positions)\n"
] |
[
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"numpy.random.uniform",
"numpy.zeros"
]
] |
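`get_dither_positions` above cycles through the chosen pattern for `n_positions` steps and scales the unit offsets by `pattern_offset`. A minimal sketch of that cycling, assuming astropy is installed; the list-times-Quantity product is the same idiom the source uses:

```python
# A minimal sketch, assuming astropy is installed: the dither pattern is
# cycled for n_positions steps and the unit offsets are scaled by
# pattern_offset, as in get_dither_positions above.
import astropy.units as u

dice5 = ((0, 0), (1, 1), (1, -1), (-1, -1), (-1, 1))
n_positions = 7
pattern_offset = 30 * u.arcsec

RA_offsets = [dice5[i % len(dice5)][0] for i in range(n_positions)] * pattern_offset
dec_offsets = [dice5[i % len(dice5)][1] for i in range(n_positions)] * pattern_offset
print(RA_offsets)  # [0., 30., 30., -30., -30., 0., 30.] arcsec
```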
Shirling-VT/Tdiff_Validation
|
[
"a19c5c8b62b09d0cd60749154c4d744f1f56dfeb"
] |
[
"py/get_fit_data.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"get_fit_data.py: utility module to fetch fitacf<v> level data.\"\"\"\n\n__author__ = \"Chakraborty, S.\"\n__copyright__ = \"Copyright 2020, SuperDARN@VT\"\n__credits__ = []\n__license__ = \"MIT\"\n__version__ = \"1.0.\"\n__maintainer__ = \"Chakraborty, S.\"\n__email__ = \"[email protected]\"\n__status__ = \"Research\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport glob\nimport bz2\nimport pydarnio as pydarn\nfrom loguru import logger\n\nimport copy\n\nclass Gate(object):\n \"\"\"Class object to hold each range cell value\"\"\"\n\n def __init__(self, bm, i, params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"v_e\"], gflg_type=-1):\n \"\"\"\n initialize the parameters which will be stored\n bm: beam object\n i: index to store\n params: parameters to store\n \"\"\"\n for p in params:\n if len(getattr(bm, p)) > i : setattr(self, p, getattr(bm, p)[i])\n else: setattr(self, p, np.nan)\n if gflg_type >= 0 and len(getattr(bm, \"gsflg\")[gflg_type]) > 0: setattr(self, \"gflg\", getattr(bm, \"gsflg\")[gflg_type][i])\n return\n\nclass Beam(object):\n \"\"\"Class to hold one beam object\"\"\"\n\n def __init__(self):\n \"\"\" initialize the instance \"\"\"\n return\n\n def set(self, time, d, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\"], k=None):\n \"\"\"\n Set all parameters\n time: datetime of beam\n d: data dict for other parameters\n s_param: other scalar params\n v_params: other list params\n \"\"\"\n for p in s_params:\n if p in d.keys():\n if p == \"scan\" and d[p] != 0: setattr(self, p, 1)\n else: setattr(self, p, d[p]) if k is None else setattr(self, p, d[p][k])\n else: setattr(self, p, None)\n for p in v_params:\n if p in d.keys(): setattr(self, p, d[p])\n else: setattr(self, p, [])\n self.time = time\n return\n \n def set_nc(self, time, d, i, s_params, v_params):\n \"\"\"\n Set all parameters\n time: datetime of beam\n d: data dict for other parameters\n s_param: other scalar params\n v_params: other list params\n \"\"\"\n for p in s_params:\n if p in d.keys(): setattr(self, p, d[p][i])\n else: setattr(self, p, None)\n for p in v_params:\n if p in d.keys(): \n setattr(self, p, np.array(d[p])[i,:])\n if \"slist\" not in v_params and p==\"v\": setattr(self, \"slist\", np.argwhere(~np.isnan(getattr(self, \"v\"))))\n setattr(self, p, getattr(self, p)[~np.isnan(getattr(self, p))])\n else: setattr(self, p, [])\n self.time = time\n return\n \n def copy(self, bm):\n \"\"\" Copy all parameters \"\"\"\n for p in bm.__dict__.keys(): setattr(self, p, getattr(bm, p))\n return\n\n def gs_estimation(self):\n \"\"\"\n Estimate GS flag using different criterion\n Cases -\n 0. Sundeen et al. |v| + w/3 < 30 m/s\n 1. Blanchard et al. |v| + 0.4w < 60 m/s\n 2. Blanchard et al. [2009] |v| - 0.139w + 0.00113w^2 < 33.1 m/s\n \"\"\"\n self.gsflg = {}\n if len(self.v) > 0 and len(self.w_l) > 0: self.gsflg[0] = ((np.abs(self.v) + self.w_l/3.) < 30.).astype(int)\n if len(self.v) > 0 and len(self.w_l) > 0: self.gsflg[1] = ((np.abs(self.v) + self.w_l*0.4) < 60.).astype(int)\n if len(self.v) > 0 and len(self.w_l) > 0: self.gsflg[2] = ((np.abs(self.v) - 0.139*self.w_l + 0.00113*self.w_l**2) < 33.1).astype(int)\n # Modified defination by S. 
Chakraborty: {W-[50-(0.7*(V+5)**2)]} < 0\n self.gsflg[3] = ((np.array(self.w_l)-(50-(0.7*(np.array(self.v)+5)**2))<0)).astype(int)\n return\n \nclass Scan(object):\n \"\"\"Class to hold one scan (multiple beams)\"\"\"\n\n def __init__(self, stime=None, etime=None, s_mode=\"normal\"):\n \"\"\"\n initialize the parameters which will be stored\n stime: start time of scan\n etime: end time of scan\n s_mode: scan type\n \"\"\"\n self.stime = stime\n self.etime = etime\n self.s_mode = s_mode\n self.beams = []\n return\n\n def update_time(self):\n \"\"\"\n Update stime and etime of the scan.\n up: Update average parameters if True\n \"\"\"\n self.stime = min([b.time for b in self.beams])\n self.etime = max([b.time for b in self.beams])\n self._populate_avg_params()\n return\n\n def _populate_avg_params(self):\n \"\"\"\n Polulate average parameetrs\n \"\"\"\n f, nsky = [], []\n for b in self.beams:\n f.append(getattr(b, \"tfreq\"))\n nsky.append(getattr(b, \"noise.sky\"))\n self.f, self.nsky = np.mean(f), np.mean(nsky)\n return\n \nclass FetchData(object):\n \"\"\"Class to fetch data from fitacf files for one radar for atleast a day\"\"\"\n\n def __init__(self, rad, date_range, ftype=\"fitacf\", files=None, verbose=True):\n \"\"\"\n initialize the vars\n rad = radar code\n date_range = [ start_date, end_date ]\n files = List of files to load the data from\n e.x : rad = \"sas\"\n date_range = [\n datetime.datetime(2017,3,17),\n datetime.datetime(2017,3,18),\n ]\n \"\"\"\n self.rad = rad\n self.date_range = date_range\n self.files = files\n self.verbose = verbose\n self.regex = \"/sd-data/{year}/{ftype}/{rad}/{date}.*{ftype}*.bz2\"\n self.ftype = ftype\n if (rad is not None) and (date_range is not None) and (len(date_range) == 2):\n self._create_files()\n return\n \n def _create_files(self):\n \"\"\"\n Create file names from date and radar code\n \"\"\"\n if self.files is None: self.files = []\n reg_ex = self.regex\n days = (self.date_range[1] - self.date_range[0]).days + 2\n for d in range(-1,days):\n e = self.date_range[0] + dt.timedelta(days=d)\n fnames = glob.glob(reg_ex.format(year=e.year, rad=self.rad, ftype=self.ftype, date=e.strftime(\"%Y%m%d\")))\n fnames.sort()\n for fname in fnames:\n tm = fname.split(\".\")[1]\n sc = fname.split(\".\")[2]\n d0 = dt.datetime.strptime(fname.split(\".\")[0].split(\"/\")[-1] + tm + sc, \"%Y%m%d%H%M%S\")\n d1 = d0 + dt.timedelta(hours=2)\n if (self.date_range[0] <= d0) and (d0 <= self.date_range[1]): self.files.append(fname)\n elif (d0 <= self.date_range[0] <=d1): self.files.append(fname)\n self.files = list(set(self.files))\n self.files.sort()\n return\n \n def _parse_data(self, data, s_params, v_params, by, scan_prop):\n \"\"\"\n Parse data by data type\n data: list of data dict\n params: parameter list to fetch\n by: sort data by beam or scan\n scan_prop: provide scan properties if by='scan'\n {\"s_mode\": type of scan, \"s_time\": duration in min}\n \"\"\"\n _b, _s = [], []\n if self.verbose: logger.info(\"Started converting to beam data %02d.\"%len(data))\n for d in data:\n time = dt.datetime(d[\"time.yr\"], d[\"time.mo\"], d[\"time.dy\"], d[\"time.hr\"], d[\"time.mt\"], d[\"time.sc\"], d[\"time.us\"])\n if time >= self.date_range[0] and time <= self.date_range[1]:\n bm = Beam()\n bm.set(time, d, s_params, v_params)\n _b.append(bm)\n if self.verbose: logger.info(\"Converted to beam data.\")\n if by == \"scan\":\n if self.verbose: logger.info(\"Started converting to scan data.\")\n scan, sc = 0, Scan(None, None, scan_prop[\"s_mode\"])\n 
sc.beams.append(_b[0])\n for _ix, d in enumerate(_b[1:]):\n if d.scan == 1 and d.time != _b[_ix].time:\n sc.update_time()\n _s.append(sc)\n sc = Scan(None, None, scan_prop[\"s_mode\"])\n sc.beams.append(d)\n else: sc.beams.append(d)\n _s.append(sc)\n if self.verbose: logger.info(\"Converted to scan data.\")\n return _b, _s\n \n def convert_to_pandas(self, beams, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"]):\n \"\"\"\n Convert the beam data into dataframe\n \"\"\"\n _o = dict(zip(s_params+v_params, ([] for _ in s_params+v_params)))\n for b in beams:\n l = len(getattr(b, \"slist\"))\n for p in v_params:\n _o[p].extend(getattr(b, p))\n for p in s_params:\n _o[p].extend([getattr(b, p)]*l)\n L = len(_o[\"slist\"])\n for p in s_params+v_params:\n if len(_o[p]) < L:\n l = len(_o[p])\n _o[p].extend([np.nan]*(L-l))\n return pd.DataFrame.from_records(_o)\n \n def scans_to_pandas(self, scans, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\", \"channel\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"], start_scnum=0):\n \"\"\"\n Convert the scan data into dataframe\n \"\"\"\n new_cols = [\"scnum\",\"sbnum\"]\n _o = dict(zip(s_params+v_params+new_cols, ([] for _ in s_params+v_params+new_cols)))\n for idn, s in enumerate(scans):\n for idh, b in enumerate(s.beams):\n l = len(getattr(b, \"slist\"))\n for p in v_params:\n _o[p].extend(getattr(b, p))\n for p in s_params:\n _o[p].extend([getattr(b, p)]*l)\n _o[\"scnum\"].extend([idn + start_scnum]*l)\n _o[\"sbnum\"].extend([idh]*l)\n L = len(_o[\"slist\"])\n for p in s_params+v_params+new_cols:\n if len(_o[p]) < L:\n l = len(_o[p])\n _o[p].extend([np.nan]*(L-l))\n return pd.DataFrame.from_records(_o)\n \n def pandas_to_beams(self, df, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"]):\n \"\"\"\n Convert the dataframe to beam\n \"\"\"\n beams = []\n for bm in np.unique(df.bmnum):\n o = df[df.bmnum==bm]\n d = o.to_dict(orient=\"list\")\n for p in s_params:\n d[p] = d[p][0]\n b = Beam()\n b.set(o.time.tolist()[0], d, s_params, v_params)\n beams.append(b)\n return beams\n \n def pandas_to_scans(self, df, smode, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"time\"],\n v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"]):\n \"\"\"\n Convert the dataframe to scans\n \"\"\"\n bmax = 0\n scans = []\n for sn in np.unique(df.scnum):\n o = df[df.scnum==sn]\n beams = []\n for bn in np.unique(o.sbnum):\n ox = o[o.sbnum==bn]\n b = self.pandas_to_beams(ox, s_params, v_params)\n beams.extend(b)\n bmax = len(beams) if bmax < len(beams) else bmax\n sc = Scan(None, None, smode)\n sc.beams.extend(beams)\n sc.update_time()\n scans.append(sc)\n mscans = []\n if len(scans[0].beams) + len(scans[1].beams) == len(scans[2].beams):\n sc = Scan(None, None, scans[0].s_mode)\n sc.beams.extend(scans[0].beams)\n sc.beams.extend(scans[1].beams)\n sc.update_time()\n mscans.append(sc)\n for i in range(2,len(scans)):\n mscans.append(scans[i])\n scans = copy.copy(mscans) if len(mscans) > 0 else scans\n return scans, bmax\n \n def fetch_data(self, s_params=[\"bmnum\", \"noise.sky\", \"tfreq\", \"scan\", \"nrang\", \"intt.sc\", \"intt.us\",\\\n \"mppul\", \"nrang\", \"rsep\", \"cp\", \"frang\", \"smsep\", \"lagfr\", \"channel\"],\n 
v_params=[\"v\", \"w_l\", \"gflg\", \"p_l\", \"slist\", \"v_e\", \"phi0\", \"elv\"],\n by=\"beam\", scan_prop={\"s_time\": 1, \"s_mode\": \"normal\"}):\n \"\"\"\n Fetch data from file list and return the dataset\n params: parameter list to fetch\n by: sort data by beam or scan\n scan_prop: provide scan properties if by='scan'\n {\"s_mode\": type of scan, \"s_time\": duration in min}\n \"\"\"\n data = []\n for f in self.files:\n with bz2.open(f) as fp:\n fs = fp.read()\n if self.verbose: logger.info(f\"Read file - {f}\")\n reader = pydarn.SDarnRead(fs, True)\n records = reader.read_fitacf()\n data += records\n if by is not None: data = self._parse_data(data, s_params, v_params, by, scan_prop)\n return data\n \nif __name__ == \"__main__\":\n fdata = FetchData( \"sas\", [dt.datetime(2015,3,17,3),\n dt.datetime(2015,3,17,3,20)] )\n fdata.fetch_data()\n fdata.fetch_data(by=\"scan\", scan_prop={\"s_time\": 2, \"s_mode\": \"themis\"})"
] |
[
[
"numpy.abs",
"numpy.unique",
"numpy.mean",
"pandas.DataFrame.from_records",
"numpy.array"
]
] |
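`Beam.gs_estimation` above encodes three published ground-scatter criteria on the (velocity, spectral width) pair of each range cell. A minimal NumPy sketch of those flags on toy cells:

```python
# A minimal NumPy sketch of the ground-scatter criteria in
# Beam.gs_estimation(): each rule flags a range cell (1 = ground scatter)
# when its velocity/spectral-width combination falls below the threshold.
import numpy as np

v = np.array([5.0, 100.0, 20.0])     # line-of-sight velocity, m/s
w_l = np.array([20.0, 90.0, 200.0])  # spectral width, m/s

gs_sundeen = ((np.abs(v) + w_l / 3.0) < 30.0).astype(int)    # case 0
gs_blanchard = ((np.abs(v) + 0.4 * w_l) < 60.0).astype(int)  # case 1
gs_blanchard09 = ((np.abs(v) - 0.139 * w_l + 0.00113 * w_l**2) < 33.1).astype(int)  # case 2
print(gs_sundeen, gs_blanchard, gs_blanchard09)  # [1 0 0] [1 0 0] [1 0 0]
```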
abhinavarora/text
|
[
"69f67f3a775f3d3c6f85cfaa4ac3819500b90696",
"69f67f3a775f3d3c6f85cfaa4ac3819500b90696"
] |
[
"torchtext/transforms.py",
"test/data/test_modules.py"
] |
[
"import json\nfrom copy import deepcopy\nfrom functools import lru_cache\nfrom typing import Any, List, Optional, Union\n\nimport torch\nimport torchtext # noqa: F401\nfrom torch import Tensor\nfrom torch.nn import Module\nfrom torchtext._torchtext import CLIPEncoder as CLIPEncoderPyBind, GPT2BPEEncoder as GPT2BPEEncoderPyBind\nfrom torchtext.data.functional import load_sp_model\nfrom torchtext.utils import get_asset_local_path\nfrom torchtext.vocab import Vocab\n\nfrom . import functional as F\n\n__all__ = [\n \"SentencePieceTokenizer\",\n \"VocabTransform\",\n \"ToTensor\",\n \"LabelToIndex\",\n \"Truncate\",\n \"AddToken\",\n \"GPT2BPETokenizer\",\n \"Sequential\",\n]\n\n\nclass SentencePieceTokenizer(Module):\n \"\"\"\n Transform for Sentence Piece tokenizer from pre-trained sentencepiece model\n\n Additiona details: https://github.com/google/sentencepiece\n\n :param sp_model_path: Path to pre-trained sentencepiece model\n :type sp_model_path: str\n\n Example\n >>> from torchtext.transforms import SpmTokenizerTransform\n >>> transform = SentencePieceTokenizer(\"spm_model\")\n >>> transform([\"hello world\", \"attention is all you need!\"])\n \"\"\"\n\n def __init__(self, sp_model_path: str):\n super().__init__()\n self.sp_model = load_sp_model(get_asset_local_path(sp_model_path))\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input sentence or list of sentences on which to apply tokenizer.\n :type input: Union[str, List[str]]\n :return: tokenized text\n :rtype: Union[List[str], List[List(str)]]\n \"\"\"\n if torch.jit.isinstance(input, List[str]):\n tokens: List[List[str]] = []\n for text in input:\n tokens.append(self.sp_model.EncodeAsPieces(text))\n return tokens\n elif torch.jit.isinstance(input, str):\n return self.sp_model.EncodeAsPieces(input)\n else:\n raise TypeError(\"Input type not supported\")\n\n\nclass VocabTransform(Module):\n r\"\"\"Vocab transform to convert input batch of tokens into corresponding token ids\n\n :param vocab: an instance of :class:`torchtext.vocab.Vocab` class.\n\n Example:\n >>> import torch\n >>> from torchtext.vocab import vocab\n >>> from torchtext.transforms import VocabTransform\n >>> from collections import OrderedDict\n >>> vocab_obj = vocab(OrderedDict([('a', 1), ('b', 1), ('c', 1)]))\n >>> vocab_transform = VocabTransform(vocab_obj)\n >>> output = vocab_transform([['a','b'],['a','b','c']])\n >>> jit_vocab_transform = torch.jit.script(vocab_transform)\n \"\"\"\n\n def __init__(self, vocab: Vocab):\n super().__init__()\n assert isinstance(vocab, Vocab)\n self.vocab = vocab\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input batch of token to convert to correspnding token ids\n :type input: Union[List[str], List[List[str]]]\n :return: Converted input into corresponding token ids\n :rtype: Union[List[int], List[List[int]]]\n \"\"\"\n\n if torch.jit.isinstance(input, List[str]):\n return self.vocab.lookup_indices(input)\n elif torch.jit.isinstance(input, List[List[str]]):\n output: List[List[int]] = []\n for tokens in input:\n output.append(self.vocab.lookup_indices(tokens))\n\n return output\n else:\n raise TypeError(\"Input type not supported\")\n\n\nclass ToTensor(Module):\n r\"\"\"Convert input to torch tensor\n\n :param padding_value: Pad value to make each input in the batch of length equal to the longest sequence in the batch.\n :type padding_value: Optional[int]\n :param dtype: :class:`torch.dtype` of output tensor\n :type dtype: :class:`torch.dtype`\n \"\"\"\n\n def __init__(self, padding_value: 
Optional[int] = None, dtype: torch.dtype = torch.long) -> None:\n super().__init__()\n self.padding_value = padding_value\n self.dtype = dtype\n\n def forward(self, input: Any) -> Tensor:\n \"\"\"\n :param input: Sequence or batch of token ids\n :type input: Union[List[int], List[List[int]]]\n :rtype: Tensor\n \"\"\"\n return F.to_tensor(input, padding_value=self.padding_value, dtype=self.dtype)\n\n\nclass LabelToIndex(Module):\n r\"\"\"\n Transform labels from string names to ids.\n\n :param label_names: a list of unique label names\n :type label_names: Optional[List[str]]\n :param label_path: a path to file containing unique label names containing 1 label per line. Note that either label_names or label_path should be supplied\n but not both.\n :type label_path: Optional[str]\n \"\"\"\n\n def __init__(\n self,\n label_names: Optional[List[str]] = None,\n label_path: Optional[str] = None,\n sort_names=False,\n ):\n assert label_names or label_path, \"label_names or label_path is required\"\n assert not (label_names and label_path), \"label_names and label_path are mutually exclusive\"\n super().__init__()\n\n if label_path:\n with open(label_path, \"r\") as f:\n label_names = [line.strip() for line in f if line.strip()]\n else:\n label_names = label_names\n\n if sort_names:\n label_names = sorted(label_names)\n self._label_vocab = Vocab(torch.classes.torchtext.Vocab(label_names, None))\n self._label_names = self._label_vocab.get_itos()\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input labels to convert to corresponding ids\n :type input: Union[str, List[str]]\n :rtype: Union[int, List[int]]\n \"\"\"\n if torch.jit.isinstance(input, List[str]):\n return self._label_vocab.lookup_indices(input)\n elif torch.jit.isinstance(input, str):\n return self._label_vocab.__getitem__(input)\n else:\n raise TypeError(\"Input type not supported\")\n\n @property\n def label_names(self) -> List[str]:\n return self._label_names\n\n\nclass Truncate(Module):\n r\"\"\"Truncate input sequence\n\n :param max_seq_len: The maximum allowable length for input sequence\n :type max_seq_len: int\n \"\"\"\n\n def __init__(self, max_seq_len: int) -> None:\n super().__init__()\n self.max_seq_len = max_seq_len\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input sequence or batch of sequence to be truncated\n :type input: Union[List[Union[str, int]], List[List[Union[str, int]]]]\n :return: Truncated sequence\n :rtype: Union[List[Union[str, int]], List[List[Union[str, int]]]]\n \"\"\"\n return F.truncate(input, self.max_seq_len)\n\n\nclass AddToken(Module):\n \"\"\"Add token to beginning or end of sequence\n\n :param token: The token to be added\n :type token: Union[int, str]\n :param begin: Whether to insert token at start or end or sequence, defaults to True\n :type begin: bool, optional\n \"\"\"\n\n def __init__(self, token: Union[int, str], begin: bool = True) -> None:\n super().__init__()\n self.token = token\n self.begin = begin\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input sequence or batch\n :type input: Union[List[Union[str, int]], List[List[Union[str, int]]]]\n \"\"\"\n\n return F.add_token(input, self.token, self.begin)\n\n\nclass GPT2BPETokenizer(Module):\n __jit_unused_properties__ = [\"is_jitable\"]\n \"\"\"\n Transform for GPT-2 BPE Tokenizer.\n\n Reimplements openai GPT-2 BPE in TorchScript. 
Original openai implementation\n https://github.com/openai/gpt-2/blob/master/src/encoder.py\n\n :param encoder_json_path: Path to GPT-2 BPE encoder json file.\n :type encoder_json_path: str\n :param vocab_bpe_path: Path to bpe vocab file.\n :type vocab_bpe_path: str\n \"\"\"\n _seperator: torch.jit.Final[str]\n\n def __init__(\n self,\n encoder_json_path: str,\n vocab_bpe_path: str,\n ):\n super().__init__()\n self._seperator = \"\\u0001\"\n # load bpe encoder and bpe decoder\n with open(get_asset_local_path(encoder_json_path), \"r\", encoding=\"utf-8\") as f:\n bpe_encoder = json.load(f)\n # load bpe vocab\n with open(get_asset_local_path(vocab_bpe_path), \"r\", encoding=\"utf-8\") as f:\n bpe_vocab = f.read()\n bpe_merge_ranks = {\n self._seperator.join(merge_pair.split()): i for i, merge_pair in enumerate(bpe_vocab.split(\"\\n\")[1:-1])\n }\n # Caching is enabled in Eager mode\n self.bpe = GPT2BPEEncoderPyBind(bpe_encoder, bpe_merge_ranks, self._seperator, bytes_to_unicode(), True)\n\n @property\n def is_jitable(self):\n return isinstance(self.bpe, torch._C.ScriptObject)\n\n @torch.jit.export\n def _tokenize(self, text: str) -> List[str]:\n \"\"\"Encode text into a list of tokens\n\n Args:\n text: An input text string.\n\n Returns:\n A list of bpe token ids represents each bpe tokens\n\n For example: \"awesome,awe\"\n --> bpe --> bpe tokens: [\"aw\", \"esome\"], [\",\"], [\"aw\", e]\n --> bpe encode --> bpe token ids: [707, 5927, 11, 707, 68]\n \"\"\"\n bpe_token_ids: List[int] = self.bpe.encode(text)\n bpe_tokens: List[str] = []\n\n for bpe_token_id in bpe_token_ids:\n bpe_tokens.append(str(bpe_token_id))\n\n return bpe_tokens\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input sentence or list of sentences on which to apply tokenizer.\n :type input: Union[str, List[str]]\n :return: tokenized text\n :rtype: Union[List[str], List[List(str)]]\n \"\"\"\n if torch.jit.isinstance(input, List[str]):\n tokens: List[List[str]] = []\n for text in input:\n tokens.append(self._tokenize(text))\n return tokens\n elif torch.jit.isinstance(input, str):\n return self._tokenize(input)\n else:\n raise TypeError(\"Input type not supported\")\n\n def __prepare_scriptable__(self):\n r\"\"\"Return a JITable tokenizer.\"\"\"\n if not self.is_jitable:\n tokenizer_copy = deepcopy(self)\n # Disable caching in script mode\n tokenizer_copy.bpe = torch.classes.torchtext.GPT2BPEEncoder(\n self.bpe.bpe_encoder_, self.bpe.bpe_merge_ranks_, self.bpe.seperator_, self.bpe.byte_encoder_, False\n )\n return tokenizer_copy\n return self\n\n\nclass CLIPTokenizer(Module):\n __jit_unused_properties__ = [\"is_jitable\"]\n \"\"\"\n Transform for CLIP Tokenizer. Based on Byte-Level BPE.\n\n Reimplements CLIP Tokenizer in TorchScript. 
Original implementation:\n https://github.com/mlfoundations/open_clip/blob/main/src/clip/tokenizer.py\n\n This tokenizer has been trained to treat spaces like parts of the tokens\n (a bit like sentencepiece) so a word will be encoded differently whether it\n is at the beginning of the sentence (without space) or not.\n\n The below code snippet shows how to use the CLIP tokenizer with encoder and merges file\n taken from the original paper implementation.\n\n Example\n >>> from torchtext.transforms import CLIPTokenizer\n >>> MERGES_FILE = \"http://download.pytorch.org/models/text/clip_merges.bpe\"\n >>> ENCODER_FILE = \"http://download.pytorch.org/models/text/clip_encoder.json\"\n >>> tokenizer = CLIPTokenizer(merges_path=MERGES_FILE, encoder_json_path=ENCODER_FILE)\n >>> tokenizer(\"the quick brown fox jumped over the lazy dog\")\n\n :param merges_path: Path to bpe merges file.\n :type merges_path: str\n :param encoder_json_path: Optional, path to BPE encoder json file. When specified, this is used\n to infer num_merges.\n :type encoder_json_path: str\n :param num_merges: Optional, number of merges to read from the bpe merges file.\n :type num_merges: int\n \"\"\"\n\n _seperator: torch.jit.Final[str]\n\n def __init__(self, merges_path: str, encoder_json_path: Optional[str] = None, num_merges: Optional[int] = None):\n super().__init__()\n self._seperator = \"\\u0001\"\n # load bpe merges\n with open(get_asset_local_path(merges_path), \"r\", encoding=\"utf-8\") as f:\n bpe_merges = f.read().split(\"\\n\")[1:]\n\n if encoder_json_path:\n # load bpe encoder\n with open(get_asset_local_path(encoder_json_path), \"r\", encoding=\"utf-8\") as f:\n bpe_encoder = json.load(f)\n # 256 * 2 for each byte. For each byte we have ['a', 'a</w>']\n # Additional 2 tokens for bos and eos\n num_merges = len(bpe_encoder) - (256 * 2 + 2)\n bpe_merge_ranks = {\n self._seperator.join(merge_pair.split()): i for i, merge_pair in enumerate(bpe_merges[:num_merges])\n }\n else:\n num_merges = num_merges or len(bpe_merges)\n bpe_merge_ranks = {\n self._seperator.join(merge_pair.split()): i for i, merge_pair in enumerate(bpe_merges[:num_merges])\n }\n bpe_vocab = list(bytes_to_unicode().values())\n bpe_vocab = bpe_vocab + [v + \"</w>\" for v in bpe_vocab]\n bpe_vocab.extend([\"\".join(merge_pair.split()) for merge_pair in bpe_merges[:num_merges]])\n bpe_vocab.extend([\"<|startoftext|>\", \"<|endoftext|>\"])\n bpe_encoder = {v: i for i, v in enumerate(bpe_vocab)}\n\n # Caching is enabled in Eager mode\n self.bpe = CLIPEncoderPyBind(bpe_encoder, bpe_merge_ranks, self._seperator, bytes_to_unicode(), True)\n\n @property\n def is_jitable(self):\n return isinstance(self.bpe, torch._C.ScriptObject)\n\n @torch.jit.export\n def _tokenize(self, text: str) -> List[str]:\n \"\"\"Encode text into a list of tokens\n\n Args:\n text: An input text string.\n\n Returns:\n A list of bpe token ids represents each bpe tokens\n\n For example: \"awesome,awe\"\n --> bpe --> bpe tokens: [\"aw\", \"esome\"], [\",\"], [\"aw\", \"e\"]\n --> bpe encode --> bpe token ids: [707, 5927, 11, 707, 68]\n \"\"\"\n text = text.lower().strip()\n bpe_token_ids: List[int] = self.bpe.encode(text)\n bpe_tokens: List[str] = []\n\n for bpe_token_id in bpe_token_ids:\n bpe_tokens.append(str(bpe_token_id))\n\n return bpe_tokens\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input sentence or list of sentences on which to apply tokenizer.\n :type input: Union[str, List[str]]\n :return: tokenized text\n :rtype: Union[List[str], List[List(str)]]\n 
\"\"\"\n if torch.jit.isinstance(input, List[str]):\n tokens: List[List[str]] = []\n for text in input:\n tokens.append(self._tokenize(text))\n return tokens\n elif torch.jit.isinstance(input, str):\n return self._tokenize(input)\n else:\n raise TypeError(\"Input type not supported\")\n\n def __prepare_scriptable__(self):\n r\"\"\"Return a JITable tokenizer.\"\"\"\n if not self.is_jitable:\n tokenizer_copy = deepcopy(self)\n # Disable caching in script mode\n tokenizer_copy.bpe = torch.classes.torchtext.CLIPEncoder(\n self.bpe.bpe_encoder_, self.bpe.bpe_merge_ranks_, self.bpe.seperator_, self.bpe.byte_encoder_, False\n )\n return tokenizer_copy\n return self\n\n\n@lru_cache()\ndef bytes_to_unicode():\n \"\"\"\n Original Source: https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9\n\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n \"\"\"\n bs = list(range(ord(\"!\"), ord(\"~\") + 1)) + list(range(ord(\"¡\"), ord(\"¬\") + 1)) + list(range(ord(\"®\"), ord(\"ÿ\") + 1))\n cs = bs[:]\n n = 0\n for b in range(2 ** 8):\n if b not in bs:\n bs.append(b)\n cs.append(2 ** 8 + n)\n n += 1\n cs = [chr(n) for n in cs]\n return dict(zip(bs, cs))\n\n\nclass Sequential(torch.nn.Sequential):\n r\"\"\"A container to host a sequence of text transforms.\"\"\"\n\n def forward(self, input: Any) -> Any:\n \"\"\"\n :param input: Input sequence or batch. The input type must be supported by the first transform in the sequence.\n :type input: `Any`\n \"\"\"\n for module in self:\n input = module(input)\n return input\n",
"import torch\nfrom torch.nn import Linear\nfrom torch.nn.functional import multi_head_attention_forward as mha_forward\nfrom torchtext.nn import InProjContainer, MultiheadAttentionContainer, ScaledDotProduct\n\nfrom ..common.torchtext_test_case import TorchtextTestCase\n\n\nclass TestModels(TorchtextTestCase):\n def test_multiheadattention(self):\n embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64\n # Build torchtext MultiheadAttention module\n in_proj = InProjContainer(\n Linear(embed_dim, embed_dim, bias=False),\n Linear(embed_dim, embed_dim, bias=False),\n Linear(embed_dim, embed_dim, bias=False),\n )\n\n MHA = MultiheadAttentionContainer(nhead, in_proj, ScaledDotProduct(), Linear(embed_dim, embed_dim, bias=False))\n\n query = torch.rand((tgt_len, bsz, embed_dim))\n key = value = torch.rand((src_len, bsz, embed_dim))\n attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)\n bias_k = bias_v = torch.rand((1, 1, embed_dim))\n mha_output, attn_weights = MHA(\n query,\n key,\n value,\n attn_mask=torch.stack([attn_mask_2D] * (bsz * nhead)),\n bias_k=bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),\n bias_v=bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),\n )\n\n # Use torch.nn.functional.multi_head_attention_forward\n torch_attn_mask = torch.zeros((tgt_len, src_len)).masked_fill_(attn_mask_2D, float(\"-inf\"))\n in_proj_weight = torch.cat(\n [\n MHA.in_proj_container.query_proj.weight,\n MHA.in_proj_container.key_proj.weight,\n MHA.in_proj_container.value_proj.weight,\n ]\n )\n torch_mha_output, torch_mha_weights = mha_forward(\n query,\n key,\n value,\n embed_dim,\n nhead,\n in_proj_weight,\n None,\n bias_k,\n bias_v,\n False,\n 0.0,\n MHA.out_proj.weight,\n None,\n attn_mask=torch_attn_mask,\n )\n\n self.assertEqual(mha_output, torch_mha_output)\n # With bias_k and bias_v, src_len needs to plus 1\n attn_weights = attn_weights.view(bsz, nhead, tgt_len, src_len + 1).sum(dim=1) / nhead\n self.assertEqual(attn_weights, torch_mha_weights)\n\n def test_mha_batch_first(self):\n embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64\n # Build torchtext MultiheadAttention module\n in_proj = InProjContainer(\n Linear(embed_dim, embed_dim, bias=False),\n Linear(embed_dim, embed_dim, bias=False),\n Linear(embed_dim, embed_dim, bias=False),\n )\n\n MHA_batch_1st = MultiheadAttentionContainer(\n nhead, in_proj, ScaledDotProduct(), Linear(embed_dim, embed_dim, bias=False), batch_first=True\n )\n\n query = torch.rand((tgt_len, bsz, embed_dim))\n key = value = torch.rand((src_len, bsz, embed_dim))\n attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)\n bias_k = bias_v = torch.rand((1, 1, embed_dim))\n mha_output_1st, attn_weights_1st = MHA_batch_1st(\n query.transpose(0, 1),\n key.transpose(0, 1),\n value.transpose(0, 1),\n attn_mask=torch.stack([attn_mask_2D] * (bsz * nhead)),\n bias_k=bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),\n bias_v=bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),\n )\n\n # Use torch.nn.functional.multi_head_attention_forward\n torch_attn_mask = torch.zeros((tgt_len, src_len)).masked_fill_(attn_mask_2D, float(\"-inf\"))\n in_proj_weight = torch.cat(\n [\n MHA_batch_1st.in_proj_container.query_proj.weight,\n MHA_batch_1st.in_proj_container.key_proj.weight,\n MHA_batch_1st.in_proj_container.value_proj.weight,\n ]\n )\n torch_mha_output, torch_mha_weights = mha_forward(\n query,\n key,\n value,\n embed_dim,\n nhead,\n in_proj_weight,\n None,\n bias_k,\n bias_v,\n False,\n 0.0,\n MHA_batch_1st.out_proj.weight,\n 
None,\n attn_mask=torch_attn_mask,\n )\n\n self.assertEqual(mha_output_1st.transpose(0, 1), torch_mha_output)\n # With bias_k and bias_v, src_len needs to plus 1\n attn_weights_1st = attn_weights_1st.view(bsz, nhead, tgt_len, src_len + 1).sum(dim=1) / nhead\n self.assertEqual(attn_weights_1st, torch_mha_weights)\n\n def test_broadcast_scaled_dot_product(self):\n embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64\n SDP = ScaledDotProduct()\n query = torch.rand((tgt_len, 1, embed_dim))\n key = value = torch.rand((src_len, 1, embed_dim))\n attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)\n\n sdp_attn_output_full, sdp_attn_weights_full = SDP(\n query.expand(tgt_len, bsz * nhead, embed_dim),\n key.expand(src_len, bsz * nhead, embed_dim),\n value.expand(src_len, bsz * nhead, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n\n # query has a batch size of 1 while key/value have a batch size of bsz * nhead\n sdp_attn_output, sdp_attn_weights = SDP(\n query,\n key.expand(src_len, bsz * nhead, embed_dim),\n value.expand(src_len, bsz * nhead, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n self.assertEqual(sdp_attn_output, sdp_attn_output_full)\n self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)\n\n # key/value have a batch size of 1 while query has a batch size of bsz * nhead\n sdp_attn_output, sdp_attn_weights = SDP(\n query.expand(tgt_len, bsz * nhead, embed_dim),\n key,\n value,\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n self.assertEqual(sdp_attn_output, sdp_attn_output_full)\n self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)\n\n # key/value have a size of (3, 3, src_len, bsz * nhead, embed_dim)\n # while query has a size of (tgt_len, 1, embed_dim)\n sdp_attn_output, sdp_attn_weights = SDP(\n query.expand(tgt_len, 1, embed_dim),\n key.expand(3, 3, src_len, bsz * nhead, embed_dim),\n value.expand(3, 3, src_len, bsz * nhead, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n assert list(sdp_attn_output.size()) == [3, 3, tgt_len, bsz * nhead, embed_dim]\n assert list(sdp_attn_weights.size()) == [3, 3, bsz * nhead, tgt_len, embed_dim]\n self.assertEqual(sdp_attn_output[2][2], sdp_attn_output_full)\n self.assertEqual(sdp_attn_weights[2][2], sdp_attn_weights_full)\n # dim -2 is not equal to neither key/value's dim -2 or 1\n with self.assertRaises(RuntimeError):\n SDP(\n query.expand(tgt_len, 2, embed_dim),\n key.expand(3, 3, src_len, bsz * nhead, embed_dim),\n value.expand(3, 3, src_len, bsz * nhead, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n\n # key/value have a size of (src_len, 1, embed_dim)\n # while query has a size of (1, 2, 3, tgt_len, bsz * nhead, embed_dim)\n sdp_attn_output, sdp_attn_weights = SDP(\n query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),\n key.expand(src_len, 1, embed_dim),\n value.expand(src_len, 1, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n assert list(sdp_attn_output.size()) == [1, 2, 3, tgt_len, bsz * nhead, embed_dim]\n assert list(sdp_attn_weights.size()) == [1, 2, 3, bsz * nhead, tgt_len, embed_dim]\n self.assertEqual(sdp_attn_output[0][1][2], sdp_attn_output_full)\n self.assertEqual(sdp_attn_weights[0][1][2], sdp_attn_weights_full)\n # key dim -2 is not equal to value dim -2\n with self.assertRaisesRegex(AssertionError, \"Shape of key, value must match\"):\n SDP(\n query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),\n 
key.expand(src_len, 2, embed_dim),\n value.expand(src_len, 1, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n # key/value dim -2 is not equal to neither query's dim -2 or 1\n with self.assertRaises(RuntimeError):\n SDP(\n query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),\n key.expand(src_len, 2, embed_dim),\n value.expand(src_len, 2, embed_dim),\n attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),\n )\n\n # attn_mask in a size of (1, tgt_len, src_len)\n # 2D tensor is not supported for attn_mask\n sdp_attn_output, sdp_attn_weights = SDP(\n query.expand(tgt_len, bsz * nhead, embed_dim),\n key.expand(src_len, bsz * nhead, embed_dim),\n value.expand(src_len, bsz * nhead, embed_dim),\n attn_mask=attn_mask_2D.expand(1, tgt_len, src_len),\n )\n self.assertEqual(sdp_attn_output, sdp_attn_output_full)\n self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)\n # attn_mask's dim -3 is not equal to neither batch size or 1\n with self.assertRaisesRegex(RuntimeError, \"The size of the attn_mask is not correct.\"):\n SDP(\n query.expand(tgt_len, bsz * nhead, embed_dim),\n key.expand(src_len, bsz * nhead, embed_dim),\n value.expand(src_len, bsz * nhead, embed_dim),\n attn_mask=attn_mask_2D.expand(2, tgt_len, src_len),\n )\n"
] |
[
[
"torch.classes.torchtext.Vocab",
"torch.classes.torchtext.CLIPEncoder",
"torch.classes.torchtext.GPT2BPEEncoder",
"torch.jit.isinstance"
],
[
"torch.randint",
"torch.cat",
"torch.zeros",
"torch.nn.functional.multi_head_attention_forward",
"torch.nn.Linear",
"torch.rand",
"torch.stack"
]
] |
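A quick standalone round-trip check of the bytes_to_unicode mapping defined in the CLIP tokenizer cell above (a sketch only; torchtext itself is not required): every possible byte value is assigned a printable unicode character, so UTF-8 byte sequences can be handled as ordinary strings and recovered without loss.

    from functools import lru_cache

    @lru_cache()
    def bytes_to_unicode():
        # Printable byte values keep their own codepoint; the remaining bytes
        # are shifted above 255 so the table stays one-to-one and reversible.
        bs = (list(range(ord("!"), ord("~") + 1))
              + list(range(ord("¡"), ord("¬") + 1))
              + list(range(ord("®"), ord("ÿ") + 1)))
        cs = bs[:]
        n = 0
        for b in range(256):
            if b not in bs:
                bs.append(b)
                cs.append(256 + n)
                n += 1
        return dict(zip(bs, [chr(c) for c in cs]))

    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}

    text = "héllo world"
    as_unicode = "".join(byte_encoder[b] for b in text.encode("utf-8"))
    assert bytes(byte_decoder[c] for c in as_unicode).decode("utf-8") == text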
grlee77/chainer
|
[
"c064bb33701bc35fee9500a334a8fc76e4179bfc"
] |
[
"tests/onnx_chainer_tests/functions_tests/test_arrays.py"
] |
[
"import chainer\nimport chainer.functions as F\nfrom chainer import testing\nimport numpy as np\nimport onnx\nimport pytest\n\nfrom onnx_chainer import export\nfrom onnx_chainer.testing import input_generator\nfrom onnx_chainer_tests.helper import ONNXModelChecker\nfrom onnx_chainer_tests.helper import ONNXModelTest\n\n\[email protected](\n # cast\n # {'ops': 'cast', 'input_shape': (1, 5),\n # 'input_argname': 'x',\n # 'args': {'typ': np.float16}},\n {'ops': 'cast', 'input_shape': (1, 5),\n 'input_argname': 'x',\n 'args': {'typ': np.float64}},\n\n # depth2space\n {'ops': 'depth2space', 'input_shape': (1, 12, 6, 6),\n 'input_argname': 'X',\n 'args': {'r': 2}},\n\n # pad\n {'ops': 'pad', 'input_shape': (1, 2, 3, 4),\n 'input_argname': 'x',\n 'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),\n 'mode': 'constant'},\n 'name': 'pad_constant'},\n {'ops': 'pad', 'input_shape': (1, 2, 3, 4),\n 'input_argname': 'x',\n 'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),\n 'mode': 'reflect'},\n 'name': 'pad_reflect'},\n {'ops': 'pad', 'input_shape': (1, 2, 3, 4),\n 'input_argname': 'x',\n 'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),\n 'mode': 'edge'},\n 'name': 'pad_edge'},\n {'ops': 'pad', 'input_shape': (1, 2, 3, 4),\n 'input_argname': 'x',\n 'args': {'pad_width': ((1, 3), (2, 0), (7, 1), (4, 4)),\n 'mode': 'constant'},\n 'name': 'pad_imbalance_pad_width'},\n {'ops': 'pad', 'input_shape': (1, 2, 3, 4),\n 'input_argname': 'x',\n 'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),\n 'mode': 'constant',\n 'constant_values': -1},\n 'name': 'pad_with_constant_values'},\n {'ops': 'pad', 'input_shape': (1, 2, 3, 4),\n 'input_argname': 'x',\n 'args': {'pad_width': 2,\n 'mode': 'constant'},\n 'name': 'pad_scalar_pad_width'},\n\n # reshape\n {'ops': 'reshape', 'input_shape': (1, 6),\n 'input_argname': 'x',\n 'args': {'shape': (1, 2, 1, 3)}},\n\n # space2depth\n {'ops': 'space2depth', 'input_shape': (1, 12, 6, 6),\n 'input_argname': 'X',\n 'args': {'r': 2}},\n\n # split_axis\n {'ops': 'split_axis', 'input_shape': (1, 6),\n 'input_argname': 'x',\n 'args': {'indices_or_sections': 2,\n 'axis': 1, 'force_tuple': True},\n 'name': 'split_axis_force_tuple_true'},\n {'ops': 'split_axis', 'input_shape': (1, 6),\n 'input_argname': 'x',\n 'args': {'indices_or_sections': 2,\n 'axis': 1, 'force_tuple': False},\n 'name': 'split_axis_force_tuple_false'},\n {'ops': 'split_axis', 'input_shape': (1, 6),\n 'input_argname': 'x',\n 'args': {'indices_or_sections': [1, 2], 'axis': 1},\n 'name': 'split_axis_list'},\n\n # squeeze\n {'ops': 'squeeze', 'input_shape': (1, 3, 1, 2),\n 'input_argname': 'x',\n 'args': {'axis': None},\n 'name': 'squeeze_axis_none'},\n {'ops': 'squeeze', 'input_shape': (1, 3, 1, 2, 1),\n 'input_argname': 'x',\n 'args': {'axis': (2, 4)}},\n\n # swapaxes\n {'ops': 'swapaxes', 'input_shape': (2, 3, 4, 5),\n 'input_argname': 'x',\n 'args': {'axis1': 1, 'axis2': 2}},\n {'ops': 'swapaxes', 'input_shape': (2, 3, 4, 5),\n 'input_argname': 'x',\n 'args': {'axis1': -3, 'axis2': -1}},\n\n # tile\n {'ops': 'tile', 'input_shape': (1, 5),\n 'input_argname': 'x',\n 'args': {'reps': (1, 2)}},\n\n # transpose\n {'ops': 'transpose', 'input_shape': (1, 5),\n 'input_argname': 'x',\n 'args': {'axes': None}},\n\n # copy\n {'ops': 'copy', 'input_shape': (1, 5),\n 'input_argname': 'x',\n 'args': {'dst': -1}},\n\n # get_item\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': slice(0, 2)},\n 'name': 'get_item_0to2'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 
'input_argname': 'x',\n 'args': {'slices': (slice(1))},\n 'name': 'get_item_to1'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': (slice(1, None))},\n 'name': 'get_item_1tonone'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': 0},\n 'name': 'get_item_0'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': np.array(0)},\n 'name': 'get_item_npscalar0'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': (None, slice(0, 2))},\n 'name': 'get_item_none_0to2'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': (Ellipsis, slice(0, 2))},\n 'name': 'get_item_ellipsis_0to2'},\n # get_item, combine newaxis, slice, single index, ellipsis\n {'ops': 'get_item', 'input_shape': (2, 2, 3, 3, 3, 4),\n 'input_argname': 'x',\n 'args': {'slices': (0, None, Ellipsis, 0, None, slice(0, 2), None, 0)},\n 'name': 'get_item_complicated'},\n {'ops': 'get_item', 'input_shape': (2, 2, 3),\n 'input_argname': 'x',\n 'args': {'slices': (slice(None), slice(0, 1), slice(None, 2))},\n 'name': 'get_item_start_from_none'},\n\n # expand_dims\n {'ops': 'expand_dims', 'input_shape': (3,),\n 'input_argname': 'x', 'args': {'axis': 0},\n 'name': 'expand_dims_0'},\n {'ops': 'expand_dims', 'input_shape': (3,),\n 'input_argname': 'x', 'args': {'axis': 1},\n 'name': 'expand_dims_1'},\n {'ops': 'expand_dims', 'input_shape': (3,),\n 'input_argname': 'x', 'args': {'axis': -2},\n 'name': 'expand_dims_minus2'},\n\n # repeat\n {'ops': 'repeat', 'input_shape': (3,),\n 'input_argname': 'x', 'args': {'repeats': 2},\n 'name': 'repeat_ndim1'},\n {'ops': 'repeat', 'input_shape': (2, 3),\n 'input_argname': 'x', 'args': {'repeats': 2, 'axis': 1},\n 'name': 'repeat_with_axis'},\n {'ops': 'repeat', 'input_shape': (2, 3),\n 'input_argname': 'x', 'args': {'repeats': 2},\n 'name': 'repeat_default_axis'},\n\n # separate\n {'ops': 'separate', 'input_shape': (2, 3),\n 'input_argname': 'x', 'args': {}, 'name': 'separate_axis0'},\n {'ops': 'separate', 'input_shape': (2, 3),\n 'input_argname': 'x', 'args': {'axis': 1}, 'name': 'separate_axis1'},\n {'ops': 'separate', 'input_shape': (1, 2, 3),\n 'input_argname': 'x', 'args': {}, 'name': 'separate_single_output'},\n\n # moveaxis\n {'ops': 'moveaxis', 'input_shape': (2, 3, 4, 5),\n 'input_argname': 'x', 'args': {'source': 0, 'destination': -1}},\n {'ops': 'moveaxis', 'input_shape': (2, 3, 4, 5),\n 'input_argname': 'x', 'args': {'source': (0, 3), 'destination': (2, 0)}},\n\n # rollaxis\n {'ops': 'rollaxis', 'input_shape': (2, 3, 4, 5),\n 'input_argname': 'x', 'args': {'axis': 2, 'start': 0}},\n)\nclass TestArrayOperators(ONNXModelTest):\n\n def setUp(self):\n\n class Model(chainer.Chain):\n\n def __init__(self, ops, args, input_argname):\n super(Model, self).__init__()\n self.ops = getattr(F, ops)\n self.args = args\n self.input_argname = input_argname\n\n def __call__(self, x):\n self.args[self.input_argname] = x\n return self.ops(**self.args)\n\n self.model = Model(self.ops, self.args, self.input_argname)\n self.x = input_generator.increasing(*self.input_shape)\n\n def test_output(self):\n name = self.ops\n if hasattr(self, 'name'):\n name = self.name\n self.expect(\n self.model, self.x, name=name, expected_num_initializers=0)\n\n\nclass TestGetItemGather(ONNXModelChecker):\n # When chainer.testing.parameterize is used with list or ndarray parameter,\n # it causes regex warning. 
To resolve, use pytest's parameterize.\n\n @pytest.mark.parametrize(\n 'name,slices', [\n ('gather_axis0', ([[0, 1], [0, 1]],)),\n ('gather_axis1', (slice(None), [[0, 1], [1, 2]], slice(None))),\n ('gather_axis2', (slice(None), slice(None), [[0, 1], [1, 2]])),\n ('gather_ndarray', (\n Ellipsis, np.array([[0, 1], [1, 2]], dtype=np.int64))),\n ('gather_before_squeezed', (slice(None), 0, [[0, 1], [2, 3]])),\n ('gather_after_squeezed', (slice(None), [[0, 1], [1, 2]], 0)),\n ('gather_unsqueezed', (\n slice(None), None, [[0, 1], [1, 2]], slice(None))),\n ('gathernd', [[0, 1], [1, 2]]),\n ('gathernd_slice_none', [[0, 1], [0, 1], slice(None)]),\n ('gathernd_full_idx', [[0, 1], [0, 1], [2, 3]]),\n ('gathernd_before_slice', [0, [0, 1], [2, 3]]),\n ('gathernd_after_slice', [[0, 1], [0, 2], 0]),\n ('gathernd_unsqueezed', [[0, 1], [0, 2], None])\n ])\n def test_output(self, name, slices):\n skip_opsets = None\n if name.startswith('gathernd'):\n skip_opsets = tuple(range(7, 11))\n name = 'get_item_' + name\n\n model = chainer.Sequential(\n lambda x: F.get_item(x, slices=slices))\n x = input_generator.increasing(2, 3, 4)\n\n self.expect(\n model, x, name=name, expected_num_initializers=0,\n skip_opset_version=skip_opsets)\n\n\[email protected](\n onnx.defs.onnx_opset_version() < 11, reason='not support GatherND')\[email protected](\n 'slices', [\n [slice(0, 2, 2)],\n [[0, 1], 0, [0, 1]],\n [slice(None), [0, 1], [0, 1]],\n [None, [0, 1], [0, 1]]\n ]\n)\ndef test_get_item_error(slices):\n model = chainer.Sequential(\n lambda x: F.get_item(x, slices=slices))\n x = input_generator.increasing(2, 3, 4)\n\n with pytest.raises(ValueError):\n export(model, x)\n\n\nclass TestConcat(ONNXModelTest):\n\n def setUp(self):\n class Model(chainer.Chain):\n\n def __init__(self):\n super(Model, self).__init__()\n\n def __call__(self, x1, x2):\n return F.concat((x1, x2))\n\n self.model = Model()\n self.x1 = input_generator.increasing(2, 5)\n self.x2 = input_generator.increasing(2, 4)\n\n def test_output(self):\n self.expect(self.model, (self.x1, self.x2))\n\n\nclass TestWhere(ONNXModelTest):\n\n def test_output(self):\n model = chainer.Sequential(\n F.where\n )\n cond = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.bool)\n x = input_generator.increasing(2, 3)\n y = np.zeros((2, 3), np.float32)\n self.expect(model, (cond, x, y), skip_opset_version=[7, 8])\n\n\nclass TestResizeImages(ONNXModelTest):\n\n def setUp(self):\n\n class Model(chainer.Chain):\n\n def __init__(self, ops, args, input_argname):\n super(Model, self).__init__()\n self.ops = ops\n self.args = args\n self.input_argname = input_argname\n\n def __call__(self, x):\n self.args[self.input_argname] = x\n return self.ops(**self.args)\n\n # (batch, channel, height, width) = (1, 1, 2, 2)\n self.x = np.array([[[[64, 32], [64, 32]]]], np.float32)\n\n # 2x upsampling\n args = {'output_shape': (4, 4)}\n self.model = Model(F.resize_images, args, 'x')\n\n def test_output(self):\n\n # FIXME(syoyo): Currently the test will fail due to the different\n # behavior of bilinear interpolation between Chainer and onnxruntime.\n # So disable output value check for a while.\n #\n # Currently Chainer will give [64, 53.333336, 42.666668, 32]\n # (same result with tensorflow r1.13.1 with `align_corners=True`),\n # while onnxruntime gives [64, 48, 32, 32]\n # (same result with tensorflow r1.13.1 with `align_corners=False`)\n #\n # However, the correct behavior will be [64, 54, 40, 32].\n # (cv2.resize and tensorflow master(r1.14 or r2.0) after this fix:\n # 
https://github.com/tensorflow/tensorflow/issues/6720)\n\n self.check_out_values = None # Skip output value check\n\n with testing.assert_warns(UserWarning):\n self.expect(self.model, self.x, expected_num_initializers=0)\n\n\[email protected](\n {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {},\n 'name': 'stack_default'},\n {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': 1},\n 'name': 'stack_axis1'},\n {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': 2},\n 'name': 'stack_axis2'},\n {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': -1},\n 'name': 'stack_axis_neg'},\n\n {'ops': 'vstack', 'inputs': [2, 3], 'kwargs': {},\n 'name': 'vstack_ndim0'},\n {'ops': 'vstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},\n 'name': 'vstack_ndim1'},\n {'ops': 'vstack', 'in_shapes': [(3, 4), (2, 4)], 'kwargs': {},\n 'name': 'vstack_ndim2'},\n\n {'ops': 'hstack', 'inputs': [2, 3], 'kwargs': {},\n 'name': 'hstack_ndim0'},\n {'ops': 'hstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},\n 'name': 'hstack_ndim1'},\n {'ops': 'hstack', 'in_shapes': [(3, 4), (3, 2)], 'kwargs': {},\n 'name': 'hstack_ndim2'},\n\n {'ops': 'dstack', 'inputs': [2, 3], 'kwargs': {},\n 'name': 'dstack_ndim0'},\n {'ops': 'dstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},\n 'name': 'dstack_ndim1'},\n {'ops': 'dstack', 'in_shapes': [(3, 2), (3, 2)], 'kwargs': {},\n 'name': 'dstack_ndim2'},\n {'ops': 'dstack', 'in_shapes': [(3, 2, 2), (3, 2, 1)], 'kwargs': {},\n 'name': 'dstack_ndim3'},\n)\nclass TestStack(ONNXModelTest):\n\n def test_output(self):\n\n class Model(chainer.Chain):\n def __init__(self, ops, kwargs):\n super(Model, self).__init__()\n self.ops = getattr(F, ops)\n self.kwargs = kwargs\n\n def __call__(self, *xs):\n return self.ops(xs, **self.kwargs)\n\n model = Model(ops=self.ops, kwargs=self.kwargs)\n if hasattr(self, 'inputs'):\n xs = [np.array(value, dtype=np.float32) for value in self.inputs]\n else:\n xs = [input_generator.increasing(*shape) for\n shape in self.in_shapes]\n\n self.expect(model, xs, name=self.name)\n\n\nclass TestShape(ONNXModelTest):\n\n def test_output(self):\n from onnx_chainer.replace_func import as_funcnode\n\n class Model(chainer.Chain):\n def __init__(self):\n super().__init__()\n\n @as_funcnode('Shape')\n def shape(self, x):\n # ONNX Shape operator constrains to return int64 type\n return np.array(x.shape, dtype=np.int64)\n\n def forward(self, x):\n # use shape method instead of x.shape to connect graph.\n return self.shape(x)\n\n model = Model()\n x = input_generator.increasing(3, 4, 5)\n\n self.expect(model, (x,))\n\n\nclass TestDynamicReshape(ONNXModelTest):\n\n def test_output(self):\n from onnx_chainer.replace_func import as_funcnode\n\n class Model(chainer.Chain):\n def __init__(self):\n super().__init__()\n\n @as_funcnode('Reshape')\n def dynamic_reshape(self, x, shape):\n # shape is expected as variable type\n return F.reshape(x, tuple(shape.array))\n\n def forward(self, x, shape):\n return self.dynamic_reshape(x, shape)\n\n model = Model()\n x = input_generator.increasing(3, 4, 5)\n shape = np.array([12, 5], dtype=np.int64)\n\n def check_no_param(onnx_model, path):\n assert not any(['param' in v.name for v in onnx_model.graph.input])\n\n self.expect(model, (x, shape), custom_model_test_func=check_no_param)\n\n\[email protected](\n {'kwargs': {}, 'name': 'permutate'},\n {'kwargs': {'inv': True}, 'name': 'permutate_inv'},\n {'kwargs': {'axis': 1}, 'name': 'permutate_axis1'},\n {'kwargs': {'axis': 1, 'inv': True}, 'name': 
'permutate_axis1_inv'},\n)\nclass TestPermutate(ONNXModelTest):\n\n def test_output(self):\n\n class Model(chainer.Chain):\n def __init__(self, kwargs):\n super(Model, self).__init__()\n self.kwargs = kwargs\n\n def forward(self, x, indices):\n return F.permutate(x, indices, **self.kwargs)\n\n model = Model(kwargs=self.kwargs)\n\n x = np.arange(6).reshape((3, 2)).astype(np.float32)\n if self.kwargs.get('axis') == 1:\n indices = np.array([1, 0], np.int32)\n else:\n indices = np.array([2, 0, 1], np.int32)\n self.expect(model, (x, indices), name=self.name,\n skip_opset_version=[7, 8])\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.zeros"
]
] |
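The comment inside TestGetItemGather above notes that chainer.testing.parameterize raises a regex warning for list/ndarray parameters and switches to pytest's parametrize instead; a minimal standalone illustration of that pytest style (the test body here is illustrative, not taken from onnx-chainer):

    import numpy as np
    import pytest

    @pytest.mark.parametrize(
        "slices, expected_shape", [
            (np.s_[0:2], (2, 3)),   # row slice keeps both axes
            (np.s_[:, 0], (4,)),    # integer index squeezes an axis
        ])
    def test_get_item_shape(slices, expected_shape):
        x = np.arange(12).reshape(4, 3)
        assert x[slices].shape == expected_shape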
etrulls/d2-net
|
[
"95e5557d7f64641ba7991d7370b845c5a036f183"
] |
[
"lib/pyramid.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom lib.exceptions import EmptyTensorError\nfrom lib.utils import interpolate_dense_features, upscale_positions\n\n\ndef process_multiscale(image, model, scales=[.5, 1, 2]):\n b, _, h_init, w_init = image.size()\n device = image.device\n assert(b == 1)\n\n all_keypoints = torch.zeros([3, 0])\n all_descriptors = torch.zeros([\n model.dense_feature_extraction.num_channels, 0\n ])\n all_scores = torch.zeros(0)\n\n previous_dense_features = None\n banned = None\n for idx, scale in enumerate(scales):\n current_image = F.interpolate(\n image, scale_factor=scale,\n mode='bilinear', align_corners=True\n )\n _, _, h_level, w_level = current_image.size()\n\n dense_features = model.dense_feature_extraction(current_image)\n del current_image\n\n _, _, h, w = dense_features.size()\n\n # Sum the feature maps.\n if previous_dense_features is not None:\n dense_features += F.interpolate(\n previous_dense_features, size=[h, w],\n mode='bilinear', align_corners=True\n )\n del previous_dense_features\n\n # Recover detections.\n detections = model.detection(dense_features)\n if banned is not None:\n banned = F.interpolate(banned.float(), size=[h, w]).byte()\n detections = torch.min(detections, (1 - banned))\n banned = torch.max(\n torch.max(detections, dim=1)[0].unsqueeze(1), banned\n )\n else:\n banned = torch.max(detections, dim=1)[0].unsqueeze(1)\n fmap_pos = torch.nonzero(detections[0]).t().cpu()\n del detections\n\n # Recover displacements.\n displacements = model.localization(dense_features)[0]\n displacements_i = displacements[\n 0, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]\n ]\n displacements_j = displacements[\n 1, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]\n ]\n del displacements\n\n mask = torch.min(\n torch.abs(displacements_i) < 0.5,\n torch.abs(displacements_j) < 0.5\n )\n fmap_pos = fmap_pos[:, mask]\n valid_displacements = torch.stack([\n displacements_i[mask],\n displacements_j[mask]\n ], dim=0).cpu()\n del mask, displacements_i, displacements_j\n\n fmap_keypoints = fmap_pos[1 :, :].float() + valid_displacements\n del valid_displacements\n\n try:\n raw_descriptors, _, ids = interpolate_dense_features(\n fmap_keypoints.to(device),\n dense_features[0]\n )\n except EmptyTensorError:\n continue\n fmap_pos = fmap_pos[:, ids]\n fmap_keypoints = fmap_keypoints[:, ids]\n del ids\n\n keypoints = upscale_positions(fmap_keypoints, scaling_steps=2)\n del fmap_keypoints\n\n descriptors = F.normalize(raw_descriptors, dim=0).cpu()\n del raw_descriptors\n\n keypoints[0, :] *= h_init / h_level\n keypoints[1, :] *= w_init / w_level\n\n fmap_pos = fmap_pos.cpu()\n keypoints = keypoints.cpu()\n\n keypoints = torch.cat([\n keypoints,\n torch.ones([1, keypoints.size(1)]) * 1 / scale,\n ], dim=0)\n\n scores = dense_features[\n 0, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]\n ].cpu() / (idx + 1)\n del fmap_pos\n\n all_keypoints = torch.cat([all_keypoints, keypoints], dim=1)\n all_descriptors = torch.cat([all_descriptors, descriptors], dim=1)\n all_scores = torch.cat([all_scores, scores], dim=0)\n del keypoints, descriptors\n\n previous_dense_features = dense_features\n del dense_features\n del previous_dense_features, banned\n\n keypoints = all_keypoints.t().numpy()\n del all_keypoints\n scores = all_scores.numpy()\n del all_scores\n descriptors = all_descriptors.t().numpy()\n del all_descriptors\n return keypoints, scores, descriptors\n"
] |
[
[
"torch.nn.functional.normalize",
"torch.abs",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.min",
"torch.nn.functional.interpolate",
"torch.nonzero",
"torch.stack"
]
] |
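A toy sketch, with hypothetical shapes, of the multiscale accumulation pattern in process_multiscale above: dense features from the previously processed scale are bilinearly upsampled to the current resolution and summed in before detections are recovered.

    import torch
    import torch.nn.functional as F

    prev = torch.randn(1, 8, 16, 16)  # dense features from the previous scale
    curr = torch.randn(1, 8, 32, 32)  # dense features at the current scale

    # Match spatial sizes, then accumulate, as in the loop above.
    curr = curr + F.interpolate(prev, size=curr.shape[-2:],
                                mode="bilinear", align_corners=True)
    assert curr.shape == (1, 8, 32, 32)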
tho15/tfplusplus
|
[
"e151986f7d449ee5ccb440fbb947fbc64fd62f49"
] |
[
"experimental/model.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n#import cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport csv\nimport math\nimport os\nfrom keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D, ELU, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.models import Sequential, model_from_json\nfrom keras.optimizers import Adam\nfrom keras.preprocessing import image\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nimport json\nfrom keras import backend as K\n\nnum_epochs = 30\nbatch_size = 64\n\ndef preporcess_img(img, state):\n\t#img = cv2.resize(img, (64, 48), interpolation = cv2.INTER_CUBIC)\n\t#img.thumbnail((128,96), Image.ANTIALIAS)\n\t\n\treturn img, state\n\n\n# generate training/validation batch\ndef get_batch(X, batch_size = 64):\n\t# randomly pickup training data to create a batch\n\twhile(True):\n\t\tX_batch = []\n\t\ty_batch = []\n\t\t\n\t\tpicked = []\n\t\tn_imgs = 0\n\t\t\n\t\t# randomly selected batch size images and light state\n\t\twhile n_imgs < batch_size:\n\t\t\ti = np.random.randint(0, len(X))\n\t\t\tif (i in picked):\n\t\t\t\tcontinue # skip if this image has been picked\n\t\t\ty_state = int(X[i][1])\n\t\t\t\n\t\t\n\t\t\tpicked.append(i)\n\t\t\timg_path = './images/' + X[i][0].strip()\n\t\t\tlight_img = plt.imread(img_path)\n\t\t\tlight_img = Image.open(img_path)\n\t\t\tlight_img = image.load_img(img_path, target_size=(96, 128))\n\t\t\timg_array = image.img_to_array(light_img)\n\t\t\t#light_img = cv2.imread(img_path)\n\t\t\t#img_array = cv2.resize(light_img, (128, 96), interpolation = cv2.INTER_CUBIC)\n\t\t\t\n\t\t\t# preprocess image\n\t\t\t#light_img, y_state = preporcess_img(light_img, y_state)\n\t\t\t\n\t\t\tX_batch.append(img_array)\n\t\t\tst_v = [0, 0, 0]\n\t\t\tst_v[y_state] = 1\n\t\t\ty_batch.append(st_v)\n\t\t\tn_imgs += 1\n\t\t\n\t\tyield np.array(X_batch), np.array(y_batch)\n\t\t\n\t\t\ndef get_samples_per_epoch(num_samples, batch_size):\n\t# return samples per epoch that is multiple of batch_size\n\treturn math.ceil(num_samples/batch_size)\n\t\n\ndef get_model():\n\t\n\tmodel = Sequential()\n\t\n\t# normalization layer\n\tmodel.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(96, 128, 3)))\n\t\n\t# convolution 2D with filter 5x5\n\t#model.add(Convolution2D(24, 5, 5, border_mode='same', subsample=(2, 2)))\n\tmodel.add(Conv2D(24, (5, 5), padding='same', strides=(2, 2)))\n\tmodel.add(ELU())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))\n\t\n\t#model.add(Convolution2D(36, 5, 5, border_mode='same', subsample=(2, 2)))\n\tmodel.add(Conv2D(36, (5, 5), padding='same', strides=(2, 2)))\n\tmodel.add(ELU())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))\n\tmodel.add(Dropout(0.4))\n\t\n\t#model.add(Convolution2D(48, 5, 5, border_mode='same', subsample=(2, 2)))\n\tmodel.add(Conv2D(48, (5, 5), padding='same', strides=(2, 2)))\n\tmodel.add(ELU())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))\n\tmodel.add(Dropout(0.25))\n\t\n\t#model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))\n\tmodel.add(Conv2D(64, (3, 3), padding='same', strides=(1, 1)))\n\tmodel.add(ELU())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))\n\tmodel.add(Dropout(0.25))\n\t\n\t#model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))\n\tmodel.add(Conv2D(64, (3, 3), padding='same', strides=(1, 
1)))\n\tmodel.add(ELU())\n\t\n\tmodel.add(Flatten())\n\t\n\tmodel.add(Dense(1164))\n\tmodel.add(ELU())\n\tmodel.add(Dropout(0.5))\n\t\n\tmodel.add(Dense(100))\n\tmodel.add(ELU())\n\t\n\tmodel.add(Dense(50))\n\tmodel.add(ELU())\n\t\n\tmodel.add(Dense(10))\n\tmodel.add(ELU())\n\t\n\tmodel.add(Dense(3))\n\tmodel.add(Activation('softmax'))\n\t\n\treturn model\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n\t\"\"\"\n\tFreezes the state of a session into a prunned computation graph.\n\n\tCreates a new computation graph where variable nodes are replaced by\n\tconstants taking their current value in the session. The new graph will be\n\tprunned so subgraphs that are not neccesary to compute the requested\n\toutputs are removed.\n\t@param session The TensorFlow session to be frozen.\n\t@param keep_var_names A list of variable names that should not be frozen,\n\t\t\tor None to freeze all the variables in the graph.\n\t@param output_names Names of the relevant graph outputs.\n\t@param clear_devices Remove the device directives from the graph for better portability.\n\t@return The frozen graph definition.\n\t\"\"\"\n\t\n\tfrom tensorflow.python.framework.graph_util import convert_variables_to_constants\n\tgraph = session.graph\n\twith graph.as_default():\n\t\tfreeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n\t\toutput_names = output_names or []\n\t\toutput_names += [v.op.name for v in tf.global_variables()]\n\t\tinput_graph_def = graph.as_graph_def()\n\t\tif clear_devices:\n\t\t\tfor node in input_graph_def.node:\n\t\t\t\tnode.device = \"\"\n\t\tfrozen_graph = convert_variables_to_constants(session, input_graph_def,\n output_names, freeze_var_names)\n\t\treturn frozen_graph\n\n\nif __name__ == \"__main__\":\n\tdriving_data = []\n\t# create a list of image paths and angles\n\twith open('traffic_light_data.csv') as drvfile:\n\t\treader = csv.DictReader(drvfile)\n\t\tfor row in reader:\n\t\t\tdriving_data.append((row['images'], row['state']))\n\n\tdriving_data = shuffle(driving_data)\n\t# split the data, 20% for validation\n\tX_train, X_validation = train_test_split(driving_data, test_size = 0.2, random_state = 7898)\n\n\ttrain_generator = get_batch(X_train)\n\tval_generator = get_batch(X_validation)\n\n\tmodel = get_model()\n\tmodel.compile(optimizer = Adam(lr = 0.0001), loss='mse', metrics=['accuracy'])\n\n\tprint(\"Start training...\")\n\th = model.fit_generator( train_generator,\n \tsteps_per_epoch = get_samples_per_epoch(len(X_train), batch_size),\n \tepochs = num_epochs,\n \tvalidation_data = val_generator,\n \tvalidation_steps = get_samples_per_epoch(len(X_validation), batch_size))\n\n\t#print (\"fit history: \", h.history.keys())\n\n\t# save model and weights\n\tmodel_json = model.to_json()\n\twith open(\"./model.json\", \"w\") as json_file:\n\t\tjson.dump(model_json, json_file)\n\n\tmodel.save_weights(\"./model.h5\")\n\tprint(\"Saved model to disk\")\n\n\tmodel.save(\"./k_model.h5\")\n\t\n\t#frozen_graph = freeze_session(K.get_session(), output_names=[model.output.op.name])\n\t#tf.train.write_graph(frozen_graph, \"./\", \"traffic_light_frozen.pb\", as_text=False)\n\t#tf.train.write_graph(K.get_session().graph.as_graph_def(), \"./\",\"model_graph.ascii\", as_text=True)\n\n\n\n"
] |
[
[
"sklearn.utils.shuffle",
"matplotlib.pyplot.imread",
"tensorflow.global_variables",
"sklearn.model_selection.train_test_split",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"numpy.array"
]
] |
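One detail worth flagging in the model above: it ends in a 3-way softmax yet is compiled with loss='mse'. A short sketch of the more conventional pairing for one-hot classification targets (a suggestion, not the repository's code):

    from keras.layers import Dense
    from keras.models import Sequential
    from keras.optimizers import Adam

    # Toy single-layer stand-in; only the compile step is the point here.
    model = Sequential([Dense(3, activation="softmax", input_shape=(10,))])
    model.compile(optimizer=Adam(lr=1e-4),
                  loss="categorical_crossentropy",  # matches softmax + one-hot
                  metrics=["accuracy"])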
graehl/pytorch-transformers
|
[
"59292fe230ee2c2c681b7966bf2bc1f374ce67d4"
] |
[
"run_sent.py"
] |
[
" #!/usr/bin/python3\n\n# pip3 install --user torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html\n# pip3 install --user transformers\n\n\n\nimport torch\nimport argparse\nimport sys\n\n\nmodelname=\"finmodel_distilbert\" #\"bert-base-cased-finetuned-mrpc\"\n\n\nparser = argparse.ArgumentParser(description='3-class sentiment analyzer')\n\nparser.add_argument('--model-name', type=str, default=modelname, help='model directory, must include model type in name')\n\nparser.add_argument('--input-file', '-i', type=str, default=None, help='input file name, default is 3 test lines')\n\nparser.add_argument('--output-format-short', '-s', action='store_true', default=False, help='one line, three scores (adding to 1.0)')\n\nparser.add_argument('--output-format-class', '-c', action='store_true', default=False, help='one line, single best class')\n\nparser.add_argument('--keep-case', '-k', action='store_true', default=False, help='do not lowercase the input (default: lowercase it)')\n\nparser.add_argument('--max-length', '-m', type=int, default=512, help='maximum length handled by the model')\n\nargs = parser.parse_args()\n\n\nusecfg = False\nif usecfg:\n from transformers import (\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizer,\n )\n config = DistilBertConfig.from_pretrained(args.model_name, finetuning_task='sentiment3', num_labels=3)\n model = DistilBertForSequenceClassification.from_pretrained(args.model_name, config=config)\n tokenizer = DistilBertTokenizer.from_pretrained(args.model_name, do_lower_case=(not args.keep_case))\nelse:\n from transformers import AutoTokenizer, AutoModelForSequenceClassification\n tokenizer = AutoTokenizer.from_pretrained(args.model_name, do_lower_case=(not args.keep_case))\n model = AutoModelForSequenceClassification.from_pretrained(args.model_name)\n\nmodel.to(\"cpu\")\nmodel.eval()\n\n\nclasses = [\"0\", \"1\", \"2\"]\n\n\ntexts = [\"I hate you\", \"I love you\", \"Isomorphic protein matrices\"]\n\n\niter = (sys.stdin if args.input_file == '-' else open(args.input_file)) if args.input_file is not None else texts\n\nfor t in iter:\n\n proc = t.strip() # if args.keep_case else t.strip().lower()\n\n input = tokenizer.encode(proc, return_tensors=\"pt\", add_special_tokens=True, max_length=args.max_length) # only keep first 512 (sub)words\n\n\n tensors = model(input)\n\n\n sm = torch.softmax(tensors[0], dim=1).tolist()[0]\n\n\n out = None\n if args.output_format_short:\n out = classes[sm.index(max(sm))]\n\n if args.output_format_class:\n if out is not None:\n out += '\\t'\n out += '\\t'.join('{0:.4f}'.format(x) for x in sm)\n print(out)\n elif args.output_format_short:\n print(out)\n else:\n print(f\"input: {proc}\")\n\n for i, cls in enumerate(classes):\n\n print(f\" {cls}: {round(sm[i]*100)}%\")\n"
] |
[
[
"torch.softmax"
]
] |
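The scoring step in run_sent.py above reduces to a softmax over the model's output logits; a tiny self-contained version of it:

    import torch

    logits = torch.tensor([[1.2, -0.3, 0.5]])  # hypothetical 3-class logits
    sm = torch.softmax(logits, dim=1).tolist()[0]

    assert abs(sum(sm) - 1.0) < 1e-6  # the three scores add to 1.0
    print(sm.index(max(sm)))          # index of the best class -> 0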
harshkasyap/PyAriesFL
|
[
"dd78dcebc771971abfee301b80cdd5d246c14840"
] |
[
"data/generate_model.py"
] |
[
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch\nimport sys\nimport traceback\n\n\n# prep\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.datasets import make_classification\nfrom sklearn.preprocessing import binarize, LabelEncoder, MinMaxScaler\n\n# models\nfrom torch import nn\nfrom torch import optim\nfrom torch.autograd import Variable\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom demo.runners.support.utils import log_msg\n\nasync def generate_model():\n log_msg(\"COORDINATOR IS GENERATING THE INITIAL MODEL\")\n\n # One Model\n model = nn.Sequential(\n nn.Linear(8, 4),\n nn.Sigmoid(),\n nn.Linear(4, 2),\n nn.Sigmoid(),\n nn.Linear(2, 1),\n nn.Sigmoid()\n )\n torch.save(model, \"model/model.pt\")\n\n return True\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.save"
]
] |
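generate_model above pickles the entire nn.Module with torch.save, which ties the checkpoint to the surrounding code layout; a sketch of the state_dict alternative that stores only the tensors (the file name here is arbitrary):

    import torch
    from torch import nn

    def build():
        return nn.Sequential(
            nn.Linear(8, 4), nn.Sigmoid(),
            nn.Linear(4, 2), nn.Sigmoid(),
            nn.Linear(2, 1), nn.Sigmoid(),
        )

    model = build()
    torch.save(model.state_dict(), "model_state.pt")  # weights/biases only

    fresh = build()                                   # same architecture
    fresh.load_state_dict(torch.load("model_state.pt"))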
unsw-cse-soc/REST2Bot
|
[
"b4dc549ee61611afb8cfbee612f7b7c7ce9ee8a5"
] |
[
"swagger/swagger_utils.py"
] |
[
"import os\nimport re\n\nimport wordninja\nfrom nltk.stem import WordNetLemmatizer\nfrom pandas import read_csv\nfrom tabulate import tabulate\n\nfrom swagger.entities import Param\nfrom utils.preprocess import remove_stopword\nfrom utils.language_tool import LanguageChecker\nfrom utils.text import is_singular\n\nlemmatizer = WordNetLemmatizer()\nsch = LanguageChecker()\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nparams_file = os.path.join(__location__, \"files/parameters.tsv\")\nentity_file = os.path.join(__location__, \"files/entity-list.txt\")\n\n\nclass ParamUtils:\n def __init__(self, params_tsv=params_file, entity_file=entity_file) -> None:\n\n self.df_params = read_csv(params_tsv, sep='\\t')\n self.df_params = self.df_params[self.df_params.is_auth_param == False] # Remove authentication parameters\n self.df_params['name'] = self.df_params['name'].apply(ParamUtils.normalize)\n self.values = {}\n self.pattern_values = {}\n self.entities = set()\n\n with open(entity_file) as f:\n for entity in set(f.readlines()):\n self.entities.add(ParamUtils.normalize(entity, lemmatize=True))\n\n for index, row in self.df_params.iterrows():\n name, pattern, example, typ, count = row['name'], row['pattern'], row['example'], row['type'], row['count']\n\n if example == 'None':\n continue\n\n if (name, typ) not in self.values:\n self.values[(name, typ)] = {example: count}\n else:\n if example not in self.values[(name, typ)]:\n self.values[(name, typ)][example] = 0\n self.values[(name, typ)][example] += count\n\n if pattern == 'None':\n continue\n\n if pattern not in self.pattern_values:\n self.pattern_values[pattern] = {example: count}\n else:\n if example not in self.pattern_values[pattern]:\n self.pattern_values[pattern][example] = 0\n self.pattern_values[pattern][example] += count\n\n @staticmethod\n def is_param(text: str):\n if not text:\n return False\n\n if text.startswith('{') and text.endswith('}'):\n return True\n return False\n\n @staticmethod\n def normalize(text, lowercase=True, lemmatize=False, remove_punct=True, rm_stopwords=False):\n if not isinstance(text, str):\n return\n\n if text.lower() == 'api':\n return 'API'\n\n if rm_stopwords:\n text = remove_stopword(text)\n\n if text.startswith(\"x-\"):\n t = text.replace(\"x-\", \"\")\n else:\n t = text\n for ch in ['X-Amz-', 'x-amz-', '$', '_', '-', '.']:\n t = t.replace(ch, \" \")\n\n if lemmatize:\n lt = \"\"\n for w in text.strip().lower().split():\n lt += lemmatizer.lemmatize(w) + \" \"\n t = lt.strip()\n\n if t == t.upper() and ' ' not in t:\n return t\n\n t = t.replace('’', \"'\")\n if remove_punct:\n t = re.sub(r\"(\\w)([A-Z])\", r\"\\1 \\2\", t)\n\n m = ''\n for w in t.split():\n if w in {\"<<\", \">>\"}:\n m += \" \" + w\n continue\n\n parts = wordninja.split(w)\n current = \" \".join(parts)\n for p in parts:\n if len(p) == 1:\n current = w\n m += \" \" + current\n t = m\n\n if lowercase:\n t = t.lower()\n\n t = re.sub('\\s+', ' ', t).strip()\n return t\n\n @staticmethod\n def is_entity_parameter(param: Param):\n \"\"\"\n Shows if a parameter should be present in a canonical expression or not.\n For example, it returns `False` for header params, and parameters related to authentication and API versioning \n :param param: \n :return: \n \"\"\"\n\n if param.location in {'header', 'file'}:\n return False\n\n if param.is_auth_param or ParamUtils.is_version(param.name):\n return False\n\n if param.name in {'username'}:\n return False\n\n return True\n\n @staticmethod\n def 
human_readable_name(param: Param):\n threshold = 20\n name = ParamUtils.normalize(param.name)\n desc = ParamUtils.normalize(param.desc)\n\n if not sch.misspellings(name):\n return name\n\n if desc and len(desc) > threshold:\n return sch.spelling_corector(name)\n\n if desc and not sch.misspellings(desc):\n return desc\n\n return name\n\n @staticmethod\n def is_authentication(param_name: str):\n param_name = ParamUtils.normalize(param_name)\n terms = set(param_name.split())\n for t in ['token', 'api key', 'key', \"api token\", 'x-aio-key', 'aio-key', 'x-aio-signature', 'accesstoken']:\n if t in terms:\n return True\n\n return False\n\n @staticmethod\n def is_necessary_param(param_name: str, auth=True):\n \"\"\"\n if a parameter should be appeared in the canonical sentence\n :param param_name: \n :return: \n \"\"\"\n\n if ParamUtils.is_version(param_name) or (ParamUtils.is_authentication(param_name) and auth):\n return False\n\n if param_name in {'username', 'user', 'account', 'user_name'}:\n return False\n\n return True\n\n @staticmethod\n def is_version(param_name: str):\n\n if not param_name:\n return False\n\n if param_name.startswith(\"v1\") or param_name.startswith(\"v2\"):\n return True\n\n param_name = ParamUtils.normalize(param_name)\n terms = set(param_name.split())\n for t in ['version', 'versions', 'v', 'v0', 'ver', 'v1', 'v2', 'v3', 'v4', 'v5']:\n if t in terms or param_name.startswith(t + \".\") or param_name.startswith(t + \"_\") or param_name.startswith(\n t + \"-\"):\n return True\n return False\n\n @staticmethod\n def is_identifier(param_name: str, auth=False, version=False):\n param_name = ParamUtils.normalize(param_name)\n if auth and ParamUtils.is_authentication(param_name):\n return True\n if version and ParamUtils.is_version(param_name):\n return True\n\n param_name = ParamUtils.normalize(param_name, lemmatize=True)\n terms = set(param_name.split())\n for id in {'id', 'key', 'identifier', 'ids', 'sid', 'token', 'credential', 'credentials', 'guid', 'uuid',\n 'code', 'serial'}:\n if id in terms:\n return True\n if param_name.endswith(\"id\"):\n return True\n\n return False\n\n def is_named_entity(self, param_name):\n\n param_name = ParamUtils.normalize(param_name, lemmatize=True)\n terms = set(param_name.split())\n for t in terms:\n if t in self.entities:\n return True\n\n return False\n\n def stats(self):\n print(\"—————————————————— params.tsv ———————————————————————\")\n print(\"#Unique Parameters : \", self.df_params.shape[0])\n # print(\"—————————————————————————————————————————————————————\")\n # name\ttype\trequired\tpattern \tcount\n g = self.df_params.groupby(['type']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Parameters({}) by Types:\".format(total))\n print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")\n g = self.df_params.groupby(['required']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Parameters by Required:\".format(total))\n print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")\n g = self.df_params.groupby(['location']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Parameters Request Location:\".format(total))\n print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")\n g = self.df_params['desc'].apply(lambda a: a != 'None')\n 
self.df_params['desc'] = g\n g = self.df_params.groupby(['desc']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Parameters by Desc:\")\n print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")\n g = self.df_params['pattern'].apply(lambda a: a != 'None')\n self.df_params['pattern'] = g\n g = self.df_params.groupby(['pattern']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Parameters by Patterns:\")\n print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")\n g = self.df_params['example'].apply(lambda a: a != 'None')\n self.df_params['example'] = g\n g = self.df_params.groupby(['example']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Parameters by Example:\")\n print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")\n # g = df_params.groupby(['is_auth_param']).sum()\n # total = g['count'].sum()\n # g['percent'] = g['count'] / total * 100\n # print(\"Authentication Parameters:\")\n # print(tabulate(g, headers='keys', tablefmt='psql'))\n\n # print(\"—————————————————————————————————————————————————————\")[, 'key', 'identifier', 'ids']\n self.df_params['identifier'] = self.df_params['name'].apply(ParamUtils.is_identifier)\n g = self.df_params.groupby(['identifier']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Identifier Parameters:\")\n print(tabulate(g, headers='keys', tablefmt='psql'))\n self.df_params.drop(columns=['identifier'], inplace=True)\n\n # print(\"—————————————————————————————————————————————————————\")[, 'key', 'identifier', 'ids']\n self.df_params['entity'] = self.df_params['name'].apply(self.is_named_entity)\n g = self.df_params.groupby(['entity']).sum()\n total = g['count'].sum()\n g['percent'] = g['count'] / total * 100\n print(\"Entity Parameters:\")\n print(tabulate(g, headers='keys', tablefmt='psql'))\n self.df_params.drop(columns=['entity'], inplace=True)\n\n\nclass PathUtils:\n @staticmethod\n def filter_redundant_url_segments(parts):\n ret = []\n previous = None\n for word in parts:\n\n if not word:\n previous = word\n continue\n\n if previous and ParamUtils.normalize(word[1:-1]) == ParamUtils.normalize(previous) and is_singular(\n word[1:-1]):\n '''remove cases like: /countries/country_code/{country_code}'''\n ret = ret[:-1]\n ret.append(word)\n continue\n\n if word in []:\n '''ignore common URL prefixes like search/filter/...'''\n previous = word\n continue\n\n # tagged = nlp.pos_tag(word)\n # if tagged:\n # '''Ignore cases like /items/latest'''\n # if tagged[0][1].startswith('JJ'):\n # previous = word\n # continue\n\n previous = word\n ret.append(word)\n\n return ret\n\n @staticmethod\n def x(segments):\n if not segments:\n return None\n\n for i in range(len(segments)):\n if ParamUtils.is_necessary_param(segments[i]):\n break\n\n return segments[i:]\n\n @staticmethod\n def remove_non_informative_segments(url, base_path):\n \"\"\"\"\n Removes non informative segments of the given URL\n \"\"\"\n\n if \"?\" in url:\n url = url[:url.index(\"?\")]\n\n if base_path:\n parts = base_path.split('/')\n\n prev = None\n for i, p in enumerate(parts):\n if \"{\" in p and \"}\" in p or not is_singular(p) or p in [\"search\", \"query\", \"count\"] \\\n or ParamUtils.is_version(prev):\n base_path = base_path[:base_path.index(p)]\n 
break\n prev = p\n\n if base_path and base_path != '/':\n url = url.replace(base_path, \"/\")\n\n url = url.replace('http://', '/').replace('https://', '/')\n url = url.replace(\"{\", \"/{\")\n url = url.replace(\"}\", \"}/\")\n url = url.replace(\"//\", \"/\")\n\n return url, base_path\n\n @staticmethod\n def extract_segments(url):\n parts = re.compile(\"[./:]\").split(url)\n\n parts = PathUtils.filter_redundant_url_segments(parts)\n\n i = 0\n for i in range(len(parts)):\n if ParamUtils.is_necessary_param(parts[i]):\n break\n\n parts = parts[i:]\n ret = []\n for p in parts:\n if \"}{\" in p: # //{dataset}{format}\n ret.append(p[:p.index(\"}{\") + 1])\n ret.append(p[p.index(\"}{\") + 1:])\n else:\n ret.append(p)\n\n ret = list(filter(lambda p: ParamUtils.normalize(p), ret))\n return ret\n\n\nif __name__ == \"__main__\":\n print(PathUtils.extract_segments(\"/{url}/hello.json/{dataset}{format}\"))\n"
] |
[
[
"pandas.read_csv"
]
] |
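The camelCase handling inside ParamUtils.normalize above comes down to a single capture-group substitution that inserts a space before each interior capital; isolated, it behaves like this:

    import re

    def split_camel(text: str) -> str:
        # Same pattern as in normalize(): a word character followed by a capital.
        return re.sub(r"(\w)([A-Z])", r"\1 \2", text)

    print(split_camel("accessToken"))    # access Token
    print(split_camel("maxRetryCount"))  # max Retry Count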
DeltaMarine101/neural_net
|
[
"fd0b9793a9b3dffd1ee2330ff8e3e9bda98cec33"
] |
[
"nn.py"
] |
[
"import math\nimport random as r\nimport numpy as np\nimport pickle\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as cplt\n\n# data = [(np.array(list(map(float, i[38:].split()))) / 256., np.array(list(map(float, i[8:28].split())))) for i in open('data/mnist.txt').read().strip().split('\\n')]\n# pickle.dump(data, open('data/mnist.pickle', 'wb'))\n\ndata = pickle.load(open('data/mnist.pickle', 'rb'))\n\ntraining_data = data[:50000]\ntest_data = data[50000:]\n\ndef time_func(func):\n def wrapper(*arg, **kw):\n t1 = time.time()\n res = func(*arg, **kw)\n print('{} {:.3f}s'.format(func.__name__, time.time() - t1))\n return res\n return wrapper\n\ndef show_img(img, size=28):\n plt.style.use('dark_background')\n\n cmap = cplt.LinearSegmentedColormap.from_list(\"\", [\"k\", \"w\"])\n\n fig, ax = plt.subplots()\n ax.set_axis_off()\n X = np.reshape(img, (size, size))\n ax.imshow(X, interpolation='nearest', cmap=cmap)\n\n plt.subplots_adjust(left=0.03, right=0.97, top=0.97, bottom=0.03, wspace=0.1, hspace=0.1)\n\n plt.show()\n\ndef sigmoid(x, derivative=False):\n sigm = 1. / (1. + np.exp(-x))\n if derivative:\n return sigm * (1. - sigm)\n return sigm\n\nclass neural_net:\n def __init__(self, struct, lr=.001, rp=0.001):\n # structure is a tuple with entries representing no. of nodes in each layer\n self.struct = struct\n self.n_layers = len(struct) - 2\n self.lr = lr\n self.rp = rp\n\n # Random value initalisation\n self.weight = [(np.random.rand(w, v) * 2 - 1) / math.sqrt(v) for v, w in zip(struct[:-1], struct[1:])]\n self.bias = [np.random.rand(v) for v in struct[1:-1]]\n\n def run(self, L, show=False):\n for i in range(self.n_layers):\n L = sigmoid(np.dot(self.weight[i], L) + self.bias[i])\n L = sigmoid(np.dot(self.weight[self.n_layers], L))\n\n if show:\n print(\"Result\\n_____________________\\n\")\n for i in range(len(L)):\n print(str(i) + \":\", L[i])\n print(\"_____________________\\n\")\n\n return L\n\n @time_func\n def backprop(self, training):\n # Init deltas to 0\n dweight = [np.zeros((w, v)) for v, w in zip(self.struct[:-1], self.struct[1:])]\n dbias = [np.zeros(v) for v in self.struct[1:-1]]\n dactivation = [np.zeros(v) for v in self.struct[1:]]\n\n for x, y in training:\n L = [x]\n for i in range(self.n_layers):\n L += [sigmoid(np.dot(self.weight[i], L[i]) + self.bias[i])]\n L += [sigmoid(np.dot(self.weight[-1], L[-1]))]\n\n dactivation[-1] = (2 / self.struct[-1]) * (L[-1] - y)\n\n for n in reversed(range(self.n_layers + 1)):\n # rpc = -1 * np.sign(L[n]) * self.rp\n\n bias = [np.zeros(self.struct[n])] + self.bias\n for nodej in range(self.struct[n + 1]):\n deriv = sigmoid(L[n] * self.weight[n][nodej] + bias[n], derivative=True) * dactivation[n][nodej]\n dweight[n][nodej] += deriv * L[n] # + rpc * self.weight[n][nodej]\n if n > 0:\n dbias[n - 1] += deriv\n dactivation[n - 1] += deriv * self.weight[n][nodej]\n\n self.weight = [x - (y * self.lr) / len(training) for x, y in zip(self.weight, dweight)]\n self.bias = [x - (y * self.lr) / len(training) for x, y in zip(self.bias, dbias)]\n\n @time_func\n def loss(self, data):\n return sum([np.sum(np.square(self.run(x) - y)) / len(y) for x, y in data]) / len(data) # + self.rp * sum([np.sum(np.square(i)) for i in self.weight]) / 2\n\n def test(self, test_data):\n n_pass = 0\n for x, y in test_data:\n fx = self.run(x).tolist()\n n_pass += (y[fx.index(max(fx))] == 1)\n\n return n_pass / len(test_data)\n\n def show(self, layer=1):\n plt.style.use('dark_background')\n\n side = int(math.sqrt(self.struct[layer - 1]))\n\n cmap = 
cplt.LinearSegmentedColormap.from_list(\"\", [\"#ff704d\", \"#222222\", \"#70db70\"])\n\n n_plots = int(math.sqrt(self.struct[layer]))\n fig, axes = plt.subplots(n_plots, n_plots)\n for i in range(n_plots):\n for j in range(n_plots):\n axes[i, j].set_axis_off()\n X = np.reshape(self.weight[layer - 1][i * n_plots + j], (side, side))\n axes[i, j].imshow(X, interpolation='nearest', cmap=cmap)\n\n plt.subplots_adjust(left=0.03, right=0.97, top=0.97, bottom=0.03, wspace=0.1, hspace=0.1)\n plt.show()\n\n def save(self, name='last_model.nn'):\n pickle.dump((self.struct, self.n_layers, self.lr, self.weight, self.bias), open('model/' + name, 'wb'))\n\n def load(self, name='last_model.nn'):\n self.struct, self.n_layers, self.lr, self.weight, self.bias = pickle.load(open('model/' + name, 'rb'))\n\nnn = neural_net((28 * 28, 256, 256, 10))\n## nn.load()\nnn.run(training_data[0][0], show=True)\nloss = nn.loss(training_data)\nprint(\"Initial loss:\", loss)\n# print(\"Accuracy:\", str(nn.test(test_data) * 100) + \"%\")\n\n# show_img(training_data[0][0])\n# nn.show()\n\nprev = loss\nbatch = 100\ncycles = 500\nwhile True:\n for i, data in enumerate([training_data[n:n + batch] for n in range(0, batch * cycles, batch)]):\n nn.backprop(data)\n\n if not i % 10:\n print(i, \"Accuracy:\", str(nn.test(test_data) * 100) + \"%\")\n\n loss = nn.loss(training_data[i % 3::3])\n print(\"(\" + str(i + 1) + \"/\" + str(cycles) + \") Loss:\", loss, ['+', '-'][prev > loss])\n prev = loss\n\n nn.save()\n\n# for i in range(1000):\n# nn.backprop([training_data[0]])\n# print(\"Accuracy:\", str(nn.test(test_data) * 100) + \"%\")\nprint(\"Final Accuracy:\", str(nn.test(test_data) * 100) + \"%\")\nnn.run(training_data[0][0], show=True)\n\n# nn.show()\n# show_img(training_data[0][0])\n"
] |
[
[
"numpy.dot",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"numpy.random.rand",
"matplotlib.pyplot.subplots_adjust",
"numpy.exp",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.style.use"
]
] |
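The backprop in nn.py above leans on the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)); a quick numerical check of that identity against a central difference:

    import numpy as np

    def sigmoid(x, derivative=False):
        s = 1.0 / (1.0 + np.exp(-x))
        return s * (1.0 - s) if derivative else s

    x = np.linspace(-4.0, 4.0, 9)
    h = 1e-6
    numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)  # central difference
    assert np.allclose(numeric, sigmoid(x, derivative=True), atol=1e-6)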
calmarazlopez/Deep-Learning-Udacity-Nanodegree
|
[
"19114fe796043c550a6ba3b3ae67c1e239002c11"
] |
[
"Predicting Bike-Sharing Patterns/my_answers.py"
] |
[
"import numpy as np\n\n\nclass NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n\n def train(self, features, targets):\n ''' Train the network on batch of features and targets. \n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n \n final_outputs, hidden_outputs = self.forward_pass_train(X) # Implement the forward pass function below\n # Implement the backproagation function below\n delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y, \n delta_weights_i_h, delta_weights_h_o)\n self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)\n\n\n def forward_pass_train(self, X):\n ''' Implement forward pass here \n \n Arguments\n ---------\n X: features batch\n\n '''\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer\n \n return final_outputs, hidden_outputs\n\n def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):\n ''' Implement backpropagation\n \n Arguments\n ---------\n final_outputs: output from forward pass\n y: target (i.e. 
label) batch\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n\n '''\n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n \n\n # TODO: Backpropagated error terms - Replace these values with your calculations.\n output_error_term = error\n \n \n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(self.weights_hidden_to_output, output_error_term) \n \n hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)\n \n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:, None]\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:,None]\n return delta_weights_i_h, delta_weights_h_o\n\n def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):\n ''' Update weights on gradient descent step\n \n Arguments\n ---------\n delta_weights_i_h: change in weights from input to hidden layers\n delta_weights_h_o: change in weights from hidden to output layers\n n_records: number of records\n\n '''\n self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step\n\n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n ---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer \n \n return final_outputs\n\n\n#########################################################\n# Set your hyperparameters here\n##########################################################\niterations = 3000\nlearning_rate = 1.1\nhidden_nodes = 10\noutput_nodes = 1\n"
] |
[
[
"numpy.dot",
"numpy.random.normal",
"numpy.exp",
"numpy.zeros"
]
] |
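
A small, self-contained illustration of the fan-in-scaled initialization the record above uses (np.random.normal with std = fan_in ** -0.5); the node counts are arbitrary stand-ins:

```python
import numpy as np

input_nodes, hidden_nodes = 56, 10  # arbitrary stand-in layer sizes
w = np.random.normal(0.0, input_nodes ** -0.5, (input_nodes, hidden_nodes))
print(w.std())                      # close to 1 / sqrt(56) ~= 0.13
```
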
dhowardCS/git_vscode_demo
|
[
"edbe4397c3b27cd31cfd3036ebf465c40c79432b"
] |
[
"SKlearn Course/knn_classifier1/main.py"
] |
[
"import numpy as np \nimport pandas as pd\nfrom sklearn import neighbors, metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\ndata = pd.read_csv('car.data')\nprint(data.head())"
] |
[
[
"pandas.read_csv"
]
] |
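
The record above is a course stub that stops after loading car.data; a hedged sketch of how its unused imports would typically be wired up (which columns are features versus the class label is an assumption here, not something the stub states):

```python
import pandas as pd
from sklearn import neighbors, metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('car.data')
encoded = data.apply(LabelEncoder().fit_transform)  # encode every column
X = encoded.iloc[:, :-1]  # assumed: all but the last column are features
y = encoded.iloc[:, -1]   # assumed: last column is the class label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print(metrics.accuracy_score(y_test, knn.predict(X_test)))
```
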
dotrungkien3210/paper_faceNet
|
[
"2a371e00e0d5faa717c66e289f102993ec712311"
] |
[
"plot.py"
] |
[
"import numpy as np\nimport os.path\nfrom model import create_model\nimport cv2\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport warnings\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nnn4_small2_pretrained = create_model()\nnn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')\n\nclass IdentityMetadata():\n def __init__(self, base, name, file):\n # dataset base directory\n self.base = base\n # identity name\n self.name = name\n # image file name\n self.file = file\n\n def __repr__(self):\n return self.image_path()\n\n def image_path(self):\n return os.path.join(self.base, self.name, self.file)\n\n\ndef load_metadata(path):\n metadata = []\n for i in sorted(os.listdir(path)):\n for f in sorted(os.listdir(os.path.join(path, i))):\n # Check file extension. Allow only jpg/jpeg' files.\n ext = os.path.splitext(f)[1]\n if ext == '.jpg' or ext == '.png':\n metadata.append(IdentityMetadata(path, i, f))\n return np.array(metadata)\n\n\nmetadataTrain = load_metadata('face_datasetTrain')\nmetadataTest = load_metadata('face_datasetTest')\ndef load_image(path):\n img = cv2.imread(path, 1)\n # OpenCV loads images with color channels\n # in BGR order. So we need to reverse them\n return img[...,::-1]\n\nembedded = np.zeros((metadataTrain.shape[0], 128))\n\nfor i, m in enumerate(metadataTrain):\n img = load_image(m.image_path())\n # scale RGB values to interval [0,1]\n img = (img / 255.).astype(np.float32)\n # obtain embedding vector for image\n embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]\n\n\ndef distance(emb1, emb2):\n return np.sum(np.square(emb1 - emb2))\n\ndef show_pair(idx1, idx2):\n #plt.figure(figsize=(8,3))\n #plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')\n print(distance(embedded[idx1], embedded[idx2]))\n #plt.subplot(121)\n #cv2.imshow(\"anh1\",load_image(metadataTrain[idx1].image_path()))\n #plt.subplot(122)\n #cv2.imshow(\"anh2\",load_image(metadataTrain[idx2].image_path()));\n #cv2.waitKey()\n\n'''load cho một hs\nfor i in range(10,20):\n for j in range(i + 1, 20):\n show_pair(i, j)'''\n\n'''tạo 2 mảng khoảng cách và gán nhán tương ứng với nhau'''\n\ndistances = [] # squared L2 distance between pairs\nidentical = [] # 1 if same identity, 0 otherwise\n\nnum = len(metadataTrain)\n'''k = 0\nfor i in range(num):\n df = pd.DataFrame({'stt': [k], 'name': [metadataTrain[i].name]})\n df.to_csv('test.csv', mode='a', header=False)\n if(metadataTrain[i].name != metadataTrain[i+1].name):\n k = k+1'''\n\n\ndf2 = pd.read_csv('test.csv', date_parser={'x': eval, 'y': eval})\n\n\nk=0\nfor i in range(num - 1):\n for j in range(i + 1, num):\n distances.append(k if distance(embedded[i], embedded[j]) < 0.4 else 0)\n identical.append(k if metadataTrain[i].name == metadataTrain[j].name else 0)\n if(metadataTrain[i].name != metadataTrain[i+1].name and i < num - 1):\n k = k + 1\ndistances = np.array(distances)\nidentical = np.array(identical)\ncnf_matrix = confusion_matrix(distances, identical)\n\ncm_df = pd.DataFrame(cnf_matrix,\n index = ['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32'],\n columns = 
['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32'])\n#Plotting the confusion matrix\n\nsns.heatmap(cm_df, annot=True)\nplt.title('Confusion Matrix')\nplt.ylabel('Actual Values')\nplt.xlabel('Predicted Values')\nplt.show()\n\n"
] |
[
[
"numpy.square",
"pandas.read_csv",
"numpy.expand_dims",
"matplotlib.pyplot.title",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
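
A self-contained sketch of the confusion-matrix plotting pattern at the end of the record above, on toy labels instead of the embedding-distance predictions:

```python
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 1, 1, 2, 2])  # toy stand-in labels
y_pred = np.array([0, 1, 1, 1, 2, 0])
cm_df = pd.DataFrame(confusion_matrix(y_true, y_pred))

sns.heatmap(cm_df, annot=True)
plt.title('Confusion Matrix')
plt.ylabel('Actual Values')
plt.xlabel('Predicted Values')
plt.show()
```
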
Jannkar/doom_actionspace
|
[
"37663341f60a05943202b77394a4203d070fad95"
] |
[
"agent_stable_baselines/stable_baselines/deepq/experiments/enjoy_mountaincar.py"
] |
[
"import argparse\r\n\r\nimport gym\r\nimport numpy as np\r\n\r\nfrom stable_baselines.deepq import DQN\r\n\r\n\r\ndef main(args):\r\n \"\"\"\r\n Run a trained model for the mountain car problem\r\n\r\n :param args: (ArgumentParser) the input arguments\r\n \"\"\"\r\n env = gym.make(\"MountainCar-v0\")\r\n model = DQN.load(\"mountaincar_model.pkl\", env)\r\n\r\n while True:\r\n obs, done = env.reset(), False\r\n episode_rew = 0\r\n while not done:\r\n if not args.no_render:\r\n env.render()\r\n # Epsilon-greedy\r\n if np.random.random() < 0.02:\r\n action = env.action_space.sample()\r\n else:\r\n action, _ = model.predict(obs, deterministic=True)\r\n obs, rew, done, _ = env.step(action)\r\n episode_rew += rew\r\n print(\"Episode reward\", episode_rew)\r\n # No render is only used for automatic testing\r\n if args.no_render:\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description=\"Enjoy trained DQN on MountainCar\")\r\n parser.add_argument('--no-render', default=False, action=\"store_true\", help=\"Disable rendering\")\r\n args = parser.parse_args()\r\n main(args)\r\n"
] |
[
[
"numpy.random.random"
]
] |
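
The single numpy call in the record above implements epsilon-greedy exploration; isolated below, with the record's 0.02 rate kept and everything else a stand-in:

```python
import numpy as np

def epsilon_greedy(greedy_action, n_actions, eps=0.02):
    # with probability eps, take a uniformly random action instead
    if np.random.random() < eps:
        return np.random.randint(n_actions)
    return greedy_action

print(epsilon_greedy(greedy_action=1, n_actions=3))
```
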
fmfn/lifelines
|
[
"fec81897674ebeb3223efba48b99e7b1302cdf9e"
] |
[
"lifelines/fitters/SBGSurvival.py"
] |
[
"from __future__ import print_function\nfrom lifelines.utils.DataHandler import DataHandler\nfrom lifelines.utils.ShiftedBetaGeometric import ShiftedBetaGeometric\nimport numpy as np\nimport pandas as pd\n\n\nclass SBGSurvival(object):\n \"\"\"\n This class implements an extended version of the Shifted-Beta-Geometric\n model by P. Fader and B. Hardie.\n\n The original model works by assuming a constant in time, beta distributed\n individual probability of churn. Due to the heterogeneity of a cohort's\n churn rates (since each individual will have a different probability of\n churning), expected behaviours such as the decrease of cohort churn rate\n over time arise naturally.\n\n The extension done here generalizes the coefficients alpha and beta of the\n original model to function of features on the individual level. A\n log-linear model is used to construct alpha(x) and beta(x) and the\n likelihood is then computed by combining the contributions of each and\n every sample in the training set.\n\n The model takes as inputs ...\n \"\"\"\n\n def __init__(self,\n age,\n alive,\n features=None,\n gamma=1.0,\n gamma_beta=None,\n bias=True,\n normalize=True,\n verbose=False):\n \"\"\"\n Initializes objects with parameters necessary to create the supporting\n objects: DataHandler and ShiftedBeta\n\n :param age: str\n The column name to identify the age of each individual. Age has to\n be an integer value, and will determine the time intervals the\n model with work with.\n --- See DataHandler.py\n\n :param alive: str\n The column name with the status of each individual. In the context\n of survival analysis, an individual may be dead or alive, and its\n contribution to the model will depend on it.\n --- See DataHandler.py\n\n :param features: str, list or None\n A string with the name of the column to be used as features, or a\n list of names of columns to be used as features or None, if no\n features are to be used.\n --- See DataHandler.py\n\n :param gamma: float\n A non-negative float specifying the strength of the regularization\n applied to w_alpha (alpha's weights) and, if gamma_beta is not\n given, it is also applied to beta.\n --- See ShiftedBeta.py\n\n :param gamma_beta: float\n A non-negative float specifying the strength of the regularization\n applied to w_beta (beta's weights). If specified, overwrites the\n value of gamma for beta.\n --- See ShiftedBeta.py\n\n :param bias: bool\n Whether or not a bias term should be added to the feature matrix.\n --- See DataHandler.py\n\n :param normalize: bool\n Whether or not numerical fields should be normalized (centered and\n scaled to have std=1)\n --- See DataHandler.py\n\n :param verbose: bool\n Whether of not status updates should be printed\n --- See ShiftedBeta.py\n \"\"\"\n\n # Create objects!\n # DATA-HANDLER OBJECT\n # The DataHandler object may be created without the training data, so\n # we do it here.\n self.dh = DataHandler(age=age,\n alive=alive,\n features=features,\n bias=bias,\n normalize=normalize)\n\n # Shifted beta model object\n # Was a different gammab parameter passed? 
If not, we use the same\n # value passed to gamma.\n if gamma_beta is None:\n gamma_beta = 1.0 * gamma\n # create shifted beta object\n self.sb = ShiftedBetaGeometric(gamma_alpha=gamma,\n gamma_beta=gamma_beta,\n verbose=verbose)\n\n def fit(self, df, restarts=1):\n \"\"\"\n A method responsible for learning both the transformation of the data,\n including addition of a bias parameters, centering and re-scaling of\n numerical features, and one-hot-encoding of categorical features. In\n addition to learning the parameters alpha and beta of the shifted-beta-\n geometric model.\n\n This is just a wrapper, the real heavy-lifting is done by the\n DataHandler and ShiftedBeta objects.\n\n :param df: pandas DataFrame\n A pandas DataFrame with similar schema as the one used to train\n the model. Similar in the sense that the columns used as cohort,\n age and categories must match. Extra columns with not affect\n anything.\n\n :param restarts: int\n Number of times to restart the optimization procedure with a\n different seed, to avoid getting stuck on local maxima.\n \"\"\"\n # Transform dataframe extracting feature matrix, ages and alive status.\n x, y, z = self.dh.fit_transform(df)\n\n # fit to data using the ShiftedBeta object.\n self.sb.fit(X=x,\n age=y,\n alive=z,\n restarts=restarts)\n\n def summary(self):\n \"\"\"\n Simple method to get the learned weights and their corresponding\n categories\n\n :return: pandas DataFrame\n A DataFrame object with alpha and beta weights for each category\n \"\"\"\n # Construct a DataFrame consisting of feature name and corresponding\n # alpha and beta parameters. Names are obtained by invoking the\n # get_names() method, and the parameter displayed are the weights,\n # not the final values (since that cannot be made sense in separate).\n suma = pd.DataFrame(data={name: (a, b) for name, a, b in\n zip(self.dh.get_names(),\n self.sb.alpha,\n self.sb.beta)},\n index=['w_alpha', 'w_beta']\n ).T\n return suma\n\n def predict_params(self, df):\n \"\"\"\n predict_params is a method capable of predicting the values of alpha\n and beta for given combination of features. It invokes the\n compute_alpha_beta method from the ShiftedBeta object to compute the\n arrays of alpha and beta for every sample in df given the available\n features.\n\n Notice that it must first transform the dataframe df using\n DataHandler's transform method, so that it can than work with the lower\n level feature matrix, x.\n\n :param df: pandas DataFrame\n A pandas dataframe with at least the same feature columns as the\n one used to train the model.\n\n :return: pandas DataFrame\n A DataFrame with the predicted alpha and beta for each sample in df\n \"\"\"\n # Start by transforming df to its lower level np.array representation\n x, y, z = self.dh.transform(df=df)\n\n # Use compute_alpha_beta to compute alpha and beta for every sample in\n # df based on the feature matrix extracted from df, x.\n alpha, beta = self.sb._compute_alpha_beta(x, self.sb.alpha, self.sb.beta)\n\n # Return a dataframe with predictions.\n return pd.DataFrame(data=np.vstack([alpha, beta]),\n index=['alpha', 'beta']).T\n\n def predict_churn(self, df, age=None, **kwargs):\n \"\"\"\n predict_churn is a method to compute churn rate for a number of periods\n conditioned on the age of the sample.\n\n This method invokes the churn_p_of_t method from ShiftedBeta to compute\n the churn rate for a given number of periods conditional on age. 
See\n the description of churn_p_of_t in ShiftedBeta.py for more details.\n\n This method is a wrapper, it transforms the dataframe df to the\n appropriate representation and feed it to the lower level method from\n ShiftedBeta.\n\n It is worth noticing that the user has the option to pass the value for\n age, which can wither be a single number of an array with the same\n length as df, and this will overwrite whatever other value for age\n might come out when transforming df.\n\n :param df: pandas DataFrame\n A pandas dataframe with at least the same feature columns as the\n one used to train the model.\n\n :param age: None or float or ndarray of shape(df.shape[0], )\n If age is None, the method will use the age parameter extracted\n from df.\n ** Notice that if age=None and df does not contain an age field,\n a RuntimeError will be raised! **\n If age != None, pass this value along to churn_p_of_t.\n\n :param kwargs:\n Any other arguments that should be redirected to churn_p_of_t.\n\n :return: pandas DataFrame\n A DataFrame with the churn_p_of_t matrix.\n \"\"\"\n x, y, z = self.dh.transform(df=df)\n\n # If age field is present in prediction dataframe, we may choose to\n # use it to calculate future churn. To do so, we first check if the\n # user passed a new age parameter, if answer is yes, use the new age.\n # If, however, the user did not pass age, use the value extracted from\n # the dataframe, df.\n # ** If no value for age is passed and the dataframe does not contain\n # age, a RuntimeError is raised.\n if age is None:\n age = y\n if age is None:\n raise RuntimeError('The \"age\" field must either be present in '\n 'the dataframe or passed separately as an '\n 'argument.')\n\n # Create a dataframe with the churn_p_of_t matrix with all relevant\n # parameters.\n out = pd.DataFrame(data=self.sb.churn_p_of_t(x, age=age, **kwargs))\n\n # Give columns a decent, generic name.\n out.columns = ['period_{}'.format(col)\n for col in range(1, out.shape[1] + 1)]\n\n return out\n\n def predict_survival(self, df, age=None, **kwargs):\n \"\"\"\n predict_survival is a method to compute the survival curve for a number\n of periods conditioned on the age of the sample.\n\n This method invokes the survival_function method from ShiftedBeta to\n compute the retention rate for a given number of periods conditional\n on age. See the description of survival_function in ShiftedBeta.py for\n more details.\n\n This method is a wrapper, it transforms the dataframe df to the\n appropriate representation and feed it to the lower level method from\n ShiftedBeta.\n\n It is worth noticing that the user has the option to pass the value for\n age, which can wither be a single number of an array with the same\n length as df, and this will overwrite whatever other value for age\n might come out when transforming df.\n\n :param df: pandas DataFrame\n A pandas dataframe with at least the same feature columns as the\n one used to train the model.\n\n :param age: None or float or ndarray of shape(df.shape[0], )\n If age is None, the method will use the age parameter extracted\n from df.\n ** Notice that if age=None and df does not contain an age field,\n a RuntimeError will be raised! 
**\n If age != None, pass this value along to survival_function.\n\n :param kwargs:\n Any other arguments that should be redirected to survival_function.\n\n :return: pandas DataFrame\n A DataFrame with the survival_function matrix.\n \"\"\"\n x, y, z = self.dh.transform(df=df)\n\n # If age field is present in prediction dataframe, we may choose to\n # use it to calculate future churn. To do so, we first check if the\n # user passed a new age parameter, if answer is yes, use the new age.\n # If, however, the user did not pass age, use the value extracted from\n # the dataframe, df.\n # ** If no value for age is passed and the dataframe does not contain\n # age, a RuntimeError is raised.\n if age is None:\n age = y\n if age is None:\n raise RuntimeError('The \"age\" field must either be present in '\n 'the dataframe or passed separately as an '\n 'argument.')\n\n # Create a dataframe with the churn_p_of_t matrix with all relevant\n # parameters.\n out = pd.DataFrame(data=self.sb.survival_function(x,\n age=age,\n **kwargs))\n\n # Give columns a decent, generic name.\n out.columns = ['period_{}'.format(col)\n for col in range(1, out.shape[1] + 1)]\n\n return out\n\n def predict_ltv(self, df, age=None, alive=None, **kwargs):\n \"\"\"\n predict_ltv is a method to compute the ltv for each sample conditioned\n on age.\n\n This method invokes the derl method from ShiftedBeta to compute\n the residual ltv of each sample given its given age. See the\n description of derl in ShiftedBeta.py for more details.\n\n This method is a wrapper, it transforms the dataframe df to the\n appropriate representation and feed it to the lower level method from\n ShiftedBeta.\n\n It is worth noticing that the user has the option to pass the value for\n both age and alive fields, which can wither be a single number of an\n array with the same length as df, and this will overwrite whatever\n other value for age and/or alive might come out when transforming df.\n\n :param df: pandas DataFrame\n A pandas dataframe with at least the same feature columns as the\n one used to train the model.\n\n :param age: None or float or ndarray of shape(df.shape[0], )\n If age is None, the method will use the age parameter extracted\n from df.\n ** Notice that if age=None and df does not contain an age field,\n a RuntimeError will be raised! **\n If age != None, pass this value along to derl.\n\n :param alive: None or float or ndarray of shape(df.shape[0], )\n If age is None, the method will use the alive parameter extracted\n from df.\n ** Notice that if alive=None and df does not contain an alive\n field, a RuntimeError will be raised! **\n If alive != None, pass this value along to derl.\n\n :param kwargs:\n Any other arguments that should be redirected to derl.\n\n :return: pandas DataFrame\n A DataFrame with the ltv predictions.\n \"\"\"\n x, y, z = self.dh.transform(df=df)\n\n # If age field is present in prediction dataframe, we may choose to\n # use it to calculate future churn. 
To do so, we first check if the\n # user passed a new age parameter, if answer is yes, use the new age.\n # If, however, the user did not pass age, use the value extracted from\n # the dataframe, df.\n # ** If no value for age is passed and the dataframe does not contain\n # age, a RuntimeError is raised.\n if age is None:\n age = y\n if age is None:\n raise RuntimeError('The \"age\" field must either be present in '\n 'the dataframe or passed separately as an '\n 'argument.')\n\n # See the discussion above for age, exact same logic applies.\n if alive is None:\n alive = z\n if alive is None:\n raise RuntimeError('The \"alive\" must either be present in the '\n 'dataframe or passed separately as an '\n 'argument.')\n\n # Get LTVs and return a dataframe!\n ltvs = self.sb.derl(x, age=age, alive=alive, **kwargs)\n\n return pd.DataFrame(data=ltvs, columns=['ltv'])\n"
] |
[
[
"numpy.vstack",
"pandas.DataFrame"
]
] |
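
The shifted-beta-geometric model that the record above wraps has a simple closed-form churn recursion (Fader & Hardie); a numeric sketch of that recursion, written from the published formulas rather than the lifelines internals:

```python
def sbg_churn_probabilities(alpha, beta, n_periods):
    # P(T=1) = alpha / (alpha + beta)
    # P(T=t) = P(T=t-1) * (beta + t - 2) / (alpha + beta + t - 1) for t >= 2
    probs = [alpha / (alpha + beta)]
    for t in range(2, n_periods + 1):
        probs.append(probs[-1] * (beta + t - 2) / (alpha + beta + t - 1))
    return probs

print(sbg_churn_probabilities(alpha=1.0, beta=2.0, n_periods=4))
```
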
Space0726/FontTools
|
[
"a322a9bc403e93b0b32856a461fa6bf384d921e9"
] |
[
"tools/derivativetools.py"
] |
[
"\"\"\" Font tools for calculating derivative and using it.\n\nLast modified date: 2019/08/17\n\nCreated by Seongju Woo.\n\"\"\"\nimport math\nimport numpy as np\nimport bezier\nfrom fwig.tools import appendtools\n\ndef _calculate_distance(point_1, point_2):\n return math.sqrt(pow(point_1[0]-point_2[0], 2)\n + pow(point_1[1]-point_2[1], 2))\n\ndef _is_curve_meet(curve_1, curve_2):\n if curve_2.intersect(curve_1)[0, :]:\n return True\n return False\n\ndef calculate_derivative(contour_points, target_index):\n \"\"\" Calculates derivative.\n\n Calculates the derivative of the current point(contour_points[target_index])\n and returned it.\n\n Args:\n contour_points:: [RPoint, RPoint, ...]\n RContour's points(RPoint objects) to be derivative.\n target_index:: int\n Index(at contour_points) of RPoint to be derivative.\n\n Returns:\n derivative value:: int\n The result of derivative calculating.\n \"\"\"\n # Makes currrent point's bezier instance.\n nodes = np.asfortranarray([\n [float(contour_points[target_index+i].x) for i in range(-3, 1)],\n [float(contour_points[target_index+i].y) for i in range(-3, 1)]\n ])\n # Extends the curve for the derivative function.\n curve = bezier.Curve(nodes, degree=3).specialize(0, 1.5)\n\n # Calculates two x value for the derivative function.\n # These are the values from the original value plus and minus the very\n # small value(1e-4).\n current_x, _ = contour_points[target_index].position\n delta_x = 1e-4\n line_1 = bezier.Curve(np.asfortranarray([\n [current_x + delta_x, current_x + delta_x],\n [-1000, 1000]\n ]), degree=1)\n line_2 = bezier.Curve(np.asfortranarray([\n [current_x - delta_x, current_x - delta_x],\n [-1000, 1000]\n ]), degree=1)\n\n # Finds the y value that corresponds to the x value.\n prev_derivative = curve.evaluate(curve.intersect(line_1)[0, :][0])[1][0]\n next_derivative = curve.evaluate(curve.intersect(line_2)[0, :][0])[1][0]\n\n # Returns derivative function value.\n return (prev_derivative-next_derivative) / (2*delta_x)\n\ndef append_point_by_derivative(contour_points, target_index, target_contour):\n \"\"\" Appends point to opposite curve by using derivative.\n\n Appends point to opposite curve using line with gradient(by derivative)\n for pairing. 
It is recommended to use this function from inside(derivative)\n to outside(append point).\n\n Args:\n contour_points:: [RPoint, RPoint, ...]\n RContour's points(RPoint objects) to be derivative.\n target_index:: int\n Index(at contour_points) of RPoint to be derivative.\n target_contour: RContour\n RContour object which containing the opposite curve.\n\n Examples:\n from fontParts.world import CurrentGlyph\n glyph = CurrentGlyph()\n\n # RContour's list of RPoints which you want to derivative.\n contour_points = glyph.contours[0].points\n\n # Index(at contour_points) of RPoint to be derivative.\n target_index = 3\n\n # RContour object which you want to add a point.\n target_contour = glyph.contours[1]\n\n append_point_by_derivative(contour_points,target_index,target_contour)\n \"\"\"\n target_contour_points = target_contour.points\n distance = 0xFFFFFF\n points_to_append, rate = None, 0\n x_value, y_value = contour_points[target_index].position\n\n try:\n # Calculates gradient by derivative.\n gradient = -1 / calculate_derivative(contour_points, target_index)\n # Line's equation.\n linear_function = lambda x: gradient*x + y_value - (x_value*gradient)\n # Extends 500 up and down from standard point.\n line = bezier.Curve(np.asfortranarray([\n [x_value+500, x_value-500],\n [linear_function(x_value+500), linear_function(x_value-500)]\n ]), degree=1)\n except ZeroDivisionError:\n line = bezier.Curve(np.asfortranarray([\n [x_value, x_value],\n [float(y_value+500), float(y_value-500)]\n ]), degree=1)\n\n # Finds what curve in target contour is meeted with line.\n for i, _ in enumerate(target_contour_points):\n if i == target_index and target_contour_points == contour_points:\n continue\n if target_contour_points[i].type != 'offcurve' \\\n and target_contour_points[i-1].type == 'offcurve':\n nodes = np.asfortranarray([\n [float(target_contour_points[i+j].x) for j in range(-3, 1)],\n [float(target_contour_points[i+j].y) for j in range(-3, 1)]\n ])\n curve = bezier.Curve(nodes, degree=3)\n\n # If line meet curve.\n if _is_curve_meet(line, curve):\n meeting_object = curve.evaluate(curve.intersect(line)[0, :][0])\n meeting_point = tuple(meeting_object.flatten())\n new_distance = _calculate_distance( \\\n contour_points[target_index].position, meeting_point)\n # Finds nearest curve.\n if new_distance < distance:\n distance = new_distance\n points_to_append = [target_contour_points[i+j] \\\n for j in range(-3, 1)]\n rate = curve.locate(meeting_object)\n\n # Appends point at target curve.\n if points_to_append and rate:\n appendtools.append_point_rate(target_contour, points_to_append, rate)\n"
] |
[
[
"numpy.asfortranarray"
]
] |
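
The record above repeatedly builds cubic Bezier segments from 2 x 4 Fortran-ordered control-point arrays; a minimal sketch of that construction with arbitrary control points:

```python
import numpy as np
import bezier

nodes = np.asfortranarray([
    [0.0, 1.0, 2.0, 3.0],  # x coordinates of the 4 control points
    [0.0, 2.0, 2.0, 0.0],  # y coordinates
])
curve = bezier.Curve(nodes, degree=3)
print(curve.evaluate(0.5))  # point on the curve at parameter s = 0.5
```
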
francescobarbara/idad
|
[
"7931daeec5ae7db0c212d0b13f3c13d4784ecfdb"
] |
[
"neural/critics.py"
] |
[
"from collections import OrderedDict\n\nimport torch\nfrom torch import nn\n\n\n## MI critics\nclass CriticDotProd(nn.Module):\n \"\"\"\n Separable critic\n\n returns:\n scores_joint: tensor of shape [batch_size, batch_size] where only non-zero terms are on the diagonal\n scores_prod: tensor of shape [batch_size, batch_size] where the diagonal terms are all zeros\n \"\"\"\n\n def __init__(\n self, history_encoder_network, latent_encoder_network,\n ):\n super().__init__()\n self.critic_type = \"separable\"\n self.history_encoder_network = history_encoder_network\n self.latent_encoder_network = latent_encoder_network\n\n def forward(self, latent, *design_obs_pairs):\n history_encoding = self.history_encoder_network(*design_obs_pairs)\n latent_encoding = self.latent_encoder_network(latent)\n\n pos_mask = torch.eye(history_encoding.shape[0], device=history_encoding.device)\n neg_mask = 1.0 - pos_mask\n\n # we get (N^2 - batch_size) terms for \"free\" by reusing sampled data\n score_matrix = torch.matmul(history_encoding, latent_encoding.T)\n scores_joint = score_matrix * pos_mask\n scores_prod = score_matrix * neg_mask\n return scores_joint, scores_prod\n\n\nclass CriticJointNetwork(nn.Module):\n \"\"\"Joint critic\n fc_layers : nn.Sequential instance, should return output of size 1\n if not specified, default is to do linear -> Relu -> output\n returns:\n scores_joint: tensor of shape [batch_size, 1 + num_negative_samples]\n The first column contains the positive examples scores; the rest are 0.\n scores_prod: tensor of shape [batch_size, 1 + num_negative_samples];\n The first column is 0s; the rest contain the negative examples scores.\n \"\"\"\n\n def __init__(\n self, history_encoder_network, latent_encoder_network, head_layer=None\n ):\n super().__init__()\n self.critic_type = \"joint\"\n self.history_encoder_network = history_encoder_network\n self.latent_encoder_network = latent_encoder_network\n\n if head_layer is not None:\n self.head_layer = head_layer\n else:\n ## [!] relying on encoder netowkrs having .encoding_dim attributes ##\n input_dim = (\n latent_encoder_network.encoding_dim\n + history_encoder_network.encoding_dim\n )\n\n self.head_layer = nn.Sequential(\n OrderedDict(\n [\n (\"critic_l1\", nn.Linear(input_dim, 512)),\n (\"critic_relu1\", nn.ReLU()),\n # (\"critic_l2\", nn.Linear(2 * input_dim, input_dim)),\n # (\"critic_relu2\", nn.ReLU()),\n (\"critic_output\", nn.Linear(512, 1)),\n ]\n )\n )\n\n def forward(self, latent, *design_obs_pairs):\n # Latents is a tensor of dim [batch_samples, negativesamples + 1, encodning dim]\n latent_encoding = self.latent_encoder_network(latent)\n history_encoding = self.history_encoder_network(*design_obs_pairs)\n # expand the middle dimension (i.e. 
negative samples)\n history_encoding = history_encoding.unsqueeze(1).expand(latent_encoding.shape)\n\n inputs = torch.cat([history_encoding, latent_encoding], axis=-1)\n # remove last dim (output (ie score_matrix last dim) is of size 1):\n score_matrix = self.head_layer(inputs).squeeze(-1)\n\n pos_mask = score_matrix.new_zeros(score_matrix.shape)\n pos_mask[:, 0] = 1.0 # this is the unshuffled latent\n neg_mask = 1.0 - pos_mask\n\n scores_joint = score_matrix * pos_mask\n scores_prod = score_matrix * neg_mask\n\n return scores_joint, scores_prod\n\n\nclass CriticBA(nn.Module):\n \"\"\"Barber Agakov variational critic\n fc_layers : nn.Sequential instance, should return output of size 1\n if not specified, default is to do linear -> Relu -> output\n returns:\n scores_joint: tensor of shape [batch_size, 1 + num_negative_samples]\n The first column contains the positive examples scores; the rest are 0.\n scores_prod: tensor of shape [batch_size, 1 + num_negative_samples];\n The first column is 0s; the rest contain the negative examples scores.\n \"\"\"\n\n def __init__(\n self,\n latent_dim,\n history_encoder_network,\n # latent_encoder_network,\n head_layer_mean=None,\n head_layer_sd=None,\n ):\n super().__init__()\n self.critic_type = \"joint\"\n self.history_encoder_network = history_encoder_network\n # self.latent_encoder_network = latent_encoder_network\n\n ## [!] relying on encoder networkrs having .encoding_dim attributes ##\n input_dim = history_encoder_network.encoding_dim\n ## [!] relying on latent encoder networkr having .input_dim_flat attribute ##\n # this is the dimension of the latent\n # this is to set the output dimension equal to the dim of the latent.\n def _reshape_input(x):\n return x.flatten(-2)\n\n def _id(x):\n return x\n\n if isinstance(latent_dim, int):\n latent_dim_flat = latent_dim\n self._prepare_input = _id\n else:\n latent_dim_flat = latent_dim[0] * latent_dim[1]\n self._prepare_input = _reshape_input\n\n if head_layer_mean is not None:\n self.head_layer_mean = head_layer_mean\n else:\n self.head_layer_mean = nn.Sequential(\n OrderedDict(\n [\n (\"critic_ba_l1_mean\", nn.Linear(input_dim, 512)),\n (\"critic_ba_relu1_mean\", nn.ReLU()),\n (\"critic_ba_output_mean\", nn.Linear(512, latent_dim_flat)),\n ]\n )\n )\n if head_layer_sd is not None:\n self.head_layer_sd = head_layer_sd\n else:\n self.head_layer_sd = nn.Sequential(\n OrderedDict(\n [\n (\"critic_ba_l1_sd\", nn.Linear(input_dim, 512)),\n (\"critic_ba_relu1_sd\", nn.ReLU()),\n (\"critic_ba_output_sd\", nn.Linear(512, latent_dim_flat)),\n (\"critic_ba_softplus\", nn.Softplus()),\n ]\n )\n )\n\n def get_variational_params(self, *design_obs_pairs):\n history_encoding = self.history_encoder_network(*design_obs_pairs)\n mean = self.head_layer_mean(history_encoding)\n sd = 1e-5 + self.head_layer_sd(history_encoding)\n return mean, sd\n\n def forward(self, latent, *design_obs_pairs):\n latent_flat = self._prepare_input(latent)\n mean, sd = self.get_variational_params(*design_obs_pairs)\n log_probs_q = (\n torch.distributions.Normal(loc=mean, scale=sd)\n .log_prob(latent_flat)\n .sum(axis=-1)\n )\n\n return log_probs_q\n"
] |
[
[
"torch.nn.Softplus",
"torch.cat",
"torch.eye",
"torch.nn.Linear",
"torch.matmul",
"torch.distributions.Normal",
"torch.nn.ReLU"
]
] |
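
The separable critic in the record above scores all (history, latent) pairs in one matrix product and splits joint from product terms with diagonal masks; a toy-sized sketch of exactly that masking:

```python
import torch

h = torch.randn(4, 8)          # stand-in history encodings
z = torch.randn(4, 8)          # stand-in latent encodings

scores = torch.matmul(h, z.T)  # [batch, batch] score matrix
pos_mask = torch.eye(4)
scores_joint = scores * pos_mask         # matched pairs on the diagonal
scores_prod = scores * (1.0 - pos_mask)  # mismatched (negative) pairs
print(scores_joint.shape, scores_prod.shape)
```
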
Juanjoglvz/MachineLearning
|
[
"2d979978448bf14c628dad0d8b87062e5687a101"
] |
[
"src/visualization/Interpretation.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import preprocessing \nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom sklearn.decomposition import PCA\n\n# Read the data and load it into memory\ndf_T = pd.read_csv(\"../../data/processed/T2_Accelerometer.csv\")\n\n# Principal Component Analysis\n#1 Scalation\n\nscaler = preprocessing.MinMaxScaler()\ndatanorm = scaler.fit_transform(df_T)\n\n#2 Modelling (PCA - 2 components)\n\nn_components = 2\nestimator = PCA (n_components)\nX_pca = estimator.fit_transform(datanorm)\n\n# is it representative?\nprint (estimator.explained_variance_ratio_)\n\n\n\n# Plot the PCA result\nx = X_pca[:,0]\ny = X_pca[:,1]\nplt.scatter(x,y)\nplt.savefig(\"../../reports/figures/PCA_Plot_Accelerometer_Day2_Selected\")\nplt.show()"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"sklearn.preprocessing.MinMaxScaler"
]
] |
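
The record above is the standard scale-then-project PCA recipe; the same pipeline on random stand-in data, with the explained-variance check it prints:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA

X = np.random.rand(100, 5)                  # stand-in for the CSV data
X_norm = MinMaxScaler().fit_transform(X)

estimator = PCA(n_components=2)
X_pca = estimator.fit_transform(X_norm)
print(estimator.explained_variance_ratio_)  # how representative are 2 PCs?
```
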
zhivko/tensortrade
|
[
"af7a4a323415457d8ddb3befa3dabeac1844fdd0"
] |
[
"examples/myexample/main.py"
] |
[
"from tensortrade.oms.instruments import Instrument, BTC, USD\nfrom tensortrade.env.default.actions import BSH\n\nfrom tensortrade.env.generic import Renderer\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport ray\nimport numpy as np\nimport pandas as pd\n\nimport tensortrade.env.default as default\n\nfrom tensortrade.feed.core import DataFeed, Stream\nfrom tensortrade.oms.exchanges import Exchange\nfrom tensortrade.oms.exchanges import ExchangeOptions\nfrom tensortrade.oms.services.execution.simulated import execute_order\nfrom tensortrade.oms.wallets import Wallet, Portfolio\n\nfrom tensortrade.data.cdd import CryptoDataDownload\n\nfrom ray import tune\nfrom ray.tune.registry import register_env\nimport ray.rllib.agents.ppo as ppo\n\nimport ray\nfrom ray.rllib.utils.filter import MeanStdFilter\n\nimport torch\n\nprint(torch.zeros(1).cuda())\nprint(\"Torch-cuda available?: \" + str(torch.cuda.is_available()))\n\n\nclass PositionChangeChart(Renderer):\n\n def __init__(self, color: str = \"orange\"):\n self.color = \"orange\"\n\n def render(self, env, **kwargs):\n history = pd.DataFrame(env.observer.renderer_history)\n\n actions = list(history.action)\n p = list(history.price)\n\n buy = {}\n sell = {}\n\n for i in range(len(actions) - 1):\n a1 = actions[i]\n a2 = actions[i + 1]\n\n if a1 != a2:\n if a1 == 0 and a2 == 1:\n buy[i] = p[i]\n else:\n sell[i] = p[i]\n\n buy = pd.Series(buy)\n sell = pd.Series(sell)\n\n fig, axs = plt.subplots(1, 2, figsize=(15, 5))\n\n fig.suptitle(\"Performance\")\n\n axs[0].plot(np.arange(len(p)), p, label=\"price\", color=self.color)\n axs[0].scatter(buy.index, buy.values, marker=\"^\", color=\"green\")\n axs[0].scatter(sell.index, sell.values, marker=\"^\", color=\"red\")\n axs[0].set_title(\"Trading Chart\")\n\n performance = pd.DataFrame.from_dict(env.action_scheme.portfolio.performance, orient='index') # changed ISO\n performance.plot(ax=axs[1])\n axs[1].set_title(\"Net Worth\")\n\n plt.show()\n\n\ndef create_env(config, train=\"train\"):\n cdd = CryptoDataDownload()\n data = cdd.fetch(\"Bitstamp\", \"USD\", \"BTC\", \"1h\")\n if False:\n data.close = data.close / 20 + range(len(data))\n print(\"genenrating fake increase\")\n if train == \"train\":\n data = data[0:int(len(data) / 2)] # training\n print(\"using first half for training\")\n elif train == \"eval\":\n data = data[int(len(data) / 2):] # validation\n print(\"using second half for eval\")\n else:\n print(\"using all data\")\n\n pclose = Stream.source(list(data.close), dtype=\"float\").rename(\"USD-BTC\")\n pmin = Stream.source(list(data.low), dtype=\"float\").rename(\"USD-BTClow\")\n pmax = Stream.source(list(data.high), dtype=\"float\").rename(\"USD-BTChigh\")\n\n pmin = Stream.source(list(data.low), dtype=\"float\").rename(\"USD-BTClow\")\n pmax = Stream.source(list(data.high), dtype=\"float\").rename(\"USD-BTChigh\")\n\n pmin3 = pmin.rolling(window=3).min()\n pmin10 = pmin.rolling(window=10).min()\n pmin20 = pmin.rolling(window=20).min()\n pmax3 = pmax.rolling(window=3).max()\n pmax10 = pmax.rolling(window=10).max()\n pmax20 = pmax.rolling(window=20).max()\n\n eo = ExchangeOptions(commission=0.002) #\n coinbase = Exchange(\"coinbase\", service=execute_order, options=eo)(\n pclose\n )\n\n cash = Wallet(coinbase, 100000 * USD)\n asset = Wallet(coinbase, 0 * BTC)\n\n portfolio = Portfolio(USD, [\n cash,\n asset\n ])\n\n feed = DataFeed([\n\n (pclose.log() - pmin3.log()).fillna(0).rename(\"relmin3\"),\n (pclose.log() - pmin10.log()).fillna(0).rename(\"relmin10\"),\n (pclose.log() - 
pmin20.log()).fillna(0).rename(\"relmin20\"),\n (pclose.log() - pmax3.log()).fillna(0).rename(\"relmax3\"),\n (pclose.log() - pmax10.log()).fillna(0).rename(\"relmax10\"),\n (pclose.log() - pmax20.log()).fillna(0).rename(\"relmax20\"),\n\n ])\n\n action_scheme = BSH(cash=cash, asset=asset)\n\n renderer_feed = DataFeed([\n Stream.source(list(data.close), dtype=\"float\").rename(\"price\"),\n Stream.sensor(action_scheme, lambda s: s.action, dtype=\"float\").rename(\"action\") # only works for BSH\n ])\n\n environment = default.create(\n\n feed=feed,\n portfolio=portfolio,\n action_scheme=action_scheme,\n reward_scheme=\"simple\",\n renderer_feed=renderer_feed,\n renderer=PositionChangeChart(),\n window_size=config[\"window_size\"],\n min_periods=20,\n max_allowed_loss=0.6\n )\n return environment\n\n\nregister_env(\"TradingEnv\", create_env)\n#register_env(\"TradingEnv\", lambda _: TradingEnv(num_agents=3))\n\nwindow_size = 30\n\nray.init(local_mode=True)\n\n# Get checkpoint\n# c:\\work\\git\\tensortrade\\examples\\myexample\\Experiments\\PPO\\PPO_TradingEnv_2eecd_00000_0_2021-02-15_20-08-27\\checkpoint_1120\\checkpoint-1120\npname = \"PPO_TradingEnv_222c2_00000_0_2021-02-16_07-42-27\"\ncheckpoint_path = \"c:/work/git/tensortrade/examples/myexample/Experiments/PPO/\" + pname + \"/checkpoint_1140/checkpoint-1140\"\n\nanalysis = tune.run(\n \"PPO\",\n stop={\n \"episode_reward_mean\": 2e15,\n \"training_iteration\": 2000\n },\n config={\n \"env\": \"TradingEnv\",\n \"env_config\": {\n \"window_size\": window_size\n },\n \"model\": {\n # Share layers for value function. If you set this to True, it's\n # important to tune vf_loss_coeff.\n # \"vf_share_layers\": True,\n \"vf_share_layers\": False,\n \"fcnet_hiddens\": [32, 16, 16],\n\n \"use_lstm\": True,\n # Max seq len for training the LSTM, defaults to 20.\n \"max_seq_len\": 20,\n # Size of the LSTM cell.\n # \"lstm_cell_size\": 256,\n \"lstm_cell_size\": 32,\n # Whether to feed a_{t-1} to LSTM (one-hot encoded if discrete).\n \"lstm_use_prev_action\": False, # TODO: play with this\n # Whether to feed r_{t-1} to LSTM.\n \"lstm_use_prev_reward\": False,\n # Experimental (only works with `_use_trajectory_view_api`=True):\n # Whether the LSTM is time-major (TxBx..) or batch-major (BxTx..).\n \"_time_major\": False,\n },\n \"log_level\": \"DEBUG\",\n \"framework\": \"torch\",\n \"ignore_worker_failures\": True,\n # \"num_workers\": 3, #max\n #\"num_gpus\": 1,\n \n \"num_workers\": 3, \n \"num_gpus\": 1,\n \"clip_rewards\": False,\n\n \"lr\": 5e-5,\n \"gamma\": 0,\n\n \"observation_filter\": \"MeanStdFilter\",\n\n \"lambda\": 0.72,\n \"vf_loss_coeff\": 1.0,\n \"entropy_coeff\": 0.1,\n\n },\n # checkpoint_freq=200, # new\n checkpoint_freq=20, # new\n checkpoint_at_end=True,\n # restore=\"c:\\work\\klemen\\rlagent\\Experiments\\\",\n local_dir='c:/work/git/tensortrade/examples/myexample/Experiments',\n restore=checkpoint_path\n)\n"
] |
[
[
"pandas.Series",
"torch.zeros",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"torch.cuda.is_available",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.show"
]
] |
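
The PositionChangeChart renderer in the record above turns an action history into buy/sell markers with plain pandas; its detection logic reduces to:

```python
import pandas as pd

actions = [0, 0, 1, 1, 0]    # 0 = in cash, 1 = in asset (BSH scheme)
price = [10, 11, 12, 13, 12]

buy, sell = {}, {}
for i in range(len(actions) - 1):
    if actions[i] != actions[i + 1]:
        # a 0 -> 1 transition is a buy, 1 -> 0 a sell, at the pre-switch price
        (buy if actions[i] == 0 else sell)[i] = price[i]
print(pd.Series(buy), pd.Series(sell))
```
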
Originofamonia/DANN
|
[
"97541e913e050855818f562574b28b5f2b550a1f"
] |
[
"train/main.py"
] |
[
"import random\nimport os\nimport sys\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torchvision import datasets\nfrom torchvision import transforms\nimport numpy as np\n\n\ndef add_path(path):\n if path not in sys.path:\n print('Adding {}'.format(path))\n sys.path.append(path)\n\n\nabs_current_path = os.path.realpath('./')\nroot_path = os.path.join('/', *abs_current_path.split(os.path.sep)[:-1])\nadd_path(root_path)\n\nfrom models.model import CNNModel\nfrom dataset.data_loader import GetLoader\nfrom train.test import test\n\nsource_dataset_name = 'MNIST'\ntarget_dataset_name = 'mnist_m'\nsource_image_root = os.path.join('..', 'dataset', source_dataset_name)\ntarget_image_root = os.path.join('..', 'dataset', target_dataset_name)\nmodel_root = os.path.join('..', 'models')\ncuda = True\ncudnn.benchmark = True\nlr = 1e-3\nbatch_size = 128\nimage_size = 28\nn_epoch = 100\n\nmanual_seed = random.randint(1, 10000)\nrandom.seed(manual_seed)\ntorch.manual_seed(manual_seed)\n\n# load data\n\nimg_transform_source = transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n])\n\nimg_transform_target = transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\n\ndataset_source = datasets.MNIST(\n root='../dataset',\n train=True,\n transform=img_transform_source,\n download=True\n)\n\ndataloader_source = torch.utils.data.DataLoader(\n dataset=dataset_source,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8)\n\ntrain_list = os.path.join(target_image_root, 'mnist_m_train_labels.txt')\n\ndataset_target = GetLoader(\n data_root=os.path.join(target_image_root, 'mnist_m_train'),\n data_list=train_list,\n transform=img_transform_target\n)\n\ndataloader_target = torch.utils.data.DataLoader(\n dataset=dataset_target,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8)\n\n# load model\n\nmy_net = CNNModel()\n\n# setup optimizer\n\noptimizer = optim.Adam(my_net.parameters(), lr=lr)\n\nloss_class = torch.nn.NLLLoss()\nloss_domain = torch.nn.NLLLoss()\n\nif cuda:\n my_net = my_net.cuda()\n loss_class = loss_class.cuda()\n loss_domain = loss_domain.cuda()\n\nfor p in my_net.parameters():\n p.requires_grad = True\n\n# training\n\nfor epoch in range(n_epoch):\n\n len_dataloader = min(len(dataloader_source), len(dataloader_target))\n data_source_iter = iter(dataloader_source)\n data_target_iter = iter(dataloader_target)\n\n i = 0\n while i < len_dataloader:\n\n p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader\n alpha = 2. / (1. 
+ np.exp(-10 * p)) - 1\n\n # training model using source data\n data_source = data_source_iter.next()\n s_img, s_label = data_source\n\n my_net.zero_grad()\n batch_size = len(s_label)\n\n input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)\n class_label = torch.LongTensor(batch_size)\n domain_label = torch.zeros(batch_size)\n domain_label = domain_label.long()\n\n if cuda:\n s_img = s_img.cuda()\n s_label = s_label.cuda()\n input_img = input_img.cuda()\n class_label = class_label.cuda()\n domain_label = domain_label.cuda()\n\n input_img.resize_as_(s_img).copy_(s_img)\n class_label.resize_as_(s_label).copy_(s_label)\n\n class_output, domain_output = my_net(input_data=input_img, alpha=alpha)\n err_s_label = loss_class(class_output, class_label)\n err_s_domain = loss_domain(domain_output, domain_label)\n\n # training model using target data\n data_target = data_target_iter.next()\n t_img, _ = data_target\n\n batch_size = len(t_img)\n\n input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)\n domain_label = torch.ones(batch_size)\n domain_label = domain_label.long()\n\n if cuda:\n t_img = t_img.cuda()\n input_img = input_img.cuda()\n domain_label = domain_label.cuda()\n\n input_img.resize_as_(t_img).copy_(t_img)\n\n _, domain_output = my_net(input_data=input_img, alpha=alpha)\n err_t_domain = loss_domain(domain_output, domain_label)\n err = err_t_domain + err_s_domain + err_s_label\n err.backward()\n optimizer.step()\n\n i += 1\n\n print('epoch: %d, [iter: %d / all %d], err_s_label: %f, err_s_domain: %f, err_t_domain: %f' \\\n % (epoch, i, len_dataloader, err_s_label.cpu().data.numpy(),\n err_s_domain.cpu().data.numpy(), err_t_domain.cpu().data.numpy()))\n\n torch.save(my_net, '{0}/mnist_mnistm_model_epoch_{1}.pth'.format(model_root, epoch))\n test(source_dataset_name, epoch)\n test(target_dataset_name, epoch)\n"
] |
[
[
"numpy.exp"
]
] |
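
The one numpy call in the record above is DANN's gradient-reversal weighting schedule; isolated, it is a scaled logistic ramp over training progress p in [0, 1]:

```python
import numpy as np

def dann_alpha(p):
    # alpha ramps smoothly from 0 toward 1 as training progresses
    return 2.0 / (1.0 + np.exp(-10.0 * p)) - 1.0

for p in (0.0, 0.25, 0.5, 1.0):
    print(p, dann_alpha(p))
```
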
94mia/DeepLEGO
|
[
"9458b13da6117f6054ce406bdb3942358e2cd764"
] |
[
"heads/pspnet.py"
] |
[
"'''\nRe-implementation of head module of PSPNet introduced in paper [1]\nThe structure of this module refers to the Caffe implementation from [2]\n\nReference:\n[1] Pyramid Scene Parsing Network\n https://arxiv.org/abs/1612.01105\n[2] hszhao/PSPNet/evaluation/prototxt/pspnet101_cityscapes_713.prototxt\n https://github.com/hszhao/PSPNet/blob/4b53f1c97a5921a99a965a60c0940eec2d46bb06/evaluation/prototxt/pspnet101_cityscapes_713.prototxt\n'''\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\n\n\ndef conv_bn_relu(in_channels, out_channels, kernel_size=1):\n \"\"\" 1x1 Convolution with batch norm and relu \"\"\"\n pad = (kernel_size-1) // 2\n return nn.Sequential(nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, padding=pad, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()).cuda()\n\n\nclass PSP(nn.Module):\n def __init__(self, params):\n super(PSP, self).__init__()\n\n self.pool1 = nn.AdaptiveAvgPool2d((1, 1))\n self.pool2 = nn.AdaptiveAvgPool2d((2, 2))\n self.pool3 = nn.AdaptiveAvgPool2d((3, 3))\n self.pool4 = nn.AdaptiveAvgPool2d((6, 6))\n\n self.conv1 = conv_bn_relu(params.output_channels, 512)\n self.conv2 = conv_bn_relu(params.output_channels, 512)\n self.conv3 = conv_bn_relu(params.output_channels, 512)\n self.conv4 = conv_bn_relu(params.output_channels, 512)\n\n self.conv5 = conv_bn_relu(512*4+params.output_channels, 512, 3)\n self.class_conv = nn.Conv2d(512, params.num_class, 1)\n self.output_stride = params.output_stride\n\n def forward(self, logits):\n x = logits[-1]\n input_size = x.shape[2:]\n\n x1 = self.pool1(x)\n x2 = self.pool2(x)\n x3 = self.pool3(x)\n x4 = self.pool4(x)\n\n x1 = self.conv1(x1)\n x2 = self.conv2(x2)\n x3 = self.conv3(x3)\n x4 = self.conv4(x4)\n\n x1 = F.upsample(x1, size=input_size, mode='bilinear', align_corners=False)\n x2 = F.upsample(x2, size=input_size, mode='bilinear', align_corners=False)\n x3 = F.upsample(x3, size=input_size, mode='bilinear', align_corners=False)\n x4 = F.upsample(x4, size=input_size, mode='bilinear', align_corners=False)\n\n x = torch.cat((x, x1, x2, x3, x4), dim=1)\n x = self.conv5(x)\n x = self.class_conv(x)\n\n x = F.upsample(x, scale_factor=self.output_stride)\n return x\n"
] |
[
[
"torch.nn.functional.upsample",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
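
One pyramid level of the PSP head above, sketched standalone. Note that F.upsample is deprecated in current PyTorch, so this sketch substitutes the equivalent F.interpolate:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 8, 24, 24)        # stand-in backbone feature map
pool = nn.AdaptiveAvgPool2d((2, 2))  # one pyramid level (2x2 grid)
x2 = F.interpolate(pool(x), size=x.shape[2:],
                   mode='bilinear', align_corners=False)
out = torch.cat((x, x2), dim=1)      # fuse the level with the original map
print(out.shape)                     # torch.Size([1, 16, 24, 24])
```
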
iisys-hof/HUI-Audio-Corpus-German
|
[
"4d2de2ed538a6b943166e1e35c10ee8b0b266be6"
] |
[
"huiAudioCorpus/converter/ListToHistogramConverter.py"
] |
[
"from huiAudioCorpus.model.Histogram import Histogram\nfrom typing import List, TypeVar\n\nimport numpy as np\nnumber = TypeVar('number', int, float)\n\nclass ListToHistogramConverter:\n def __init__(self, stepSize: int):\n self.stepSize =stepSize\n\n def convert(self, list: List[number]):\n bins = np.arange(round(min(1,min(list)))-1,max(list) + 2*self.stepSize,self.stepSize)\n exportBins: List[number]\n values : List[number]\n valuesNumpy, exportBinsNumpy = np.histogram(list, bins=bins) # type: ignore\n exportBins = exportBinsNumpy.tolist()# type: ignore\n values = valuesNumpy.tolist()# type: ignore\n histogram = Histogram(exportBins[:-1], values)\n return histogram"
] |
[
[
"numpy.histogram"
]
] |
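
The converter above leans entirely on np.histogram with explicit bin edges; a minimal sketch of that call, and why the converter drops the last edge:

```python
import numpy as np

values = [1.2, 2.7, 2.9, 4.1]
bins = np.arange(0, 6, 1)  # explicit bin edges 0, 1, ..., 5
counts, edges = np.histogram(values, bins=bins)
# len(edges) == len(counts) + 1, hence the converter's exportBins[:-1]
print(counts, edges[:-1])
```
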
nnn112358/python-control_test
|
[
"58e1b5e6feec0477fd4bad3683fb8af470faed4f"
] |
[
"pvtol-nested.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#このファイルは、Python制御パッケージの基本的な機能を実証することを目的としています。\n#AstromとMrurayの平面垂直離着陸(PVTOL)機に対応する、かなり複雑な制御設計と解析を動作します。\n#\n\n\n# pvtol-nested.py - aircraftのスラスタベクトルの内外ループ制御設計\n\n\nfrom __future__ import print_function\nfrom matplotlib.pyplot import * # MATLAB プロット関数\nfrom control.matlab import * # MATLAB-like 関数\nimport numpy as np\n\n# システムのパラメータ\nm = 4; # aircraftの質量\nJ = 0.0475; #ピッチ軸周りの慣性\nr = 0.25; #力の中心までの距離\ng = 9.8; # 重力定数\nc = 0.05; # 減衰係数(推定値)\n\n\n#ダイナミクスの伝達関数\nPi = tf([r], [J, 0, 0]); # 内側のループ (Roll角度)\nPo = tf([1], [m, c, 0]); # 外側のループ (位置)\n\n# Use state space versions\nPi = tf2ss(Pi);\nPo = tf2ss(Po);\n\n#\n# 内側のループ制御設計\n#\n\n# システムのシンプルなリードコントローラの設計\nk = 200; a = 2; b = 50;\nCi = k*tf([1, a], [1, b]);\t\t# リード補償\nLi = Pi*Ci;\n\n#オープンループのボード線図\nfigure(1); \nbode(Pi);\nshow()\n\n# マージンを含めたループ伝達関数のボード線図\nfigure(2); \nbode(Li);\nshow()\n# ゲインと位相マージンを計算する\n#! 実装されていなかった\n(gm, pm, wcg, wcp) = margin(Li);\nprint (gm, pm, wcg, wcp)\n\n# 感度と相補感度関数を計算する\nSi = feedback(1, Li);\nTi = Li * Si;\n\n# 仕様が満たされていることを確認する\nfigure(3); gangof4(Pi, Ci);\n# u1からv1への実際の伝達関数を計算する(see L8.2 notes)\n# Hi = Ci*(1-m*g*Pi)/(1+Ci*Pi);\nHi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1));\nshow()\n\nfigure(4); clf; subplot(221);\nbode(Hi);\n\n# 横方向制御システムをここで設計する\na = 0.02; b = 5; K = 2;\nCo = -K*tf([1, 0.3], [1, 10]);\t\t# another lead compensator\nLo = -m*g*Po*Co;\nshow()\n\n\nfigure(5); \nbode(Lo); # margin(Lo)\n#最後に、実際の外側のループのループゲインと応答を計算する\nL = Co*Hi*Po;\nS = feedback(1, L);\nT = feedback(L, 1);\n\n# 安定性マージンの計算\n#! 実装されていなかった\n(gm, pm, wgc, wpc) = margin(L); \nprint (gm, pm, wgc, wpc)\nshow()\n\n#! TODO:この数字には何か問題があります。軸の制限が不一致\nfigure(6); clf; \nbode(L);\n\n# クロスオーバーラインを追加\nsubplot(211); hold(True);\nloglog([1e-4, 1e3], [1, 1], 'k-')\n\n#-90度から始まるように位相反転\nbode(L, logspace(-4, 3));\n(mag, phase, w) = freqresp(L, logspace(-4, 3));\nphase = phase - 360;\nsubplot(212);\nsemilogx([1e-4, 1e3], [-180, -180], 'k-')\nhold(True);\nsemilogx(w, np.squeeze(phase), 'b-')\naxis([1e-4, 1e3, -360, 0]);\nxlabel('Frequency [deg]'); ylabel('Phase [deg]');\n# set(gca, 'YTick', [-360, -270, -180, -90, 0]);\n# set(gca, 'XTick', [10^-4, 10^-2, 1, 100]);\nshow()\n#\n# ナイキスト線図\n#\nfigure(7); clf;\naxis([-700, 5300, -3000, 3000]); hold(True);\nnyquist(L, (0.0001, 1000));\naxis([-700, 5300, -3000, 3000]);\n\n# 展開する領域にボックスを追加する\nplot([-400, -400, 200, 200, -400], [-100, 100, 100, -100, -100], 'r-')\nshow()\n\n# 拡張領域\nfigure(8); clf; subplot(231); \naxis([-10, 5, -20, 20]); hold(True);\nnyquist(L);\naxis([-10, 5, -20, 20]);\n\n#色を設定\ncolor = 'b';\nshow()\n\n# プロットに矢印を追加する\n# H1 = L.evalfr(0.4); H2 = L.evalfr(0.41);\n# arrow([real(H1), imag(H1)], [real(H2), imag(H2)], AM_normal_arrowsize, \\\n# 'EdgeColor', color, 'FaceColor', color);\n\n# H1 = freqresp(L, 0.35); H2 = freqresp(L, 0.36);\n# arrow([real(H2), -imag(H2)], [real(H1), -imag(H1)], AM_normal_arrowsize, \\\n# 'EdgeColor', color, 'FaceColor', color);\n\nfigure(9); \n(Yvec, Tvec) = step(T, linspace(0, 20));\nplot(Tvec.T, Yvec.T); hold(True);\n\n(Yvec, Tvec) = step(Co*S, linspace(0, 20));\nplot(Tvec.T, Yvec.T);\nshow()\nfigure(10); clf();\n(P, Z) = pzmap(T, Plot=True)\nprint(\"Closed loop poles and zeros: \", P, Z)\nshow()\n# Gang of Four\nfigure(11); clf();\ngangof4(Hi*Po, Co);\n\nshow()\n\n"
] |
[
[
"numpy.squeeze"
]
] |
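
A tiny hedged sketch of the control.matlab calls the record above is built around; the transfer function here is an arbitrary stand-in, not the PVTOL dynamics:

```python
from control.matlab import tf, margin, feedback

L = tf([1], [1, 2, 1, 0])     # arbitrary stand-in loop transfer function
gm, pm, wcg, wcp = margin(L)  # gain/phase margins and crossover frequencies
T = feedback(L, 1)            # closed-loop transfer function
print(gm, pm)
```
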
Gorilla-Lab-SCUT/SRDC-CVPR2020
|
[
"9cd07156e5c520e955a7df33c42819777d012ecb"
] |
[
"data/prepare_data.py"
] |
[
"import os\nimport shutil\nimport torch\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torch.nn.functional as F\nfrom utils.folder import ImageFolder\nimport numpy as np\nimport cv2\n\ndef generate_dataloader(args):\n # Data loading code\n traindir = os.path.join(args.data_path_source, args.src)\n traindir_t = os.path.join(args.data_path_target, args.tar)\n valdir = os.path.join(args.data_path_target, args.tar)\n valdir_t = os.path.join(args.data_path_target_t, args.tar_t)\n \n classes = os.listdir(traindir)\n classes.sort()\n ins_num_for_each_cls_src = torch.cuda.FloatTensor(args.num_classes)\n for i,c in enumerate(classes):\n ins_num_for_each_cls_src[i] = len(os.listdir(os.path.join(traindir, c)))\n \n if not os.path.isdir(traindir):\n raise ValueError ('the require data path is not exist, please download the dataset')\n\n if args.no_da:\n # transformation on the training data during training\n data_transform_train = transforms.Compose([\n \t\t\ttransforms.Resize((224, 224)), # spatial size of vgg-f input\n #transforms.RandomHorizontalFlip(),\n \t\t\ttransforms.ToTensor(),\n \t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n # transformation on the duplicated data during training\n data_transform_train_dup = transforms.Compose([\n \t\t\ttransforms.Resize((224, 224)),\n \t\t\t#transforms.RandomHorizontalFlip(),\n \t\t\ttransforms.ToTensor(),\n \t\t\ttransforms.Lambda(lambda x: _random_affine_augmentation(x)),\n \t\t\ttransforms.Lambda(lambda x: _gaussian_blur(x, sigma=args.sigma)),\n \t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n # transformation on the grayscale data during training\n data_transform_train_gray = transforms.Compose([\n transforms.Grayscale(3),\n \t\ttransforms.Resize((224, 224)), # spatial size of vgg-f input\n #transforms.RandomHorizontalFlip(),\n \t\ttransforms.ToTensor(),\n \t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n # transformation on the test data during test\n data_transform_test = transforms.Compose([\n \t\t\ttransforms.Resize((224, 224)), # spatial size of vgg-f input\n \t\t\ttransforms.ToTensor(),\n \t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n else:\n # transformation on the training data during training\n data_transform_train = transforms.Compose([\n \t\t\t#transforms.Resize((256, 256)), # spatial size of vgg-f input\n transforms.Resize(256),\n \t\t\ttransforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n \t\t\ttransforms.ToTensor(),\n \t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n # transformation on the duplicated data during training\n data_transform_train_dup = transforms.Compose([\n \t\t\ttransforms.Resize(256),\n \t\t\ttransforms.RandomCrop(224),\n \t\t\ttransforms.RandomHorizontalFlip(),\n \t\t\ttransforms.ToTensor(),\n \t\t\ttransforms.Lambda(lambda x: _random_affine_augmentation(x)),\n \t\t\ttransforms.Lambda(lambda x: _gaussian_blur(x, sigma=args.sigma)),\n \t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n # transformation on the grayscale data during training\n data_transform_train_gray = transforms.Compose([\n transforms.Grayscale(3),\n \t\t\ttransforms.Resize(256), # spatial size of vgg-f input\n \t\t\ttransforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n \t\t\ttransforms.ToTensor(),\n 
\t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n # transformation on the test data during test\n data_transform_test = transforms.Compose([\n \t\t\ttransforms.Resize(256), # spatial size of vgg-f input\n \t\t\ttransforms.CenterCrop(224),\n \t\t\ttransforms.ToTensor(),\n \t\t\ttransforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n \t])\n \n source_train_dataset = ImageFolder(root=traindir, transform=data_transform_train)\n source_test_dataset = ImageFolder(root=traindir, transform=data_transform_test)\n if args.aug_tar_agree and (not args.gray_tar_agree):\n target_train_dataset = ImageFolder(root=traindir_t, transform=data_transform_train, transform_aug=data_transform_train_dup)\n elif args.gray_tar_agree and (not args.aug_tar_agree):\n target_train_dataset = ImageFolder(root=traindir_t, transform=data_transform_train, transform_gray=data_transform_train_gray)\n elif args.aug_tar_agree and args.gray_tar_agree:\n target_train_dataset = ImageFolder(root=traindir_t, transform=data_transform_train, transform_aug=data_transform_train_dup, transform_gray=data_transform_train_gray)\n else:\n target_train_dataset = ImageFolder(root=traindir_t, transform=data_transform_train)\n target_test_dataset = ImageFolder(root=valdir, transform=data_transform_test)\n target_test_dataset_t = ImageFolder(root=valdir_t, transform=data_transform_test)\n \n source_train_loader = torch.utils.data.DataLoader(\n source_train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, sampler=None, drop_last=True\n )\n source_test_loader = torch.utils.data.DataLoader(\n source_test_dataset, batch_size=63, shuffle=False,\n num_workers=args.workers, pin_memory=True\n )\n target_train_loader = torch.utils.data.DataLoader(\n target_train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, sampler=None, drop_last=True\n )\n target_test_loader = torch.utils.data.DataLoader(\n target_test_dataset, batch_size=63, shuffle=False,\n num_workers=args.workers, pin_memory=True\n )\n target_test_loader_t = torch.utils.data.DataLoader(\n target_test_dataset_t, batch_size=63, shuffle=False,\n num_workers=args.workers, pin_memory=True\n )\n \n return source_train_loader, target_train_loader, target_test_loader, target_test_loader_t, source_test_loader\n\n\ndef _random_affine_augmentation(x):\n\tM = np.float32([[1 + np.random.normal(0.0, 0.1), np.random.normal(0.0, 0.1), 0], \n\t\t\t\t[np.random.normal(0.0, 0.1), 1 + np.random.normal(0.0, 0.1), 0]])\n\trows, cols = x.shape[1:3]\n\tdst = cv2.warpAffine(np.transpose(x.numpy(), [1, 2, 0]), M, (cols,rows))\n\tdst = np.transpose(dst, [2, 0, 1])\n\treturn torch.from_numpy(dst)\n\n\ndef _gaussian_blur(x, sigma=0.1):\n\tksize = int(sigma + 0.5) * 8 + 1\n\tdst = cv2.GaussianBlur(x.numpy(), (ksize, ksize), sigma)\n\treturn torch.from_numpy(dst)\n \n \n \n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.cuda.FloatTensor",
"numpy.random.normal",
"numpy.transpose"
]
] |
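The row above wires `ImageFolder` datasets into `torch.utils.data.DataLoader`s and converts HWC numpy images into CHW tensors. A minimal self-contained sketch of the same loader wiring, with synthetic arrays standing in for the image folders (shapes, batch size, and label values here are illustrative, not taken from the source):

```python
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

# Synthetic stand-in for an ImageFolder: 8 RGB images in HWC order.
images_hwc = np.random.normal(0.0, 1.0, size=(8, 32, 32, 3)).astype(np.float32)
labels = np.zeros(8, dtype=np.int64)

# HWC -> CHW, the layout ToTensor() (and _random_affine_augmentation) works in.
images_chw = np.transpose(images_hwc, (0, 3, 1, 2))

dataset = TensorDataset(torch.from_numpy(images_chw), torch.from_numpy(labels))
loader = DataLoader(dataset, batch_size=4, shuffle=True, drop_last=True)

for batch_imgs, batch_labels in loader:
    print(batch_imgs.shape, batch_labels.shape)  # torch.Size([4, 3, 32, 32]) torch.Size([4])
```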
aky15/espnet
|
[
"1dc734839d34e2f2dd13cfa375713aecf232ae25"
] |
[
"espnet/nets/pytorch_backend/transformer/decoder.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Shigeki Karita\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Decoder definition.\"\"\"\n\nimport logging\n\nfrom typing import Any\nfrom typing import List\nfrom typing import Tuple\n\nimport torch\n\nfrom espnet.nets.pytorch_backend.nets_utils import rename_state_dict\nfrom espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention\nfrom espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer\nfrom espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution\nfrom espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D\nfrom espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding\nfrom espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm\nfrom espnet.nets.pytorch_backend.transformer.lightconv import LightweightConvolution\nfrom espnet.nets.pytorch_backend.transformer.lightconv2d import LightweightConvolution2D\nfrom espnet.nets.pytorch_backend.transformer.mask import subsequent_mask\nfrom espnet.nets.pytorch_backend.transformer.positionwise_feed_forward import (\n PositionwiseFeedForward, # noqa: H301\n)\nfrom espnet.nets.pytorch_backend.transformer.repeat import repeat\nfrom espnet.nets.scorer_interface import BatchScorerInterface\n\n\ndef _pre_hook(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n):\n # https://github.com/espnet/espnet/commit/3d422f6de8d4f03673b89e1caef698745ec749ea#diff-bffb1396f038b317b2b64dd96e6d3563\n rename_state_dict(prefix + \"output_norm.\", prefix + \"after_norm.\", state_dict)\n\n\nclass Decoder(BatchScorerInterface, torch.nn.Module):\n \"\"\"Transfomer decoder module.\n\n Args:\n odim (int): Output diminsion.\n self_attention_layer_type (str): Self-attention layer type.\n attention_dim (int): Dimention of attention.\n attention_heads (int): The number of heads of multi head attention.\n conv_wshare (int): The number of kernel of convolution. Only used in\n self_attention_layer_type == \"lightconv*\" or \"dynamiconv*\".\n conv_kernel_length (Union[int, str]): Kernel size str of convolution\n (e.g. 71_71_71_71_71_71). Only used in self_attention_layer_type\n == \"lightconv*\" or \"dynamiconv*\".\n conv_usebias (bool): Whether to use bias in convolution. Only used in\n self_attention_layer_type == \"lightconv*\" or \"dynamiconv*\".\n linear_units (int): The number of units of position-wise feed forward.\n num_blocks (int): The number of decoder blocks.\n dropout_rate (float): Dropout rate.\n positional_dropout_rate (float): Dropout rate after adding positional encoding.\n self_attention_dropout_rate (float): Dropout rate in self-attention.\n src_attention_dropout_rate (float): Dropout rate in source-attention.\n input_layer (Union[str, torch.nn.Module]): Input layer type.\n use_output_layer (bool): Whether to use output layer.\n pos_enc_class (torch.nn.Module): Positional encoding module class.\n `PositionalEncoding `or `ScaledPositionalEncoding`\n normalize_before (bool): Whether to use layer_norm before the first block.\n concat_after (bool): Whether to concat attention layer's input and output.\n if True, additional linear will be applied.\n i.e. x -> x + linear(concat(x, att(x)))\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\n\n \"\"\"\n\n def __init__(\n self,\n odim,\n selfattention_layer_type=\"selfattn\",\n attention_dim=256,\n attention_heads=4,\n conv_wshare=4,\n conv_kernel_length=11,\n conv_usebias=False,\n linear_units=2048,\n num_blocks=6,\n dropout_rate=0.1,\n positional_dropout_rate=0.1,\n self_attention_dropout_rate=0.0,\n src_attention_dropout_rate=0.0,\n input_layer=\"embed\",\n use_output_layer=True,\n pos_enc_class=PositionalEncoding,\n normalize_before=True,\n concat_after=False,\n ):\n \"\"\"Construct an Decoder object.\"\"\"\n torch.nn.Module.__init__(self)\n self._register_load_state_dict_pre_hook(_pre_hook)\n if input_layer == \"embed\":\n self.embed = torch.nn.Sequential(\n torch.nn.Embedding(odim, attention_dim),\n pos_enc_class(attention_dim, positional_dropout_rate),\n )\n elif input_layer == \"linear\":\n self.embed = torch.nn.Sequential(\n torch.nn.Linear(odim, attention_dim),\n torch.nn.LayerNorm(attention_dim),\n torch.nn.Dropout(dropout_rate),\n torch.nn.ReLU(),\n pos_enc_class(attention_dim, positional_dropout_rate),\n )\n elif isinstance(input_layer, torch.nn.Module):\n self.embed = torch.nn.Sequential(\n input_layer, pos_enc_class(attention_dim, positional_dropout_rate)\n )\n else:\n raise NotImplementedError(\"only `embed` or torch.nn.Module is supported.\")\n self.normalize_before = normalize_before\n\n # self-attention module definition\n if selfattention_layer_type == \"selfattn\":\n logging.info(\"decoder self-attention layer type = self-attention\")\n decoder_selfattn_layer = MultiHeadedAttention\n decoder_selfattn_layer_args = [\n (\n attention_heads,\n attention_dim,\n self_attention_dropout_rate,\n )\n ] * num_blocks\n elif selfattention_layer_type == \"lightconv\":\n logging.info(\"decoder self-attention layer type = lightweight convolution\")\n decoder_selfattn_layer = LightweightConvolution\n decoder_selfattn_layer_args = [\n (\n conv_wshare,\n attention_dim,\n self_attention_dropout_rate,\n int(conv_kernel_length.split(\"_\")[lnum]),\n True,\n conv_usebias,\n )\n for lnum in range(num_blocks)\n ]\n elif selfattention_layer_type == \"lightconv2d\":\n logging.info(\n \"decoder self-attention layer \"\n \"type = lightweight convolution 2-dimentional\"\n )\n decoder_selfattn_layer = LightweightConvolution2D\n decoder_selfattn_layer_args = [\n (\n conv_wshare,\n attention_dim,\n self_attention_dropout_rate,\n int(conv_kernel_length.split(\"_\")[lnum]),\n True,\n conv_usebias,\n )\n for lnum in range(num_blocks)\n ]\n elif selfattention_layer_type == \"dynamicconv\":\n logging.info(\"decoder self-attention layer type = dynamic convolution\")\n decoder_selfattn_layer = DynamicConvolution\n decoder_selfattn_layer_args = [\n (\n conv_wshare,\n attention_dim,\n self_attention_dropout_rate,\n int(conv_kernel_length.split(\"_\")[lnum]),\n True,\n conv_usebias,\n )\n for lnum in range(num_blocks)\n ]\n elif selfattention_layer_type == \"dynamicconv2d\":\n logging.info(\n \"decoder self-attention layer type = dynamic convolution 2-dimentional\"\n )\n decoder_selfattn_layer = DynamicConvolution2D\n decoder_selfattn_layer_args = [\n (\n conv_wshare,\n attention_dim,\n self_attention_dropout_rate,\n int(conv_kernel_length.split(\"_\")[lnum]),\n True,\n conv_usebias,\n )\n for lnum in range(num_blocks)\n ]\n\n self.decoders = repeat(\n num_blocks,\n lambda lnum: DecoderLayer(\n attention_dim,\n decoder_selfattn_layer(*decoder_selfattn_layer_args[lnum]),\n MultiHeadedAttention(\n attention_heads, attention_dim, src_attention_dropout_rate\n ),\n 
PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),\n dropout_rate,\n normalize_before,\n concat_after,\n ),\n )\n self.selfattention_layer_type = selfattention_layer_type\n if self.normalize_before:\n self.after_norm = LayerNorm(attention_dim)\n if use_output_layer:\n self.output_layer = torch.nn.Linear(attention_dim, odim)\n else:\n self.output_layer = None\n\n def forward(self, tgt, tgt_mask, memory, memory_mask):\n \"\"\"Forward decoder.\n\n Args:\n tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out) if\n input_layer == \"embed\". In the other case, input tensor\n (#batch, maxlen_out, odim).\n tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).\n dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+\n (include 1.2).\n memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).\n memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).\n dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+\n (include 1.2).\n\n Returns:\n torch.Tensor: Decoded token score before softmax (#batch, maxlen_out, odim)\n if use_output_layer is True. In the other case,final block outputs\n (#batch, maxlen_out, attention_dim).\n torch.Tensor: Score mask before softmax (#batch, maxlen_out).\n\n \"\"\"\n x = self.embed(tgt)\n x, tgt_mask, memory, memory_mask = self.decoders(\n x, tgt_mask, memory, memory_mask\n )\n if self.normalize_before:\n x = self.after_norm(x)\n if self.output_layer is not None:\n x = self.output_layer(x)\n return x, tgt_mask\n\n def forward_one_step(self, tgt, tgt_mask, memory, cache=None):\n \"\"\"Forward one step.\n\n Args:\n tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out).\n tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).\n dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+\n (include 1.2).\n memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).\n cache (List[torch.Tensor]): List of cached tensors.\n Each tensor shape should be (#batch, maxlen_out - 1, size).\n\n Returns:\n torch.Tensor: Output tensor (batch, maxlen_out, odim).\n List[torch.Tensor]: List of cache tensors of each decoder layer.\n\n \"\"\"\n x = self.embed(tgt)\n if cache is None:\n cache = [None] * len(self.decoders)\n new_cache = []\n for c, decoder in zip(cache, self.decoders):\n x, tgt_mask, memory, memory_mask = decoder(\n x, tgt_mask, memory, None, cache=c\n )\n new_cache.append(x)\n\n if self.normalize_before:\n y = self.after_norm(x[:, -1])\n else:\n y = x[:, -1]\n if self.output_layer is not None:\n y = torch.log_softmax(self.output_layer(y), dim=-1)\n\n return y, new_cache\n\n # beam search API (see ScorerInterface)\n def score(self, ys, state, x):\n \"\"\"Score.\"\"\"\n ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)\n if self.selfattention_layer_type != \"selfattn\":\n # TODO(karita): implement cache\n logging.warning(\n f\"{self.selfattention_layer_type} does not support cached decoding.\"\n )\n state = None\n logp, state = self.forward_one_step(\n ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state\n )\n return logp.squeeze(0), state\n\n # batch beam search API (see BatchScorerInterface)\n def batch_score(\n self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor\n ) -> Tuple[torch.Tensor, List[Any]]:\n \"\"\"Score new token batch (required).\n\n Args:\n ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).\n states (List[Any]): Scorer states for prefix tokens.\n xs (torch.Tensor):\n The encoder feature that 
generates ys (n_batch, xlen, n_feat).\n\n Returns:\n tuple[torch.Tensor, List[Any]]: Tuple of\n batchfied scores for next token with shape of `(n_batch, n_vocab)`\n and next state list for ys.\n\n \"\"\"\n # merge states\n n_batch = len(ys)\n n_layers = len(self.decoders)\n if states[0] is None:\n batch_state = None\n else:\n # transpose state of [batch, layer] into [layer, batch]\n batch_state = [\n torch.stack([states[b][i] for b in range(n_batch)])\n for i in range(n_layers)\n ]\n\n # batch decoding\n ys_mask = subsequent_mask(ys.size(-1), device=xs.device).unsqueeze(0)\n logp, states = self.forward_one_step(ys, ys_mask, xs, cache=batch_state)\n\n # transpose state of [layer, batch] into [batch, layer]\n state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]\n return logp, state_list\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.Module.__init__",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.ReLU"
]
] |
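The `Decoder` above composes an embedding input layer with a causal self-attention stack. A tiny sketch of the `input_layer == "embed"` branch and of the triangular mask that `subsequent_mask` produces; an `Identity` stands in for espnet's `PositionalEncoding` so the snippet stays self-contained (that substitution is an assumption of this sketch, not the library's behaviour):

```python
import torch

# Stand-in for the input_layer == "embed" branch: Embedding followed by a
# no-op placeholder where the positional encoding would go.
odim, attention_dim = 10, 16
embed = torch.nn.Sequential(
    torch.nn.Embedding(odim, attention_dim),
    torch.nn.Identity(),
)
tgt = torch.randint(0, odim, (2, 5))  # (batch, maxlen_out) token ids
print(embed(tgt).shape)               # torch.Size([2, 5, 16])

# Causal mask analogous to subsequent_mask(): True where attention is allowed.
mask = torch.tril(torch.ones(5, 5, dtype=torch.bool)).unsqueeze(0)
print(mask.shape)                     # torch.Size([1, 5, 5])
```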
gugarosa/dropout_rbm
|
[
"1b16e71315e1577b468ea2cd0d3c7fcf48bc6851"
] |
[
"optimization.py"
] |
[
"import argparse\n\nimport numpy as np\nimport torch\n\nimport utils.loader as l\nimport utils.objects as m\nimport utils.opt as o\nimport utils.target as t\n\n\ndef get_arguments():\n \"\"\"Gets arguments from the command line.\n\n Returns:\n A parser with the input arguments.\n\n \"\"\"\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(usage='Optimizes a model over the validation set.')\n\n parser.add_argument('dataset', help='Dataset identifier', choices=['mnist', 'semeion', 'usps'])\n\n parser.add_argument('mh', help='Meta-heuristic identifier', choices=['ba', 'cs', 'fa', 'pso'])\n\n parser.add_argument('-n_visible', help='Number of visible units', type=int, default=784)\n\n parser.add_argument('-n_hidden', help='Number of hidden units', type=int, default=400)\n\n parser.add_argument('-steps', help='Number of CD steps', type=int, default=1)\n\n parser.add_argument('-lr', help='Learning rate', type=float, default=0.1)\n\n parser.add_argument('-momentum', help='Momentum', type=float, default=0.0002)\n\n parser.add_argument('-decay', help='Weight decay', type=float, default=0.5)\n\n parser.add_argument('-temp', help='Temperature', type=float, default=1)\n\n parser.add_argument('-batch_size', help='Batch size', type=int, default=20)\n\n parser.add_argument('-device', help='CPU or GPU usage', choices=['cpu', 'cuda'])\n\n parser.add_argument('-epochs', help='Number of training epochs', type=int, default=25)\n\n parser.add_argument('-n_agents', help='Number of meta-heuristic agents', type=int, default=15)\n\n parser.add_argument('-n_iter', help='Number of meta-heuristic iterations', type=int, default=25)\n\n parser.add_argument('-seed', help='Seed identifier', type=int, default=0)\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n # Gathers the input arguments\n args = get_arguments()\n\n # Gathering common variables\n dataset = args.dataset\n seed = args.seed\n\n # Gathering RBM variables\n n_visible = args.n_visible\n n_hidden = args.n_hidden\n steps = args.steps\n lr = args.lr\n momentum = args.momentum\n decay = args.decay\n T = args.temp\n device = args.device\n batch_size = args.batch_size\n epochs = args.epochs\n model = m.get_model('drbm').obj\n\n # Gathering optimization variables\n meta = args.mh\n n_agents = args.n_agents\n n_iterations = args.n_iter\n meta_heuristic = m.get_mh(meta).obj\n hyperparams = m.get_mh(meta).hyperparams\n\n # Checks for the name of device\n if device == 'cpu':\n # Updates accordingly\n use_gpu = False\n else:\n # Updates accordingly\n use_gpu = True\n\n # Loads the data\n train, val, _ = l.load_dataset(name=dataset)\n\n # Defining torch and numpy seeds\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n # Initializes the optimization target\n opt_fn = t.reconstruction(model, train, val, n_visible, n_hidden, steps, lr, momentum, decay, T, use_gpu, batch_size, epochs)\n\n # Running the optimization task\n history = o.optimize(meta_heuristic, opt_fn, n_agents, n_iterations, hyperparams)\n\n # Saves the history object to an output file\n history.save(f'models/{meta}_{n_hidden}hid_{lr}lr_drbm_{dataset}_{seed}.pkl')\n\n # Opens an outbut file to save a variable\n with open('best_p.txt', 'w') as f:\n # Saves the best `p` parameter\n f.write(str(history.best_agent[-1][0][0][0]))\n"
] |
[
[
"torch.manual_seed",
"numpy.random.seed"
]
] |
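The script above fixes both the torch and numpy RNGs before building the optimization target. A small sketch of why that makes repeated runs reproducible:

```python
import numpy as np
import torch

def seed_everything(seed: int = 0) -> None:
    # Same pairing as in the script: one seed for both frameworks.
    torch.manual_seed(seed)
    np.random.seed(seed)

seed_everything(0)
a = torch.rand(3)
seed_everything(0)
b = torch.rand(3)
assert torch.equal(a, b)  # identical draws after re-seeding
```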
srijannnd/Data-Set-Visualization-App
|
[
"cbac1a5c076c056cfb5d26795c0505b60e2850b4"
] |
[
"uploads/core/views.py"
] |
[
"from django.shortcuts import render, redirect, get_object_or_404\nfrom uploads.core.models import Document\nfrom uploads.core.forms import DocumentForm\nimport pandas as pd\nimport seaborn as sns\n\n\ndef home(request):\n documents = Document.objects.all()\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n ds = pd.read_csv('./media/' + documents[0].document.name)\n viz = sns.heatmap(ds.corr())\n fig = viz.get_figure()\n fig.savefig('./uploads/core/static/image/plot.png')\n documents.delete()\n return redirect('home')\n else:\n form = DocumentForm()\n return render(request, 'core/home.html', {'documents': documents, 'form': form})\n\n\n\n"
] |
[
[
"pandas.read_csv"
]
] |
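The view above renders a correlation heatmap of the uploaded CSV and saves it as a static image. The same pandas/seaborn pattern in isolation (the frame here is synthetic, and a headless matplotlib backend is selected as a web process would need):

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, suitable for a server context
import pandas as pd
import seaborn as sns

# Synthetic frame; in the view, the frame comes from the uploaded CSV.
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [4, 3, 2, 1], "c": [1, 3, 2, 4]})
ax = sns.heatmap(df.corr())          # sns.heatmap returns a matplotlib Axes
ax.get_figure().savefig("plot.png")  # same save pattern as the view
```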
VERITAS-Observatory/V2DL3
|
[
"3b4691cbb3a06805b722e494d4ae84ce7a866dd4"
] |
[
"pyV2DL3/vegas/fillRESPONSE_not_safe.py"
] |
[
"import logging\n\nimport numpy as np\n\nfrom pyV2DL3.vegas.irfloader import IRFLoader\nfrom pyV2DL3.vegas.util import getThetaSquareCut\n\nlogger = logging.getLogger(__name__)\n\n\ndef __fillRESPONSE_not_safe__(\n effectiveAreaIO, azimuth, zenith, noise, irf_to_store=None\n):\n\n if irf_to_store is None:\n irf_to_store = {}\n\n response_dict = {}\n effectiveAreaIO.loadTheRootFile()\n irfloader = IRFLoader(effectiveAreaIO, pointlike=irf_to_store[\"point-like\"])\n ea_final_data, ebias_final_data, abias_final_data = irfloader.getIRF(\n azimuth, zenith, noise\n )\n minEnergy, maxEnergy = irfloader.getSafeEnergy(azimuth, zenith, noise)\n response_dict[\"LO_THRES\"] = minEnergy\n response_dict[\"HI_THRES\"] = maxEnergy\n\n # Point-like\n if irf_to_store[\"point-like\"]:\n response_dict[\"EA\"] = ea_final_data\n response_dict[\"MIGRATION\"] = ebias_final_data\n\n # Load the theta squared cut\n logger.debug(\"Getting Theta2 cut from EA file\")\n cuts = effectiveAreaIO.loadTheCutsInfo()\n for k in cuts:\n theta2cut = getThetaSquareCut(k.fCutsFileText)\n logger.debug(f\"Theta2 cut is {theta2cut:.2f}\")\n response_dict[\"RAD_MAX\"] = np.sqrt(theta2cut)\n\n # Full-enclosure\n elif irf_to_store[\"full-enclosure\"]:\n response_dict[\"FULL_EA\"] = ea_final_data\n response_dict[\"FULL_MIGRATION\"] = ebias_final_data\n response_dict[\"PSF\"] = abias_final_data\n else:\n raise ValueError(\"IRF requested should be point-like or full-enclosure\")\n\n return response_dict\n"
] |
[
[
"numpy.sqrt"
]
] |
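`RAD_MAX` above is derived from the theta-squared cut by a square root, since theta² is a squared angular distance. A one-line illustration with a made-up cut value:

```python
import numpy as np

theta2_cut = 0.008             # deg^2 -- illustrative value, not from the source
rad_max = np.sqrt(theta2_cut)  # angular cut radius in deg
print(f"RAD_MAX = {rad_max:.3f} deg")  # RAD_MAX = 0.089 deg
```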
SuperToxicCat/Drowsiness-Detection
|
[
"fac50f762719e76cb41a8a7d0206f63ae35cab89"
] |
[
"face_landmarks.py"
] |
[
"\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\ndef get_landmark_model(saved_model='models/pose_model'):\n \"\"\"\n \n Parameters\n ----------\n saved_model : string, optional\n Path to facial landmarks model. The default is 'models/pose_model'.\n\n Returns\n -------\n model : Tensorflow model\n Facial landmarks model\n\n \"\"\"\n #model = keras.models.load_model(saved_model)\n model = tf.saved_model.load(saved_model)\n return model\n\ndef get_square_box(box):\n \"\"\"Get a square box out of the given box, by expanding it.\"\"\"\n left_x = box[0]\n top_y = box[1]\n right_x = box[2]\n bottom_y = box[3]\n\n box_width = right_x - left_x\n box_height = bottom_y - top_y\n\n # Check if box is already a square. If not, make it a square.\n diff = box_height - box_width\n delta = int(abs(diff) / 2)\n\n if diff == 0: # Already a square.\n return box\n elif diff > 0: # Height > width, a slim box.\n left_x -= delta\n right_x += delta\n if diff % 2 == 1:\n right_x += 1\n else: # Width > height, a short box.\n top_y -= delta\n bottom_y += delta\n if diff % 2 == 1:\n bottom_y += 1\n\n # Make sure box is always square.\n assert ((right_x - left_x) == (bottom_y - top_y)), 'Box is not square.'\n\n return [left_x, top_y, right_x, bottom_y]\n\ndef move_box(box, offset):\n \"\"\"Move the box to direction specified by vector offset\"\"\"\n left_x = box[0] + offset[0]\n top_y = box[1] + offset[1]\n right_x = box[2] + offset[0]\n bottom_y = box[3] + offset[1]\n return [left_x, top_y, right_x, bottom_y]\n\ndef detect_marks(img, model, face):\n \"\"\"\n Find the facial landmarks in an image from the faces\n\n Parameters\n ----------\n img : np.uint8\n The image in which landmarks are to be found\n model : Tensorflow model\n Loaded facial landmark model\n face : list\n Face coordinates (x, y, x1, y1) in which the landmarks are to be found\n\n Returns\n -------\n marks : numpy array\n facial landmark points\n\n \"\"\"\n\n offset_y = int(abs((face[3] - face[1]) * 0.1))\n box_moved = move_box(face, [0, offset_y])\n facebox = get_square_box(box_moved)\n \n h, w = img.shape[:2]\n if facebox[0] < 0:\n facebox[0] = 0\n if facebox[1] < 0:\n facebox[1] = 0\n if facebox[2] > w:\n facebox[2] = w\n if facebox[3] > h:\n facebox[3] = h\n \n face_img = img[facebox[1]: facebox[3],\n facebox[0]: facebox[2]]\n face_img = cv2.resize(face_img, (128, 128))\n face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)\n \n # # Actual detection.\n predictions = model.signatures[\"predict\"](\n tf.constant([face_img], dtype=tf.uint8))\n\n # Convert predictions to landmarks.\n marks = np.array(predictions['output']).flatten()[:136]\n marks = np.reshape(marks, (-1, 2))\n \n marks *= (facebox[2] - facebox[0])\n marks[:, 0] += facebox[0]\n marks[:, 1] += facebox[1]\n marks = marks.astype(np.uint)\n\n return marks\n\ndef draw_marks(image, marks, color=(0, 255, 0)):\n \"\"\"\n Draw the facial landmarks on an image\n\n Parameters\n ----------\n image : np.uint8\n Image on which landmarks are to be drawn.\n marks : list or numpy array\n Facial landmark points\n color : tuple, optional\n Color to which landmarks are to be drawn with. The default is (0, 255, 0).\n\n Returns\n -------\n None.\n\n \"\"\"\n for mark in marks:\n cv2.circle(image, (mark[0], mark[1]), 2, color, -1, cv2.LINE_AA)\n \n"
] |
[
[
"numpy.reshape",
"numpy.array",
"tensorflow.constant",
"tensorflow.saved_model.load"
]
] |
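In `detect_marks` above, the 136 raw model outputs are reshaped into 68 `(x, y)` pairs and mapped from box-relative coordinates back into the image. That post-processing step in isolation (random numbers stand in for the model's predictions):

```python
import numpy as np

# 136 raw outputs = 68 landmarks as (x, y) pairs in [0, 1], relative to the
# square face box.
raw = np.random.rand(136).astype(np.float32)
facebox = [40, 60, 168, 188]  # left, top, right, bottom (a 128 px square)

marks = np.reshape(raw, (-1, 2))
marks *= facebox[2] - facebox[0]  # scale from box-relative to pixels
marks[:, 0] += facebox[0]         # shift into image coordinates
marks[:, 1] += facebox[1]
print(marks.shape)                # (68, 2)
```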
AnonSubm2021/TransStyleGAN
|
[
"0194cd6f0e96c801d55c0cb9683e1f552bcf6d48"
] |
[
"metrics/prdc.py"
] |
[
"\"\"\"\nprdc\nCopyright (c) 2020-present NAVER Corp.\nMIT license\n\"\"\"\nimport numpy as np\nimport sklearn.metrics\n\n__all__ = ['compute_prdc']\n\n\ndef compute_pairwise_distance(data_x, data_y=None):\n \"\"\"\n Args:\n data_x: numpy.ndarray([N, feature_dim], dtype=np.float32)\n data_y: numpy.ndarray([N, feature_dim], dtype=np.float32)\n Returns:\n numpy.ndarray([N, N], dtype=np.float32) of pairwise distances.\n \"\"\"\n if data_y is None:\n data_y = data_x\n dists = sklearn.metrics.pairwise_distances(\n data_x, data_y, metric='euclidean', n_jobs=8)\n return dists\n\n\ndef get_kth_value(unsorted, k, axis=-1):\n \"\"\"\n Args:\n unsorted: numpy.ndarray of any dimensionality.\n k: int\n Returns:\n kth values along the designated axis.\n \"\"\"\n indices = np.argpartition(unsorted, k, axis=axis)[..., :k]\n k_smallests = np.take_along_axis(unsorted, indices, axis=axis)\n kth_values = k_smallests.max(axis=axis)\n return kth_values\n\n\ndef compute_nearest_neighbour_distances(input_features, nearest_k):\n \"\"\"\n Args:\n input_features: numpy.ndarray([N, feature_dim], dtype=np.float32)\n nearest_k: int\n Returns:\n Distances to kth nearest neighbours.\n \"\"\"\n distances = compute_pairwise_distance(input_features)\n radii = get_kth_value(distances, k=nearest_k + 1, axis=-1)\n return radii\n\n\ndef compute_prdc(real_features, fake_features, nearest_k):\n \"\"\"\n Computes precision, recall, density, and coverage given two manifolds.\n Args:\n real_features: numpy.ndarray([N, feature_dim], dtype=np.float32)\n fake_features: numpy.ndarray([N, feature_dim], dtype=np.float32)\n nearest_k: int.\n Returns:\n dict of precision, recall, density, and coverage.\n \"\"\"\n\n print('Num real: {} Num fake: {}'\n .format(real_features.shape[0], fake_features.shape[0]))\n\n real_nearest_neighbour_distances = compute_nearest_neighbour_distances(\n real_features, nearest_k)\n fake_nearest_neighbour_distances = compute_nearest_neighbour_distances(\n fake_features, nearest_k)\n distance_real_fake = compute_pairwise_distance(\n real_features, fake_features)\n\n precision = (\n distance_real_fake <\n np.expand_dims(real_nearest_neighbour_distances, axis=1)\n ).any(axis=0).mean()\n\n recall = (\n distance_real_fake <\n np.expand_dims(fake_nearest_neighbour_distances, axis=0)\n ).any(axis=1).mean()\n\n density = (1. / float(nearest_k)) * (\n distance_real_fake <\n np.expand_dims(real_nearest_neighbour_distances, axis=1)\n ).sum(axis=0).mean()\n\n coverage = (\n distance_real_fake.min(axis=1) <\n real_nearest_neighbour_distances\n ).mean()\n\n return dict(precision=precision, recall=recall,\n density=density, coverage=coverage)\n"
] |
[
[
"numpy.take_along_axis",
"numpy.expand_dims",
"numpy.argpartition"
]
] |
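The core quantity in `compute_prdc` is the distance from each point to its k-th nearest neighbour. A standalone sketch of that step (the `k + 1` smallest entries are kept because the self-distance of zero is among them):

```python
import numpy as np
import sklearn.metrics

x = np.random.normal(size=(50, 8)).astype(np.float32)
d = sklearn.metrics.pairwise_distances(x, x, metric="euclidean")

k = 5
# Partial sort: indices of the k + 1 smallest distances per row, then their max
# gives the distance to the k-th neighbour (excluding the point itself).
idx = np.argpartition(d, k + 1, axis=-1)[..., : k + 1]
radii = np.take_along_axis(d, idx, axis=-1).max(axis=-1)
print(radii.shape)  # (50,)
```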
rigdenlab/SWAMP
|
[
"3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e"
] |
[
"swamp/clustering/spectral.py"
] |
[
"from swamp.clustering.clustering import Clustering\nfrom sklearn.cluster import SpectralClustering\nfrom scipy.stats import randint, expon\n\n\nclass Spectral(Clustering):\n \"\"\"This class implements methods and datastructures to work with :py:obj:`sklearn.cluster.Spectral`\n\n :example:\n\n >>> from swamp.clustering import Spectral\n >>> import joblib\n >>> dist_mtx = joblib.load('<dist_mtx.pckl>')\n >>> dist_mtx = dist_mtx.fillna(0)\n >>> my_clst = Spectral(dist_mtx)\n >>> my_clst.grid_search()\n\n\n \"\"\"\n\n @property\n def _algorithm_name(self):\n \"\"\"Name of the clustering algorithm (spectral)\"\"\"\n return \"spectral\"\n\n @property\n def _hyper_params(self):\n \"\"\"Dictionary with the range of possible values for each of the clustering hyper-parameters\"\"\"\n\n return {\"n_clusters\": randint(200, 900),\n \"eigen_solver\": [None, \"arpack\", \"lobpcg\"],\n \"assign_labels\": [\"kmeans\", \"discretize\"],\n \"n_neighbors\": randint(2, 10),\n \"gamma\": expon(0.1),\n }\n\n def _clustering(self, **kwargs):\n \"\"\"Perform clustering with a given set of arguments\"\"\"\n return SpectralClustering(affinity='precomputed', n_jobs=1, **kwargs)\n\n def cluster(self):\n \"\"\"Method to perform a clustering using the :py:attr:`~swamp.clustering.Clustering.best_params`\n\n :raises ValueError: the attribute :py:attr:`~swamp.clustering.Clustering.similarity_mtx` is None\n \"\"\"\n\n self.logger.info(self.clustering_header)\n\n if self.similarity_mtx is None:\n raise ValueError('Need to load a distance matrix before clustering!')\n\n clst = SpectralClustering(n_jobs=self.nthreads, affinity='precomputed', **self.best_params)\n clst.fit(self.similarity_mtx)\n self.labels = clst.labels_\n"
] |
[
[
"scipy.stats.randint",
"scipy.stats.expon",
"sklearn.cluster.SpectralClustering"
]
] |
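A minimal end-to-end use of the pieces above: sample hyper-parameters from the `randint`/`expon` distributions and fit `SpectralClustering` on a precomputed affinity matrix. An RBF kernel over random points stands in for SWAMP's similarity matrix, and the ranges are shrunk to fit the toy data:

```python
import numpy as np
from scipy.stats import expon, randint
from sklearn.cluster import SpectralClustering
from sklearn.metrics.pairwise import rbf_kernel

# Distributions in the style of _hyper_params (ranges illustrative).
print(randint(2, 5).rvs(), expon(0.1).rvs())

rng = np.random.default_rng(0)
x = rng.normal(size=(20, 2))
affinity = rbf_kernel(x)  # precomputed similarities, as the class expects

clst = SpectralClustering(n_clusters=2, affinity="precomputed", n_jobs=1)
labels = clst.fit(affinity).labels_
print(labels.shape)  # (20,)
```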
BrunoCoimbra/decisionengine_modules
|
[
"bfd14644eb2e16b72b75fdcc3ebe8ad1323b904f",
"bfd14644eb2e16b72b75fdcc3ebe8ad1323b904f"
] |
[
"src/decisionengine_modules/glideinwms/resource_dist_plugins.py",
"src/decisionengine_modules/glideinwms/sources/factory_entries.py"
] |
[
"# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC\n# SPDX-License-Identifier: Apache-2.0\n\nimport pandas as pd\n\n_RESOURCE_FROM_COLUMN_MAP = {\n \"Grid_Figure_Of_Merit\": \"Grid_Figure_Of_Merit\",\n \"GCE_Figure_Of_Merit\": \"FigureOfMerit\",\n \"AWS_Figure_Of_Merit\": \"AWS_Figure_Of_Merit\",\n \"Nersc_Figure_Of_Merit\": \"FigureOfMerit\",\n}\n\n\ndef order_resources(resources, logger=None):\n ordered_resources = []\n rss_foms = pd.DataFrame()\n\n for rss, column_name in _RESOURCE_FROM_COLUMN_MAP.items():\n fom_df = resources.get(rss)\n if logger is not None:\n logger.info(f\"Ordering resources based on {rss}\")\n if (fom_df is not None) and (fom_df.empty is False):\n # Create a new dataframe with just EntryName and FOM\n df = fom_df[[\"EntryName\", column_name]]\n # Rename the entry type specific FOM columns to just 'fom'\n df = df.rename(columns={column_name: \"FOM\"})\n # Append the results\n rss_foms = rss_foms.append(df)\n elif logger is not None:\n logger.info(f\"{rss} does not have any entries to order\")\n try:\n ordered_resources = rss_foms.sort_values(by=[\"FOM\", \"EntryName\"], ascending=True).reset_index(drop=True)\n except KeyError:\n if logger is not None:\n logger.exception(\n f'Unable to find Figure of Merrit \"FOM\" in the dataframe columns {list(resources.columns)}'\n )\n return ordered_resources\n\n\ndef fom_eligible_resources(resources, constraint=None, limit=None, logger=None):\n ordered_resources = order_resources(resources, logger)\n if constraint is None:\n return ordered_resources.head(limit)\n return ordered_resources.query(constraint).head(limit)\n",
"# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC\n# SPDX-License-Identifier: Apache-2.0\n\nfrom functools import partial\n\nimport pandas\n\nfrom decisionengine.framework.modules import Source\nfrom decisionengine.framework.modules.Source import Parameter\nfrom decisionengine_modules.htcondor import htcondor_query\nfrom decisionengine_modules.util.retry_function import retry_wrapper\n\n\[email protected]_config(\n Parameter(\"condor_config\", type=str, comment=\"path to condor configuration\"),\n Parameter(\n \"factories\",\n default=[],\n comment=\"\"\"Supported list entry layout:\n\n {\n 'collector_host': 'factory_collector.com',\n 'classad_attrs': [],\n 'constraints': 'HTCondor classad query constraints'\n }\n\"\"\",\n ),\n Parameter(\"max_retries\", default=0),\n Parameter(\"retry_interval\", default=0),\n)\[email protected](\n Factory_Entries_Grid=pandas.DataFrame,\n Factory_Entries_AWS=pandas.DataFrame,\n Factory_Entries_GCE=pandas.DataFrame,\n Factory_Entries_LCF=pandas.DataFrame,\n)\nclass FactoryEntries(Source.Source):\n def __init__(self, config):\n super().__init__(config)\n self.condor_config = config.get(\"condor_config\")\n self.factories = config.get(\"factories\", [])\n self._entry_gridtype_map = {\n \"Factory_Entries_Grid\": (\"gt2\", \"condor\"),\n \"Factory_Entries_AWS\": (\"ec2\",),\n \"Factory_Entries_GCE\": (\"gce\",),\n \"Factory_Entries_LCF\": (\"batch slurm\",),\n }\n\n # The combination of max_retries=10 and retry_interval=2 adds up to just\n # over 15 minutes\n self.max_retries = config.get(\"max_retries\", 0)\n self.retry_interval = config.get(\"retry_interval\", 0)\n\n self.subsystem_name = \"any\"\n\n def acquire(self):\n \"\"\"\n Acquire factory entries from the factory collector\n and return as pandas frame\n :rtype: :obj:`~pd.DataFrame`\n \"\"\"\n\n self.logger.debug(\"in FactoryEntries acquire\")\n dataframe = pandas.DataFrame()\n\n for factory in self.factories:\n collector_host = factory.get(\"collector_host\")\n constraint = f\"({factory.get('constraint', True)})&&(glideinmytype==\\\"glidefactory\\\")\"\n classad_attrs = factory.get(\"classad_attrs\")\n correction_map = factory.get(\"correction_map\")\n\n try:\n condor_status = htcondor_query.CondorStatus(\n subsystem_name=self.subsystem_name,\n pool_name=collector_host,\n group_attr=[\"GLIDEIN_GridType\"],\n logger=self.logger,\n )\n\n retry_wrapper(\n partial(condor_status.load, *(constraint, classad_attrs, self.condor_config)),\n max_retries=self.max_retries,\n retry_interval=self.retry_interval,\n logger=self.logger,\n )\n\n if correction_map is not None:\n for eachDict in condor_status.stored_data:\n for key, value in correction_map.items():\n if eachDict.get(key) is None:\n eachDict[key] = value\n\n df = pandas.DataFrame(condor_status.stored_data)\n if not df.empty:\n (col_host, sec_cols) = htcondor_query.split_collector_host(collector_host)\n df[\"CollectorHost\"] = [col_host] * len(df)\n if sec_cols != \"\":\n df[\"CollectorHosts\"] = [f\"{col_host},{sec_cols}\"] * len(df)\n else:\n df[\"CollectorHosts\"] = [col_host] * len(df)\n\n dataframe = pandas.concat([dataframe, df], ignore_index=True, sort=True)\n except htcondor_query.QueryError:\n self.logger.exception(f\"Failed to fetch glidefactory classads from collector host(s) {collector_host}\")\n except Exception:\n self.logger.exception(\n f\"Unexpected error fetching glidefactory classads from collector host(s) {collector_host}\"\n )\n\n if dataframe.empty:\n # There were no entry classads in the factory collector or\n # quering 
the collector failed\n return dict.fromkeys(self._entry_gridtype_map, pandas.DataFrame())\n\n results = {}\n for key, value in self._entry_gridtype_map.items():\n results[key] = dataframe.loc[(dataframe.GLIDEIN_GridType.isin(list(value)))]\n return results\n\n\nSource.describe(FactoryEntries)\n"
] |
[
[
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.DataFrame"
]
] |
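The `order_resources` logic above normalises differently named FOM columns to a single `FOM` column before sorting. The same merge in isolation; note that `DataFrame.append` (used above) was removed in pandas 2.0, so this sketch uses the equivalent `pd.concat`:

```python
import pandas as pd

# Per-resource frames with differently named FOM columns, as in the map above.
gce = pd.DataFrame({"EntryName": ["e1", "e2"], "FigureOfMerit": [2.0, 1.0]})
aws = pd.DataFrame({"EntryName": ["e3"], "AWS_Figure_Of_Merit": [1.5]})

frames = [
    gce[["EntryName", "FigureOfMerit"]].rename(columns={"FigureOfMerit": "FOM"}),
    aws[["EntryName", "AWS_Figure_Of_Merit"]].rename(
        columns={"AWS_Figure_Of_Merit": "FOM"}
    ),
]
ordered = pd.concat(frames).sort_values(by=["FOM", "EntryName"]).reset_index(drop=True)
print(ordered)  # e2 (1.0), e3 (1.5), e1 (2.0)
```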
moshes7/Competitions
|
[
"7434ed9ef51c6f8e61f87d180c025f4f0a7c32b9"
] |
[
"models/pretrained_xception.py"
] |
[
"from __future__ import print_function, division, absolute_import\n\n# -*- coding: utf-8 -*-\n\"\"\"pretrained-Xception.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1nrkV8ju4mc1uw5CqV7h8u7QInWkO0asL\n\"\"\"\n\nimport math\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom torch.nn import init\nimport torch\n\n__all__ = ['xception']\n\nmodel_urls = {\n 'xception':'https://www.dropbox.com/s/1hplpzet9d7dv29/xception-c0a72b38.pth.tar?dl=1'\n}\n\n\nclass SeparableConv2d(nn.Module):\n def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False):\n super(SeparableConv2d,self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=in_channels,bias=bias)\n self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)\n \n def forward(self,x):\n x = self.conv1(x)\n x = self.pointwise(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):\n super(Block, self).__init__()\n\n if out_filters != in_filters or strides!=1:\n self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)\n self.skipbn = nn.BatchNorm2d(out_filters)\n else:\n self.skip=None\n \n self.relu = nn.ReLU(inplace=True)\n rep=[]\n\n filters=in_filters\n if grow_first:\n rep.append(self.relu)\n rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))\n rep.append(nn.BatchNorm2d(out_filters))\n filters = out_filters\n\n for i in range(reps-1):\n rep.append(self.relu)\n rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))\n rep.append(nn.BatchNorm2d(filters))\n \n if not grow_first:\n rep.append(self.relu)\n rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))\n rep.append(nn.BatchNorm2d(out_filters))\n\n if not start_with_relu:\n rep = rep[1:]\n else:\n rep[0] = nn.ReLU(inplace=False)\n\n if strides != 1:\n rep.append(nn.MaxPool2d(3,strides,1))\n self.rep = nn.Sequential(*rep)\n\n def forward(self,inp):\n x = self.rep(inp)\n\n if self.skip is not None:\n skip = self.skip(inp)\n skip = self.skipbn(skip)\n else:\n skip = inp\n\n x+=skip\n return x\n\n\n\nclass Xception(nn.Module):\n\n def __init__(self, num_classes=1000):\n \"\"\" Constructor\n Args:\n num_classes: number of classes\n \"\"\"\n super(Xception, self).__init__()\n\n \n self.num_classes = num_classes\n\n self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.relu = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(32,64,3,bias=False)\n self.bn2 = nn.BatchNorm2d(64)\n\n self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)\n self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)\n self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)\n\n self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n\n self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n\n 
self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)\n\n self.conv3 = SeparableConv2d(1024,1536,3,1,1)\n self.bn3 = nn.BatchNorm2d(1536)\n\n self.conv4 = SeparableConv2d(1536,2048,3,1,1)\n self.bn4 = nn.BatchNorm2d(2048)\n\n self.fc = nn.Linear(2048, num_classes)\n\n\n\n #------- init weights --------\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n #-----------------------------\n\n\n\n\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n \n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n \n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.block5(x)\n x = self.block6(x)\n x = self.block7(x)\n x = self.block8(x)\n x = self.block9(x)\n x = self.block10(x)\n x = self.block11(x)\n x = self.block12(x)\n \n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n \n x = self.conv4(x)\n x = self.bn4(x)\n x = self.relu(x)\n\n x = F.adaptive_avg_pool2d(x, (1, 1))\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n \ndef weight_init(m):\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0.)\n\ndef xception(pretrained=False,**kwargs):\n \"\"\"\n Construct Xception.\n \"\"\"\n\n model = Xception(**kwargs)\n \n if pretrained:\n model.apply(weight_init)\n model_dict = model.state_dict()\n pretrained_dict = model_zoo.load_url(model_urls['xception'])\n filtered_dict = {k: v for k, v in pretrained_dict.items()\\\n if (k in model_dict and ('fc' not in k))}\n model_dict.update(filtered_dict)\n model.load_state_dict(model_dict)\n\n return model\n\n\nif __name__ == '__main__':\n # Building the model & assigning to device\n model = xception(pretrained = True, num_classes = 300)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n model = model.to(device)\n\n # Testing Inference\n a = torch.rand(32,3,299,299)\n a = a.to(device)\n with torch.no_grad():\n b = model(a)\n\n print('Done!')"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.init.xavier_uniform_",
"torch.no_grad",
"torch.rand",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] |
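`SeparableConv2d` above is a depthwise-separable convolution: a per-channel spatial convolution (`groups=in_channels`) followed by a 1×1 pointwise convolution that mixes channels. The equivalent pair in isolation (channel counts and spatial size are illustrative):

```python
import torch
import torch.nn as nn

# Depthwise conv: one 3x3 filter per input channel, no channel mixing.
depthwise = nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=32, bias=False)
# Pointwise conv: 1x1 kernel that mixes the 32 channels into 64.
pointwise = nn.Conv2d(32, 64, kernel_size=1, bias=False)

x = torch.rand(1, 32, 56, 56)
y = pointwise(depthwise(x))
print(y.shape)  # torch.Size([1, 64, 56, 56])
```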
hannesfelipe/oemof-solph
|
[
"a29802c73b9f3a1240a9ea6cec28f9d52bf1001c"
] |
[
"tests/test_scripts/test_solph/test_simple_model/test_simple_invest.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\" This example shows how to create an energysystem with oemof objects and\nsolve it with the solph module.\n\nData: example_data.csv\n\nThis file is part of project oemof (github.com/oemof/oemof). It's copyrighted\nby the contributors recorded in the version control history of the file,\navailable from its original location\noemof/tests/test_scripts/test_solph/test_simple_dispatch/test_simple_dispatch.py\n\nSPDX-License-Identifier: MIT\n\"\"\"\n\nimport os\n\nimport pandas as pd\nfrom nose.tools import eq_\nfrom oemof.network.network import Node\nfrom oemof.solph import Bus\nfrom oemof.solph import EnergySystem\nfrom oemof.solph import Flow\nfrom oemof.solph import Investment\nfrom oemof.solph import Model\nfrom oemof.solph import Sink\nfrom oemof.solph import Source\nfrom oemof.solph import Transformer\nfrom oemof.solph import processing\nfrom oemof.solph import views\nfrom oemof.tools import economics\n\n\ndef test_dispatch_example(solver='cbc', periods=24*5):\n \"\"\"Create an energy system and optimize the dispatch at least costs.\"\"\"\n Node.registry = None\n\n filename = os.path.join(os.path.dirname(__file__), 'input_data.csv')\n data = pd.read_csv(filename, sep=\",\")\n\n # ######################### create energysystem components ################\n\n # resource buses\n bcoal = Bus(label='coal', balanced=False)\n bgas = Bus(label='gas', balanced=False)\n boil = Bus(label='oil', balanced=False)\n blig = Bus(label='lignite', balanced=False)\n\n # electricity and heat\n bel = Bus(label='b_el')\n bth = Bus(label='b_th')\n\n # an excess and a shortage variable can help to avoid infeasible problems\n excess_el = Sink(label='excess_el', inputs={bel: Flow()})\n # shortage_el = Source(label='shortage_el',\n # outputs={bel: Flow(variable_costs=200)})\n\n # sources\n ep_wind = economics.annuity(capex=1000, n=20, wacc=0.05)\n wind = Source(label='wind', outputs={bel: Flow(\n fix=data['wind'],\n investment=Investment(ep_costs=ep_wind, existing=100))})\n\n ep_pv = economics.annuity(capex=1500, n=20, wacc=0.05)\n pv = Source(label='pv', outputs={bel: Flow(\n fix=data['pv'],\n investment=Investment(ep_costs=ep_pv, existing=80))})\n\n # demands (electricity/heat)\n demand_el = Sink(label='demand_elec', inputs={bel: Flow(nominal_value=85,\n fix=data['demand_el'])})\n\n demand_th = Sink(label='demand_therm',\n inputs={bth: Flow(nominal_value=40,\n fix=data['demand_th'])})\n\n # power plants\n pp_coal = Transformer(label='pp_coal',\n inputs={bcoal: Flow()},\n outputs={bel: Flow(nominal_value=20.2,\n variable_costs=25)},\n conversion_factors={bel: 0.39})\n\n pp_lig = Transformer(label='pp_lig',\n inputs={blig: Flow()},\n outputs={bel: Flow(nominal_value=11.8,\n variable_costs=19)},\n conversion_factors={bel: 0.41})\n\n pp_gas = Transformer(label='pp_gas',\n inputs={bgas: Flow()},\n outputs={bel: Flow(nominal_value=41,\n variable_costs=40)},\n conversion_factors={bel: 0.50})\n\n pp_oil = Transformer(label='pp_oil',\n inputs={boil: Flow()},\n outputs={bel: Flow(nominal_value=5,\n variable_costs=50)},\n conversion_factors={bel: 0.28})\n\n # combined heat and power plant (chp)\n pp_chp = Transformer(label='pp_chp',\n inputs={bgas: Flow()},\n outputs={bel: Flow(nominal_value=30,\n variable_costs=42),\n bth: Flow(nominal_value=40)},\n conversion_factors={bel: 0.3, bth: 0.4})\n\n # heatpump with a coefficient of performance (COP) of 3\n b_heat_source = Bus(label='b_heat_source')\n\n heat_source = Source(label='heat_source', outputs={b_heat_source: Flow()})\n\n cop = 3\n 
heat_pump = Transformer(label='el_heat_pump',\n inputs={bel: Flow(), b_heat_source: Flow()},\n outputs={bth: Flow(nominal_value=10)},\n conversion_factors={\n bel: 1/3, b_heat_source: (cop-1)/cop})\n\n datetimeindex = pd.date_range('1/1/2012', periods=periods, freq='H')\n energysystem = EnergySystem(timeindex=datetimeindex)\n energysystem.add(bcoal, bgas, boil, bel, bth, blig, excess_el, wind, pv,\n demand_el, demand_th, pp_coal, pp_lig, pp_oil, pp_gas,\n pp_chp, b_heat_source, heat_source, heat_pump)\n\n # ################################ optimization ###########################\n\n # create optimization model based on energy_system\n optimization_model = Model(energysystem=energysystem)\n\n # solve problem\n optimization_model.solve(solver=solver)\n\n # write back results from optimization object to energysystem\n optimization_model.results()\n\n # ################################ results ################################\n\n # generic result object\n results = processing.results(om=optimization_model)\n\n # subset of results that includes all flows into and from electrical bus\n # sequences are stored within a pandas.DataFrames and scalars e.g.\n # investment values within a pandas.Series object.\n # in this case the entry data['scalars'] does not exist since no investment\n # variables are used\n data = views.node(results, 'b_el')\n\n # generate results to be evaluated in tests\n comp_results = data['sequences'].sum(axis=0).to_dict()\n comp_results['pv_capacity'] = results[(pv, bel)]['scalars'].invest\n comp_results['wind_capacity'] = results[(wind, bel)]['scalars'].invest\n\n test_results = {\n (('wind', 'b_el'), 'flow'): 9239,\n (('pv', 'b_el'), 'flow'): 1147,\n (('b_el', 'demand_elec'), 'flow'): 7440,\n (('b_el', 'excess_el'), 'flow'): 6261,\n (('pp_chp', 'b_el'), 'flow'): 477,\n (('pp_lig', 'b_el'), 'flow'): 850,\n (('pp_gas', 'b_el'), 'flow'): 934,\n (('pp_coal', 'b_el'), 'flow'): 1256,\n (('pp_oil', 'b_el'), 'flow'): 0,\n (('b_el', 'el_heat_pump'), 'flow'): 202,\n 'pv_capacity': 44,\n 'wind_capacity': 246,\n }\n\n for key in test_results.keys():\n eq_(int(round(comp_results[key])), int(round(test_results[key])))\n"
] |
[
[
"pandas.read_csv",
"pandas.date_range"
]
] |
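Two small building blocks from the test above in isolation: the hourly `DatetimeIndex` the `EnergySystem` runs on, and the annuity used as the specific investment cost `ep_costs`. The formula below is the standard annuity; treating it as equivalent to `oemof.tools.economics.annuity` with default options is an assumption of this sketch:

```python
import pandas as pd

# Hourly index for 5 days, matching periods=24 * 5 above.
idx = pd.date_range("1/1/2012", periods=24 * 5, freq="H")
print(idx[0], idx[-1], len(idx))  # 2012-01-01 00:00:00 2012-01-05 23:00:00 120

def annuity(capex, n, wacc):
    # Standard annuity formula (assumed to match economics.annuity defaults).
    return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1)

print(round(annuity(capex=1000, n=20, wacc=0.05), 2))  # ~80.24
```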
BCV-Uniandes/SIMBA
|
[
"1f25cdd5005ad71918938ea6bef4544a4c24281b"
] |
[
"simba/train.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\nBone Age Assessment SIMBA train routine.\n\"\"\"\n\n# Standard lib imports\nimport os\nimport csv\nimport glob\nimport time\nimport argparse\nimport warnings\nimport pandas as pd\nimport os.path as osp\n\n# PyTorch imports\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport horovod.torch as hvd\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\n# Other imports\nfrom tqdm import tqdm\n\n# Local imports\nfrom models.simba import SIMBA\nfrom data.data_loader import BoneageDataset as Dataset\nfrom utils import AverageMeter\nfrom utils import metric_average\n\nwarnings.filterwarnings(\"ignore\")\n\nparser = argparse.ArgumentParser()\n\n# Model settings\nparser.add_argument('--relative-age', default=False, action='store_true',\n help='Train model with relative age')\nparser.add_argument('--chronological-age', default=False, action='store_true',\n help='Train model with chronological age multiplier')\nparser.add_argument('--gender-multiplier', default=False, action='store_true',\n help='Train model with gender multiplier')\n\n# Dataloading-related settings\nparser.add_argument('--cropped', default=False, action='store_true',\n help='Train model with cropped images according to bbox')\nparser.add_argument('--dataset', default='RSNA', type=str,choices=['RSNA','RHPE'],\n help='Dataset to perform training')\n\nparser.add_argument('--data-train', default='data/train/images', type=str,\n help='path to train data folder')\nparser.add_argument('--heatmaps-train', default='data/train/heatmaps', type=str,\n help='path to train heatmaps data folder')\nparser.add_argument('--ann-path-train', default='train.csv', type=str,\n help='path to BAA annotations file')\nparser.add_argument('--rois-path-train', default='train.json',\n type=str, help='path to ROIs annotations in coco format')\n\nparser.add_argument('--data-val', default='data/val/images', type=str,\n help='path to val data folder')\nparser.add_argument('--heatmaps-val', default='data/val/heatmaps', type=str,\n help='path to train heatmaps data folder')\nparser.add_argument('--ann-path-val', default='val.csv', type=str,\n help='path to BAA annotations file')\nparser.add_argument('--rois-path-val', default='val.json',\n type=str, help='path to ROIs annotations in coco format')\n\nparser.add_argument('--trainval', default=False, action='store_true',\n help='Train model with train and validation sets')\n\nparser.add_argument('--save-folder', default='TRAIN/new_test/',\n help='location to save checkpoint models')\nparser.add_argument('--snapshot', default='boneage_bonet_weights.pth',\n help='path to weight snapshot file')\nparser.add_argument('--optim-snapshot', type=str,\n default='boneage_bonet_optim.pth',\n help='path to optimizer state snapshot')\n\nparser.add_argument('--eval-first', default=False, action='store_true',\n help='evaluate model weights before training')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n\n# Training procedure settings\nparser.add_argument('--batch-size', default=1, type=int,\n help='Batch size for training')\nparser.add_argument('--epochs', type=int, default=20,\n help='upper epoch limit')\nparser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,\n help='initial learning rate')\nparser.add_argument('--patience', default=2, type=int,\n 
help='patience epochs for LR decreasing')\nparser.add_argument('--start-epoch', type=int, default=1,\n help='epoch number to resume')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--log-interval', type=int, default=30, metavar='N',\n help='report interval')\n\nparser.add_argument('--gpu', type=str, default='2,3')\n\nargs = parser.parse_args()\n\nargs_dict = vars(args)\n\n\nprint('Argument list to program')\nprint('\\n'.join(['--{0} {1}'.format(arg, args_dict[arg])\n for arg in args_dict]))\nprint('\\n\\n')\n\ntorch.manual_seed(args.seed)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\nif not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n\n# Horovod settings\nhvd.init()\ntorch.cuda.set_device(hvd.local_rank())\ntorch.cuda.manual_seed(hvd.size())\n\nargs.distributed = hvd.size() > 1\nargs.rank = hvd.rank()\nargs.size = hvd.size()\n\n# Create the network architecture and load the best model\nnet = SIMBA(\n chronological_age=args.chronological_age,\n gender_multiplier=args.gender_multiplier\n)\n\nif args.rank == 0:\n print('---> Number of params: {}'.format(\n sum([p.data.nelement() for p in net.parameters()])))\n\nmodel_to_load = args.snapshot\nif osp.exists(model_to_load) and args.rank == 0:\n print('Loading state dict from: {0}'.format(model_to_load))\n snapshot_dict = torch.load(model_to_load, map_location=lambda storage, loc: storage) \n weights = net.state_dict()\n new_snapshot_dict = snapshot_dict.copy()\n for key in snapshot_dict:\n if key not in weights.keys():\n new_key = 'inception_v3.' + key\n if new_key in snapshot_dict and weights[new_key].shape == snapshot_dict[key].shape:\n new_snapshot_dict[new_key] = snapshot_dict[key]\n new_snapshot_dict.pop(key)\n elif weights[key].shape != snapshot_dict[key].shape:\n new_snapshot_dict.pop(key)\n\n weights.update(new_snapshot_dict)\n net.load_state_dict(weights)\n\nnet = net.to(device)\n\n# Criterion\ncriterion = nn.L1Loss()\n\n# Optimizer\noptimizer = optim.Adam(net.parameters(), lr=args.lr * args.size)\nannealing = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, factor=0.8, patience=args.patience, cooldown=5,\n min_lr=0.00001, eps=0.0001, verbose=True)\n\n\noptim_to_load = args.optim_snapshot\nif osp.exists(optim_to_load) and args.rank == 0:\n print('loading optim snapshot from {}'.format(optim_to_load))\n optimizer.load_state_dict(torch.load(optim_to_load, map_location=lambda storage,\n loc: storage))\n\n# Horovod\nhvd.broadcast_parameters(net.state_dict(), root_rank=0)\noptimizer = hvd.DistributedOptimizer(optimizer,\n named_parameters=net.named_parameters()\n )\nhvd.broadcast_optimizer_state(optimizer, root_rank=0)\ngroup = optimizer.param_groups[0]\ngroup['betas'] = (float(group['betas'][0]), float(group['betas'][1]))\n\n# Dataloaders\ntrain_transform = transforms.Compose([transforms.Resize((500, 500)),\n transforms.RandomAffine(\n 20, translate=(0.2, 0.2),\n scale=(1, 1.2)\n ),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()]\n )\n\nval_transform = transforms.Compose([transforms.Resize((500, 500)),\n transforms.ToTensor()]\n )\n\nif args.trainval:\n train_dataset = Dataset([args.data_train,args.data_val],\n [args.heatmaps_train, args.heatmaps_val],\n [args.ann_path_train,args.ann_path_val],\n [args.rois_path_train,args.rois_path_val],\n img_transform=train_transform, crop=args.cropped,\n dataset=args.dataset\n )\nelse:\n train_dataset = Dataset([args.data_train], 
[args.heatmaps_train],\n [args.ann_path_train],[args.rois_path_train],\n img_transform=train_transform, crop=args.cropped,\n dataset=args.dataset\n )\n\nval_dataset = Dataset([args.data_val], [args.heatmaps_val],\n [args.ann_path_val], [args.rois_path_val],\n img_transform=val_transform, crop=args.cropped,\n dataset=args.dataset\n )\n\n# Data samplers\ntrain_sampler = None\nval_sampler = None\n\nif args.distributed:\n train_sampler = DistributedSampler(train_dataset,\n num_replicas=args.size,\n rank=args.rank\n )\n val_sampler = DistributedSampler(val_dataset,\n num_replicas=args.size,\n rank=args.rank\n )\n\ntrain_loader = DataLoader(train_dataset,\n shuffle=(train_sampler is None),\n sampler=train_sampler,\n batch_size=args.batch_size,\n num_workers=args.workers\n )\n\nval_loader = DataLoader(val_dataset,\n shuffle=(val_sampler is None),\n sampler=val_sampler,\n batch_size=1,\n num_workers=args.workers\n )\n\ndef main():\n if args.rank == 0:\n print('Train begins...')\n best_val_loss = None\n # Find best model in validation\n if osp.exists(osp.join(args.save_folder, 'train.csv')):\n with open(osp.join(args.save_folder, 'train.csv')) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n val_list = []\n for row in csv_reader:\n val_list.append(float(row[2]))\n best_val_loss = min(val_list)\n if args.eval_first:\n val_loss = evaluate(relative_age=args.relative_age)\n try:\n out_file = open(os.path.join(args.save_folder, 'train.csv'), 'a+')\n \n for epoch in range(args.start_epoch, args.epochs + 1):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n val_sampler.set_epoch(epoch)\n if args.rank == 0:\n epoch_start_time = time.time()\n train_loss = train(epoch, relative_age=args.relative_age)\n annealing.step(train_loss)\n val_loss = evaluate(relative_age=args.relative_age)\n if args.rank == 0:\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s '\n '| epoch loss {:.6f} |'.format(\n epoch, time.time() - epoch_start_time, train_loss))\n print('-' * 89)\n out_file.write('{}, {}, {}\\n'.format(epoch, train_loss, val_loss))\n out_file.flush()\n\n if best_val_loss is None or val_loss < best_val_loss and args.rank == 0:\n best_val_loss = val_loss\n filename = osp.join(args.save_folder, 'boneage_bonet_weights.pth')\n torch.save(net.state_dict(), filename)\n out_file.close()\n except KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from training early')\n\ndef train(epoch, relative_age=True):\n net.train()\n total_loss = AverageMeter()\n epoch_loss_stats = AverageMeter()\n time_stats = AverageMeter()\n loss = 0\n optimizer.zero_grad()\n for (batch_idx, (imgs, bone_ages, genders, chronological_ages, _)) in enumerate(train_loader):\n imgs = imgs.to(device)\n bone_ages = bone_ages.to(device)\n genders = genders.to(device)\n chronological_ages = chronological_ages.to(device)\n if relative_age:\n relative_ages = chronological_ages.squeeze(1) - bone_ages\n\n start_time = time.time()\n outputs = net(imgs, genders, chronological_ages)\n if relative_age:\n loss = criterion(outputs.squeeze(), relative_ages)\n else:\n loss = criterion(outputs.squeeze(), bone_ages)\n loss.backward()\n optimizer.step()\n\n loss = metric_average(loss.item(), 'loss')\n\n time_stats.update(time.time() - start_time, 1)\n total_loss.update(loss, 1)\n epoch_loss_stats.update(loss, 1)\n optimizer.zero_grad()\n\n if (batch_idx % args.log_interval == 0) and args.rank == 0:\n elapsed_time = time_stats.avg\n print(' [{:5d}] ({:5d}/{:5d}) | ms/batch {:.4f} |'\n ' loss {:.6f} | avg loss {:.6f} | lr 
{:.7f}'.format(\n epoch, batch_idx, len(train_loader),\n elapsed_time * 1000, total_loss.avg,\n epoch_loss_stats.avg,\n optimizer.param_groups[0]['lr']))\n total_loss.reset()\n\n epoch_total_loss = epoch_loss_stats.avg\n args.resume_iter = 0\n\n if args.rank == 0:\n filename = 'boneage_bonet_snapshot.pth'\n filename = osp.join(args.save_folder, filename)\n torch.save(net.state_dict(), filename)\n\n optim_filename = 'boneage_bonet_optim.pth'\n optim_filename = osp.join(args.save_folder, optim_filename)\n torch.save(optimizer.state_dict(), optim_filename)\n\n return epoch_total_loss\n\n\ndef evaluate(relative_age=True):\n net.eval()\n epoch_total_loss = AverageMeter()\n for (batch_idx, (imgs, bone_ages, genders, chronological_ages, _)) in enumerate(val_loader):\n imgs = imgs.to(device)\n bone_ages = bone_ages.to(device)\n genders = genders.to(device)\n chronological_ages = chronological_ages.to(device)\n if relative_age:\n relative_ages = chronological_ages.squeeze(1) - bone_ages\n\n with torch.no_grad():\n outputs = net(imgs, genders, chronological_ages)\n if relative_age:\n loss = criterion(outputs.squeeze(), relative_ages)\n else:\n loss = criterion(outputs.squeeze(), bone_ages)\n loss = metric_average(loss.item(), 'loss')\n epoch_total_loss.update(loss, 1)\n\n epoch_total_loss = epoch_total_loss.avg\n\n if args.rank == 0:\n print('Val loss: {:.5f}'.format(epoch_total_loss))\n\n return epoch_total_loss\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.L1Loss"
]
] |
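The training loop above steps `ReduceLROnPlateau` on the epoch loss, so the learning rate only shrinks after `patience` non-improving epochs. The scheduler wiring in isolation (factor/patience/cooldown mirror the script's argument defaults; the constant loss and tiny model are stand-ins):

```python
from torch import nn, optim

net = nn.Linear(4, 1)
optimizer = optim.Adam(net.parameters(), lr=1e-5)
annealing = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.8, patience=2, cooldown=5, min_lr=0.00001, eps=0.0001
)

for epoch in range(1, 4):
    train_loss = 1.0            # stand-in for the epoch's average training loss
    annealing.step(train_loss)  # LR drops only after `patience` flat epochs
print(optimizer.param_groups[0]["lr"])
```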
chbehrens/brian2
|
[
"46b5264caa5375ae13084508b5c1049e0c9e019e"
] |
[
"brian2/core/variables.py"
] |
[
"from __future__ import absolute_import\n'''\nClasses used to specify the type of a function, variable or common\nsub-expression.\n'''\nimport collections\nimport functools\nimport numbers\n\nimport numpy as np\nimport sympy\nfrom past.builtins import basestring\n\nfrom brian2.units.fundamentalunits import (Quantity, get_unit, DIMENSIONLESS,\n fail_for_dimension_mismatch,\n Dimension)\nfrom brian2.utils.logger import get_logger\nfrom brian2.utils.stringtools import get_identifiers, word_substitute\nfrom brian2.utils.caching import CacheKey\n\nfrom .base import weakproxy_with_fallback, device_override\nfrom .preferences import prefs\n\n__all__ = ['Variable',\n 'Constant',\n 'ArrayVariable',\n 'DynamicArrayVariable',\n 'Subexpression',\n 'AuxiliaryVariable',\n 'VariableView',\n 'Variables',\n 'LinkedVariable',\n 'linked_var'\n ]\n\n\nlogger = get_logger(__name__)\n\n\ndef get_dtype(obj):\n '''\n Helper function to return the `numpy.dtype` of an arbitrary object.\n\n Parameters\n ----------\n obj : object\n Any object (but typically some kind of number or array).\n\n Returns\n -------\n dtype : `numpy.dtype`\n The type of the given object.\n '''\n if hasattr(obj, 'dtype'):\n return obj.dtype\n else:\n return np.obj2sctype(type(obj))\n\n\ndef get_dtype_str(val):\n '''\n Returns canonical string representation of the dtype of a value or dtype\n \n Returns\n -------\n \n dtype_str : str\n The numpy dtype name\n '''\n if isinstance(val, np.dtype):\n return val.name\n if isinstance(val, type):\n return get_dtype_str(val())\n\n is_bool = (val is True or\n val is False or\n val is np.True_ or\n val is np.False_)\n if is_bool:\n return 'bool'\n if hasattr(val, 'dtype'):\n return get_dtype_str(val.dtype)\n if isinstance(val, numbers.Number):\n return get_dtype_str(np.array(val).dtype)\n \n return 'unknown[%s, %s]' % (str(val), val.__class__.__name__)\n\n\ndef variables_by_owner(variables, owner):\n owner_name = getattr(owner, 'name', None)\n return dict([(varname, var) for varname, var in variables.items()\n if getattr(var.owner, 'name', None) is owner_name])\n\n\nclass Variable(CacheKey):\n '''\n An object providing information about model variables (including implicit\n variables such as ``t`` or ``xi``). This class should never be\n instantiated outside of testing code, use one of its subclasses instead.\n \n Parameters\n ----------\n name : 'str'\n The name of the variable. Note that this refers to the *original*\n name in the owning group. The same variable may be known under other\n names in other groups (e.g. the variable ``v`` of a `NeuronGroup` is\n known as ``v_post`` in a `Synapse` connecting to the group).\n dimensions : `Dimension`, optional\n The physical dimensions of the variable.\n owner : `Nameable`, optional\n The object that \"owns\" this variable, e.g. the `NeuronGroup` or\n `Synapses` object that declares the variable in its model equations.\n Defaults to ``None`` (the value used for `Variable` objects without an\n owner, e.g. external `Constant`\\ s).\n dtype : `dtype`, optional\n The dtype used for storing the variable. Defaults to the preference\n `core.default_scalar.dtype`.\n scalar : bool, optional\n Whether the variable is a scalar value (``True``) or vector-valued, e.g.\n defined for every neuron (``False``). Defaults to ``False``.\n constant: bool, optional\n Whether the value of this variable can change during a run. Defaults\n to ``False``.\n read_only : bool, optional\n Whether this is a read-only variable, i.e. 
a variable that is set\n internally and cannot be changed by the user (this is used for example\n for the variable ``N``, the number of neurons in a group). Defaults\n to ``False``.\n array : bool, optional\n Whether this variable is an array. Allows for simpler check than testing\n ``isinstance(var, ArrayVariable)``. Defaults to ``False``.\n '''\n\n _cache_irrelevant_attributes = {'owner'}\n\n def __init__(self, name, dimensions=DIMENSIONLESS, owner=None, dtype=None,\n scalar=False, constant=False, read_only=False, dynamic=False,\n array=False):\n assert isinstance(dimensions, Dimension)\n\n #: The variable's dimensions.\n self.dim = dimensions\n\n #: The variable's name.\n self.name = name\n\n #: The `Group` to which this variable belongs.\n self.owner = weakproxy_with_fallback(owner) if owner is not None else None\n\n #: The dtype used for storing the variable.\n self.dtype = dtype\n if dtype is None:\n self.dtype = prefs.core.default_float_dtype\n\n if self.is_boolean:\n if dimensions is not DIMENSIONLESS:\n raise ValueError('Boolean variables can only be dimensionless')\n\n #: Whether the variable is a scalar\n self.scalar = scalar\n\n #: Whether the variable is constant during a run\n self.constant = constant\n\n #: Whether the variable is read-only\n self.read_only = read_only\n\n #: Whether the variable is dynamically sized (only for non-scalars)\n self.dynamic = dynamic\n\n #: Whether the variable is an array\n self.array = array\n\n @property\n def is_boolean(self):\n return np.issubdtype(self.dtype, np.bool_)\n\n @property\n def is_integer(self):\n return np.issubdtype(self.dtype, np.signedinteger)\n\n @property\n def dtype_str(self):\n '''\n String representation of the numpy dtype\n '''\n return get_dtype_str(self)\n\n @property\n def unit(self):\n '''\n The `Unit` of this variable\n '''\n return get_unit(self.dim)\n\n def get_value(self):\n '''\n Return the value associated with the variable (without units). This\n is the way variables are accessed in generated code.\n '''\n raise TypeError('Cannot get value for variable %s' % self)\n\n def set_value(self, value):\n '''\n Set the value associated with the variable.\n '''\n raise TypeError('Cannot set value for variable %s' % self)\n\n def get_value_with_unit(self):\n '''\n Return the value associated with the variable (with units).\n '''\n return Quantity(self.get_value(), self.dim)\n\n def get_addressable_value(self, name, group):\n '''\n Get the value (without units) of this variable in a form that can be\n indexed in the context of a group. For example, if a\n postsynaptic variable ``x`` is accessed in a synapse ``S`` as\n ``S.x_post``, the synaptic indexing scheme can be used.\n\n Parameters\n ----------\n name : str\n The name of the variable\n group : `Group`\n The group providing the context for the indexing. Note that this\n `group` is not necessarily the same as `Variable.owner`: a variable\n owned by a `NeuronGroup` can be indexed in a different way if\n accessed via a `Synapses` object.\n\n Returns\n -------\n variable : object\n The variable in an indexable form (without units).\n '''\n return self.get_value()\n\n def get_addressable_value_with_unit(self, name, group):\n '''\n Get the value (with units) of this variable in a form that can be\n indexed in the context of a group. 
For example, if a postsynaptic\n variable ``x`` is accessed in a synapse ``S`` as ``S.x_post``, the\n synaptic indexing scheme can be used.\n\n Parameters\n ----------\n name : str\n The name of the variable\n group : `Group`\n The group providing the context for the indexing. Note that this\n `group` is not necessarily the same as `Variable.owner`: a variable\n owned by a `NeuronGroup` can be indexed in a different way if\n accessed via a `Synapses` object.\n\n Returns\n -------\n variable : object\n The variable in an indexable form (with units).\n '''\n return self.get_value_with_unit()\n\n def get_len(self):\n '''\n Get the length of the value associated with the variable or ``0`` for\n a scalar variable.\n '''\n if self.scalar:\n return 0\n else:\n return len(self.get_value())\n\n def __len__(self):\n return self.get_len()\n\n def __repr__(self):\n description = ('<{classname}(dimensions={dimensions}, '\n ' dtype={dtype}, scalar={scalar}, constant={constant},'\n ' read_only={read_only})>')\n return description.format(classname=self.__class__.__name__,\n dimensions=repr(self.dim),\n dtype=repr(self.dtype),\n scalar=repr(self.scalar),\n constant=repr(self.constant),\n read_only=repr(self.read_only))\n\n\n\n# ------------------------------------------------------------------------------\n# Concrete classes derived from `Variable` -- these are the only ones ever\n# instantiated.\n# ------------------------------------------------------------------------------\n\nclass Constant(Variable):\n '''\n A scalar constant (e.g. the number of neurons ``N``). Information such as\n the dtype or whether this variable is a boolean are directly derived from\n the `value`. Most of the time `Variables.add_constant` should be used\n instead of instantiating this class directly.\n\n Parameters\n ----------\n name : str\n The name of the variable\n dimensions : `Dimension`, optional\n The physical dimensions of the variable. Note that the variable itself\n (as referenced by value) should never have units attached.\n value: reference to the variable value\n The value of the constant.\n owner : `Nameable`, optional\n The object that \"owns\" this variable, for constants that belong to a\n specific group, e.g. the ``N`` constant for a `NeuronGroup`. External\n constants will have ``None`` (the default value).\n '''\n def __init__(self, name, value, dimensions=DIMENSIONLESS, owner=None):\n # Determine the type of the value\n is_bool = (value is True or\n value is False or\n value is np.True_ or\n value is np.False_)\n\n if is_bool:\n dtype = np.bool\n else:\n dtype = get_dtype(value)\n\n # Use standard Python types if possible for numpy scalars (generates\n # nicer code for C++ when using weave)\n if getattr(value, 'shape', None) == () and hasattr(value, 'dtype'):\n numpy_type = value.dtype\n if np.can_cast(numpy_type, np.int_):\n value = int(value)\n elif np.can_cast(numpy_type, np.float_):\n value = float(value)\n elif np.can_cast(numpy_type, np.complex_):\n value = complex(value)\n elif value is np.True_:\n value = True\n elif value is np.False_:\n value = False\n\n #: The constant's value\n self.value = value\n\n super(Constant, self).__init__(dimensions=dimensions, name=name, owner=owner,\n dtype=dtype, scalar=True, constant=True,\n read_only=True)\n\n def get_value(self):\n return self.value\n\n\nclass AuxiliaryVariable(Variable):\n '''\n Variable description for an auxiliary variable (most likely one that is\n added automatically to abstract code, e.g. 
``_cond`` for a threshold\n condition), specifying its type and unit for code generation. Most of the\n time `Variables.add_auxiliary_variable` should be used instead of\n instantiating this class directly.\n\n Parameters\n ----------\n name : str\n The name of the variable\n dimensions : `Dimension`, optional\n The physical dimensions of the variable.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `core.default_float_dtype`.\n scalar : bool, optional\n Whether the variable is a scalar value (``True``) or vector-valued, e.g.\n defined for every neuron (``False``). Defaults to ``False``.\n '''\n def __init__(self, name, dimensions=DIMENSIONLESS, dtype=None, scalar=False):\n super(AuxiliaryVariable, self).__init__(dimensions=dimensions,\n name=name, dtype=dtype,\n scalar=scalar)\n\n def get_value(self):\n raise TypeError('Cannot get the value for an auxiliary variable (%s).' % self.name)\n\n\nclass ArrayVariable(Variable):\n '''\n An object providing information about a model variable stored in an array\n (for example, all state variables). Most of the time `Variables.add_array`\n should be used instead of instantiating this class directly.\n\n Parameters\n ----------\n name : 'str'\n The name of the variable. Note that this refers to the *original*\n name in the owning group. The same variable may be known under other\n names in other groups (e.g. the variable ``v`` of a `NeuronGroup` is\n known as ``v_post`` in a `Synapse` connecting to the group).\n dimensions : `Dimension`, optional\n The physical dimensions of the variable\n owner : `Nameable`\n The object that \"owns\" this variable, e.g. the `NeuronGroup` or\n `Synapses` object that declares the variable in its model equations.\n size : int\n The size of the array\n device : `Device`\n The device responsible for the memory access.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `core.default_float_dtype`.\n constant : bool, optional\n Whether the variable's value is constant during a run.\n Defaults to ``False``.\n scalar : bool, optional\n Whether this array is a 1-element array that should be treated like a\n scalar (e.g. for a single delay value across synapses). Defaults to\n ``False``.\n read_only : bool, optional\n Whether this is a read-only variable, i.e. a variable that is set\n internally and cannot be changed by the user. Defaults\n to ``False``.\n unique : bool, optional\n Whether the values in this array are all unique. This information is\n only important for variables used as indices and does not have to\n reflect the actual contents of the array but only the possibility of\n non-uniqueness (e.g. synaptic indices are always unique but the\n corresponding pre- and post-synaptic indices are not). 
Defaults to\n ``False``.\n '''\n def __init__(self, name, owner, size, device, dimensions=DIMENSIONLESS,\n dtype=None, constant=False, scalar=False, read_only=False,\n dynamic=False, unique=False):\n super(ArrayVariable, self).__init__(dimensions=dimensions, name=name,\n owner=owner,\n dtype=dtype, scalar=scalar,\n constant=constant,\n read_only=read_only,\n dynamic=dynamic,\n array=True)\n\n #: Wether all values in this arrays are necessarily unique (only\n #: relevant for index variables).\n self.unique = unique\n\n #: The `Device` responsible for memory access.\n self.device = device\n\n #: The size of this variable.\n self.size = size\n\n if scalar and size != 1:\n raise ValueError(('Scalar variables need to have size 1, not '\n 'size %d.') % size)\n\n #: Another variable, on which the write is conditioned (e.g. a variable\n #: denoting the absence of refractoriness)\n self.conditional_write = None\n\n def set_conditional_write(self, var):\n if not var.is_boolean:\n raise TypeError(('A variable can only be conditionally writeable '\n 'depending on a boolean variable, %s is not '\n 'boolean.') % var.name)\n self.conditional_write = var\n\n def get_value(self):\n return self.device.get_value(self)\n\n def set_value(self, value):\n self.device.fill_with_array(self, value)\n\n def get_len(self):\n return self.size\n\n def get_addressable_value(self, name, group):\n return VariableView(name=name, variable=self, group=group,\n dimensions=None)\n\n def get_addressable_value_with_unit(self, name, group):\n return VariableView(name=name, variable=self, group=group,\n dimensions=self.dim)\n\n\nclass DynamicArrayVariable(ArrayVariable):\n '''\n An object providing information about a model variable stored in a dynamic\n array (used in `Synapses`). Most of the time `Variables.add_dynamic_array`\n should be used instead of instantiating this class directly.\n\n Parameters\n ----------\n name : 'str'\n The name of the variable. Note that this refers to the *original*\n name in the owning group. The same variable may be known under other\n names in other groups (e.g. the variable ``v`` of a `NeuronGroup` is\n known as ``v_post`` in a `Synapse` connecting to the group).\n dimensions : `Dimension`, optional\n The physical dimensions of the variable.\n owner : `Nameable`\n The object that \"owns\" this variable, e.g. the `NeuronGroup` or\n `Synapses` object that declares the variable in its model equations.\n size : int or tuple of int\n The (initial) size of the variable.\n device : `Device`\n The device responsible for the memory access.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `core.default_float_dtype`.\n constant : bool, optional\n Whether the variable's value is constant during a run.\n Defaults to ``False``.\n needs_reference_update : bool, optional\n Whether the code objects need a new reference to the underlying data at\n every time step. This should be set if the size of the array can be\n changed by other code objects. Defaults to ``False``.\n scalar : bool, optional\n Whether this array is a 1-element array that should be treated like a\n scalar (e.g. for a single delay value across synapses). Defaults to\n ``False``.\n read_only : bool, optional\n Whether this is a read-only variable, i.e. a variable that is set\n internally and cannot be changed by the user. Defaults\n to ``False``.\n unique : bool, optional\n Whether the values in this array are all unique. 
This information is\n only important for variables used as indices and does not have to\n reflect the actual contents of the array but only the possibility of\n non-uniqueness (e.g. synaptic indices are always unique but the\n corresponding pre- and post-synaptic indices are not). Defaults to\n ``False``.\n '''\n # The size of a dynamic variable can of course change and changes in\n # size should not invalidate the cache\n _cache_irrelevant_attributes = (ArrayVariable._cache_irrelevant_attributes |\n {'size'})\n\n def __init__(self, name, owner, size, device, dimensions=DIMENSIONLESS,\n dtype=None, constant=False, needs_reference_update=False,\n resize_along_first=False, scalar=False, read_only=False,\n unique=False):\n\n if isinstance(size, int):\n ndim = 1\n else:\n ndim = len(size)\n\n #: The number of dimensions\n self.ndim = ndim\n\n if constant and needs_reference_update:\n raise ValueError('A variable cannot be constant and '\n 'need reference updates')\n #: Whether this variable needs an update of the reference to the\n #: underlying data whenever it is passed to a code object\n self.needs_reference_update = needs_reference_update\n\n #: Whether this array will be only resized along the first dimension\n self.resize_along_first = resize_along_first\n\n super(DynamicArrayVariable, self).__init__(dimensions=dimensions,\n owner=owner,\n name=name,\n size=size,\n device=device,\n constant=constant,\n dtype=dtype,\n scalar=scalar,\n dynamic=True,\n read_only=read_only,\n unique=unique)\n\n\n @property\n def dimensions(self):\n logger.warn('The DynamicArrayVariable.dimensions attribute is '\n 'deprecated, use .ndim instead', 'deprecated_dimensions',\n once=True)\n return self.ndim\n\n def resize(self, new_size):\n '''\n Resize the dynamic array. Calls `self.device.resize` to do the\n actual resizing.\n\n Parameters\n ----------\n new_size : int or tuple of int\n The new size.\n '''\n if self.resize_along_first:\n self.device.resize_along_first(self, new_size)\n else:\n self.device.resize(self, new_size)\n\n self.size = new_size\n\n\nclass Subexpression(Variable):\n '''\n An object providing information about a named subexpression in a model.\n Most of the time `Variables.add_subexpression` should be used instead of\n instantiating this class directly.\n\n Parameters\n ----------\n name : str\n The name of the subexpression.\n dimensions : `Dimension`, optional\n The physical dimensions of the subexpression.\n owner : `Group`\n The group to which the expression refers.\n expr : str\n The subexpression itself.\n device : `Device`\n The device responsible for the memory access.\n dtype : `dtype`, optional\n The dtype used for the expression. Defaults to\n `core.default_float_dtype`.\n scalar: bool, optional\n Whether this is an expression only referring to scalar variables.\n Defaults to ``False``\n '''\n def __init__(self, name, owner, expr, device, dimensions=DIMENSIONLESS,\n dtype=None, scalar=False):\n super(Subexpression, self).__init__(dimensions=dimensions, owner=owner,\n name=name, dtype=dtype,\n scalar=scalar,\n constant=False, read_only=True)\n\n #: The `Device` responsible for memory access\n self.device = device\n\n #: The expression defining the subexpression\n self.expr = expr.strip()\n\n if scalar:\n from brian2.parsing.sympytools import str_to_sympy\n # We check here if the corresponding sympy expression contains a\n # reference to _vectorisation_idx which indicates that an implicitly\n # vectorized function (e.g. rand() ) has been used. 
We do not allow\n # this since it would lead to incorrect results when substituted into\n # vector equations\n sympy_expr = str_to_sympy(self.expr)\n if sympy.Symbol('_vectorisation_idx') in sympy_expr.atoms():\n raise SyntaxError(('The scalar subexpression %s refers to an '\n 'implicitly vectorized function -- this is '\n 'not allowed since it leads to different '\n 'interpretations of this subexpression '\n 'depending on whether it is used in a '\n 'scalar or vector context.') % name)\n\n #: The identifiers used in the expression\n self.identifiers = get_identifiers(expr)\n\n def get_addressable_value(self, name, group):\n return VariableView(name=name, variable=self, group=group,\n dimensions=DIMENSIONLESS)\n\n def get_addressable_value_with_unit(self, name, group):\n return VariableView(name=name, variable=self, group=group,\n dimensions=self.dim)\n\n def __contains__(self, var):\n return var in self.identifiers\n\n def __repr__(self):\n description = ('<{classname}(name={name}, dimensions={dimensions}, dtype={dtype}, '\n 'expr={expr}, owner=<{owner}>)>')\n return description.format(classname=self.__class__.__name__,\n name=repr(self.name),\n dimensions=repr(self.dim),\n dtype=repr(self.dtype),\n expr=repr(self.expr),\n owner=self.owner.name)\n\n# ------------------------------------------------------------------------------\n# Classes providing views on variables and storing variables information\n# ------------------------------------------------------------------------------\nclass LinkedVariable(object):\n '''\n A simple helper class to make linking variables explicit. Users should use\n `linked_var` instead.\n\n Parameters\n ----------\n group : `Group`\n The group through which the `variable` is accessed (not necessarily the\n same as ``variable.owner``.\n name : str\n The name of `variable` in `group` (not necessarily the same as\n ``variable.name``).\n variable : `Variable`\n The variable that should be linked.\n index : str or `ndarray`, optional\n An indexing array (or the name of a state variable), providing a mapping\n from the entries in the link source to the link target.\n '''\n def __init__(self, group, name, variable, index=None):\n self.group = group\n self.name = name\n self.variable = variable\n self.index = index\n\n\ndef linked_var(group_or_variable, name=None, index=None):\n '''\n Represents a link target for setting a linked variable.\n\n Parameters\n ----------\n group_or_variable : `NeuronGroup` or `VariableView`\n Either a reference to the target `NeuronGroup` (e.g. ``G``) or a direct\n reference to a `VariableView` object (e.g. ``G.v``). 
In case only the\n group is specified, `name` has to be specified as well.\n name : str, optional\n The name of the target variable, necessary if `group_or_variable` is a\n `NeuronGroup`.\n index : str or `ndarray`, optional\n An indexing array (or the name of a state variable), providing a mapping\n from the entries in the link source to the link target.\n\n Examples\n --------\n >>> from brian2 import *\n >>> G1 = NeuronGroup(10, 'dv/dt = -v / (10*ms) : volt')\n >>> G2 = NeuronGroup(10, 'v : volt (linked)')\n >>> G2.v = linked_var(G1, 'v')\n >>> G2.v = linked_var(G1.v) # equivalent\n '''\n if isinstance(group_or_variable, VariableView):\n if name is not None:\n raise ValueError(('Cannot give a variable and a variable name at '\n 'the same time.'))\n return LinkedVariable(group_or_variable.group,\n group_or_variable.name,\n group_or_variable.variable, index=index)\n elif name is None:\n raise ValueError('Need to provide a variable name')\n else:\n return LinkedVariable(group_or_variable,\n name,\n group_or_variable.variables[name], index=index)\n\n\nclass VariableView(object):\n '''\n A view on a variable that allows to treat it as an numpy array while\n allowing special indexing (e.g. with strings) in the context of a `Group`.\n\n Parameters\n ----------\n name : str\n The name of the variable (not necessarily the same as ``variable.name``).\n variable : `Variable`\n The variable description.\n group : `Group`\n The group through which the variable is accessed (not necessarily the\n same as `variable.owner`).\n dimensions : `Dimension`, optional\n The physical dimensions to be used for the variable, should be `None`\n when a variable is accessed without units (e.g. when accessing\n ``G.var_``).\n '''\n def __init__(self, name, variable, group, dimensions=None):\n self.name = name\n self.variable = variable\n self.index_var_name = group.variables.indices[name]\n if self.index_var_name in ('_idx', '0'):\n self.index_var = self.index_var_name\n else:\n self.index_var = group.variables[self.index_var_name]\n\n if isinstance(variable, Subexpression):\n # For subexpressions, we *always* have to go via codegen to get\n # their value -- since we cannot do this without the group, we\n # hold a strong reference\n self.group = group\n else:\n # For state variable arrays, we can do most access without the full\n # group, using the indexing reference below. We therefore only keep\n # a weak reference to the group.\n self.group = weakproxy_with_fallback(group)\n self.group_name = group.name\n # We keep a strong reference to the `Indexing` object so that basic\n # indexing is still possible, even if the group no longer exists\n self.indexing = self.group._indices\n self.dim = dimensions\n\n @property\n def unit(self):\n '''\n The `Unit` of this variable\n '''\n return get_unit(self.dim)\n\n def get_item(self, item, level=0, namespace=None):\n '''\n Get the value of this variable. 
Called by `__getitem__`.\n\n Parameters\n ----------\n item : slice, `ndarray` or string\n The index for the setting operation\n level : int, optional\n How much farther to go up in the stack to find the implicit\n namespace (if used, see `run_namespace`).\n namespace : dict-like, optional\n An additional namespace that is used for variable lookup (if not\n defined, the implicit namespace of local variables is used).\n '''\n from brian2.core.namespace import get_local_namespace # avoids circular import\n if isinstance(item, basestring):\n # Check whether the group still exists to give a more meaningful\n # error message if it does not\n try:\n self.group.name\n except ReferenceError:\n raise ReferenceError(('Cannot use string expressions, the '\n 'group \"%s\", providing the context for '\n 'the expression, no longer exists. '\n 'Consider holding an explicit reference '\n 'to it to keep it '\n 'alive.') % self.group_name)\n if namespace is None:\n namespace = get_local_namespace(level=level+1)\n values = self.get_with_expression(item,\n run_namespace=namespace)\n else:\n if isinstance(self.variable, Subexpression):\n if namespace is None:\n namespace = get_local_namespace(level=level + 1)\n values = self.get_subexpression_with_index_array(item,\n run_namespace=namespace)\n else:\n values = self.get_with_index_array(item)\n\n if self.dim is DIMENSIONLESS:\n return values\n else:\n return Quantity(values, self.dim)\n\n def __getitem__(self, item):\n return self.get_item(item, level=1)\n\n def set_item(self, item, value, level=0, namespace=None):\n '''\n Set this variable. This function is called by `__setitem__` but there\n is also a situation where it should be called directly: if the context\n for string-based expressions is higher up in the stack, this function\n allows to set the `level` argument accordingly.\n\n Parameters\n ----------\n item : slice, `ndarray` or string\n The index for the setting operation\n value : `Quantity`, `ndarray` or number\n The value for the setting operation\n level : int, optional\n How much farther to go up in the stack to find the implicit\n namespace (if used, see `run_namespace`).\n namespace : dict-like, optional\n An additional namespace that is used for variable lookup (if not\n defined, the implicit namespace of local variables is used).\n '''\n from brian2.core.namespace import get_local_namespace # avoids circular import\n variable = self.variable\n if variable.read_only:\n raise TypeError('Variable %s is read-only.' % self.name)\n\n # Check whether the group allows writing to the variable (e.g. for\n # synaptic variables, writing is only allowed after a connect)\n try:\n self.group.check_variable_write(variable)\n except ReferenceError:\n # Ignore problems with weakly referenced groups that don't exist\n # anymore at this time (e.g. 
when doing neuron.axon.var = ...)\n pass\n\n # The second part is equivalent to item == slice(None) but formulating\n # it this way prevents a FutureWarning if one of the elements is a\n # numpy array\n if isinstance(item, slice) and (item.start is None and\n item.stop is None and\n item.step is None):\n item = 'True'\n\n check_units = self.dim is not None\n\n if namespace is None:\n namespace = get_local_namespace(level=level+1)\n\n # Both index and values are strings, use a single code object do deal\n # with this situation\n if isinstance(value, basestring) and isinstance(item, basestring):\n self.set_with_expression_conditional(item, value,\n check_units=check_units,\n run_namespace=namespace)\n elif isinstance(item, basestring):\n try:\n float(value) # only checks for the exception\n try:\n # length-1 arrays are also convertible to float, but we\n # don't want the repr used later to be something like\n # array([...]).\n value = value[0]\n except (IndexError, TypeError):\n # was scalar already apparently\n pass\n except (TypeError, ValueError):\n if item != 'True':\n raise TypeError('When setting a variable based on a string '\n 'index, the value has to be a string or a '\n 'scalar.')\n\n if item == 'True':\n # We do not want to go through code generation for runtime\n self.set_with_index_array(slice(None), value,\n check_units=check_units)\n else:\n self.set_with_expression_conditional(item,\n repr(value),\n check_units=check_units,\n run_namespace=namespace)\n elif isinstance(value, basestring):\n self.set_with_expression(item, value,\n check_units=check_units,\n run_namespace=namespace)\n else: # No string expressions involved\n self.set_with_index_array(item, value,\n check_units=check_units)\n\n def __setitem__(self, item, value):\n self.set_item(item, value, level=1)\n\n @device_override('variableview_set_with_expression')\n def set_with_expression(self, item, code, run_namespace, check_units=True):\n '''\n Sets a variable using a string expression. Is called by\n `VariableView.set_item` for statements such as\n ``S.var[:, :] = 'exp(-abs(i-j)/space_constant)*nS'``\n\n Parameters\n ----------\n item : `ndarray`\n The indices for the variable (in the context of this `group`).\n code : str\n The code that should be executed to set the variable values.\n Can contain references to indices, such as `i` or `j`\n run_namespace : dict-like, optional\n An additional namespace that is used for variable lookup (if not\n defined, the implicit namespace of local variables is used).\n check_units : bool, optional\n Whether to check the units of the expression.\n run_namespace : dict-like, optional\n An additional namespace that is used for variable lookup (if not\n defined, the implicit namespace of local variables is used).\n '''\n # Some fairly complicated code to raise a warning in ambiguous\n # situations, when indexing with a group. 
For example, in:\n # group.v[subgroup] = 'i'\n # the index 'i' is the index of 'group' (\"absolute index\") and not of\n # subgroup (\"relative index\")\n if hasattr(item, 'variables') or (isinstance(item, tuple)\n and any(hasattr(one_item, 'variables')\n for one_item in item)):\n # Determine the variables that are used in the expression\n from brian2.codegen.translation import get_identifiers_recursively\n identifiers = get_identifiers_recursively([code],\n self.group.variables)\n variables = self.group.resolve_all(identifiers, run_namespace,\n user_identifiers=set())\n if not isinstance(item, tuple):\n index_groups = [item]\n else:\n index_groups = item\n\n for varname, var in variables.items():\n for index_group in index_groups:\n if not hasattr(index_group, 'variables'):\n continue\n if varname in index_group.variables or var.name in index_group.variables:\n indexed_var = index_group.variables.get(varname,\n index_group.variables.get(var.name))\n if not indexed_var is var:\n logger.warn(('The string expression used for setting '\n '{varname} refers to {referred_var} which '\n 'might be ambiguous. It will be '\n 'interpreted as referring to '\n '{referred_var} in {group}, not as '\n 'a variable of a group used for '\n 'indexing.').format(varname=self.name,\n referred_var=varname,\n group=self.group.name,\n index_group=index_group.name),\n 'ambiguous_string_expression')\n break # no need to warn more than once for a variable\n\n indices = np.atleast_1d(self.indexing(item))\n abstract_code = self.name + ' = ' + code\n variables = Variables(self.group)\n variables.add_array('_group_idx', size=len(indices), dtype=np.int32,\n values=indices)\n\n # TODO: Have an additional argument to avoid going through the index\n # array for situations where iterate_all could be used\n from brian2.codegen.codeobject import create_runner_codeobj\n from brian2.devices.device import get_device\n device = get_device()\n\n codeobj = create_runner_codeobj(self.group,\n abstract_code,\n 'group_variable_set',\n additional_variables=variables,\n check_units=check_units,\n run_namespace=run_namespace,\n codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target'))\n codeobj()\n\n @device_override('variableview_set_with_expression_conditional')\n def set_with_expression_conditional(self, cond, code, run_namespace,\n check_units=True):\n '''\n Sets a variable using a string expression and string condition. 
Is\n called by `VariableView.set_item` for statements such as\n ``S.var['i!=j'] = 'exp(-abs(i-j)/space_constant)*nS'``\n\n Parameters\n ----------\n cond : str\n The string condition for which the variables should be set.\n code : str\n The code that should be executed to set the variable values.\n run_namespace : dict-like, optional\n An additional namespace that is used for variable lookup (if not\n defined, the implicit namespace of local variables is used).\n check_units : bool, optional\n Whether to check the units of the expression.\n '''\n variable = self.variable\n if variable.scalar and cond != 'True':\n raise IndexError(('Cannot conditionally set the scalar variable '\n '%s.') % self.name)\n abstract_code_cond = '_cond = '+cond\n abstract_code = self.name + ' = ' + code\n variables = Variables(None)\n variables.add_auxiliary_variable('_cond', dtype=np.bool)\n from brian2.codegen.codeobject import create_runner_codeobj\n # TODO: Have an additional argument to avoid going through the index\n # array for situations where iterate_all could be used\n from brian2.devices.device import get_device\n device = get_device()\n codeobj = create_runner_codeobj(self.group,\n {'condition': abstract_code_cond,\n 'statement': abstract_code},\n 'group_variable_set_conditional',\n additional_variables=variables,\n check_units=check_units,\n run_namespace=run_namespace,\n codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target'))\n codeobj()\n\n @device_override('variableview_get_with_expression')\n def get_with_expression(self, code, run_namespace):\n '''\n Gets a variable using a string expression. Is called by\n `VariableView.get_item` for statements such as\n ``print(G.v['g_syn > 0'])``.\n\n Parameters\n ----------\n code : str\n An expression that states a condition for elements that should be\n selected. Can contain references to indices, such as ``i`` or ``j``\n and to state variables. For example: ``'i>3 and v>0*mV'``.\n run_namespace : dict-like\n An additional namespace that is used for variable lookup (either\n an explicitly defined namespace or one taken from the local\n context).\n '''\n variable = self.variable\n if variable.scalar:\n raise IndexError(('Cannot access the variable %s with a '\n 'string expression, it is a scalar '\n 'variable.') % self.name)\n # Add the recorded variable under a known name to the variables\n # dictionary. 
Important to deal correctly with\n # the type of the variable in C++\n variables = Variables(None)\n variables.add_auxiliary_variable('_variable', dimensions=variable.dim,\n dtype=variable.dtype,\n scalar=variable.scalar)\n variables.add_auxiliary_variable('_cond', dtype=np.bool)\n\n abstract_code = '_variable = ' + self.name + '\\n'\n abstract_code += '_cond = ' + code\n from brian2.codegen.codeobject import create_runner_codeobj\n from brian2.devices.device import get_device\n device = get_device()\n codeobj = create_runner_codeobj(self.group,\n abstract_code,\n 'group_variable_get_conditional',\n additional_variables=variables,\n run_namespace=run_namespace,\n codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target')\n )\n return codeobj()\n\n @device_override('variableview_get_with_index_array')\n def get_with_index_array(self, item):\n variable = self.variable\n if variable.scalar:\n if not (isinstance(item, slice) and item == slice(None)):\n raise IndexError(('Illegal index for variable %s, it is a '\n 'scalar variable.') % self.name)\n indices = 0\n elif (isinstance(item, slice) and item == slice(None)\n and self.index_var == '_idx'):\n indices = slice(None)\n else:\n indices = self.indexing(item, self.index_var)\n\n return variable.get_value()[indices]\n\n @device_override('variableview_get_subexpression_with_index_array')\n def get_subexpression_with_index_array(self, item, run_namespace):\n variable = self.variable\n if variable.scalar:\n if not (isinstance(item, slice) and item == slice(None)):\n raise IndexError(('Illegal index for variable %s, it is a '\n 'scalar variable.') % self.name)\n indices = np.array(0)\n else:\n indices = self.indexing(item, self.index_var)\n\n # For \"normal\" variables, we can directly access the underlying data\n # and use the usual slicing syntax. 
For subexpressions, however, we\n # have to evaluate code for the given indices\n variables = Variables(None, default_index='_group_index')\n variables.add_auxiliary_variable('_variable',\n dimensions=variable.dim,\n dtype=variable.dtype,\n scalar=variable.scalar)\n if indices.shape == ():\n single_index = True\n indices = np.array([indices])\n else:\n single_index = False\n variables.add_array('_group_idx', size=len(indices), dtype=np.int32)\n variables['_group_idx'].set_value(indices)\n # Force the use of this variable as a replacement for the original\n # index variable\n using_orig_index = [varname for varname, index in self.group.variables.indices.items()\n if index == self.index_var_name and index != '0']\n for varname in using_orig_index:\n variables.indices[varname] = '_idx'\n\n abstract_code = '_variable = ' + self.name + '\\n'\n from brian2.codegen.codeobject import create_runner_codeobj\n from brian2.devices.device import get_device\n device = get_device()\n codeobj = create_runner_codeobj(self.group,\n abstract_code,\n 'group_variable_get',\n # Setting the user code to an empty\n # string suppresses warnings if the\n # subexpression refers to variable\n # names that are also present in the\n # local namespace\n user_code='',\n needed_variables=['_group_idx'],\n additional_variables=variables,\n run_namespace=run_namespace,\n codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target')\n )\n result = codeobj()\n if single_index and not variable.scalar:\n return result[0]\n else:\n return result\n\n @device_override('variableview_set_with_index_array')\n def set_with_index_array(self, item, value, check_units):\n variable = self.variable\n if check_units:\n fail_for_dimension_mismatch(variable.dim, value,\n 'Incorrect unit for setting variable %s' % self.name)\n if variable.scalar:\n if not (isinstance(item, slice) and item == slice(None)):\n raise IndexError(('Illegal index for variable %s, it is a '\n 'scalar variable.') % self.name)\n indices = 0\n elif (isinstance(item, slice) and item == slice(None)\n and self.index_var == '_idx'):\n indices = slice(None)\n else:\n indices = self.indexing(item, self.index_var)\n\n q = Quantity(value, copy=False)\n if len(q.shape):\n if not len(q.shape) == 1 or len(q) != 1 and len(q) != len(indices):\n raise ValueError(('Provided values do not match the size '\n 'of the indices, '\n '%d != %d.') % (len(q),\n len(indices)))\n variable.get_value()[indices] = value\n\n # Allow some basic calculations directly on the ArrayView object\n def __array__(self, dtype=None):\n try:\n # This will fail for subexpressions that refer to external\n # parameters\n value = self[:]\n except ValueError:\n raise ValueError(('Cannot get the values for variable {var}. 
If it '\n 'is a subexpression referring to external '\n 'variables, use \"group.{var}[:]\" instead of '\n '\"group.{var}\"'.format(var=self.variable.name)))\n return np.asanyarray(self[:], dtype=dtype)\n\n def __array_prepare__(self, array, context=None):\n if self.dim is None:\n return array\n else:\n this = self[:]\n if isinstance(this, Quantity):\n return Quantity.__array_prepare__(this, array,\n context=context)\n else:\n return array\n\n def __array_wrap__(self, out_arr, context=None):\n if self.dim is None:\n return out_arr\n else:\n this = self[:]\n if isinstance(this, Quantity):\n return Quantity.__array_wrap__(self[:], out_arr,\n context=context)\n else:\n return out_arr\n\n def __len__(self):\n return len(self.get_item(slice(None), level=1))\n\n def __neg__(self):\n return -self.get_item(slice(None), level=1)\n\n def __pos__(self):\n return self.get_item(slice(None), level=1)\n\n def __add__(self, other):\n return self.get_item(slice(None), level=1) + np.asanyarray(other)\n\n def __radd__(self, other):\n return np.asanyarray(other) + self.get_item(slice(None), level=1)\n\n def __sub__(self, other):\n return self.get_item(slice(None), level=1) - np.asanyarray(other)\n\n def __rsub__(self, other):\n return np.asanyarray(other) - self.get_item(slice(None), level=1)\n\n def __mul__(self, other):\n return self.get_item(slice(None), level=1) * np.asanyarray(other)\n\n def __rmul__(self, other):\n return np.asanyarray(other) * self.get_item(slice(None), level=1)\n\n def __div__(self, other):\n return self.get_item(slice(None), level=1) / np.asanyarray(other)\n\n def __truediv__(self, other):\n return self.get_item(slice(None), level=1) / np.asanyarray(other)\n\n def __floordiv__(self, other):\n return self.get_item(slice(None), level=1) // np.asanyarray(other)\n\n def __rdiv__(self, other):\n return np.asanyarray(other) / self.get_item(slice(None), level=1)\n\n def __rtruediv__(self, other):\n return np.asanyarray(other) / self.get_item(slice(None), level=1)\n\n def __rfloordiv__(self, other):\n return np.asanyarray(other) // self.get_item(slice(None), level=1)\n\n def __iadd__(self, other):\n if isinstance(other, basestring):\n raise TypeError(('In-place modification with strings not '\n 'supported. Use group.var = \"var + expression\" '\n 'instead of group.var += \"expression\".'))\n elif isinstance(self.variable, Subexpression):\n raise TypeError('Cannot assign to a subexpression.')\n else:\n rhs = self[:] + np.asanyarray(other)\n self[:] = rhs\n return self\n\n def __isub__(self, other):\n if isinstance(other, basestring):\n raise TypeError(('In-place modification with strings not '\n 'supported. Use group.var = \"var - expression\" '\n 'instead of group.var -= \"expression\".'))\n elif isinstance(self.variable, Subexpression):\n raise TypeError('Cannot assign to a subexpression.')\n else:\n rhs = self[:] - np.asanyarray(other)\n self[:] = rhs\n return self\n\n def __imul__(self, other):\n if isinstance(other, basestring):\n raise TypeError(('In-place modification with strings not '\n 'supported. Use group.var = \"var * expression\" '\n 'instead of group.var *= \"expression\".'))\n elif isinstance(self.variable, Subexpression):\n raise TypeError('Cannot assign to a subexpression.')\n else:\n rhs = self[:] * np.asanyarray(other)\n self[:] = rhs\n return self\n\n def __idiv__(self, other):\n if isinstance(other, basestring):\n raise TypeError(('In-place modification with strings not '\n 'supported. 
Use group.var = \"var / expression\" '\n 'instead of group.var /= \"expression\".'))\n elif isinstance(self.variable, Subexpression):\n raise TypeError('Cannot assign to a subexpression.')\n else:\n rhs = self[:] / np.asanyarray(other)\n self[:] = rhs\n return self\n\n # Also allow logical comparisons\n\n def __eq__(self, other):\n return self.get_item(slice(None), level=1) == np.asanyarray(other)\n\n def __ne__(self, other):\n return self.get_item(slice(None), level=1) != np.asanyarray(other)\n\n def __lt__(self, other):\n return self.get_item(slice(None), level=1) < np.asanyarray(other)\n\n def __le__(self, other):\n return self.get_item(slice(None), level=1) <= np.asanyarray(other)\n\n def __gt__(self, other):\n return self.get_item(slice(None), level=1) > np.asanyarray(other)\n\n def __ge__(self, other):\n return self.get_item(slice(None), level=1) >= np.asanyarray(other)\n\n def __repr__(self):\n varname = self.name\n if self.dim is None:\n varname += '_'\n\n if self.variable.scalar:\n dim = self.dim if self.dim is not None else DIMENSIONLESS\n values = repr(Quantity(self.variable.get_value().item(),\n dim=dim))\n else:\n try:\n # This will fail for subexpressions that refer to external\n # parameters\n values = repr(self[:])\n except KeyError:\n values = ('[Subexpression refers to external parameters. Use '\n '\"group.{var}[:]\"]').format(var=self.variable.name)\n\n return '<%s.%s: %s>' % (self.group_name, varname,\n values)\n\n # Get access to some basic properties of the underlying array\n @property\n def shape(self):\n return self.get_item(slice(None), level=1).shape\n\n @property\n def dtype(self):\n return self.get_item(slice(None), level=1).dtype\n\n\nclass Variables(collections.Mapping):\n '''\n A container class for storing `Variable` objects. Instances of this class\n are used as the `Group.variables` attribute and can be accessed as\n (read-only) dictionaries.\n\n Parameters\n ----------\n owner : `Nameable`\n The object (typically a `Group`) \"owning\" the variables.\n default_index : str, optional\n The index to use for the variables (only relevant for `ArrayVariable`\n and `DynamicArrayVariable`). Defaults to ``'_idx'``.\n '''\n\n def __init__(self, owner, default_index='_idx'):\n #: A reference to the `Group` owning these variables\n self.owner = weakproxy_with_fallback(owner)\n # The index that is used for arrays if no index is given explicitly\n self.default_index = default_index\n\n # We do the import here to avoid a circular dependency.\n from brian2.devices.device import get_device\n self.device = get_device()\n\n self._variables = {}\n #: A dictionary given the index name for every array name\n self.indices = collections.defaultdict(functools.partial(str, default_index))\n # Note that by using functools.partial (instead of e.g. 
a lambda\n # function) above, this object remains pickable.\n\n def __getitem__(self, item):\n return self._variables[item]\n\n def __len__(self):\n return len(self._variables)\n\n def __iter__(self):\n return iter(self._variables)\n\n def _add_variable(self, name, var, index=None):\n if name in self._variables:\n raise KeyError(('The name \"%s\" is already present in the variables'\n ' dictionary.') % name)\n #TODO: do some check for the name, part of it has to be device-specific\n self._variables[name] = var\n\n if isinstance(var, ArrayVariable):\n # Tell the device to actually create the array (or note it down for\n # later code generation in standalone).\n self.device.add_array(var)\n\n if getattr(var, 'scalar', False):\n if index not in (None, '0'):\n raise ValueError('Cannot set an index for a scalar variable')\n self.indices[name] = '0'\n\n if index is not None:\n self.indices[name] = index\n\n def add_array(self, name, size, dimensions=DIMENSIONLESS, values=None,\n dtype=None, constant=False, read_only=False, scalar=False,\n unique=False, index=None):\n '''\n Add an array (initialized with zeros).\n\n Parameters\n ----------\n name : str\n The name of the variable.\n dimensions : `Dimension`, optional\n The physical dimensions of the variable.\n size : int\n The size of the array.\n values : `ndarray`, optional\n The values to initalize the array with. If not specified, the array\n is initialized to zero.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `core.default_float_dtype`.\n constant : bool, optional\n Whether the variable's value is constant during a run.\n Defaults to ``False``.\n scalar : bool, optional\n Whether this is a scalar variable. Defaults to ``False``, if set to\n ``True``, also implies that `size` equals 1.\n read_only : bool, optional\n Whether this is a read-only variable, i.e. a variable that is set\n internally and cannot be changed by the user. Defaults\n to ``False``.\n index : str, optional\n The index to use for this variable. Defaults to\n `Variables.default_index`.\n unique : bool, optional\n See `ArrayVariable`. 
Defaults to ``False``.\n '''\n if np.asanyarray(size).shape == ():\n # We want a basic Python type for the size instead of something\n # like numpy.int64\n size = int(size)\n var = ArrayVariable(name=name, dimensions=dimensions, owner=self.owner,\n device=self.device, size=size,\n dtype=dtype,\n constant=constant,\n scalar=scalar,\n read_only=read_only,\n unique=unique)\n self._add_variable(name, var, index)\n # This could be avoided, but we currently need it so that standalone\n # allocates the memory\n self.device.init_with_zeros(var, dtype)\n if values is not None:\n if scalar:\n if np.asanyarray(values).shape != ():\n raise ValueError('Need a scalar value.')\n self.device.fill_with_array(var, values)\n else:\n if len(values) != size:\n raise ValueError(('Size of the provided values does not match '\n 'size: %d != %d') % (len(values), size))\n self.device.fill_with_array(var, values)\n\n def add_arrays(self, names, size, dimensions=DIMENSIONLESS,\n dtype=None, constant=False, read_only=False, scalar=False,\n unique=False, index=None):\n '''\n Adds several arrays (initialized with zeros) with the same attributes\n (size, units, etc.).\n\n Parameters\n ----------\n names : list of str\n The names of the variable.\n dimensions : `Dimension`, optional\n The physical dimensions of the variable.\n size : int\n The sizes of the arrays.\n dtype : `dtype`, optional\n The dtype used for storing the variables. If none is given, defaults\n to `core.default_float_dtype`.\n constant : bool, optional\n Whether the variables' values are constant during a run.\n Defaults to ``False``.\n scalar : bool, optional\n Whether these are scalar variables. Defaults to ``False``, if set to\n ``True``, also implies that `size` equals 1.\n read_only : bool, optional\n Whether these are read-only variables, i.e. variables that are set\n internally and cannot be changed by the user. Defaults\n to ``False``.\n index : str, optional\n The index to use for these variables. Defaults to\n `Variables.default_index`.\n unique : bool, optional\n See `ArrayVariable`. Defaults to ``False``.\n '''\n for name in names:\n self.add_array(name, dimensions=dimensions, size=size, dtype=dtype,\n constant=constant, read_only=read_only,\n scalar=scalar, unique=unique, index=index)\n\n def add_dynamic_array(self, name, size, dimensions=DIMENSIONLESS,\n values=None, dtype=None, constant=False,\n needs_reference_update=False,\n resize_along_first=False, read_only=False,\n unique=False, scalar=False, index=None):\n '''\n Add a dynamic array.\n\n Parameters\n ----------\n name : str\n The name of the variable.\n dimensions : `Dimension`, optional\n The physical dimensions of the variable.\n size : int or tuple of int\n The (initital) size of the array.\n values : `ndarray`, optional\n The values to initalize the array with. If not specified, the array\n is initialized to zero.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `core.default_float_dtype`.\n constant : bool, optional\n Whether the variable's value is constant during a run.\n Defaults to ``False``.\n needs_reference_update : bool, optional\n Whether the code objects need a new reference to the underlying data at\n every time step. This should be set if the size of the array can be\n changed by other code objects. Defaults to ``False``.\n scalar : bool, optional\n Whether this is a scalar variable. 
Defaults to ``False``, if set to\n ``True``, also implies that `size` equals 1.\n read_only : bool, optional\n Whether this is a read-only variable, i.e. a variable that is set\n internally and cannot be changed by the user. Defaults\n to ``False``.\n index : str, optional\n The index to use for this variable. Defaults to\n `Variables.default_index`.\n unique : bool, optional\n See `DynamicArrayVariable`. Defaults to ``False``.\n '''\n var = DynamicArrayVariable(name=name, dimensions=dimensions, owner=self.owner,\n device=self.device,\n size=size, dtype=dtype,\n constant=constant,\n needs_reference_update=needs_reference_update,\n resize_along_first=resize_along_first,\n scalar=scalar,\n read_only=read_only, unique=unique)\n self._add_variable(name, var, index)\n if np.prod(size) > 0:\n self.device.resize(var, size)\n if values is None and np.prod(size) > 0:\n self.device.init_with_zeros(var, dtype)\n elif values is not None:\n if len(values) != size:\n raise ValueError(('Size of the provided values does not match '\n 'size: %d != %d') % (len(values), size))\n if np.prod(size) > 0:\n self.device.fill_with_array(var, values)\n\n def add_arange(self, name, size, start=0, dtype=np.int32, constant=True,\n read_only=True, unique=True, index=None):\n '''\n Add an array, initialized with a range of integers.\n\n Parameters\n ----------\n name : str\n The name of the variable.\n size : int\n The size of the array.\n start : int\n The start value of the range.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `np.int32`.\n constant : bool, optional\n Whether the variable's value is constant during a run.\n Defaults to ``True``.\n read_only : bool, optional\n Whether this is a read-only variable, i.e. a variable that is set\n internally and cannot be changed by the user. Defaults\n to ``True``.\n index : str, optional\n The index to use for this variable. Defaults to\n `Variables.default_index`.\n unique : bool, optional\n See `ArrayVariable`. Defaults to ``True`` here.\n '''\n self.add_array(name=name, dimensions=DIMENSIONLESS, size=size, dtype=dtype,\n constant=constant, read_only=read_only, unique=unique,\n index=index)\n self.device.init_with_arange(self._variables[name], start, dtype=dtype)\n\n def add_constant(self, name, value, dimensions=DIMENSIONLESS):\n '''\n Add a scalar constant (e.g. the number of neurons `N`).\n\n Parameters\n ----------\n name : str\n The name of the variable\n value: reference to the variable value\n The value of the constant.\n dimensions : `Dimension`, optional\n The physical dimensions of the variable. Note that the variable\n itself (as referenced by value) should never have units attached.\n '''\n var = Constant(name=name, dimensions=dimensions, owner=self.owner,\n value=value)\n self._add_variable(name, var)\n\n def add_subexpression(self, name, expr, dimensions=DIMENSIONLESS,\n dtype=None, scalar=False, index=None):\n '''\n Add a named subexpression.\n\n Parameters\n ----------\n name : str\n The name of the subexpression.\n dimensions : `Dimension`\n The physical dimensions of the subexpression.\n expr : str\n The subexpression itself.\n dtype : `dtype`, optional\n The dtype used for the expression. Defaults to\n `core.default_float_dtype`.\n scalar : bool, optional\n Whether this is an expression only referring to scalar variables.\n Defaults to ``False``\n index : str, optional\n The index to use for this variable. 
Defaults to\n `Variables.default_index`.\n '''\n var = Subexpression(name=name, dimensions=dimensions, expr=expr, owner=self.owner,\n dtype=dtype, device=self.device, scalar=scalar)\n self._add_variable(name, var, index=index)\n\n def add_auxiliary_variable(self, name, dimensions=DIMENSIONLESS,\n dtype=None, scalar=False):\n '''\n Add an auxiliary variable (most likely one that is added automatically\n to abstract code, e.g. ``_cond`` for a threshold condition),\n specifying its type and unit for code generation.\n\n Parameters\n ----------\n name : str\n The name of the variable\n dimensions : `Dimension`\n The physical dimensions of the variable.\n dtype : `dtype`, optional\n The dtype used for storing the variable. If none is given, defaults\n to `core.default_float_dtype`.\n scalar : bool, optional\n Whether the variable is a scalar value (``True``) or vector-valued,\n e.g. defined for every neuron (``False``). Defaults to ``False``.\n '''\n var = AuxiliaryVariable(name=name, dimensions=dimensions, dtype=dtype,\n scalar=scalar)\n self._add_variable(name, var)\n\n\n def add_referred_subexpression(self, name, group, subexpr, index):\n identifiers = subexpr.identifiers\n substitutions = {}\n for identifier in identifiers:\n if identifier not in subexpr.owner.variables:\n # external variable --> nothing to do\n continue\n subexpr_var = subexpr.owner.variables[identifier]\n if hasattr(subexpr_var, 'owner'):\n new_name = '_%s_%s_%s' % (name,\n subexpr.owner.name,\n identifier)\n else:\n new_name = '_%s_%s' % (name, identifier)\n substitutions[identifier] = new_name\n\n subexpr_var_index = group.variables.indices[identifier]\n if subexpr_var_index == group.variables.default_index:\n subexpr_var_index = index\n elif subexpr_var_index == '0':\n pass # nothing to do for a shared variable\n elif subexpr_var_index == index:\n pass # The same index as the main subexpression\n elif index != self.default_index:\n index_var = self._variables.get(index, None)\n if isinstance(index_var, DynamicArrayVariable):\n raise TypeError(('Cannot link to subexpression %s: it refers '\n 'to the variable %s which is indexed with the '\n 'dynamic index %s.') % (name,\n identifier,\n subexpr_var_index))\n else:\n self.add_reference(subexpr_var_index, group)\n\n self.indices[new_name] = subexpr_var_index\n\n if isinstance(subexpr_var, Subexpression):\n self.add_referred_subexpression(new_name,\n group,\n subexpr_var,\n subexpr_var_index)\n else:\n self.add_reference(new_name,\n group,\n identifier,\n subexpr_var_index)\n\n new_expr = word_substitute(subexpr.expr, substitutions)\n new_subexpr = Subexpression(name, self.owner, new_expr,\n dimensions=subexpr.dim,\n device=subexpr.device,\n dtype=subexpr.dtype,\n scalar=subexpr.scalar)\n self._variables[name] = new_subexpr\n\n def add_reference(self, name, group, varname=None, index=None):\n '''\n Add a reference to a variable defined somewhere else (possibly under\n a different name). This is for example used in `Subgroup` and\n `Synapses` to refer to variables in the respective `NeuronGroup`.\n\n Parameters\n ----------\n name : str\n The name of the variable (in this group, possibly a different name\n from `var.name`).\n group : `Group`\n The group from which `var` is referenced\n varname : str, optional\n The variable to refer to. 
If not given, defaults to `name`.\n index : str, optional\n The index that should be used for this variable (defaults to\n `Variables.default_index`).\n '''\n if varname is None:\n varname = name\n if varname not in group.variables:\n raise KeyError(('Group {group} does not have a variable '\n '{name}.').format(group=group.name,\n name=varname))\n if index is None:\n if group.variables[varname].scalar:\n index = '0'\n else:\n index = self.default_index\n\n if (self.owner is not None and self.owner.name != group.name and\n index in self.owner.variables):\n if (not self.owner.variables[index].read_only and\n group.variables.indices[varname] != group.variables.default_index):\n raise TypeError(('Cannot link variable %s to %s in group %s -- '\n 'need to precalculate direct indices but '\n 'index %s can change') % (name,\n varname,\n group.name,\n index))\n\n # We don't overwrite existing names with references\n if name not in self._variables:\n var = group.variables[varname]\n if isinstance(var, Subexpression):\n self.add_referred_subexpression(name, group, var, index)\n else:\n self._variables[name] = var\n self.indices[name] = index\n\n def add_references(self, group, varnames, index=None):\n '''\n Add all `Variable` objects from a name to `Variable` mapping with the\n same name as in the original mapping.\n\n Parameters\n ----------\n group : `Group`\n The group from which the `variables` are referenced\n varnames : iterable of str\n The variables that should be referred to in the current group\n index : str, optional\n The index to use for all the variables (defaults to\n `Variables.default_index`)\n '''\n for name in varnames:\n self.add_reference(name, group, name, index)\n\n def add_object(self, name, obj):\n '''\n Add an arbitrary Python object. This is only meant for internal use\n and therefore only names starting with an underscore are allowed.\n\n Parameters\n ----------\n name : str\n The name used for this object (has to start with an underscore).\n obj : object\n An arbitrary Python object that needs to be accessed directly from\n a `CodeObject`.\n '''\n if not name.startswith('_'):\n raise ValueError('This method is only meant for internally used '\n 'objects, the name therefore has to start with '\n 'an underscore')\n self._variables[name] = obj\n\n def create_clock_variables(self, clock, prefix=''):\n '''\n Convenience function to add the ``t`` and ``dt`` attributes of a\n `clock`.\n\n Parameters\n ----------\n clock : `Clock`\n The clock that should be used for ``t`` and ``dt``.\n prefix : str, optional\n A prefix for the variable names. Used for example in monitors to\n not confuse the dynamic array of recorded times with the current\n time in the recorded group.\n '''\n self.add_reference(prefix + 't', clock, 't')\n self.add_reference(prefix + 'dt', clock, 'dt')\n self.add_reference(prefix + 't_in_timesteps', clock, 'timestep')\n"
] |
[
[
"numpy.can_cast",
"numpy.issubdtype",
"numpy.asanyarray",
"numpy.prod",
"numpy.array"
]
] |
xdralex/pytorch-wheel5
|
[
"336529e354a45908cf3f8f12cd401a95fb2a5351"
] |
[
"wheel5/dataset/functional.py"
] |
[
"from typing import Optional, Tuple, List\n\nimport numpy as np\nimport torch\nfrom numpy.random.mtrand import RandomState\n\nfrom wheel5.tricks.heatmap import heatmap_to_selection_mask\n\n\ndef mixup(img_src: torch.Tensor, lb_src: torch.Tensor,\n img_dst: torch.Tensor, lb_dst: torch.Tensor,\n alpha: float,\n random_state: Optional[RandomState] = None) -> Tuple[torch.Tensor, torch.Tensor, float]:\n # img shape: (F, H, W)\n # lb shape: (C)\n\n random_state = random_state or RandomState()\n weight = random_state.beta(a=alpha, b=alpha)\n\n assert len(img_src.shape) == 3 and len(img_dst.shape) == 3\n assert len(lb_src.shape) == 1 and len(lb_dst.shape) == 1\n assert img_src.shape == img_dst.shape\n assert lb_src.shape == lb_dst.shape\n\n img = torch.lerp(img_src, img_dst, weight=weight)\n lb = torch.lerp(lb_src, lb_dst, weight=weight)\n\n return img, lb, weight\n\n\ndef attentive_cutmix(img_src: torch.Tensor, lb_src: torch.Tensor,\n img_dst: torch.Tensor, lb_dst: torch.Tensor,\n heatmap_src: torch.Tensor,\n alpha: float, q_min: float = 0.0, q_max: float = 1.0, mode: str = 'compact',\n random_state: Optional[RandomState] = None) -> Tuple[torch.Tensor, torch.Tensor, float]:\n # img shape: (F, H, W)\n # lb shape: (C)\n # heatmap shape: (H, W)\n\n random_state = random_state or RandomState()\n q = random_state.beta(a=alpha, b=alpha)\n\n if (mode == 'compact' and q > 0.5) or (mode == 'halo' and q < 0.5):\n q = 1 - q\n\n q = min(max(q, q_min), q_max)\n\n heatmap_expanded = heatmap_src.unsqueeze(dim=0)\n mask_src = heatmap_to_selection_mask(heatmap_expanded, q)\n mask_src = mask_src.squeeze(dim=0)\n\n assert len(img_src.shape) == 3 and len(img_dst.shape) == 3\n assert len(lb_src.shape) == 1 and len(lb_dst.shape) == 1\n assert len(mask_src.shape) == 2\n\n assert img_src.shape == img_dst.shape\n assert lb_src.shape == lb_dst.shape\n\n assert img_src.shape[1:] == mask_src.shape\n\n img = img_dst.new_zeros(img_dst.shape)\n img[:, mask_src] = img_src[:, mask_src]\n img[:, ~mask_src] = img_dst[:, ~mask_src]\n\n weight = float(mask_src.int().sum()) / float(mask_src.numel())\n lb = torch.lerp(lb_dst, lb_src, weight)\n\n return img, lb, weight\n\n\ndef cutmix(img_src: torch.Tensor, lb_src: torch.Tensor,\n img_dst: torch.Tensor, lb_dst: torch.Tensor,\n alpha: float, q_min: float = 0.0, q_max: float = 1.0, mode: str = 'compact',\n random_state: Optional[RandomState] = None) -> Tuple[torch.Tensor, torch.Tensor, float]:\n # img shape: (F, H, W)\n # lb shape: (C)\n\n assert mode in ['compact', 'halo', 'both']\n\n random_state = random_state or RandomState()\n q = random_state.beta(a=alpha, b=alpha)\n\n if (mode == 'compact' and q > 0.5) or (mode == 'halo' and q < 0.5):\n q = 1 - q\n\n q = min(max(q, q_min), q_max)\n\n assert len(img_src.shape) == 3 and len(img_dst.shape) == 3\n assert len(lb_src.shape) == 1 and len(lb_dst.shape) == 1\n assert img_src.shape[0] == img_dst.shape[0]\n assert lb_src.shape == lb_dst.shape\n\n src_h, src_w = img_src.shape[1:]\n dst_h, dst_w = img_dst.shape[1:]\n\n factor = np.sqrt(q)\n patch_h, patch_w = int(np.round(dst_h * factor)), int(np.round(dst_w * factor))\n patch_h, patch_w = min(patch_h, src_h), min(patch_w, src_w)\n\n dst_cy = random_state.randint(dst_h)\n dst_cx = random_state.randint(dst_w)\n\n dst_y1 = int(np.clip(dst_cy - patch_h // 2, 0, dst_h))\n dst_y2 = int(np.clip(dst_cy + patch_h // 2, 0, dst_h))\n dst_x1 = int(np.clip(dst_cx - patch_w // 2, 0, dst_w))\n dst_x2 = int(np.clip(dst_cx + patch_w // 2, 0, dst_w))\n\n patch_h, patch_w = dst_y2 - dst_y1, dst_x2 - dst_x1\n\n 
src_y1 = random_state.randint(src_h - patch_h + 1)\n src_y2 = src_y1 + patch_h\n src_x1 = random_state.randint(src_w - patch_w + 1)\n src_x2 = src_x1 + patch_w\n\n img = img_dst.clone()\n img[:, dst_y1:dst_y2, dst_x1:dst_x2] = img_src[:, src_y1:src_y2, src_x1:src_x2]\n\n weight = float(patch_h * patch_w) / float(dst_h * dst_w)\n lb = torch.lerp(lb_dst, lb_src, weight=weight)\n\n return img, lb, weight\n"
] |
[
[
"torch.lerp",
"numpy.sqrt",
"numpy.clip",
"numpy.round",
"numpy.random.mtrand.RandomState"
]
] |
1036225283/Reinforcement-learning-with-tensorflow
|
[
"0422669b36faccfb1b96581a6345b8f39805ce7e"
] |
[
"contents/Double_DQN/run_Pendulum.py"
] |
[
"\"\"\"\nDouble DQN & Natural DQN comparison,\nThe Pendulum example.\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\nUsing:\nTensorflow: 1.0\ngym: 0.8.0\n\"\"\"\n\n\nimport gym\nfrom contents.Double_DQN.RL_brain import DoubleDQN\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\nenv = gym.make('Pendulum-v0')\nenv = env.unwrapped\nenv.seed(1)\nMEMORY_SIZE = 3000\nACTION_SPACE = 11\n\nsess = tf.Session()\nwith tf.variable_scope('Natural_DQN'):\n natural_DQN = DoubleDQN(\n n_actions=ACTION_SPACE, n_features=3, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.001, double_q=False, sess=sess\n )\n\nwith tf.variable_scope('Double_DQN'):\n double_DQN = DoubleDQN(\n n_actions=ACTION_SPACE, n_features=3, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.001, double_q=True, sess=sess, output_graph=True)\n\nsess.run(tf.global_variables_initializer())\n\n\ndef train(RL):\n total_steps = 0\n observation = env.reset()\n while True:\n # if total_steps - MEMORY_SIZE > 8000: env.render()\n\n action = RL.choose_action(observation)\n\n f_action = (action-(ACTION_SPACE-1)/2)/((ACTION_SPACE-1)/4) # convert to [-2 ~ 2] float actions\n observation_, reward, done, info = env.step(np.array([f_action]))\n\n reward /= 10 # normalize to a range of (-1, 0). r = 0 when get upright\n # the Q target at upright state will be 0, because Q_target = r + gamma * Qmax(s', a') = 0 + gamma * 0\n # so when Q at this state is greater than 0, the agent overestimates the Q. Please refer to the final result.\n\n RL.store_transition(observation, action, reward, observation_)\n\n if total_steps > MEMORY_SIZE: # learning\n RL.learn()\n\n if total_steps - MEMORY_SIZE > 20000: # stop game\n break\n\n observation = observation_\n total_steps += 1\n return RL.q\n\nq_natural = train(natural_DQN)\nq_double = train(double_DQN)\n\nplt.plot(np.array(q_natural), c='r', label='natural')\nplt.plot(np.array(q_double), c='b', label='double')\nplt.legend(loc='best')\nplt.ylabel('Q eval')\nplt.xlabel('training steps')\nplt.grid()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.grid",
"tensorflow.Session",
"tensorflow.variable_scope",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
TeaKatz/AI_Training
|
[
"881a7176965a267a5d966f1f50edd29b39906c26"
] |
[
"utilities/SignalGenerator.py"
] |
[
"import numpy as np\n\n\nclass SignalGenerator:\n\tdef __init__(self, period, signal_type=\"sine\", amplitude=1, vertical_shift=0, phase_shift=0):\n\t\tassert signal_type.lower() in [\"sine\", \"cos\", \"half-sine\", \"half-cos\", \"sawtooth\"], \"get unknown signal_type: '{}'\".format(signal_type)\n\t\t\n\t\tself.period = period\n\t\tself.signal_type = signal_type.lower()\n\t\tself.amplitude = amplitude\n\t\tself.vertical_shift = vertical_shift\n\t\tself.phase_shift = phase_shift\n\t\t\n\tdef _cal_x(self, t):\n\t\t\"\"\"\n\t\tConvert t into x which has range of [0, 1]\n\t\t\"\"\"\n\t\tx = np.mod(t + self.phase_shift, self.period)\n\t\tx = x / self.period\n\t\treturn x\n\t\n\tdef _cal_y(self, x):\n\t\t\"\"\"\n\t\tCalculate y from x according to signal type\n\t\t\"\"\"\n\t\tif self.signal_type == \"sine\":\n\t\t\ty = np.sin(2 * np.pi * x) * self.amplitude + self.vertical_shift\n\t\telif self.signal_type == \"cos\":\n\t\t\ty = np.cos(2 * np.pi * x) * self.amplitude + self.vertical_shift\n\t\telif self.signal_type == \"half-sine\":\n\t\t\ty = np.sin(2 * np.pi * (x / 2 - 0.25)) * self.amplitude + self.vertical_shift\n\t\telif self.signal_type == \"half-cos\":\n\t\t\ty = np.cos(2 * np.pi * x / 2) * self.amplitude + self.vertical_shift\n\t\telse:\n\t\t\ty = x * self.amplitude + self.vertical_shift\n\t\treturn y\n\t\t\n\tdef __call__(self, start_t, stop_t):\n\t\tt = np.arange(start_t, stop_t)\n\t\tx = self._cal_x(t)\n\t\ty = self._cal_y(x)\n\t\treturn y\n\t\t"
] |
[
[
"numpy.mod",
"numpy.arange",
"numpy.cos",
"numpy.sin"
]
] |
staoxiao/LibVQ
|
[
"f844c60055ace872279daa272b0bad1005c02e2b"
] |
[
"examples/NQ/learnable_index/train_index.py"
] |
[
"import sys\nsys.path.append('./')\nimport os\nimport pickle\nimport gc\n\nimport faiss\nimport numpy as np\nfrom transformers import HfArgumentParser\nfrom torch.optim import AdamW\n\nfrom LibVQ.base_index import FaissIndex\nfrom LibVQ.dataset.dataset import load_rel, write_rel\nfrom LibVQ.learnable_index import LearnableIndex\nfrom LibVQ.utils import setuplogging\n\nfrom arguments import IndexArguments, DataArguments, ModelArguments, TrainingArguments\nfrom evaluate import validate, load_test_data\n\nfaiss.omp_set_num_threads(32)\n\nif __name__ == '__main__':\n setuplogging()\n parser = HfArgumentParser((IndexArguments, DataArguments, ModelArguments, TrainingArguments))\n index_args, data_args, model_args, training_args = parser.parse_args_into_dataclasses()\n\n # Load embeddings of queries and docs\n emb_size = 768\n doc_embeddings_file = os.path.join(data_args.embeddings_dir, 'docs.memmap')\n query_embeddings_file = os.path.join(data_args.embeddings_dir, 'train-queries.memmap')\n\n doc_embeddings = np.memmap(doc_embeddings_file,\n dtype=np.float32, mode=\"r\")\n doc_embeddings = doc_embeddings.reshape(-1, emb_size)\n\n train_query_embeddings = np.memmap(query_embeddings_file,\n dtype=np.float32, mode=\"r\")\n train_query_embeddings = train_query_embeddings.reshape(-1, emb_size)\n\n test_query_embeddings = np.memmap(os.path.join(data_args.embeddings_dir, 'test-queries.memmap'),\n dtype=np.float32, mode=\"r\")\n test_query_embeddings = test_query_embeddings.reshape(-1, emb_size)\n\n\n # Create Index\n # if there is a faiss index in init_index_file, it will creat learnable_index based on it;\n # if no, it will creat and save a faiss index in init_index_file\n init_index_file = os.path.join(data_args.embeddings_dir, f'{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index')\n learnable_index = LearnableIndex(index_method=index_args.index_method,\n init_index_file=init_index_file,\n doc_embeddings=doc_embeddings,\n ivf_centers_num=index_args.ivf_centers_num,\n subvector_num=index_args.subvector_num,\n subvector_bits=index_args.subvector_bits)\n\n # The class randomly sample the negative from corpus by default. 
You also can assgin speficed negative for each query (set --neg_file)\n neg_file = os.path.join(data_args.embeddings_dir, f\"train-queries_hardneg.pickle\")\n if not os.path.exists(neg_file):\n print('generating hard negatives for train queries ...')\n train_ground_truths = load_rel(os.path.join(data_args.preprocess_dir, 'train-rels.tsv'))\n trainquery2hardneg = learnable_index.hard_negative(train_query_embeddings,\n train_ground_truths,\n topk=400,\n batch_size=64,\n nprobe=index_args.ivf_centers_num)\n pickle.dump(trainquery2hardneg, open(neg_file, 'wb'))\n\n del trainquery2hardneg\n gc.collect()\n\n data_args.save_ckpt_dir = f'./saved_ckpts/{training_args.training_mode}_{index_args.index_method}/'\n\n # contrastive learning\n if training_args.training_mode == 'contrastive_index':\n learnable_index.fit_with_multi_gpus(query_embeddings_file=query_embeddings_file,\n doc_embeddings_file=doc_embeddings_file,\n rel_file=os.path.join(data_args.preprocess_dir, 'train-rels.tsv'),\n neg_file=os.path.join(data_args.embeddings_dir,\n f\"train-queries_hardneg.pickle\"),\n emb_size=emb_size,\n per_query_neg_num=1,\n checkpoint_path=data_args.save_ckpt_dir,\n logging_steps=training_args.logging_steps,\n per_device_train_batch_size=512,\n checkpoint_save_steps=training_args.checkpoint_save_steps,\n max_grad_norm=training_args.max_grad_norm,\n temperature=training_args.temperature,\n optimizer_class=AdamW,\n loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,\n 'ivf_weight': 'scaled_to_pqloss'},\n lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},\n loss_method='contras',\n epochs=16)\n\n\n # distill based on fixed embeddigns of queries and docs\n if training_args.training_mode == 'distill_index':\n learnable_index.fit_with_multi_gpus(query_embeddings_file=query_embeddings_file,\n doc_embeddings_file=doc_embeddings_file,\n rel_file=os.path.join(data_args.preprocess_dir, 'train-rels.tsv'),\n neg_file=os.path.join(data_args.embeddings_dir,\n f\"train-queries_hardneg.pickle\"),\n emb_size=emb_size,\n per_query_neg_num=1,\n checkpoint_path=data_args.save_ckpt_dir,\n logging_steps=training_args.logging_steps,\n per_device_train_batch_size=128,\n checkpoint_save_steps=training_args.checkpoint_save_steps,\n max_grad_norm=training_args.max_grad_norm,\n temperature=training_args.temperature,\n optimizer_class=AdamW,\n loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,\n 'ivf_weight': 'scaled_to_pqloss'},\n lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},\n loss_method='distill',\n epochs=10)\n\n\n if 'nolabel' in training_args.training_mode:\n '''\n If there is not relevance data, you can set the rel_file/rel_data to None, and it will automatically generate the data for training.\n You also can manually generate the data as following:\n '''\n # generate train data by brute-force or the index which should has similar performance with brute force\n if not os.path.exists(os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv')):\n print('generating relevance labels for train queries ...')\n # flat_index = FaissIndex(doc_embeddings=doc_embeddings, index_method='flat', dist_mode='ip')\n # query2pos, query2neg = flat_index.generate_virtual_traindata(train_query_embeddings,\n # topk=400, batch_size=64)\n # or\n query2pos, query2neg = trainquery2hardneg = learnable_index.generate_virtual_traindata(\n train_query_embeddings, topk=400, batch_size=64, nprobe=index_args.ivf_centers_num)\n\n write_rel(os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv'), query2pos)\n 
pickle.dump(query2neg,\n open(os.path.join(data_args.embeddings_dir, f\"train-queries-virtual_hardneg.pickle\"), 'wb'))\n\n del query2neg, query2pos\n gc.collect()\n\n\n # distill with no label data\n if training_args.training_mode == 'distill_index_nolabel':\n learnable_index.fit_with_multi_gpus(query_embeddings_file=query_embeddings_file,\n doc_embeddings_file=doc_embeddings_file,\n rel_file=os.path.join(data_args.embeddings_dir, 'train-virtual_rel.tsv'),\n neg_file=os.path.join(data_args.embeddings_dir,\n f\"train-queries-virtual_hardneg.pickle\"),\n emb_size=emb_size,\n per_query_neg_num=100,\n checkpoint_path=data_args.save_ckpt_dir,\n logging_steps=training_args.logging_steps,\n per_device_train_batch_size=64,\n checkpoint_save_steps=training_args.checkpoint_save_steps,\n max_grad_norm=training_args.max_grad_norm,\n temperature=training_args.temperature,\n optimizer_class=AdamW,\n loss_weight={'encoder_weight': 1.0, 'pq_weight': 1.0,\n 'ivf_weight': 'scaled_to_pqloss'},\n lr_params={'encoder_lr': 5e-6, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},\n loss_method='distill',\n epochs=10)\n\n # Test\n scores, ann_items = learnable_index.search(test_query_embeddings, topk=100, nprobe=index_args.nprobe)\n test_questions, test_answers, collections = load_test_data(\n query_andwer_file='./data/NQ/raw_dataset/nq-test.qa.csv',\n collections_file='./data/NQ/dataset/collection.tsv')\n validate(ann_items, test_questions, test_answers, collections)\n\n data_args.output_dir = f'./data/NQ/evaluate/LearnableIndex_{training_args.training_mode}'\n os.makedirs(data_args.output_dir, exist_ok=True)\n saved_index_file = os.path.join(data_args.output_dir,\n f'LibVQ_{training_args.training_mode}_{index_args.index_method}_ivf{index_args.ivf_centers_num}_pq{index_args.subvector_num}x{index_args.subvector_bits}.index')\n learnable_index.save_index(saved_index_file)\n learnable_index.load_index(saved_index_file)\n\n # get the faiss index and then you can use the faiss API.\n '''\n index = learnable_index.index \n index = faiss.read_index(saved_index_file)\n index = faiss.index_gpu_to_cpu(index)\n '''"
] |
[
[
"numpy.memmap"
]
] |
AI-Assistant/FEMAG-Python
|
[
"ff86e8f41485ae9df6034e6b8e810b59f8094c70"
] |
[
".venv/Lib/site-packages/ipopt-0.1.9/setup.py"
] |
[
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ncyipopt: Python wrapper for the Ipopt optimization package, written in Cython.\n\nCopyright (C) 2012-2015 Amit Aides\nCopyright (C) 2015-2018 Matthias Kümmerer\n\nAuthor: Matthias Kümmerer <[email protected]>\n(original Author: Amit Aides <[email protected]>)\nURL: https://github.com/matthias-k/cyipopt\nLicense: EPL 1.0\n\"\"\"\n\nimport sys\nimport os.path\nfrom distutils.sysconfig import get_python_lib\nimport subprocess as sp\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\nfrom Cython.Distutils import build_ext\nimport Cython.Distutils\nimport Cython.Compiler.Options\nimport numpy as np\nimport six\n\nif six.PY3:\n exec(open('ipopt/version.py', encoding=\"utf-8\").read())\nelse:\n exec(open('ipopt/version.py').read())\n\nPACKAGE_NAME = 'ipopt'\nVERSION = __version__\nDESCRIPTION = 'A Cython wrapper to the IPOPT optimization package'\nif six.PY3:\n with open('README.rst', encoding=\"utf-8\") as f:\n LONG_DESCRIPTION = f.read()\nelse:\n with open('README.rst') as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = 'Matthias Kümmerer'\nEMAIL = '[email protected]'\nURL = \"https://github.com/matthias-k/cyipopt\"\nDEPENDENCIES = ['numpy', 'cython', 'six', 'future', 'setuptools']\n\n\ndef pkgconfig(*packages, **kw):\n \"\"\"Based on http://code.activestate.com/recipes/502261-python-distutils-pkg-config/#c2\"\"\"\n\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n output = sp.Popen([\"pkg-config\", \"--libs\", \"--cflags\"] + list(packages),\n stdout=sp.PIPE).communicate()[0]\n if six.PY3:\n output = output.decode('utf8')\n for token in output.split():\n if token[:2] in flag_map:\n kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])\n else:\n kw.setdefault('extra_compile_args', []).append(token)\n\n kw['include_dirs'] += [np.get_include()]\n\n return kw\n\n\nif __name__ == '__main__':\n\n if sys.platform == 'win32':\n\n IPOPT_INCLUDE_DIRS = ['include_mt/coin', np.get_include()]\n IPOPT_LIBS = ['Ipopt39', 'IpoptFSS']\n IPOPT_LIB_DIRS = ['lib_mt/x64/release']\n IPOPT_DLL = ['Ipopt39.dll', 'IpoptFSS39.dll']\n\n EXT_MODULES = [\n Extension(\n PACKAGE_NAME + '.' + 'cyipopt',\n ['src/cyipopt.pyx'],\n include_dirs=IPOPT_INCLUDE_DIRS,\n libraries=IPOPT_LIBS,\n library_dirs=IPOPT_LIB_DIRS\n )\n ]\n DATA_FILES = [(os.path.join(get_python_lib(), PACKAGE_NAME),\n [os.path.join(IPOPT_LIB_DIRS[0], dll)\n for dll in IPOPT_DLL])] if IPOPT_DLL else None\n include_package_data = False\n\n else:\n\n EXT_MODULES = [Extension(\"cyipopt\", ['src/cyipopt.pyx'],\n **pkgconfig('ipopt'))]\n DATA_FILES = None\n include_package_data = True\n\n setup(\n name=PACKAGE_NAME,\n version=VERSION,\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n keywords=\"optimization\",\n license=\"EPL-1.0\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Eclipse Public License 1.0 (EPL-1.0)',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n packages=[PACKAGE_NAME],\n install_requires=DEPENDENCIES,\n include_package_data=include_package_data,\n data_files=DATA_FILES,\n cmdclass={'build_ext': build_ext},\n ext_modules=EXT_MODULES\n )\n"
] |
[
[
"numpy.get_include"
]
] |
yuriharrison/ml-algorithms
|
[
"b69c7e666006d43b10ef8f0d95fe745a430f04f1"
] |
[
"mlalgorithms/kMeans.py"
] |
[
"\"\"\"K-Means clustering Algorithm\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\nclass KMeans:\n \"\"\"K-Means clustering\n\n -- Arguments\n k: int, optional, default 2\n The number of clusters to form as well as the number of centroids to generate.\n max_iter: int, optional, default 300\n Maximum number of iterations of the k-means algorithm to run.\n tol: float, optional\n The relative increment in the results before declaring convergence.\n \"\"\"\n\n def __init__(self, k=2, tol=0.001, max_iter=300):\n self.k = k\n self.tol = tol\n self.max_iter = max_iter\n\n def fit(self, data):\n \"\"\"Compute k-means clustering.\"\"\"\n self.centroids = centroids = dict()\n\n for i in range(self.k):\n centroids[i] = data[i]\n\n for i in range(self.max_iter):\n self.classifications = classifications = dict()\n\n for i in range(self.k):\n classifications[i] = []\n\n for featureset in data:\n distances = []\n for centroid in centroids:\n n = np.linalg.norm(featureset - centroids[centroid])\n distances.append(n)\n\n classification = distances.index(min(distances))\n classifications[classification].append(featureset)\n\n prev_centroids = dict(centroids)\n\n for classification in classifications:\n centroids[classification] = np.average(classifications[classification],\n axis=0)\n\n optimized = True\n\n for c in centroids:\n original_centroid = prev_centroids[c]\n current_centroid = centroids[c]\n s = np.sum((current_centroid - original_centroid)\n / original_centroid*100)\n if s > self.tol:\n print(s)\n optimized = False\n\n if optimized:\n break\n\n def predict(self, data):\n \"\"\"Predict the closest cluster each sample in data belongs to.\"\"\"\n distances = []\n for centroid in self.centroids:\n n = np.linalg.norm(data-self.centroids[centroid])\n distances.append(n)\n\n classification = distances.index(min(distances))\n return classification\n\n\nif '__main__' == __name__:\n main_data = np.array([[1, 2],\n [1.5, 1.8],\n [5, 8],\n [8, 8],\n [1, 0.6],\n [9, 11]])\n\n colors = ['r','g','b','c','k','o','y']\n\n clf = KMeans()\n clf.fit(main_data)\n\n for centroid in clf.centroids:\n plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1],\n marker=\"o\", color=\"k\", s=150, linewidths=5)\n\n for classification in clf.classifications:\n color = colors[classification]\n\n for featureset in clf.classifications[classification]:\n plt.scatter(featureset[0], featureset[1], marker=\"x\", \n color=color, s=150, linewidths=5)\n\n new_data = np.array([[1,3],\n [8,9],\n [0,3],\n [5,4],\n [6,4]])\n\n for item in new_data:\n classification = clf.predict(item)\n plt.scatter(item[0], item[1], marker=\"*\", color=colors[classification], \n s=150, linewidths=5)\n\n plt.show()"
] |
[
[
"matplotlib.pyplot.scatter",
"numpy.linalg.norm",
"numpy.average",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show"
]
] |
felixhao28/oneflow
|
[
"e558af6ef6c4ed90e4abc7bc1ba895f55795626d"
] |
[
"python/oneflow/test/modules/test_deconv2d.py"
] |
[
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom oneflow.test_utils.test_util import GenArgList\nfrom oneflow.test_utils.automated_test_util import *\n\nimport oneflow as flow\nimport oneflow.nn as nn\nimport oneflow.unittest\n\n\ndef _test_deconv_bias_false(test_case, device):\n np_arr = np.array(\n [\n [\n [\n [0.2735021114349365, -1.3842310905456543],\n [1.058540940284729, -0.03388553857803345],\n ]\n ]\n ]\n )\n input = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n weight = np.array(\n [\n [\n [\n [0.06456436216831207, -0.10852358490228653, -0.21638715267181396],\n [-0.2279110550880432, 0.1476770043373108, 0.19457484781742096],\n [0.05026858672499657, 0.10818571597337723, 0.02056501805782318],\n ],\n [\n [0.205095112323761, 0.1488947868347168, -0.2344113141298294],\n [0.1684819906949997, -0.21986986696720123, 0.1082606166601181],\n [-0.1528974026441574, 0.17120417952537537, 0.01954500749707222],\n ],\n ]\n ]\n )\n m = nn.ConvTranspose2d(1, 2, 3, stride=1, bias=False)\n m.weight = flow.nn.Parameter(flow.Tensor(weight))\n m = m.to(device)\n output = m(input)\n np_out = np.array(\n [\n [\n [\n [\n 0.01765848882496357,\n -0.1190534234046936,\n 0.09103937447071075,\n 0.2995298206806183,\n ],\n [\n 0.006009865552186966,\n 0.2388070970773697,\n -0.37657976150512695,\n -0.26200416684150696,\n ],\n [\n -0.22750461101531982,\n 0.12405071407556534,\n 0.056831881403923035,\n -0.035060010850429535,\n ],\n [\n 0.053211357444524765,\n 0.11281562596559525,\n 0.0181029811501503,\n -0.0006968567031435668,\n ],\n ],\n [\n [\n 0.05609394609928131,\n -0.24317599833011627,\n -0.27021679282188416,\n 0.32447943091392517,\n ],\n [\n 0.26318174600601196,\n -0.14269141852855682,\n 0.08078087121248245,\n -0.14191456139087677,\n ],\n [\n 0.13652732968330383,\n 0.020019691437482834,\n -0.10959184169769287,\n -0.03072327747941017,\n ],\n [\n -0.16184815764427185,\n 0.1864076405763626,\n 0.014887845143675804,\n -0.0006622931105084717,\n ],\n ],\n ]\n ]\n )\n test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))\n output = output.sum()\n output.backward()\n np_grad = [\n [\n [\n [0.24731683731079102, 0.24731683731079102],\n [0.24731683731079102, 0.24731683731079102],\n ]\n ]\n ]\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))\n\n\ndef _test_deconv_bias_true(test_case, device):\n np_arr = np.array(\n [\n [\n [\n [0.2735021114349365, -1.3842310905456543],\n [1.058540940284729, -0.03388553857803345],\n ]\n ]\n ]\n )\n input = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n weight = np.array(\n [\n [\n [\n [0.06456436216831207, -0.10852358490228653, -0.21638715267181396],\n [-0.2279110550880432, 0.1476770043373108, 0.19457484781742096],\n [0.05026858672499657, 0.10818571597337723, 0.02056501805782318],\n ],\n [\n [0.205095112323761, 
0.1488947868347168, -0.2344113141298294],\n [0.1684819906949997, -0.21986986696720123, 0.1082606166601181],\n [-0.1528974026441574, 0.17120417952537537, 0.01954500749707222],\n ],\n ]\n ]\n )\n bias = np.array([0.06456436216831207, -0.10852358490228653])\n m = nn.ConvTranspose2d(1, 2, 3, stride=1)\n m.weight = flow.nn.Parameter(flow.Tensor(weight))\n m.bias = flow.nn.Parameter(flow.Tensor(bias))\n m = m.to(device)\n output = m(input)\n np_out = [\n [\n [\n [\n 0.0822228491306305,\n -0.05448906123638153,\n 0.15560373663902283,\n 0.36409419775009155,\n ],\n [\n 0.07057422399520874,\n 0.30337145924568176,\n -0.3120154142379761,\n -0.19743980467319489,\n ],\n [\n -0.16294024884700775,\n 0.188615083694458,\n 0.12139624357223511,\n 0.029504351317882538,\n ],\n [\n 0.11777572333812714,\n 0.17737999558448792,\n 0.08266734331846237,\n 0.06386750191450119,\n ],\n ],\n [\n [\n -0.05242963880300522,\n -0.3516995906829834,\n -0.3787403702735901,\n 0.21595585346221924,\n ],\n [\n 0.15465816855430603,\n -0.25121501088142395,\n -0.027742713689804077,\n -0.2504381537437439,\n ],\n [\n 0.028003744781017303,\n -0.088503897190094,\n -0.2181154191493988,\n -0.139246866106987,\n ],\n [\n -0.2703717350959778,\n 0.07788405567407608,\n -0.09363573789596558,\n -0.10918587446212769,\n ],\n ],\n ]\n ]\n test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))\n output = output.sum()\n output.backward()\n np_grad = [\n [\n [\n [0.24731683731079102, 0.24731683731079102],\n [0.24731683731079102, 0.24731683731079102],\n ]\n ]\n ]\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))\n\n\ndef _test_deconv_group_bias_false(test_case, device):\n np_arr = np.array(\n [\n [\n [\n [-2.0125174206754517, 1.9917882689443576],\n [0.13146748727936577, -0.5356457374181375],\n ],\n [\n [1.020683505853394, 1.2900643048299678],\n [-0.549010560600543, 0.8088391626901512],\n ],\n ]\n ]\n )\n input = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m = nn.ConvTranspose2d(2, 2, 3, stride=1, groups=2, bias=False)\n weight = np.array(\n [\n [\n [\n [0.06456436216831207, -0.10852358490228653, -0.21638715267181396],\n [-0.2279110550880432, 0.1476770043373108, 0.19457484781742096],\n [0.05026858672499657, 0.10818571597337723, 0.02056501805782318],\n ]\n ],\n [\n [\n [0.205095112323761, 0.1488947868347168, -0.2344113141298294],\n [0.1684819906949997, -0.21986986696720123, 0.1082606166601181],\n [-0.1528974026441574, 0.17120417952537537, 0.01954500749707222],\n ]\n ],\n ]\n )\n m.weight = flow.nn.Parameter(flow.Tensor(weight))\n m = m.to(device)\n output = m(input)\n np_out = np.array(\n [\n [\n [\n [\n -0.12993690371513367,\n 0.34700414538383484,\n 0.219326913356781,\n -0.43099740147590637,\n ],\n [\n 0.4671630859375,\n -0.8000040054321289,\n -0.06776165962219238,\n 0.5034587383270264,\n ],\n [\n -0.13112929463386536,\n 0.02389305830001831,\n 0.12057329714298248,\n -0.06326202303171158,\n ],\n [\n 0.00660868501290679,\n -0.012703249230980873,\n -0.05524558573961258,\n -0.011015564203262329,\n ],\n ],\n [\n [\n 0.20933720469474792,\n 0.4165603518486023,\n -0.04717591404914856,\n -0.3024056851863861,\n ],\n [\n 0.059367403388023376,\n 0.07707919180393219,\n 0.07597976922988892,\n -0.049937888979911804,\n ],\n [\n -0.24855825304985046,\n 0.2344835251569748,\n 0.003538096323609352,\n 0.11277973651885986,\n ],\n [\n 0.08394229412078857,\n -0.21766230463981628,\n 0.12774622440338135,\n 0.015808766707777977,\n ],\n ],\n ]\n ]\n )\n\n 
test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))\n output = output.sum()\n output.backward()\n np_grad = [\n [\n [\n [0.03301373869180679, 0.03301373869180679],\n [0.03301373869180679, 0.03301373869180679],\n ],\n [\n [0.21430310606956482, 0.21430310606956482],\n [0.21430310606956482, 0.21430310606956482],\n ],\n ]\n ]\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))\n\n\ndef _test_deconv_group_bias_true(test_case, device):\n np_arr = np.array(\n [\n [\n [\n [-2.0125174206754517, 1.9917882689443576],\n [0.13146748727936577, -0.5356457374181375],\n ],\n [\n [1.020683505853394, 1.2900643048299678],\n [-0.549010560600543, 0.8088391626901512],\n ],\n ]\n ]\n )\n input = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m = nn.ConvTranspose2d(2, 2, 3, stride=1, groups=2)\n weight = np.array(\n [\n [\n [\n [0.06456436216831207, -0.10852358490228653, -0.21638715267181396],\n [-0.2279110550880432, 0.1476770043373108, 0.19457484781742096],\n [0.05026858672499657, 0.10818571597337723, 0.02056501805782318],\n ]\n ],\n [\n [\n [0.205095112323761, 0.1488947868347168, -0.2344113141298294],\n [0.1684819906949997, -0.21986986696720123, 0.1082606166601181],\n [-0.1528974026441574, 0.17120417952537537, 0.01954500749707222],\n ]\n ],\n ]\n )\n m.weight = flow.nn.Parameter(flow.Tensor(weight))\n bias = np.array([0.06456436216831207, -0.10852358490228653])\n m.bias = flow.nn.Parameter(flow.Tensor(bias))\n m = m.to(device)\n output = m(input)\n np_out = [\n [\n [\n [\n -0.0653725415468216,\n 0.4115685224533081,\n 0.2838912606239319,\n -0.3664330244064331,\n ],\n [\n 0.5317274332046509,\n -0.735439658164978,\n -0.00319729745388031,\n 0.5680230855941772,\n ],\n [\n -0.06656493246555328,\n 0.08845742046833038,\n 0.18513765931129456,\n 0.0013023391366004944,\n ],\n [\n 0.0711730495095253,\n 0.05186111479997635,\n 0.009318776428699493,\n 0.053548797965049744,\n ],\n ],\n [\n [\n 0.1008136197924614,\n 0.30803677439689636,\n -0.1556994915008545,\n -0.41092926263809204,\n ],\n [\n -0.04915618151426315,\n -0.03144439309835434,\n -0.032543815672397614,\n -0.15846148133277893,\n ],\n [\n -0.3570818305015564,\n 0.12595993280410767,\n -0.10498549044132233,\n 0.004256151616573334,\n ],\n [\n -0.024581290781497955,\n -0.3261858820915222,\n 0.019222639501094818,\n -0.0927148163318634,\n ],\n ],\n ]\n ]\n test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))\n output = output.sum()\n output.backward()\n np_grad = [\n [\n [\n [0.03301373869180679, 0.03301373869180679],\n [0.03301373869180679, 0.03301373869180679],\n ],\n [\n [0.21430310606956482, 0.21430310606956482],\n [0.21430310606956482, 0.21430310606956482],\n ],\n ]\n ]\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))\n\n\ndef _test_deconv_group_large_out_channel(test_case, device):\n np_arr = np.array(\n [\n [\n [\n [-2.0125174206754517, 1.9917882689443576],\n [0.13146748727936577, -0.5356457374181375],\n ],\n [\n [1.020683505853394, 1.2900643048299678],\n [-0.549010560600543, 0.8088391626901512],\n ],\n ]\n ]\n )\n input = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m = nn.ConvTranspose2d(2, 6, 3, stride=1, groups=2, bias=False)\n weight = np.array(\n [\n [\n [\n [0.05271657928824425, -0.08860913664102554, -0.17667937278747559],\n [-0.18608860671520233, 0.12057777494192123, 0.1588696986436844],\n [0.04104413092136383, 0.08833327144384384, 0.016791267320513725],\n ],\n [\n 
[0.16745945811271667, 0.1215720921754837, -0.19139604270458221],\n [0.13756497204303741, -0.17952299118041992, 0.08839442580938339],\n [-0.12484020739793777, 0.13978762924671173, 0.015958432108163834],\n ],\n [\n [-0.07709092646837234, -0.029757702723145485, -0.18154984712600708],\n [-0.14461342990398407, 0.06567336618900299, 0.05665326863527298],\n [0.04441174864768982, -0.04477253183722496, 0.191376194357872],\n ],\n ],\n [\n [\n [0.1850736141204834, 0.07141514122486115, 0.05791180208325386],\n [0.07253318279981613, -0.042754165828228, -0.14045141637325287],\n [0.08525089919567108, 0.009758883155882359, -0.07303793728351593],\n ],\n [\n [-0.005451973062008619, 0.1499139368534088, 0.16706342995166779],\n [-0.05473465472459793, 0.02753184549510479, -0.06856250017881393],\n [0.03629609942436218, -0.06238799914717674, -0.041715867817401886],\n ],\n [\n [0.15021666884422302, -0.10501708835363388, 0.04741475358605385],\n [-0.16011257469654083, 0.1280348002910614, 0.11050418764352798],\n [-0.10031674802303314, 0.1449088454246521, -0.16990724205970764],\n ],\n ],\n ]\n )\n m.weight = flow.nn.Parameter(flow.Tensor(weight))\n m = m.to(device)\n output = m(input)\n np_out = np.array(\n [\n [\n [\n [\n -0.10609303414821625,\n 0.28332769870758057,\n 0.17907968163490295,\n -0.3519079089164734,\n ],\n [\n 0.3814370930194855,\n -0.653200626373291,\n -0.055327147245407104,\n 0.41107234358787537,\n ],\n [\n -0.10706663131713867,\n 0.019508585333824158,\n 0.09844768047332764,\n -0.05165322124958038,\n ],\n [\n 0.005395968910306692,\n -0.010372160002589226,\n -0.04510783404111862,\n -0.00899417046457529,\n ],\n ],\n [\n [\n -0.3370150923728943,\n 0.08887782692909241,\n 0.6273337602615356,\n -0.38122040033340454,\n ],\n [\n -0.25483641028404236,\n 0.561577320098877,\n -0.6257490515708923,\n 0.27858346700668335,\n ],\n [\n 0.26932841539382935,\n -0.6272678375244141,\n 0.35409244894981384,\n -0.015562277287244797,\n ],\n [\n -0.01641242951154709,\n 0.08524765074253082,\n -0.0727786272764206,\n -0.008548066020011902,\n ],\n ],\n [\n [\n 0.15514683723449707,\n -0.09366090595722198,\n 0.3061012029647827,\n -0.3616088628768921,\n ],\n [\n 0.28090208768844604,\n -0.38282686471939087,\n 0.008863434195518494,\n 0.21008771657943726,\n ],\n [\n -0.10839138925075531,\n 0.2646597623825073,\n -0.5020549297332764,\n 0.35083478689193726,\n ],\n [\n 0.005838701035827398,\n -0.029675094410777092,\n 0.04914196580648422,\n -0.10250984132289886,\n ],\n ],\n [\n [\n 0.18890158832073212,\n 0.3116491138935089,\n 0.15123975276947021,\n 0.074709951877594,\n ],\n [\n -0.027573950588703156,\n 0.16042113304138184,\n -0.17254289984703064,\n -0.1343500316143036,\n ],\n [\n 0.047192707657814026,\n 0.20208004117012024,\n -0.01943095773458481,\n -0.20782624185085297,\n ],\n [\n -0.04680364578962326,\n 0.06359653919935226,\n 0.04799196869134903,\n -0.05907594412565231,\n ],\n ],\n [\n [\n -0.005564738996326923,\n 0.1459812968969345,\n 0.3639175295829773,\n 0.21552257239818573,\n ],\n [\n -0.05287356674671173,\n -0.12922403216362,\n -0.0049260929226875305,\n 0.04667740315198898,\n ],\n [\n 0.06709674000740051,\n -0.0762409120798111,\n -0.06315286457538605,\n -0.10927218943834305,\n ],\n [\n -0.019926942884922028,\n 0.06360937654972076,\n -0.027559401467442513,\n -0.03374142572283745,\n ],\n ],\n [\n [\n 0.1533236801624298,\n 0.08659995347261429,\n -0.08708333969116211,\n 0.06116808205842972,\n ],\n [\n -0.24589480459690094,\n 0.10328409075737,\n 0.16698980331420898,\n 0.1809084266424179,\n ],\n [\n -0.014488153159618378,\n 
-0.18130677938461304,\n 0.056411802768707275,\n -0.1298111528158188,\n ],\n [\n 0.05507495626807213,\n -0.1606965959072113,\n 0.21048882603645325,\n -0.13742762804031372,\n ],\n ],\n ]\n ]\n )\n test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))\n output = output.sum()\n output.backward()\n np_grad = [\n [\n [\n [0.0822635293006897, 0.0822635293006897],\n [0.0822635293006897, 0.0822635293006897],\n ],\n [\n [0.4193778932094574, 0.4193778932094574],\n [0.4193778932094574, 0.4193778932094574],\n ],\n ]\n ]\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))\n\n\ndef _test_deconv_group_large_in_channel(test_case, device):\n np_arr = [\n [\n [\n [0.6393764315295867, 0.3890587560476374],\n [0.8467359871201484, 0.24046160407703143],\n ],\n [\n [0.23352071016856402, 0.6760713653927521],\n [0.061939453383917376, 0.13541973098624682],\n ],\n [\n [0.7524804920779914, 0.34366296030931365],\n [0.4961502482687954, 0.38175448164636205],\n ],\n [\n [0.01867975512238773, 0.12599156959160163],\n [0.2658608593205851, 0.6184459583178925],\n ],\n ]\n ]\n input = flow.tensor(\n np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n m = nn.ConvTranspose2d(4, 2, 3, stride=1, groups=2, bias=False)\n weight = np.array(\n [\n [\n [\n [0.09130779653787613, -0.15347552299499512, -0.30601766705513],\n [-0.32231491804122925, 0.2088468372821808, 0.27517038583755493],\n [0.07109051942825317, 0.1529977172613144, 0.02908332832157612],\n ]\n ],\n [\n [\n [0.2900483012199402, 0.21056903898715973, -0.33150768280029297],\n [0.23826952278614044, -0.31094294786453247, 0.15310363471508026],\n [-0.21622958779335022, 0.24211928248405457, 0.0276408139616251],\n ]\n ],\n [\n [\n [-0.13352541625499725, -0.051541853696107864, -0.3144535720348358],\n [-0.2504778206348419, 0.11374961584806442, 0.09812634438276291],\n [0.07692340761423111, -0.0775483027100563, 0.33147329092025757],\n ]\n ],\n [\n [\n [0.3205569088459015, 0.12369465827941895, 0.1003061905503273],\n [0.1256311535835266, -0.07405238598585129, -0.24326899647712708],\n [0.14765889942646027, 0.016902882605791092, -0.12650541961193085],\n ]\n ],\n ]\n )\n m.weight = flow.nn.Parameter(flow.Tensor(weight))\n m = m.to(device)\n np_out = np.array(\n [\n [\n [\n [\n 0.12611234188079834,\n 0.1826610565185547,\n -0.19042569398880005,\n -0.34318169951438904,\n ],\n [\n -0.05516064167022705,\n 0.04093143343925476,\n -0.2053149938583374,\n 0.0920882523059845,\n ],\n [\n -0.2631978690624237,\n 0.14817529916763306,\n 0.4988565742969513,\n 0.11690345406532288,\n ],\n [\n 0.04680176079273224,\n 0.13235820829868317,\n 0.09591575711965561,\n 0.010736535303294659,\n ],\n ],\n [\n [\n -0.09448734670877457,\n -0.04197392612695694,\n -0.2368750274181366,\n -0.09542831033468246,\n ],\n [\n -0.1671580672264099,\n 0.16854587197303772,\n 0.02652890235185623,\n -0.05493755638599396,\n ],\n [\n -0.030232630670070648,\n 0.0058259665966033936,\n 0.20417997241020203,\n -0.015012085437774658,\n ],\n [\n 0.07742229104042053,\n 0.0867031067609787,\n 0.11167682707309723,\n 0.048304662108421326,\n ],\n ],\n ]\n ]\n )\n output = m(input)\n test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))\n output = output.sum()\n output.backward()\n np_grad = [\n [\n [\n [0.046688467264175415, 0.046688467264175415],\n [0.046688467264175415, 0.046688467264175415],\n ],\n [\n [0.30307042598724365, 0.30307042598724365],\n [0.30307042598724365, 0.30307042598724365],\n ],\n [\n [-0.20727425813674927, -0.20727425813674927],\n 
[-0.20727425813674927, -0.20727425813674927],\n ],\n [\n [0.3909238576889038, 0.3909238576889038],\n [0.3909238576889038, 0.3909238576889038],\n ],\n ]\n ]\n test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))\n\n\[email protected]_unless_1n1d()\nclass TestDeconv2d(flow.unittest.TestCase):\n def test_deconv2d(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_deconv_bias_false,\n _test_deconv_bias_true,\n _test_deconv_group_bias_false,\n _test_deconv_group_bias_true,\n _test_deconv_group_large_out_channel,\n _test_deconv_group_large_in_channel,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n @autotest()\n def test_deconv2d_with_random_data(test_case):\n channels = random(1, 6)\n m = torch.nn.ConvTranspose2d(\n in_channels=channels,\n out_channels=random(1, 20),\n kernel_size=random(1, 4),\n stride=random() | nothing(),\n padding=random(1, 3).to(int) | nothing(),\n dilation=random(1, 5) | nothing(),\n groups=random(1, 5) | nothing(),\n padding_mode=constant(\"zeros\") | nothing(),\n )\n m.train(random())\n device = random_device()\n m.to(device)\n x = random_tensor(ndim=4, dim1=channels).to(device)\n y = m(x)\n return y\n\n @unittest.skip(\n \"Likely to fail the test. This case should run on cpu when the problem is solved.\"\n )\n @autotest(n=30)\n def test_deconv2d_group_with_random_data(test_case):\n channels = 720 # lcm(1, 2, 3, 4, 5, 6)\n m = torch.nn.ConvTranspose2d(\n in_channels=channels,\n out_channels=channels,\n kernel_size=random(1, 4),\n stride=random() | nothing(),\n padding=random(1, 3).to(int) | nothing(),\n dilation=random(1, 5) | nothing(),\n groups=random(1, 7),\n padding_mode=constant(\"zeros\") | nothing(),\n )\n m.train(random())\n\n device = random_device()\n m.to(device)\n m.pytorch.to(\"cuda\")\n x = random_tensor(ndim=4, dim1=channels).to(device)\n x.pytorch = x.pytorch.to(\"cuda\")\n y = m(x)\n return y\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.array"
]
] |
Kingsford-Group/polarset
|
[
"26da7debd5a4c4a456fcf7ac3749f06527db0226"
] |
[
"anchor_sets_bit.py"
] |
[
"# these are re-implementations of CoverageChecker and KMerChain that\n# uses bitvectors (array in Python) to improve performance.\n\nfrom collections import defaultdict\nfrom anchor_sets import CoverageChecker, KMerChain\nfrom array import array\nimport numpy as np\nimport pickle\nimport logging\nfrom tqdm import trange\nimport argparse\n# from multiprocessing import shared_memory\nworking_dir = \"/home/hongyuz/data/anchorsets/\"\nkc_preload_dir = working_dir + \"preload/\"\n_global_sorted_idx = dict()\n_global_lookup_table = dict()\n_global_heights = dict()\n\n\ndef val_iter(n, val = 0):\n '''\n A helper function that yields N zeroes.\n @param n: number of zeroes.\n '''\n for i in range(n): yield val\n\n\nclass BitCoverageChecker(CoverageChecker):\n '''\n This class supercedes CoverageChecker and uses a bit vector to store set of\n current/commited locations.\n NOTE: This works for the layered anchor set only.\n NOTE: The labels are disabled.\n NOTE: The caching is disabled for now.\n '''\n\n @classmethod\n def empty_cells(self, len):\n return None # this will be implemented separately.\n\n\n def __init__(self, n, w):\n '''\n The new initializer.\n '''\n super().__init__(n, w)\n assert self.w <= 100\n self._need_exbit = (self.w > 50)\n # commited stores boundary only - signed bits\n self.commited_lo = array('b', val_iter(self.num_blocks, -1))\n self.commited_hi = array('b', val_iter(self.num_blocks, -1))\n # current stores bit vector - emulated by two unsigned LLs\n # commited are also stored here for the sake of clarity..?\n self.commited = array('Q', val_iter(self.num_blocks * 2))\n self.current = array('Q', val_iter(self.num_blocks * 2))\n\n def _verify_timestamp(self, b):\n if self._cur_ts[b] < self._current_time:\n self._cur_ts[b] = self._current_time\n self.current[b*2] = 0\n if self._need_exbit:\n self.current[b*2+1] = 0\n\n def check_commited(self, x):\n '''\n Check the commited locations for whether the location is covered.\n @param x: the location to check.\n @return: (leftmost closest element, rightmost closest element) if these\n are within w units; Otherwise return self.PH_MIN, self.PH_MAX as\n placeholders\n NOTE: This is not exact at times; However it is only wrong (that is,\n the return values are not exactly the closest elements) when it is\n guaranteed that the location is covered by commited locations already\n (that is, verify returns C_COVERED)\n '''\n if x == self._com_cached_x:\n return self._com_cached_res\n b = x // self.w # current block.\n rb = self.PH_MAX\n lb = self.PH_MIN\n if self.commited_lo[b] != -1: # nonempty block\n for offset in [self.commited_lo[b], self.commited_hi[b]]:\n y = offset + b * self.w\n if y >= x: rb = min(rb, y)\n if y <= x: lb = max(lb, y)\n if (b > 0) and (lb == self.PH_MIN):\n if self.commited_lo[b-1] != -1:\n y = self.commited_hi[b-1] + (b-1) * self.w\n if y >= x - self.w: lb = y\n if rb == self.PH_MAX:\n if self.commited_lo[b+1] != -1:\n y = self.commited_lo[b+1] + (b+1) * self.w\n if y <= x + self.w: rb = y\n self._com_cached_x = x\n self._com_cached_res = lb, rb\n return lb, rb\n\n def _location_in_block_iter(self, b, for_commited = False):\n '''\n Helper function to iterate over list of locations in a block.\n @param b: index of the block.\n @param for_commited: If looking for commited locations instead.\n @return: yields list of locations within this block, with offests added back.\n '''\n base = b * self.w\n if for_commited:\n ll = [self.commited[b*2]]\n if self._need_exbit:\n ll.append(self.commited[b*2+1])\n else:\n 
self._verify_timestamp(b)\n ll = [self.current[b*2]]\n if self._need_exbit:\n ll.append(self.current[b*2+1])\n for v in ll[:]:\n while v > 0:\n hs = v.bit_length() - 1\n yield base + hs\n v -= 1 << hs\n base += 50 # this handles the base shift from the second location\n\n def get_covered_list_from_current(self, x, tags = True):\n '''\n Get the set of (location, tag) that is within w units of current location.\n NOTE: Now tag is always None.\n @param x: the location to check.\n @param tags: if tags are appended. This is for legacy compatibility.\n @return: list of (location, tag) within w units of current location.\n '''\n assert not tags\n b = x // self.w # current block.\n ret = []\n self._verify_timestamp(b)\n for e in self._location_in_block_iter(b):\n ret.append(e)\n if b > 0:\n self._verify_timestamp(b-1)\n for e in self._location_in_block_iter(b-1):\n if e >= x - self.w: ret.append(e)\n self._verify_timestamp(b+1)\n for e in self._location_in_block_iter(b+1):\n if e <= x + self.w: ret.append(e)\n return ret\n\n def _add_to_counter(self, x, label = None):\n b = x // self.w\n assert self._cur_ts[b] == self._current_time\n base_val = x - b * self.w\n if base_val >= 50:\n assert base_val < 100\n vv = 1 << (base_val - 50)\n assert (self.current[b*2+1] & vv) == 0\n self.current[b*2+1] += vv\n else:\n assert base_val >= 0\n vv = 1 << base_val\n assert (self.current[b*2] & vv) == 0\n self.current[b*2] += vv\n\n def _rem_from_counter(self, x):\n b = x // self.w\n assert self._cur_ts[b] == self._current_time\n base_val = x - b * self.w\n if base_val >= 50:\n assert base_val < 100\n vv = 1 << (base_val - 50)\n assert (self.current[b*2+1] & vv) == vv\n self.current[b*2+1] -= vv\n else:\n assert base_val >= 0\n vv = 1 << base_val\n assert (self.current[b*2] & vv) == vv\n self.current[b*2] -= vv\n\n def commit_all(self):\n '''\n Commit everything in current and start over.\n '''\n for b in range(self.num_blocks):\n self._verify_timestamp(b)\n for v in self._location_in_block_iter(b):\n orig = v - b * self.w\n if self.commited_lo[b] == -1:\n self.commited_lo[b] = orig\n self.commited_hi[b] = orig\n else:\n self.commited_lo[b] = min(self.commited_lo[b], orig)\n self.commited_hi[b] = max(self.commited_hi[b], orig)\n if orig >= 50:\n vv = 1 << (orig - 50)\n assert (self.commited[b*2+1] & vv) == 0\n self.commited[b*2+1] += vv\n else:\n vv = 1 << orig\n assert (self.commited[b*2] & vv) == 0\n self.commited[b*2] += vv\n self.commited_ele += self.cur_ele\n self.commited_segs += self.cur_segs\n self.commited_cover += self.cur_cover\n self.start_over()\n\n def _get_all_locations(self, commited_only = False):\n '''\n Profile-only function.\n '''\n for i in range(self.num_blocks):\n l = list(self._location_in_block_iter(i, True))\n if len(l) > 0:\n assert self.commited_hi[i] == max(l) - i * self.w\n assert self.commited_lo[i] == min(l) - i * self.w\n if not commited_only:\n l.extend(list(self._location_in_block_iter(i, False)))\n l.sort()\n for x in l:\n yield x\n\ndef sequence_mer_list(k, seq):\n chmap = {'A': 2, 'C': 0, 'G': 1, 'T': 3}\n slen = len(seq)\n modulus = 4 ** k\n cur = 0\n ret = [0] * (slen - k + 1)\n for i in range(k-1):\n cur = cur * 4 + chmap[seq[i]]\n for i in trange(k-1, slen):\n cur = (cur * 4 + chmap[seq[i]]) % modulus\n ret[i-(k-1)] = cur\n return ret\n\n\nclass CompactKMerChain(KMerChain):\n '''\n Re-implementation of KMerChain class, now also with the original kmer\n iterator built in the same file.\n Update: screw that, this is literally a suffix array now.\n '''\n JUMP_CAP = 30000\n 
def __init__(self, buf_prefix, n, k):\n '''\n Initialization from a suffix array dump.\n The suffix array must have been loaded into shared memory.\n @param buf_prefix: prefix of buffers.\n @param n: length of data, returned by load_shared_partial_SA.\n @param k: the value of k used.\n '''\n super().__init__([])\n logging.info(\"Loading started\")\n self.n, self.k = n, k\n self.name = buf_prefix\n assert self.k <= 30\n # self.sh_sidx = shared_memory.SharedMemory(name=buf_prefix+\"_sidx\", size=4*n)\n # self.sh_lookup = shared_memory.SharedMemory(name=buf_prefix+\"_lookup\", size=4*n)\n # self.sh_heights = shared_memory.SharedMemory(name=buf_prefix+\"_heights\", size=n)\n # self.sorted_idx = np.frombuffer(self.sh_sidx.buf, dtype=np.uint32)\n # self.lookup_table = np.frombuffer(self.sh_lookup.buf, dtype=np.uint32)\n # self.heights = np.frombuffer(self.sh_heights.buf, dtype=np.uint8)\n self.sorted_idx, self.lookup_table, self.heights = \\\n _global_sorted_idx[buf_prefix], _global_lookup_table[buf_prefix], _global_heights[buf_prefix]\n logging.info(\"SA loaded, generating jump tables\")\n self.jump_table = array('I', self.heights)\n self.jump_table[0] = 0\n last_jval = 0\n self.kmer_freq = defaultdict(int)\n for i in range(1, self.n):\n if self.heights[i - 1] >= self.k:\n last_jval += 1\n else:\n self.kmer_freq[last_jval + 1] += 1\n last_jval = 0\n self.jump_table[i] = min(self.JUMP_CAP, last_jval)\n self.kmer_freq[last_jval + 1] += 1\n logging.info(\"Jump tables loaded\")\n\n def calc_freq_cutoff(self, p):\n '''\n Calculates cutoff so certain portion of k-mers are included.\n @param p: 0<p<1 the portion of k-mers to be included.\n @return: the cutoff value.\n '''\n cur = 0\n assert p < 1\n vals = sorted(list(self.kmer_freq.items()))\n for k, v in vals:\n cur += k * v\n if cur >= self.n * p:\n return k\n assert False\n\n @property\n def data_len(self):\n return self.n\n\n def iter_by_idx(self, idx, is_repr = False):\n '''\n Returns list of sequence indexes that share the same k-mer.\n @param idx: the index of the k-mer.\n @param jump_first: helper that yields the jump table value first.\n this serves as a quick check if a k-mer is too repetitive.\n THIS IS DEPRECATED FOR NOW\n @param is_repr: if this is already the \"representative element\".\n if this is set to True, it is assumed we don't need to\n consult the jump table.\n @return: the set of indexes that share the same k-mer (incl. 
input).\n '''\n sa_idx = self.lookup_table[idx]\n lb = sa_idx\n if not is_repr:\n ljump = self.jump_table[lb]\n lb = lb - ljump\n if ljump == self.JUMP_CAP:\n while (self.jump_table[lb] != 0):\n lb -= self.jump_table[lb]\n yield self.sorted_idx[lb]\n rb = lb\n while (rb < self.n - 1) and (self.heights[rb] >= self.k):\n rb += 1\n # print(self.sorted_idx[rb], rb, self.heights[rb], self.jump_table[rb])\n yield self.sorted_idx[rb]\n\n # since now we're representing k-mers by their indexes only...\n iter_by_value = iter_by_idx\n def single_value_to_loc(self, val):\n return val\n\n def __getitem__(self, idx):\n '''\n Instead of the actual k-mer values, this function return a \"representation\"\n that is, the first index in the suffix array with the same k-mer.\n @param idx: the index of the k-mer.\n @return: the \"representation\".\n '''\n sa_idx = self.lookup_table[idx]\n lb = sa_idx\n ljump = self.jump_table[lb]\n lb = lb - ljump\n if ljump == self.JUMP_CAP:\n while (self.jump_table[lb] != 0):\n lb -= self.jump_table[lb]\n return self.sorted_idx[lb]\n\n def get_real_kmers(self, idx):\n '''\n Gets the actual k-mer, in its usual form.\n Not implemented, for now.\n '''\n assert False\n\n\ndef coverage_checker_test_new():\n '''\n Initial testing of the CoverageChecker class.\n '''\n c = BitCoverageChecker(100, 10)\n c.add_loc(0)\n c.add_loc(5)\n c.add_loc(17)\n c.add_loc(30)\n c.add_loc(40)\n c.add_loc(46)\n c.add_loc(53)\n c._show_statistics()\n c.verify_stats()\n for x in [1, 4, 10, 23, 30, 35, 45, 60, 70]:\n print(x, c.verify(x))\n c.commit_all()\n c._show_statistics()\n for x in [1, 4, 10, 23, 30, 35, 45, 60, 70]:\n print(x, c.verify(x))\n c.add_loc(23)\n c.add_loc(64)\n c.add_loc(99)\n c.verify_stats()\n c.verify_stats(True)\n for x in [1, 4, 10, 23, 30, 35, 45, 60, 70]:\n print(x, c.verify(x))\n c._show_statistics()\n c.delete_loc(23)\n c.delete_loc(64)\n c.delete_loc(99)\n c._show_statistics()\n c.verify_stats()\n c.verify_stats(True)\n\ndef coverage_checker_test_2():\n '''\n edge case testing.\n '''\n c = BitCoverageChecker(100, 10)\n c.add_loc(0)\n c._show_statistics()\n c.add_loc(10)\n c._show_statistics()\n c.add_loc(5)\n c._show_statistics()\n c.delete_loc(10)\n c.delete_loc(5)\n c._show_statistics()\n c.verify_stats()\n\n# def prepare_kc(seq_file, k):\n # '''\n # Construct the KMerChain file and write to disk (to check if this thing will\n # be faster by any means, and how large it really is).\n # '''\n # with open(seq_file + \".seq\") as f:\n # seq = f.readline().strip()\n # logging.info(\"Sequence loaded\")\n # dat = sequence_mer_list(k, seq)\n # logging.info(\"KMer list generated\")\n # ch = CompactKMerChain(dat)\n # logging.info(\"CKC object constructed\")\n # ch.save(kc_preload_dir + \"{}_{}.dat\".format(seq_file, k))\n # logging.info(\"Serialized to disk\")\n # return ch\n\ndef preprocess_partial_SA(seq_file, k, dump_file = None):\n '''\n Call this function to preprocess (partial) suffix arrays and do all the\n dirty work in advance.\n '''\n assert k < 31\n with open(seq_file + \".seq\") as f:\n seq = f.readline().strip()\n assert len(seq) < (1 << 32) # just enough for hg38\n logging.info(\"Sequence loaded\")\n dat = sequence_mer_list(k, seq)\n n = len(dat)\n logging.info(\"KMer list generated\")\n sorted_idx_ = np.argsort(dat)\n logging.info(\"Argsort completed\")\n _ph_array = [0] * n\n sorted_idx = array('L', sorted_idx_)\n heights = array('B', _ph_array)\n lookup_table = array('L', _ph_array)\n # build lookup table now\n logging.info(\"Memory allocated for arrays\")\n for i 
in trange(n):\n lookup_table[sorted_idx[i]] = i\n logging.info(\"Reverse lookup table calculated\")\n for i in trange(n-1):\n # calculate height[x]: this is the longest common prefix of\n # suffix at sorted_idx[i] and sorted_idx[i+1].\n idx0 = sorted_idx[i]\n idx1 = sorted_idx[i+1]\n if dat[idx0] == dat[idx1]: # k-mer agrees\n heights[i] = k\n else: # iterate now\n for d in range(k):\n if seq[idx0+d] != seq[idx1+d]:\n heights[i] = d\n break\n data_tuple = (n, k, sorted_idx, lookup_table, heights)\n logging.info(\"Height calculated\")\n if dump_file is None:\n dump_file = kc_preload_dir + seq_file + \"_\" + str(k) + \".dump\"\n with open(dump_file, \"bw\") as f:\n pickle.dump(data_tuple, f, protocol=4)\n logging.info(\"Completed\")\n\ndef load_shared_partial_SA(dump_file, buf_prefix):\n '''\n Loads the partial suffix array to memory via mp.shared_memory.\n @param dump_file: dump file generated by preprocess_partial_SA.\n @param buf_prefix: prefix of buffer file.\n @return: value of N to be feed to subsequent uses.\n '''\n with open(dump_file, \"br\") as f:\n n, _max_k, sorted_idx, lookup_table, heights = pickle.load(f)\n assert _max_k >= 30\n\n # sh_sidx = shared_memory.SharedMemory(name=buf_prefix+\"_sidx\", create=True,\n # size=4*n)\n # sh_lookup = shared_memory.SharedMemory(name=buf_prefix+\"_lookup\", create=True,\n # size=4*n)\n # sh_heights = shared_memory.SharedMemory(name=buf_prefix+\"_heights\", create=True,\n # size=n)\n # np_sidx = np.frombuffer(sh_sidx.buf, dtype=np.uint32)\n # np_lookup = np.frombuffer(sh_lookup.buf, dtype=np.uint32)\n # np_heights = np.frombuffer(sh_heights.buf, dtype=np.uint8)\n # assert len(np_sidx) == n\n # assert len(np_heights) == n\n # np_sidx[:] = sorted_idx[:]\n # np_lookup[:] = lookup_table[:]\n # np_heights[:] = heights[:]\n _global_heights[buf_prefix] = heights\n _global_lookup_table[buf_prefix] = lookup_table\n _global_sorted_idx[buf_prefix] = sorted_idx\n return n\n\ndef unload_shared_partial_SA(buf_prefix, n):\n '''\n Unloads the partial suffix arrays.\n @param buf_prefix: prefix of buffer file.\n '''\n pass\n # sh_sidx = shared_memory.SharedMemory(name=buf_prefix+\"_sidx\", size=4*n)\n # sh_lookup = shared_memory.SharedMemory(name=buf_prefix+\"_lookup\", size=4*n)\n # sh_heights = shared_memory.SharedMemory(name=buf_prefix+\"_heights\", size=n)\n # sh_sidx.close()\n # sh_lookup.close()\n # sh_heights.close()\n # sh_sidx.unlink()\n # sh_lookup.unlink()\n # sh_heights.unlink()\n\ndef partial_SA_test(buf_prefix, k):\n logging.basicConfig(format=\"%(asctime)s %(message)s\",datefmt=\"%I:%M:%S\",\n level=logging.DEBUG)\n dump_file = kc_preload_dir + buf_prefix + \"_30.dump\"\n n = load_shared_partial_SA(dump_file, buf_prefix)\n logging.info(\"SA loading finished\")\n for k in range(15, 25):\n kc = CompactKMerChain(buf_prefix, n, k)\n rets = []\n for idx in range(1, 20):\n p = idx / 20\n rets.append((p, kc.calc_freq_cutoff(p)))\n print(k, rets)\n quit()\n height_dist = defaultdict(int)\n for i in trange(n-1):\n height_dist[kc.heights[i]] += 1\n if kc.heights[i] < k:\n assert kc[kc.sorted_idx[i]] != kc[kc.sorted_idx[i+1]]\n else:\n assert kc[kc.sorted_idx[i]] == kc[kc.sorted_idx[i+1]]\n x = kc.iter_by_idx(kc.sorted_idx[i])\n y = kc.iter_by_idx(kc.sorted_idx[i+1])\n for i in range(20):\n a = next(x, None)\n b = next(y, None)\n if a is None:\n break\n assert a == b\n\ndef preprocess_main():\n '''\n this is main process for preprocessing. 
No need for multiprocessing.\n '''\n logging.basicConfig(format=\"%(asctime)s %(message)s\",datefmt=\"%I:%M:%S\",\n level=logging.DEBUG)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"fn\")\n args = parser.parse_args()\n logging.info(\"Parameters: {}\".format(args))\n preprocess_partial_SA(args.fn, 30)\n logging.info(\"Preprocessing finished, now doing sanity test\")\n buf_prefix = args.fn\n dump_file = kc_preload_dir + args.fn + \"_30.dump\"\n n = load_shared_partial_SA(dump_file, buf_prefix)\n logging.info(\"SA loading finished, length = {}\".format(n))\n partial_SA_test(buf_prefix, 18)\n # unload_shared_partial_SA(buf_prefix, n)\n logging.info(\"Sanity test finished\")\n\n\nif __name__ == \"__main__\":\n # coverage_checker_test_new()\n # c = BitCoverageChecker(len(ch.data), 10)\n # partial_SA_test(\"hg38_all\", None)\n # coverage_checker_test_2()\n # unload_shared_partial_SA(buf_prefix, n)\n preprocess_main()\n pass\n"
] |
[
[
"numpy.argsort"
]
] |
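A minimal sketch of the grouping idea the partial-suffix-array record above relies on: after argsort-ing the k-mer codes, equal codes (the stand-in for the record's heights[i] >= k test) sit adjacent in sorted order, so one linear pass recovers every occurrence list. Names here are illustrative, not the repository's classes.

import numpy as np

def kmer_groups(codes):
    # argsort plays the role of the sorted suffix index: equal k-mer
    # codes become adjacent, mirroring the heights[i] >= k test above
    order = np.argsort(codes, kind="stable")
    groups, current = [], [int(order[0])]
    for i in range(len(order) - 1):
        if codes[order[i]] == codes[order[i + 1]]:
            current.append(int(order[i + 1]))
        else:
            groups.append(current)
            current = [int(order[i + 1])]
    groups.append(current)
    return groups

# each group lists all original positions of one k-mer code
print(kmer_groups(np.array([3, 1, 3, 2, 1])))  # [[1, 4], [3], [0, 2]]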
kioma/densenet
|
[
"e03a590aa38159c5099f641b630cb4016e9ab6cf"
] |
[
"test_inference.py"
] |
[
"\"\"\"Test ImageNet pretrained DenseNet\"\"\"\nimport cv2\nimport numpy as np\nfrom keras.optimizers import SGD\nimport keras.backend as K\n\n# We only test DenseNet-161 in this script for demo purpose\nfrom models.densenet161 import DenseNet\n\nim = cv2.resize(cv2.imread('resources/cat.jpg'), (224, 224)).astype(np.float32)\n#im = cv2.resize(cv2.imread('shark.jpg'), (224, 224)).astype(np.float32)\n\n# Subtract mean pixel and multiple by scaling constant\n# Reference: https://github.com/shicai/DenseNet-Caffe\nim[:,:,0] = (im[:,:,0] - 103.94) * 0.017\nim[:,:,1] = (im[:,:,1] - 116.78) * 0.017\nim[:,:,2] = (im[:,:,2] - 123.68) * 0.017\n\nif K.image_dim_ordering() == 'th':\n # Transpose image dimensions (Theano uses the channels as the 1st dimension)\n im = im.transpose((2,0,1))\n\n # Use pre-trained weights for Theano backend\n weights_path = 'imagenet_weights/densenet161_weights_th.h5'\nelse:\n # Use pre-trained weights for Tensorflow backend\n weights_path = 'imagenet_weights/densenet161_weights_tf.h5'\n\n# Insert a new dimension for the batch_size\nim = np.expand_dims(im, axis=0)\n\n# Test pretrained model\nmodel = DenseNet(reduction=0.5, classes=1000, weights_path=weights_path)\n\nsgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n\nout = model.predict(im)\n\n# Load ImageNet classes file\nclasses = []\nwith open('resources/classes.txt', 'r') as list_:\n for line in list_:\n classes.append(line.rstrip('\\n'))\n\nprint('Prediction: '+str(classes[np.argmax(out)]))\n"
] |
[
[
"numpy.expand_dims",
"numpy.argmax"
]
] |
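The channel-by-channel preprocessing in the record above follows the DenseNet-Caffe convention (subtract the BGR means 103.94/116.78/123.68, then scale by 0.017); it can be written as a single broadcast. A small sketch, assuming a BGR image as loaded by cv2:

import numpy as np

def preprocess_densenet(im_bgr):
    # DenseNet-Caffe convention: per-channel BGR mean subtraction, 0.017 scale
    mean = np.array([103.94, 116.78, 123.68], dtype=np.float32)
    return (im_bgr.astype(np.float32) - mean) * 0.017

x = preprocess_densenet(np.zeros((224, 224, 3), dtype=np.uint8))
print(x.shape, x.dtype)  # (224, 224, 3) float32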
debodeepkar/adni_research
|
[
"d8061d0a68e1aca6517b8f15089a3331dd6819e6"
] |
[
"entropy.py"
] |
[
"import nibabel as nb\nimport os\nfrom skimage import data\nfrom skimage.measure.entropy import shannon_entropy\nfrom skimage.color import rgb2gray\nimport numpy as np\n\npath = \"/Users/debodeepkar/Documents/ADNI/NORMALIZED/ANTS/CN/\"\nos.chdir(path)\nfile = os.listdir(path) #returns a list with path\n\nslice=[]\nA=[]\nB=[]\n\nfor i in file:\n #i = str(i)\n img = nb.load(i)\n ent=[]\n #name=np.append(name,i)\n for i in range(0,img.shape[2]):\n \n org=img.get_data()[:,:,i]\n gray=rgb2gray(org)\n entr=shannon_entropy(gray)\n ent.append(entr)\n slice=np.append(slice,np.argmax(ent))\n x = np.argmax(ent)\n a = x-5\n b = x+5\n A=np.append(A,a)\n B=np.append(B,b)\n \nprint(slice) # prints the slice number with highest entropy\nprint(file) #prints the respective file name\n"
] |
[
[
"numpy.append",
"numpy.argmax"
]
] |
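Condensed sketch of what the entropy script above computes per volume: the maximum-entropy axial slice and a ±5 window around it. The bounds clamping is an addition the original omits (its a = x-5 can go negative); everything else mirrors the record.

import numpy as np
from skimage.measure import shannon_entropy

def entropy_window(vol, half_width=5):
    # Shannon entropy of every axial slice, then the argmax and its window
    ent = [shannon_entropy(vol[:, :, z]) for z in range(vol.shape[2])]
    x = int(np.argmax(ent))
    return x, max(x - half_width, 0), min(x + half_width, vol.shape[2] - 1)

# real use would pass nib.load(path).get_fdata() instead of random data
print(entropy_window(np.random.rand(32, 32, 16)))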
rl-navigation/deployable
|
[
"c06f0913297069ac2a064124ea591a17552a3d75"
] |
[
"src/renderer.py"
] |
[
"from __future__ import print_function, division\nimport numpy as np, cv2, networkx as nx\n\n#==============================================================================\n# ENVIRONMENT RENDERER\n#==============================================================================\n\n_render_cache = {}\n_optimal_paths = None\ndef render(graph, id_to_location, goal_location, goal_backward, timestep,\n time_limit, agent_obs, goal_obs, goal_features, agent_features,\n agent_location=None, agent_backward=None, curriculum_levels=None,\n curriculum_level=None, start_location=None, curriculum_goals=None,\n mode=\"human\", wait=1, action_probs=None, label_clusters=False,\n msg_color=None, path_so_far=None, msg=None, localization=None,\n goal_estimate=None, valid_actions=None, action_keys=None):\n\n global _render_cache, _optimal_paths\n\n W = H = 475; B = 20\n agent_color = (0,255,0)\n goal_color = (0,128,255)\n node_color = (255,0,0)\n curriculum_color = (255,255,0)\n start_color = (0,0,255)\n path_color = (0,255,0)\n optimal_color = (255,0,255)\n msg_color = (0,0,255) if msg_color is None else msg_color\n\n # transform node positions to workspace coordinates\n def topos(node,offset=0):\n x,y = node[\"pose\"][0:2]\n if len(node[\"origin\"]) == 3: dx,dy,dtheta = node[\"origin\"]\n else: dx, dy = node[\"origin\"]; dtheta = 0\n xr = x*np.cos(dtheta) + y*np.sin(dtheta)\n yr = x*np.cos(dtheta+np.pi/2) + y*np.sin(dtheta+np.pi/2)\n x = xr + dx\n y = yr + dy\n if type(offset) is not int: offset = np.matmul(np.array([[np.cos(-dtheta), -np.sin(-dtheta)],[np.sin(-dtheta), np.cos(-dtheta)]]), offset)\n return np.array([x,y],np.float32) + offset\n\n # determine the size of the workspace\n poses = [topos(node) for node in graph[\"graph_nodes\"].values()]\n poses = np.array(poses)\n xmn = poses[:,0].min(); xmx = poses[:,0].max(); xgp = xmx-xmn\n ymn = poses[:,1].min(); ymx = poses[:,1].max(); ygp = ymx-ymn\n if ygp > xgp: xmn-=(ygp-xgp)/2; xmx+=(ygp-xgp)/2\n elif xgp > ygp: ymn-=(xgp-ygp)/2; ymx+=(xgp-ygp)/2\n def topix(x,y):\n return (int((x-xmn)/(xmx-xmn+1e-8)*(W-2*B)+B), H-int((y-ymn)/(ymx-ymn+1e-8)*(H-2*B)+B))\n\n render_key = (curriculum_level, start_location)\n if render_key in _render_cache:\n img = _render_cache[render_key].copy()\n\n else:\n img = np.full((H,W,3), 255, np.uint8)\n\n cluster_centers = {}\n\n # draw edges first\n for node_id,node in graph[\"graph_nodes\"].items():\n for target_id,edge_type,edge_direction in node[\"edges\"]:\n p1 = topix(*topos(node))\n p2 = topix(*topos(graph[\"graph_nodes\"][str(target_id)]))\n cv2.line(img, p1, p2, (0,0,0), 2)\n\n # draw nodes\n for node_id,node in graph[\"graph_nodes\"].items():\n pos = topix(*topos(node))\n if node[\"cluster\"] not in cluster_centers: cluster_centers[node[\"cluster\"]] = []\n cluster_centers[node[\"cluster\"]].append(list(pos))\n cv2.circle(img, pos, 5, node_color, -1)\n\n if curriculum_goals is not None:\n # draw curriculum-available goals\n for n_idx in curriculum_goals:\n node = graph[\"graph_nodes\"][str(id_to_location[n_idx])]\n cv2.circle(img, topix(*topos(node)), 3, curriculum_color, -1)\n\n if label_clusters:\n # draw cluster ids\n for cluster in cluster_centers.keys():\n pos = tuple(list(map(int, np.mean(cluster_centers[cluster], axis=0))))\n cv2.putText(img, str(cluster), pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1)\n\n if start_location is not None:\n # draw optimal path\n if _optimal_paths is None:\n G = nx.Graph()\n for node_id,node in graph[\"graph_nodes\"].items(): G.add_node(node_id)\n for node_id,node in 
graph[\"graph_nodes\"].items():\n for (target_id,edge_type,edge_direction) in node[\"edges\"]:\n G.add_edge(node_id, str(target_id))\n G.add_edge(str(target_id), node_id)\n _optimal_paths = dict(nx.all_pairs_shortest_path(G))\n for node_id in _optimal_paths[id_to_location[start_location]][id_to_location[goal_location]]:\n cv2.circle(img, topix(*topos(graph[\"graph_nodes\"][str(node_id)])), 3, optimal_color, -1)\n\n # draw start location\n node = graph[\"graph_nodes\"][str(id_to_location[start_location])]\n cv2.circle(img, topix(*topos(node)), 4, start_color, -1)\n\n # draw little dot inside nodes\n for node_id,node in graph[\"graph_nodes\"].items():\n pos = topix(*topos(node))\n cv2.circle(img, pos, 1, (0,0,0), -1)\n\n # draw legend\n cv2.putText(img, \"agent\", (img.shape[1]-90, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, agent_color, 2)\n cv2.putText(img, \"goal\", (img.shape[1]-90, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, goal_color, 2)\n cv2.putText(img, \"nodes\", (img.shape[1]-90, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, node_color, 2)\n cv2.putText(img, \"curriculum\", (img.shape[1]-90, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, curriculum_color, 2)\n cv2.putText(img, \"start\", (img.shape[1]-90, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, start_color, 2)\n cv2.putText(img, \"path\", (img.shape[1]-90, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, path_color, 2)\n cv2.putText(img, \"optimal\", (img.shape[1]-90, 105), cv2.FONT_HERSHEY_SIMPLEX, 0.5, optimal_color, 2)\n\n # cache image so we don't have to do this every frame\n _render_cache[render_key] = img.copy()\n\n if path_so_far is not None:\n # render agent's path so far\n for loc in path_so_far:\n cv2.circle(img, topix(*topos(graph[\"graph_nodes\"][str(id_to_location[loc])])), 1, path_color, -1)\n\n # render localization and goal estimates\n if localization is not None or goal_estimate is not None:\n lmx = localization .max() if localization is not None else 1\n gmx = goal_estimate.max() if goal_estimate is not None else 1\n if localization is not None: nl = len(localization)\n if goal_estimate is not None: nl = len(goal_estimate)\n for i in sorted(range(nl), key=lambda x: localization[x]+goal_estimate[x]):\n L = localization [i]/lmx if localization is not None else 0\n G = goal_estimate[i]/gmx if goal_estimate is not None else 0\n color = (int(L*255), 0, int(G*255))\n pos = topix(*topos(graph[\"graph_nodes\"][str(id_to_location[i])]))\n cv2.circle(img, pos, 5, color, -1)\n\n # draw goal\n goal_node = id_to_location[goal_location]\n cv2.circle(img, topix(*topos(graph[\"graph_nodes\"][goal_node])), 10, goal_color, 2)\n\n if agent_location is not None:\n # draw agent\n agent_node = id_to_location[agent_location]\n cv2.circle(img, topix(*topos(graph[\"graph_nodes\"][agent_node])), 8, agent_color, 2)\n vector = np.array([5*np.cos (graph[\"graph_nodes\"][agent_node][\"pose\"][3]+agent_backward*-np.pi/2),\n 5*np.sin (graph[\"graph_nodes\"][agent_node][\"pose\"][3]+agent_backward*-np.pi/2)])\n cv2.line (img, topix(*topos(graph[\"graph_nodes\"][agent_node])),\n topix(*topos(graph[\"graph_nodes\"][agent_node], vector)), agent_color, 3)\n\n if curriculum_level is not None or curriculum_levels is not None:\n # print curriculum level\n cv2.putText(img, \"curriculum level {} / {}\".format(curriculum_level, curriculum_levels), (20,25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2)\n\n # render time limit\n cv2.putText(img, \"time limit {} / {}\".format(int(timestep), int(time_limit)), (20,50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2)\n\n def getColorJet(v, vmin=0, vmax=1):\n c = [1., 
1., 1.] # white\n R, G, B = 0, 1, 2\n dv = 0\n if v < vmin: v = vmin\n if v > vmax: v = vmax\n dv = vmax - vmin\n if v < (vmin + 0.25 * dv): c[R] = 0; c[G] = 4 * (v - vmin) / dv\n elif v < (vmin + 0.5 * dv): c[R] = 0.; c[B] = 1 + 4 * (vmin + 0.25 * dv - v) / dv\n elif v < (vmin + 0.75 * dv): c[R] = 4 * (v - vmin - 0.5 * dv) / dv; c[B] = 0.\n else: c[G] = 1 + 4 * (vmin + 0.75 * dv - v) / dv; c[B] = 0.\n return tuple([int(ci*255) for ci in reversed(c)])\n\n # render action probabilities\n if action_probs is not None:\n num_nonlocal_destinations = len(action_probs)-3\n labels = [\"L\", \"F\", \"R\"] + list(map(str, range(num_nonlocal_destinations)))\n for i,p in enumerate(action_probs):\n color = getColorJet(p)\n bw = 20; bh = 10; w = 20; h = 50\n x = bw + 2*w*i\n y = img.shape[1]-bh\n cv2.rectangle(img, (x,y-20), (x+w, y-20-int(p*h+1)), color, -1)\n cv2.putText(img, labels[i], (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2)\n cv2.putText(img, \"policy\", (20, img.shape[1]-20-50-25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2)\n\n # render observation and goal vectors\n def vec2img(v):\n if v is None: v = np.zeros((4096,), np.float32)\n v = (v.flatten()-np.abs(v).min())/(np.abs(v).max()-np.abs(v).min()+1e-8)\n im = np.zeros((int(np.ceil(v.shape[0]**0.5)**2),3), np.uint8)\n im[:v.shape[0],0] =-np.clip(v,a_min=None,a_max=0)*255\n im[:v.shape[0],2] = np.clip(v,a_min=0,a_max=None)*255\n im = im.reshape(int(im.shape[0]**0.5), int(im.shape[0]**0.5), 3)\n im = cv2.resize(im, dsize=(W,H), interpolation=cv2.INTER_NEAREST)\n return im\n\n # render obs and goal vectors as a square image\n v_o = vec2img(agent_features)\n v_g = vec2img(goal_features)\n cv2.putText(v_o, \"obs\", (20,35), cv2.FONT_HERSHEY_SIMPLEX, 1, agent_color, 4)\n cv2.putText(v_g, \"goal\", (20,35), cv2.FONT_HERSHEY_SIMPLEX, 1, goal_color, 4)\n vob = np.hstack([v_o, v_g])\n vob = cv2.resize(vob, dsize=None, fx=img.shape[1]/vob.shape[1], fy=img.shape[1]/vob.shape[1])\n img = np.vstack([img, vob])\n\n # render observation and goal images\n i_g = goal_obs\n i_o = cv2.resize(agent_obs, dsize=(i_g.shape[1], i_g.shape[0]))\n cv2.putText(i_o, \"obs image\", (20,25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, agent_color, 2)\n cv2.putText(i_g, \"goal image\", (20,25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, goal_color, 2)\n iob = np.vstack([i_o, i_g])\n\n # render user instruction\n if msg is not None:\n S = 1; T = 2; x = 20; y = 50\n ret, baseline = cv2.getTextSize(msg, cv2.FONT_HERSHEY_SIMPLEX, S, T)\n ygap = ret[1]+10\n for i,text in enumerate(msg.split(\"|\")):\n ret, baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, S, T)\n cv2.rectangle(iob, (x-10,y+i*ygap-ret[1]-10), (x+ret[0]+10,y+i*ygap+10), (0,0,0), -1)\n for i,text in enumerate(msg.split(\"|\")):\n cv2.putText(iob, text, (x,y+i*ygap), cv2.FONT_HERSHEY_SIMPLEX, S, msg_color, T)\n\n # combine map and images\n iob = cv2.resize(iob, dsize=None, fx=img.shape[0]/iob.shape[0], fy=img.shape[0]/iob.shape[0])\n img = np.hstack([img, iob])\n\n if valid_actions is not None and action_keys is not None:\n S = 2; T = 4; x = 200\n key_string = \"valid keys: \" + \" \".join([action_keys[action] for action in sorted(valid_actions)])\n ret, baseline = cv2.getTextSize(key_string, cv2.FONT_HERSHEY_SIMPLEX, S, T)\n cv2.rectangle(img, (x-10, img.shape[0]-ret[1]-50), (x+ret[0]+10, img.shape[0]), (0,0,0), -1)\n cv2.putText (img, key_string, (x,img.shape[0]-40), cv2.FONT_HERSHEY_SIMPLEX, S, (255,255,255), T)\n\n # 1080p aspect ratio\n dy = 1080-img.shape[0]\n img = np.pad(img, ((dy//2,dy//2),(0,0),(0,0)), 
mode=\"constant\", constant_values=0)\n img = cv2.resize(img, dsize=(1920,1080))\n\n # show in a window\n if mode == \"human\":\n cv2.imshow(\"GraphEnv\", img)\n key = cv2.waitKey(wait)\n return key, img\n\n return -1, img\n\n"
] |
[
[
"numpy.hstack",
"numpy.pad",
"numpy.abs",
"numpy.clip",
"numpy.cos",
"numpy.full",
"numpy.sin",
"numpy.ceil",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
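The renderer above maps workspace coordinates to canvas pixels through its topos/topix helpers. A simplified, self-contained sketch of that mapping (per-axis scaling only; the original additionally pads the smaller extent so the aspect ratio stays square):

import numpy as np

def to_pixels(points, size=475, border=20):
    # normalize points into [border, size - border], flipping y for image rows
    pts = np.asarray(points, dtype=np.float32)
    mn, mx = pts.min(axis=0), pts.max(axis=0)
    pix = (pts - mn) * (size - 2 * border) / (mx - mn + 1e-8) + border
    pix[:, 1] = size - pix[:, 1]  # image rows grow downward
    return pix.astype(int)

print(to_pixels([[0.0, 0.0], [1.0, 2.0], [2.0, 1.0]]))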
G-arj/qdk-python
|
[
"4bb4fa371347dc76b0448dfc79c557d468002f74"
] |
[
"azure-quantum/tests/unit/test_target.py"
] |
[
"import unittest\nimport warnings\nimport pytest\n\nimport numpy as np\n\nfrom azure.core.exceptions import HttpResponseError\nfrom azure.quantum.job.job import Job\nfrom azure.quantum._client.models import CostEstimate, UsageEvent\nfrom azure.quantum.target import IonQ, Honeywell, Quantinuum\n\nfrom common import QuantumTestBase, ZERO_UID\n\n\nclass TestIonQ(QuantumTestBase):\n \"\"\"TestIonq\n\n Tests the azure.quantum.target.ionq module.\n \"\"\"\n\n mock_create_job_id_name = \"create_job_id\"\n create_job_id = Job.create_job_id\n\n def get_test_job_id(self):\n return ZERO_UID if self.is_playback \\\n else Job.create_job_id()\n\n def _3_qubit_ghz(self):\n return {\n \"qubits\": 3,\n \"circuit\": [\n {\n \"gate\": \"h\",\n \"target\": 0\n },\n {\n \"gate\": \"cnot\",\n \"control\": 0,\n \"target\": 1\n },\n {\n \"gate\": \"cnot\",\n \"control\": 0,\n \"target\": 2\n },\n ]\n }\n\n @pytest.mark.ionq\n def test_estimate_cost_ionq(self):\n workspace = self.create_workspace()\n circuit = self._3_qubit_ghz()\n target = IonQ(workspace=workspace, name=\"ionq.simulator\")\n cost = target.estimate_cost(circuit, num_shots=100e3)\n assert cost.estimated_total == 0.0\n\n target = IonQ(workspace=workspace, name=\"ionq.qpu\")\n cost = target.estimate_cost(circuit, num_shots=100e3)\n assert np.round(cost.estimated_total) == 63.0\n\n\n @pytest.mark.ionq\n @pytest.mark.live_test\n def test_job_submit_ionq(self):\n self._test_job_submit_ionq(num_shots=None)\n\n @pytest.mark.ionq\n @pytest.mark.live_test\n def test_job_submit_ionq_100_shots(self):\n self._test_job_submit_ionq(num_shots=100)\n\n @pytest.mark.ionq\n @pytest.mark.live_test\n def test_job_submit_ionq_cost_estimate(self):\n job = self._test_job_submit_ionq(num_shots=None)\n self.assertIsNotNone(job.details)\n cost_estimate: CostEstimate = job.details.cost_estimate\n self.assertIsNotNone(cost_estimate)\n self.assertEqual(cost_estimate.currency_code, \"USD\")\n events: list[UsageEvent] = cost_estimate.events\n self.assertGreater(len(events), 0)\n self.assertGreaterEqual(cost_estimate.estimated_total, 0)\n\n def _test_job_submit_ionq(self, num_shots, circuit=None):\n\n with unittest.mock.patch.object(\n Job,\n self.mock_create_job_id_name,\n return_value=self.get_test_job_id(),\n ):\n workspace = self.create_workspace()\n if circuit is None:\n circuit = self._3_qubit_ghz()\n target = IonQ(workspace=workspace)\n job = target.submit(\n circuit=circuit,\n name=\"ionq-3ghz-job\",\n num_shots=num_shots\n )\n\n # If in recording mode, we don't want to record the pooling of job\n # status as the current testing infrastructure does not support\n # multiple identical requests.\n # So we pause the recording until the job has actually completed.\n # See: https://github.com/microsoft/qdk-python/issues/118\n self.pause_recording()\n try:\n # Set a timeout for IonQ recording\n job.wait_until_completed(timeout_secs=60)\n except TimeoutError:\n warnings.warn(\"IonQ execution exceeded timeout. 
Skipping fetching results.\")\n\n # Check if job succeeded\n self.assertEqual(True, job.has_completed())\n assert job.details.status == \"Succeeded\"\n self.resume_recording()\n\n # Record a single GET request such that job.wait_until_completed\n # doesn't fail when running recorded tests\n # See: https://github.com/microsoft/qdk-python/issues/118\n job.refresh()\n\n job = workspace.get_job(job.id)\n self.assertEqual(True, job.has_completed())\n\n if job.has_completed():\n results = job.get_results()\n assert \"histogram\" in results\n assert results[\"histogram\"][\"0\"] == 0.5\n assert results[\"histogram\"][\"7\"] == 0.5\n\n if num_shots:\n assert job.details.input_params.get(\"shots\") == num_shots\n else:\n assert job.details.input_params.get(\"shots\") is None\n\n return job\n\n\nclass TestHoneywell(QuantumTestBase):\n mock_create_job_id_name = \"create_job_id\"\n create_job_id = Job.create_job_id\n\n def get_test_job_id(self):\n return ZERO_UID if self.is_playback \\\n else Job.create_job_id()\n\n def _teleport(self):\n return \"\"\"OPENQASM 2.0;\n include \"qelib1.inc\";\n\n qreg q[3];\n creg c0[1];\n creg c1[1];\n creg c2[1];\n\n h q[0];\n cx q[0], q[1];\n x q[2];\n h q[2];\n cx q[2], q[0];\n h q[2];\n measure q[0] -> c0[0];\n if (c0==1) x q[1];\n measure q[2] -> c1[0];\n if (c1==1) z q[1];\n h q[1];\n measure q[1] -> c2[0];\n \"\"\"\n\n @pytest.mark.honeywell\n def test_job_estimate_cost_honeywell(self, provider_id=\"honeywell\"):\n with unittest.mock.patch.object(\n Job,\n self.mock_create_job_id_name,\n return_value=self.get_test_job_id(),\n ):\n workspace = self.create_workspace()\n circuit = self._teleport()\n\n target = Honeywell(workspace=workspace, name=\"honeywell.hqs-lt-s1-apival\") if provider_id == \"honeywell\" \\\n else Quantinuum(workspace=workspace, name=\"quantinuum.hqs-lt-s1-apival\")\n\n cost = target.estimate_cost(circuit, num_shots=100e3)\n assert cost.estimated_total == 0.0\n\n target = Honeywell(workspace=workspace, name=\"honeywell.hqs-lt-s1\") if provider_id == \"honeywell\" \\\n else Quantinuum(workspace=workspace, name=\"quantinuum.hqs-lt-s1\")\n\n cost = target.estimate_cost(circuit, num_shots=100e3)\n assert cost.estimated_total == 845.0\n\n @pytest.mark.honeywell\n def test_job_estimate_cost_quantinuum(self):\n if self.get_test_quantinuum_enabled():\n self.test_job_estimate_cost_honeywell(provider_id=\"quantinuum\")\n\n @pytest.mark.honeywell\n @pytest.mark.live_test\n def test_job_submit_honeywell(self, provider_id=\"honeywell\"):\n with unittest.mock.patch.object(\n Job,\n self.mock_create_job_id_name,\n return_value=self.get_test_job_id(),\n ):\n workspace = self.create_workspace()\n circuit = self._teleport()\n target = Honeywell(workspace=workspace) if provider_id == \"honeywell\" \\\n else Quantinuum(workspace=workspace)\n try:\n job = target.submit(circuit)\n except HttpResponseError as e:\n if \"InvalidJobDefinition\" not in e.message \\\n and \"The provider specified does not exist\" not in e.message:\n raise(e)\n warnings.warn(e.message)\n else:\n # Make sure the job is completed before fetching the results\n # playback currently does not work for repeated calls\n if not self.is_playback:\n self.pause_recording()\n self.assertEqual(False, job.has_completed())\n try:\n # Set a timeout for Honeywell recording\n job.wait_until_completed(timeout_secs=60)\n except TimeoutError:\n warnings.warn(\"Quantinuum (formerly Honeywell) execution exceeded timeout. 
Skipping fetching results.\")\n else:\n # Check if job succeeded\n self.assertEqual(True, job.has_completed())\n assert job.details.status == \"Succeeded\"\n self.resume_recording()\n\n job = workspace.get_job(job.id)\n self.assertEqual(True, job.has_completed())\n\n if job.has_completed():\n results = job.get_results()\n assert results[\"c0\"] == [\"0\"]\n assert results[\"c1\"] == [\"0\"]\n\n @pytest.mark.honeywell\n def test_job_submit_quantinuum(self):\n if self.get_test_quantinuum_enabled():\n self.test_job_submit_honeywell(provider_id=\"quantinuum\")\n"
] |
[
[
"numpy.round"
]
] |
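The IonQ tests above hand-write their 3-qubit GHZ circuit as a JSON-style dict; the same structure generalizes to n qubits. A hypothetical helper, not part of the azure-quantum API:

def ghz_circuit(n_qubits=3):
    # H on qubit 0, then CNOTs fanning out from it: prepares |0...0> + |1...1>
    gates = [{"gate": "h", "target": 0}]
    gates += [{"gate": "cnot", "control": 0, "target": t}
              for t in range(1, n_qubits)]
    return {"qubits": n_qubits, "circuit": gates}

print(ghz_circuit())  # matches the _3_qubit_ghz payload in the tests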