column            type           values
author            int64          658 … 755k
date              stringlengths  19 … 19
timezone          int64          -46,800 … 43.2k
hash              stringlengths  40 … 40
message           stringlengths  5 … 490
mods              list
language          stringclasses  20 values
license           stringclasses  3 values
repo              stringlengths  5 … 68
original_message  stringlengths  12 … 491
49,738
29.04.2018 14:44:07
25,200
b1c8aa5de0f2923ea3d8c75846e442f2401bf945
New sparsity estimator based on density maps This patch adds a sparsity estimator based on density maps with a configurable block size for better sparsity estimates in the presence of sparsity skew. In addition, this fixes and extends the related tests and makes the necessary extensions to the dense block abstraction.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "diff": "@@ -31,6 +31,7 @@ public class EstimatorBasicAvg extends SparsityEstimator\n{\n@Override\npublic double estim(MMNode root) {\n+ //recursive sparsity evaluation of non-leaf nodes\ndouble sp1 = !root.getLeft().isLeaf() ? estim(root.getLeft()) :\nOptimizerUtils.getSparsity(root.getLeft().getMatrixCharacteristics());\ndouble sp2 = !root.getRight().isLeaf() ? estim(root.getRight()) :\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "diff": "@@ -35,6 +35,7 @@ public class EstimatorBasicWorst extends SparsityEstimator\n{\n@Override\npublic double estim(MMNode root) {\n+ //recursive sparsity evaluation of non-leaf nodes\ndouble sp1 = !root.getLeft().isLeaf() ? estim(root.getLeft()) :\nOptimizerUtils.getSparsity(root.getLeft().getMatrixCharacteristics());\ndouble sp2 = !root.getRight().isLeaf() ? estim(root.getRight()) :\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "diff": "@@ -32,6 +32,7 @@ public class MMNode\nprivate final MMNode _m2;\nprivate final MatrixBlock _data;\nprivate final MatrixCharacteristics _mc;\n+ private Object _synops = null;\npublic MMNode(MMNode left, MMNode right) {\n_m1 = left;\n@@ -42,12 +43,12 @@ public class MMNode\n_data.getNumColumns(), -1, -1);\n}\n- public long getRows() {\n- return _mc.getRows();\n+ public int getRows() {\n+ return (int)_mc.getRows();\n}\n- public long getCols() {\n- return _mc.getCols();\n+ public int getCols() {\n+ return (int)_mc.getCols();\n}\npublic MatrixCharacteristics getMatrixCharacteristics() {\n@@ -65,4 +66,16 @@ public class MMNode\npublic boolean isLeaf() {\nreturn _data != null;\n}\n+\n+ public MatrixBlock getData() {\n+ return _data;\n+ }\n+\n+ public void setSynopsis(Object obj) {\n+ _synops = obj;\n+ }\n+\n+ public Object getSynopsis() {\n+ return _synops;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\npublic abstract class SparsityEstimator\n{\n+ protected static final Log LOG = LogFactory.getLog(SparsityEstimator.class.getName());\n+\n/**\n* Estimates the output sparsity of a DAG of matrix multiplications\n* for the given operator graph of a single root node.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "diff": "@@ -221,6 +221,24 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract int pos(int r, int c);\n+ /**\n+ * Increments the given value for a given row and column.\n+ *\n+ * @param r row index\n+ * @param c column index\n+ */\n+ public abstract void incr(int r, int c);\n+\n+ /**\n+ * Increments the given value for a given row and column\n+ * by delta.\n+ *\n+ * @param r row index\n+ * 
@param c column index\n+ * @param delta increment value\n+ */\n+ public abstract void incr(int r, int c, double delta);\n+\n/**\n* Set the given value for the entire dense block (fill).\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "diff": "@@ -170,6 +170,16 @@ public class DenseBlockDRB extends DenseBlock\nreturn r * clen + c;\n}\n+ @Override\n+ public void incr(int r, int c) {\n+ data[pos(r, c)] ++;\n+ }\n+\n+ @Override\n+ public void incr(int r, int c, double delta) {\n+ data[pos(r, c)] += delta;\n+ }\n+\n@Override\npublic DenseBlock set(double v) {\nArrays.fill(data, 0, rlen*clen, v);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -199,6 +199,16 @@ public class DenseBlockLDRB extends DenseBlock\nreturn (r % blen) * clen + c;\n}\n+ @Override\n+ public void incr(int r, int c) {\n+ data[index(r)][pos(r, c)] ++;\n+ }\n+\n+ @Override\n+ public void incr(int r, int c, double delta) {\n+ data[index(r)][pos(r, c)] += delta;\n+ }\n+\n@Override\npublic DenseBlock set(double v) {\nfor(int i=0; i<numBlocks(); i++)\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.test.integration.functions.estim;\nimport org.junit.Test;\nimport org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -47,22 +48,42 @@ public class OuterProductTest extends AutomatedTestBase\n@Test\npublic void testBasicAvgCase1() {\n- runSparsityEstimateTest(new EstimatorBasicAvg(), m, n, k, case1);\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, case1);\n}\n@Test\npublic void testBasicAvgCase2() {\n- runSparsityEstimateTest(new EstimatorBasicAvg(), m, n, k, case2);\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, case2);\n}\n@Test\npublic void testBasicWorstCase1() {\n- runSparsityEstimateTest(new EstimatorBasicWorst(), m, n, k, case1);\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, case1);\n}\n@Test\npublic void testBasicWorstCase2() {\n- runSparsityEstimateTest(new EstimatorBasicWorst(), m, n, k, case2);\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, case2);\n}\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2288] New sparsity estimator based on density maps This patch adds a sparsity estimator based on density maps with a configurable block size for better sparsity estimates in the presence of sparsity skew. In addition, this fixes and extends the related tests and makes the necessary extensions to the dense block abstraction.
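To illustrate the density-map idea this commit describes, here is a minimal self-contained sketch (hypothetical names and plain arrays, not the SystemML classes touched by the diff): each input is summarized by the fraction of non-zeros per b x b block, and the product's output sparsity is then composed block-wise under a per-block uniformity assumption, which is what makes the estimate robust to sparsity skew.

```java
// Hypothetical, self-contained sketch of the density-map idea (plain arrays
// and made-up names, not the SystemML classes touched by this diff).
public class DensityMapSketch {

    /** Fraction of non-zeros in each b x b block of A (boundary blocks smaller). */
    static double[][] densityMap(double[][] A, int b) {
        int m = A.length, n = A[0].length;
        double[][] map = new double[(m + b - 1) / b][(n + b - 1) / b];
        for (int i = 0; i < m; i++)
            for (int j = 0; j < n; j++)
                if (A[i][j] != 0)
                    map[i / b][j / b]++;
        for (int bi = 0; bi < map.length; bi++)
            for (int bj = 0; bj < map[0].length; bj++) {
                int br = Math.min(b, m - bi * b); // actual block height
                int bc = Math.min(b, n - bj * b); // actual block width
                map[bi][bj] /= (double) (br * bc);
            }
        return map;
    }

    /**
     * Average-case output sparsity of A %*% B from the two density maps:
     * within a block, non-zeros are assumed uniform, so an output cell of
     * block (i,j) stays zero with probability prod_k (1 - spA*spB)^lbk.
     * cd is the original common dimension; averaging the per-block results
     * treats all blocks as equal-sized, so this is slightly approximate.
     */
    static double estimateSparsity(double[][] mA, double[][] mB, int b, int cd) {
        double sum = 0;
        for (int i = 0; i < mA.length; i++)
            for (int j = 0; j < mB[0].length; j++) {
                double zero = 1.0;
                for (int k = 0; k < mB.length; k++) {
                    int lbk = Math.min(b, cd - k * b); // block size along cd
                    zero *= Math.pow(1 - mA[i][k] * mB[k][j], lbk);
                }
                sum += 1 - zero;
            }
        return sum / (mA.length * mB[0].length);
    }
}
```

A smaller block size (e.g., the b = 7 used in the new tests via EstimatorDensityMap(7)) trades estimation cost for finer resolution of skew.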
49,766
30.04.2018 14:04:10
25,200
cd359c2e9760cb9481a0fd79ce59c207eef57790
Stop publishing MD5 hash with releases
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -290,8 +290,6 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\ncd svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\nrm -f *.asc\nfor i in *.zip *.tgz *.tar.gz; do gpg --output $i.asc --detach-sig --armor $i; done\n- rm -f *.md5\n- for i in *.zip *.tgz *.tar.gz; do openssl md5 -hex $i | sed 's/MD5(\\([^)]*\\))= \\([0-9a-f]*\\)/\\2 *\\1/' > $i.md5; done\nrm -f *.sha512\nfor i in *.zip *.tgz *.tar.gz; do shasum -a 512 $i > $i.sha512; done\n" } ]
Java
Apache License 2.0
apache/systemds
Stop publishing MD5 hash with releases
49,738
30.04.2018 14:02:14
25,200
18d98b61e55e71a2da51a5f0a855c98beacbcdd7
[MINOR] Performance and cleanup spark data partitioning
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -1441,7 +1441,9 @@ public class ParForProgramBlock extends ForProgramBlock\nDataPartitioner dp = null;\n//determine max degree of parallelism\n- int numReducers = ConfigurationManager.getNumReducers();\n+ int numReducers = OptimizerUtils.isSparkExecutionMode() ?\n+ SparkExecutionContext.getDefaultParallelism(false) :\n+ ConfigurationManager.getNumReducers();\nint maxNumRed = InfrastructureAnalyzer.getRemoteParallelReduceTasks();\n//correction max number of reducers on yarn clusters\nif( InfrastructureAnalyzer.isYarnEnabled() )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/DataPartitionerRemoteSparkReducer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/DataPartitionerRemoteSparkReducer.java", "diff": "@@ -62,15 +62,14 @@ public class DataPartitionerRemoteSparkReducer implements VoidFunction<Tuple2<Lo\n//write entire partition to binary block sequence file\nSequenceFile.Writer writer = null;\n- try\n- {\n+ try {\n//create sequence file writer\nConfiguration job = new Configuration(ConfigurationManager.getCachedJobConf());\n+ job.setInt(MRConfigurationNames.DFS_REPLICATION, _replication);\n+\nPath path = new Path(_fnameNew + File.separator + key);\nFileSystem fs = IOUtilFunctions.getFileSystem(path, job);\n- writer = new SequenceFile.Writer(fs, job, path, MatrixIndexes.class, MatrixBlock.class,\n- job.getInt(MRConfigurationNames.IO_FILE_BUFFER_SIZE, 4096),\n- (short)_replication, fs.getDefaultBlockSize(), null, new SequenceFile.Metadata());\n+ writer = new SequenceFile.Writer(fs, job, path, MatrixIndexes.class, MatrixBlock.class);\n//write individual blocks unordered to output\nwhile( valueList.hasNext() ) {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance and cleanup spark data partitioning
49,738
01.05.2018 20:55:05
25,200
aa253eb7eaf272175fbfe7e0e7b1a586cb18b68c
Fix density-map sparsity estimator, more tests This patch fixes a correctness issue of the sparsity estimator based on density maps along with new tests for squared uniformly distributed matrices (for all estimators).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -65,7 +65,7 @@ public class EstimatorDensityMap extends SparsityEstimator\n//estimate output density map and sparsity\nMatrixBlock outMap = estimIntern(m1Map, m2Map,\n- true, root.getRows(), root.getCols());\n+ true, root.getRows(), root.getLeft().getCols(), root.getCols());\nroot.setSynopsis(outMap); //memoize density map\nreturn OptimizerUtils.getSparsity( //aggregate output histogram\nroot.getRows(), root.getCols(), (long)outMap.sum());\n@@ -76,7 +76,7 @@ public class EstimatorDensityMap extends SparsityEstimator\nMatrixBlock m1Map = computeDensityMap(m1);\nMatrixBlock m2Map = computeDensityMap(m2);\nMatrixBlock outMap = estimIntern(m1Map, m2Map,\n- true, m1.getNumRows(), m2.getNumColumns());\n+ true, m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\nreturn OptimizerUtils.getSparsity( //aggregate output histogram\nm1.getNumRows(), m2.getNumColumns(), (long)outMap.sum());\n}\n@@ -135,11 +135,12 @@ public class EstimatorDensityMap extends SparsityEstimator\n* @param m1Map density map left-hand-side operand\n* @param m2Map density map right-hand-side operand\n* @param retNnz return number of non-zeros instead of sparsity per cell\n- * @param rlen number of rows of output matrix, required for returning nnz\n- * @param clen number of columns of output matrix, required for returning nnz\n+ * @param mOrig number of rows of output matrix, required for returning nnz\n+ * @param cdOrig common dimension of original matrix multiply\n+ * @param nOrig number of columns of output matrix, required for returning nnz\n* @return density map\n*/\n- private MatrixBlock estimIntern(MatrixBlock m1Map, MatrixBlock m2Map, boolean retNnz, int rlen, int clen) {\n+ private MatrixBlock estimIntern(MatrixBlock m1Map, MatrixBlock m2Map, boolean retNnz, int mOrig, int cdOrig, int nOrig) {\nfinal int m = m1Map.getNumRows();\nfinal int cd = m1Map.getNumColumns();\nfinal int n = m2Map.getNumColumns();\n@@ -151,7 +152,7 @@ public class EstimatorDensityMap extends SparsityEstimator\nDenseBlock c = out.allocateBlock().getDenseBlock();\nfor(int i=0; i<m; i++) {\nfor(int k=0; k<cd; k++) {\n- int lbk = UtilFunctions.computeBlockSize(cd, k+1, _b);\n+ int lbk = UtilFunctions.computeBlockSize(cdOrig, k+1, _b);\ndouble sp1 = m1Map.quickGetValue(i, k);\nfor(int j=0; j<n; j++) {\ndouble sp2 = m2Map.quickGetValue(k, j);\n@@ -164,9 +165,9 @@ public class EstimatorDensityMap extends SparsityEstimator\n}\n//scale to non-zeros instead of sparsity if needed\nif( retNnz ) {\n- int lbm = UtilFunctions.computeBlockSize(rlen, i+1, _b);\n+ int lbm = UtilFunctions.computeBlockSize(mOrig, i+1, _b);\nfor( int j=0; j<n; j++ ) {\n- int lbn = UtilFunctions.computeBlockSize(clen, j+1, _b);\n+ int lbn = UtilFunctions.computeBlockSize(nOrig, j+1, _b);\nc.set(i, j, c.get(i, j) * lbm * lbn);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.test.integration.functions.estim;\nimport org.junit.Test;\nimport org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport 
org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\n@@ -86,6 +87,16 @@ public class OuterProductTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, case2);\n}\n+ @Test\n+ public void testBitsetMatrixCase1() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testBitsetMatrixCase2() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, case2);\n+ }\n+\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 3);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Test;\n+import org.apache.sysml.hops.estim.EstimatorBasicAvg;\n+import org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorBitsetMM;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.SparsityEstimator;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+/**\n+ * This is a basic sanity check for all estimator, which need\n+ * to compute a reasonable estimate for uniform data.\n+ */\n+public class SquaredProductTest extends AutomatedTestBase\n+{\n+ private final static int m = 1000;\n+ private final static int k = 1000;\n+ private final static int n = 1000;\n+ private final static double[] case1 = new double[]{0.0001, 0.00007};\n+ private final static double[] case2 = new double[]{0.0006, 0.00007};\n+\n+ private final static double eps1 = 0.05;\n+ private final static double eps2 = 1e-4;\n+ private final static double eps3 = 0;\n+\n+\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+\n+ @Test\n+ public void testBasicAvgCase1() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testBasicAvgCase2() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testBasicWorstCase1() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testBasicWorstCase2() {\n+ runSparsityEstimateTest(new 
EstimatorBasicWorst(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testBitsetMatrixCase1() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testBitsetMatrixCase2() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, case2);\n+ }\n+\n+ private void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\n+ MatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\n+ MatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 3);\n+ MatrixBlock m3 = m1.aggregateBinaryOperations(m1, m2,\n+ new MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n+\n+ //compare estimated and real sparsity\n+ double est = estim.estim(m1, m2);\n+ TestUtils.compareScalars(est, m3.getSparsity(),\n+ (estim instanceof EstimatorBitsetMM) ? eps3 : //exact\n+ (estim instanceof EstimatorBasicWorst) ? eps1 : eps2);\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2288] Fix density-map sparsity estimator, more tests This patch fixes a correctness issue of the sparsity estimator based on density maps along with new tests for squared uniformly distributed matrices (for all estimators).
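The tight tolerance (eps2 = 1e-4) for the average-case estimators in the new SquaredProductTest is reasonable because, for uniformly random inputs, the expected output sparsity has a simple closed form; an illustrative derivation (not quoted from the patch):

```latex
% C = A B with uniform input sparsities sp_1, sp_2 and common dimension k:
% cell c_{ij} is non-zero unless all k terms a_{il} b_{lj} vanish, and each
% term is non-zero with probability sp_1 sp_2 (independence assumed).
\[
  \Pr[c_{ij} \neq 0] \;=\; 1 - (1 - sp_1\, sp_2)^{k}
\]
```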
49,738
02.05.2018 15:55:32
25,200
d17a2e22917bd04b25ed0ad7e050f869cb1da92b
[SYSTEMML-2288,2295] Fix estimates mm chains (density maps, bitsets) This patch fixes the sparsity estimation logic for matrix multiply chains, specifically for the estimators based on density maps and bitsets. Additionally, this also includes related test cases.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -45,7 +45,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\nBitsetMatrix m1Map = !root.getLeft().isLeaf() ?\n(BitsetMatrix)root.getLeft().getSynopsis() : new BitsetMatrix(root.getLeft().getData());\nBitsetMatrix m2Map = !root.getRight().isLeaf() ?\n- (BitsetMatrix)root.getLeft().getSynopsis() : new BitsetMatrix(root.getLeft().getData());\n+ (BitsetMatrix)root.getRight().getSynopsis() : new BitsetMatrix(root.getRight().getData());\n//estimate output density map and sparsity via boolean matrix mult\nBitsetMatrix outMap = m1Map.matMult(m2Map);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -61,11 +61,11 @@ public class EstimatorDensityMap extends SparsityEstimator\nMatrixBlock m1Map = !root.getLeft().isLeaf() ?\n(MatrixBlock)root.getLeft().getSynopsis() : computeDensityMap(root.getLeft().getData());\nMatrixBlock m2Map = !root.getRight().isLeaf() ?\n- (MatrixBlock)root.getLeft().getSynopsis() : computeDensityMap(root.getLeft().getData());\n+ (MatrixBlock)root.getRight().getSynopsis() : computeDensityMap(root.getRight().getData());\n//estimate output density map and sparsity\nMatrixBlock outMap = estimIntern(m1Map, m2Map,\n- true, root.getRows(), root.getLeft().getCols(), root.getCols());\n+ false, root.getRows(), root.getLeft().getCols(), root.getCols());\nroot.setSynopsis(outMap); //memoize density map\nreturn OptimizerUtils.getSparsity( //aggregate output histogram\nroot.getRows(), root.getCols(), (long)outMap.sum());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "diff": "@@ -34,13 +34,19 @@ public class MMNode\nprivate final MatrixCharacteristics _mc;\nprivate Object _synops = null;\n+ public MMNode(MatrixBlock in) {\n+ _m1 = null;\n+ _m2 = null;\n+ _data = in;\n+ _mc = in.getMatrixCharacteristics();\n+ }\n+\npublic MMNode(MMNode left, MMNode right) {\n_m1 = left;\n_m2 = right;\n_data = null;\n- _mc = isLeaf() ? _data.getMatrixCharacteristics() :\n- new MatrixCharacteristics(_data.getNumRows(),\n- _data.getNumColumns(), -1, -1);\n+ _mc = new MatrixCharacteristics(\n+ _m1.getRows(), _m2.getCols(), -1, -1);\n}\npublic int getRows() {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Test;\n+import org.apache.sysml.hops.estim.EstimatorBasicAvg;\n+import org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorBitsetMM;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.MMNode;\n+import org.apache.sysml.hops.estim.SparsityEstimator;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+/**\n+ * This is a basic sanity check for all estimator, which need\n+ * to compute a reasonable estimate for uniform data.\n+ */\n+public class SquaredProductChainTest extends AutomatedTestBase\n+{\n+ private final static int m = 1000;\n+ private final static int k = 1000;\n+ private final static int n = 1000;\n+ private final static int n2 = 1000;\n+ private final static double[] case1 = new double[]{0.0001, 0.00007, 0.001};\n+ private final static double[] case2 = new double[]{0.0006, 0.00007, 0.001};\n+\n+ private final static double eps1 = 1.0;\n+ private final static double eps2 = 1e-4;\n+ private final static double eps3 = 0;\n+\n+\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+\n+ @Test\n+ public void testBasicAvgCase1() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testBasicAvgCase2() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, n2, case2);\n+ }\n+\n+ @Test\n+ public void testBasicWorstCase1() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testBasicWorstCase2() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, n2, case2);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, n2, case2);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, k, n, n2, case2);\n+ }\n+\n+ @Test\n+ public void testBitsetMatrixCase1() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testBitsetMatrixCase2() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, n2, case2);\n+ }\n+\n+ @Test\n+ public void testMatrixHistogramCase1() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testMatrixHistogramCase2() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(), m, k, n, n2, case2);\n+ }\n+\n+ private void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, int n2, double[] sp) {\n+ MatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 1);\n+ MatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 2);\n+ MatrixBlock m3 = MatrixBlock.randOperations(n, n2, sp[2], 1, 1, \"uniform\", 3);\n+ MatrixBlock m4 = m1.aggregateBinaryOperations(m1, m2,\n+ 
new MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n+ MatrixBlock m5 = m1.aggregateBinaryOperations(m4, m3,\n+ new MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n+\n+ //compare estimated and real sparsity\n+ double est = estim.estim(new MMNode(\n+ new MMNode(new MMNode(m1), new MMNode(m2)), new MMNode(m3)));\n+ TestUtils.compareScalars(est, m5.getSparsity(),\n+ (estim instanceof EstimatorBitsetMM) ? eps3 : //exact\n+ (estim instanceof EstimatorBasicWorst) ? eps1 : eps2);\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "diff": "@@ -27,6 +27,8 @@ import org.junit.runners.Suite;\n@RunWith(Suite.class)\[email protected]({\nOuterProductTest.class,\n+ SquaredProductChainTest.class,\n+ SquaredProductTest.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2288,2295] Fix estimates mm chains (density maps, bitsets) This patch fixes the sparsity estimation logic for matrix multiply chains, specifically for the estimators based on density maps and bitsets. Additionally, this also includes related test cases.
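The bug fixed here was a copy-paste error: both operands read the left child's synopsis. A minimal, self-contained sketch of the recursive chain-estimation pattern (hypothetical names, not the MMNode API; the composition formula is the basic average-case one, used only for illustration):

```java
// Hypothetical, self-contained sketch (made-up names, not the MMNode API)
// of the recursive chain-estimation pattern this patch repairs. The bug
// fixed in the diff was reading the LEFT child's synopsis for both
// operands; here the right operand correctly reads the right child.
class ChainNode {
    ChainNode left, right; // null for leaf nodes (raw inputs)
    double leafSparsity;   // known input sparsity at leaves
    long commonDim;        // inner dimension of this multiply
    Double synopsis;       // memoized estimate, reused in larger chains

    static double estim(ChainNode n) {
        if (n.left == null)
            return n.leafSparsity;        // leaf: sparsity is known
        if (n.synopsis != null)
            return n.synopsis;            // reuse the memoized synopsis
        double sp1 = estim(n.left);
        double sp2 = estim(n.right);      // right child, not left
        // basic average-case composition for a single matrix multiply,
        // used here only for illustration
        n.synopsis = 1 - Math.pow(1 - sp1 * sp2, n.commonDim);
        return n.synopsis;
    }
}
```

The new SquaredProductChainTest exercises exactly this pattern by nesting MMNodes three levels deep.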
49,738
02.05.2018 18:17:10
25,200
e7d948f9c41a107f04a3f9fe565d8c7528573613
Improved matrix histograms (histograms of intermediates) This patch adds the functionality to compute estimated matrix histograms for intermediates of matrix multiplication chains based on the histograms of the inputs. This is important for sparsity estimation of intermediates when evaluating plan alternatives in advanced optimizers.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "package org.apache.sysml.hops.estim;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixAgg;\n@@ -37,8 +36,23 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n{\n@Override\npublic double estim(MMNode root) {\n- throw new DMLRuntimeException(\"Estimation of \"\n- + \"intermediate matrix histograms not supported yet.\");\n+ //recursive histogram computation of non-leaf nodes\n+ if( !root.getLeft().isLeaf() )\n+ estim(root.getLeft()); //obtain synopsis\n+ if( !root.getRight().isLeaf() )\n+ estim(root.getLeft()); //obtain synopsis\n+ MatrixHistogram h1 = !root.getLeft().isLeaf() ?\n+ (MatrixHistogram)root.getLeft().getSynopsis() : new MatrixHistogram(root.getLeft().getData());\n+ MatrixHistogram h2 = !root.getRight().isLeaf() ?\n+ (MatrixHistogram)root.getRight().getSynopsis() : new MatrixHistogram(root.getRight().getData());\n+\n+ //estimate output sparsity based on input histograms\n+ double ret = estimIntern(h1, h2);\n+\n+ //derive and memoize output histogram\n+ root.setSynopsis(MatrixHistogram.deriveOutputHistogram(h1, h2, ret));\n+\n+ return ret;\n}\n@Override\n@@ -83,6 +97,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nprivate final int[] rNnz;\nprivate final int[] cNnz;\nprivate int rMaxNnz = 0;\n+ @SuppressWarnings(\"unused\")\nprivate int cMaxNnz = 0;\npublic MatrixHistogram(MatrixBlock in) {\n@@ -117,9 +132,14 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nrMaxNnz = Math.max(rMaxNnz, lnnz);\n}\n}\n+ cMaxNnz = max(cNnz, 0, in.getNumColumns());\n+ }\n- for(int j=0; j<in.getNumColumns(); j++)\n- cMaxNnz = Math.max(cMaxNnz, cNnz[j]);\n+ public MatrixHistogram(int[] r, int[] c, int rmax, int cmax) {\n+ rNnz = r;\n+ cNnz = c;\n+ rMaxNnz = rmax;\n+ cMaxNnz = cmax;\n}\npublic int getRows() {\n@@ -129,5 +149,43 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\npublic int getCols() {\nreturn cNnz.length;\n}\n+\n+ public static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n+ //get input/output nnz for scaling\n+ long nnz1 = sum(h1.rNnz, 0, h1.getRows());\n+ long nnz2 = sum(h2.cNnz, 0, h2.getCols());\n+ double nnzOut = spOut * h1.getRows() * h2.getCols();\n+\n+ //propagate h1.r and h2.c to output via simple scaling\n+ //(this implies 0s propagate and distribution is preserved)\n+ int rMaxNnz = 0, cMaxNnz = 0;\n+ int[] rNnz = new int[h1.getRows()];\n+ for( int i=0; i<h1.getRows(); i++ ) {\n+ rNnz[i] = (int) Math.round(nnzOut/nnz1 * h1.rNnz[i]);\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n+ }\n+ int[] cNnz = new int[h2.getCols()];\n+ for( int i=0; i<h2.getCols(); i++ ) {\n+ cNnz[i] = (int) Math.round(nnzOut/nnz2 * h2.cNnz[i]);\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n+ }\n+\n+ //construct new histogram object\n+ return new MatrixHistogram(rNnz, cNnz, rMaxNnz, cMaxNnz);\n+ }\n+\n+ private static int max(int[] a, int ai, int alen) {\n+ int ret = Integer.MIN_VALUE;\n+ for(int i=ai; i<ai+alen; i++)\n+ ret = Math.max(ret, a[i]);\n+ return ret;\n+ }\n+\n+ private static long sum(int[] a, int ai, int alen) {\n+ int ret = 0;\n+ for(int i=ai; 
i<ai+alen; i++)\n+ ret += a[i];\n+ return ret;\n+ }\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2296] Improved matrix histograms (histograms of intermediates) This patch adds the functionality to compute estimated matrix histograms for intermediates of matrix multiplication chains based on the histograms of the inputs. This is important for sparsity estimation of intermediates when evaluating plan alternatives in advanced optimizers.
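The scaling in deriveOutputHistogram is the heart of this patch; here is a standalone version of the row-histogram propagation (made-up names; the logic mirrors the diff and its stated assumption that zeros propagate and the distribution is preserved):

```java
// Standalone version of the scaling in deriveOutputHistogram (made-up
// names; the logic mirrors the diff and its stated assumption that zeros
// propagate and the row distribution is preserved).
class HistogramScaling {
    /** Scale the left input's row-nnz histogram to the estimated output nnz. */
    static int[] deriveOutputRowNnz(int[] rNnz1, double nnzOut) {
        long nnz1 = 0;
        for (int v : rNnz1)
            nnz1 += v; // total input non-zeros
        int[] out = new int[rNnz1.length];
        for (int i = 0; i < rNnz1.length; i++)
            out[i] = (int) Math.round(nnzOut / nnz1 * rNnz1[i]);
        return out;
    }
}
```

The column histogram of the right input is propagated the same way, scaled by nnzOut over the right input's total column counts.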
49,738
05.05.2018 17:37:38
25,200
d74ded67cebd4b34323f5e71f5cb5e1e81f6d17f
[MINOR] Extended compilation chain (optional function handling)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -104,17 +104,17 @@ public class DMLTranslator\nRecompiler.reinitRecompiler();\n}\n- /**\n- * Validate parse tree\n- *\n- * @param dmlp dml program\n- */\n- public void validateParseTree(DMLProgram dmlp)\n+ public void validateParseTree(DMLProgram dmlp) {\n+ validateParseTree(dmlp, true);\n+ }\n+\n+ public void validateParseTree(DMLProgram dmlp, boolean inclFuns)\n{\n//STEP1: Pre-processing steps for validate - e.g., prepare read-after-write meta data\nboolean fWriteRead = prepareReadAfterWrite(dmlp, new HashMap<String, DataIdentifier>());\n//STEP2: Actual Validate\n+ if( inclFuns ) {\n// handle functions in namespaces (current program has default namespace)\nfor (String namespaceKey : dmlp.getNamespaces().keySet()){\n@@ -134,7 +134,7 @@ public class DMLTranslator\n}\nfblock.validate(dmlp, vs, constVars, false);\n}\n-\n+ }\n}\n// handle regular blocks -- \"main\" program\n@@ -164,8 +164,13 @@ public class DMLTranslator\n}\npublic void liveVariableAnalysis(DMLProgram dmlp) {\n+ liveVariableAnalysis(dmlp, true);\n+ }\n+\n+ public void liveVariableAnalysis(DMLProgram dmlp, boolean inclFuns) {\n// for each namespace, handle function program blocks -- forward direction\n+ if( inclFuns ) {\nfor (String namespaceKey : dmlp.getNamespaces().keySet()) {\nfor (String fname: dmlp.getFunctionStatementBlocks(namespaceKey).keySet()) {\nFunctionStatementBlock fsb = dmlp.getFunctionStatementBlock(namespaceKey, fname);\n@@ -202,7 +207,7 @@ public class DMLTranslator\nfsb.analyze(currentLiveIn, currentLiveOut);\n}\n}\n-\n+ }\n// handle regular program blocks\nVariableSet currentLiveOut = new VariableSet();\n@@ -226,13 +231,13 @@ public class DMLTranslator\n}\n}\n- /**\n- * Construct Hops from parse tree\n- *\n- * @param dmlp dml program\n- */\npublic void constructHops(DMLProgram dmlp) {\n+ constructHops(dmlp, true);\n+ }\n+\n+ public void constructHops(DMLProgram dmlp, boolean inclFuns) {\n// Step 1: construct hops for all functions\n+ if( inclFuns ) {\n// for each namespace, handle function program blocks\nfor (String namespaceKey : dmlp.getNamespaces().keySet()){\nfor (String fname: dmlp.getFunctionStatementBlocks(namespaceKey).keySet()) {\n@@ -240,6 +245,7 @@ public class DMLTranslator\nconstructHops(current);\n}\n}\n+ }\n// Step 2: construct hops for main program\n// handle regular program blocks\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Extended compilation chain (optional function handling)
49,738
05.05.2018 20:43:20
25,200
53fa046010aafa93c754324f4c2e3dd9d157300c
Exploit matrix histogram symmetry for improved accuracy Matrix histograms exploit common structural properties of sparse and ultra-sparse matrices that allow exact inference of the output sparsity in special cases. This patch improves the matrix histogram implementation to exploit not only structural information of the left-hand side but, symmetrically, also of the right-hand side.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -73,7 +73,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nlong nnz = 0;\n//special case, with exact sparsity estimate, where the dot product\n//dot(h1.cNnz,h2rNnz) gives the exact number of non-zeros in the output\n- if( h1.rMaxNnz <= 1 ) {\n+ if( h1.rMaxNnz <= 1 || h2.cMaxNnz <= 1 ) {\nfor( int j=0; j<h1.getCols(); j++ )\nnnz += h1.cNnz[j] * h2.rNnz[j];\n}\n@@ -97,7 +97,6 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nprivate final int[] rNnz;\nprivate final int[] cNnz;\nprivate int rMaxNnz = 0;\n- @SuppressWarnings(\"unused\")\nprivate int cMaxNnz = 0;\npublic MatrixHistogram(MatrixBlock in) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2296] Exploit matrix histogram symmetry for improved accuracy Matrix histograms exploit common structural properties of sparse and ultra-sparse matrices that allow exact inference of the output sparsity in special cases. This patch improves the matrix histogram implementation to exploit not only structural information of the left-hand side but, symmetrically, also of the right-hand side.
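The special case behind this patch is exact because, when every row of the left input (rMaxNnz <= 1) or, symmetrically, every column of the right input (cMaxNnz <= 1) holds at most one non-zero, the scalar products contributing to any single output cell never collide; the dot product of the inputs' column/row counts then gives the exact output non-zero count, as the diff's comment states:

```latex
% Exact special case exploited symmetrically by this patch: if every row of
% A has at most one non-zero (rMaxNnz <= 1), or every column of B has at
% most one non-zero (cMaxNnz <= 1), no two products a_{il} b_{lj} land on
% the same output cell, so with c^{A}_l = nnz of column l of A and
% r^{B}_l = nnz of row l of B:
\[
  \mathrm{nnz}(AB) \;=\; \sum_{l=1}^{k} c^{A}_{l}\, r^{B}_{l}
\]
```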
49,727
08.05.2018 19:23:27
25,200
1a37cfad4104996a0f0ac61015ff908731713039
[MINOR] Fix native BLAS directory to lower case Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "diff": "@@ -78,7 +78,7 @@ public class NativeHelper {\nif(!isBLASLoaded()) {\nDMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\nString userSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n- String customLibPath = (dmlConfig == null) ? \"none\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS_DIR).trim().toLowerCase();\n+ String customLibPath = (dmlConfig == null) ? \"none\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS_DIR).trim();\nperformLoading(customLibPath, userSpecifiedBLAS);\n}\nif(maxNumThreads == -1)\n@@ -179,6 +179,8 @@ public class NativeHelper {\nif(userSpecifiedBLAS.equalsIgnoreCase(\"auto\")) {\nblas = new String[] { \"mkl\", \"openblas\" };\n}\n+\n+\nif(checkAndLoadBLAS(customLibPath, blas) && loadLibraryHelper(\"libsystemml_\" + blasType + \"-Linux-x86_64.so\")) {\nLOG.info(\"Using native blas: \" + blasType + getNativeBLASPath());\nCURRENT_NATIVE_BLAS_STATE = NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix native BLAS directory to lower case Closes #760.
49,698
09.05.2018 13:05:32
-19,080
23157be4de7dd91e755cbc2a919984f068d6fd61
Factorization Machines regression script This patch adds a factorization machines regression script with a sample dummy data script for evaluation.
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/examples/fm-regression-dummy-data.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Imports\n+source(\"staging/fm-regression.dml\") as fm_regression\n+\n+# generate dummy data (just a sample!)\n+n = 1000; d = 9; k=2;\n+X = rand(rows=n, cols=d);\n+y = rand(rows=n, cols=1);\n+X_val = rand(rows=100, cols=d);\n+y_val = rand(rows=100, cols=1);\n+\n+# Train\n+[w0, W, V] = fm_regression::train(X, y, X_val, y_val);\n+\n+# Write model out\n+#write(w0, out_dir+\"/w0\");\n+#write(W, out_dir+\"/W\");\n+#write(V, out_dir+\"/V\");\n+\n+# Evaluate\n+probs = fm_regression::predict(X, w0, W, V);\n+[loss, accuracy] = fm_regression::eval(probs, y);\n+\n+# Output results\n+print(\"Test Accuracy: \" + accuracy)\n+#write(accuracy, out_dir+\"/accuracy\")\n+\n+print(\"\")\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/fm-regression.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/**\n+ * Factorization Machines for Regression.\n+ */\n+\n+# Imports\n+source(\"nn/optim/adam.dml\") as adam\n+source(\"nn/layers/fm.dml\") as fm\n+source(\"nn/layers/l2_loss.dml\") as l2_loss\n+source(\"nn/layers/l2_reg.dml\") as l2_reg\n+\n+train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val)\n+ return (matrix[double] w0, matrix[double] W, matrix[double] V) {\n+ /*\n+ * Trains the FM model.\n+ *\n+ * Inputs:\n+ * - X : n examples with d features, of shape (n, d)\n+ * - y : Target matrix, of shape (n, 1)\n+ * - X_val : Input validation data matrix, of shape (n, d)\n+ * - y_val : Target validation matrix, of shape (n, 1)\n+ *\n+ * Outputs:\n+ * - w0, W, V : updated model parameters.\n+ *\n+ * Network Architecture:\n+ *\n+ * X --> [model] --> out --> l2_loss::backward(out, y) --> dout\n+ *\n+ */\n+\n+ n = nrow(X) # num examples\n+ d = ncol(X) # num features\n+ k = 2 # factorization dimensionality,\n+ # only (=2) possible\n+\n+ # 1.initialize fm core\n+ [w0, W, V] = fm::init(n, d, k);\n+\n+ # 2.initialize adam optimizer\n+ ## Default values for some parameters\n+ lr = 0.001;\n+ beta1 = 0.9; # [0, 1)\n+ beta2 = 0.999; # [0, 1)\n+ epsilon = 0.00000001;\n+ t = 0;\n+\n+ [mw0, vw0] = adam::init(w0);\n+ [mW, vW] = adam::init(W);\n+ [mV, vV] = adam::init(V);\n+\n+ # regularization\n+ lambda = 5e-04\n+\n+ # Optimize\n+ print(\"Starting optimization\")\n+ batch_size = 10\n+ epochs = 100; N = n;\n+ iters = ceil(N / batch_size)\n+\n+ for (e in 1:epochs) {\n+ for (i in 1:iters) {\n+ # Get the next batch\n+ beg = ((i-1) * batch_size) %% N + 1\n+ end = min(N, beg + batch_size - 1)\n+ X_batch = X[beg:end,]\n+ y_batch = y[beg:end,]\n+\n+ # 3.Send inputs through fm::forward\n+ out = fm::forward(X_batch, w0, W, V);\n+\n+ # 4.compute gradients from a loss l2_loss::backward\n+ dout = l2_loss::backward(out, y_batch)# (predictions, targets)\n+\n+ # Compute loss & accuracy for training & validation data every 100 iterations.\n+ if (i %% 100 == 0) {\n+ # Compute training loss & accuracy\n+ [loss_data, accuracy] = eval(out, y_batch);\n+ loss_reg_w0 = l2_reg::forward(w0, lambda)\n+ loss_reg_W = l2_reg::forward(W , lambda)\n+ loss_reg_V = l2_reg::forward(V , lambda)\n+ loss = loss_data + loss_reg_w0 + loss_reg_W + loss_reg_V\n+\n+ # Compute validation loss & accuracy\n+ probs_val = predict(X_val, w0, W, V)\n+ [loss_val, accuracy_val] = eval(probs_val, y_val);\n+\n+ # Output results\n+ print(\"Epoch: \" + e + \", Iter: \" + i + \", Train Loss: \" + loss + \", Train Accuracy: \"\n+ + accuracy + \", Val Loss: \" + loss_val + \", Val Accuracy: \" + accuracy_val)\n+ }\n+\n+ # 5.Send the above result through fm::backward\n+ [dw0, dW, dV] = fm::backward(dout, X_batch, w0, W, V);\n+\n+ # 6.update timestep\n+ t = e * i - 1;\n+\n+ # 7.Call adam::update for all parameters\n+ [w0,mw0,vw0] = adam::update(w0, dw0, lr, beta1, beta2, epsilon, t, mw0, vw0);\n+ [W, mW, vW] = adam::update(W, dW, lr, beta1, beta2, epsilon, t, mW, vW );\n+ [V, mV, vV] = adam::update(V, dV, lr, beta1, beta2, epsilon, t, mV, vV );\n+\n+ }\n+ }\n+}\n+\n+predict = function(matrix[double] X, matrix[double] w0, matrix[double] W, matrix[double] V)\n+ return (matrix[double] out) {\n+ /*\n+ * Computes the predictions for the given inputs.\n+ *\n+ * Inputs:\n+ * - X : n examples with d features, of shape (n, d).\n+ * - w0, W, V : trained model 
parameters.\n+ *\n+ * Outputs:\n+ * - out : target vector, y.\n+ */\n+\n+ # 1.Send inputs through fm::forward\n+ out = fm::forward(X, w0, W, V);\n+\n+}\n+\n+eval = function(matrix[double] probs, matrix[double] y)\n+ return (double loss, double accuracy) {\n+ /*\n+ * Computes loss and accuracy.\n+ */\n+\n+ # compute the log loss\n+ loss = l2_loss::forward(probs, y);\n+\n+ # compute accuracy\n+ sqr_mean = mean( (probs - y)^2 )\n+ accuracy = (sqr_mean)^0.5\n+\n+}\n+\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1437] Factorization Machines regression script This patch adds a factorization machines regression script with a sample dummy data script for evaluation.
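For context, the fm layer sourced by this script is assumed to implement the standard degree-2 factorization machine (the layer internals are not part of this record); the pairwise interaction term admits an O(kd) reformulation, which keeps training with k latent factors cheap:

```latex
% Degree-2 factorization machine with bias w_0, linear weights w, and
% factor matrix V (k latent factors per feature); the pairwise term on the
% left equals the O(kd) reformulation on the right.
\[
  \hat{y}(x) = w_0 + \sum_{i=1}^{d} w_i x_i
             + \sum_{i=1}^{d}\sum_{j=i+1}^{d} \langle v_i, v_j\rangle\, x_i x_j,
  \qquad
  \sum_{i<j} \langle v_i, v_j\rangle\, x_i x_j
  = \tfrac{1}{2}\sum_{f=1}^{k}\Big[\Big(\sum_{i} v_{i,f}\, x_i\Big)^{2}
    - \sum_{i} v_{i,f}^{2}\, x_i^{2}\Big]
\]
```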
49,698
09.05.2018 13:34:48
-19,080
42359f11c9c215107421606bab93f6db65fea2fa
Factorization Machines Binary classification script This patch adds a binary classification script built on top of factorization machines, with a sample data script for evaluation. Closes
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/examples/fm-binclass-dummy-data.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#imports\n+source(\"staging/fm-binclass.dml\") as fm_binclass\n+\n+# generate dummy data ( this is just a sample! )\n+n = 1000; d = 7; k=2;\n+X = rand(rows=n, cols=d);\n+y = round(rand(rows=n, cols=1));\n+X_val = rand(rows=100, cols=7);\n+y_val = round(rand(rows=100, cols=1));\n+\n+# Train\n+[w0, W, V, loss] = fm_binclass::train(X, y, X_val, y_val);\n+\n+# Write model out\n+#write(w0, out_dir+\"/w0\");\n+#write(W, out_dir+\"/W\");\n+#write(V, out_dir+\"/V\");\n+\n+# eval on test set\n+probs = fm_binclass::predict(X, w0, W, V);\n+[loss, accuracy] = fm_binclass::eval(probs, y);\n+\n+# Output results\n+print(\"Test Accuracy: \" + accuracy)\n+#write(accuracy, out_dir+\"/accuracy\")\n+\n+print(\"\")\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/fm-binclass.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+/*\n+ * Factorization Machines for binary classification.\n+ */\n+\n+# Imports\n+source(\"nn/optim/adam.dml\") as adam\n+source(\"nn/layers/fm.dml\") as fm\n+source(\"nn/layers/log_loss.dml\") as log_loss\n+source(\"nn/layers/sigmoid.dml\") as sigmoid\n+source(\"nn/layers/l2_reg.dml\") as l2_reg\n+source(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\n+\n+train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val)\n+ return (matrix[double] w0, matrix[double] W, matrix[double] V, double loss) {\n+ /*\n+ * Trains the FM model.\n+ *\n+ * Inputs:\n+ * - X : n examples with d features, of shape (n, d).\n+ * - y : label corresponds to n examples\n+ * - lambda : regularization (5e-04)\n+ *\n+ * Outputs:\n+ * - w0, W, V : updated model parameters.\n+ * - loss : computed loss with log_loss.\n+ *\n+ * input propagation through layers\n+ * fm::init -> adam::init -> fm::forward -> sigmoid::forward -> log_loss::forward \\\n+ * adam::update <- fm::backward <- sigmoid::backward <- log_loss::backward <-\n+ */\n+\n+ n = nrow(X);\n+ d = ncol(X);\n+ k = 2; # factorization dimensionality, only(=2) possible for now.\n+\n+ # 1.initialize fm core\n+ [w0, W, V] = fm::init(n, d, k);\n+\n+ # 2.initialize adam optimizer\n+ ## Default values for some parameters\n+ lr = 0.001;\n+ beta1 = 0.9; # [0, 1)\n+ beta2 = 0.999; # [0, 1)\n+ epsilon = 0.00000001;\n+ t = 0;\n+\n+ # [mX, vX] = adam::init(X); # to optimize input.\n+ [mw0, vw0] = adam::init(w0);\n+ [mW, vW] = adam::init(W);\n+ [mV, vV] = adam::init(V);\n+\n+ # Regularization\n+ lambda = 5e-04\n+\n+ # Optimize\n+ print(\"Starting optimization\")\n+ batch_size = 10\n+ iters = ceil(1000 / batch_size)\n+ epochs = 100; N = n;\n+ for (e in 1:epochs) {\n+ for (i in 1:iters) {\n+ # Get the next batch\n+ beg = ((i-1) * batch_size) %% N + 1\n+ end = min(N, beg + batch_size - 1)\n+ X_batch = X[beg:end,]\n+ y_batch = y[beg:end,]\n+\n+ # 3.Send inputs through fm::forward\n+ y_res = fm::forward(X_batch, w0, W, V);\n+\n+ # 4.Send the above result through sigmoid::forward\n+ sfy = sigmoid::forward(y_res);\n+\n+ # 5.Send the above result through log_loss::forward\n+ loss = log_loss::forward(sfy, y_batch);\n+\n+ # Compute loss & accuracy for training & validation data every 100 iterations.\n+ if (i %% 100 == 0) {\n+ # Compute training loss & accuracy\n+ loss_data = log_loss::forward(sfy, y_batch);\n+ loss_reg_w0 = l2_reg::forward(w0, lambda);\n+ loss_reg_W = l2_reg::forward(W, lambda);\n+ loss_reg_V = l2_reg::forward(V, lambda);\n+\n+ accuracy = mean((sfy<0.5) == (y_batch<0.5));\n+ loss = loss_data + loss_reg_w0 + loss_reg_W + loss_reg_V;\n+\n+ # Compute validation loss & accuracy\n+ probs_val = predict(X_val, w0, W, V)\n+ loss_val = log_loss::forward(probs_val, y_val)\n+ accuracy_val = mean((probs_val<0.5) == (y_val<0.5))\n+\n+ # Output results\n+ print(\"Epoch: \" + e + \", Iter: \" + i + \", Train Loss: \" + loss + \", Train Accuracy: \"\n+ + accuracy + \", Val Loss: \" + loss_val + \", Val Accuracy: \" + accuracy_val)\n+ }\n+\n+ # 6.Send the result of sigmoid::forward and the correct labels y to log_loss::backward\n+ dsfy = log_loss::backward(sfy, y_batch);\n+\n+ # 7.Send the above result through sigmoid::backward\n+ dy = sigmoid::backward(dsfy, y_res);\n+\n+ # 8.Send the above result through fm::backward\n+ [dw0, dW, dV] = fm::backward(dy, X_batch, 
w0, W, V);\n+\n+ # 9. update the timestep\n+ t = e * i - 1;\n+\n+ # 10.Call adam::update for all parameters\n+\n+ # Incase we want to optimize inputs (X) also, as in deep dream.\n+ #[X, mX, vX] = adam::update(X, dX, lr, beta1, beta2, epsilon, t, mX, vX);\n+\n+ [w0, mw0, vw0] = adam::update(w0, dw0, lr, beta1, beta2, epsilon, t, mw0, vw0);\n+ [W, mW, vW] = adam::update(W, dW, lr, beta1, beta2, epsilon, t, mW, vW );\n+ [V, mV, vV] = adam::update(V, dV, lr, beta1, beta2, epsilon, t, mV, vV );\n+ }\n+ }\n+}\n+\n+predict = function(matrix[double] X, matrix[double] w0, matrix[double] W, matrix[double] V)\n+ return (matrix[double] out) {\n+ /*\n+ * Computes the predictions for the given inputs.\n+ *\n+ * Inputs:\n+ * - X : n examples with d features, of shape (n, d).\n+ * - w0, W, V : trained model parameters.\n+ *\n+ * Outputs:\n+ * - out : target vector, y.\n+ */\n+\n+ # 1.initialize fm core\n+ #[w0, W, V] = fm::init(d, k);\n+\n+ # 2.Send inputs through fm::forward\n+ y = fm::forward(X, w0, W, V);\n+\n+ # 3.Send the above result through sigmoid::forward\n+ out = sigmoid::forward(y);\n+\n+ # 4.Send the above result through log_loss::forward\n+ # loss = log_loss::forward(out);\n+\n+}\n+\n+eval = function(matrix[double] probs, matrix[double] y)\n+ return (double loss, double accuracy) {\n+ /**\n+ * Computes loss and accuracy.\n+ */\n+\n+ # 1. compute log loss\n+ loss = log_loss::forward(probs, y);\n+\n+ # 2. compute accuracy\n+ accuracy = mean( (probs<0.5) == (y<0.5) )\n+}\n+\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1437] Factorization Machines Binary classification script This patch adds a binary classification script built on top of factorization machines, with a sample data script for evaluation. Closes #699.
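The forward chain documented in this script (fm::forward -> sigmoid::forward -> log_loss::forward) corresponds to the standard probabilistic binary classifier; with the usual definitions:

```latex
% FM score, sigmoid, then log loss against labels y in {0,1}
% (standard definitions, matching the layers the script sources).
\[
  p = \sigma(\hat{y}) = \frac{1}{1 + e^{-\hat{y}}}, \qquad
  \mathcal{L}(p, y) = -\,y \log p \;-\; (1 - y)\log(1 - p)
\]
```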
49,738
09.05.2018 20:47:30
25,200
fa6394cc6981e80b262549ab86bd535124c3212b
[MINOR] Simplification 1x1 matrix initialization with scalar
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/QuaternaryCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/QuaternaryCPInstruction.java", "diff": "@@ -106,15 +106,11 @@ public class QuaternaryCPInstruction extends ComputationCPInstruction {\nMatrixBlock matBlock3 = ec.getMatrixInput(input3.getName(), getExtendedOpcode());\nMatrixBlock matBlock4 = null;\nif( qop.hasFourInputs() ) {\n- if (input4.getDataType() == DataType.SCALAR) {\n- matBlock4 = new MatrixBlock(1, 1, false);\n- final double eps = ec.getScalarInput(input4.getName(), input4.getValueType(), input4.isLiteral()).getDoubleValue();\n- matBlock4.quickSetValue(0, 0, eps);\n- }\n- else {\n+ if (input4.getDataType() == DataType.SCALAR)\n+ matBlock4 = new MatrixBlock(ec.getScalarInput(input4).getDoubleValue());\n+ else\nmatBlock4 = ec.getMatrixInput(input4.getName(), getExtendedOpcode());\n}\n- }\n//core execute\nMatrixBlock out = matBlock1.quaternaryOperations(qop, matBlock2, matBlock3, matBlock4, new MatrixBlock(), _numThreads);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java", "diff": "@@ -581,8 +581,7 @@ public class VariableCPInstruction extends CPInstruction {\nelse if( getInput1().getDataType()==DataType.SCALAR ) {\nScalarObject scalarInput = ec.getScalarInput(\ngetInput1().getName(), getInput1().getValueType(), getInput1().isLiteral());\n- MatrixBlock out = new MatrixBlock(1,1,false);\n- out.quickSetValue(0, 0, scalarInput.getDoubleValue());\n+ MatrixBlock out = new MatrixBlock(scalarInput.getDoubleValue());\nec.setMatrixOutput(output.getName(), out, getExtendedOpcode());\n}\nelse if( getInput1().getDataType()==DataType.LIST ) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/mr/QuaternaryInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/mr/QuaternaryInstruction.java", "diff": "@@ -309,9 +309,8 @@ public class QuaternaryInstruction extends MRInstruction implements IDistributed\nIndexedMatrixValue iWij = (_input4 != -1) ? cachedValues.getFirst(_input4) : null;\nMatrixValue Wij = (iWij!=null) ? iWij.getValue() : null;\nif (null == Wij && qop.hasFourInputs()) {\n- MatrixBlock mb = new MatrixBlock(1, 1, false);\nString[] parts = InstructionUtils.getInstructionParts(instString);\n- mb.quickSetValue(0, 0, Double.valueOf(parts[4]));\n+ MatrixBlock mb = new MatrixBlock(Double.valueOf(parts[4]));\nWij = mb;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -5239,10 +5239,8 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nelse if( qop.wtype3 != null ){ //wdivmm\n//note: for wdivmm-minus X and W interchanged because W always present\nMatrixBlock W = qop.wtype3.hasFourInputs() ? 
checkType(wm) : null;\n- if( qop.getScalar() != 0 ) {\n- W = new MatrixBlock(1, 1, false);\n- W.quickSetValue(0, 0, qop.getScalar());\n- }\n+ if( qop.getScalar() != 0 )\n+ W = new MatrixBlock(qop.getScalar());\nif( k > 1 )\nLibMatrixMult.matrixMultWDivMM(X, U, V, W, R, qop.wtype3, k);\nelse\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/UnivariateStatsBasicTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/UnivariateStatsBasicTest.java", "diff": "@@ -77,8 +77,7 @@ public class UnivariateStatsBasicTest extends AutomatedTestBase\nrunTest(true, false, null, -1);\n//write input types\n- MatrixBlock mb = new MatrixBlock(1,1,false);\n- mb.quickSetValue(0, 0, 1);\n+ MatrixBlock mb = new MatrixBlock(1d);\nMatrixWriterFactory.createMatrixWriter(OutputInfo.CSVOutputInfo)\n.writeMatrixToHDFS(mb, input(\"uni-types.csv\"), 1, 1, 1, 1, 1);\nMapReduceTool.writeMetaDataFile(input(\"uni-types.csv.mtd\"), ValueType.DOUBLE,\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Simplification of 1x1 matrix initialization with a scalar
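In miniature, the before/after of this simplification, assuming only the MatrixBlock(double) scalar constructor that the diff introduces at its call sites:

```java
import org.apache.sysml.runtime.matrix.data.MatrixBlock;

public class ScalarBlockExample {
    public static void main(String[] args) {
        double eps = 1e-6;
        // Before: allocate an empty 1x1 dense block, then set its single cell.
        MatrixBlock before = new MatrixBlock(1, 1, false);
        before.quickSetValue(0, 0, eps);
        // After: the scalar constructor used throughout this patch.
        MatrixBlock after = new MatrixBlock(eps);
        System.out.println(before.quickGetValue(0, 0) == after.quickGetValue(0, 0));
    }
}
```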
49,738
10.05.2018 12:27:58
25,200
6f2c885e8aad480349e039fcd0390feb341b3639
[MINOR] Fix uaggouterchain compilation (output data types)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java", "diff": "@@ -194,7 +194,7 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\ngetDataType(), getValueType());\nunary1.getOutputParameters().setDimensions(0, 0, 0, 0, -1);\nsetLineNumbers(unary1);\n- setLops(unary1);\n+ agg1 = unary1;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/UaggOuterChainCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/UaggOuterChainCPInstruction.java", "diff": "@@ -94,26 +94,13 @@ public class UaggOuterChainCPInstruction extends UnaryCPInstruction {\nif( _uaggOp.aggOp.correctionExists )\nmbOut.dropLastRowsOrColumns(_uaggOp.aggOp.correctionLocation);\n- String output_name = output.getName();\n- //final aggregation if required\n- if(_uaggOp.indexFn instanceof ReduceAll ) //RC AGG (output is scalar)\n- {\n- //create and set output scalar\n- ScalarObject ret = null;\n- switch( output.getValueType() ) {\n- case DOUBLE: ret = new DoubleObject(mbOut.quickGetValue(0, 0)); break;\n-\n- default:\n- throw new DMLRuntimeException(\"Invalid output value type: \"+output.getValueType());\n+ if(_uaggOp.indexFn instanceof ReduceAll ) { //RC AGG (output is scalar)\n+ ec.setMatrixOutput(output.getName(), new MatrixBlock(\n+ mbOut.quickGetValue(0, 0)), getExtendedOpcode());\n}\n- ec.setScalarOutput(output_name, ret);\n- }\n- else //R/C AGG (output is rdd)\n- {\n- //Additional memory requirement to convert from dense to sparse can be leveraged from released memory needed for input data above.\n+ else { //R/C AGG (output is rdd)\nmbOut.examSparsity();\n- ec.setMatrixOutput(output_name, mbOut, getExtendedOpcode());\n+ ec.setMatrixOutput(output.getName(), mbOut, getExtendedOpcode());\n}\n-\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/matrix/UaggOuterChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/matrix/UaggOuterChainTest.java", "diff": "@@ -44,7 +44,6 @@ import org.apache.sysml.utils.Statistics;\n*/\npublic class UaggOuterChainTest extends AutomatedTestBase\n{\n-\nprivate final static String TEST_NAME1 = \"UaggOuterChain\";\nprivate final static String TEST_DIR = \"functions/binary/matrix/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + UaggOuterChainTest.class.getSimpleName() + \"/\";\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix uaggouterchain compilation (output data types)
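For context, a hypothetical DML pattern of my own construction that can compile to a fused uaggouterchain operator; the ReduceAll (full-aggregate) case is the one whose output data type this commit fixes, now emitted as a 1x1 matrix as the compiler expects.

```dml
# Assumed repro shape (not from the commit): a full aggregate
# over an outer comparison of two vectors.
X = rand(rows=1000, cols=1);
Y = rand(rows=1000, cols=1);
s = sum(outer(X, t(Y), "<"));  # ReduceAll over the outer product
print(s);
```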
49,738
10.05.2018 13:47:06
25,200
4e4586881a8086b40f99c184d8875931e8836a76
Fix length over lists w/ unknown dims initial compile This patch fixes the runtime instruction for length operations over lists with unknown size during initial compilation. Initially known sizes lead to constant propagation and replacement, which had hidden the existing issue in the runtime instruction.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java", "diff": "@@ -87,8 +87,13 @@ public class AggregateUnaryCPInstruction extends UnaryCPInstruction\nthrow new DMLRuntimeException(\"Variable '\"+input1.getName()+\"' does not exist.\");\n//get meta data information\n+ long rval = -1;\n+ if (input1.getDataType() == DataType.LIST && _type == AUType.LENGTH ) {\n+ rval = ((ListObject)ec.getVariable(input1.getName())).getLength();\n+ }\n+ else if( input1.getDataType().isMatrix() || input1.getDataType().isFrame() ) {\nMatrixCharacteristics mc = ec.getMatrixCharacteristics(input1.getName());\n- long rval = getSizeMetaData(_type, mc);\n+ rval = getSizeMetaData(_type, mc);\n//check for valid output, and acquire read if necessary\n//(Use case: In case of forced exec type singlenode, there are no reblocks. For csv\n@@ -117,6 +122,7 @@ public class AggregateUnaryCPInstruction extends UnaryCPInstruction\nthrow new DMLRuntimeException(\"Invalid meta data returned by '\"+opcode+\"': \"+rval + \":\" + instString);\n}\n}\n+ }\n//create and set output scalar\nec.setScalarOutput(output_name, new IntObject(rval));\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2309] Fix length over lists w/ unknown dims initial compile This patch fixes the runtime instruction for length operations over lists with unknown size during initial compilation. Initially known sizes lead to constant propagation and replacement, which had hidden the existing issue in the runtime instruction.
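A sketch of the kind of program affected, assuming only the list(...) and length(...) builtins used elsewhere in this code base; the branch keeps the list's size unknown during initial compilation, so no constant is propagated and the fixed runtime instruction has to do the work.

```dml
# Hypothetical repro: the conditional prevents length(X) from being
# folded to a constant at initial compile time.
X = list(1, 2, 3);
if( as.scalar(rand(rows=1, cols=1)) > 0.5 ) {
  X = list(1, 2, 3, 4);
}
print(length(X));
```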
49,738
10.05.2018 14:07:28
25,200
ae86c3f767f12e7be0572e4c1308ce1e0a02d024
Support for lists and named-lists in function calls This patch makes all necessary compiler and runtime extensions to support lists with unknown value type as function inputs and outputs. Besides parser extensions, this also includes additional tests and a cleanup of redundant code in the dml and pydml syntactic parsers.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/FunctionStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/FunctionStatementBlock.java", "diff": "@@ -85,14 +85,6 @@ public class FunctionStatementBlock extends StatementBlock\nraiseValidateError(\"for function \" + fstmt.getName() + \", return variable \" + returnValue.getName() + \" must be defined in function \", conditional);\n}\n- if (curr.getDataType() == DataType.UNKNOWN){\n- raiseValidateError(\"for function \" + fstmt.getName() + \", return variable \" + curr.getName() + \" data type of \" + curr.getDataType() + \" may not match data type in function signature of \" + returnValue.getDataType(), true);\n- }\n-\n- if (curr.getValueType() == ValueType.UNKNOWN){\n- raiseValidateError(\"for function \" + fstmt.getName() + \", return variable \" + curr.getName() + \" data type of \" + curr.getValueType() + \" may not match data type in function signature of \" + returnValue.getValueType(), true);\n- }\n-\nif (curr.getDataType() != DataType.UNKNOWN && !curr.getDataType().equals(returnValue.getDataType()) ){\nraiseValidateError(\"for function \" + fstmt.getName() + \", return variable \" + curr.getName() + \" data type of \" + curr.getDataType() + \" does not match data type in function signature of \" + returnValue.getDataType(), conditional);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/common/CommonSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/common/CommonSyntacticValidator.java", "diff": "@@ -42,6 +42,8 @@ import org.apache.sysml.parser.DataIdentifier;\nimport org.apache.sysml.parser.DoubleIdentifier;\nimport org.apache.sysml.parser.Expression;\nimport org.apache.sysml.parser.Expression.DataOp;\n+import org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.parser.FunctionCallIdentifier;\nimport org.apache.sysml.parser.IntIdentifier;\nimport org.apache.sysml.parser.LanguageException;\n@@ -705,12 +707,52 @@ public abstract class CommonSyntacticValidator {\n* @param start antlr token\n*/\nprotected void checkValidDataType(String datatype, Token start) {\n- boolean validMatrixType =\n- datatype.equals(\"matrix\") || datatype.equals(\"Matrix\") ||\n- datatype.equals(\"frame\") || datatype.equals(\"Frame\") ||\n- datatype.equals(\"scalar\") || datatype.equals(\"Scalar\");\n- if(!validMatrixType ) {\n- notifyErrorListeners(\"incorrect datatype (expected matrix, frame or scalar)\", start);\n+ boolean validMatrixType = datatype.equals(\"matrix\") || datatype.equals(\"Matrix\")\n+ || datatype.equals(\"frame\") || datatype.equals(\"Frame\")\n+ || datatype.equals(\"list\") || datatype.equals(\"List\")\n+ || datatype.equals(\"scalar\") || datatype.equals(\"Scalar\");\n+ if( !validMatrixType )\n+ notifyErrorListeners(\"incorrect datatype (expected matrix, frame, list, or scalar)\", start);\n+ }\n+\n+ protected boolean setDataAndValueType(DataIdentifier dataId, String dataType, String valueType, Token start, boolean shortVt, boolean helpBool) {\n+ if( dataType.equalsIgnoreCase(\"matrix\") )\n+ dataId.setDataType(DataType.MATRIX);\n+ else if( dataType.equalsIgnoreCase(\"frame\") )\n+ dataId.setDataType(DataType.FRAME);\n+ else if( dataType.equalsIgnoreCase(\"list\") )\n+ dataId.setDataType(DataType.LIST);\n+ else if( dataType.equalsIgnoreCase(\"scalar\") )\n+ dataId.setDataType(DataType.SCALAR);\n+\n+ if( (shortVt && valueType.equals(\"int\"))\n+ || 
valueType.equals(\"int\") || valueType.equals(\"integer\")\n+ || valueType.equals(\"Int\") || valueType.equals(\"Integer\")) {\n+ dataId.setValueType(ValueType.INT);\n+ }\n+ else if( (shortVt && valueType.equals(\"str\"))\n+ || valueType.equals(\"string\") || valueType.equals(\"String\")) {\n+ dataId.setValueType(ValueType.STRING);\n+ }\n+ else if( (shortVt && valueType.equals(\"bool\"))\n+ || valueType.equals(\"boolean\") || valueType.equals(\"Boolean\")) {\n+ dataId.setValueType(ValueType.BOOLEAN);\n+ }\n+ else if( (shortVt && valueType.equals(\"float\") )\n+ || valueType.equals(\"double\") || valueType.equals(\"Double\")) {\n+ dataId.setValueType(ValueType.DOUBLE);\n+ }\n+ else if(valueType.equals(\"unknown\") || (!shortVt && valueType.equals(\"Unknown\"))) {\n+ dataId.setValueType(ValueType.UNKNOWN);\n+ }\n+ else if(helpBool && valueType.equals(\"bool\")) {\n+ notifyErrorListeners(\"invalid valuetype \" + valueType + \" (Quickfix: use \\'boolean\\' instead)\", start);\n+ return false;\n}\n+ else {\n+ notifyErrorListeners(\"invalid valuetype \" + valueType, start);\n+ return false;\n+ }\n+ return true;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/dml/Dml.g4", "new_path": "src/main/java/org/apache/sysml/parser/dml/Dml.g4", "diff": "@@ -190,8 +190,8 @@ ID : (ALPHABET (ALPHABET|DIGIT|'_')* '::')? ALPHABET (ALPHABET|DIGIT|'_')*\nml_type : valueType | dataType '[' valueType ']';\n// Note to reduce number of keywords, these are case-sensitive,\n// To allow case-insenstive, 'int' becomes: ('i' | 'I') ('n' | 'N') ('t' | 'T')\n-valueType: 'int' | 'integer' | 'string' | 'boolean' | 'double'\n- | 'Int' | 'Integer' | 'String' | 'Boolean' | 'Double';\n+valueType: 'int' | 'integer' | 'string' | 'boolean' | 'double' | 'unknown'\n+ | 'Int' | 'Integer' | 'String' | 'Boolean' | 'Double' | 'Unknown';\ndataType:\n// 'scalar' # ScalarDataTypeDummyCheck\n// |\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/dml/DmlSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/dml/DmlSyntacticValidator.java", "diff": "@@ -38,8 +38,6 @@ import org.apache.sysml.parser.ConditionalPredicate;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.DataIdentifier;\nimport org.apache.sysml.parser.Expression;\n-import org.apache.sysml.parser.Expression.DataType;\n-import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.parser.ExpressionList;\nimport org.apache.sysml.parser.ExternalFunctionStatement;\nimport org.apache.sysml.parser.ForStatement;\n@@ -702,49 +700,15 @@ public class DmlSyntacticValidator extends CommonSyntacticValidator implements D\nArrayList<DataIdentifier> retVal = new ArrayList<>();\nfor(TypedArgNoAssignContext paramCtx : ctx) {\nDataIdentifier dataId = new DataIdentifier(paramCtx.paramName.getText());\n- String dataType = null;\n- String valueType = null;\n-\n- if(paramCtx.paramType == null || paramCtx.paramType.dataType() == null\n- || paramCtx.paramType.dataType().getText() == null || paramCtx.paramType.dataType().getText().isEmpty()) {\n- dataType = \"scalar\";\n- }\n- else {\n- dataType = paramCtx.paramType.dataType().getText();\n- }\n-\n+ String dataType = (paramCtx.paramType == null || paramCtx.paramType.dataType() == null\n+ || paramCtx.paramType.dataType().getText() == null || paramCtx.paramType.dataType().getText().isEmpty()) ?\n+ \"scalar\" : paramCtx.paramType.dataType().getText();\n+ String valueType = paramCtx.paramType.valueType().getText();\n//check 
and assign data type\ncheckValidDataType(dataType, paramCtx.start);\n- if( dataType.equalsIgnoreCase(\"matrix\") )\n- dataId.setDataType(DataType.MATRIX);\n- else if( dataType.equalsIgnoreCase(\"frame\") )\n- dataId.setDataType(DataType.FRAME);\n- else if( dataType.equalsIgnoreCase(\"scalar\") )\n- dataId.setDataType(DataType.SCALAR);\n-\n- valueType = paramCtx.paramType.valueType().getText();\n- if(valueType.equals(\"int\") || valueType.equals(\"integer\")\n- || valueType.equals(\"Int\") || valueType.equals(\"Integer\")) {\n- dataId.setValueType(ValueType.INT);\n- }\n- else if(valueType.equals(\"string\") || valueType.equals(\"String\")) {\n- dataId.setValueType(ValueType.STRING);\n- }\n- else if(valueType.equals(\"boolean\") || valueType.equals(\"Boolean\")) {\n- dataId.setValueType(ValueType.BOOLEAN);\n- }\n- else if(valueType.equals(\"double\") || valueType.equals(\"Double\")) {\n- dataId.setValueType(ValueType.DOUBLE);\n- }\n- else if(valueType.equals(\"bool\")) {\n- notifyErrorListeners(\"invalid valuetype \" + valueType + \" (Quickfix: use \\'boolean\\' instead)\", paramCtx.start);\n+ if( !setDataAndValueType(dataId, dataType, valueType, paramCtx.start, false, true) )\nreturn null;\n- }\n- else {\n- notifyErrorListeners(\"invalid valuetype \" + valueType, paramCtx.start);\n- return null;\n- }\nretVal.add(dataId);\n}\nreturn retVal;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/pydml/PydmlSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/pydml/PydmlSyntacticValidator.java", "diff": "@@ -42,8 +42,6 @@ import org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.DataIdentifier;\nimport org.apache.sysml.parser.DoubleIdentifier;\nimport org.apache.sysml.parser.Expression;\n-import org.apache.sysml.parser.Expression.DataType;\n-import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.parser.ExternalFunctionStatement;\nimport org.apache.sysml.parser.ForStatement;\nimport org.apache.sysml.parser.FunctionCallIdentifier;\n@@ -1371,43 +1369,15 @@ public class PydmlSyntacticValidator extends CommonSyntacticValidator implements\nArrayList<DataIdentifier> retVal = new ArrayList<>();\nfor(TypedArgNoAssignContext paramCtx : ctx) {\nDataIdentifier dataId = new DataIdentifier(paramCtx.paramName.getText());\n- String dataType = null;\n- String valueType = null;\n-\n- if(paramCtx.paramType == null || paramCtx.paramType.dataType() == null\n- || paramCtx.paramType.dataType().getText() == null || paramCtx.paramType.dataType().getText().isEmpty()) {\n- dataType = \"scalar\";\n- }\n- else {\n- dataType = paramCtx.paramType.dataType().getText();\n- }\n+ String dataType = (paramCtx.paramType == null || paramCtx.paramType.dataType() == null\n+ || paramCtx.paramType.dataType().getText() == null || paramCtx.paramType.dataType().getText().isEmpty()) ?\n+ \"scalar\" : paramCtx.paramType.dataType().getText();\n+ String valueType = paramCtx.paramType.valueType().getText();\n//check and assign data type\ncheckValidDataType(dataType, paramCtx.start);\n- if( dataType.equals(\"matrix\") )\n- dataId.setDataType(DataType.MATRIX);\n- else if( dataType.equals(\"frame\") )\n- dataId.setDataType(DataType.FRAME);\n- else if( dataType.equals(\"scalar\") )\n- dataId.setDataType(DataType.SCALAR);\n-\n- valueType = paramCtx.paramType.valueType().getText();\n- if(valueType.equals(\"int\")) {\n- dataId.setValueType(ValueType.INT);\n- }\n- else if(valueType.equals(\"str\")) {\n- dataId.setValueType(ValueType.STRING);\n- }\n- else 
if(valueType.equals(\"bool\")) {\n- dataId.setValueType(ValueType.BOOLEAN);\n- }\n- else if(valueType.equals(\"float\")) {\n- dataId.setValueType(ValueType.DOUBLE);\n- }\n- else {\n- notifyErrorListeners(\"invalid valuetype \" + valueType, paramCtx.start);\n+ if( !setDataAndValueType(dataId, dataType, valueType, paramCtx.start, true, false) )\nreturn null;\n- }\nretVal.add(dataId);\n}\nreturn retVal;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "diff": "@@ -34,6 +34,9 @@ public class ListAndStructTest extends AutomatedTestBase\n{\nprivate static final String TEST_NAME1 = \"ListUnnamed\";\nprivate static final String TEST_NAME2 = \"ListNamed\";\n+ private static final String TEST_NAME3 = \"ListUnnamedFun\";\n+ private static final String TEST_NAME4 = \"ListNamedFun\";\n+\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + ListAndStructTest.class.getSimpleName() + \"/\";\n@@ -43,6 +46,8 @@ public class ListAndStructTest extends AutomatedTestBase\nTestUtils.clearAssertionInformation();\naddTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n}\n@Test\n@@ -65,6 +70,26 @@ public class ListAndStructTest extends AutomatedTestBase\nrunListStructTest(TEST_NAME2, true);\n}\n+ @Test\n+ public void testListUnnamedFun() {\n+ runListStructTest(TEST_NAME3, false);\n+ }\n+\n+ @Test\n+ public void testListUnnamedFunRewrites() {\n+ runListStructTest(TEST_NAME3, true);\n+ }\n+\n+ @Test\n+ public void testListNamedFun() {\n+ runListStructTest(TEST_NAME4, false);\n+ }\n+\n+ @Test\n+ public void testListNamedFunRewrites() {\n+ runListStructTest(TEST_NAME4, true);\n+ }\n+\nprivate void runListStructTest(String testname, boolean rewrites)\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListNamedFun.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(a=A, b=B, c=c, d=D, e=matrix(5, 3, 3), f=6);\n+\n+for( i in 1:length(X) ) {\n+ tmp = X[i]\n+ if( !exists(\"tmp\") )\n+ print(\"ERROR: non-existing entry \"+i );\n+}\n+\n+R = as.matrix(sum(as.matrix(X[['e']])));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListNamedFun.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo = function(List[unknown] X) return (List[unknown] R) {\n+ for( i in 1:length(X) ) {\n+ tmp = X[i];\n+ if( !exists(tmp) )\n+ print(\"ERROR: non-existing entry \"+i );\n+ }\n+ R = X['e'];\n+}\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(a=A, b=B, c=c, d=D, e=matrix(5, 3, 3), f=6);\n+R = as.matrix(foo(X));\n+R = as.matrix(sum(R));\n+\n+write(R, $1);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListUnnamedFun.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(A, B, c, D, matrix(5, 3, 3), 6);\n+\n+for( i in 1:length(X) ) {\n+ tmp = X[i]\n+ if( !exists(\"tmp\") )\n+ print(\"ERROR: non-existing entry \"+i );\n+}\n+\n+R = as.matrix(sum(as.matrix(X[[5]])));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListUnnamedFun.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo = function(List[unknown] X) return (List[unknown] R) {\n+ for( i in 1:length(X) ) {\n+ tmp = X[i];\n+ if( !exists(tmp) )\n+ print(\"ERROR: non-existing entry \"+i );\n+ }\n+ R = X[5];\n+}\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(A, B, c, D, matrix(5, 3, 3), 6);\n+R = as.matrix(foo(X));\n+R = as.matrix(sum(R));\n+\n+write(R, $1);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2311] Support for lists and named-lists in function calls This patch makes all necessary compiler and runtime extensions to support lists with unknown value type as function inputs and outputs. Besides parser extensions, this also includes additional tests and a cleanup of redundant code in the dml and pydml syntactic parsers.
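Condensed from the new tests above, the essence of the feature: List[unknown] is now legal in both the parameter list and the return signature of a function.

```dml
foo = function(List[unknown] X) return (List[unknown] R) {
  R = X[5];                      # positional slicing yields a sub-list
}
X = list(1, 2, 3, 4, matrix(5, 3, 3), 6);
R = as.matrix(foo(X));           # unwrap the single entry as a matrix
print(sum(R));
```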
49,738
11.05.2018 17:38:38
25,200
f6b8b74a2305facc7fc34766be5ae50248428eb7
[HOTFIX] Fix javadoc for recent JMLC API extensions
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java", "new_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java", "diff": "@@ -222,7 +222,7 @@ public class Connection implements Closeable\n* Prepares (precompiles) a script, sets input parameter values, and registers input and output variables.\n*\n* @param script string representing of the DML or PyDML script\n- * @param nsscript map (name, script) of the DML or PyDML namespace scripts\n+ * @param nsscripts map (name, script) of the DML or PyDML namespace scripts\n* @param args map of input parameters ($) and their values\n* @param inputs string array of input variables to register\n* @param outputs string array of output variables to register\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix javadoc for recent JMLC API extensions
49,738
14.05.2018 18:45:52
25,200
94260013acad86bccaf864c97f4f21de3b1b0393
[MINOR][SYSTEMML-2067] New codegen tests for conv2d and bias_add ops
[ { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowConv2DOperationsTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.test.integration.functions.codegen;\n+\n+import java.io.File;\n+import java.util.HashMap;\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n+\n+public class RowConv2DOperationsTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"RowConv2DTest\";\n+ private final static String TEST_DIR = \"functions/codegen/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + RowConv2DOperationsTest.class.getSimpleName() + \"/\";\n+\n+ private final static String TEST_CONF = \"SystemML-config-codegen.xml\";\n+ private final static File TEST_CONF_FILE = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF);\n+\n+ private static final double eps = Math.pow(10, -10);\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] {\"B\"}));\n+ }\n+\n+ @Test\n+ public void testConv2DDenseDenseCP() {\n+ runConv2DTest(TEST_NAME1, true, 16, 64, 1, 3, 2, 1, 0, false, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testConv2DSparseDenseCP() {\n+ runConv2DTest(TEST_NAME1, true, 16, 64, 1, 3, 2, 1, 0, true, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testConv2DDenseDenseSP() {\n+ runConv2DTest(TEST_NAME1, true, 16, 64, 1, 3, 2, 1, 0, false, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testConv2DSparseDenseSP() {\n+ runConv2DTest(TEST_NAME1, true, 16, 64, 1, 3, 2, 1, 0, true, false, ExecType.SPARK);\n+ }\n+\n+ public void runConv2DTest(String testname, boolean rewrites, int imgSize, int numImg, int numChannels,\n+ int numFilters, int filterSize, int stride, int pad, boolean sparse1, boolean sparse2, ExecType et)\n+ {\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ) {\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == 
RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ String sparseVal1 = String.valueOf(sparse1).toUpperCase();\n+ String sparseVal2 = String.valueOf(sparse2).toUpperCase();\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"recompile_runtime\", \"-stats\", \"-args\",\n+ String.valueOf(imgSize), String.valueOf(numImg), String.valueOf(numChannels),\n+ String.valueOf(numFilters), String.valueOf(filterSize), String.valueOf(stride),\n+ String.valueOf(pad), output(\"B\"), sparseVal1, sparseVal2 };\n+\n+ fullRScriptName = HOME + testname + \".R\";\n+ rCmd = getRCmd(String.valueOf(imgSize), String.valueOf(numImg), String.valueOf(numChannels),\n+ String.valueOf(numFilters), String.valueOf(filterSize), String.valueOf(stride),\n+ String.valueOf(pad), expectedDir(), sparseVal1, sparseVal2);\n+\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"B\");\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ //Assert.assertTrue(heavyHittersContainsSubString(\"spoofRA\")\n+ // || heavyHittersContainsSubString(\"sp_spoofRA\"));\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+\n+ /**\n+ * Override default configuration with custom test configuration to ensure\n+ * scratch space and local temporary directory locations are also updated.\n+ */\n+ @Override\n+ protected File getConfigTemplateFile() {\n+ // Instrumentation in this test's output log to show custom configuration file used for template.\n+ System.out.println(\"This test case overrides default configuration with \" + TEST_CONF_FILE.getPath());\n+ return TEST_CONF_FILE;\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/RowConv2DTest.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+imgSize=as.integer(args[1])\n+numImg=as.integer(args[2])\n+numChannels=as.integer(args[3])\n+numFilters=as.integer(args[4])\n+filterSize=as.integer(args[5])\n+stride=as.integer(args[6])\n+pad=as.integer(args[7])\n+\n+# Assumption: NCHW image format\n+x=matrix(seq(1, numImg*numChannels*imgSize*imgSize), numImg, numChannels*imgSize*imgSize, byrow=TRUE)\n+w=matrix(seq(1, numFilters*numChannels*filterSize*filterSize), numFilters, numChannels*filterSize*filterSize, byrow=TRUE)\n+\n+if(as.logical(args[9])) {\n+ zero_mask = (x - mean(x)*1.5) > 0\n+ x = x * zero_mask\n+} else {\n+ x = x - mean(x)\n+}\n+if(as.logical(args[10])) {\n+ zero_mask = (w - mean(w)*1.5) > 0\n+ w = w * zero_mask\n+} else {\n+ w = w - mean(w)\n+}\n+pad_image <- function(img, Hin, Win, padh, padw){\n+ C = nrow(img)\n+ img_padded = matrix(0, C, (Hin+2*padh)*(Win+2*padw), byrow=TRUE) # zeros\n+ for (c in 1:C) {\n+ img_slice = matrix(img[c,], Hin, Win, byrow=TRUE) # depth slice C reshaped\n+ img_padded_slice = matrix(0, Hin+2*padh, Win+2*padw)\n+ img_padded_slice[(padh+1):(padh+Hin), (padw+1):(padw+Win)] = img_slice\n+ img_padded[c,] = matrix(t(img_padded_slice), 1, (Hin+2*padh)*(Win+2*padw)) # reshape\n+ }\n+ img_padded\n+}\n+\n+im2col <- function(img, Hin, Win, Hf, Wf, strideh, stridew) {\n+ C = nrow(img)\n+ Hout = as.integer((Hin - Hf) / strideh + 1)\n+ Wout = as.integer((Win - Wf) / stridew + 1)\n+\n+ img_cols = matrix(0, C*Hf*Wf, Hout*Wout, byrow=TRUE) # zeros\n+ for (hout in 1:Hout) { # all output rows\n+ hin = (hout-1) * strideh + 1\n+ for (wout in 1:Wout) { # all output columns\n+ win = (wout-1) * stridew + 1\n+ # Extract a local patch of the input image corresponding spatially to the filter sizes.\n+ img_patch = matrix(0, C, Hf*Wf, byrow=TRUE) # zeros\n+ for (c in 1:C) { # all channels\n+ img_slice = matrix(img[c,], Hin, Win, byrow=TRUE) # reshape\n+ img_patch[c,] = matrix(t(img_slice[hin:(hin+Hf-1), win:(win+Wf-1)]), 1, Hf*Wf)\n+ }\n+ img_cols[,(hout-1)*Wout + wout] = matrix(t(img_patch), C*Hf*Wf, 1) # reshape\n+ }\n+ }\n+ img_cols\n+}\n+\n+conv2d <- function(X, W, C, Hin, Win, Hf, Wf, strideh, stridew, padh, padw) {\n+ N = nrow(X)\n+ F = nrow(W)\n+ Hout = as.integer((Hin + 2 * padh - Hf) / strideh + 1)\n+ Wout = as.integer((Win + 2 * padw - Wf) / stridew + 1)\n+\n+ # Create output volume\n+ out = matrix(0, N, F*Hout*Wout, byrow=TRUE)\n+\n+ # Convolution - im2col implementation\n+ for (n in 1:N) { # all examples\n+ Xn = matrix(X[n,], C, Hin*Win, byrow=TRUE) # reshape\n+\n+ # Pad image\n+ Xn_padded = pad_image(Xn, Hin, Win, padh, padw) # shape (C, (Hin+2*padh)*(Win+2*padw))\n+\n+ # Extract local image patches into columns with im2col, of shape (C*Hf*Wf, Hout*Wout)\n+ Xn_padded_cols = im2col(Xn_padded, Hin+2*padh, Win+2*padw, Hf, Wf, strideh, stridew)\n+\n+ # Convolve patches with filters\n+ outn = W %*% Xn_padded_cols # shape (F, Hout*Wout)\n+ out[n,] = matrix(t(outn), 1, F*Hout*Wout) # reshape\n+ }\n+\n+ out\n+}\n+\n+output = conv2d(x, w, numChannels, imgSize, imgSize, filterSize, filterSize, stride, stride, pad, pad);\n+Hout = as.integer((imgSize + 2 * pad - filterSize) / stride + 1)\n+Wout = Hout\n+\n+b=matrix(seq(1, numFilters), numFilters, 1, byrow=TRUE)\n+for(k in 0:(numFilters-1)) {\n+ for(i in 1:nrow(output)) {\n+ start = k*Hout*Hout;\n+ for(j in 1:(Hout*Hout)) {\n+ output[i,start+j] = 
output[i,start+j] + b[k+1,1]\n+ }\n+ }\n+}\n+\n+writeMM(as(output,\"CsparseMatrix\"), paste(args[8], \"B\", sep=\"\"))\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/RowConv2DTest.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+imgSize=$1\n+numImg=$2\n+numChannels=$3\n+numFilters=$4\n+filterSize=$5\n+stride=$6\n+pad=$7\n+\n+# Assumption: NCHW image format\n+x=matrix(seq(1, numImg*numChannels*imgSize*imgSize), rows=numImg, cols=numChannels*imgSize*imgSize)\n+w=matrix(seq(1, numFilters*numChannels*filterSize*filterSize), rows=numFilters, cols=numChannels*filterSize*filterSize)\n+b=matrix(seq(1, numFilters), rows=numFilters, cols=1)\n+\n+if($9) {\n+ zero_mask = (x - mean(x)*1.5) > 0\n+ x = x * zero_mask\n+}\n+else {\n+ x = x - mean(x)\n+}\n+if($10) {\n+ zero_mask = (w - mean(w)*1.5) > 0\n+ w = w * zero_mask\n+}\n+else {\n+ w = w - mean(w)\n+}\n+output = conv2d(x, w, padding=[pad, pad], stride=[stride, stride], input_shape=[numImg, numChannels, imgSize, imgSize], filter_shape=[numFilters, numChannels, filterSize, filterSize], bias=b)\n+output = bias_add(output, b)\n+\n+write(output, $8, format=\"text\")\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegen/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegen/ZPackageSuite.java", "diff": "@@ -40,6 +40,7 @@ import org.junit.runners.Suite;\nMultiAggTmplTest.class,\nOuterProdTmplTest.class,\nRowAggTmplTest.class,\n+ RowConv2DOperationsTest.class,\nRowVectorComparisonTest.class,\nSparseSideInputTest.class,\nSumProductChainTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR][SYSTEMML-2067] New codegen tests for conv2d and bias_add ops
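The operator pattern these tests exercise, condensed from the RowConv2DTest.dml script above with the shapes the test passes (16x16 images, 64 examples, 1 channel, 3 filters of size 2, stride 1, no padding):

```dml
# Condensed from the new test; data values and shapes follow its arguments.
x = matrix(seq(1, 64*1*16*16), rows=64, cols=1*16*16)  # NCHW inputs
w = matrix(seq(1, 3*1*2*2), rows=3, cols=1*2*2)        # 3 filters of size 2x2
b = matrix(seq(1, 3), rows=3, cols=1)                  # per-filter bias
out = conv2d(x, w, padding=[0, 0], stride=[1, 1],
  input_shape=[64, 1, 16, 16], filter_shape=[3, 1, 2, 2], bias=b)
out = bias_add(out, b)
```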
49,738
14.05.2018 20:49:32
25,200
f4a399730e8b98e11e5421cec2b123b1dabac1c7
[MINOR] Fix flaky jmlc test (potential side effects over tmp files)
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "diff": "@@ -53,10 +53,6 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nprivate final static double sparsity1 = 0.7;\nprivate final static double sparsity2 = 0.1;\n- //This testcase recently caused intermittent test failures on jenkins that are not\n- //reproducible in local environments; hence we perform additional sanity checks here.\n- private final static boolean CHECK_IN_OUT = true;\n-\n@Override\npublic void setUp() {\naddTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] { \"predicted_y\" }) );\n@@ -89,57 +85,42 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nloadTestConfiguration(config);\n//generate inputs\n- ArrayList<double[][]> Xset = generateInputs(nRuns, rows, cols, sparse?sparsity2:sparsity1);\n- if( CHECK_IN_OUT )\n- checkSelfEquivalence(Xset, rows, cols);\n+ ArrayList<double[][]> Xset = generateInputs(nRuns, rows, cols, sparse?sparsity2:sparsity1, 7);\n//run DML via JMLC\nArrayList<double[][]> Yset = execDMLScriptviaJMLC( Xset, flags );\n- if( CHECK_IN_OUT )\n- checkSelfEquivalence(Yset, rows, 1);\n-\n- //run R and compare results to DML result\n- String HOME = SCRIPT_DIR + TEST_DIR;\n- fullRScriptName = HOME + TEST_NAME + \".R\";\n- rCmd = getRCmd(inputDir(), expectedDir());\n- //write model data once\n+ //write out R input and model once\nMatrixBlock mb = DataConverter.readMatrixFromHDFS(SCRIPT_DIR + TEST_DIR + MODEL_FILE,\nInputInfo.TextCellInputInfo, rows, cols, 1000, 1000);\n- double[][] W = DataConverter.convertToDoubleMatrix( mb );\n- writeInputMatrix(\"W\", W, true);\n-\n- //for each input data set\n- int lnRuns = CHECK_IN_OUT ? 1 : nRuns;\n- for( int i=0; i<lnRuns; i++ ) {\n- //write input data\n- writeInputMatrix(\"X\", Xset.get(i), true);\n+ writeInputMatrix(\"X\", Xset.get(0), true);\n+ writeInputMatrix(\"W\", DataConverter.convertToDoubleMatrix(mb), true);\n- //run the R script\n+ //run R test once\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = getRCmd(inputDir(), expectedDir());\nrunRScript(true);\n- //compare results\n+ //read and convert R output\nHashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"predicted_y\");\ndouble[][] expected = TestUtils.convertHashMapToDoubleArray(rfile, rows, 1);\n+ //for each input data set compare results\n+ for( int i=0; i<nRuns; i++ )\nTestUtils.compareMatrices(expected, Yset.get(i), rows, 1, eps);\n}\n- }\nprivate static ArrayList<double[][]> execDMLScriptviaJMLC(ArrayList<double[][]> X, boolean flags)\nthrows IOException\n{\nTiming time = new Timing(true);\n-\nArrayList<double[][]> ret = new ArrayList<double[][]>();\n- //establish connection to SystemML\n- Connection conn = !flags ? new Connection():\n+ try( Connection conn = !flags ? 
new Connection():\nnew Connection(ConfigType.PARALLEL_CP_MATRIX_OPERATIONS,\nConfigType.PARALLEL_LOCAL_OR_REMOTE_PARFOR,\n- ConfigType.ALLOW_DYN_RECOMPILATION);\n-\n- try\n+ ConfigType.ALLOW_DYN_RECOMPILATION) )\n{\n// For now, JMLC pipeline only allows dml\nboolean parsePyDML = false;\n@@ -168,31 +149,18 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\n}\n}\ncatch(Exception ex) {\n- ex.printStackTrace();\nthrow new IOException(ex);\n}\n- finally {\n- if( conn != null )\n- conn.close();\n- }\nSystem.out.println(\"JMLC scoring w/ \"+nRuns+\" runs in \"+time.stop()+\"ms.\");\nreturn ret;\n}\n- private ArrayList<double[][]> generateInputs( int num, int rows, int cols, double sparsity ) {\n+ private ArrayList<double[][]> generateInputs( int num, int rows, int cols, double sparsity, int seed ) {\nArrayList<double[][]> ret = new ArrayList<double[][]>();\nfor( int i=0; i<num; i++ )\n- ret.add(getRandomMatrix(rows, cols, -1, 1, sparsity, 7));\n+ ret.add(getRandomMatrix(rows, cols, -1, 1, sparsity, seed));\nreturn ret;\n}\n-\n- private void checkSelfEquivalence(ArrayList<double[][]> data, int rows, int cols) {\n- if( data == null || data.size() < 2 )\n- return;\n- double[][] data0 = data.get(0);\n- for(int i=1; i<data.size(); i++)\n- TestUtils.compareMatrices(data0, data.get(i), rows, cols, eps);\n- }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/jmlc/m-svm-score.dml", "new_path": "src/test/scripts/functions/jmlc/m-svm-score.dml", "diff": "#\n#-------------------------------------------------------------\n-X = read(\"./tmp/X\", rows=-1, cols=-1);\n-W = read(\"./tmp/W\", rows=-1, cols=-1);\n-\n-Nt = nrow(X);\n-num_classes = ncol(W)\n-n = ncol(X);\n-\n-b = W[n+1,]\n-ones = matrix(1, rows=Nt, cols=1)\n-scores = X %*% W[1:n,] + ones %*% b;\n+X = read($1);\n+W = read($2);\n+scores = X %*% W[1:ncol(X),] + W[ncol(X)+1,];\npredicted_y = rowIndexMax(scores);\n-write(predicted_y, \"./tmp\", format=\"text\");\n+write(predicted_y, $3);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix flaky jmlc test (potential side effects over tmp files)
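The try-with-resources shape the test adopts, as a standalone sketch. It assumes the standard JMLC PreparedScript API (prepareScript / setMatrix / executeScript); the script and data here are placeholders of mine, not the test's m-svm model.

```java
import org.apache.sysml.api.jmlc.Connection;
import org.apache.sysml.api.jmlc.PreparedScript;

public class JmlcSketch {
    public static void main(String[] args) throws Exception {
        // Connection implements Closeable, so it can guard the whole run.
        try( Connection conn = new Connection() ) {
            String dml = "Y = X %*% W;";                      // placeholder script
            PreparedScript ps = conn.prepareScript(dml,
                new String[]{"X", "W"}, new String[]{"Y"}, false);
            ps.setMatrix("X", new double[][]{{1, 2}, {3, 4}});
            ps.setMatrix("W", new double[][]{{1}, {1}});
            double[][] y = ps.executeScript().getMatrix("Y");
            System.out.println(y[0][0] + " " + y[1][0]);
        }
    }
}
```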
49,748
15.05.2018 16:31:46
-19,080
df3e12aa2fd032785c4e0db10cd2e952dccd289e
[MINOR] update deprecated brew commands for installing Java and Spark. Closes #761.
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -95,14 +95,13 @@ Before you get started on SystemML, make sure that your environment is set up an\n2. **Install Java (need Java 8).**\n```\n- brew tap caskroom/cask\n- brew install Caskroom/cask/java\n+ brew tap caskroom/versions\n+ brew cask install java8\n```\n- 3. **Install Spark 2.1.**\n+ 3. **Install Spark (Newest).**\n```\n- brew tap homebrew/versions\n- brew install apache-spark21\n+ brew install apache-spark\n```\n4. **Download SystemML.**\n" }, { "change_type": "MODIFY", "old_path": "docs/beginners-guide-python.md", "new_path": "docs/beginners-guide-python.md", "diff": "@@ -52,19 +52,17 @@ If you already have an Apache Spark installation, you can skip this step.\n<div data-lang=\"OSX\" markdown=\"1\">\n```bash\n/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\n-brew tap caskroom/cask\n-brew install Caskroom/cask/java\n-brew tap homebrew/versions\n-brew install apache-spark16\n+brew tap caskroom/versions\n+brew cask install java8\n+brew install apache-spark\n```\n</div>\n<div data-lang=\"Linux\" markdown=\"1\">\n```bash\nruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install)\"\n-brew tap caskroom/cask\n-brew install Caskroom/cask/java\n-brew tap homebrew/versions\n-brew install apache-spark16\n+brew tap caskroom/versions\n+brew cask install java8\n+brew install apache-spark\n```\n</div>\n</div>\n" }, { "change_type": "MODIFY", "old_path": "src/main/standalone/README.txt", "new_path": "src/main/standalone/README.txt", "diff": "@@ -90,14 +90,13 @@ Before you get started on SystemML, make sure that your environment is set up an\n2. **Install Java (need Java 8).**\n```\n- brew tap caskroom/cask\n- brew install Caskroom/cask/java\n+ brew tap caskroom/versions\n+ brew cask install java8\n```\n- 3. **Install Spark 2.1.**\n+ 3. **Install Spark (Newest)**\n```\n- brew tap homebrew/versions\n- brew install apache-spark21\n+ brew install apache-spark\n```\n4. **Download SystemML.**\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] update deprecated brew commands for installing Java and Spark. Closes #761.
49,748
15.05.2018 22:50:38
25,200
c93d806020fc865bd1d41c52a4bfdba3863d96db
Cleanup redundant error message logging Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLProgram.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLProgram.java", "diff": "@@ -79,7 +79,6 @@ public class DMLProgram\npublic HashMap<String, FunctionStatementBlock> getFunctionStatementBlocks(String namespaceKey) {\nDMLProgram namespaceProgram = this.getNamespaces().get(namespaceKey);\nif (namespaceProgram == null){\n- LOG.error(\"ERROR: namespace \" + namespaceKey + \" is undefined\");\nthrow new LanguageException(\"ERROR: namespace \" + namespaceKey + \" is undefined\");\n}\n// for the namespace DMLProgram, get the functions in its current namespace\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -636,7 +636,6 @@ public class DMLTranslator\n}\nif (!fstmt.getBody().isEmpty()){\n- LOG.error(fstmt.printErrorLocation() + \"ExternalFunctionStatementBlock should have no statement blocks in body\");\nthrow new LopsException(fstmt.printErrorLocation() + \"ExternalFunctionStatementBlock should have no statement blocks in body\");\n}\n}\n@@ -655,7 +654,6 @@ public class DMLTranslator\n// check there are actually Lops in to process (loop stmt body will not have any)\nif (fsb.getLops() != null && !fsb.getLops().isEmpty()){\n- LOG.error(fsb.printBlockErrorLocation() + \"FunctionStatementBlock should have no Lops\");\nthrow new LopsException(fsb.printBlockErrorLocation() + \"FunctionStatementBlock should have no Lops\");\n}\n@@ -743,7 +741,6 @@ public class DMLTranslator\npredicateLops.printMe();\nif (wstb.getNumStatements() > 1){\n- LOG.error(wstb.printBlockErrorLocation() + \"WhileStatementBlock has more than 1 statement\");\nthrow new HopsException(wstb.printBlockErrorLocation() + \"WhileStatementBlock has more than 1 statement\");\n}\nWhileStatement ws = (WhileStatement)wstb.getStatement(0);\n@@ -765,7 +762,6 @@ public class DMLTranslator\npredicateLops.printMe();\nif (istb.getNumStatements() > 1){\n- LOG.error(istb.printBlockErrorLocation() + \"IfStatmentBlock has more than 1 statement\");\nthrow new HopsException(istb.printBlockErrorLocation() + \"IfStatmentBlock has more than 1 statement\");\n}\nIfStatement is = (IfStatement)istb.getStatement(0);\n@@ -810,7 +806,6 @@ public class DMLTranslator\n}\nif (fsb.getNumStatements() > 1){\n- LOG.error(fsb.printBlockErrorLocation() + \"ForStatementBlock has more than 1 statement\");\nthrow new HopsException(fsb.printBlockErrorLocation() + \"ForStatementBlock has more than 1 statement\");\n}\nForStatement ws = (ForStatement)fsb.getStatement(0);\n@@ -1322,7 +1317,6 @@ public class DMLTranslator\n//error handling missing function\nif (fsb == null){\nString error = source.printErrorLocation() + \"function \" + fci.getName() + \" is undefined in namespace \" + fci.getNamespace();\n- LOG.error(error);\nthrow new LanguageException(error);\n}\n@@ -1362,7 +1356,6 @@ public class DMLTranslator\nFunctionStatementBlock fsb = this._dmlProg.getFunctionStatementBlock(fci.getNamespace(),fci.getName());\nFunctionStatement fstmt = (FunctionStatement)fsb.getStatement(0);\nif (fstmt == null){\n- LOG.error(source.printErrorLocation() + \"function \" + fci.getName() + \" is undefined in namespace \" + fci.getNamespace());\nthrow new LanguageException(source.printErrorLocation() + \"function \" + fci.getName() + \" is undefined in namespace \" + fci.getNamespace());\n}\n@@ -1480,7 +1473,6 @@ public class DMLTranslator\nDataOp read = 
null;\nif (var == null) {\n- LOG.error(\"variable \" + varName + \" not live variable for conditional predicate\");\nthrow new ParseException(\"variable \" + varName + \" not live variable for conditional predicate\");\n} else {\nlong actualDim1 = (var instanceof IndexedIdentifier) ? ((IndexedIdentifier)var).getOrigDim1() : var.getDim1();\n@@ -1522,8 +1514,6 @@ public class DMLTranslator\nLOG.warn(predicate.printWarningLocation() + \"Numerical value '\" + predicate.toString()\n+ \"' (!= 0/1) is converted to boolean TRUE by DML\");\n} else if (predicate instanceof StringIdentifier) {\n- LOG.error(predicate.printErrorLocation() + \"String value '\" + predicate.toString()\n- + \"' is not allowed for iterable predicate\");\nthrow new ParseException(predicate.printErrorLocation() + \"String value '\" + predicate.toString()\n+ \"' is not allowed for iterable predicate\");\n}\n@@ -1569,7 +1559,6 @@ public class DMLTranslator\nDataIdentifier var = fsb.liveIn().getVariable(varName);\nDataOp read = null;\nif (var == null) {\n- LOG.error(\"variable '\" + varName + \"' is not available for iterable predicate\");\nthrow new ParseException(\"variable '\" + varName + \"' is not available for iterable predicate\");\n}\nelse {\n@@ -1742,7 +1731,6 @@ public class DMLTranslator\n// process the target to get targetHops\nHop targetOp = hops.get(target.getName());\nif (targetOp == null){\n- LOG.error(target.printErrorLocation() + \" must define matrix \" + target.getName() + \" before indexing operations are allowed \");\nthrow new ParseException(target.printErrorLocation() + \" must define matrix \" + target.getName() + \" before indexing operations are allowed \");\n}\n@@ -1922,7 +1910,6 @@ public class DMLTranslator\n}\nif (constLeft || constRight) {\n- LOG.error(source.printErrorLocation() + \"Boolean expression with constant unsupported\");\nthrow new RuntimeException(source.printErrorLocation() + \"Boolean expression with constant unsupported\");\n}\n@@ -1953,7 +1940,6 @@ public class DMLTranslator\n} else if (source.getOpCode() == Expression.BooleanOp.LOGICALOR) {\nop = OpOp2.OR;\n} else {\n- LOG.error(source.printErrorLocation() + \"Unknown boolean operation \" + source.getOpCode());\nthrow new RuntimeException(source.printErrorLocation() + \"Unknown boolean operation \" + source.getOpCode());\n}\ncurrBop = new BinaryOp(target.getName(), target.getDataType(), target.getValueType(), op, left, right);\n@@ -2195,10 +2181,6 @@ public class DMLTranslator\ndefault:\n- LOG.error(source.printErrorLocation() +\n- \"processDataExpression():: Unknown operation: \"\n- + source.getOpCode());\n-\nthrow new ParseException(source.printErrorLocation() +\n\"processDataExpression():: Unknown operation: \"\n+ source.getOpCode());\n@@ -2444,7 +2426,6 @@ public class DMLTranslator\nelse if ( sop.equalsIgnoreCase(\"!=\") )\noperation = OpOp2.NOTEQUAL;\nelse {\n- LOG.error(source.printErrorLocation() + \"Unknown argument (\" + sop + \") for PPRED.\");\nthrow new ParseException(source.printErrorLocation() + \"Unknown argument (\" + sop + \") for PPRED.\");\n}\ncurrBuiltinOp = new BinaryOp(target.getName(), target.getDataType(), target.getValueType(), operation, expr, expr2);\n@@ -2582,11 +2563,6 @@ public class DMLTranslator\nmathOp2 = Hop.OpOp1.LOG;\nbreak;\ndefault:\n-\n- LOG.error(source.printErrorLocation() +\n- \"processBuiltinFunctionExpression():: Could not find Operation type for builtin function: \"\n- + source.getOpCode());\n-\nthrow new ParseException(source.printErrorLocation() 
+\n\"processBuiltinFunctionExpression():: Could not find Operation type for builtin function: \"\n+ source.getOpCode());\n@@ -2600,11 +2576,6 @@ public class DMLTranslator\nmathOp3 = Hop.OpOp2.LOG;\nbreak;\ndefault:\n-\n- LOG.error(source.printErrorLocation() +\n- \"processBuiltinFunctionExpression():: Could not find Operation type for builtin function: \"\n- + source.getOpCode());\n-\nthrow new ParseException(source.printErrorLocation() +\n\"processBuiltinFunctionExpression():: Could not find Operation type for builtin function: \"\n+ source.getOpCode());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DataExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/DataExpression.java", "diff": "@@ -1868,7 +1868,6 @@ public class DataExpression extends DataIdentifier\n}\n} catch (IOException e){\n- //LOG.error(this.printErrorLocation() + \"Error reading MatrixMarket file: \" + filename );\n//throw new LanguageException(this.printErrorLocation() + \"Error reading MatrixMarket file: \" + filename );\nthrow new LanguageException(e);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ExternalFunctionStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/ExternalFunctionStatement.java", "diff": "@@ -149,13 +149,11 @@ public class ExternalFunctionStatement extends FunctionStatement\n@Override\npublic void initializeforwardLV(VariableSet activeIn) {\n- LOG.error(this.printErrorLocation() + \"should never call initializeforwardLV for ExternalFunctionStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should never call initializeforwardLV for ExternalFunctionStatement\");\n}\n@Override\npublic VariableSet initializebackwardLV(VariableSet lo) {\n- LOG.error(this.printErrorLocation() + \"should never call initializeforwardLV for ExternalFunctionStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should never call initializeforwardLV for ExternalFunctionStatement\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ForStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/ForStatement.java", "diff": "@@ -30,7 +30,6 @@ public class ForStatement extends Statement\n@Override\npublic Statement rewriteStatement(String prefix) {\n- LOG.error(this.printErrorLocation() + \"should not call rewriteStatement for ForStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should not call rewriteStatement for ForStatement\");\n}\n@@ -85,13 +84,11 @@ public class ForStatement extends Statement\n@Override\npublic void initializeforwardLV(VariableSet activeIn) {\n- LOG.error(this.printErrorLocation() + \"should never call initializeforwardLV for ForStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should never call initializeforwardLV for ForStatement\");\n}\n@Override\npublic VariableSet initializebackwardLV(VariableSet lo) {\n- LOG.error(this.printErrorLocation() + \"should never call initializeforwardLV for ForStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should never call initializeforwardLV for ForStatement\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ForStatementBlock.java", "diff": "@@ -184,7 +184,6 @@ public class ForStatementBlock extends StatementBlock\nForStatement fstmt = (ForStatement)_statements.get(0);\nif (_statements.size() > 
1){\n- LOG.error(_statements.get(0).printErrorLocation() + \"ForStatementBlock should have only 1 statement (for statement)\");\nthrow new LanguageException(_statements.get(0).printErrorLocation() + \"ForStatementBlock should have only 1 statement (for statement)\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/FunctionStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/FunctionStatement.java", "diff": "@@ -32,7 +32,6 @@ public class FunctionStatement extends Statement\n@Override\npublic Statement rewriteStatement(String prefix) {\n- LOG.error(this.printErrorLocation() + \"should not call rewriteStatement for FunctionStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should not call rewriteStatement for FunctionStatement\");\n}\n@@ -114,13 +113,11 @@ public class FunctionStatement extends Statement\n@Override\npublic void initializeforwardLV(VariableSet activeIn) {\n- LOG.error(this.printErrorLocation() + \"should never call initializeforwardLV for FunctionStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should never call initializeforwardLV for FunctionStatement\");\n}\n@Override\npublic VariableSet initializebackwardLV(VariableSet lo) {\n- LOG.error(this.printErrorLocation() + \"should never call initializeforwardLV for FunctionStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should never call initializeforwardLV for FunctionStatement\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/FunctionStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/FunctionStatementBlock.java", "diff": "@@ -45,7 +45,6 @@ public class FunctionStatementBlock extends StatementBlock\npublic VariableSet validate(DMLProgram dmlProg, VariableSet ids, HashMap<String,ConstIdentifier> constVars, boolean conditional)\n{\nif (_statements.size() > 1){\n- LOG.error(this.printBlockErrorLocation() + \"FunctionStatementBlock should have only 1 statement (FunctionStatement)\");\nthrow new LanguageException(this.printBlockErrorLocation() + \"FunctionStatementBlock should have only 1 statement (FunctionStatement)\");\n}\nFunctionStatement fstmt = (FunctionStatement) _statements.get(0);\n@@ -110,10 +109,6 @@ public class FunctionStatementBlock extends StatementBlock\n}\nelse {\n// THROW EXCEPTION -- CANNOT CONVERT\n- LOG.error(curr.printErrorLocation() + \"for function \" + fstmt.getName()\n- + \", return variable \" + curr.getName() + \" value type of \"\n- + curr.getValueType() + \" does not match value type in function signature of \"\n- + returnValue.getValueType() + \" and cannot safely cast value\");\nthrow new LanguageException(curr.printErrorLocation() + \"for function \"\n+ fstmt.getName() + \", return variable \" + curr.getName()\n+ \" value type of \" + curr.getValueType()\n@@ -123,11 +118,6 @@ public class FunctionStatementBlock extends StatementBlock\n}\nif (returnValue.getValueType() == ValueType.INT){\n// THROW EXCEPTION -- CANNOT CONVERT\n- LOG.error(curr.printErrorLocation() + \"for function \" + fstmt.getName()\n- + \", return variable \" + curr.getName() + \" value type of \"\n- + curr.getValueType() + \" does not match value type in function signature of \"\n- + returnValue.getValueType() + \" and cannot safely cast \" + curr.getValueType()\n- + \" as \" + returnValue.getValueType());\nthrow new LanguageException(curr.printErrorLocation() + \"for function \" + fstmt.getName()\n+ \", return variable \" + curr.getName() + \" 
value type of \" + curr.getValueType()\n+ \" does not match value type in function signature of \"\n@@ -137,7 +127,6 @@ public class FunctionStatementBlock extends StatementBlock\n}\n}\nelse {\n- LOG.error(curr.printErrorLocation() + \"for function \" + fstmt.getName() + \", return variable \" + curr.getName() + \" value type of \" + curr.getValueType() + \" does not match value type in function signature of \" + returnValue.getValueType() + \" and cannot safely cast double as int\");\nthrow new LanguageException(curr.printErrorLocation() + \"for function \" + fstmt.getName() + \", return variable \" + curr.getName() + \" value type of \" + curr.getValueType() + \" does not match value type in function signature of \" + returnValue.getValueType() + \" and cannot safely cast \" + curr.getValueType() + \" as \" + returnValue.getValueType());\n}\n}\n@@ -193,7 +182,6 @@ public class FunctionStatementBlock extends StatementBlock\npublic VariableSet initializeforwardLV(VariableSet activeInPassed) {\nFunctionStatement fstmt = (FunctionStatement)_statements.get(0);\nif (_statements.size() > 1){\n- LOG.error(this.printBlockErrorLocation() + \"FunctionStatementBlock should have only 1 statement (while statement)\");\nthrow new LanguageException(this.printBlockErrorLocation() + \"FunctionStatementBlock should have only 1 statement (while statement)\");\n}\n_read = new VariableSet();\n@@ -255,7 +243,6 @@ public class FunctionStatementBlock extends StatementBlock\n@Override\npublic VariableSet analyze(VariableSet loPassed) {\n- LOG.error(this.printBlockErrorLocation() + \"Both liveIn and liveOut variables need to be specified for liveness analysis for FunctionStatementBlock\");\nthrow new LanguageException(this.printBlockErrorLocation() + \"Both liveIn and liveOut variables need to be specified for liveness analysis for FunctionStatementBlock\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/IfStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/IfStatement.java", "diff": "@@ -31,7 +31,6 @@ public class IfStatement extends Statement\n@Override\npublic Statement rewriteStatement(String prefix) {\n- LOG.error(this.printErrorLocation() + \"should not call rewriteStatement for IfStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should not call rewriteStatement for IfStatement\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/IfStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/IfStatementBlock.java", "diff": "@@ -287,7 +287,6 @@ public class IfStatementBlock extends StatementBlock\n{\nIfStatement ifstmt = (IfStatement)_statements.get(0);\nif (_statements.size() > 1){\n- LOG.error(ifstmt.printErrorLocation() + \"IfStatementBlock should have only 1 statement (if statement)\");\nthrow new LanguageException(ifstmt.printErrorLocation() + \"IfStatementBlock should have only 1 statement (if statement)\");\n}\n_read = new VariableSet();\n@@ -420,7 +419,6 @@ public class IfStatementBlock extends StatementBlock\n{\nIfStatement ifstmt = (IfStatement)_statements.get(0);\nif (_statements.size() > 1){\n- LOG.error(ifstmt.printErrorLocation() + \"IfStatementBlock should have only 1 statement (if statement)\");\nthrow new LanguageException(ifstmt.printErrorLocation() + \"IfStatementBlock should have only 1 statement (if statement)\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ImportStatement.java", "new_path": 
"src/main/java/org/apache/sysml/parser/ImportStatement.java", "diff": "@@ -76,7 +76,6 @@ public class ImportStatement extends Statement\n@Override\npublic Statement rewriteStatement(String prefix) {\n- LOG.error(this.printErrorLocation() + \"rewriting for inlining not supported for ImportStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"rewriting for inlining not supported for ImportStatement\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/IterablePredicate.java", "new_path": "src/main/java/org/apache/sysml/parser/IterablePredicate.java", "diff": "@@ -64,7 +64,6 @@ public class IterablePredicate extends Expression\npublic Expression rewriteExpression(String prefix) {\n//DataIdentifier newIterVar = (DataIdentifier)_iterVar.rewriteExpression(prefix);\n//return new IterablePredicate(newIterVar, _from, _to, _increment);\n- LOG.error(this.printErrorLocation() + \"rewriteExpression not supported for IterablePredicate\");\nthrow new LanguageException(this.printErrorLocation() + \"rewriteExpression not supported for IterablePredicate\");\n}\n@@ -193,7 +192,6 @@ public class IterablePredicate extends Expression\n(ident.getDataType() == DataType.SCALAR && (ident.getValueType() == ValueType.BOOLEAN ||\nident.getValueType() == ValueType.STRING || ident.getValueType() == ValueType.OBJECT)) )\n{\n- LOG.error(this.printErrorLocation() + \"expression in iterable predicate in for loop '\" + expr.toString() + \"' must return a numeric scalar\");\nthrow new LanguageException(this.printErrorLocation() + \"expression in iterable predicate in for loop '\" + expr.toString() + \"' must return a numeric scalar\");\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/StatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/StatementBlock.java", "diff": "@@ -1149,8 +1149,6 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\nelse //error and exception if unconditional\n{\nString fullMsg = this.printErrorLocation() + msg;\n-\n- //LOG.error( fullMsg ); //no redundant error\nif( errorCode != null )\nthrow new LanguageException( fullMsg, errorCode );\nelse\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/WhileStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/WhileStatement.java", "diff": "@@ -30,7 +30,6 @@ public class WhileStatement extends Statement\n@Override\npublic Statement rewriteStatement(String prefix) {\n- LOG.error(this.printErrorLocation() + \"should not call rewriteStatement for WhileStatement\");\nthrow new LanguageException(this.printErrorLocation() + \"should not call rewriteStatement for WhileStatement\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/WhileStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/WhileStatementBlock.java", "diff": "@@ -166,7 +166,6 @@ public class WhileStatementBlock extends StatementBlock\nWhileStatement wstmt = (WhileStatement)_statements.get(0);\nif (_statements.size() > 1){\n- LOG.error(_statements.get(0).printErrorLocation() + \"WhileStatementBlock should have only 1 statement (while statement)\");\nthrow new LanguageException(_statements.get(0).printErrorLocation() + \"WhileStatementBlock should have only 1 statement (while statement)\");\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2233] Cleanup redundant error message logging Closes #762.
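For context, a minimal before/after sketch of the cleanup pattern applied across the parser classes above (a schematic excerpt built from the DMLTranslator hunk, not a complete method; LOG, var, and varName come from that surrounding code): the LOG.error call duplicated the exception message, so removing it leaves the thrown exception as the single source of the error text.

    // before: the message surfaces twice (log output + exception)
    if (var == null) {
        LOG.error("variable " + varName + " not live variable for conditional predicate");
        throw new ParseException("variable " + varName + " not live variable for conditional predicate");
    }

    // after: the exception alone carries the message
    if (var == null)
        throw new ParseException("variable " + varName + " not live variable for conditional predicate");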
49,738
17.05.2018 18:13:43
25,200
79a5e80f62d387af8ad3a3cf1967ca8adc9882b9
Fix missing list support in parfor, incl tests This patch adds tests with lists and named lists for parfor loops and dependency analysis and fixes a minor issue with unbounded scoping of temporary list intermediates.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -26,6 +26,7 @@ import java.io.Serializable;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.Collection;\n+import java.util.Collections;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\n@@ -103,6 +104,7 @@ import org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.Data;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.IntObject;\n+import org.apache.sysml.runtime.instructions.cp.ListObject;\nimport org.apache.sysml.runtime.instructions.cp.StringObject;\nimport org.apache.sysml.runtime.instructions.cp.VariableCPInstruction;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\n@@ -1274,6 +1276,9 @@ public class ParForProgramBlock extends ForProgramBlock\n//currently we do not create any unscoped matrix or frame outputs\n//because metadata (e.g., outputinfo) not known at this place.\nbreak;\n+ case LIST:\n+ dataObj = new ListObject(Collections.emptyList());\n+ break;\ncase UNKNOWN:\nbreak;\ndefault:\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "diff": "@@ -37,7 +37,8 @@ public class ListAndStructTest extends AutomatedTestBase\nprivate static final String TEST_NAME2 = \"ListNamed\";\nprivate static final String TEST_NAME3 = \"ListUnnamedFun\";\nprivate static final String TEST_NAME4 = \"ListNamedFun\";\n-\n+ private static final String TEST_NAME5 = \"ListUnnamedParfor\";\n+ private static final String TEST_NAME6 = \"ListNamedParfor\";\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + ListAndStructTest.class.getSimpleName() + \"/\";\n@@ -49,6 +50,8 @@ public class ListAndStructTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"R\" }) );\n}\n@Test\n@@ -91,6 +94,26 @@ public class ListAndStructTest extends AutomatedTestBase\nrunListStructTest(TEST_NAME4, true);\n}\n+ @Test\n+ public void testListUnnamedParFor() {\n+ runListStructTest(TEST_NAME5, false);\n+ }\n+\n+ @Test\n+ public void testListUnnamedParForRewrites() {\n+ runListStructTest(TEST_NAME5, true);\n+ }\n+\n+ @Test\n+ public void testListNamedParFor() {\n+ runListStructTest(TEST_NAME6, false);\n+ }\n+\n+ @Test\n+ public void testListNamedParForRewrites() {\n+ runListStructTest(TEST_NAME6, true);\n+ }\n+\nprivate void runListStructTest(String testname, boolean rewrites)\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" }, { "change_type": "MODIFY", "old_path": 
"src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForDependencyAnalysisTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForDependencyAnalysisTest.java", "diff": "@@ -66,6 +66,8 @@ import org.junit.Test;\n* 49a: dep, 49b: dep\n* * accumulators\n* 53a: no, 53b dep, 53c dep, 53d dep, 53e dep\n+ * * lists\n+ * 54a: no, 54b: dep, 54c: dep\n*/\npublic class ParForDependencyAnalysisTest extends AutomatedTestBase\n{\n@@ -316,6 +318,15 @@ public class ParForDependencyAnalysisTest extends AutomatedTestBase\n@Test\npublic void testDependencyAnalysis53e() { runTest(\"parfor53e.dml\", true); }\n+ @Test\n+ public void testDependencyAnalysis54a() { runTest(\"parfor54a.dml\", false); }\n+\n+ @Test\n+ public void testDependencyAnalysis54b() { runTest(\"parfor54b.dml\", true); }\n+\n+ @Test\n+ public void testDependencyAnalysis54c() { runTest(\"parfor54c.dml\", true); }\n+\nprivate void runTest( String scriptFilename, boolean expectedException ) {\nboolean raisedException = false;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListNamedParfor.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(a=A, b=B, c=c, d=D, e=matrix(5, 3, 3), f=6);\n+\n+for( i in 1:length(X) ) {\n+ tmp = X[i]\n+ if( !exists(\"tmp\") )\n+ print(\"ERROR: non-existing entry \"+i );\n+}\n+\n+R = as.matrix(sum(as.matrix(X[['e']])));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListNamedParfor.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(a=A, b=B, c=c, d=D, e=matrix(5, 3, 3), f=6);\n+for( i in 1:length(X) ) {\n+ tmp = X[i];\n+ if( !exists(tmp) )\n+ print(\"ERROR: non-existing entry \"+i );\n+}\n+\n+R = as.matrix(sum(as.matrix(X['e'])));\n+\n+write(R, $1);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListUnnamedParfor.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(A, B, c, D, matrix(5, 3, 3), 6);\n+\n+for( i in 1:length(X) ) {\n+ tmp = X[i]\n+ if( !exists(\"tmp\") )\n+ print(\"ERROR: non-existing entry \"+i );\n+}\n+\n+R = as.matrix(sum(as.matrix(X[[5]])));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListUnnamedParfor.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = matrix(1, 10, 10);\n+B = matrix(2, 10, 10);\n+c = 3;\n+D = matrix(4, 10, 10);\n+\n+X = list(A, B, c, D, matrix(5, 3, 3), 6);\n+\n+parfor( i in 1:length(X) ) {\n+ tmp = X[i];\n+ if( !exists(tmp) )\n+ print(\"ERROR: non-existing entry \"+i );\n+}\n+\n+R = as.matrix(sum(as.matrix(X[5])));\n+\n+write(R, $1);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor54a.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+A = matrix(7, rows=2, cols=2);\n+B = matrix(3, rows=2, cols=2);\n+C = list(A, B);\n+parfor( i in 1:2 ) {\n+ print(sum(as.matrix(C[i])));\n+}\n+print(sum(as.matrix(C[1])));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor54b.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+A = matrix(7, rows=2, cols=2);\n+B = matrix(3, rows=2, cols=2);\n+C = list(A, B);\n+parfor( i in 1:2 ) {\n+ C[i] = as.matrix(C[i])+7;\n+}\n+print(sum(as.matrix(C[1])));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor54c.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+A = matrix(7, rows=2, cols=2);\n+B = matrix(3, rows=2, cols=2);\n+C = list(A, B);\n+parfor( i in 1:2 ) {\n+ C = list(as.matrix(C[i])+7);\n+}\n+print(sum(as.matrix(C[1])));\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2327] Fix missing list support in parfor, incl tests This patch adds tests with lists and named lists for parfor loops and dependency analysis and fixes a minor issue with unbounded scoping of temporary list intermediates.
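A short sketch of the scoping fix mentioned above, paraphrased from the ParForProgramBlock hunk (the enclosing switch over output value types is abbreviated, and dataObj is the local variable shown in the diff): unscoped LIST outputs now receive an empty ListObject placeholder instead of falling through.

    // schematic excerpt: creating placeholders for unscoped parfor outputs
    // (matrix/frame cases elided; no objects are created for them here
    // because metadata such as outputinfo is not yet known at this point)
    case LIST:
        dataObj = new ListObject(Collections.emptyList());
        break;
    case UNKNOWN:
        break;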
49,738
17.05.2018 20:39:24
25,200
5a155f3d2402b1d77ca90a1d024cb04d6a2ca80d
New sampling-based mm sparsity estimator This patch introduces an additional baseline sparsity estimator based on random sampling of columns from A and aligned rows from B for estimating the output sparsity of AB.
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.hops.estim;\n+\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.DenseBlock;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixAgg;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.SparseBlock;\n+import org.apache.sysml.runtime.util.UtilFunctions;\n+\n+/**\n+ * This estimator implements an approach based on row/column sampling\n+ * Yongyang Yu, MingJie Tang, Walid G. Aref, Qutaibah M. Malluhi, Mostafa M. Abbas, Mourad Ouzzani:\n+ * In-Memory Distributed Matrix Computation Processing and Optimization. ICDE 2017: 1047-1058\n+ *\n+ * The basic idea is to draw random samples of aligned columns SA and rows SB,\n+ * and compute the output nnz as max(nnz(SA_i)*nnz(SB_i)). 
However, this estimator is\n+ * biased toward underestimation as the maximum is unlikely sampled and collisions are\n+ * not accounted for.\n+ */\n+public class EstimatorSample extends SparsityEstimator\n+{\n+ private static final double SAMPLE_FRACTION = 0.1; //10%\n+\n+ private final double _frac;\n+\n+ public EstimatorSample() {\n+ this(SAMPLE_FRACTION);\n+ }\n+\n+ public EstimatorSample(double sampleFrac) {\n+ if( sampleFrac < 0 || sampleFrac > 1.0 )\n+ throw new DMLRuntimeException(\"Invalid sample fraction: \"+sampleFrac);\n+ _frac = sampleFrac;\n+ }\n+\n+ @Override\n+ public double estim(MMNode root) {\n+ LOG.warn(\"Recursive estimates not supported by EstimatorSample, falling back to EstimatorBasicAvg.\");\n+ return new EstimatorBasicAvg().estim(root);\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m1, MatrixBlock m2) {\n+ //get sampled indexes\n+ int k = m1.getNumColumns();\n+ int[] ix = UtilFunctions.getSortedSampleIndexes(\n+ k, (int)Math.max(k*_frac, 1));\n+ //compute output sparsity\n+ int[] cnnz = computeColumnNnz(m1, ix);\n+ long nnzOut = 0;\n+ for(int i=0; i<ix.length; i++)\n+ nnzOut = Math.max(nnzOut, cnnz[i] * m2.recomputeNonZeros(ix[i], ix[i]));\n+ return OptimizerUtils.getSparsity(\n+ m1.getNumRows(), m2.getNumColumns(), nnzOut);\n+ }\n+\n+ @Override\n+ public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n+ LOG.warn(\"Meta-data-only estimates not supported by EstimatorSample, falling back to EstimatorBasicAvg.\");\n+ return new EstimatorBasicAvg().estim(mc1, mc2);\n+ }\n+\n+ private int[] computeColumnNnz(MatrixBlock in, int[] ix) {\n+ int[] nnz = new int[in.getNumColumns()];\n+ //count column nnz brute force or selective\n+ if( in.isInSparseFormat() ) {\n+ SparseBlock sblock = in.getSparseBlock();\n+ for( int i=0; i<in.getNumRows(); i++ ) {\n+ if( sblock.isEmpty(i) ) continue;\n+ LibMatrixAgg.countAgg(sblock.values(i), nnz,\n+ sblock.indexes(i), sblock.pos(i), sblock.size(i));\n+ }\n+ }\n+ else {\n+ DenseBlock dblock = in.getDenseBlock();\n+ for( int i=0; i<in.getNumRows(); i++ ) {\n+ double[] avals = dblock.values(i);\n+ int aix = dblock.pos(i);\n+ for( int j=0; j<in.getNumColumns(); j++ )\n+ nnz[j] += (avals[aix+j] != 0) ? 
1 : 0;\n+ }\n+ }\n+\n+ //copy nnz into reduced vector\n+ int[] ret = new int[ix.length];\n+ for(int i=0; i<ix.length; i++)\n+ ret[i] = nnz[ix[i]];\n+ return ret;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/estim/CompressedSizeEstimatorSample.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/estim/CompressedSizeEstimatorSample.java", "diff": "@@ -27,13 +27,13 @@ import org.apache.commons.logging.LogFactory;\nimport org.apache.commons.math3.analysis.UnivariateFunction;\nimport org.apache.commons.math3.analysis.solvers.UnivariateSolverUtils;\nimport org.apache.commons.math3.distribution.ChiSquaredDistribution;\n-import org.apache.commons.math3.random.RandomDataGenerator;\nimport org.apache.sysml.runtime.compress.BitmapEncoder;\nimport org.apache.sysml.runtime.compress.ReaderColumnSelection;\nimport org.apache.sysml.runtime.compress.CompressedMatrixBlock;\nimport org.apache.sysml.runtime.compress.UncompressedBitmap;\nimport org.apache.sysml.runtime.compress.utils.DblArray;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.util.UtilFunctions;\npublic class CompressedSizeEstimatorSample extends CompressedSizeEstimator\n{\n@@ -303,12 +303,8 @@ public class CompressedSizeEstimatorSample extends CompressedSizeEstimator\n* @return sorted array of integers\n*/\nprivate static int[] getSortedUniformSample(int range, int smplSize) {\n- if (smplSize == 0)\n- return new int[] {};\n- RandomDataGenerator rng = new RandomDataGenerator();\n- int[] sample = rng.nextPermutation(range, smplSize);\n- Arrays.sort(sample);\n- return sample;\n+ if (smplSize == 0) return new int[] {};\n+ return UtilFunctions.getSortedSampleIndexes(range, smplSize);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/UtilFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/UtilFunctions.java", "diff": "@@ -29,6 +29,7 @@ import java.util.stream.Stream;\nimport java.util.stream.StreamSupport;\nimport org.apache.commons.lang.ArrayUtils;\n+import org.apache.commons.math3.random.RandomDataGenerator;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.matrix.MetaDataNumItemsByEachReducer;\nimport org.apache.sysml.runtime.matrix.data.FrameBlock;\n@@ -515,8 +516,7 @@ public class UtilFunctions\nreturn 0; //equal\n}\n- public static boolean isIntegerNumber( String str )\n- {\n+ public static boolean isIntegerNumber( String str ) {\nbyte[] c = str.getBytes();\nfor( int i=0; i<c.length; i++ )\nif( c[i] < 48 || c[i] > 57 )\n@@ -524,8 +524,14 @@ public class UtilFunctions\nreturn true;\n}\n- public static byte max( byte[] array )\n- {\n+ public static int[] getSortedSampleIndexes(int range, int sampleSize) {\n+ RandomDataGenerator rng = new RandomDataGenerator();\n+ int[] sample = rng.nextPermutation(range, sampleSize);\n+ Arrays.sort(sample);\n+ return sample;\n+ }\n+\n+ public static byte max( byte[] array ) {\nbyte ret = Byte.MIN_VALUE;\nfor( int i=0; i<array.length; i++ )\nret = (array[i]>ret)?array[i]:ret;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "diff": "@@ -25,6 +25,7 @@ import org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport 
org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.EstimatorSample;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -118,6 +119,26 @@ public class OuterProductTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorMatrixHistogram(true), m, k, n, case2);\n}\n+ @Test\n+ public void testSamplingDefCase1() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testSamplingDefCase2() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testSampling20Case1() {\n+ runSparsityEstimateTest(new EstimatorSample(0.2), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testSampling20Case2() {\n+ runSparsityEstimateTest(new EstimatorSample(0.2), m, k, n, case2);\n+ }\n+\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 3);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "diff": "@@ -25,6 +25,7 @@ import org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.EstimatorSample;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -123,6 +124,26 @@ public class SquaredProductTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorMatrixHistogram(true), m, k, n, case2);\n}\n+ @Test\n+ public void testSamplingDefCase1() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testSamplingDefCase2() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, case2);\n+ }\n+\n+ @Test\n+ public void testSampling20Case1() {\n+ runSparsityEstimateTest(new EstimatorSample(0.2), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testSampling20Case2() {\n+ runSparsityEstimateTest(new EstimatorSample(0.2), m, k, n, case2);\n+ }\n+\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 3);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2329] New sampling-based mm sparsity estimator This patch introduces an additional baseline sparsity estimator based on random sampling of columns from A and aligned rows from B for estimating the output sparsity of AB.
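A minimal, self-contained illustration of the sampling idea behind EstimatorSample above. Note the assumptions: plain double[][] inputs and a stream-based sample stand in for the MatrixBlock, OptimizerUtils, and UtilFunctions.getSortedSampleIndexes APIs used in the actual diff, and the class and method names are made up for this sketch.

    import java.util.Random;

    public class SampleEstimSketch {
        // estimate the sparsity of A %*% B from aligned column/row samples
        public static double estim(double[][] A, double[][] B, double frac) {
            int m = A.length, k = A[0].length, n = B[0].length;
            // sorted random sample of column indexes of A (= row indexes of B)
            int[] ix = new Random(7).ints(0, k).distinct()
                .limit(Math.max((int)(k * frac), 1)).sorted().toArray();
            long nnzOut = 0;
            for (int i : ix) {
                long cnnzA = 0, rnnzB = 0;
                for (int r = 0; r < m; r++)
                    cnnzA += (A[r][i] != 0) ? 1 : 0; // nnz of column A[,i]
                for (int c = 0; c < n; c++)
                    rnnzB += (B[i][c] != 0) ? 1 : 0; // nnz of row B[i,]
                // biased toward underestimation: the true maximum is
                // unlikely sampled and collisions are not accounted for
                nnzOut = Math.max(nnzOut, cnnzA * rnnzB);
            }
            return Math.min(1.0, (double) nnzOut / m / n);
        }

        public static void main(String[] args) {
            double[][] A = {{1, 0}, {0, 2}}, B = {{0, 3}, {4, 0}};
            System.out.println(estim(A, B, 1.0)); // 0.25 (true sparsity: 0.5)
        }
    }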
49,738
20.05.2018 21:58:57
25,200
73f9d417d8c36de009b4d2071c5e5cd6ae4fcfc7
Fix DNN bias_multiply correctness over sparse inputs This patch fixes incorrect index computation within the DNN-specific bias_multiply operation over sparse inputs. Specifically, the bias lookup accessed the wrong bias terms.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -377,31 +377,30 @@ public class LibMatrixDNN {\n}\n}\nelse {\n+ SparseBlock sblock = outputBlock.sparseBlock;\n// First delete those elements which will become zero\nfor(int k = 0; k < K; k++) {\nif(biasArr[k] == 0) {\nfor(int n = 0; n < N; n++) {\n- outputBlock.sparseBlock.deleteIndexRange(n, k*PQ, (k+1)*PQ);\n+ if( sblock.isEmpty(n) ) continue;\n+ sblock.deleteIndexRange(n, k*PQ, (k+1)*PQ);\n}\n}\n}\n// Then perform bias_multiply for non-zero bias entries\nfor(int n = 0; n < N; n++) {\n- if( !outputBlock.sparseBlock.isEmpty(n) ) {\n- int apos = outputBlock.sparseBlock.pos(n);\n- int alen = outputBlock.sparseBlock.size(n);\n- int[] aix = outputBlock.sparseBlock.indexes(n);\n- double[] avals = outputBlock.sparseBlock.values(n);\n-\n+ if( sblock.isEmpty(n) ) continue;\n+ int apos = sblock.pos(n);\n+ int alen = sblock.size(n);\n+ int[] aix = sblock.indexes(n);\n+ double[] avals = sblock.values(n);\nfor(int j=apos; j<apos+alen; j++) {\n- // Since aix[j] => KPQ\n- int k = aix[j] % PQ;\n+ int k = aix[j] / PQ; //aix[j] KPQ\nif(biasArr[k] != 0)\navals[j] *= biasArr[k];\n}\n}\n}\n- }\n//post-processing: maintain nnz\nparams.output.recomputeNonZeros();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2338] Fix DNN bias_multiply correctness over sparse inputs This patch fixes incorrect index computation within the DNN-specific bias_multiply operation over sparse inputs. Specifically, the bias lookup accessed the wrong bias terms.
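A small sketch of the index arithmetic behind the fix above: the output is laid out as N x (K*PQ), so a flat column index j encodes the channel in its high-order part, k = j / PQ. The buggy lookup used j % PQ, which yields a spatial position instead of a channel. The helper below is illustrative only, not SystemML code.

    public class BiasIndexSketch {
        // map a flat column index j in [0, K*PQ) to its channel
        static int channelOf(int j, int PQ) {
            return j / PQ; // fixed: the high-order part is the channel
        }
        // the former (incorrect) lookup returned a spatial offset
        static int buggyChannelOf(int j, int PQ) {
            return j % PQ;
        }
        public static void main(String[] args) {
            int PQ = 4; // e.g., a 2x2 spatial output per channel
            int j = 9;  // channel 2, spatial position 1
            System.out.println(channelOf(j, PQ));      // 2
            System.out.println(buggyChannelOf(j, PQ)); // 1 (wrong bias term)
        }
    }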
49,719
21.05.2018 13:08:31
25,200
f7078c292fe46331a91e1b18394e5579c25cf324
[MINOR] switch build/test server to travis-ci Closes
[ { "change_type": "ADD", "old_path": null, "new_path": ".travis.yml", "diff": "+#\n+# Licensed to the Apache Software Foundation (ASF) under one or more\n+# contributor license agreements. See the NOTICE file distributed with\n+# this work for additional information regarding copyright ownership.\n+# The ASF licenses this file to You under the Apache License, Version 2.0\n+# (the \"License\"); you may not use this file except in compliance with\n+# the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+#\n+\n+dist: trusty\n+\n+language: java\n+\n+jdk:\n+ - oraclejdk8\n+\n+addons:\n+ apt:\n+ sources:\n+ - r-packages-trusty\n+ packages:\n+ - r-base-dev\n+\n+cache:\n+ apt: true\n+ directories:\n+# caching .m2 causes an error loading hadoop-yarn-common-2.6.0.jar. Not sure why.\n+# - ${HOME}/.m2\n+ - ${HOME}/R\n+ - /usr/local/lib/R/site-library\n+\n+install:\n+ - sudo Rscript ./src/test/scripts/installDependencies.R\n+\n+before_script:\n+# this is not needed anymore since adding authentication object in code for running hadoop/spark local\n+# - chmod -R 755 *\n+\n+script:\n+# - mvn clean verify jacoco:report coveralls:report\n+ - mvn clean verify\n+\n+after_success:\n+# - mvn test jacoco:report coveralls:report\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "conf/log4j-silent.properties", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Define some default values that can be overridden by system properties\n+hadoop.root.logger=INFO,console\n+hadoop.log.dir=.\n+hadoop.log.file=hadoop.log\n+hadoop.security.logger=OFF\n+\n+# Security appender\n+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\n+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n+\n+log4j.logger.org.apache.spark=ERROR\n+\n+#\n+# Job Summary Appender\n+#\n+# Use following logger to send summary to separate file defined by\n+# hadoop.mapreduce.jobsummary.log.file rolled daily:\n+# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n+#\n+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\n+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\n+\n+# Define the root logger to the system property \"hadoop.root.logger\".\n+log4j.rootLogger=${hadoop.root.logger}, EventCounter\n+\n+# Logging Threshold\n+log4j.threshold=ALL\n+\n+#\n+# Guardim Proxy setup - HDFS, MapReduce and Hadoop RPC\n+#\n+log4j.appender.GuardiumProxyAppender=org.apache.log4j.net.SocketAppender\n+log4j.appender.GuardiumProxyAppender.RemoteHost=\n+log4j.appender.GuardiumProxyAppender.Port=\n+log4j.appender.GuardiumProxyAppender.RecoveryFile=audit-${hadoop.log.file}\n+log4j.appender.GuardiumProxyAppender.Threshold=INFO\n+\n+# Hdfs audit logs\n+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\n+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hadoop.security.logger}\n+\n+hdfs.audit.logger=INFO,NullAppender\n+hdfs.audit.log.maxfilesize=256MB\n+hdfs.audit.log.maxbackupindex=20\n+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender\n+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\n+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout\n+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}\n+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}\n+\n+# MapReduce audit logs\n+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\n+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${hadoop.security.logger}\n+\n+mapred.audit.logger=INFO,NullAppender\n+mapred.audit.log.maxfilesize=256MB\n+mapred.audit.log.maxbackupindex=20\n+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender\n+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\n+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\n+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}\n+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}\n+\n+# Hadoop RPC audit 
logs\n+log4j.additivity.SecurityLogger=false\n+log4j.logger.SecurityLogger=${hadoop.security.logger}\n+\n+log4j.appender.hadoopaudit=org.apache.log4j.DailyRollingFileAppender\n+log4j.appender.hadoopaudit.DatePattern='.'yyyy-MM-dd\n+log4j.appender.hadoopaudit.File=${hadoop.log.dir}/audit-${hadoop.log.file}\n+log4j.appender.hadoopaudit.Append=true\n+log4j.appender.hadoopaudit.layout=org.apache.log4j.PatternLayout\n+log4j.appender.hadoopaudit.layout.ConversionPattern=%d{ISO8601} %5p %c - %m%n\n+\n+#\n+# Daily Rolling File Appender\n+#\n+\n+#log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n+#log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n+\n+# Rollver at midnight\n+#log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n+\n+# 30-day backup\n+#log4j.appender.DRFA.MaxBackupIndex=30\n+#log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n+\n+# Pattern format: Date LogLevel LoggerName LogMessage\n+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+# Debugging Pattern format\n+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n+\n+#\n+# console\n+# Add \"console\" to rootlogger above if you want to use this\n+#\n+\n+log4j.appender.console=org.apache.log4j.ConsoleAppender\n+log4j.appender.console.target=System.err\n+log4j.appender.console.layout=org.apache.log4j.PatternLayout\n+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n+\n+#\n+# TaskLog Appender\n+#\n+\n+#Default values\n+hadoop.tasklog.taskid=null\n+hadoop.tasklog.iscleanup=false\n+hadoop.tasklog.noKeepSplits=4\n+hadoop.tasklog.totalLogFileSize=100\n+hadoop.tasklog.purgeLogSplits=true\n+hadoop.tasklog.logsRetainHours=12\n+\n+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogSocketAppender\n+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\n+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\n+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n+\n+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout\n+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+\n+#\n+#Security audit appender\n+#\n+\n+hadoop.security.log.file=SecurityAuth.audit\n+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\n+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\n+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n+\n+#\n+# Rolling File Appender\n+#\n+\n+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n+# Logfile size and and 30-day backups\n+#log4j.appender.RFA.MaxFileSize=1MB\n+#log4j.appender.RFA.MaxBackupIndex=30\n+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\n+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n+\n+\n+#\n+# Rolling File Appender\n+#\n+\n+log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n+# Logfile size and and 30-day backups\n+log4j.appender.RFA.MaxFileSize=10MB\n+log4j.appender.RFA.MaxBackupIndex=3\n+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n+\n+#\n+#Logger for streaming Job 
Configuration\n+#\n+log4j.logger.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=INFO,${SAJC}\n+log4j.additivity.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=false\n+\n+#\n+#Socket Appender for streaming Job Configuration\n+#\n+log4j.appender.job.conf=org.apache.log4j.net.SocketAppender\n+log4j.appender.job.conf.RemoteHost=localhost\n+log4j.appender.job.conf.Port=${JOBCONF_LOGGING_PORT}\n+log4j.appender.job.conf.layout=org.apache.log4j.PatternLayout\n+log4j.appender.job.conf.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+log4j.appender.job.conf.appender.ReconnectionDelay=120000\n+\n+\n+#\n+#Logger for streaming task attempt logs\n+#\n+log4j.logger.org.apache.hadoop.mapred.TaskLogSocketAppender=INFO,${SATA}\n+log4j.additivity.org.apache.hadoop.mapred.TaskLogSocketAppender=false\n+\n+#\n+#Socket appender for streaming task attempt logs\n+#\n+log4j.appender.task.attempt.log=org.apache.log4j.net.SocketAppender\n+log4j.appender.task.attempt.log.RemoteHost=localhost\n+log4j.appender.task.attempt.log.Port=${TASKATTEMPT_LOGGING_PORT}\n+log4j.appender.task.attempt.log.layout=org.apache.log4j.PatternLayout\n+log4j.appender.task.attempt.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+log4j.appender.task.attempt.log.appender.ReconnectionDelay=120000\n+\n+#\n+#Socket Appender for Streaming NameNode,SecondaryNameNode and JobTracker Logs\n+#\n+log4j.appender.socket.appender=org.apache.log4j.net.SocketAppender\n+log4j.appender.socket.appender.RemoteHost=localhost\n+log4j.appender.socket.appender.Port=${HADOOP_LOGGING_PORT}\n+log4j.appender.socket.appender.layout=org.apache.log4j.PatternLayout\n+log4j.appender.socket.appender.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+log4j.appender.socket.appender.ReconnectionDelay=120000\n+\n+#\n+#Logger for streaming Job History Logs\n+#\n+log4j.logger.JobHistoryLogs=INFO,${SAJH}\n+log4j.additivity.JobHistoryLogs=false\n+\n+#\n+#Socket Appender for Job History Logs\n+#\n+log4j.appender.job.history.log=org.apache.log4j.net.SocketAppender\n+log4j.appender.job.history.log.RemoteHost=localhost\n+log4j.appender.job.history.log.Port=${JOBHISTORY_LOGGING_PORT}\n+log4j.appender.job.history.log.layout=org.apache.log4j.PatternLayout\n+log4j.appender.job.history.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n+log4j.appender.job.history.appender.ReconnectionDelay=120000\n+\n+\n+\n+# Custom Logging levels\n+\n+hadoop.metrics.log.level=INFO\n+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n+\n+# Jets3t library\n+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n+\n+#\n+# Null Appender\n+# Trap security logger on the hadoop client side\n+#\n+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n+\n+#\n+# Event Counter Appender\n+# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n+#\n+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n+\n+#\n+# Job Summary Appender\n+#\n+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}\n+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout\n+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: 
%m%n\n+log4j.appender.JSA.DatePattern=.yyyy-MM-dd\n+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}\n+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false\n" }, { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n- ~ Licensed to the Apache Software Foundation (ASF) under one or more\n- ~ contributor license agreements. See the NOTICE file distributed with\n- ~ this work for additional information regarding copyright ownership.\n- ~ The ASF licenses this file to You under the Apache License, Version 2.0\n- ~ (the \"License\"); you may not use this file except in compliance with\n- ~ the License. You may obtain a copy of the License at\n- ~\n- ~ http://www.apache.org/licenses/LICENSE-2.0\n- ~\n- ~ Unless required by applicable law or agreed to in writing, software\n- ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n- ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n- ~ See the License for the specific language governing permissions and\n- ~ limitations under the License.\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n-->\n-<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n+<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n+ xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n<modelVersion>4.0.0</modelVersion>\n<parent>\n<groupId>org.apache</groupId>\n<forkCount>0.5C</forkCount>\n<reuseForks>false</reuseForks>\n+ <!-- Limit Spark log output-->\n+ <systemPropertyVariables>\n+ <log4j.configuration>file:${basedir}/conf/log4j-silent.properties</log4j.configuration>\n+ </systemPropertyVariables>\n+\n<!-- Last argument prevents Java from popping up lots of windows on\nMacOS -->\n<argLine>-Dfile.encoding=UTF-8 -Xmx2g -Xms2g -Xmn200m\n<includes>\n<include>${gpuTestsPath}</include> <!-- Path for GPU integration tests, enabled for gpuTests profile -->\n- <include>**/integration/applications/**/*Suite.java</include>\n- <include>**/integration/conversion/*Suite.java</include>\n- <include>**/integration/functions/data/*Suite.java</include>\n+ <!-- <include>**/integration/applications/**/*Suite.java</include> -->\n+ <!-- <include>**/integration/conversion/*Suite.java</include> -->\n+ <!-- <include>**/integration/functions/data/*Suite.java</include> -->\n<include>**/integration/functions/sparse/*Suite.java</include>\n- <include>**/integration/functions/codegenalg/*Suite.java</include>\n- <include>**/integration/functions/**/*Test*.java</include>\n+ <!-- <include>**/integration/functions/codegenalg/*Suite.java</include> -->\n+ <!-- <include>**/integration/functions/**/*Test*.java</include> -->\n<include>**/integration/mlcontext/*Suite.java</include>\n- <include>**/integration/mlcontext/algorithms/*Suite.java</include>\n+ <!-- <include>**/integration/mlcontext/algorithms/*Suite.java</include> -->\n<include>**/integration/scripts/nn/*Suite.java</include>\n- <include>**/integration/scalability/**/*Test.java</include>\n+ <!-- <include>**/integration/scalability/**/*Test.java</include> -->\n</includes>\n<excludes>\n<phase>package</phase>\n<configuration>\n<target name=\"copy and rename JAR\">\n- <copy file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\" tofile=\"${project.build.directory}/SystemML.jar\" />\n+ <copy\n+ file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\"\n+ tofile=\"${project.build.directory}/SystemML.jar\" />\n</target>\n</configuration>\n<goals>\n</execution>\n</executions>\n</plugin>\n+ <plugin>\n+ <groupId>org.jacoco</groupId>\n+ <artifactId>jacoco-maven-plugin</artifactId>\n+ <version>0.7.6.201602180812</version>\n+ <executions>\n+ <execution>\n+ <id>prepare-agent</id>\n+ <goals>\n+ <goal>prepare-agent</goal>\n+ </goals>\n+ </execution>\n+ </executions>\n+ </plugin>\n+ <plugin>\n+ <groupId>org.eluder.coveralls</groupId>\n+ <artifactId>coveralls-maven-plugin</artifactId>\n+ <version>4.3.0</version>\n+ </plugin>\n</plugins>\n</build>\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/installDependencies.R", "new_path": "src/test/scripts/installDependencies.R", "diff": "custom_install <- function(pkg) {\nif(!is.element(pkg, installed.packages()[,1])) {\n- install.packages(pkg, repos=\"http://cran.stat.ucla.edu/\");\n+ install.packages(pkg, 
repos=\"https://cran.cnr.berkeley.edu/\");\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] switch build/test server to travis-ci Closes #765.
49,738
22.05.2018 13:48:57
25,200
4b6b8d2df3510f51190b8550fbd7f603b248321d
Fix missing block merge for large dense blocks >16GB This patch adds the missing support for large (i.e., partitioned) dense blocks >16GB to the primitive for merging matrix blocks, as used during various distributed operations and parfor result merge, which might work with very large result variables.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1617,77 +1617,86 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nnonZeros = nnz;\n}\n- private void mergeIntoDense(MatrixBlock that)\n- {\n+ private void mergeIntoDense(MatrixBlock that) {\n+ DenseBlock a = getDenseBlock();\nif( that.sparse ) { //DENSE <- SPARSE\n- double[] a = getDenseBlockValues();\nSparseBlock b = that.sparseBlock;\nint m = rlen;\n- int n = clen;\n- for( int i=0, aix=0; i<m; i++, aix+=n ) {\n+ for( int i=0; i<m; i++ ) {\nif( b.isEmpty(i) ) continue;\nint bpos = b.pos(i);\nint blen = b.size(i);\nint[] bix = b.indexes(i);\n- double[] bval = b.values(i);\n- for( int j=bpos; j<bpos+blen; j++ )\n- if( bval[j] != 0 )\n- a[ aix + bix[j] ] = bval[j];\n+ double[] avals = a.values(i);\n+ double[] bvals = b.values(i);\n+ int aix = a.pos(i);\n+ for( int j=bpos; j<bpos+blen; j++ ) {\n+ double bval = bvals[j];\n+ if( bval != 0 )\n+ avals[aix+bix[j]] = bval;\n+ }\n}\n}\nelse { //DENSE <- DENSE\n- double[] a = getDenseBlockValues();\n- double[] b = that.getDenseBlockValues();\n- int len = rlen * clen;\n- for( int i=0; i<len; i++ )\n- a[i] = ( b[i] != 0 ) ? b[i] : a[i];\n+ DenseBlock b = that.getDenseBlock();\n+ for(int bi=0; bi<a.numBlocks(); bi++) {\n+ double[] avals = a.valuesAt(bi);\n+ double[] bvals = b.valuesAt(bi);\n+ int blen = a.size(bi);\n+ for( int j=0; j<blen; j++ )\n+ avals[j] = bvals[j]!=0 ? bvals[j] : avals[j];\n+ }\n}\n}\n- private void mergeIntoDensePar(MatrixBlock that)\n- {\n+ private void mergeIntoDensePar(MatrixBlock that) {\n+ DenseBlock a = getDenseBlock();\nif( that.sparse ) { //DENSE <- SPARSE\n- double[] a = getDenseBlockValues();\nSparseBlock b = that.sparseBlock;\n- IntStream.range(0, rlen).parallel().forEach(i -> {\n+ int roff = 0; //row offset\n+ for( int bi=0; bi<a.numBlocks(); bi++ ) {\n+ double[] avals = a.valuesAt(bi);\n+ int alen = a.blockSize(bi);\n+ final int lroff = roff; //final for lambda\n+ IntStream.range(lroff, lroff+alen).parallel().forEach(i -> {\nif( b.isEmpty(i) ) return;\n- int aix = i*clen;\n+ int aix = (i-lroff)*clen;\nint bpos = b.pos(i);\nint blen = b.size(i);\nint[] bix = b.indexes(i);\ndouble[] bval = b.values(i);\nfor( int j=bpos; j<bpos+blen; j++ )\nif( bval[j] != 0 )\n- a[ aix + bix[j] ] = bval[j];\n+ avals[aix+bix[j]] = bval[j];\n});\n+ roff += alen;\n+ }\n}\nelse { //DENSE <- DENSE\n- double[] a = getDenseBlockValues();\n- double[] b = that.getDenseBlockValues();\n- Arrays.parallelSetAll(a, i -> (b[i]!=0) ? b[i] : a[i]);\n+ DenseBlock b = that.getDenseBlock();\n+ for(int bi=0; bi<a.numBlocks(); bi++) {\n+ double[] avals = a.valuesAt(bi);\n+ double[] bvals = b.valuesAt(bi);\n+ Arrays.parallelSetAll(avals,\n+ i -> (bvals[i]!=0) ? 
bvals[i] : avals[i]);\n+ }\n}\n}\n- private void mergeIntoSparse(MatrixBlock that, boolean appendOnly)\n- {\n+ private void mergeIntoSparse(MatrixBlock that, boolean appendOnly) {\nSparseBlock a = sparseBlock;\nfinal boolean COO = (a instanceof SparseBlockCOO);\nfinal int m = rlen;\nfinal int n = clen;\n-\n- if( that.sparse ) //SPARSE <- SPARSE\n- {\n+ if( that.sparse ) { //SPARSE <- SPARSE\nSparseBlock b = that.sparseBlock;\n-\n- for( int i=0; i<m; i++ )\n- {\n+ for( int i=0; i<m; i++ ) {\nif( b.isEmpty(i) ) continue;\nif( !COO && a.isEmpty(i) ) {\n//copy entire sparse row (no sort required)\na.set(i, b.get(i), true);\n}\n- else\n- {\n+ else {\nboolean appended = false;\nint bpos = b.pos(i);\nint blen = b.size(i);\n@@ -1705,15 +1714,16 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n}\n}\n- else //SPARSE <- DENSE\n- {\n- double[] b = that.getDenseBlockValues();\n-\n- for( int i=0, bix=0; i<m; i++, bix+=n ) {\n+ else { //SPARSE <- DENSE\n+ DenseBlock b = that.getDenseBlock();\n+ for( int i=0; i<m; i++ ) {\n+ double[] bvals = b.values(i);\n+ int bix = b.pos(i);\nboolean appended = false;\nfor( int j=0; j<n; j++ ) {\n- if( b[bix+j] != 0 ) {\n- appendValue(i, j, b[bix+j]); //incl alloc\n+ double bval = bvals[bix+j];\n+ if( bval != 0 ) {\n+ appendValue(i, j, bval); //incl alloc\nappended = true;\n}\n}\n@@ -1722,7 +1732,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\na.sort(i);\n}\n}\n-\n//full sort of coordinate blocks\nif( COO && !appendOnly )\na.sort();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2247] Fix missing block merge for large dense blocks >16GB This patch adds the missing support for large (i.e., partitioned) dense blocks >16GB to the primitive for merging matrix blocks as used during various distributed operations and parfor result merge, where we might work with very large result variables.
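The essence of this fix is that a partitioned dense block stores its values in several physical double[] chunks, so a merge must walk chunk-by-chunk instead of assuming one contiguous array. A minimal sketch of that pattern, using a simplified hypothetical interface (not the actual SystemML DenseBlock API) and assuming both operands share the same chunking:

```java
// Simplified sketch, NOT the real SystemML API: a partitioned dense block
// exposes its values as several physical chunks rather than one double[].
interface PartitionedDense {
    int numBlocks();            // number of physical chunks
    double[] valuesAt(int b);   // value array of chunk b
    int sizeAt(int b);          // used length of chunk b (hypothetical helper)
}

final class DenseMergeSketch {
    // merge 'src' into 'dst': non-zeros of src overwrite dst entries;
    // assumes both blocks are partitioned identically (true for result merge)
    static void merge(PartitionedDense dst, PartitionedDense src) {
        for (int b = 0; b < dst.numBlocks(); b++) {
            double[] d = dst.valuesAt(b);
            double[] s = src.valuesAt(b);
            int len = dst.sizeAt(b);
            for (int i = 0; i < len; i++)
                if (s[i] != 0)
                    d[i] = s[i];
        }
    }
}
```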
49,738
22.05.2018 18:08:49
25,200
bf72cc475b20e0ab4db16d102cb24973913c0909
[MINOR] Add more test suites to integration tests (compress, jmlc) This patch adds the test suites for compression and jmlc to our automated CI tests because they increase coverage (1500 tests) at low overhead (few minutes).
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<!-- <include>**/integration/applications/**/*Suite.java</include> -->\n<!-- <include>**/integration/conversion/*Suite.java</include> -->\n<!-- <include>**/integration/functions/data/*Suite.java</include> -->\n+ <include>**/integration/functions/compress/*Suite.java</include>\n+ <include>**/integration/functions/jmlc/*Suite.java</include>\n<include>**/integration/functions/sparse/*Suite.java</include>\n<!-- <include>**/integration/functions/codegenalg/*Suite.java</include> -->\n<!-- <include>**/integration/functions/**/*Test*.java</include> -->\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add more test suites to integration tests (compress, jmlc) This patch adds the test suites for compression and jmlc to our automated CI tests because they increase coverage (1500 tests) at low overhead (few minutes).
49,727
22.05.2018 22:23:59
25,200
96954b4ca980d928bf2c4c4eb012efe0188a4ac6
[MINOR] Fix incorrect list indexing range check, extended name handling Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "diff": "@@ -26,8 +26,7 @@ import org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-public class ListObject extends Data\n-{\n+public class ListObject extends Data {\nprivate static final long serialVersionUID = 3652422061598967358L;\nprivate final List<String> _names;\n@@ -72,12 +71,11 @@ public class ListObject extends Data\npublic Data slice(String name) {\n//check for existing named list\nif (_names == null)\n- throw new DMLRuntimeException(\"Invalid lookup by name\"\n- + \" in unnamed list: \"+name+\".\");\n+ throw new DMLRuntimeException(\"Invalid lookup by name\" + \" in unnamed list: \" + name + \".\");\n//find position and check for existing entry\nint pos = _names.indexOf(name);\n- if( pos <= 0 || pos >= _data.size() )\n+ if (pos < 0 || pos >= _data.size())\nthrow new DMLRuntimeException(\"List lookup returned no entry for name='\" + name + \"'\");\n//return existing entry\n@@ -87,21 +85,28 @@ public class ListObject extends Data\npublic ListObject slice(String name1, String name2) {\n//check for existing named list\nif (_names == null)\n- throw new DMLRuntimeException(\"Invalid lookup by name\"\n- + \" in unnamed list: \"+name1+\", \"+name2+\".\");\n+ throw new DMLRuntimeException(\"Invalid lookup by name\" + \" in unnamed list: \" + name1 + \", \" + name2 + \".\");\n//find position and check for existing entry\nint pos1 = _names.indexOf(name1);\nint pos2 = _names.indexOf(name2);\n- if( pos1 <= 0 || pos1 >= _data.size() )\n+ if (pos1 < 0 || pos1 >= _data.size())\nthrow new DMLRuntimeException(\"List lookup returned no entry for name='\" + name1 + \"'\");\n- if( pos2 <= 0 || pos2 >= _data.size() )\n+ if (pos2 < 0 || pos2 >= _data.size())\nthrow new DMLRuntimeException(\"List lookup returned no entry for name='\" + name2 + \"'\");\n//return list object\nreturn slice(pos1, pos2);\n}\n+ public List<String> getNames() {\n+ return _names;\n+ }\n+\n+ public String getName(int ix) {\n+ return (_names == null) ? null : _names.get(ix);\n+ }\n+\n@Override\npublic String getDebugName() {\nreturn toString();\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix incorrect list indexing range check, extended name handling Closes #766.
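The off-by-one in this fix comes from List.indexOf, which returns 0 for the first element and -1 only when the name is absent; the old check (pos <= 0) therefore rejected valid lookups of the first list entry. A small self-contained illustration:

```java
import java.util.Arrays;
import java.util.List;

public class IndexOfCheck {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("A", "B", "C");
        int pos = names.indexOf("A");  // 0 -- a perfectly valid position
        boolean buggyReject = (pos <= 0 || pos >= names.size()); // true: wrongly rejects "A"
        boolean fixedReject = (pos < 0  || pos >= names.size()); // false: accepts "A"
        System.out.println(buggyReject + " vs " + fixedReject);  // true vs false
    }
}
```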
49,738
22.05.2018 22:56:28
25,200
46998abe1482e73ee2649bbc22686010c7eef8eb
[HOTFIX] Modified CI test suite (rm compress, add misc) This patch temporarily disables the compression test suite because it keeps failing for specific tests in Travis although it runs just fine in various other environments. In exchange, we add the misc test suite because it adds good coverage for rewrites and runtime ops.
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<!-- <include>**/integration/applications/**/*Suite.java</include> -->\n<!-- <include>**/integration/conversion/*Suite.java</include> -->\n<!-- <include>**/integration/functions/data/*Suite.java</include> -->\n- <include>**/integration/functions/compress/*Suite.java</include>\n<include>**/integration/functions/jmlc/*Suite.java</include>\n+ <include>**/integration/functions/misc/*Suite.java</include>\n<include>**/integration/functions/sparse/*Suite.java</include>\n<!-- <include>**/integration/functions/codegenalg/*Suite.java</include> -->\n<!-- <include>**/integration/functions/**/*Test*.java</include> -->\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Modified CI test suite (rm compress, add misc) This patch temporarily disables the compression test suite because it keeps failing for specific tests in Travis although it runs just fine in various other environments. In exchange, we add the misc test suite because it adds good coverage for rewrites and runtime ops.
49,719
23.05.2018 13:58:23
25,200
b142239b063106f3083699fd9ae41b81785e162a
change build status image url
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -21,7 +21,7 @@ limitations under the License.\n**Documentation:** [SystemML Documentation](http://systemml.apache.org/documentation)<br/>\n**Mailing List:** [Dev Mailing List](mailto:[email protected])<br/>\n-**Build Status:** [![Build Status](https://sparktc.ibmcloud.com/jenkins/job/SystemML-DailyTest/badge/icon)](https://sparktc.ibmcloud.com/jenkins/job/SystemML-DailyTest)<br/>\n+**Build Status:** [![Build Status](https://travis-ci.org/apache/systemml.svg?branch=master)](https://travis-ci.org/apache/systemml)<br/>\n**Issue Tracker:** [JIRA](https://issues.apache.org/jira/browse/SYSTEMML)<br/>\n**Download:** [Download SystemML](http://systemml.apache.org/download.html)<br/>\n" } ]
Java
Apache License 2.0
apache/systemds
change build status image url
49,769
23.05.2018 14:38:17
25,200
72454cdaa72687e87b3fced0afcb69bf203cd97a
[MINOR] Added more test cases for ARIMA (parameter combinations) Closes
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/applications/ArimaTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/applications/ArimaTest.java", "diff": "@@ -25,12 +25,11 @@ import java.util.Collection;\nimport java.util.HashMap;\nimport java.util.List;\n-import org.junit.runners.Parameterized.Parameters;\n-\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n+import org.junit.runners.Parameterized.Parameters;\npublic abstract class ArimaTest extends AutomatedTestBase {\n@@ -55,8 +54,12 @@ public abstract class ArimaTest extends AutomatedTestBase {\n@Parameters\npublic static Collection<Object[]> data() {\n- Object[][] data = new Object[][] {{ 10, 1, 1, 1, 1, 1, 1, 24, 1, 1}};\n- return Arrays.asList(data);\n+ return Arrays.asList(new Object[][] {\n+ {10, 1, 1, 1, 1, 1, 1, 24, 1, 1}});\n+ //TODO include after ARIMA script modifications\n+ //(these tests are currently failing due to invalid loop ranges)\n+ //{0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, //AR(7)\n+ //{0, 0, 0, 3, 0, 0, 0, 0, 0, 0}}); //MA(3)\n}\n@Override\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Added more test cases for ARIMA (parameter combinations) Closes #767.
49,738
23.05.2018 17:10:04
25,200
fc5039ed7b85949285c9c450dc2635e09214b42d
Improved recompilation of right indexing and reshape This patch improves special cases of size updates during dynamic recompilation and thus, also during intra- and inter-procedural analysis (IPA). Specifically, we now handle the size reset, recursive DAG propagation, and update with symbol table statistics in a consistent manner.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -1720,11 +1720,11 @@ public abstract class Hop implements ParseInfo\nreturn ret;\n}\n- public double computeBoundsInformation( Hop input, LocalVariableMap vars ) {\n+ public final double computeBoundsInformation( Hop input, LocalVariableMap vars ) {\nreturn computeBoundsInformation(input, vars, new HashMap<Long, Double>());\n}\n- public double computeBoundsInformation( Hop input, LocalVariableMap vars, HashMap<Long, Double> memo ) {\n+ public final double computeBoundsInformation( Hop input, LocalVariableMap vars, HashMap<Long, Double> memo ) {\ndouble ret = Double.MAX_VALUE;\ntry {\nret = OptimizerUtils.rEvalSimpleDoubleExpression(input, memo, vars);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "diff": "@@ -46,12 +46,12 @@ import org.apache.sysml.hops.Hop.DataGenMethod;\nimport org.apache.sysml.hops.Hop.DataOpTypes;\nimport org.apache.sysml.hops.Hop.FileFormatTypes;\nimport org.apache.sysml.hops.Hop.OpOp1;\n+import org.apache.sysml.hops.Hop.ReOrgOp;\nimport org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.MemoTable;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.hops.ReorgOp;\nimport org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.codegen.SpoofCompiler;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\n@@ -1282,17 +1282,12 @@ public class Recompiler\nfor( Hop c : hop.getInput() )\nrUpdateStatistics(c, vars);\n- boolean updatedSizeExpr = false;\n-\n//update statistics for transient reads according to current statistics\n//(with awareness not to override persistent reads to an existing name)\n- if( hop instanceof DataOp\n- && ((DataOp)hop).getDataOpType() != DataOpTypes.PERSISTENTREAD )\n- {\n+ if( HopRewriteUtils.isData(hop, DataOpTypes.TRANSIENTREAD) ) {\nDataOp d = (DataOp) hop;\nString varName = d.getName();\n- if( vars.keySet().contains( varName ) )\n- {\n+ if( vars.keySet().contains( varName ) ) {\nData dat = vars.get(varName);\nif( dat instanceof MatrixObject ) {\nMatrixObject mo = (MatrixObject) dat;\n@@ -1308,8 +1303,7 @@ public class Recompiler\n}\n}\n//special case for persistent reads with unknown size (read-after-write)\n- else if( hop instanceof DataOp\n- && ((DataOp)hop).getDataOpType() == DataOpTypes.PERSISTENTREAD\n+ else if( HopRewriteUtils.isData(hop, DataOpTypes.PERSISTENTREAD)\n&& !hop.dimsKnown() && ((DataOp)hop).getInputFormatType()!=FileFormatTypes.CSV\n&& !ConfigurationManager.getCompilerConfigFlag(ConfigType.IGNORE_READ_WRITE_METADATA) )\n{\n@@ -1332,7 +1326,8 @@ public class Recompiler\nHashMap<Long, Long> memo = new HashMap<>();\nd.refreshRowsParameterInformation(d.getInput().get(ix1), vars, memo);\nd.refreshColsParameterInformation(d.getInput().get(ix2), vars, memo);\n- updatedSizeExpr = initUnknown & d.dimsKnown();\n+ if( !(initUnknown & d.dimsKnown()) )\n+ d.refreshSizeInformation();\n}\nelse if ( d.getOp() == DataGenMethod.SEQ )\n{\n@@ -1355,47 +1350,40 @@ public class Recompiler\nd.setDim2( 1 );\nd.setIncrementValue( incr );\n}\n- updatedSizeExpr = initUnknown & d.dimsKnown();\n+ if( !(initUnknown & d.dimsKnown()) )\n+ d.refreshSizeInformation();\n}\nelse {\nthrow new DMLRuntimeException(\"Unexpected data generation 
method: \" + d.getOp());\n}\n}\n//update size expression for reshape according to symbol table entries\n- else if ( hop instanceof ReorgOp\n- && ((ReorgOp)(hop)).getOp()==Hop.ReOrgOp.RESHAPE )\n- {\n- ReorgOp d = (ReorgOp) hop;\n- boolean initUnknown = !d.dimsKnown();\n+ else if( HopRewriteUtils.isReorg(hop, ReOrgOp.RESHAPE) ) {\n+ hop.refreshSizeInformation(); //update incl reset\n+ if( !hop.dimsKnown() ) {\nHashMap<Long, Long> memo = new HashMap<>();\n- d.refreshRowsParameterInformation(d.getInput().get(1), vars, memo);\n- d.refreshColsParameterInformation(d.getInput().get(2), vars, memo);\n- updatedSizeExpr = initUnknown & d.dimsKnown();\n+ hop.refreshRowsParameterInformation(hop.getInput().get(1), vars, memo);\n+ hop.refreshColsParameterInformation(hop.getInput().get(2), vars, memo);\n+ }\n}\n//update size expression for indexing according to symbol table entries\n- else if( hop instanceof IndexingOp && hop.getDataType()!=DataType.LIST )\n- {\n- IndexingOp iop = (IndexingOp)hop;\n- Hop input2 = iop.getInput().get(1); //inpRowL\n- Hop input3 = iop.getInput().get(2); //inpRowU\n- Hop input4 = iop.getInput().get(3); //inpColL\n- Hop input5 = iop.getInput().get(4); //inpColU\n- boolean initUnknown = !iop.dimsKnown();\n+ else if( hop instanceof IndexingOp && hop.getDataType()!=DataType.LIST ) {\n+ hop.refreshSizeInformation(); //update, incl reset\n+ if( !hop.dimsKnown() ) {\nHashMap<Long, Double> memo = new HashMap<>();\n- double rl = iop.computeBoundsInformation(input2, vars, memo);\n- double ru = iop.computeBoundsInformation(input3, vars, memo);\n- double cl = iop.computeBoundsInformation(input4, vars, memo);\n- double cu = iop.computeBoundsInformation(input5, vars, memo);\n+ double rl = hop.computeBoundsInformation(hop.getInput().get(1), vars, memo);\n+ double ru = hop.computeBoundsInformation(hop.getInput().get(2), vars, memo);\n+ double cl = hop.computeBoundsInformation(hop.getInput().get(3), vars, memo);\n+ double cu = hop.computeBoundsInformation(hop.getInput().get(4), vars, memo);\nif( rl!=Double.MAX_VALUE && ru!=Double.MAX_VALUE )\n- iop.setDim1( (long)(ru-rl+1) );\n+ hop.setDim1( (long)(ru-rl+1) );\nif( cl!=Double.MAX_VALUE && cu!=Double.MAX_VALUE )\n- iop.setDim2( (long)(cu-cl+1) );\n- updatedSizeExpr = initUnknown & iop.dimsKnown();\n+ hop.setDim2( (long)(cu-cl+1) );\n}\n-\n+ }\n+ else {\n//propagate statistics along inner nodes of DAG,\n//without overwriting inferred size expressions\n- if( !updatedSizeExpr ) {\nhop.refreshSizeInformation();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/recompile/FunctionRecompileTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/recompile/FunctionRecompileTest.java", "diff": "@@ -103,7 +103,7 @@ public class FunctionRecompileTest extends AutomatedTestBase\n//note: change from previous version due to fix in op selection (unknown size XtX and mapmult)\n//CHECK compiled MR jobs\nint expectNumCompiled = -1;\n- if( IPA ) expectNumCompiled = 4; //reblock TODO investigate 1-4 recompile side effect\n+ if( IPA ) expectNumCompiled = 1; //reblock\nelse expectNumCompiled = 5; //reblock, GMR,GMR,GMR,GMR (last two should piggybacked)\nAssert.assertEquals(\"Unexpected number of compiled MR jobs.\",\nexpectNumCompiled, Statistics.getNoOfCompiledMRJobs());\n@@ -111,7 +111,7 @@ public class FunctionRecompileTest extends AutomatedTestBase\n//CHECK executed MR jobs\nint expectNumExecuted = -1;\nif( recompile ) expectNumExecuted = 0;\n- else if( IPA ) expectNumExecuted = 
31; //reblock TODO investigate 1-31 recompile side effect\n+ else if( IPA ) expectNumExecuted = 1; //reblock\nelse expectNumExecuted = 41; //reblock, 10*(GMR,GMR,GMR, GMR) (last two should piggybacked)\nAssert.assertEquals(\"Unexpected number of executed MR jobs.\",\nexpectNumExecuted, Statistics.getNoOfExecutedMRJobs());\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2341] Improved recompilation of right indexing and reshape This patch improves special cases of size updates during dynamic recompilation and thus, also during intra- and inter-procedural analysis (IPA). Specifically, we now handle the size reset, recursive DAG propagation, and update with symbol table statistics in a consistent manner.
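For right indexing X[rl:ru, cl:cu], the patch derives the output dimensions directly from the evaluated bounds. A hedged sketch of that rule, using Double.MAX_VALUE as the "unknown" marker as the patch does (the helper name is hypothetical):

```java
public class IndexingSizeSketch {
    // hypothetical helper mirroring the patch's rule: a dimension of
    // X[rl:ru, ...] is (ru - rl + 1) once both bounds are known scalars;
    // Double.MAX_VALUE marks an unknown bound, -1 an unknown dimension
    static long inferDim(double lower, double upper) {
        return (lower != Double.MAX_VALUE && upper != Double.MAX_VALUE)
            ? (long) (upper - lower + 1) : -1;
    }

    public static void main(String[] args) {
        System.out.println(inferDim(1, 100));                // 100 rows
        System.out.println(inferDim(Double.MAX_VALUE, 100)); // -1: stays unknown
    }
}
```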
49,738
23.05.2018 17:17:45
25,200
a13632d4582b0d74c5898fe67790a1052ba11c2f
[MINOR] Add more test suites to integration tests (estim, ext, frame)
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<!-- <include>**/integration/applications/**/*Suite.java</include> -->\n<!-- <include>**/integration/conversion/*Suite.java</include> -->\n<!-- <include>**/integration/functions/data/*Suite.java</include> -->\n+ <include>**/integration/functions/estim/*Suite.java</include>\n+ <include>**/integration/functions/external/*Suite.java</include>\n+ <include>**/integration/functions/frame/*Suite.java</include>\n<include>**/integration/functions/jmlc/*Suite.java</include>\n<include>**/integration/functions/misc/*Suite.java</include>\n<include>**/integration/functions/sparse/*Suite.java</include>\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add more test suites to integration tests (estim, ext, frame)
49,738
24.05.2018 15:04:33
25,200
03aaec73314834b2b2778ddf25ec37b2c94132a6
[MINOR] Add more test suites to integration tests (parfor, tensor, misc)
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<include>**/integration/functions/estim/*Suite.java</include>\n<include>**/integration/functions/external/*Suite.java</include>\n<include>**/integration/functions/frame/*Suite.java</include>\n+ <include>**/integration/functions/io/*Suite.java</include>\n<include>**/integration/functions/jmlc/*Suite.java</include>\n<include>**/integration/functions/misc/*Suite.java</include>\n+ <include>**/integration/functions/parfor/*Suite.java</include>\n<include>**/integration/functions/sparse/*Suite.java</include>\n+ <include>**/integration/functions/tensor/*Suite.java</include>\n+ <include>**/integration/functions/updateinplace/*Suite.java</include>\n+ <include>**/integration/functions/vect/*Suite.java</include>\n<!-- <include>**/integration/functions/codegenalg/*Suite.java</include> -->\n<!-- <include>**/integration/functions/**/*Test*.java</include> -->\n<include>**/integration/mlcontext/*Suite.java</include>\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add more test suites to integration tests (parfor, tensor, misc)
49,769
25.05.2018 11:27:03
25,200
ba497ba4ffe67179af4ffc5ca9a33c94e9bd1625
Fix ARIMA script handling of zero parameters Bugfix: no differencing when d/D = 0. For loops now use seq to avoid execution for the range 1:0. Closes
[ { "change_type": "MODIFY", "old_path": "src/test/scripts/applications/arima_box-jenkins/arima.dml", "new_path": "src/test/scripts/applications/arima_box-jenkins/arima.dml", "diff": "@@ -39,14 +39,15 @@ arima_css = function(Matrix[Double] w, Matrix[Double] X, Integer pIn, Integer P,\nb = X[,2:ncol(X)]%*%w\nR = Rand(rows=nrow(X), cols=nrow(X), min=0, max=0)\n- for(i7 in 1:qIn){\n+\n+ for(i7 in seq(1, qIn, 1)){\nma_ind_ns = P+pIn+i7\nerr_ind_ns = i7\nones_ns = Rand(rows=nrow(R)-err_ind_ns, cols=1, min=1, max=1)\nd_ns = ones_ns * as.scalar(w[ma_ind_ns,1])\nR[1+err_ind_ns:nrow(R),1:ncol(R)-err_ind_ns] = R[1+err_ind_ns:nrow(R),1:ncol(R)-err_ind_ns] + diag(d_ns)\n}\n- for(i8 in 1:Q){\n+ for(i8 in seq(1, Q, 1)){\nma_ind_s = P+pIn+qIn+i8\nerr_ind_s = s*i8\nones_s = Rand(rows=nrow(R)-err_ind_s, cols=1, min=1, max=1)\n@@ -138,7 +139,7 @@ if(num_rows <= d){\n}\nY = X\n-for(i in 1:d){\n+for(i in seq(1, d, 1)){\nn1 = nrow(Y)+0.0\nY = Y[2:n1,] - Y[1:n1-1,]\n}\n@@ -148,7 +149,7 @@ if(num_rows <= s*D){\nprint(\"seasonal differencing order should be larger than number of observations divided by length of season\")\n}\n-for(i in 1:D){\n+for(i in seq(1,D, 1)){\nn1 = nrow(Y)+0.0\nY = Y[s+1:n1,] - Y[1:n1-s,]\n}\n@@ -174,16 +175,16 @@ totcols = 1+p+P+Q+q #target col (X), p-P cols, q-Q cols\nZ = Rand(rows=n, cols=totcols, min=0, max=0)\nZ[,1] = Y #target col\n-parfor(i1 in 1:p, check=0){\n+parfor(i1 in seq(1, p, 1), check=0){\nZ[i1+1:n,1+i1] = Y[1:n-i1,]\n}\n-parfor(i2 in 1:P, check=0){\n+parfor(i2 in seq(1, P, 1), check=0){\nZ[s*i2+1:n,1+p+i2] = Y[1:n-s*i2,]\n}\n-parfor(i5 in 1:q, check=0){\n+parfor(i5 in seq(1, q, 1), check=0){\nZ[i5+1:n,1+P+p+i5] = Y[1:n-i5,]\n}\n-parfor(i6 in 1:Q, check=0){\n+parfor(i6 in seq(1,Q, 1), check=0){\nZ[s*i6+1:n,1+P+p+q+i6] = Y[1:n-s*i6,]\n}\n@@ -206,6 +207,7 @@ num_func_invoc = num_func_invoc + ncol(simplex)\ntol = 1.5 * 10^(-8) * as.scalar(objvals[1,1])\ncontinue = 1\n+best_index = 1\nwhile(continue == 1 & num_func_invoc <= max_func_invoc) {\nbest_index = 1\nworst_index = 1\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2342] Fix ARIMA script handling of zero parameters Bugfix: no differencing when d/D = 0. For loops now use seq to avoid execution for the range 1:0. Closes #769.
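The underlying issue is the R-like colon operator, which counts downwards when the upper bound is below the lower bound, so a loop over 1:0 still executes twice, whereas seq(1, 0, 1) yields an empty sequence. A small Java sketch contrasting the two semantics (assuming DML follows R here, as the fix suggests):

```java
import java.util.stream.IntStream;

public class RangeSemantics {
    public static void main(String[] args) {
        int from = 1, to = 0;
        // R-like colon operator: 1:0 counts down, yielding {1, 0} -> 2 iterations
        int colonIterations = Math.abs(to - from) + 1;
        // seq(1, 0, 1): empty when the step is positive and from > to -> 0 iterations
        long seqIterations = IntStream.rangeClosed(from, to).count();
        System.out.println(colonIterations + " vs " + seqIterations); // 2 vs 0
    }
}
```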
49,738
28.05.2018 20:11:39
25,200
c7a9e016d120c93a5a947133a4511dde456ec7c4
Fix inconsistent namespace names (OS-specific paths) This patch fixes issues with OS-specific file paths used as physical namespace names, which created problems for second-order functions such as eval and paramserv that potentially load functions by name.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/common/CommonSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/common/CommonSyntacticValidator.java", "diff": "@@ -176,8 +176,10 @@ public abstract class CommonSyntacticValidator {\n}\nprivate static String getWorkingFilePath(String filePath, String workingDir) {\n- return !new File(filePath).isAbsolute() ?\n- workingDir + File.separator + filePath : filePath;\n+ //NOTE: the use of File.separator would lead to OS-specific inconsistencies,\n+ //which is problematic for second order functions such as eval or paramserv.\n+ //Since this is unnecessary, we now use \"/\" independent of the use OS.\n+ return !new File(filePath).isAbsolute() ? workingDir + \"/\" + filePath : filePath;\n}\npublic String getNamespaceSafe(Token ns) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2347] Fix inconsistent namespace names (OS-specific paths) This patch fixes issues with OS-specific file paths used as physical namespace names, which created problems for second-order functions such as eval and paramserv that potentially load functions by name.
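A brief sketch of why File.separator is harmful as part of a namespace key: the same logical path produces different strings per OS, so by-name lookups can miss. The paths below are made up for illustration:

```java
import java.io.File;

public class NamespaceKeySketch {
    public static void main(String[] args) {
        String workingDir = "scripts";        // illustrative values only
        String filePath = "nn/layers.dml";
        // OS-dependent key: "scripts\nn/layers.dml" on Windows, "scripts/nn/layers.dml" elsewhere
        String osSpecific = workingDir + File.separator + filePath;
        // normalized key after the fix: identical on every OS, so functions
        // loaded by name (eval, paramserv) resolve consistently
        String normalized = workingDir + "/" + filePath;
        System.out.println(osSpecific.equals(normalized)); // false on Windows, true elsewhere
    }
}
```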
49,736
30.05.2018 14:47:36
25,200
72fd8fda39062c9059e60d054f263ea1d92bf517
Upgraded CUDA and CuDNN version
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<maven.build.timestamp.format>yyyy-MM-dd HH:mm:ss z</maven.build.timestamp.format>\n<enableGPU>false</enableGPU>\n<jcuda.scope>provided</jcuda.scope>\n- <jcuda.version>0.8.0</jcuda.version>\n+ <jcuda.version>0.9.0d</jcuda.version>\n<!-- OS-specific JVM arguments for running integration tests -->\n<integrationTestExtraJVMArgs />\n</properties>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+ <!-- Commented until the PowerPC jcuda libraries are deployed for 0.9.0 version\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda-natives</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+ -->\n<dependency>\n<groupId>org.apache.spark</groupId>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java", "diff": "@@ -273,7 +273,7 @@ public class LibMatrixCuDNNConvolutionAlgorithm implements java.lang.AutoCloseab\nprivate static cudnnConvolutionDescriptor allocateConvolutionDescriptor(int padding [], int strides []) {\ncudnnConvolutionDescriptor convDesc = new cudnnConvolutionDescriptor();\ncudnnCreateConvolutionDescriptor(convDesc);\n- cudnnSetConvolution2dDescriptor(convDesc, padding[0], padding[1], strides[0], strides[1], 1, 1, CUDNN_CROSS_CORRELATION);\n+ cudnnSetConvolution2dDescriptor(convDesc, padding[0], padding[1], strides[0], strides[1], 1, 1, CUDNN_CROSS_CORRELATION, LibMatrixCUDA.CUDNN_DATA_TYPE);\nreturn convDesc;\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Upgraded CUDA and CuDNN version
49,736
30.05.2018 15:37:40
25,200
7350a0c6d38b3c018e10d18863295c1a89abc2cd
Remove unnecessary variables from batch_norm2d layer
[ { "change_type": "MODIFY", "old_path": "scripts/nn/layers/batch_norm2d.dml", "new_path": "scripts/nn/layers/batch_norm2d.dml", "diff": "@@ -29,7 +29,7 @@ forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\nmatrix[double] ema_mean, matrix[double] ema_var,\ndouble mu, double epsilon)\nreturn (matrix[double] out, matrix[double] ema_mean_upd, matrix[double] ema_var_upd,\n- matrix[double] cache_mean, matrix[double] cache_var, matrix[double] cache_norm) {\n+ matrix[double] cache_mean, matrix[double] cache_inv_var) {\n/*\n* Computes the forward pass for a 2D (spatial) batch normalization\n* layer. The input data has N examples, each represented as a 3D\n@@ -80,11 +80,8 @@ forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\n* of shape (C, 1).\n* - cache_mean: Cache of the batch mean, of shape (C, 1).\n* Note: This is used for performance during training.\n- * - cache_var: Cache of the batch variance, of shape (C, 1).\n+ * - cache_inv_var: Cache of the inverse variance, of shape (C, 1).\n* Note: This is used for performance during training.\n- * - cache_norm: Cache of the normalized inputs, of\n- * shape (C, N*Hin*Win). Note: This is used for performance\n- * during training.\n*/\nN = nrow(X)\n@@ -109,28 +106,24 @@ forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\nema_var_upd = ema_var\n}\n+ # Save variable for backward pass\n+ cache_mean = mean\n+ cache_inv_var = 1/sqrt(var+epsilon)\n+\n# Normalize, shift, and scale\n# norm = (X-mean)*(var+epsilon)^(-1/2)\n# = (X-mean) / sqrt(var+epsilon)\ncentered = bias_add(X, -mean) # shape (N, C*Hin*Win)\n- norm = bias_multiply(centered, 1/sqrt(var+epsilon)) # shape (N, C*Hin*Win)\n+ norm = bias_multiply(centered, cache_inv_var) # shape (N, C*Hin*Win)\n# out = norm*gamma + beta\nscaled = bias_multiply(norm, gamma) # shape (N, C*Hin*Win)\nout = bias_add(scaled, beta) # shape (N, C*Hin*Win)\n-\n- # Save variable for backward pass\n- cache_mean = mean\n- cache_var = var\n- cache_norm = norm\n}\n-backward = function(matrix[double] dout, matrix[double] out,\n- matrix[double] ema_mean_upd, matrix[double] ema_var_upd,\n- matrix[double] cache_mean, matrix[double] cache_var, matrix[double] cache_norm,\n- matrix[double] X, matrix[double] gamma, matrix[double] beta,\n- int C, int Hin, int Win, string mode,\n- matrix[double] ema_mean, matrix[double] ema_var,\n- double mu, double epsilon)\n+backward = function(matrix[double] dout,\n+ matrix[double] cache_mean, matrix[double] cache_inv_var,\n+ matrix[double] X, matrix[double] gamma,\n+ int C, int Hin, int Win, double epsilon)\nreturn (matrix[double] dX, matrix[double] dgamma, matrix[double] dbeta) {\n/*\n* Computes the backward pass for a 2D (spatial) batch normalization\n@@ -138,38 +131,18 @@ backward = function(matrix[double] dout, matrix[double] out,\n*\n* Inputs:\n* - dout: Gradient wrt `out` from upstream, of shape (N, C*Hin*Win).\n- * - out: Outputs from the forward pass, of shape (N, C*Hin*Win).\n- * - ema_mean_upd: Updated exponential moving average of the mean\n- * from the forward pass, of shape (C, 1).\n- * - ema_var_upd: Updated exponential moving average of the variance\n- * from the forward pass, of shape (C, 1).\n* - cache_mean: Cache of the batch mean from the forward pass, of\n* shape (C, 1). Note: This is used for performance during\n* training.\n- * - cache_var: Cache of the batch variance from the forward pass,\n+ * - cache_inv_var: Cache of the inverse variance from the forward pass,\n* of shape (C, 1). 
Note: This is used for performance during\n* training.\n- * - cache_norm: Cache of the normalized inputs from the forward\n- * pass, of shape (C, N*Hin*Win). Note: This is used for\n- * performance during training.\n* - X: Input data matrix to the forward pass, of\n* shape (N, C*Hin*Win).\n* - gamma: Scale parameters, of shape (C, 1).\n- * - beta: Shift parameters, of shape (C, 1).\n* - C: Number of input channels (dimensionality of input depth).\n* - Hin: Input height.\n* - Win: Input width.\n- * - mode: 'train' or 'test' to indicate if the model is currently\n- * being trained or tested. During training, the current batch\n- * mean and variance will be used to normalize the inputs, while\n- * during testing, the exponential average of the mean and\n- * variance over all previous batches will be used.\n- * - ema_mean: Exponential moving average of the mean, of\n- * shape (C, 1).\n- * - ema_var: Exponential moving average of the variance, of\n- * shape (C, 1).\n- * - mu: Momentum value for moving averages.\n- * Typical values are in the range of [0.9, 0.999].\n* - epsilon: Smoothing term to avoid divide by zero errors.\n* Typical values are in the range of [1e-5, 1e-3].\n*\n@@ -181,34 +154,23 @@ backward = function(matrix[double] dout, matrix[double] out,\n*/\nN = nrow(X)\nmean = cache_mean\n- var = cache_var\n- norm = cache_norm\ncentered = bias_add(X, -mean) # shape (N, C*Hin*Win)\n-\n- if (mode == 'train') {\n+ norm = bias_multiply(centered, cache_inv_var) # shape (N, C*Hin*Win)\n# Compute gradients during training\ndgamma = util::channel_sums(dout*norm, C, Hin, Win) # shape (C, 1)\ndbeta = util::channel_sums(dout, C, Hin, Win) # shape (C, 1)\ndnorm = bias_multiply(dout, gamma) # shape (N, C*Hin*Win)\n- dvar = util::channel_sums((-1/2) * bias_multiply(centered, (var+epsilon)^(-3/2)) * dnorm,\n+ dvar = util::channel_sums((-1/2) * bias_multiply(centered, cache_inv_var^3) * dnorm,\nC, Hin, Win) # shape (C, 1)\n- dmean_norm_branch = util::channel_sums(bias_multiply(dnorm, -1/sqrt(var+epsilon)), C, Hin, Win)\n+ dmean_norm_branch = util::channel_sums(bias_multiply(dnorm, -cache_inv_var), C, Hin, Win)\ndmean_var_branch = util::channel_sums((-2/(N*Hin*Win)) * centered, C, Hin, Win)\ndmean_var_branch = dmean_var_branch * dvar # we can't use a function within an expression yet\ndmean = dmean_norm_branch + dmean_var_branch # shape (C, 1)\n- dX_norm_branch = bias_multiply(dnorm, 1/sqrt(var+epsilon))\n+ dX_norm_branch = bias_multiply(dnorm, cache_inv_var)\ndX_mean_branch = (1/(N*Hin*Win)) * bias_add(matrix(0, rows=1, cols=C*Hin*Win), dmean)\ndX_var_branch = (2/(N*Hin*Win)) * bias_multiply(centered, dvar)\ndX = dX_norm_branch + dX_mean_branch + dX_var_branch # shape (N, C*Hin*Win)\n}\n- else {\n- # Compute gradients during testing\n- dgamma = util::channel_sums(dout*norm, C, Hin, Win) # shape (C, 1)\n- dbeta = util::channel_sums(dout, C, Hin, Win) # shape (C, 1)\n- dnorm = bias_multiply(dout, gamma) # shape (N, C*Hin*Win)\n- dX = bias_multiply(dnorm, 1/sqrt(var+epsilon)) # shape (N, C*Hin*Win)\n- }\n-}\ninit = function(int C)\nreturn (matrix[double] gamma, matrix[double] beta,\n@@ -235,4 +197,3 @@ init = function(int C)\nema_mean = matrix(0, rows=C, cols=1)\nema_var = matrix(1, rows=C, cols=1)\n}\n-\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/test/grad_check.dml", "new_path": "scripts/nn/test/grad_check.dml", "diff": "@@ -363,21 +363,16 @@ batch_norm2d = function() {\n#[dummy, dummy, ema_mean, ema_var] = batch_norm2d::init(C)\n# Check training & testing modes\n- for (i in 1:2) {\n- if (i 
== 1)\n+ # for (i in 1:1) {\nmode = 'train'\n- else\n- mode = 'test'\nprint(\" - Grad checking the '\"+mode+\"' mode.\")\n# Compute analytical gradients of loss wrt parameters\n- [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\ndout = l2_loss::backward(out, y)\n- [dX, dgamma, dbeta] = batch_norm2d::backward(dout, out, ema_mean_upd, ema_var_upd,\n- cache_mean, cache_var, cache_norm,\n- X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ [dX, dgamma, dbeta] = batch_norm2d::backward(dout, cache_mean, cache_var,\n+ X, gamma, C, Hin, Win, eps)\n# Grad check\nh = 1e-5\n@@ -387,11 +382,11 @@ batch_norm2d = function() {\n# Compute numerical derivative\nold = as.scalar(X[i,j])\nX[i,j] = old - h\n- [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\nX[i,j] = old + h\n- [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\nX[i,j] = old # reset\n@@ -408,11 +403,11 @@ batch_norm2d = function() {\n# Compute numerical derivative\nold = as.scalar(gamma[i,j])\ngamma[i,j] = old - h\n- [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\ngamma[i,j] = old + h\n- [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\ngamma[i,j] = old # reset\n@@ -430,11 +425,11 @@ batch_norm2d = function() {\n# Compute numerical derivative\nold = as.scalar(beta[i,j])\nbeta[i,j] = old - h\n- [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\nbeta[i,j] = old + h\n- [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\nbeta[i,j] = old # reset\n@@ -445,7 +440,7 @@ batch_norm2d = function() {\nlossph, lossmh)\n}\n}\n- }\n+ # }\n}\nconv2d = function() {\n@@ -2497,4 +2492,3 @@ elu = function() {\n}\n}\n}\n-\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/test/test.dml", "new_path": "scripts/nn/test/test.dml", "diff": "@@ -125,7 +125,7 @@ batch_norm2d = function() {\n[gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)\n# Forward\n- [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var] =\nbatch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n# Equivalency check\n@@ -1125,4 +1125,3 @@ elu = function() {\n}\n}\n}\n-\n" }, { 
"change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala", "new_path": "src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala", "diff": "@@ -279,15 +279,12 @@ class BatchNorm(val param: LayerParameter, val id: Int, val net: CaffeNetwork) e\n* Note: This is used for performance during training.\n* - cache_var: Cache of the batch variance, of shape (C, 1).\n* Note: This is used for performance during training.\n- * - cache_norm: Cache of the normalized inputs, of\n- * shape (C, N*Hin*Win). Note: This is used for performance\n- * during training.\n*/\ndef forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = {\nval mode = if (isPrediction) \"\\\"test\\\"\" else \"\\\"train\\\"\"\ninvokeForward(\ndmlScript,\n- List[String](out, withSuffix(ema_mean), withSuffix(ema_var), withSuffix(cache_mean), withSuffix(cache_var), withSuffix(cache_norm)),\n+ List[String](out, withSuffix(ema_mean), withSuffix(ema_var), withSuffix(cache_mean), withSuffix(cache_var)),\nX,\ngamma,\nbeta,\n@@ -307,38 +304,18 @@ class BatchNorm(val param: LayerParameter, val id: Int, val net: CaffeNetwork) e\n*\n* Inputs:\n* - dout: Gradient wrt `out` from upstream, of shape (N, C*Hin*Win).\n- * - out: Outputs from the forward pass, of shape (N, C*Hin*Win).\n- * - ema_mean_upd: Updated exponential moving average of the mean\n- * from the forward pass, of shape (C, 1).\n- * - ema_var_upd: Updated exponential moving average of the variance\n- * from the forward pass, of shape (C, 1).\n* - cache_mean: Cache of the batch mean from the forward pass, of\n* shape (C, 1). Note: This is used for performance during\n* training.\n- * - cache_var: Cache of the batch variance from the forward pass,\n+ * - cache_inv_var: Cache of the inverse variance from the forward pass,\n* of shape (C, 1). Note: This is used for performance during\n* training.\n- * - cache_norm: Cache of the normalized inputs from the forward\n- * pass, of shape (C, N*Hin*Win). Note: This is used for\n- * performance during training.\n* - X: Input data matrix to the forward pass, of\n* shape (N, C*Hin*Win).\n* - gamma: Scale parameters, of shape (C, 1).\n- * - beta: Shift parameters, of shape (C, 1).\n* - C: Number of input channels (dimensionality of input depth).\n* - Hin: Input height.\n* - Win: Input width.\n- * - mode: 'train' or 'test' to indicate if the model is currently\n- * being trained or tested. 
During training, the current batch\n- * mean and variance will be used to normalize the inputs, while\n- * during testing, the exponential average of the mean and\n- * variance over all previous batches will be used.\n- * - ema_mean: Exponential moving average of the mean, of\n- * shape (C, 1).\n- * - ema_var: Exponential moving average of the variance, of\n- * shape (C, 1).\n- * - mu: Momentum value for moving averages.\n- * Typical values are in the range of [0.9, 0.999].\n* - epsilon: Smoothing term to avoid divide by zero errors.\n* Typical values are in the range of [1e-5, 1e-3].\n*\n@@ -354,22 +331,13 @@ class BatchNorm(val param: LayerParameter, val id: Int, val net: CaffeNetwork) e\noutSuffix,\nList[String](\"dOut\" + id, dgamma, dbeta),\ndout,\n- out,\n- ema_mean,\n- ema_var,\ncache_mean,\ncache_var,\n- cache_norm,\nX,\ngamma,\n- beta,\nnumChannels,\nHin,\nWin,\n- \"\\\"train\\\"\",\n- ema_mean,\n- ema_var,\n- ma_fraction,\neps\n)\n@@ -377,8 +345,7 @@ class BatchNorm(val param: LayerParameter, val id: Int, val net: CaffeNetwork) e\noverride def weightShape(): Array[Int] = Array(numChannels.toInt, 1)\noverride def biasShape(): Array[Int] = Array(numChannels.toInt, 1)\ndef cache_mean(): String = \"cache_mean\" + id\n- def cache_var(): String = \"cache_mean\" + id\n- def cache_norm(): String = \"cache_norm\" + id\n+ def cache_var(): String = \"cache_var\" + id\nvar scaleLayer: Scale = null\ndef gamma(): String = { checkNextLayer(); scaleLayer.weight }\ndef ma_fraction(): String = if (param.getBatchNormParam.hasMovingAverageFraction()) param.getBatchNormParam.getMovingAverageFraction.toString else \"0.999\"\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Remove unnecessary variables from batch_norm2d layer
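The refactor caches the inverse standard deviation 1/sqrt(var+epsilon) once in the forward pass and reuses it in backward, where (var+eps)^(-1/2) and (var+eps)^(-3/2) previously had to be recomputed. A tiny numeric sketch of that identity (values are arbitrary):

```java
public class BatchNormCacheSketch {
    public static void main(String[] args) {
        double var = 0.25, eps = 1e-5;        // arbitrary example values
        // forward pass computes the cache once:
        double cacheInvVar = 1.0 / Math.sqrt(var + eps);
        // backward pass reuses it instead of re-deriving powers of (var+eps):
        double normScale = cacheInvVar;               // replaces (var+eps)^(-1/2)
        double dvarScale = Math.pow(cacheInvVar, 3);  // replaces (var+eps)^(-3/2)
        System.out.println(normScale + " " + dvarScale);
    }
}
```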
49,736
31.05.2018 16:53:25
25,200
3c519e73915d735b397b8afdb74c329d6363f18f
Bugfix for compilation error on Mac OS and PowerPC arch. Closes
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<artifactId>jcuda</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n+ <exclusions>\n+ <exclusion>\n+ <!-- always exclude recursive fetching of native libraries -->\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcuda-natives</artifactId>\n+ </exclusion>\n+ </exclusions>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n+ <exclusions>\n+ <exclusion>\n+ <!-- always exclude recursive fetching of native libraries -->\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcublas-natives</artifactId>\n+ </exclusion>\n+ </exclusions>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n+ <exclusions>\n+ <exclusion>\n+ <!-- always exclude recursive fetching of native libraries -->\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusparse-natives</artifactId>\n+ </exclusion>\n+ </exclusions>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusolver</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n+ <exclusions>\n+ <exclusion>\n+ <!-- always exclude recursive fetching of native libraries -->\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusolver-natives</artifactId>\n+ </exclusion>\n+ </exclusions>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n+ <exclusions>\n+ <exclusion>\n+ <!-- always exclude recursive fetching of native libraries -->\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcudnn-natives</artifactId>\n+ </exclusion>\n+ </exclusions>\n</dependency>\n+ <!-- for all platforms, to be included in the extra jar -->\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusolver-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n- <!-- for all platforms, to be included in the extra jar -->\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda-natives</artifactId>\n- <classifier>windows-x86_64</classifier>\n+ 
<classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas-natives</artifactId>\n- <classifier>windows-x86_64</classifier>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse-natives</artifactId>\n- <classifier>windows-x86_64</classifier>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusolver-natives</artifactId>\n- <classifier>windows-x86_64</classifier>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn-natives</artifactId>\n- <classifier>windows-x86_64</classifier>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+ <!-- Commented until the PowerPC jcuda libraries are deployed for 0.9.0 version\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda-natives</artifactId>\n- <classifier>linux-x86_64</classifier>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas-natives</artifactId>\n- <classifier>linux-x86_64</classifier>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse-natives</artifactId>\n- <classifier>linux-x86_64</classifier>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusolver-natives</artifactId>\n- <classifier>linux-x86_64</classifier>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn-natives</artifactId>\n- <classifier>linux-x86_64</classifier>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+ -->\n- <!-- Commented until the PowerPC jcuda libraries are deployed for 0.9.0 version\n+ <!-- Commented until the MacOS jcuda libraries are deployed for 0.9.0 version\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda-natives</artifactId>\n- <classifier>linux-ppc_64</classifier>\n+ <classifier>apple-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas-natives</artifactId>\n- <classifier>linux-ppc_64</classifier>\n+ <classifier>apple-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse-natives</artifactId>\n- <classifier>linux-ppc_64</classifier>\n+ <classifier>apple-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusolver-natives</artifactId>\n- <classifier>linux-ppc_64</classifier>\n+ 
<classifier>apple-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn-natives</artifactId>\n- <classifier>linux-ppc_64</classifier>\n+ <classifier>apple-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n" }, { "change_type": "MODIFY", "old_path": "src/assembly/extra/LICENSE", "new_path": "src/assembly/extra/LICENSE", "diff": "@@ -257,22 +257,22 @@ The following compile-scope dependencies come under the MIT License\nJCuda (jcuda.org)\n-org.jcuda:jcuda:0.8.0\n-org.jcuda:jcublas:0.8.0\n-org.jcuda:jcufft:0.8.0\n-org.jcuda:jcusparse:0.8.0\n-org.jcuda:jcusolver:0.8.0\n-org.jcuda:jcurand:0.8.0\n-org.jcuda:jnvgraph:0.8.0\n-org.jcuda:jcudnn:0.8.0\n-org.jcuda:jcuda-natives:0.8.0\n-org.jcuda:jcublas-natives:0.8.0\n-org.jcuda:jcufft-natives:0.8.0\n-org.jcuda:jcusparse-natives:0.8.0\n-org.jcuda:jcusolver-natives:0.8.0\n-org.jcuda:jcurand-natives:0.8.0\n-org.jcuda:jnvgraph-natives:0.8.0\n-org.jcuda:jcudnn-natives:0.8.0\n+org.jcuda:jcuda:0.9.0\n+org.jcuda:jcublas:0.9.0\n+org.jcuda:jcufft:0.9.0\n+org.jcuda:jcusparse:0.9.0\n+org.jcuda:jcusolver:0.9.0\n+org.jcuda:jcurand:0.9.0\n+org.jcuda:jnvgraph:0.9.0\n+org.jcuda:jcudnn:0.9.0\n+org.jcuda:jcuda-natives:0.9.0\n+org.jcuda:jcublas-natives:0.9.0\n+org.jcuda:jcufft-natives:0.9.0\n+org.jcuda:jcusparse-natives:0.9.0\n+org.jcuda:jcusolver-natives:0.9.0\n+org.jcuda:jcurand-natives:0.9.0\n+org.jcuda:jnvgraph-natives:0.9.0\n+org.jcuda:jcudnn-natives:0.9.0\nThe MIT License (MIT)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -144,6 +144,7 @@ public class DMLConfig\n_defaultVals.put(SYNCHRONIZE_GPU, \"true\" );\n_defaultVals.put(EAGER_CUDA_FREE, \"false\" );\n_defaultVals.put(FLOATING_POINT_PRECISION, \"double\" );\n+ _defaultVals.put(PRINT_GPU_MEMORY_INFO, \"false\");\n}\npublic DMLConfig() {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Bugfix for compilation error on Mac OS and PowerPC arch. Closes #775.
49,738
31.05.2018 21:04:25
25,200
cad7c1e0f292d39907ba569735d6f9258365b166
Add PageRank to staging algorithms and codegen tests
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/PageRank.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# G: N x M, p: M x 1, e: N x 1: u: 1 x M\n+# ./sparkDML2.sh SystemML.jar -f PageRank.dml -args \"in/g\" \"in/p\" \"in/e\" \"in/u\" 0.85 3 \"out/w\"\n+\n+G = read($1);\n+p = read($2);\n+e = read($3);\n+u = read($4);\n+alpha = $5;\n+max_iteration = $6;\n+i = 0;\n+\n+while( i < max_iteration ) {\n+ p = alpha * (G %*% p) + (1 - alpha) * (e %*% u %*% p);\n+ i += 1;\n+}\n+\n+write(p, $7);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -102,10 +102,10 @@ public class SpoofCompiler\nprivate static final Log LOG = LogFactory.getLog(SpoofCompiler.class.getName());\n//internal configuration flags\n- public static boolean LDEBUG = false;\n+ public static final boolean LDEBUG = false;\npublic static CompilerType JAVA_COMPILER = CompilerType.JANINO;\npublic static PlanSelector PLAN_SEL_POLICY = PlanSelector.FUSE_COST_BASED_V2;\n- public static IntegrationType INTEGRATION = IntegrationType.RUNTIME;\n+ public static final IntegrationType INTEGRATION = IntegrationType.RUNTIME;\npublic static final boolean RECOMPILE_CODEGEN = true;\npublic static final boolean PRUNE_REDUNDANT_PLANS = true;\npublic static PlanCachePolicy PLAN_CACHE_POLICY = PlanCachePolicy.CSLH;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegenalg/AlgorithmPageRank.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.codegenalg;\n+\n+import java.io.File;\n+import java.util.HashMap;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class AlgorithmPageRank extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"Algorithm_PageRank\";\n+ private final static String TEST_DIR = \"functions/codegenalg/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + AlgorithmPageRank.class.getSimpleName() + \"/\";\n+ private final static String TEST_CONF_DEFAULT = \"SystemML-config-codegen.xml\";\n+ private final static File TEST_CONF_FILE_DEFAULT = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF_DEFAULT);\n+ private final static String TEST_CONF_FUSE_ALL = \"SystemML-config-codegen-fuse-all.xml\";\n+ private final static File TEST_CONF_FILE_FUSE_ALL = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF_FUSE_ALL);\n+ private final static String TEST_CONF_FUSE_NO_REDUNDANCY = \"SystemML-config-codegen-fuse-no-redundancy.xml\";\n+ private final static File TEST_CONF_FILE_FUSE_NO_REDUNDANCY = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF_FUSE_NO_REDUNDANCY);\n+\n+ private enum TestType { DEFAULT,FUSE_ALL,FUSE_NO_REDUNDANCY }\n+\n+ //absolute diff for large output scale in the +E12\n+ private final static double eps = 0.1;\n+\n+ private final static int rows = 1468;\n+ private final static int cols = 1468;\n+\n+ private final static double sparsity1 = 0.41; //dense\n+ private final static double sparsity2 = 0.05; //sparse\n+\n+ private final static double alpha = 0.85;\n+ private final static double maxiter = 10;\n+\n+ private TestType currentTestType = TestType.DEFAULT;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"w\" }));\n+ }\n+\n+ @Test\n+ public void testPageRankDenseCP() {\n+ runPageRankTest(TEST_NAME1, true, false, ExecType.CP, TestType.DEFAULT);\n+ }\n+\n+ @Test\n+ public void testPageRankSparseCP() {\n+ runPageRankTest(TEST_NAME1, true, true, ExecType.CP, TestType.DEFAULT);\n+ }\n+\n+ @Test\n+ public void testPageRankDenseCPFuseAll() {\n+ runPageRankTest(TEST_NAME1, true, false, ExecType.CP, TestType.FUSE_ALL);\n+ }\n+\n+ @Test\n+ public void testPageRankSparseCPFuseAll() {\n+ runPageRankTest(TEST_NAME1, true, true, ExecType.CP, TestType.FUSE_ALL);\n+ }\n+\n+ @Test\n+ public void testPageRankDenseCPFuseNoRedundancy() {\n+ runPageRankTest(TEST_NAME1, true, false, ExecType.CP, TestType.FUSE_NO_REDUNDANCY);\n+ }\n+\n+ @Test\n+ public void testPageRankSparseCPFuseNoRedundancy() {\n+ runPageRankTest(TEST_NAME1, true, true, ExecType.CP, TestType.FUSE_NO_REDUNDANCY);\n+ }\n+\n+ private void runPageRankTest( String testname, boolean rewrites, boolean sparse, ExecType instType, TestType testType)\n+ {\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( instType ){\n+ case MR: rtplatform 
= RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+ currentTestType = testType;\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ String TEST_NAME = testname;\n+ TestConfiguration config = getTestConfiguration(TEST_NAME);\n+ loadTestConfiguration(config);\n+\n+ fullDMLScriptName = \"scripts/staging/PageRank.dml\";\n+ programArgs = new String[]{ \"-explain\", \"-stats\", \"-args\", input(\"G\"),\n+ input(\"p\"), input(\"e\"), input(\"u\"), String.valueOf(alpha),\n+ String.valueOf(maxiter), output(\"p\")};\n+ rCmd = getRCmd(inputDir(), String.valueOf(alpha),\n+ String.valueOf(maxiter), expectedDir());\n+\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+ //TODO test both with and without operator fusion\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = false;\n+\n+ //generate actual datasets\n+ double[][] G = getRandomMatrix(rows, cols, 1, 1, sparse?sparsity2:sparsity1, 234);\n+ writeInputMatrixWithMTD(\"G\", G, true);\n+ writeInputMatrixWithMTD(\"p\", getRandomMatrix(cols, 1, 0, 1e-14, 1, 71), true);\n+ writeInputMatrixWithMTD(\"e\", getRandomMatrix(rows, 1, 0, 1e-14, 1, 72), true);\n+ writeInputMatrixWithMTD(\"u\", getRandomMatrix(1, cols, 0, 1e-14, 1, 73), true);\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dml = readDMLMatrixFromHDFS(\"p\");\n+ HashMap<CellIndex, Double> r = readRMatrixFromFS(\"p\");\n+ TestUtils.compareMatrices(dml, r, eps, \"Stat-DML\", \"Stat-R\");\n+ Assert.assertTrue(heavyHittersContainsSubString(\"spoofRA\")\n+ || heavyHittersContainsSubString(\"sp_spoofRA\"));\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+\n+ /**\n+ * Override default configuration with custom test configuration to ensure\n+ * scratch space and local temporary directory locations are also updated.\n+ */\n+ @Override\n+ protected File getConfigTemplateFile() {\n+ // Instrumentation in this test's output log to show custom configuration file used for template.\n+ String message = \"This test case overrides default configuration with \";\n+ if(currentTestType == TestType.FUSE_ALL){\n+ System.out.println(message + TEST_CONF_FILE_FUSE_ALL.getPath());\n+ return TEST_CONF_FILE_FUSE_ALL;\n+ } else if(currentTestType == TestType.FUSE_NO_REDUNDANCY){\n+ System.out.println(message + TEST_CONF_FILE_FUSE_NO_REDUNDANCY.getPath());\n+ return TEST_CONF_FILE_FUSE_NO_REDUNDANCY;\n+ } else {\n+ System.out.println(message + TEST_CONF_FILE_DEFAULT.getPath());\n+ return TEST_CONF_FILE_DEFAULT;\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegenalg/Algorithm_PageRank.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+\n+G = readMM(paste(args[1], \"G.mtx\", sep=\"\"));\n+p = as.matrix(readMM(paste(args[1], \"p.mtx\", sep=\"\")));\n+e = as.matrix(readMM(paste(args[1], \"e.mtx\", sep=\"\")));\n+u = as.matrix(readMM(paste(args[1], \"u.mtx\", sep=\"\")));\n+alpha = as.double(args[2]);\n+max_iteration = as.integer(args[3]);\n+i = 0;\n+\n+while( i < max_iteration ) {\n+ p = alpha * (G %*% p) + (1 - alpha) * (e %*% (u %*% p));\n+ i = i + 1;\n+}\n+\n+writeMM(as(p,\"CsparseMatrix\"), paste(args[4], \"p\", sep=\"\"));\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/codegenalg/SystemML-config-codegen-fuse-all.xml", "new_path": "src/test/scripts/functions/codegenalg/SystemML-config-codegen-fuse-all.xml", "diff": "<root>\n<sysml.localtmpdir>/tmp/systemml</sysml.localtmpdir>\n<sysml.scratch>scratch_space</sysml.scratch>\n- <sysml.optlevel>7</sysml.optlevel>\n+ <sysml.optlevel>6</sysml.optlevel>\n<sysml.codegen.enabled>true</sysml.codegen.enabled>\n<sysml.codegen.plancache>true</sysml.codegen.plancache>\n<sysml.codegen.literals>1</sysml.codegen.literals>\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/codegenalg/SystemML-config-codegen-fuse-no-redundancy.xml", "new_path": "src/test/scripts/functions/codegenalg/SystemML-config-codegen-fuse-no-redundancy.xml", "diff": "<root>\n<sysml.localtmpdir>/tmp/systemml</sysml.localtmpdir>\n<sysml.scratch>scratch_space</sysml.scratch>\n- <sysml.optlevel>7</sysml.optlevel>\n+ <sysml.optlevel>6</sysml.optlevel>\n<sysml.codegen.enabled>true</sysml.codegen.enabled>\n<sysml.codegen.plancache>true</sysml.codegen.plancache>\n<sysml.codegen.literals>1</sysml.codegen.literals>\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/codegenalg/SystemML-config-codegen.xml", "new_path": "src/test/scripts/functions/codegenalg/SystemML-config-codegen.xml", "diff": "<root>\n<sysml.localtmpdir>/tmp/systemml</sysml.localtmpdir>\n<sysml.scratch>scratch_space</sysml.scratch>\n- <sysml.optlevel>7</sysml.optlevel>\n+ <sysml.optlevel>6</sysml.optlevel>\n<sysml.codegen.enabled>true</sysml.codegen.enabled>\n<sysml.codegen.plancache>true</sysml.codegen.plancache>\n<sysml.codegen.literals>1</sysml.codegen.literals>\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegenalg/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegenalg/ZPackageSuite.java", "diff": "@@ -36,6 +36,7 @@ import org.junit.runners.Suite;\nAlgorithmMDABivar.class,\nAlgorithmMLogreg.class,\nAlgorithmMSVM.class,\n+ AlgorithmPageRank.class,\nAlgorithmPNMF.class,\nAlgorithmStepwiseRegression.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2121] Add PageRank to staging algorithms and codegen tests
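For reference, the update this test exercises (from the R script above: p = alpha * (G %*% p) + (1 - alpha) * (e %*% (u %*% p))) can be written out as a small dense-array sketch. This is illustrative only, not SystemML code; the names G, p, e, u, and alpha follow the scripts, while the class and method names are made up:

public class PageRankSketch {
    // One iteration loop of the PageRank update used by the test's R/DML scripts.
    static double[] iterate(double[][] G, double[] p, double[] e, double[] u,
                            double alpha, int maxIter) {
        int n = p.length;
        for (int it = 0; it < maxIter; it++) {
            double s = 0;                       // s = u %*% p (scalar)
            for (int j = 0; j < n; j++)
                s += u[j] * p[j];
            double[] next = new double[n];
            for (int i = 0; i < n; i++) {
                double gp = 0;                  // (G %*% p)[i]
                for (int j = 0; j < n; j++)
                    gp += G[i][j] * p[j];
                next[i] = alpha * gp + (1 - alpha) * e[i] * s;
            }
            p = next;
        }
        return p;
    }
    public static void main(String[] args) {
        double[][] G = {{0, 1}, {1, 0}};
        double[] p = {0.5, 0.5}, e = {0.5, 0.5}, u = {1, 1};
        double[] r = iterate(G, p, e, u, 0.85, 10);
        System.out.println(r[0] + " " + r[1]);
    }
}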
49,736
01.06.2018 21:25:40
25,200
86f0e3f705874877c261a4325a11fa45dfe851cc
Added CP implementation for batch_norm2d and batch_norm2d_backward. This feature is required for the NN tests. The current version of batch_norm2d_backward only supports dense image and dense dout; this will be fixed in the future.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops;\nimport java.util.ArrayList;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.lops.FunctionCallCP;\nimport org.apache.sysml.lops.FunctionCallCPSingle;\nimport org.apache.sysml.lops.Lop;\n@@ -168,10 +169,22 @@ public class FunctionOp extends Hop\nlong outputValues = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(1).getDim1(), 1, 1.0);\nreturn outputVectors+outputValues;\n}\n- else if ( getFunctionName().equalsIgnoreCase(\"lstm\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\") ) {\n+ else if ( getFunctionName().equalsIgnoreCase(\"lstm\") ) {\n// TODO: To allow for initial version to always run on the GPU\nreturn 0;\n}\n+ else if ( getFunctionName().equalsIgnoreCase(\"batch_norm2d\") ) {\n+ return OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(0).getDim1(), getOutputs().get(0).getDim2(), 1.0) +\n+ OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(1).getDim1(), getOutputs().get(1).getDim2(), 1.0) +\n+ OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(2).getDim1(), getOutputs().get(2).getDim2(), 1.0) +\n+ OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(3).getDim1(), getOutputs().get(3).getDim2(), 1.0) +\n+ OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(4).getDim1(), getOutputs().get(4).getDim2(), 1.0);\n+ }\n+ else if ( getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\") ) {\n+ return OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(0).getDim1(), getOutputs().get(0).getDim2(), 1.0) +\n+ OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(1).getDim1(), getOutputs().get(1).getDim2(), 1.0) +\n+ OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(2).getDim1(), getOutputs().get(2).getDim2(), 1.0);\n+ }\nelse if ( getFunctionName().equalsIgnoreCase(\"svd\") ) {\nlong outputU = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(0).getDim1(), getOutputs().get(0).getDim2(), 1.0);\nlong outputSigma = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(1).getDim1(), getOutputs().get(1).getDim2(), 1.0);\n@@ -202,7 +215,10 @@ public class FunctionOp extends Hop\nreturn OptimizerUtils.estimateSizeExactSparsity(getInput().get(0).getDim1(), getInput().get(0).getDim2(), 1.0)\n+ 3*OptimizerUtils.estimateSizeExactSparsity(getInput().get(0).getDim1(), 1, 1.0);\n}\n- else if ( getFunctionName().equalsIgnoreCase(\"lstm\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\")) {\n+ else if (getFunctionName().equalsIgnoreCase(\"batch_norm2d\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\")) {\n+ return 0;\n+ }\n+ else if ( getFunctionName().equalsIgnoreCase(\"lstm\") ) {\n// TODO: To allow for initial version to always run on the GPU\nreturn 0;\n}\n@@ -274,15 +290,20 @@ public class FunctionOp extends Hop\n|| (getMemEstimate() >= OptimizerUtils.getLocalMemBudget()\n&& OptimizerUtils.isSparkExecutionMode())) ? 
ExecType.SPARK : ExecType.CP);\n}\n- else if( getFunctionName().equalsIgnoreCase(\"lstm\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\")) {\n-// if ( OptimizerUtils.isMemoryBasedOptLevel() ) {\n-// _etype = findExecTypeByMemEstimate();\n-// }\n-// else {\n-// _etype = ExecType.CP;\n-// }\n-// _etype = _etype == REMOTE ? ExecType.CP : _etype; // lstm not supported on Spark\n+ else if( getFunctionName().equalsIgnoreCase(\"lstm\")) {\n+ if(DMLScript.USE_ACCELERATOR)\n_etype = ExecType.GPU;\n+ else\n+ throw new RuntimeException(\"The function \" + getFunctionName() + \" is only supported on GPU.\");\n+ }\n+ else if( getFunctionName().equalsIgnoreCase(\"batch_norm2d\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\")) {\n+ if ( OptimizerUtils.isMemoryBasedOptLevel() ) {\n+ _etype = findExecTypeByMemEstimate();\n+ }\n+ else {\n+ _etype = ExecType.CP;\n+ }\n+ _etype = _etype == REMOTE ? ExecType.CP : _etype; // batch_norm2d and batch_norm2d_backward are not supported on Spark\n}\nelse {\n// Since the memory estimate is only conservative, do not throw\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "diff": "@@ -247,6 +247,8 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"bias_add\" , CPType.Convolution);\nString2CPInstructionType.put( \"bias_multiply\" , CPType.Convolution);\nString2CPInstructionType.put( \"channel_sums\" , CPType.Convolution);\n+ String2CPInstructionType.put( \"batch_norm2d\", CPType.Convolution);\n+ String2CPInstructionType.put( \"batch_norm2d_backward\", CPType.Convolution);\n// Quaternary instruction opcodes\nString2CPInstructionType.put( \"wsloss\" , CPType.Quaternary);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "diff": "package org.apache.sysml.runtime.instructions.cp;\nimport java.util.ArrayList;\n-\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n-import org.apache.sysml.runtime.functionobjects.KahanPlus;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.matrix.data.ConvolutionParameters;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN.PoolingType;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixNative;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n-import org.apache.sysml.runtime.matrix.data.SparseBlock;\nimport org.apache.sysml.runtime.util.ConvolutionUtils;\nimport org.apache.sysml.utils.NativeHelper;\n@@ -44,6 +41,15 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nprivate final CPOperand _in2;\nprivate final CPOperand _in3;\n+ private final CPOperand _in4;\n+ private final CPOperand _in5;\n+ private final CPOperand _in6;\n+ private final CPOperand _in7;\n+ private final CPOperand _in8;\n+ private final CPOperand _out2;\n+ private final CPOperand _out3;\n+ private final CPOperand 
_out4;\n+ private final CPOperand _out5;\nprivate final ArrayList<CPOperand> _input_shape;\nprivate final ArrayList<CPOperand> _filter_shape;\nprivate final ArrayList<CPOperand> _stride;\n@@ -57,6 +63,8 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nsuper(CPType.Convolution, null, in, out, opcode, istr);\n_in2 = in2;\n_in3 = in3;\n+ _in4 = null; _in5 = null; _in6 = null; _in7 = null; _in8 = null;\n+ _out2 = null; _out3 = null; _out4 = null; _out5 = null;\n_stride = stride;\n_padding = padding;\n_input_shape = input_shape;\n@@ -99,6 +107,30 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nthis(in, in2, in3, out, stride, padding, input_shape, filter_shape, numThreads, intermediateMemoryBudget, opcode, istr);\n}\n+ public ConvolutionCPInstruction(CPOperand in1, CPOperand in2, CPOperand in3, CPOperand in4, CPOperand in5,\n+ CPOperand in6, CPOperand in7, CPOperand in8,\n+ CPOperand out, CPOperand out2, CPOperand out3, CPOperand out4, CPOperand out5, String opcode, String istr,\n+ double intermediateMemoryBudget) throws DMLRuntimeException {\n+ super(CPType.Convolution, null, in1, out, opcode, istr);\n+ _in2 = in2;\n+ _in3 = in3;\n+ _in4 = in4;\n+ _in5 = in5;\n+ _in6 = in6;\n+ _in7 = in7;\n+ _in8 = in8;\n+ _out2 = out2;\n+ _out3 = out3;\n+ _out4 = out4;\n+ _out5 = out5;\n+ _stride = null;\n+ _padding = null;\n+ _input_shape = null;\n+ _filter_shape = null;\n+ _numThreads = 0;\n+ _intermediateMemoryBudget = intermediateMemoryBudget;\n+ }\n+\npublic static ConvolutionCPInstruction parseInstruction(String str) {\nString[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n@@ -214,6 +246,36 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nCPOperand out = new CPOperand(parts[4]);\nreturn new ConvolutionCPInstruction(in, in2, in3, out, opcode, str, -1, 0);\n}\n+ else if (opcode.equalsIgnoreCase(\"batch_norm2d\")) {\n+ InstructionUtils.checkNumFields(parts, 13);\n+ CPOperand in1 = new CPOperand(parts[1]); // image\n+ CPOperand in2 = new CPOperand(parts[2]); // scale\n+ CPOperand in3 = new CPOperand(parts[3]); // bias\n+ CPOperand in4 = new CPOperand(parts[4]); // runningMean\n+ CPOperand in5 = new CPOperand(parts[5]); // runningVar\n+ CPOperand in6 = new CPOperand(parts[6]); // mode\n+ CPOperand in7 = new CPOperand(parts[7]); // epsilon\n+ CPOperand in8 = new CPOperand(parts[8]); // exponentialAverageFactor\n+ CPOperand out = new CPOperand(parts[9]); // ret\n+ CPOperand out2 = new CPOperand(parts[10]); // retRunningMean\n+ CPOperand out3 = new CPOperand(parts[11]); // retRunningVar\n+ CPOperand out4 = new CPOperand(parts[12]); // resultSaveMean\n+ CPOperand out5 = new CPOperand(parts[13]); // resultSaveInvVariance\n+ return new ConvolutionCPInstruction(in1, in2, in3, in4, in5, in6, in7, in8, out, out2, out3, out4, out5, opcode, str, 0);\n+ }\n+ else if (opcode.equalsIgnoreCase(\"batch_norm2d_backward\")) {\n+ InstructionUtils.checkNumFields(parts, 9);\n+ CPOperand in1 = new CPOperand(parts[1]); // image\n+ CPOperand in2 = new CPOperand(parts[2]); // dout\n+ CPOperand in3 = new CPOperand(parts[3]); // scale\n+ CPOperand in4 = new CPOperand(parts[4]); // epsilon\n+ CPOperand in5 = new CPOperand(parts[5]); // resultSaveMean\n+ CPOperand in6 = new CPOperand(parts[6]); // resultSaveInvVariance\n+ CPOperand out = new CPOperand(parts[7]); // dX\n+ CPOperand out2 = new CPOperand(parts[8]); // dScale\n+ CPOperand out3 = new CPOperand(parts[9]); // dBias\n+ return new ConvolutionCPInstruction(in1, in2, in3, in4, in5, in6, null, 
null, out, out2, out3, null, null, opcode, str, 0);\n+ }\nelse {\nthrow new DMLRuntimeException(\"Unknown opcode while parsing a ConvolutionCPInstruction: \" + str);\n}\n@@ -309,52 +371,74 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\nelse {\noutputBlock = new MatrixBlock(C, 1, false).allocateBlock();\n- double [] output = outputBlock.getDenseBlockValues();\n- if(input.isInSparseFormat()) {\n- SparseBlock sblock = input.getSparseBlock();\n- for(int n = 0; n < input.getNumRows(); n++) {\n- if( sblock.isEmpty(n) )\n- continue;\n- int apos = sblock.pos(n);\n- int alen = sblock.size(n);\n- int[] aix = sblock.indexes(n);\n- double[] avals = sblock.values(n);\n-\n- // Iterate over the sparse block\n- for(int j=apos; j<apos+alen; j++) {\n- // Note: the input is of shape [N, CHW]\n- int chw = aix[j];\n-\n- // Get individual zero-based c,h,w indexes from zero-based 'chw'\n- int c = chw / HW;\n- output[c] += avals[j];\n- }\n- }\n- }\n- else {\n- double [] inArr = input.getDenseBlockValues();\n- if(inArr != null) {\n- KahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n- for(int c = 0; c < C; c++) {\n- KahanObject sum = new KahanObject(0.0, 0.0);\n- for(int n = 0; n < input.getNumRows(); n++) {\n- int index = n*C*HW + c*HW;\n- for(int hw = 0; hw < HW; hw++, index++) {\n- kplus.execute2(sum, inArr[index]);\n- }\n- }\n- output[c] = sum._sum;\n- }\n+ LibMatrixDNN.channelSums(input, outputBlock, C, HW);\n}\n+\n+ // release inputs/outputs\n+ ec.releaseMatrixInput(input1.getName(), getExtendedOpcode());\n+ ec.setMatrixOutput(getOutputVariableName(), outputBlock, getExtendedOpcode());\n}\n- outputBlock.recomputeNonZeros();\n+\n+\n+\n+ public void processBatchNorm2dInstruction(ExecutionContext ec) {\n+ MatrixBlock image = ec.getMatrixInput(input1.getName(), getExtendedOpcode());\n+ MatrixBlock scale = ec.getMatrixInput(_in2.getName(), getExtendedOpcode());\n+ MatrixBlock bias = ec.getMatrixInput(_in3.getName(), getExtendedOpcode());\n+ MatrixBlock runningMean = ec.getMatrixInput(_in4.getName(), getExtendedOpcode());\n+ MatrixBlock runningVar = ec.getMatrixInput(_in5.getName(), getExtendedOpcode());\n+ String phase = ec.getScalarInput(_in6.getName(), _in6.getValueType(), _in6.isLiteral()).getStringValue();\n+ double epsilon = ec.getScalarInput(_in7.getName(), _in7.getValueType(), _in7.isLiteral()).getDoubleValue();\n+ double mu = ec.getScalarInput(_in8.getName(), _in8.getValueType(), _in8.isLiteral()).getDoubleValue();\n+\n+ MatrixBlock ret = new MatrixBlock(image.getNumRows(), image.getNumColumns(), false).allocateBlock();\n+ MatrixBlock retRunningMean = new MatrixBlock(runningMean.getNumRows(), runningMean.getNumColumns(), false).allocateBlock();\n+ MatrixBlock retRunningVar = new MatrixBlock(runningVar.getNumRows(), runningVar.getNumColumns(), false).allocateBlock();\n+ MatrixBlock resultSaveMean = new MatrixBlock(runningMean.getNumRows(), runningMean.getNumColumns(), false).allocateBlock();\n+ MatrixBlock resultSaveInvVariance = new MatrixBlock(runningVar.getNumRows(), runningVar.getNumColumns(), false).allocateBlock();\n+\n+ LibMatrixDNN.batchNorm2D(image, scale, bias, runningMean, runningVar, phase, epsilon, mu, ret,\n+ retRunningMean, retRunningVar, resultSaveMean, resultSaveInvVariance);\n+\n+ // release inputs/outputs\n+ ec.releaseMatrixInput(input1.getName(), getExtendedOpcode());\n+ ec.releaseMatrixInput(_in2.getName(), getExtendedOpcode());\n+ ec.releaseMatrixInput(_in3.getName(), getExtendedOpcode());\n+ ec.releaseMatrixInput(_in4.getName(), getExtendedOpcode());\n+ 
ec.releaseMatrixInput(_in5.getName(), getExtendedOpcode());\n+ ec.setMatrixOutput(output.getName(), ret, getExtendedOpcode());\n+ ec.setMatrixOutput(_out2.getName(), retRunningMean, getExtendedOpcode());\n+ ec.setMatrixOutput(_out3.getName(), retRunningVar, getExtendedOpcode());\n+ ec.setMatrixOutput(_out4.getName(), resultSaveMean, getExtendedOpcode());\n+ ec.setMatrixOutput(_out5.getName(), resultSaveInvVariance, getExtendedOpcode());\n}\n+ public void processBatchNorm2dBackwardInstruction(ExecutionContext ec) {\n+ MatrixBlock image = ec.getMatrixInput(input1.getName(), getExtendedOpcode());\n+ MatrixBlock dout = ec.getMatrixInput(_in2.getName(), getExtendedOpcode());\n+ MatrixBlock scale = ec.getMatrixInput(_in3.getName(), getExtendedOpcode());\n+ double epsilon = ec.getScalarInput(_in4.getName(), _in4.getValueType(), _in4.isLiteral()).getDoubleValue();\n+ MatrixBlock resultSaveMean = ec.getMatrixInput(_in5.getName(), getExtendedOpcode());\n+ MatrixBlock resultSaveInvVariance = ec.getMatrixInput(_in6.getName(), getExtendedOpcode());\n+\n+ MatrixBlock dX = new MatrixBlock(image.getNumRows(), image.getNumColumns(), false).allocateBlock();\n+ MatrixBlock dScale = new MatrixBlock(scale.getNumRows(), scale.getNumColumns(), false).allocateBlock();\n+ MatrixBlock dBias = new MatrixBlock(scale.getNumRows(), scale.getNumColumns(), false).allocateBlock();\n+\n+ LibMatrixDNN.batchNorm2DBackward(image, dout, scale, epsilon, resultSaveMean, resultSaveInvVariance, dX, dScale, dBias);\n+\n// release inputs/outputs\nec.releaseMatrixInput(input1.getName(), getExtendedOpcode());\n- ec.setMatrixOutput(getOutputVariableName(), outputBlock, getExtendedOpcode());\n+ ec.releaseMatrixInput(_in2.getName(), getExtendedOpcode());\n+ ec.releaseMatrixInput(_in3.getName(), getExtendedOpcode());\n+ ec.releaseMatrixInput(_in5.getName(), getExtendedOpcode());\n+ ec.releaseMatrixInput(_in6.getName(), getExtendedOpcode());\n+ ec.setMatrixOutput(output.getName(), dX, getExtendedOpcode());\n+ ec.setMatrixOutput(_out2.getName(), dScale, getExtendedOpcode());\n+ ec.setMatrixOutput(_out3.getName(), dBias, getExtendedOpcode());\n}\n+\n// Assumption: enableNative && NativeHelper.isNativeLibraryLoaded() is true\n// This increases the number of native calls. 
For example:the cases where filter is sparse but input is dense\nprivate static boolean isFilterSparse(MatrixBlock filter) {\n@@ -385,6 +469,14 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nprocessChannelSumsInstruction(ec);\nreturn;\n}\n+ else if (instOpcode.equalsIgnoreCase(\"batch_norm2d\")) {\n+ processBatchNorm2dInstruction(ec);\n+ return;\n+ }\n+ else if (instOpcode.equalsIgnoreCase(\"batch_norm2d_backward\")) {\n+ processBatchNorm2dBackwardInstruction(ec);\n+ return;\n+ }\n// acquire inputs\nMatrixBlock outputBlock = null;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "package org.apache.sysml.runtime.matrix.data;\nimport java.util.ArrayList;\n+import java.util.Arrays;\nimport java.util.List;\nimport java.util.concurrent.Callable;\nimport java.util.concurrent.ExecutorService;\n@@ -30,6 +31,8 @@ import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.functionobjects.KahanPlus;\n+import org.apache.sysml.runtime.instructions.cp.KahanObject;\nimport org.apache.sysml.runtime.util.CommonThreadPool;\nimport org.apache.sysml.runtime.util.ConvolutionUtils;\n@@ -332,6 +335,325 @@ public class LibMatrixDNN {\noutputBlock.examSparsity();\n}\n+ /**\n+ * Perform channel sum operation\n+ *\n+ * @param input input matrix block\n+ * @param outputBlock output matrix block\n+ * @param C number of channels\n+ * @param HW height X width\n+ */\n+ public static void channelSums(MatrixBlock input, MatrixBlock outputBlock, int C, int HW) {\n+ double [] output = outputBlock.getDenseBlockValues();\n+ if(input.isInSparseFormat()) {\n+ SparseBlock sblock = input.getSparseBlock();\n+ for(int n = 0; n < input.getNumRows(); n++) {\n+ if( sblock.isEmpty(n) )\n+ continue;\n+ int apos = sblock.pos(n);\n+ int alen = sblock.size(n);\n+ int[] aix = sblock.indexes(n);\n+ double[] avals = sblock.values(n);\n+\n+ // Iterate over the sparse block\n+ for(int j=apos; j<apos+alen; j++) {\n+ // Note: the input is of shape [N, CHW]\n+ int chw = aix[j];\n+\n+ // Get individual zero-based c,h,w indexes from zero-based 'chw'\n+ int c = chw / HW;\n+ output[c] += avals[j];\n+ }\n+ }\n+ }\n+ else {\n+ double [] inArr = input.getDenseBlockValues();\n+ if(inArr != null) {\n+ KahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n+ for(int c = 0; c < C; c++) {\n+ KahanObject sum = new KahanObject(0.0, 0.0);\n+ for(int n = 0; n < input.getNumRows(); n++) {\n+ int index = n*C*HW + c*HW;\n+ for(int hw = 0; hw < HW; hw++, index++) {\n+ kplus.execute2(sum, inArr[index]);\n+ }\n+ }\n+ output[c] = sum._sum;\n+ }\n+ }\n+ }\n+ outputBlock.recomputeNonZeros();\n+ }\n+\n+ public static void batchNorm2DBackward(MatrixBlock image, MatrixBlock dout, MatrixBlock scale, double epsilon,\n+ MatrixBlock resultSaveMean, MatrixBlock resultSaveInvVariance,\n+ MatrixBlock dX, MatrixBlock dScale, MatrixBlock dBias) {\n+ int N = image.getNumRows();\n+ int K = scale.getNumRows();\n+ int PQ = image.getNumColumns() / K;\n+ channelSums(image, dBias, K, PQ);\n+ // Since output\n+ if(dBias.isInSparseFormat())\n+ dBias.sparseToDense();\n+ if(dScale.isInSparseFormat())\n+ dScale.sparseToDense();\n+ if(dX.isInSparseFormat())\n+ dX.sparseToDense();\n+ // Very small matrices\n+ if(resultSaveMean.isInSparseFormat())\n+ 
resultSaveMean.sparseToDense();\n+ if(resultSaveInvVariance.isInSparseFormat())\n+ resultSaveInvVariance.sparseToDense();\n+ if(scale.isInSparseFormat())\n+ scale.sparseToDense();\n+ double [] dBiasArr = dBias.getDenseBlockValues();\n+ double [] dScaleArr = dScale.getDenseBlockValues();\n+ double [] dXArr = dX.getDenseBlockValues();\n+ double [] mean = resultSaveMean.getDenseBlockValues();\n+ double [] invVar = resultSaveInvVariance.getDenseBlockValues();\n+ double [] scaleArr = scale.getDenseBlockValues();\n+ // since K is relatively small, it reduces code complexity. We can avoid this in subsequent commits.\n+ mean = (mean==null) ? new double[K] : mean;\n+ invVar = (invVar==null) ? new double[K] : invVar;\n+ scaleArr = (scaleArr == null) ? new double[K] : scaleArr;\n+\n+ // TODO: Handle sparse image and dout cases:\n+ if(image.isInSparseFormat())\n+ image.sparseToDense();\n+ if(dout.isInSparseFormat())\n+ dout.sparseToDense();\n+\n+ if(!image.isInSparseFormat() && !dout.isInSparseFormat()) {\n+ double [] imageArr = image.getDenseBlockValues();\n+ double [] doutArr = dout.getDenseBlockValues();\n+ double constant1 = Math.pow(N*PQ, -1);\n+ int KPQ = K*PQ;\n+ for(int k = 0; k < K; k++) {\n+ double dvar = 0;\n+ double dmean_norm_branch = 0; double dmean_var_branch = 0;\n+ double sumDout = 0; double sum = 0;\n+ for(int n = 0; n < N; n++) {\n+ int index = n*KPQ + k*PQ;\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ double doutVal = doutArr != null ? doutArr[index] : 0;\n+ double centered = imageArr != null ? imageArr[index] : 0;\n+ centered -= mean[k];\n+ double dnorm = doutVal*scaleArr[k];\n+ dvar -= 0.5*centered*Math.pow(invVar[k], 3)*dnorm;\n+ dmean_norm_branch -= dnorm*invVar[k];\n+ sum += centered * invVar[k] * doutVal;\n+ sumDout += doutVal;\n+ dmean_var_branch -= 2*constant1*centered;\n+ }\n+ }\n+ dBiasArr[k] = sumDout;\n+ dScaleArr[k] = sum;\n+ dmean_var_branch *= dvar;\n+ double dmean = dmean_norm_branch + dmean_var_branch;\n+ double dX_mean_branch = constant1*dmean;\n+\n+ for(int n = 0; n < N; n++) {\n+ int index = n*KPQ + k*PQ;\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ double doutVal = doutArr != null ? doutArr[index] : 0;\n+ double centered = imageArr != null ? 
imageArr[index] : 0;\n+ centered -= mean[k];\n+ double dnorm = doutVal*scaleArr[k];\n+ double dX_norm_branch = dnorm*invVar[k];\n+ double dX_var_branch = 2*constant1*centered*dvar;\n+ dXArr[index] = dX_norm_branch + dX_mean_branch + dX_var_branch;\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Sparse format is not yet supported for batch norm backward\");\n+ }\n+ dBias.recomputeNonZeros();\n+ dScale.recomputeNonZeros();\n+ dX.recomputeNonZeros();\n+ }\n+\n+ public static void batchNorm2D(MatrixBlock image, MatrixBlock scale, MatrixBlock bias, MatrixBlock runningMean,\n+ MatrixBlock runningVar, String phase, double epsilon, double mu,\n+ MatrixBlock ret, MatrixBlock retRunningMean, MatrixBlock retRunningVar,\n+ MatrixBlock resultSaveMean, MatrixBlock resultSaveInvVariance) {\n+ // Since bias, scale, runningMean, runningVar are extremely small array\n+ if(bias.isInSparseFormat())\n+ bias.sparseToDense();\n+ double [] biasArr = bias.getDenseBlockValues();\n+ if(scale.isInSparseFormat())\n+ scale.sparseToDense();\n+ double [] scaleArr = scale.getDenseBlockValues();\n+ if(runningMean.isInSparseFormat())\n+ runningMean.sparseToDense();\n+ double [] runningMeanArr = runningMean.getDenseBlockValues(); // ema_mean\n+ if(runningVar.isInSparseFormat())\n+ runningVar.sparseToDense();\n+ double [] runningVarArr = runningVar.getDenseBlockValues(); // ema_var\n+\n+ double [] retRunningMeanArr = retRunningMean.getDenseBlockValues(); // ema_mean_upd\n+ double [] retRunningVarArr = retRunningVar.getDenseBlockValues(); // ema_var_upd\n+ double [] resultSaveMeanArr = resultSaveMean.getDenseBlockValues(); // cache_mean\n+ double [] resultSaveInvVarianceArr = resultSaveInvVariance.getDenseBlockValues(); // cache_inv_var\n+\n+ int N = image.getNumRows();\n+ int K = bias.getNumRows(); // number of output channels\n+ int PQ = image.getNumColumns() / K; // output height X output width\n+\n+ if(phase.equalsIgnoreCase(\"train\")) {\n+ computeBiasSumAndSumSquares(image, resultSaveMeanArr, resultSaveInvVarianceArr, K, PQ);\n+ int NPQ = N*PQ;\n+ for(int k = 0; k < K; k++) {\n+ double mean = resultSaveMeanArr[k] / NPQ;\n+ double var = resultSaveInvVarianceArr[k]/NPQ - Math.pow(mean, 2.0);\n+ resultSaveMeanArr[k] = mean;\n+ resultSaveInvVarianceArr[k] = Math.pow(Math.sqrt(var + epsilon), -1.0);\n+ retRunningMeanArr[k] = mu*runningMeanArr[k] + (1-mu)*mean;\n+ retRunningVarArr[k] = mu*runningVarArr[k] + (1-mu)*mean;\n+ }\n+ }\n+ else if(phase.equalsIgnoreCase(\"test\")) {\n+ copy(runningMean, retRunningMeanArr); // ema_mean_upd = ema_mean\n+ copy(runningVar, retRunningVarArr); // ema_var_upd = ema_var\n+ copy(runningMean, resultSaveMeanArr); // cache_mean = ema_mean\n+ double invSqrtEps = Math.pow(Math.sqrt(epsilon), -1.0);\n+ double [] inArr = runningVar.getDenseBlockValues();\n+ if(inArr != null) {\n+ for(int i = 0; i < inArr.length; i++) {\n+ resultSaveInvVarianceArr[i] = Math.pow(Math.sqrt(inArr[i] + epsilon), -1.0);\n+ }\n+ }\n+ else {\n+ Arrays.fill(resultSaveInvVarianceArr, invSqrtEps);\n+ }\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Incorrect mode: Expected either train or test, but found \" + phase);\n+ }\n+\n+ // Normalize, shift, and scale\n+ double [] retArr = ret.getDenseBlockValues();\n+ copy(image, retArr);\n+ if(resultSaveMean != null && resultSaveInvVariance != null && biasArr != null && scaleArr != null) {\n+ // Common scenario:\n+ int index = 0;\n+ for(int n = 0; n < N; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ retArr[index] = 
(retArr[index]-resultSaveMeanArr[k])*resultSaveInvVarianceArr[k]*scaleArr[k] + biasArr[k];\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ addBias(retArr, resultSaveMeanArr, -1, N, K, PQ);\n+ multiplyBias(retArr, resultSaveInvVarianceArr, N, K, PQ);\n+ multiplyBias(retArr, scaleArr, N, K, PQ);\n+ addBias(retArr, biasArr, 1, N, K, PQ);\n+ }\n+ ret.recomputeNonZeros();\n+ retRunningMean.recomputeNonZeros();\n+ retRunningVar.recomputeNonZeros();\n+ resultSaveMean.recomputeNonZeros();\n+ resultSaveInvVariance.recomputeNonZeros();\n+ }\n+\n+ private static void copy(MatrixBlock input, double [] output) {\n+ if(input.isInSparseFormat()) {\n+ SparseBlock sblock = input.getSparseBlock();\n+ int numCols = input.getNumColumns();\n+ for(int n = 0; n < input.getNumRows(); n++) {\n+ if( sblock.isEmpty(n) )\n+ continue;\n+ int apos = sblock.pos(n);\n+ int alen = sblock.size(n);\n+ int[] aix = sblock.indexes(n);\n+ double[] avals = sblock.values(n);\n+\n+ // Iterate over the sparse block\n+ for(int j=apos; j<apos+alen; j++) {\n+ output[n*numCols + aix[j]] = avals[j];\n+ }\n+ }\n+ }\n+ else {\n+ double [] inputArr = input.getDenseBlockValues();\n+ if(inputArr != null) {\n+ System.arraycopy(inputArr, 0, output, 0, inputArr.length);\n+ }\n+ }\n+ }\n+\n+ private static void addBias(double [] arr, double [] bias, double biasMultiplier, int N, int K, int PQ) {\n+ int index = 0;\n+ if(bias != null) {\n+ for(int n = 0; n < N; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ arr[index] += biasMultiplier*bias[k];\n+ }\n+ }\n+ }\n+ }\n+ }\n+\n+ private static void multiplyBias(double [] arr, double [] bias, int N, int K, int PQ) {\n+ int index = 0;\n+ if(bias != null) {\n+ for(int n = 0; n < N; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ arr[index] *= bias[k];\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ Arrays.fill(arr, 0);\n+ }\n+ }\n+\n+ private static void computeBiasSumAndSumSquares(MatrixBlock image, double [] sumArr, double [] sumSquaresArr, int K, int PQ) {\n+ if(sumArr.length != K) {\n+ throw new DMLRuntimeException(\"Expected the length of array to be \" + K + \", but instead is \" + sumArr.length);\n+ }\n+ if(sumSquaresArr.length != K) {\n+ throw new DMLRuntimeException(\"Expected the length of array to be \" + K + \", but instead is \" + sumSquaresArr.length);\n+ }\n+ if(image.isInSparseFormat()) {\n+ SparseBlock sblock = image.getSparseBlock();\n+ for(int r = 0; r < image.getNumRows(); r++) {\n+ if( sblock.isEmpty(r) )\n+ continue;\n+ int apos = sblock.pos(r);\n+ int alen = sblock.size(r);\n+ int[] aix = sblock.indexes(r);\n+ double[] avals = sblock.values(r);\n+ for(int j=apos; j<apos+alen; j++) {\n+ int k = aix[j] / PQ;\n+ sumArr[k] += avals[j];\n+ sumSquaresArr[k] += Math.pow(avals[j], 2.0);\n+ }\n+ }\n+ }\n+ else {\n+ double [] X = image.getDenseBlockValues();\n+ int N = image.getNumRows();\n+ if(X != null) {\n+ int index = 0;\n+ for(int n = 0; n < N; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ sumArr[k] += X[index];\n+ sumSquaresArr[k] += Math.pow(X[index], 2.0);\n+ }\n+ }\n+ }\n+ }\n+ }\n+ }\n+\n/**\n* Performs the operation corresponding to the DML script:\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Added CP implementation for batch_norm2d and batch_norm2d_backward. - This feature is required for the NN tests. - The current version of batch_norm2d_backward only supports dense image and dense dout; this will be fixed in the future.
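As a minimal sketch of the training-mode math that LibMatrixDNN.batchNorm2D implements in the diff above: per output channel k, compute mean and variance over all N*PQ cells, then normalize, scale, and shift. The sketch assumes a dense row-major [N, K*PQ] layout (as in the CP instruction) and is not the SystemML API; class and method names are hypothetical:

public class BatchNorm2DSketch {
    // y = (x - mean_k) * invStd_k * scale_k + bias_k, with per-channel statistics.
    static void forwardTrain(double[] x, double[] y, double[] scale, double[] bias,
                             int N, int K, int PQ, double eps) {
        for (int k = 0; k < K; k++) {
            double sum = 0, sumSq = 0;
            for (int n = 0; n < N; n++) {
                int base = n * K * PQ + k * PQ;   // start of channel k in row n
                for (int pq = 0; pq < PQ; pq++) {
                    double v = x[base + pq];
                    sum += v;
                    sumSq += v * v;
                }
            }
            double mean = sum / (N * PQ);
            double var = sumSq / (N * PQ) - mean * mean;   // E[x^2] - E[x]^2, as in the diff
            double invStd = 1.0 / Math.sqrt(var + eps);
            for (int n = 0; n < N; n++) {
                int base = n * K * PQ + k * PQ;
                for (int pq = 0; pq < PQ; pq++)
                    y[base + pq] = (x[base + pq] - mean) * invStd * scale[k] + bias[k];
            }
        }
    }
    public static void main(String[] args) {
        double[] x = {1, 3}, y = new double[2];
        forwardTrain(x, y, new double[]{1}, new double[]{0}, 1, 1, 2, 1e-5);
        System.out.println(y[0] + " " + y[1]); // approximately -1 and 1
    }
}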
49,738
01.06.2018 22:23:52
25,200
7d936cf0c1f4061c1b611378cd2e07a23372cfc0
[HOTFIX] Various fixes for size propagation and batch_norm2d integration
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "diff": "@@ -280,10 +280,8 @@ public class FunctionOp extends Hop\nprotected ExecType optFindExecType()\n{\ncheckAndSetForcedPlatform();\n- ExecType REMOTE = OptimizerUtils.isSparkExecutionMode() ? ExecType.SPARK : ExecType.MR;\nif ( getFunctionType() == FunctionType.MULTIRETURN_BUILTIN ) {\n-\n// check if there is sufficient memory to execute this function\nif( getFunctionName().equalsIgnoreCase(\"transformencode\") ) {\n_etype = ((_etypeForced==ExecType.SPARK\n@@ -291,19 +289,12 @@ public class FunctionOp extends Hop\n&& OptimizerUtils.isSparkExecutionMode())) ? ExecType.SPARK : ExecType.CP);\n}\nelse if( getFunctionName().equalsIgnoreCase(\"lstm\")) {\n- if(DMLScript.USE_ACCELERATOR)\n- _etype = ExecType.GPU;\n- else\n+ if(!DMLScript.USE_ACCELERATOR)\nthrow new RuntimeException(\"The function \" + getFunctionName() + \" is only supported on GPU.\");\n+ _etype = ExecType.GPU;\n}\nelse if( getFunctionName().equalsIgnoreCase(\"batch_norm2d\") || getFunctionName().equalsIgnoreCase(\"batch_norm2d_backward\")) {\n- if ( OptimizerUtils.isMemoryBasedOptLevel() ) {\n- _etype = findExecTypeByMemEstimate();\n- }\n- else {\n- _etype = ExecType.CP;\n- }\n- _etype = _etype == REMOTE ? ExecType.CP : _etype; // batch_norm2d and batch_norm2d_backward are not supported on Spark\n+ _etype = DMLScript.USE_ACCELERATOR ? ExecType.GPU : ExecType.CP;\n}\nelse {\n// Since the memory estimate is only conservative, do not throw\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteConstantFolding.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteConstantFolding.java", "diff": "@@ -104,16 +104,21 @@ public class RewriteConstantFolding extends HopRewriteRule\ncatch(Exception ex) {\nLOG.error(\"Failed to execute constant folding instructions. No abort.\", ex);\n}\n-\n+ }\n+ //fold nrow as precondition for further constant folding\n+ else if( HopRewriteUtils.isUnary(root, OpOp1.NROW) && root.getInput().get(0).rowsKnown() ) {\n+ literal = new LiteralOp(root.getInput().get(0).getDim1());\n+ }\n+ //fold ncol as precondition for further constant folding\n+ else if( HopRewriteUtils.isUnary(root, OpOp1.NCOL) && root.getInput().get(0).colsKnown() ) {\n+ literal = new LiteralOp(root.getInput().get(0).getDim2());\n}\n//fold conjunctive predicate if at least one input is literal 'false'\n- else if( isApplicableFalseConjunctivePredicate(root) )\n- {\n+ else if( isApplicableFalseConjunctivePredicate(root) ) {\nliteral = new LiteralOp(false);\n}\n//fold disjunctive predicate if at least one input is literal 'true'\n- else if( isApplicableTrueDisjunctivePredicate(root) )\n- {\n+ else if( isApplicableTrueDisjunctivePredicate(root) ) {\nliteral = new LiteralOp(true);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -99,7 +99,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\n}\npublic Expression getSixthExpr() {\n- return (_args.length >= 5 ? _args[4] : null);\n+ return (_args.length >= 6 ? 
_args[5] : null);\n}\npublic Expression[] getAllExpr(){\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DataExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/DataExpression.java", "diff": "@@ -535,10 +535,9 @@ public class DataExpression extends DataIdentifier\n}\n//general data expression constant propagation\n- if( !conditional ) {\n+ if( !conditional )\nperformConstantPropagationRand( currConstVars );\nperformConstantPropagationReadWrite( currConstVars );\n- }\n// check if data parameter of matrix is scalar or matrix -- if scalar, use Rand instead\nExpression dataParam1 = getVarParam(RAND_DATA);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ForStatementBlock.java", "diff": "@@ -89,7 +89,8 @@ public class ForStatementBlock extends StatementBlock\n//validate body\n_dmlProg = dmlProg;\nfor(StatementBlock sb : body) {\n- ids = sb.validate(dmlProg, ids, constVars, true);\n+ ids = sb.validate(dmlProg, ids, constVars,\n+ !(this instanceof ParForStatementBlock));\nconstVars = sb.getConstOut();\n}\n@@ -165,7 +166,8 @@ public class ForStatementBlock extends StatementBlock\n//validate body\n_dmlProg = dmlProg;\nfor(StatementBlock sb : body) {\n- ids = sb.validate(dmlProg, ids, constVars, true);\n+ ids = sb.validate(dmlProg, ids, constVars,\n+ !(this instanceof ParForStatementBlock));\nconstVars = sb.getConstOut();\n}\nif (!body.isEmpty()){\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -21,7 +21,6 @@ package org.apache.sysml.runtime.matrix.data;\nimport static jcuda.jcublas.cublasOperation.CUBLAS_OP_N;\nimport static jcuda.jcublas.cublasOperation.CUBLAS_OP_T;\n-import static jcuda.runtime.JCuda.cudaMalloc;\nimport static jcuda.runtime.JCuda.cudaMemcpy;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\n@@ -89,10 +88,8 @@ import jcuda.jcublas.cublasHandle;\nimport jcuda.jcublas.cublasOperation;\nimport jcuda.jcublas.cublasSideMode;\nimport jcuda.jcusparse.cusparseAction;\n-import jcuda.jcusparse.cusparseDirection;\nimport jcuda.jcusparse.cusparseHandle;\nimport jcuda.jcusparse.cusparseIndexBase;\n-import jcuda.jcusparse.cusparseMatDescr;\n/**\n* All CUDA kernels and library calls are redirected through this class\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Various fixes for size propagation and batch_norm2d integration
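The RewriteConstantFolding change in the diff above folds nrow(X) and ncol(X) into literals whenever the input's dimensions are already known, so that later folding steps over those values can fire. A toy, self-contained version of just that rule, with -1 standing for an unknown dimension (the real pass works on Hop nodes, not strings):

public class FoldDimsSketch {
    // Fold nrow/ncol to a literal if the corresponding dimension is known.
    static String fold(String op, long dim1, long dim2) {
        if (op.equals("nrow") && dim1 >= 0) return Long.toString(dim1);
        if (op.equals("ncol") && dim2 >= 0) return Long.toString(dim2);
        return op + "(X)"; // dimension unknown: keep the operation as-is
    }
    public static void main(String[] args) {
        System.out.println(fold("nrow", 1468, 20)); // -> "1468"
        System.out.println(fold("ncol", -1, -1));   // -> "ncol(X)"
    }
}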
49,738
02.06.2018 16:56:06
25,200
dbc844c12db6cb4d0e617be46d7fc51fa3eaee67
Improved IPA constant propagation and replacement. After the parser constant propagation into conditional control flow had to be disabled, this patch improves the IPA constant propagation pass to recognize constant scalars and to propagate and replace them whenever it is safe to do so.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassPropagateReplaceLiterals.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassPropagateReplaceLiterals.java", "diff": "@@ -25,7 +25,9 @@ import org.apache.sysml.hops.FunctionOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.hops.LiteralOp;\n+import org.apache.sysml.hops.Hop.DataOpTypes;\nimport org.apache.sysml.hops.recompile.Recompiler;\n+import org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.DataIdentifier;\nimport org.apache.sysml.parser.ForStatement;\n@@ -55,6 +57,25 @@ public class IPAPassPropagateReplaceLiterals extends IPAPass\n@Override\npublic void rewriteProgram( DMLProgram prog, FunctionCallGraph fgraph, FunctionCallSizeInfo fcallSizes )\n{\n+ //step 1: propagate final literals across main program\n+ LocalVariableMap constants = new LocalVariableMap();\n+ for( StatementBlock sb : prog.getStatementBlocks() ) {\n+ //delete update constant variables\n+ constants.removeAllIn(sb.variablesUpdated().getVariableNames());\n+ //literal replacement\n+ rReplaceLiterals(sb, constants);\n+ //extract literal assignments\n+ if( HopRewriteUtils.isLastLevelStatementBlock(sb) ) {\n+ for( Hop root : sb.getHops() )\n+ if( HopRewriteUtils.isData(root, DataOpTypes.TRANSIENTWRITE)\n+ && root.getInput().get(0) instanceof LiteralOp) {\n+ constants.put(root.getName(), ScalarObjectFactory\n+ .createScalarObject((LiteralOp)root.getInput().get(0)));\n+ }\n+ }\n+ }\n+\n+ //step 2: propagate literals into functions\nfor( String fkey : fgraph.getReachableFunctions() ) {\nFunctionOp first = fgraph.getFunctionCalls(fkey).get(0);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ForStatementBlock.java", "diff": "@@ -89,8 +89,7 @@ public class ForStatementBlock extends StatementBlock\n//validate body\n_dmlProg = dmlProg;\nfor(StatementBlock sb : body) {\n- ids = sb.validate(dmlProg, ids, constVars,\n- !(this instanceof ParForStatementBlock));\n+ ids = sb.validate(dmlProg, ids, constVars, true);\nconstVars = sb.getConstOut();\n}\n@@ -166,8 +165,7 @@ public class ForStatementBlock extends StatementBlock\n//validate body\n_dmlProg = dmlProg;\nfor(StatementBlock sb : body) {\n- ids = sb.validate(dmlProg, ids, constVars,\n- !(this instanceof ParForStatementBlock));\n+ ids = sb.validate(dmlProg, ids, constVars, true);\nconstVars = sb.getConstOut();\n}\nif (!body.isEmpty()){\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ScalarObjectFactory.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ScalarObjectFactory.java", "diff": "@@ -66,6 +66,10 @@ public abstract class ScalarObjectFactory\n}\n}\n+ public static ScalarObject createScalarObject(LiteralOp lit) {\n+ return createScalarObject(lit.getValueType(), lit);\n+ }\n+\npublic static ScalarObject createScalarObject(ValueType vt, LiteralOp lit) {\nswitch( vt ) {\ncase DOUBLE: return new DoubleObject(lit.getDoubleValue());\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2356] Improved IPA constant propagation and replacement. After SYSTEMML-2340 required disabling the parser constant propagation into conditional control flow, this patch improves the IPA constant propagation pass to recognize constant scalars and to propagate and replace them whenever it is safe to do so.
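The pass in the diff above walks the main program's statement blocks, records final scalar literal assignments, invalidates bindings for variables a block updates, and substitutes known literals at later use sites. A self-contained toy illustration of that dataflow (hypothetical representation: statements as (target, expr) string pairs instead of the actual Hop DAG):

import java.util.HashMap;
import java.util.Map;

public class ConstPropSketch {
    // stmts: (target, expr) pairs; an expr is either an integer literal or a variable name.
    static void propagate(String[][] stmts) {
        Map<String, String> constants = new HashMap<>();
        for (String[] s : stmts) {
            String target = s[0], expr = s[1];
            if (constants.containsKey(expr))        // substitute a known literal at the use site
                expr = constants.get(expr);
            if (expr.matches("-?\\d+"))             // record a new literal binding ...
                constants.put(target, expr);
            else                                    // ... or invalidate on a non-constant update
                constants.remove(target);
            System.out.println(target + " = " + expr);
        }
    }
    public static void main(String[] args) {
        propagate(new String[][]{{"a", "7"}, {"b", "a"}, {"a", "input"}, {"c", "a"}});
        // prints: a = 7, b = 7, a = input, c = a
    }
}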
49,738
02.06.2018 21:49:48
25,200
2b86a4d92e18f3889d693e47b1ff2c6c84d89180
[MINOR] Refactoring parser/lops methods w/ inconsistent naming scheme
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/Lop.java", "new_path": "src/main/java/org/apache/sysml/lops/Lop.java", "diff": "@@ -162,11 +162,11 @@ public abstract class Lop\n}\n- public boolean[] get_reachable() {\n+ public boolean[] getReachable() {\nreturn reachable;\n}\n- public boolean[] create_reachable(int size) {\n+ public boolean[] createReachable(int size) {\nreachable = new boolean[size];\nreturn reachable;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "new_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "diff": "@@ -316,7 +316,7 @@ public class Dag<N extends Lop>\n// - store the constructed reachability information in $u$.reachable[] boolean array\nfor (int i = 0; i < nodearray.length; i++) {\ndagDFS(nodearray[i], nodearray[i]\n- .create_reachable(nodearray.length));\n+ .createReachable(nodearray.length));\n}\n// print the nodes in sorted order\n@@ -1975,8 +1975,8 @@ public class Dag<N extends Lop>\nif ( queuedNodes.isEmpty() )\nreturn false;\n- boolean[] nodeMarked = node.get_reachable();\n- boolean[] tmpMarked = tmpNode.get_reachable();\n+ boolean[] nodeMarked = node.getReachable();\n+ boolean[] tmpMarked = tmpNode.getReachable();\nlong nodeid = IDMap.get(node.getID());\nlong tmpid = IDMap.get(tmpNode.getID());\n@@ -3585,7 +3585,7 @@ public class Dag<N extends Lop>\n*/\nprivate static boolean isChild(Lop a, Lop b, Map<Long, Integer> IDMap) {\nint bID = IDMap.get(b.getID());\n- return a.get_reachable()[bID];\n+ return a.getReachable()[bID];\n}\n/**\n@@ -3628,7 +3628,7 @@ public class Dag<N extends Lop>\nreturn false;\nint index = IDMap.get(node.getID());\nfor( Lop cnode : childNodes ) {\n- if ( (type == ExecLocation.INVALID || cnode.getExecLocation() == type) && cnode.get_reachable()[index])\n+ if ( (type == ExecLocation.INVALID || cnode.getExecLocation() == type) && cnode.getReachable()[index])\nreturn true;\n}\nreturn false;\n@@ -3639,7 +3639,7 @@ public class Dag<N extends Lop>\nreturn null;\nint index = IDMap.get(node.getID());\nfor( Lop cnode : childNodes ) {\n- if ( cnode.getExecLocation() == type && cnode.get_reachable()[index])\n+ if ( cnode.getExecLocation() == type && cnode.getReachable()[index])\nreturn cnode;\n}\nreturn null;\n@@ -3659,7 +3659,7 @@ public class Dag<N extends Lop>\nreturn null;\nfor( Lop pn : parentNodes ) {\nint index = IDMap.get( pn.getID() );\n- if ( pn.getExecLocation() == type && node.get_reachable()[index])\n+ if ( pn.getExecLocation() == type && node.getReachable()[index])\nreturn pn;\n}\nreturn null;\n@@ -3673,7 +3673,7 @@ public class Dag<N extends Lop>\nint index = IDMap.get(node.getID());\nfor( Lop n : nodesVec ) {\n- if ( n.definesMRJob() && n.get_reachable()[index])\n+ if ( n.definesMRJob() && n.getReachable()[index])\nreturn true;\n}\nreturn false;\n@@ -3686,7 +3686,7 @@ public class Dag<N extends Lop>\nint index = IDMap.get(node.getID());\nboolean onlyDatagen = true;\nfor( Lop n : nodesVec ) {\n- if ( n.definesMRJob() && n.get_reachable()[index] && JobType.findJobTypeFromLop(n) != JobType.DATAGEN )\n+ if ( n.definesMRJob() && n.getReachable()[index] && JobType.findJobTypeFromLop(n) != JobType.DATAGEN )\nonlyDatagen = false;\n}\n// return true also when there is no lop in \"nodesVec\" that defines a MR job.\n@@ -3731,7 +3731,7 @@ public class Dag<N extends Lop>\nreturn false;\nfor( Lop pnode : parentNodes ) {\nint index = IDMap.get( pnode.getID() );\n- if ( node.get_reachable()[index])\n+ if ( node.getReachable()[index])\nreturn 
true;\n}\nreturn false;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -334,7 +334,7 @@ public class DMLTranslator\n// handle while stmt predicate\nLop l = wsb.getPredicateHops().constructLops();\n- wsb.set_predicateLops(l);\n+ wsb.setPredicateLops(l);\nret |= wsb.updatePredicateRecompilationFlag();\n}\n@@ -355,7 +355,7 @@ public class DMLTranslator\n// handle if stmt predicate\nLop l = isb.getPredicateHops().constructLops();\n- isb.set_predicateLops(l);\n+ isb.setPredicateLops(l);\nret |= isb.updatePredicateRecompilationFlag();\n}\n@@ -460,7 +460,7 @@ public class DMLTranslator\n// create DAG for loop predicates\npred_dag = new Dag<>();\n- ((WhileStatementBlock) sb).get_predicateLops().addToDag(pred_dag);\n+ ((WhileStatementBlock) sb).getPredicateLops().addToDag(pred_dag);\n// create instructions for loop predicates\npred_instruct = new ArrayList<>();\n@@ -497,7 +497,7 @@ public class DMLTranslator\n// create DAG for loop predicates\npred_dag = new Dag<>();\n- ((IfStatementBlock) sb).get_predicateLops().addToDag(pred_dag);\n+ ((IfStatementBlock) sb).getPredicateLops().addToDag(pred_dag);\n// create instructions for loop predicates\npred_instruct = new ArrayList<>();\n@@ -1021,7 +1021,7 @@ public class DMLTranslator\nif (current instanceof WhileStatementBlock) {\nWhileStatementBlock wstb = (WhileStatementBlock) current;\n- wstb.get_predicateLops().resetVisitStatus();\n+ wstb.getPredicateLops().resetVisitStatus();\nif (wstb.getNumStatements() > 1)\nLOG.debug(\"While statement block has more than 1 stmt\");\nWhileStatement ws = (WhileStatement)wstb.getStatement(0);\n@@ -1033,7 +1033,7 @@ public class DMLTranslator\nif (current instanceof IfStatementBlock) {\nIfStatementBlock istb = (IfStatementBlock) current;\n- istb.get_predicateLops().resetVisitStatus();\n+ istb.getPredicateLops().resetVisitStatus();\nif (istb.getNumStatements() > 1)\nLOG.debug(\"If statement block has more than 1 stmt\");\nIfStatement is = (IfStatement)istb.getStatement(0);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/IfStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/IfStatementBlock.java", "diff": "@@ -452,11 +452,11 @@ public class IfStatementBlock extends StatementBlock\nreturn _predicateHops;\n}\n- public Lop get_predicateLops() {\n+ public Lop getPredicateLops() {\nreturn _predicateLops;\n}\n- public void set_predicateLops(Lop predicateLops) {\n+ public void setPredicateLops(Lop predicateLops) {\n_predicateLops = predicateLops;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/WhileStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/WhileStatementBlock.java", "diff": "@@ -247,11 +247,11 @@ public class WhileStatementBlock extends StatementBlock\nreturn _predicateHops;\n}\n- public Lop get_predicateLops() {\n+ public Lop getPredicateLops() {\nreturn _predicateLops;\n}\n- public void set_predicateLops(Lop predicateLops) {\n+ public void setPredicateLops(Lop predicateLops) {\n_predicateLops = predicateLops;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/GMRCtableBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/GMRCtableBuffer.java", "diff": "@@ -141,7 +141,6 @@ public class GMRCtableBuffer\n}\nelse if ( _blockBuffer != null ) {\nMatrixIndexes key=new MatrixIndexes(1,1);\n- 
//DataConverter.writeBinaryBlockMatrixToHDFS(path, job, mat, mc.get_rows(), mc.get_cols(), mc.get_rows_per_block(), mc.get_cols_per_block(), replication);\nfor(Entry<Byte, MatrixBlock> ctable: _blockBuffer.entrySet())\n{\nArrayList<Integer> resultIDs=ReduceBase.getOutputIndexes(ctable.getKey(), _resultIndexes);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Refactoring parser/lops methods w/ inconsistent naming scheme
49,719
04.06.2018 16:54:14
25,200
8084dc1276663c7dfaa8983b13efb0ec11e6ee1e
[MINOR] add 2 new DML examples to Jupyter notebook
[ { "change_type": "MODIFY", "old_path": "samples/jupyter-notebooks/DML Tips and Tricks (aka Fun With DML).ipynb", "new_path": "samples/jupyter-notebooks/DML Tips and Tricks (aka Fun With DML).ipynb", "diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"1. [Replace NaN with mode](#NaN2Mode)\\n\",\n+ \"1. [Create all value pairs for v1 and v2](#AllValuePairs)\\n\",\n+ \"* [Replace NaN with mode](#NaN2Mode)\\n\",\n\"* [Use sample builtin function to create sample from matrix](#sample)\\n\",\n\"* [Count of Matching Values in two Matrices/Vectors](#MatchinRows)\\n\",\n\"* [Cross Validation](#CrossValidation)\\n\",\n\"* [Value-based join of two Matrices](#JoinMatrices)\\n\",\n\"* [Filter Matrix to include only Frequent Column Values](#FilterMatrix)\\n\",\n- \"* [Construct (sparse) Matrix from (rowIndex, colIndex, values) triplets](#Construct_sparse_Matrix)\\n\",\n+ \"* [(Sparse) Matrix to/from (rowIndex, colIndex, values) conversions (i,j,v)](#Construct_sparse_Matrix)\\n\",\n\"* [Find and remove duplicates in columns or rows](#Find_and_remove_duplicates)\\n\",\n\"* [Set based Indexing](#Set_based_Indexing)\\n\",\n\"* [Group by Aggregate using Linear Algebra](#Multi_column_Sorting)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 15,\n+ \"execution_count\": null,\n\"metadata\": {\n\"collapsed\": false,\n\"scrolled\": false\n},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from systemml import MLContext, dml\\n\",\n+ \"ml = MLContext(sc)\\n\",\n+ \"\\n\",\n+ \"print (ml.buildTime())\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Create all value pairs for v1 and v2<a id=\\\"AllValuePairs\\\" />\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 50,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"2017-09-22 07:57:57 UTC\\n\"\n+ \"2.000\\n\",\n+ \"1.000\\n\",\n+ \"8.000\\n\",\n+ \"3.000\\n\",\n+ \"5.000\\n\",\n+ \"6.000\\n\",\n+ \"7.000\\n\",\n+ \"\\n\",\n+ \"80.000\\n\",\n+ \"20.000\\n\",\n+ \"50.000\\n\",\n+ \"\\n\",\n+ \"2.000 80.000\\n\",\n+ \"2.000 20.000\\n\",\n+ \"2.000 50.000\\n\",\n+ \"1.000 80.000\\n\",\n+ \"1.000 20.000\\n\",\n+ \"1.000 50.000\\n\",\n+ \"8.000 80.000\\n\",\n+ \"8.000 20.000\\n\",\n+ \"8.000 50.000\\n\",\n+ \"3.000 80.000\\n\",\n+ \"3.000 20.000\\n\",\n+ \"3.000 50.000\\n\",\n+ \"5.000 80.000\\n\",\n+ \"5.000 20.000\\n\",\n+ \"5.000 50.000\\n\",\n+ \"6.000 80.000\\n\",\n+ \"6.000 20.000\\n\",\n+ \"6.000 50.000\\n\",\n+ \"7.000 80.000\\n\",\n+ \"7.000 20.000\\n\",\n+ \"7.000 50.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.000 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n]\n}\n],\n\"source\": [\n- \"from systemml import MLContext, dml, jvm_stdout\\n\",\n- \"ml = MLContext(sc)\\n\",\n- \"\\n\",\n- \"print (ml.buildTime())\"\n+ \"prog=\\\"\\\"\\\"\\n\",\n+ \"v1 = matrix ('2 1 8 3 5 6 7', rows = 7, cols = 1 )\\n\",\n+ \"v2 = matrix ('80 20 50', rows = 3, cols = 1 )\\n\",\n+ \"\\n\",\n+ \"nv1 = nrow (v1);\\n\",\n+ \"nv2 = nrow (v2);\\n\",\n+ \"R = cbind (\\n\",\n+ \" matrix (v1 %*% matrix(1, 1, nv2), nv1*nv2, 1),\\n\",\n+ \" matrix (matrix(1, nv1, 1) %*% t(v2), nv1*nv2, 1))\\n\",\n+ \"\\n\",\n+ \"print(toString(v1));\\n\",\n+ \"print(toString(v2));\\n\",\n+ \"print(toString(R));\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": 
{},\n\"source\": [\n- \"This functions replaces NaN in column with mode of column\"\n+ \"This functions replaces NaN in column i with mode of column i.\"\n]\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 13,\n+ \"execution_count\": 51,\n\"metadata\": {\n\"collapsed\": false\n},\n\"replaceNaNwithMode = function (matrix[double] X, integer colId) \\n\",\n\" return (matrix[double] X) \\n\",\n\"{\\n\",\n- \" Xi = replace (target=X[,colId], pattern=0/0, replacement=max(X[,colId])+1) # replace NaN with largest value + 1\\n\",\n+ \" Xi = replace (target=X[,colId], pattern=NaN, replacement=-Inf) # replace NaN with -Inf\\n\",\n+ \" Xi = replace (target=Xi, pattern=-Inf, replacement=max(Xi)+1) # replace -Inf with largest value + 1\\n\",\n\" agg = aggregate (target=Xi, groups=Xi, fn=\\\"count\\\") # count each distinct value\\n\",\n\" mode = as.scalar (rowIndexMax(t(agg[1:nrow(agg)-1, ]))) # mode is max frequent value except last value\\n\",\n\" X[,colId] = replace (target=Xi, pattern=max(Xi), replacement=mode) # fill in mode\\n\",\n\"print (\\\"Before: \\\\n\\\" + toString(X))\\n\",\n\"print (\\\"After: \\\\n\\\" + toString(Y))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout(True):\\n\",\n- \" ml.execute(dml(prog))\"\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 18,\n+ \"execution_count\": 52,\n\"metadata\": {\n\"collapsed\": false\n},\n\"\\n\",\n\"sv: \\n\",\n\"1.000\\n\",\n- \"4.000\\n\",\n+ \"5.000\\n\",\n\"\\n\",\n\"samples: \\n\",\n\"2.000 1.000\\n\",\n- \"7.000 9.000\\n\",\n+ \"4.000 4.000\\n\",\n\"\\n\",\n\"SystemML Statistics:\\n\",\n- \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Total execution time:\\t\\t0.000 sec.\\n\",\n\"Number of executed Spark inst:\\t0.\\n\",\n\"\\n\",\n\"\\n\"\n\"print (\\\"sv: \\\\n\\\" + toString(sv))\\n\",\n\"print (\\\"samples: \\\\n\\\" + toString(samples))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout(True):\\n\",\n- \" ml.execute(dml(prog))\"\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 19,\n+ \"execution_count\": 53,\n\"metadata\": {\n\"collapsed\": false\n},\n\"Number of Matches: 2.0\\n\",\n\"\\n\",\n\"SystemML Statistics:\\n\",\n- \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Total execution time:\\t\\t0.000 sec.\\n\",\n\"Number of executed Spark inst:\\t0.\\n\",\n\"\\n\",\n\"\\n\"\n\"print (\\\"t(Y): \\\" + toString(t(Y)))\\n\",\n\"print (\\\"Number of Matches: \\\" + matches + \\\"\\\\n\\\")\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout(True):\\n\",\n- \" ml.execute(dml(prog))\"\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 4,\n+ \"execution_count\": 54,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": false\n},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"Test data Xyi2\\n\",\n+ \"Test data Xyi1\\n\",\n+ \"7.000 8.000 9.000 3.000\\n\",\n\"10.000 11.000 12.000 4.000\\n\",\n- \"16.000 17.000 18.000 6.000\\n\",\n\"\\n\",\n- \"Train data Xyni2\\n\",\n+ \"Train data Xyni1\\n\",\n\"1.000 2.000 3.000 1.000\\n\",\n\"4.000 5.000 6.000 2.000\\n\",\n- \"7.000 8.000 9.000 3.000\\n\",\n\"13.000 14.000 15.000 5.000\\n\",\n+ \"16.000 17.000 18.000 6.000\\n\",\n\"\\n\",\n- \"w2\\n\",\n- \"95.000\\n\",\n- \"106.000\\n\",\n- \"117.000\\n\",\n+ \"w_1\\n\",\n+ \"170.000\\n\",\n+ \"184.000\\n\",\n+ \"198.000\\n\",\n\"\\n\",\n- \"stats2\\n\",\n- \"8938.000\\n\",\n+ \"stats1\\n\",\n+ \"10537.000\\n\",\n\"\\n\",\n\"\\n\",\n- \"Test 
data Xyi3\\n\",\n- \"1.000 2.000 3.000 1.000\\n\",\n- \"7.000 8.000 9.000 3.000\\n\",\n+ \"Test data Xyi2\\n\",\n+ \"13.000 14.000 15.000 5.000\\n\",\n+ \"16.000 17.000 18.000 6.000\\n\",\n\"\\n\",\n- \"Train data Xyni3\\n\",\n+ \"Train data Xyni2\\n\",\n+ \"1.000 2.000 3.000 1.000\\n\",\n\"4.000 5.000 6.000 2.000\\n\",\n+ \"7.000 8.000 9.000 3.000\\n\",\n\"10.000 11.000 12.000 4.000\\n\",\n- \"13.000 14.000 15.000 5.000\\n\",\n- \"16.000 17.000 18.000 6.000\\n\",\n\"\\n\",\n- \"w3\\n\",\n- \"209.000\\n\",\n- \"226.000\\n\",\n- \"243.000\\n\",\n+ \"w_2\\n\",\n+ \"70.000\\n\",\n+ \"80.000\\n\",\n+ \"90.000\\n\",\n\"\\n\",\n- \"stats3\\n\",\n- \"6844.000\\n\",\n+ \"stats2\\n\",\n+ \"7469.000\\n\",\n\"\\n\",\n\"\\n\",\n- \"Test data Xyi1\\n\",\n+ \"Test data Xyi3\\n\",\n+ \"1.000 2.000 3.000 1.000\\n\",\n\"4.000 5.000 6.000 2.000\\n\",\n- \"13.000 14.000 15.000 5.000\\n\",\n\"\\n\",\n- \"Train data Xyni1\\n\",\n- \"1.000 2.000 3.000 1.000\\n\",\n+ \"Train data Xyni3\\n\",\n\"7.000 8.000 9.000 3.000\\n\",\n\"10.000 11.000 12.000 4.000\\n\",\n+ \"13.000 14.000 15.000 5.000\\n\",\n\"16.000 17.000 18.000 6.000\\n\",\n\"\\n\",\n- \"w1\\n\",\n- \"158.000\\n\",\n- \"172.000\\n\",\n- \"186.000\\n\",\n+ \"w_3\\n\",\n+ \"222.000\\n\",\n+ \"240.000\\n\",\n+ \"258.000\\n\",\n\"\\n\",\n- \"stats1\\n\",\n- \"9853.000\\n\",\n+ \"stats3\\n\",\n+ \"5109.000\\n\",\n\"\\n\",\n\"\\n\",\n\"SV selection vector:\\n\",\n\"3.000\\n\",\n- \"1.000\\n\",\n\"3.000\\n\",\n- \"2.000\\n\",\n\"1.000\\n\",\n+ \"1.000\\n\",\n+ \"2.000\\n\",\n\"2.000\\n\",\n\"\\n\",\n\"SystemML Statistics:\\n\",\n- \"Total execution time:\\t\\t0.024 sec.\\n\",\n+ \"Total execution time:\\t\\t0.014 sec.\\n\",\n\"Number of executed Spark inst:\\t0.\\n\",\n\"\\n\",\n\"\\n\"\n\" distinctLabels = aggregate( target = Xyni[,1], groups = Xyni[,1], fn = \\\"count\\\")\\n\",\n\" if ( nrow(distinctLabels) > 1)\\n\",\n\" {\\n\",\n- \" wi = trainAlg (Xyni[ ,1:ncol(Xy)-1], Xyni[ ,ncol(Xy)]) # wi Model for i-th training data\\n\",\n- \" pi = testAlg (Xyi [ ,1:ncol(Xy)-1], wi) # pi Prediction for i-th test data\\n\",\n- \" ei = evalPrediction (pi, Xyi[ ,ncol(Xy)]) # stats[i,] evaluation of prediction of i-th fold\\n\",\n- \" stats[i,] = ei\\n\",\n+ \" w_i = trainAlg (Xyni[ ,1:ncol(Xy)-1], Xyni[ ,ncol(Xy)]) # w_i Model for i-th training data\\n\",\n+ \" p_i = testAlg (Xyi [ ,1:ncol(Xy)-1], w_i) # p_i Prediction for i-th test data\\n\",\n+ \" e_i = evalPrediction (p_i, Xyi[ ,ncol(Xy)]) # stats[i,] evaluation of prediction of i-th fold\\n\",\n+ \" stats[i,] = e_i\\n\",\n\" \\n\",\n\" print ( \\\"Test data Xyi\\\" + i + \\\"\\\\n\\\" + toString(Xyi) \\n\",\n\" + \\\"\\\\nTrain data Xyni\\\" + i + \\\"\\\\n\\\" + toString(Xyni) \\n\",\n- \" + \\\"\\\\nw\\\" + i + \\\"\\\\n\\\" + toString(wi) \\n\",\n+ \" + \\\"\\\\nw_\\\" + i + \\\"\\\\n\\\" + toString(w_i) \\n\",\n\" + \\\"\\\\nstats\\\" + i + \\\"\\\\n\\\" + toString(stats[i,]) \\n\",\n\" + \\\"\\\\n\\\")\\n\",\n\" }\\n\",\n\"}\\n\",\n\"\\\"\\\"\\\"\\n\",\n\"\\n\",\n- \"with jvm_stdout(True):\\n\",\n- \" ml.execute(dml(prog))\"\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n\"print (\\\"M2 \\\\n\\\" + toString(M2))\\n\",\n\"print (\\\"M1[,2] joined with M2[,2], and return matching M1 rows\\\\n\\\" + toString(M12))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n- \" ml.execute(dml(prog))\"\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 53,\n+ \"execution_count\": 56,\n\"metadata\": {\n\"collapsed\": false\n},\n\"print (toString(M))\\n\",\n\"print 
(toString(fM))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n- \" ml.execute(dml(prog))\"\n+ \"res = ml.execute(dml(prog))\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"## Construct (sparse) Matrix from (rowIndex, colIndex, values) triplets<a id=\\\"Construct_sparse_Matrix\\\"></a>\"\n+ \"## (Sparse) Matrix to/from (rowIndex, colIndex, values) conversions (i,j,v) <a id=\\\"Construct_sparse_Matrix\\\"></a>\"\n]\n},\n{\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 57,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"scrolled\": true\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"1.000 2.000 10.000\\n\",\n+ \"3.000 3.000 20.000\\n\",\n+ \"3.000 4.000 30.000\\n\",\n+ \"4.000 1.000 40.000\\n\",\n+ \"5.000 6.000 50.000\\n\",\n+ \"\\n\",\n+ \"0.000 10.000 0.000 0.000 0.000 0.000\\n\",\n+ \"0.000 0.000 0.000 0.000 0.000 0.000\\n\",\n+ \"0.000 0.000 20.000 30.000 0.000 0.000\\n\",\n+ \"40.000 0.000 0.000 0.000 0.000 0.000\\n\",\n+ \"0.000 0.000 0.000 0.000 0.000 50.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n\"source\": [\n\"prog = \\\"\\\"\\\"\\n\",\n\"I = matrix (\\\"1 3 3 4 5\\\", rows = 5, cols = 1)\\n\",\n\"J = matrix (\\\"2 3 4 1 6\\\", rows = 5, cols = 1)\\n\",\n\"V = matrix (\\\"10 20 30 40 50\\\", rows = 5, cols = 1)\\n\",\n\"\\n\",\n+ \"IJVs = cbind(I, J, V)\\n\",\n+ \"\\n\",\n\"M = table (I, J, V)\\n\",\n+ \"\\n\",\n+ \"print (toString (IJVs))\\n\",\n\"print (toString (M))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"ml.execute(dml(prog).output('M')).get('M').toNumPy()\"\n+ \"res = ml.execute(dml(prog).output('M')).get('M').toNumPy()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Given a sparse matrix, construct ``<i,j,v>`` matrix with 3 columns rowIndex, colIndex, and values.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 58,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"M:\\n\",\n+ \"0.000 23.000\\n\",\n+ \"10.000 0.000\\n\",\n+ \"18.000 0.000\\n\",\n+ \"0.000 20.000\\n\",\n+ \"\\n\",\n+ \"IJVs:\\n\",\n+ \"1.000 2.000 23.000\\n\",\n+ \"2.000 1.000 10.000\\n\",\n+ \"3.000 1.000 18.000\\n\",\n+ \"4.000 2.000 20.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"prog = \\\"\\\"\\\"\\n\",\n+ \"M = matrix (\\\"0 23 10 0 18 0 0 20\\\", rows = 4, cols = 2)\\n\",\n+ \"\\n\",\n+ \"m = nrow(M);\\n\",\n+ \"n = ncol(M);\\n\",\n+ \"I = matrix((M!=0)*seq(1,m), m*n, 1)\\n\",\n+ \"J = matrix((M!=0)*t(seq(1,n)), m*n, 1)\\n\",\n+ \"V = matrix(M, m*n, 1)\\n\",\n+ \"IJVd = cbind(I, J, V);\\n\",\n+ \"IJVs = removeEmpty(target=IJVd, margin=\\\"rows\\\");\\n\",\n+ \"\\n\",\n+ \"print (\\\"M:\\\\n\\\" + toString(M))\\n\",\n+ \"print (\\\"IJVs:\\\\n\\\" + toString (IJVs))\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"res = ml.execute(dml(prog).output('M')).get('M').toNumPy()\"\n]\n},\n{\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 59,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": 
\"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.000 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"array([[ 1.],\\n\",\n+ \" [ 2.],\\n\",\n+ \" [ 3.],\\n\",\n+ \" [ 4.],\\n\",\n+ \" [ 5.],\\n\",\n+ \" [ 10.]])\"\n+ ]\n+ },\n+ \"execution_count\": 59,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = \\\"\\\"\\\"\\n\",\n\"X = matrix (\\\"1 2 3 3 3 4 5 10\\\", rows = 8, cols = 1)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 60,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.076 sec.\\n\",\n+ \"Number of executed Spark inst:\\t6.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"array([[ 1.],\\n\",\n+ \" [ 2.],\\n\",\n+ \" [ 3.],\\n\",\n+ \" [ 4.],\\n\",\n+ \" [ 5.],\\n\",\n+ \" [ 10.]])\"\n+ ]\n+ },\n+ \"execution_count\": 60,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = \\\"\\\"\\\"\\n\",\n\"X = matrix (\\\"3 2 1 3 3 4 5 10\\\", rows = 8, cols = 1)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 61,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.000 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"array([[ 1.],\\n\",\n+ \" [ 2.],\\n\",\n+ \" [ 3.],\\n\",\n+ \" [ 4.],\\n\",\n+ \" [ 5.],\\n\",\n+ \" [ 10.]])\"\n+ ]\n+ },\n+ \"execution_count\": 61,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = \\\"\\\"\\\"\\n\",\n\"X = matrix (\\\"3 2 1 3 3 4 5 10\\\", rows = 8, cols = 1)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 62,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 11.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 11.000 1.000 1.000 1.000 1.000 11.000 11.000 1.000 11.000 1.000 1.000 11.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 11.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 11.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 11.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"array([[ 1., 1., 1., 1., 1., 1., 1., 1., 1., 11., 1.,\\n\",\n+ \" 1., 1., 1., 1., 1., 1., 1., 1., 11., 1., 1.,\\n\",\n+ \" 1., 1., 11., 11., 1., 11., 1., 1., 11., 1., 1.,\\n\",\n+ \" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\\n\",\n+ \" 1., 1., 1., 1., 1., 11., 1., 1., 1., 1., 1.,\\n\",\n+ \" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\\n\",\n+ \" 11., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\\n\",\n+ \" 
1., 11.]])\"\n+ ]\n+ },\n+ \"execution_count\": 62,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = \\\"\\\"\\\"\\n\",\n\"X = matrix (1, rows = 1, cols = 100)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 63,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.002 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"[[ 4. 10. 33.]\\n\",\n+ \" [ 8. 20. 74.]\\n\",\n+ \" [ 3. 20. 49.]\\n\",\n+ \" [ 6. 20. 29.]\\n\",\n+ \" [ 5. 30. 94.]\\n\",\n+ \" [ 9. 30. 57.]\\n\",\n+ \" [ 7. 40. 48.]\\n\",\n+ \" [ 2. 40. 11.]\\n\",\n+ \" [ 1. 50. 20.]]\\n\",\n+ \"[[ 10.]\\n\",\n+ \" [ 20.]\\n\",\n+ \" [ 30.]\\n\",\n+ \" [ 40.]\\n\",\n+ \" [ 50.]]\\n\",\n+ \"[[ 33.]\\n\",\n+ \" [ 74.]\\n\",\n+ \" [ 94.]\\n\",\n+ \" [ 48.]\\n\",\n+ \" [ 20.]]\\n\",\n+ \"[[ 0. 0. 0. 1. 0. 0. 0. 0. 0.]\\n\",\n+ \" [ 0. 0. 1. 0. 0. 1. 0. 1. 0.]\\n\",\n+ \" [ 0. 0. 0. 0. 1. 0. 0. 0. 1.]\\n\",\n+ \" [ 0. 1. 0. 0. 0. 0. 1. 0. 0.]\\n\",\n+ \" [ 1. 0. 0. 0. 0. 0. 0. 0. 0.]]\\n\",\n+ \"[[ 33.]\\n\",\n+ \" [ 152.]\\n\",\n+ \" [ 151.]\\n\",\n+ \" [ 59.]\\n\",\n+ \" [ 20.]]\\n\"\n+ ]\n+ }\n+ ],\n\"source\": [\n\"prog = \\\"\\\"\\\"\\n\",\n\"C = matrix ('50 40 20 10 30 20 40 20 30', rows = 9, cols = 1) # category data\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 64,\n\"metadata\": {\n\"collapsed\": true\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 65,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"1.000\\n\",\n+ \"3.000\\n\",\n+ \"6.000\\n\",\n+ \"4.000\\n\",\n+ \"9.000\\n\",\n+ \"15.000\\n\",\n+ \"22.000\\n\",\n+ \"8.000\\n\",\n+ \"17.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"MLResults\"\n+ ]\n+ },\n+ \"execution_count\": 65,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = cumsum_prod_def + \\\"\\\"\\\"\\n\",\n\"X = matrix (\\\"1 2 3 4 5 6 7 8 9\\\", rows = 9, cols = 1);\\n\",\n\"\\n\",\n\"print (toString(Y))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n\"ml.execute(dml(prog))\"\n]\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 66,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"1.000\\n\",\n+ \"1.000\\n\",\n+ \"1.000\\n\",\n+ \"4.000\\n\",\n+ \"4.000\\n\",\n+ \"4.000\\n\",\n+ \"4.000\\n\",\n+ \"8.000\\n\",\n+ \"8.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"MLResults\"\n+ ]\n+ },\n+ \"execution_count\": 66,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = cumsum_prod_def + \\\"\\\"\\\"\\n\",\n\"X = matrix (\\\"1 2 3 4 5 6 7 8 9\\\", rows = 9, cols = 1);\\n\",\n\"\\n\",\n\"print 
(toString(Y))\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n\"ml.execute(dml(prog))\"\n]\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 67,\n\"metadata\": {\n\"collapsed\": true\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 68,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t6.081 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"MLResults\"\n+ ]\n+ },\n+ \"execution_count\": 68,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = cumsum_prod_def + cumsum_prod_naive_def + \\\"\\\"\\\"\\n\",\n\"X = rand (rows = 20000, cols = 10, min = 0, max = 1, pdf = \\\"uniform\\\", sparsity = 1.0);\\n\",\n\"\\n\",\n\"Y1 = cumsum_prod_naive (X, C, 0.123);\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n\"ml.execute(dml(prog))\"\n]\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 69,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.074 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"MLResults\"\n+ ]\n+ },\n+ \"execution_count\": 69,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = cumsum_prod_def + cumsum_prod_naive_def + \\\"\\\"\\\"\\n\",\n\"X = rand (rows = 20000, cols = 10, min = 0, max = 1, pdf = \\\"uniform\\\", sparsity = 1.0);\\n\",\n\"\\n\",\n\"Y2 = cumsum_prod (X, C, 0.123);\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n\"ml.execute(dml(prog))\"\n]\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 70,\n\"metadata\": {\n\"collapsed\": true\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 71,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"Maximum difference between X %*% L and Identity = 2.220446049250313E-16\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.309 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"MLResults\"\n+ ]\n+ },\n+ \"execution_count\": 71,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = invert_lower_triangular_def + \\\"\\\"\\\"\\n\",\n\"n = 1000;\\n\",\n\"\\n\",\n\"print (\\\"Maximum difference between X %*% L and Identity = \\\" + max (abs (X %*% L - diag (matrix (1, rows = n, cols = 1)))));\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n\"ml.execute(dml(prog))\"\n]\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 72,\n\"metadata\": {\n\"collapsed\": true\n},\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 73,\n\"metadata\": {\n\"collapsed\": false\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": 
[\n+ \"Maximum difference between X %*% L and Identity = 4.718447854656915E-16\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t6.890 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"data\": {\n+ \"text/plain\": [\n+ \"MLResults\"\n+ ]\n+ },\n+ \"execution_count\": 73,\n+ \"metadata\": {},\n+ \"output_type\": \"execute_result\"\n+ }\n+ ],\n\"source\": [\n\"prog = invert_lower_triangular_naive_def + \\\"\\\"\\\"\\n\",\n\"n = 1000;\\n\",\n\"\\n\",\n\"print (\\\"Maximum difference between X %*% L and Identity = \\\" + max (abs (X %*% L - diag (matrix (1, rows = n, cols = 1)))));\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"with jvm_stdout():\\n\",\n\"ml.execute(dml(prog))\"\n]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": []\n}\n],\n\"metadata\": {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add 2 new DML examples to Jupyter notebook
49,738
04.06.2018 14:25:05
25,200
1bcdfaac138d8f68e4144ca4ddbaf8cf03329ca1
Fix codegen row tmpl support for vector ternary axpy This patch fixes the CPlan construction of row templates for ternary axpy operations with row vector intermediates. Specifically, we now correctly handle index lookups only for scalar intermediates, which otherwise causes codegen compilation errors.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java", "diff": "package org.apache.sysml.hops.codegen.cplan;\n+import java.util.Arrays;\n+\nimport org.apache.commons.lang.StringUtils;\nimport org.apache.sysml.hops.codegen.template.TemplateUtils;\nimport org.apache.sysml.parser.Expression.DataType;\n@@ -56,10 +58,7 @@ public class CNodeBinary extends CNode\nMINUS1_MULT, MINUS_NZ;\npublic static boolean contains(String value) {\n- for( BinType bt : values() )\n- if( bt.name().equals(value) )\n- return true;\n- return false;\n+ return Arrays.stream(values()).anyMatch(bt -> bt.name().equals(value));\n}\npublic boolean isCommutative() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java", "diff": "package org.apache.sysml.hops.codegen.cplan;\n+import java.util.Arrays;\n+\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -30,12 +32,8 @@ public class CNodeTernary extends CNode\nREPLACE, REPLACE_NAN, IFELSE,\nLOOKUP_RC1, LOOKUP_RVECT1;\n-\npublic static boolean contains(String value) {\n- for( TernaryType tt : values() )\n- if( tt.name().equals(value) )\n- return true;\n- return false;\n+ return Arrays.stream(values()).anyMatch(tt -> tt.name().equals(value));\n}\npublic String getTemplate(boolean sparse) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "diff": "package org.apache.sysml.hops.codegen.cplan;\n+import java.util.Arrays;\n+\nimport org.apache.commons.lang.ArrayUtils;\nimport org.apache.commons.lang.StringUtils;\nimport org.apache.sysml.hops.codegen.template.TemplateUtils;\n@@ -43,10 +45,7 @@ public class CNodeUnary extends CNode\nSPROP, SIGMOID;\npublic static boolean contains(String value) {\n- for( UnaryType ut : values() )\n- if( ut.name().equals(value) )\n- return true;\n- return false;\n+ return Arrays.stream(values()).anyMatch(ut -> ut.name().equals(value));\n}\npublic String getTemplate(boolean sparse) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -448,18 +448,19 @@ public class TemplateRow extends TemplateBase\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nCNode cdata3 = tmp.get(hop.getInput().get(2).getHopID());\n- //add lookups if required\n- cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n- cdata3 = TemplateUtils.wrapLookupIfNecessary(cdata3, hop.getInput().get(2));\n-\nif( hop.getDim2() > 2 ) { //row vectors\nout = new CNodeBinary(cdata1, new CNodeBinary(cdata2, cdata3, BinType.VECT_MULT_SCALAR),\ntop.getOp()==OpOp3.PLUS_MULT? 
BinType.VECT_PLUS : BinType.VECT_MINUS);\n}\nelse {\n+ //add lookups if required\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n+ cdata2 = TemplateUtils.wrapLookupIfNecessary(cdata2, hop.getInput().get(1));\n+ cdata3 = TemplateUtils.wrapLookupIfNecessary(cdata3, hop.getInput().get(2));\n+\n//construct scalar ternary cnode, primitive operation derived from OpOp3\nout = new CNodeTernary(cdata1, cdata2, cdata3,\n- TernaryType.valueOf(top.getOp().toString()));\n+ TernaryType.valueOf(top.getOp().name()));\n}\n}\nelse if(HopRewriteUtils.isNary(hop, OpOpN.CBIND)) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2134] Fix codegen row tmpl support for vector ternary axpy This patch fixes the CPlan construction of row templates for ternary axpy operations with row vector intermediates. Specifically, we now correctly handle index lookups only for scalar intermediates, which otherwise causes codegen compilation errors.
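For illustration, a minimal DML sketch of the pattern this fix targets: a ternary axpy (plus_mult, i.e., +*) over row-vector intermediates within a fused row operation. The variable names are hypothetical, and the sketch assumes the binary-to-ternary +* fusion rewrite fires for the inner expression.

X = rand(rows=1000, cols=100)
v = colSums(X)        # 1 x 100 row-vector intermediate
w = colMeans(X)       # 1 x 100 row-vector intermediate
R = X * (v + 2.5 * w) # v + 2.5*w compiles to a ternary axpy over row vectors
print(sum(R))

Before this patch, wrapping index lookups around such vector intermediates (as done for scalars) produced invalid generated code; the fix restricts lookups to the scalar branch.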
49,738
05.06.2018 16:34:02
25,200
68f37af3ecdaeaac16120eb8a5dcbb2af7a1a0ba
[MINOR] Additional tests for missing and unknown functions
[ { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionNotFoundTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLException;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class FunctionNotFoundTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"FunNotFound1\";\n+ private final static String TEST_NAME2 = \"FunNotFound2\";\n+ private final static String TEST_DIR = \"functions/misc/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FunctionNotFoundTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testFunNotFound1() {\n+ runFunctionNotFoundTest( TEST_NAME1, true );\n+ }\n+\n+ @Test\n+ public void testFunNotFound2() {\n+ //parse issues (import statement) written to stderr\n+ runFunctionNotFoundTest( TEST_NAME2, false );\n+ }\n+\n+ private void runFunctionNotFoundTest(String testName, boolean error) {\n+ TestConfiguration config = getTestConfiguration(testName);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testName + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-stats\"};\n+\n+ //run script and compare output\n+ runTest(true, error, DMLException.class, -1);\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/FunNotFound1.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+foo = function( Matrix[Double] B ) return (Matrix[Double] V) {\n+ V = baar(B + 7);\n+ while(FALSE){}\n+}\n+baar = function( Matrix[Double] B ) return (Matrix[Double] V) {\n+ V = B+B;\n+ while(FALSE){}\n+}\n+\n+X = matrix(7, 10, 10);\n+Y = foo(X);\n+Z = bar(Y);\n+R = baar(Z);\n+print(sum(R));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/FunNotFound2.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"./non-existing-dir/bar.dml\") as bar\n+\n+foo = function( Matrix[Double] B ) return (Matrix[Double] V) {\n+ V = bar::bar(B + 7);\n+ while(FALSE){}\n+}\n+\n+X = matrix(7, 10, 10);\n+Y = foo(X);\n+Z = bar::bar(Y);\n+R = bar::baar(Z);\n+print(sum(R));\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "diff": "@@ -33,6 +33,7 @@ import org.junit.runners.Suite;\nFunctionInExpressionTest.class,\nFunctionInliningTest.class,\nFunctionNamespaceTest.class,\n+ FunctionNotFoundTest.class,\nFunctionReturnTest.class,\nIfTest.class,\nInvalidBuiltinFunctionCallTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Additional tests for missing and unknown functions
49,738
06.06.2018 21:40:17
25,200
f6de9b1be115eb8af7919f5779d080c027c5f91f
[MINOR] Additional tests for cyclic JMLC package namespace functions
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/NamespaceFunctionTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/NamespaceFunctionTest.java", "diff": "@@ -40,6 +40,9 @@ public class NamespaceFunctionTest extends AutomatedTestBase\nprivate final static String TEST_NAME2 = \"bar1.dml\";\nprivate final static String TEST_NAME3 = \"bar2.dml\";\n+ private final static String TEST_NAME4 = \"foo2.dml\";\n+ private final static String TEST_NAME5 = \"bar3.dml\";\n+\nprivate final static String TEST_DIR = \"functions/jmlc/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + NamespaceFunctionTest.class.getSimpleName() + \"/\";\n@@ -48,24 +51,34 @@ public class NamespaceFunctionTest extends AutomatedTestBase\nprivate final static double sparsity1 = 0.7;\nprivate final static double sparsity2 = 0.1;\n-\n@Override\npublic void setUp() {\naddTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"F2\" }) );\n+ addTestConfiguration(TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"F2\" }) );\n+ }\n+\n+ @Test\n+ public void testJMLCNamespaceAcyclicDense() throws IOException {\n+ runJMLCNamespaceTest(false, false);\n}\n@Test\n- public void testJMLCNamespaceDense() throws IOException {\n- runJMLCNamespaceTest(false);\n+ public void testJMLCNamespaceAcyclicSparse() throws IOException {\n+ runJMLCNamespaceTest(true, false);\n}\n@Test\n- public void testJMLCNamespaceSparse() throws IOException {\n- runJMLCNamespaceTest(true);\n+ public void testJMLCNamespaceCyclicDense() throws IOException {\n+ runJMLCNamespaceTest(false, true);\n+ }\n+\n+ @Test\n+ public void testJMLCNamespaceCyclicSparse() throws IOException {\n+ runJMLCNamespaceTest(true, true);\n}\n- private void runJMLCNamespaceTest(boolean sparse)\n+ private void runJMLCNamespaceTest(boolean sparse, boolean cyclic)\nthrows IOException\n{\nTestConfiguration config = getTestConfiguration(TEST_NAME1);\n@@ -73,10 +86,18 @@ public class NamespaceFunctionTest extends AutomatedTestBase\n//load scripts and create prepared script\nConnection conn = new Connection();\n- String script1 = conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME1);\nMap<String,String> nsscripts = new HashMap<>();\n+ String script1 = null;\n+ if( cyclic ) {\n+ script1 = conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME4);\n+ nsscripts.put(TEST_NAME4, conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME4));\n+ nsscripts.put(TEST_NAME5, conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME5));\n+ }\n+ else {\n+ script1 = conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME1);\nnsscripts.put(TEST_NAME2, conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME2));\nnsscripts.put(TEST_NAME3, conn.readScript(SCRIPT_DIR + TEST_DIR + TEST_NAME3));\n+ }\nPreparedScript pstmt = conn.prepareScript(script1,\nnsscripts, Collections.emptyMap(), new String[]{\"X\"}, new String[]{\"Z\"}, false);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/jmlc/bar3.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"foo2.dml\") as ns1\n+\n+exec = function(matrix[double] A) return (matrix[double] B) {\n+ while(FALSE) {}\n+ B = ns1::debug(A + 10);\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/jmlc/foo2.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"bar3.dml\") as ns1\n+\n+X = read($1);\n+Z = ns1::exec(X);\n+write(Z, $2)\n+\n+debug = function(matrix[double] A) return (matrix[double] B) {\n+ while(FALSE){}\n+ print(\"sum(A) = \" + sum(A));\n+ B = A;\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Additional tests for cyclic JMLC package namespace functions
49,738
07.06.2018 22:23:05
25,200
8d320791265321de38050e741308e3243ce89a7b
New simplification rewrite 'fold nary min/max ops' This patch adds a new dynamic rewrite for folding nested binary or nary min/max operations into a single nary min/max operation. Due to limited support for broadcasting, this is a dynamic rewrite that is only applied if the dimensions of all involved matrix inputs match.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.LinkedHashMap;\n+import java.util.List;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -37,11 +38,13 @@ import org.apache.sysml.hops.Hop.OpOp1;\nimport org.apache.sysml.hops.Hop.OpOp2;\nimport org.apache.sysml.hops.Hop.OpOp3;\nimport org.apache.sysml.hops.Hop.OpOp4;\n+import org.apache.sysml.hops.Hop.OpOpN;\nimport org.apache.sysml.hops.Hop.ParamBuiltinOp;\nimport org.apache.sysml.hops.Hop.ReOrgOp;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.LeftIndexingOp;\nimport org.apache.sysml.hops.LiteralOp;\n+import org.apache.sysml.hops.NaryOp;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.QuaternaryOp;\n@@ -191,6 +194,8 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhi = simplifyNnzComputation(hop, hi, i); //e.g., sum(ppred(X,0,\"!=\")) -> literal(nnz(X)), if nnz known\nhi = simplifyNrowNcolComputation(hop, hi, i); //e.g., nrow(X) -> literal(nrow(X)), if nrow known to remove data dependency\nhi = simplifyTableSeqExpand(hop, hi, i); //e.g., table(seq(1,nrow(v)), v, nrow(v), m) -> rexpand(v, max=m, dir=row, ignore=false, cast=true)\n+ if( OptimizerUtils.ALLOW_OPERATOR_FUSION )\n+ foldMultipleMinMaxOperations(hi); //e.g., min(X,min(min(3,7),Y)) -> min(X,3,7,Y)\n//process childs recursively after rewrites (to investigate pattern newly created by rewrites)\nif( !descendFirst )\n@@ -2584,4 +2589,54 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nreturn hi;\n}\n+\n+ private static Hop foldMultipleMinMaxOperations(Hop hi)\n+ {\n+ if( (HopRewriteUtils.isBinary(hi, OpOp2.MIN, OpOp2.MAX)\n+ || HopRewriteUtils.isNary(hi, OpOpN.MIN, OpOpN.MAX))\n+ && !OptimizerUtils.isHadoopExecutionMode() )\n+ {\n+ OpOp2 bop = (hi instanceof BinaryOp) ? ((BinaryOp)hi).getOp() :\n+ OpOp2.valueOf(((NaryOp)hi).getOp().name());\n+ OpOpN nop = (hi instanceof NaryOp) ? 
((NaryOp)hi).getOp() :\n+ OpOpN.valueOf(((BinaryOp)hi).getOp().name());\n+\n+ boolean converged = false;\n+ while( !converged ) {\n+ //get first matching min/max\n+ Hop first = hi.getInput().stream()\n+ .filter(h -> HopRewriteUtils.isBinary(h, bop) || HopRewriteUtils.isNary(h, nop))\n+ .findFirst().orElse(null);\n+\n+ //replace current op with new nary min/max\n+ final Hop lhi = hi;\n+ if( first != null && first.getParent().size()==1\n+ && first.getInput().stream().allMatch(c -> c.getDataType()==DataType.SCALAR\n+ || HopRewriteUtils.isEqualSize(lhi, c))) {\n+ //construct new list of inputs (in original order)\n+ ArrayList<Hop> linputs = new ArrayList<>();\n+ for(Hop in : hi.getInput())\n+ if( in == first )\n+ linputs.addAll(first.getInput());\n+ else\n+ linputs.add(in);\n+ Hop hnew = HopRewriteUtils.createNary(nop, linputs.toArray(new Hop[0]));\n+ //clear dangling references\n+ HopRewriteUtils.removeAllChildReferences(hi);\n+ HopRewriteUtils.removeAllChildReferences(first);\n+ //rewire all parents (avoid anomalies with refs to hi)\n+ List<Hop> parents = new ArrayList<>(hi.getParent());\n+ for( Hop p : parents )\n+ HopRewriteUtils.replaceChildReference(p, hi, hnew);\n+ hi = hnew;\n+ LOG.debug(\"Applied foldMultipleMinMaxOperations (line \"+hi.getBeginLine()+\").\");\n+ }\n+ else {\n+ converged = true;\n+ }\n+ }\n+ }\n+\n+ return hi;\n+ }\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFoldMinMaxTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.apache.sysml.utils.Statistics;\n+\n+public class RewriteFoldMinMaxTest extends AutomatedTestBase\n+{\n+ private static final String TEST_NAME1 = \"RewriteFoldMin\";\n+ private static final String TEST_NAME2 = \"RewriteFoldMax\";\n+\n+ private static final String TEST_DIR = \"functions/misc/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + RewriteFoldMinMaxTest.class.getSimpleName() + \"/\";\n+\n+ private static final int rows = 1932;\n+ private static final int cols = 14;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testRewriteFoldMinNoRewrite() {\n+ testRewriteFoldMinMax( TEST_NAME1, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testRewriteFoldMinRewrite() {\n+ testRewriteFoldMinMax( TEST_NAME1, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testRewriteFoldMaxNoRewrite() {\n+ testRewriteFoldMinMax( TEST_NAME2, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testRewriteFoldMaxRewrite() {\n+ testRewriteFoldMinMax( TEST_NAME2, true, ExecType.CP );\n+ }\n+\n+ private void testRewriteFoldMinMax( String testname, boolean rewrites, ExecType et )\n+ {\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+\n+ try {\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{ \"-stats\", \"-args\", String.valueOf(rows),\n+ String.valueOf(cols), output(\"R\") };\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ //run performance tests\n+ runTest(true, false, null, -1);\n+\n+ //compare matrices\n+ Double ret = readDMLMatrixFromHDFS(\"R\").get(new CellIndex(1,1));\n+ Assert.assertEquals(\"Wrong result\", new Double(5*rows*cols), ret);\n+\n+ //check for applied rewrites\n+ if( rewrites ) {\n+ Assert.assertTrue(!heavyHittersContainsString(\"min\") && !heavyHittersContainsString(\"max\")\n+ && (!testname.equals(TEST_NAME1) || Statistics.getCPHeavyHitterCount(\"nmin\") == 1)\n+ && (!testname.equals(TEST_NAME2) || Statistics.getCPHeavyHitterCount(\"nmax\") == 1));\n+ }\n+ 
}\n+ finally {\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteFoldMax.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(1, $1, $2)\n+while(FALSE){}\n+Y = max(X-7,max(max(X-5,-7),5))\n+while(FALSE){}\n+R = as.matrix(sum(Y))\n+\n+write(R, $3);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteFoldMin.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(1, $1, $2)\n+while(FALSE){}\n+Y = min(X+7,min(min(X+5,7),5))\n+while(FALSE){}\n+R = as.matrix(sum(Y))\n+\n+write(R, $3);\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "diff": "@@ -61,6 +61,7 @@ import org.junit.runners.Suite;\nRewriteCTableToRExpandTest.class,\nRewriteElementwiseMultChainOptimizationTest.class,\nRewriteEliminateAggregatesTest.class,\n+ RewriteFoldMinMaxTest.class,\nRewriteFoldRCBindTest.class,\nRewriteFuseBinaryOpChainTest.class,\nRewriteFusedRandTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2374] New simplification rewrite 'fold nary min/max ops' This patch adds a new dynamic rewrite for folding nested binary or nary min/max operations into a single nary min/max operation. Due to limited support for broadcasting, this is a dynamic rewrite that is only applied if the dimensions of all involved matrix inputs match.
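As a concrete example of the folding, a small DML sketch with hypothetical names; the two expressions are equivalent when all matrix inputs have matching dimensions, and the rewrite turns the nested chain into the single nary call.

X = rand(rows=100, cols=10)
Y = rand(rows=100, cols=10)
R1 = min(X, min(min(3, 7), Y)) # nested binary min chain
R2 = min(X, 3, 7, Y)           # folded nary min produced by the rewrite
print(sum(R1) - sum(R2))       # prints 0 (elementwise identical results)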
49,738
07.06.2018 23:53:31
25,200
87bc3584db2148cf78b2d46418639e88ca27ec64
[HOTFIX] Fix validation of scalar-scalar binary min/max operations The recent introduction of nary min/max operations corrupted the language validation path for scalar-scalar operations. This patch fixes various issues related to (1) value type inference, (2) output dimension/blocksize propagation, and (3) the handling of all-scalar nary min/max operations.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -574,8 +574,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\ncase MIN:\ncase MAX:\n//min(X), min(X,s), min(s,X), min(s,r), min(X,Y)\n- //unary\n- if (getSecondExpr() == null) {\n+ if (getSecondExpr() == null) { //unary\ncheckNumParameters(1);\ncheckMatrixParam(getFirstExpr());\noutput.setDataType(DataType.SCALAR);\n@@ -583,9 +582,11 @@ public class BuiltinFunctionExpression extends DataIdentifier\noutput.setDimensions(0, 0);\noutput.setBlockDimensions (0, 0);\n}\n-\n- //nary operation\n- else {\n+ else if( getAllExpr().length == 2 ) { //binary\n+ checkNumParameters(2);\n+ setBinaryOutputProperties(output);\n+ }\n+ else { //nary\nfor( Expression e : getAllExpr() )\ncheckMatrixScalarParam(e);\nsetNaryOutputProperties(output);\n@@ -1463,7 +1464,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\ne -> e.getOutput().getDataType().isScalar()) ? DataType.SCALAR : DataType.MATRIX;\nExpression firstM = dt.isMatrix() ? Arrays.stream(getAllExpr()).filter(\ne -> e.getOutput().getDataType().isMatrix()).findFirst().get() : null;\n- ValueType vt = dt.isMatrix() ? ValueType.DOUBLE : ValueType.BOOLEAN;\n+ ValueType vt = dt.isMatrix() ? ValueType.DOUBLE : ValueType.INT;\nfor( Expression e : getAllExpr() ) {\nvt = computeValueType(e, e.getOutput().getValueType(), vt, true);\nif( e.getOutput().getDataType().isMatrix() )\n@@ -1471,9 +1472,10 @@ public class BuiltinFunctionExpression extends DataIdentifier\n}\noutput.setDataType(dt);\noutput.setValueType(vt);\n- output.setDimensions(firstM.getOutput().getDim1(), firstM.getOutput().getDim2());\n- output.setBlockDimensions (\n- firstM.getOutput().getRowsInBlock(), firstM.getOutput().getColumnsInBlock());\n+ output.setDimensions(dt.isMatrix() ? firstM.getOutput().getDim1() : 0,\n+ dt.isMatrix() ? firstM.getOutput().getDim2() : 0);\n+ output.setBlockDimensions (dt.isMatrix() ? firstM.getOutput().getRowsInBlock() : 0,\n+ dt.isMatrix() ? firstM.getOutput().getColumnsInBlock() : 0);\n}\nprivate void expandArguments() {\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix validation of scalar-scalar binary min/max operations The recent introduction of nary min/max operations corrupted the language validation path for scalar-scalar operations. This patch fixes various issues related to (1) value type inference, (2) output dimension/blocksize propagation, and (3) the handling of all-scalar nary min/max operations.
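A short DML sketch of the scalar cases covered by this fix (illustrative values): a binary scalar-scalar min and an all-scalar nary max, where validation must infer a scalar output with an integer value type and zero dimensions/block sizes.

a = min(3, 7)       # binary scalar-scalar min
b = max(1, 2, 5)    # all-scalar nary max
print(a + " " + b)  # prints 3 5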
49,738
08.06.2018 20:10:36
25,200
37e66039da49f79ca73489683bc9b02a339baf0f
Fix codegen integration nary min/max (costs, single op) This patch fixes the codegen support for nary min/max by (1) including nary ops in the cost model and (2) ensuring that single nary ops are not eagerly pruned out before optimization.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "diff": "@@ -51,6 +51,7 @@ import org.apache.sysml.hops.Hop.OpOp2;\nimport org.apache.sysml.hops.Hop.OpOpN;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.LiteralOp;\n+import org.apache.sysml.hops.NaryOp;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.ReorgOp;\n@@ -1118,6 +1119,10 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\n+ \"implemented yet for: \"+((TernaryOp)current).getOp());\n}\n}\n+ else if( current instanceof NaryOp ) {\n+ costs = HopRewriteUtils.isNary(current, OpOpN.MIN, OpOpN.MAX) ?\n+ current.getInput().size() : 1;\n+ }\nelse if( current instanceof ParameterizedBuiltinOp ) {\ncosts = 1;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java", "diff": "@@ -244,7 +244,7 @@ public class CPlanMemoTable\nIterator<Entry<Long, List<MemoTableEntry>>> iter = _plans.entrySet().iterator();\nwhile( iter.hasNext() ) {\nEntry<Long, List<MemoTableEntry>> e = iter.next();\n- if( !ix.contains(e.getKey()) ) {\n+ if( !(ix.contains(e.getKey()) || TemplateUtils.isValidSingleOperation(_hopRefs.get(e.getKey()))) ) {\ne.getValue().removeIf(p -> !p.hasPlanRef());\nif( e.getValue().isEmpty() )\niter.remove();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -36,6 +36,8 @@ import org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\n+import org.apache.sysml.hops.Hop.OpOp1;\n+import org.apache.sysml.hops.Hop.OpOpN;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.codegen.SpoofCompiler;\n@@ -343,6 +345,11 @@ public class TemplateUtils\n&& hasOnlyDataNodeOrLookupInputs(output);\n}\n+ public static boolean isValidSingleOperation(Hop hop) {\n+ return HopRewriteUtils.isNary(hop, OpOpN.MIN, OpOpN.MAX)\n+ || HopRewriteUtils.isUnary(hop, OpOp1.EXP, OpOp1.LOG);\n+ }\n+\npublic static boolean hasNoOperation(CNodeTpl tpl) {\nreturn tpl.getOutput() instanceof CNodeData\n|| isLookup(tpl.getOutput(), true);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java", "diff": "@@ -59,6 +59,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME21 = TEST_NAME+21; //relu operation, (X>0)*dout\nprivate static final String TEST_NAME22 = TEST_NAME+22; //sum(X * seq(1,N) + t(seq(M,1)))\nprivate static final String TEST_NAME23 = TEST_NAME+23; //sum(min(X,Y,Z))\n+ private static final String TEST_NAME24 = TEST_NAME+24; //min(X, Y, Z, 3, 7)\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -71,7 +72,7 @@ public class CellwiseTmplTest extends 
AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=23; i++ ) {\n+ for( int i=1; i<=24; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -399,6 +400,21 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME23, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite24() {\n+ testCodegenIntegration( TEST_NAME24, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenCellwise24() {\n+ testCodegenIntegration( TEST_NAME24, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenCellwiseRewrite24_sp() {\n+ testCodegenIntegration( TEST_NAME24, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldRewrites = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n@@ -467,7 +483,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\nAssert.assertTrue(!heavyHittersContainsSubString(\"xor\"));\nelse if( testname.equals(TEST_NAME22) )\nAssert.assertTrue(!heavyHittersContainsSubString(\"seq\"));\n- else if( testname.equals(TEST_NAME23) )\n+ else if( testname.equals(TEST_NAME23) || testname.equals(TEST_NAME24) )\nAssert.assertTrue(!heavyHittersContainsSubString(\"min\",\"nmin\"));\n}\nfinally {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/cellwisetmpl24.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(6, 500, 2);\n+Y = matrix(7, 500, 2);\n+Z = matrix(8, 500, 2);\n+R = as.matrix(sum(pmin(X,Y,Z,3,7)));\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/cellwisetmpl24.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(6, 500, 2);\n+Y = matrix(7, 500, 2);\n+Z = matrix(8, 500, 2);\n+\n+while(FALSE){}\n+\n+R = min(X,Y,Z,3,7);\n+\n+while(FALSE){}\n+\n+R = as.matrix(sum(R));\n+write(R, $1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2373] Fix codegen integration nary min/max (costs, single op) This patch fixes the codegen support for nary min/max by (1) including nary ops in the cost model and (2) ensuring that single nary ops are not eagerly pruned out before optimization.
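The cost model change above charges an nary min/max by its number of inputs rather than a flat cost of 1, since an n-ary min over k inputs folds roughly k pairwise comparisons per output cell. A minimal standalone sketch of that intuition, with hypothetical helper names (not SystemML code):

```java
public class NaryMinCostDemo {
	// Hypothetical cost helper mirroring the patch: an nary min/max over
	// k inputs costs ~k per output cell, other ops keep a flat cost of 1.
	static int opCost(int numInputs, boolean isNaryMinMax) {
		return isNaryMinMax ? numInputs : 1;
	}

	// Reference semantics of nary min: fold pairwise minima,
	// one comparison per input.
	static double naryMin(double... inputs) {
		double min = Double.POSITIVE_INFINITY;
		for (double v : inputs)
			min = Math.min(min, v);
		return min;
	}

	public static void main(String[] args) {
		System.out.println(naryMin(6, 7, 8, 3, 7)); // -> 3.0
		System.out.println(opCost(5, true));        // -> 5
	}
}
```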
49,738
09.06.2018 00:38:02
25,200
0177a13108cd6fca80609dab029572b40b02d13c
Fix rewrite 'fuse order operation chains' (for overlap) This patch fixes special cases where the rewrite for fusing multiple sort operators w/ single by attributes into a single sort operator w/ a list of by attributes led to runtime exceptions. Specifically, we now fuse these operations with awareness of potential overlap.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -21,8 +21,10 @@ package org.apache.sysml.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.HashMap;\n+import java.util.HashSet;\nimport java.util.LinkedHashMap;\nimport java.util.List;\n+import java.util.Set;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -1475,16 +1477,19 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nboolean desc = HopRewriteUtils.getBooleanValue((LiteralOp)hi.getInput().get(2));\n//find chain of order operations with same desc/ixret configuration and single consumers\n+ Set<String> probe = new HashSet<>();\nArrayList<LiteralOp> byList = new ArrayList<LiteralOp>();\n- byList.add(by);\n+ byList.add(by); probe.add(by.getStringValue());\nHop input = hi.getInput().get(0);\nwhile( HopRewriteUtils.isReorg(input, ReOrgOp.SORT)\n&& input.getInput().get(1) instanceof LiteralOp //scalar by\n+ && !probe.contains(input.getInput().get(1).getName())\n&& HopRewriteUtils.isLiteralOfValue(input.getInput().get(2), desc)\n&& HopRewriteUtils.isLiteralOfValue(hi.getInput().get(3), false)\n&& input.getParent().size() == 1 )\n{\nbyList.add((LiteralOp)input.getInput().get(1));\n+ probe.add(input.getInput().get(1).getName());\ninput = input.getInput().get(0);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2377] Fix rewrite 'fuse order operation chains' (for overlap) This patch fixes special cases where the rewrite for fusing multiple sort operators w/ single by attributes into a single sort operator w/ a list of by attributes led to runtime exceptions. Specifically, we now fuse these operations with awareness of potential overlap.
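The probe set in the patch stops chain collection as soon as a by-column repeats, because fusing sort(sort(X, by=1), by=1) into a single sort with by=(1,1) is invalid. A hedged sketch of that collection loop over hypothetical stand-in types (not the actual Hop classes):

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SortChainCollector {
	// Hypothetical stand-in for a sort operator with a single by-column.
	static class SortOp {
		final String byCol;  // literal by attribute
		final SortOp input;  // child sort, or null
		SortOp(String byCol, SortOp input) { this.byCol = byCol; this.input = input; }
	}

	// Collect a fusable chain of by-columns, stopping at the first repeat
	// (Set.add returns false for duplicates, like the probe-set guard above).
	static List<String> collectByChain(SortOp root) {
		Set<String> probe = new HashSet<>();
		List<String> byList = new ArrayList<>();
		for (SortOp cur = root; cur != null && probe.add(cur.byCol); cur = cur.input)
			byList.add(cur.byCol);
		return byList;
	}

	public static void main(String[] args) {
		SortOp chain = new SortOp("1", new SortOp("2", new SortOp("1", null)));
		System.out.println(collectByChain(chain)); // -> [1, 2], stops at repeated "1"
	}
}
```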
49,738
09.06.2018 18:41:45
25,200
8e1146e8945dc1a3af1ecb888a2b5682e0e60040
[MINOR] Improved memory profiling for JMLC pipelines, avoid unnecessary allocations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -123,11 +123,26 @@ public class LocalVariableMap implements Cloneable\n}\npublic double getPinnedDataSize() {\n- //note: this method returns the total size of pinned data objects\n- //that are not subject to automatic eviction.\n+ //note: this method returns the total size of distinct pinned\n+ //data objects that are not subject to automatic eviction\n+ //(in JMLC all matrices and frames are pinned)\n+\n+ //compute map of distinct cachable data\n+ Map<Integer, Data> dict = new HashMap<>();\n+ for( Entry<String,Data> e : localMap.entrySet() ) {\n+ int hash = System.identityHashCode(e.getValue());\n+ if( !dict.containsKey(hash) && e.getValue() instanceof CacheableData )\n+ dict.put(hash, e.getValue());\n+ }\n+\n+ //compute total in-memory size\n+ return dict.values().stream().mapToDouble(\n+ d -> ((CacheableData<?>)d).getDataSize()).sum();\n+ }\n+\n+ public long countPinnedData() {\nreturn localMap.values().stream()\n- .filter(d -> (d instanceof CacheableData))\n- .mapToDouble(d -> ((CacheableData<?>)d).getDataSize()).sum();\n+ .filter(d -> (d instanceof CacheableData)).count();\n}\npublic String serialize() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "diff": "@@ -207,7 +207,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n_uniqueID = isCachingActive() ? _seq.getNextID() : -1;\n_cacheStatus = CacheStatus.EMPTY;\n_numReadThreads = 0;\n- _gpuObjects = new HashMap<>();\n+ _gpuObjects = DMLScript.USE_ACCELERATOR ? new HashMap<>() : null;\n}\n/**\n@@ -671,7 +671,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nLOG.trace(\"Exporting \" + this.getDebugName() + \" to \" + fName + \" in format \" + outputFormat);\n- //TODO remove\n+ if( DMLScript.USE_ACCELERATOR ) {\nboolean copiedFromGPU = false;\nfor (Map.Entry<GPUContext, GPUObject> kv : _gpuObjects.entrySet()) {\nGPUObject gObj = kv.getValue();\n@@ -683,6 +683,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\ngetCache();\n}\n}\n+ }\n//check for persistent or transient writes\nboolean pWrite = !fName.equals(_hdfsFileName);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved memory profiling for JMLC pipelines, avoid unnecessary allocations
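The reworked getPinnedDataSize counts each pinned object once, even when several variable names are bound to the same data. The patch keys a HashMap on System.identityHashCode; the standalone sketch below uses an IdentityHashMap for brevity, with a hypothetical Data type standing in for CacheableData:

```java
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;

public class PinnedSizeDemo {
	static class Data { final double sizeBytes; Data(double s) { sizeBytes = s; } }

	// Sum sizes over *distinct* objects: identity semantics ensure that
	// two variables bound to the same Data instance are counted once.
	static double distinctSize(Map<String, Data> vars) {
		Map<Data, Boolean> seen = new IdentityHashMap<>();
		double total = 0;
		for (Data d : vars.values())
			if (seen.put(d, Boolean.TRUE) == null) // first sighting only
				total += d.sizeBytes;
		return total;
	}

	public static void main(String[] args) {
		Data shared = new Data(1024);
		Map<String, Data> vars = new HashMap<>();
		vars.put("X", shared);
		vars.put("Y", shared);        // alias of X
		vars.put("Z", new Data(512));
		System.out.println(distinctSize(vars)); // -> 1536.0, not 2560.0
	}
}
```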
49,738
10.06.2018 13:22:35
25,200
27fff38bcb67fe90297063371a8608278d93e0be
[MINOR] Simplification of internal matrix block slicing of row blocks
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "diff": "@@ -90,7 +90,7 @@ public class ParamservUtils {\nnew MetaDataFormat(new MatrixCharacteristics(-1, -1, -1, -1),\nOutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo));\nMatrixBlock tmp = mo.acquireRead();\n- result.acquireModify(tmp.slice((int) rl - 1, (int) rh - 1, 0, tmp.getNumColumns() - 1, new MatrixBlock()));\n+ result.acquireModify(tmp.slice((int) rl - 1, (int) rh - 1));\nmo.release();\nresult.release();\nresult.enableCleanup(false);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/DataPartitionerRemoteSparkMapper.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/DataPartitionerRemoteSparkMapper.java", "diff": "@@ -117,8 +117,7 @@ public class DataPartitionerRemoteSparkMapper extends ParWorker implements PairF\nfor( int i=0; i<rows; i+=_n ) {\nPairWritableBlock tmp = new PairWritableBlock();\ntmp.indexes = new MatrixIndexes(1, col_offset/_bclen+1);\n- tmp.block = value2.slice(i, Math.min(i+(int)_n-1, value2.getNumRows()-1),\n- 0, value2.getNumColumns()-1, new MatrixBlock());\n+ tmp.block = value2.slice(i, Math.min(i+(int)_n-1, value2.getNumRows()-1));\nret.add(new Tuple2<Long,Writable>(new Long((row_offset+i)/_n+1),tmp));\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/mr/CumulativeOffsetInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/mr/CumulativeOffsetInstruction.java", "diff": "@@ -83,7 +83,7 @@ public class CumulativeOffsetInstruction extends BinaryInstruction {\n//blockwise offset aggregation and prefix sum computation\nMatrixBlock data2 = new MatrixBlock(data); //cp data\n- MatrixBlock fdata2 = data2.slice(0, 0, 0, data2.getNumColumns()-1, new MatrixBlock()); //1-based\n+ MatrixBlock fdata2 = data2.slice(0, 0);\nfdata2.binaryOperationsInPlace(_bop, offset); //sum offset to first row\ndata2.copy(0, 0, 0, data2.getNumColumns()-1, fdata2, true); //0-based\ndata2.unaryOperations(_uop, blk); //compute columnwise prefix sums/prod/min/max\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/AppendGSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/AppendGSPInstruction.java", "diff": "@@ -219,7 +219,7 @@ public class AppendGSPInstruction extends BinarySPInstruction {\n}\nelse {\n// Since merge requires the dimensions matching, shifting = slicing + left indexing\n- MatrixBlock firstSlicedBlk = in.slice(0, cutAt-1, 0, in.getNumColumns()-1, new MatrixBlock());\n+ MatrixBlock firstSlicedBlk = in.slice(0, cutAt-1);\nMatrixBlock firstBlk = new MatrixBlock(lblen1, in.getNumColumns(), true);\nfirstBlk = firstBlk.leftIndexingOperations(firstSlicedBlk, _shiftBy, _blen-1, 0, in.getNumColumns()-1, new MatrixBlock(), UpdateType.INPLACE_PINNED);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CumulativeOffsetSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CumulativeOffsetSPInstruction.java", "diff": "@@ -181,7 +181,7 @@ public class CumulativeOffsetSPInstruction extends BinarySPInstruction {\n//blockwise offset aggregation and prefix sum computation\nMatrixBlock data2 = new 
MatrixBlock(dblkIn); //cp data\n- MatrixBlock fdata2 = data2.slice(0, 0, 0, data2.getNumColumns()-1, new MatrixBlock()); //1-based\n+ MatrixBlock fdata2 = data2.slice(0, 0);\nfdata2.binaryOperationsInPlace(_bop, oblkIn); //sum offset to first row\ndata2.copy(0, 0, 0, data2.getNumColumns()-1, fdata2, true); //0-based\ndata2.unaryOperations(_uop, blkOut); //compute columnwise prefix sums/prod/min/max\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java", "diff": "@@ -952,7 +952,7 @@ public class RDDConverterUtils\nMatrixIndexes ix = arg0._1();\nMatrixBlock blk = arg0._2();\nfor( int i=0; i<blk.getNumRows(); i++ ) {\n- MatrixBlock tmpBlk = blk.slice(i, i, 0, blk.getNumColumns()-1, new MatrixBlock());\n+ MatrixBlock tmpBlk = blk.slice(i, i);\nlong rix = UtilFunctions.computeCellIndex(ix.getRowIndex(), _brlen, i);\nret.add(new Tuple2<>(rix, new Tuple2<>(ix.getColumnIndex(),tmpBlk)));\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java", "diff": "@@ -291,7 +291,7 @@ public class RDDSortUtils\nthrows Exception {\nArrayList<MatrixBlock> rows = new ArrayList<>();\nfor(int i=0; i<arg0.getNumRows(); i++)\n- rows.add(arg0.slice(i, i, 0, arg0.getNumColumns()-1, new MatrixBlock()));\n+ rows.add(arg0.slice(i, i));\nreturn rows.iterator();\n}\n}\n@@ -368,7 +368,7 @@ public class RDDSortUtils\nlong ixoffset = (ix.getRowIndex()-1)*_brlen;\nfor( int i=0; i<mb.getNumRows(); i++) {\ndouble[] vals = DataConverter.convertToDoubleVector(\n- mb.slice(i, i, 0, mb.getNumColumns()-1, new MatrixBlock()));\n+ mb.slice(i, i));\nret.add(new Tuple2<>(new ValuesIndexPair(vals,ixoffset+i+1), vals));\n}\n@@ -775,7 +775,7 @@ public class RDDSortUtils\nint pos = UtilFunctions.computeCellInBlock(valix, _brlen);\nint len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);\nMatrixIndexes lix = new MatrixIndexes(rix,ixmap.getColumnIndex());\n- MatrixBlock tmp = data.slice(_currPos, _currPos, 0, data.getNumColumns()-1, new MatrixBlock());\n+ MatrixBlock tmp = data.slice(_currPos, _currPos);\n_currPos++;\n//handle end of block situations\n@@ -864,7 +864,7 @@ public class RDDSortUtils\nint pos = UtilFunctions.computeCellInBlock(valix, _brlen);\nint len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);\nMatrixIndexes lix = new MatrixIndexes(rix,ixmap.getColumnIndex());\n- MatrixBlock tmp = data.slice(_currPos, _currPos, 0, data.getNumColumns()-1, new MatrixBlock());\n+ MatrixBlock tmp = data.slice(_currPos, _currPos);\n_currPos++;\n//handle end of block situations\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "diff": "@@ -263,7 +263,7 @@ public class LibMatrixReorg\n//slice first block\nMatrixIndexes outix1 = new MatrixIndexes(blkix1, inix.getColumnIndex());\nMatrixBlock outblk1 = new MatrixBlock(blklen1, inblk.getNumColumns(), inblk.isInSparseFormat());\n- MatrixBlock tmp1 = tmpblk.slice(0, iposCut, 0, tmpblk.getNumColumns()-1, new MatrixBlock());\n+ MatrixBlock tmp1 = tmpblk.slice(0, iposCut);\noutblk1.leftIndexingOperations(tmp1, ipos1, ipos1+tmp1.getNumRows()-1,\n0, tmpblk.getNumColumns()-1, outblk1, 
UpdateType.INPLACE_PINNED);\nout.add(new IndexedMatrixValue(outix1, outblk1));\n@@ -272,7 +272,7 @@ public class LibMatrixReorg\nif( blkix1 != blkix2 ) {\nMatrixIndexes outix2 = new MatrixIndexes(blkix2, inix.getColumnIndex());\nMatrixBlock outblk2 = new MatrixBlock(blklen2, inblk.getNumColumns(), inblk.isInSparseFormat());\n- MatrixBlock tmp2 = tmpblk.slice(iposCut+1, tmpblk.getNumRows()-1, 0, tmpblk.getNumColumns()-1, new MatrixBlock());\n+ MatrixBlock tmp2 = tmpblk.slice(iposCut+1, tmpblk.getNumRows()-1);\noutblk2.leftIndexingOperations(tmp2, 0, tmp2.getNumRows()-1, 0, tmpblk.getNumColumns()-1, outblk2, UpdateType.INPLACE_PINNED);\nout.add(new IndexedMatrixValue(outix2, outblk2));\n}\n@@ -670,8 +670,7 @@ public class LibMatrixReorg\n{\nfor( int rl=0; rl<tmp.getNumRows(); rl+=brlen ) {\nMatrixBlock mb = tmp.slice(\n- rl, (int)(Math.min(rl+brlen, tmp.getNumRows())-1),\n- 0, tmp.getNumColumns()-1, new MatrixBlock());\n+ rl, (int)(Math.min(rl+brlen, tmp.getNumRows())-1));\noutList.add(new IndexedMatrixValue(\nnew MatrixIndexes(rl/brlen+1, ix.getRowIndex()), mb));\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -3779,6 +3779,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n(int)ixrange.colStart, (int)ixrange.colEnd, true, ret);\n}\n+ public MatrixBlock slice(int rl, int ru) {\n+ return slice(rl, ru, 0, clen-1, true, new MatrixBlock());\n+ }\n+\n@Override\npublic MatrixBlock slice(int rl, int ru, int cl, int cu, CacheBlock ret) {\nreturn slice(rl, ru, cl, cu, true, ret);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/OperationsOnMatrixValues.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/OperationsOnMatrixValues.java", "diff": "@@ -391,8 +391,7 @@ public class OperationsOnMatrixValues\nint row_offset = blockRow*brlen;\n//copy submatrix to block\n- MatrixBlock tmp = out.slice( row_offset, row_offset+maxRow-1,\n- 0, out.getNumColumns()-1, new MatrixBlock() );\n+ MatrixBlock tmp = out.slice(row_offset, row_offset+maxRow-1);\n//append block to result cache\noutlist.add(new IndexedMatrixValue(new MatrixIndexes(blockRow+1,ix.getColumnIndex()), tmp));\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Simplification of internal matrix block slicing of row blocks
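The new two-argument slice is a pure convenience overload that selects a contiguous row range across all columns. A small usage sketch, assuming the MatrixBlock API shown in the diff:

```java
import org.apache.sysml.runtime.matrix.data.MatrixBlock;

public class SliceDemo {
	public static void main(String[] args) {
		MatrixBlock mb = new MatrixBlock(100, 10, false); // 100x10 dense block
		// Both calls produce the same row slice (rows 0..9, all columns);
		// the overload defaults cl=0, cu=clen-1 and allocates the target.
		MatrixBlock rows = mb.slice(0, 9);
		MatrixBlock same = mb.slice(0, 9, 0, mb.getNumColumns() - 1, new MatrixBlock());
		System.out.println(rows.getNumRows() + " x " + rows.getNumColumns()); // 10 x 10
	}
}
```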
49,738
10.06.2018 13:27:41
25,200
52891d28e4928300bca585e397601c8b8da93c5c
[HOTFIX] Robustness buffer pool empty GPU object handling
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "diff": "@@ -343,10 +343,14 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n}\npublic synchronized GPUObject getGPUObject(GPUContext gCtx) {\n+ if( _gpuObjects == null )\n+ return null;\nreturn _gpuObjects.get(gCtx);\n}\npublic synchronized void setGPUObject(GPUContext gCtx, GPUObject gObj) {\n+ if( _gpuObjects == null )\n+ _gpuObjects = new HashMap<>();\nGPUObject old = _gpuObjects.put(gCtx, gObj);\nif (old != null)\nthrow new DMLRuntimeException(\"GPU : Inconsistent internal state - this CacheableData already has a GPUObject assigned to the current GPUContext (\" + gCtx + \")\");\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Robustness buffer pool empty GPU object handling
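The hotfix tolerates a null _gpuObjects map on the read path and allocates it lazily on the write path, since the previous patch only allocates the map when the GPU accelerator is enabled. The underlying pattern, sketched standalone with placeholder types:

```java
import java.util.HashMap;
import java.util.Map;

public class LazyMapDemo {
	private Map<String, Object> gpuObjects = null; // allocated only when needed

	// Read path: tolerate the never-allocated case.
	synchronized Object get(String ctx) {
		return (gpuObjects == null) ? null : gpuObjects.get(ctx);
	}

	// Write path: allocate lazily on first use.
	synchronized void put(String ctx, Object obj) {
		if (gpuObjects == null)
			gpuObjects = new HashMap<>();
		gpuObjects.put(ctx, obj);
	}
}
```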
49,727
11.06.2018 12:58:26
25,200
c10e509a78232f8cacfa9c7485395792d6af24e8
Fix paramserv shutdown of agg service thread pool Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "diff": "@@ -34,6 +34,7 @@ import java.util.stream.Collectors;\nimport java.util.stream.IntStream;\nimport org.apache.commons.lang3.ArrayUtils;\n+import org.apache.commons.lang3.concurrent.BasicThreadFactory;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.parser.DMLProgram;\n@@ -71,7 +72,10 @@ public abstract class ParamServer {\ncatch (InterruptedException e) {\nthrow new DMLRuntimeException(\"Param server: failed to broadcast the initial model.\", e);\n}\n- _es = Executors.newSingleThreadExecutor();\n+ BasicThreadFactory factory = new BasicThreadFactory.Builder()\n+ .namingPattern(\"agg-service-pool-thread-%d\")\n+ .build();\n+ _es = Executors.newSingleThreadExecutor(factory);\n}\npublic abstract void push(int workerID, ListObject value);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "diff": "@@ -51,6 +51,7 @@ import java.util.concurrent.Future;\nimport java.util.stream.Collectors;\nimport java.util.stream.IntStream;\n+import org.apache.commons.lang3.concurrent.BasicThreadFactory;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.log4j.Level;\n@@ -111,7 +112,10 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\npublic void processInstruction(ExecutionContext ec) {\nPSModeType mode = getPSMode();\nint workerNum = getWorkerNum(mode);\n- ExecutorService es = Executors.newFixedThreadPool(workerNum);\n+ BasicThreadFactory factory = new BasicThreadFactory.Builder()\n+ .namingPattern(\"workers-pool-thread-%d\")\n+ .build();\n+ ExecutorService es = Executors.newFixedThreadPool(workerNum, factory);\nString updFunc = getParam(PS_UPDATE_FUN);\nString aggFunc = getParam(PS_AGGREGATION_FUN);\n@@ -148,20 +152,20 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nmode, workerNum, freq, updateType, scheme));\n}\n- // Launch the worker threads and wait for completion\ntry {\n+ // Launch the worker threads and wait for completion\nfor (Future<Void> ret : es.invokeAll(workers))\nret.get(); //error handling\n+ // Fetch the final model from ps\n+ ListObject result = ps.getResult();\n+ ec.setVariable(output.getName(), result);\n} catch (InterruptedException | ExecutionException e) {\nthrow new DMLRuntimeException(\"ParamservBuiltinCPInstruction: some error occurred: \", e);\n} finally {\nes.shutdownNow();\n+ // Should shutdown the thread pool in param server\n+ ps.shutdown();\n}\n-\n- // Fetch the final model from ps\n- ListObject result;\n- result = ps.getResult();\n- ec.setVariable(output.getName(), result);\n}\nprivate PSModeType getPSMode() {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2380] Fix paramserv shutdown of agg service thread pool Closes #782.
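Two details worth noting in this patch: the pools now get descriptive thread names via commons-lang3's BasicThreadFactory (helpful in thread dumps), and the result fetch moved inside the try block so the finally clause always shuts the pools down. A condensed sketch of the pattern; the submitted task is a placeholder:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class PoolShutdownDemo {
	public static void main(String[] args) throws Exception {
		ExecutorService es = Executors.newFixedThreadPool(2,
			new BasicThreadFactory.Builder()
				.namingPattern("workers-pool-thread-%d").build());
		try {
			// launch workers and wait for completion (placeholder task)
			es.submit(() -> System.out.println(
				Thread.currentThread().getName() + ": work done")).get();
			// fetch results *inside* try, so failures still reach finally
		}
		finally {
			es.shutdownNow(); // always release the pool, as in the patch
		}
	}
}
```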
49,738
12.06.2018 00:09:10
25,200
563b8926b2edb26ac0b852f4ad593985f0cedadf
[MINOR] Internal primitive for tracking matrices w/ NaNs This patch introduces a useful primitive for tracking and reporting matrices with NaNs. When applied after each executed instruction, it makes it easy to find the first instruction that introduced a NaN (before NaNs spread uncontrollably across many intermediates).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -5573,6 +5573,39 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nreturn !sparse || DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR; //only MCSR thread-safe\n}\n+ /**\n+ * Checks for existing NaN values in the matrix block.\n+ * @throws DMLRuntimeException if the blocks contains at least one NaN.\n+ */\n+ public void checkNaN() {\n+ if( isEmptyBlock(false) )\n+ return;\n+ if( sparse ) {\n+ SparseBlock sblock = sparseBlock;\n+ for(int i=0; i<rlen; i++) {\n+ if( sblock.isEmpty(i) ) continue;\n+ int alen = sblock.size(i);\n+ int apos = sblock.pos(i);\n+ int[] aix = sblock.indexes(i);\n+ double[] avals = sblock.values(i);\n+ for(int k=apos; k<apos+alen; k++) {\n+ if( Double.isNaN(avals[k]) )\n+ throw new DMLRuntimeException(\"NaN encountered at position [\"+i+\",\"+aix[k]+\"].\");\n+ }\n+ }\n+ }\n+ else {\n+ DenseBlock dblock = denseBlock;\n+ for(int i=0; i<rlen; i++) {\n+ int aix = dblock.pos(i);\n+ double[] avals = dblock.values(i);\n+ for(int j=0; j<clen; j++)\n+ if( Double.isNaN(avals[aix+j]) )\n+ throw new DMLRuntimeException(\"NaN encountered at position [\"+i+\",\"+j+\"].\");\n+ }\n+ }\n+ }\n+\n@Override\npublic int compareTo(Object arg0) {\nthrow new RuntimeException(\"CompareTo should never be called for matrix blocks.\");\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Internal primitive for tracking matrices w/ NaNs This patch introduces a useful primitive for tracking and reporting matrices with NaNs. When applied after each executed instruction, it makes it easy to find the first instruction that introduced a NaN (before NaNs spread uncontrollably across many intermediates).
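The primitive scans either representation and fails fast at the first NaN, which pinpoints the producing instruction when invoked after each one. A minimal dense-only sketch of the same check (hypothetical helper; the real method also covers the sparse block, as shown above):

```java
public class NaNCheckDemo {
	// Fail fast at the first NaN, reporting its [row,col] position.
	static void checkNaN(double[] vals, int rows, int cols) {
		for (int i = 0; i < rows; i++)
			for (int j = 0; j < cols; j++)
				if (Double.isNaN(vals[i * cols + j]))
					throw new RuntimeException(
						"NaN encountered at position [" + i + "," + j + "].");
	}

	public static void main(String[] args) {
		double[] m = {1, 2, 3, 0.0 / 0.0, 5, 6}; // 2x3 with a NaN at [1,0]
		checkNaN(m, 2, 3); // throws: NaN encountered at position [1,0].
	}
}
```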
49,738
12.06.2018 13:10:43
25,200
1fbf939d7b4e9d989520a1022e318c17a373b8e2
Fix IPA inlining of functions w/o bound returns The IPA rewrite for inlining small functions led to invalid hops in special cases of functions w/o returns, where transient writes remained from a previously existing sequence of statement blocks that had been combined into a single block via rewrites.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassInlineFunctions.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassInlineFunctions.java", "diff": "@@ -96,8 +96,11 @@ public class IPAPassInlineFunctions extends IPAPass\noutMap.put(fstmt.getOutputParams().get(j).getName(), opOutputs[j]);\nfor(int j=0; j<hops2.size(); j++) {\nHop out = hops2.get(j);\n- if( HopRewriteUtils.isData(out, DataOpTypes.TRANSIENTWRITE) )\n+ if( HopRewriteUtils.isData(out, DataOpTypes.TRANSIENTWRITE) ) {\nout.setName(outMap.get(out.getName()));\n+ if( out.getName() == null )\n+ hops2.remove(j);\n+ }\n}\nfcallsSB.get(i).getHops().remove(op);\nfcallsSB.get(i).getHops().addAll(hops2);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "diff": "@@ -30,6 +30,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\n{\nprivate final static String TEST_NAME1 = \"FunPotpourriNoReturn\";\nprivate final static String TEST_NAME2 = \"FunPotpourriComments\";\n+ private final static String TEST_NAME3 = \"FunPotpourriNoReturn2\";\nprivate final static String TEST_DIR = \"functions/misc/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FunctionPotpourriTest.class.getSimpleName() + \"/\";\n@@ -39,6 +40,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nTestUtils.clearAssertionInformation();\naddTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\n}\n@Test\n@@ -51,6 +53,11 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nrunFunctionTest( TEST_NAME2, false );\n}\n+ @Test\n+ public void testFunctionNoReturnSpec() {\n+ runFunctionTest( TEST_NAME3, false );\n+ }\n+\nprivate void runFunctionTest(String testName, boolean error) {\nTestConfiguration config = getTestConfiguration(testName);\nloadTestConfiguration(config);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/FunPotpourriNoReturn2.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo = function (String msg) {\n+ verbose = FALSE\n+ if (verbose)\n+ print(msg)\n+}\n+\n+foo(\"This is an test error message.\")\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2388] Fix IPA inlining of functions w/o bound returns The IPA rewrite for inlining small functions led to invalid hops in special cases of functions w/o returns, where transient writes remained from a previously existing sequence of statement blocks that had been combined into a single block via rewrites.
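The fix maps each transient write back to its bound output name; when a call binds no target (as in the new FunPotpourriNoReturn2 test), the mapped name is null and the write must be dropped rather than renamed. A hedged sketch of that filter step over hypothetical stand-in types, using removeIf instead of indexed removal:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class InlineOutputFilter {
	// Hypothetical stand-in for a transient-write hop.
	static class TWrite {
		String name;
		TWrite(String name) { this.name = name; }
		public String toString() { return "twrite(" + name + ")"; }
	}

	// Rename writes to their bound targets; drop writes with no binding.
	static void rebindOutputs(List<TWrite> hops, Map<String, String> outMap) {
		for (TWrite w : hops)
			w.name = outMap.get(w.name); // null if the caller binds no target
		hops.removeIf(w -> w.name == null);
	}

	public static void main(String[] args) {
		List<TWrite> hops = new ArrayList<>();
		hops.add(new TWrite("retval"));      // no caller binding -> dropped
		rebindOutputs(hops, new HashMap<>());
		System.out.println(hops);            // -> []
	}
}
```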
49,738
12.06.2018 18:06:28
25,200
735c4119c4edca54c495fd0bc647163d80ff7e43
Fix robustness sparse-dense matrix mult, part II The recent fix for special cases of sparse-dense matrix mult fixed overruns of the nnz range but corrupted the check for underruns of the start position. This patch now restores this additional check to make it robust in both directions.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -1301,7 +1301,9 @@ public class LibMatrixMult\nint k2 = (ru==cd) ? alen : a.posFIndexGTE(i, ru);\nk2 = (k2>=0) ? apos+k2 : apos+alen;\n- if( k1<apos+alen && b.isContiguous(aix[k1], aix[k2-1]) ) {\n+ //note: guard k1 (and thus also k2) against overrun nnz, and guard\n+ //contiguous check for k2-1 against underrun of start pos for k1==k2.\n+ if( k1<apos+alen && (k1==k2 || b.isContiguous(aix[k1], aix[k2-1])) ) {\ndouble[] bvals = b.values(aix[k1]);\nint base = aix[k1]*n - b.pos(aix[k1]);\n//rest not aligned to blocks of 4 rows\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2382] Fix robustness sparse-dense matrix mult, part II The recent fix for special cases of sparse-dense matrix mult fixed overruns of the nnz range but corrupted the check for underruns of the start position. This patch now restores this additional check to make it robust in both directions.
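The subtlety: k1 and k2 bound the nonzeros of row i that fall into the current row range; when no such nonzero exists, k1==k2 and aix[k2-1] would read the element just before the range start. The restored guard short-circuits the contiguity check for this empty range. A small standalone illustration of the guard:

```java
import java.util.function.BiPredicate;

public class EmptyRangeGuardDemo {
	// Check contiguity only for non-empty index ranges [k1,k2); when
	// k1==k2 there is nothing to process, so the check must not touch
	// aix[k2-1], the element just before the range start.
	static boolean safeContiguousPath(int[] aix, int k1, int k2,
			BiPredicate<Integer, Integer> isContiguous) {
		return k1 == k2 || isContiguous.test(aix[k1], aix[k2 - 1]);
	}

	public static void main(String[] args) {
		int[] aix = {3, 5, 9};
		// empty range: short-circuit avoids the out-of-range read
		System.out.println(safeContiguousPath(aix, 1, 1, (a, b) -> a <= b)); // true
		// non-empty range: contiguity actually checked
		System.out.println(safeContiguousPath(aix, 0, 3, (a, b) -> a <= b)); // true
	}
}
```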
49,727
13.06.2018 08:47:24
25,200
0871f260e6fc6d6fed57d2cc249bf4d8beb0a31f
Rework paramserv data partitioner API and tests Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitioner.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitioner.java", "diff": "@@ -21,27 +21,20 @@ package org.apache.sysml.runtime.controlprogram.paramserv;\nimport java.util.List;\n-import org.apache.commons.logging.Log;\n-import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\npublic abstract class DataPartitioner {\n- protected static final Log LOG = LogFactory.getLog(DataPartitioner.class.getName());\n+ public final class Result {\n+ public final List<MatrixObject> pFeatures;\n+ public final List<MatrixObject> pLabels;\n- public abstract void doPartitioning(List<LocalPSWorker> workers, MatrixObject features, MatrixObject labels);\n-\n- protected void setPartitionedData(List<LocalPSWorker> workers, List<MatrixObject> pfs, List<MatrixObject> pls) {\n- if (pfs.size() < workers.size()) {\n- if (LOG.isWarnEnabled()) {\n- LOG.warn(String.format(\"There is only %d batches of data but has %d workers. \"\n- + \"Hence, reset the number of workers with %d.\", pfs.size(), workers.size(), pfs.size()));\n- }\n- workers = workers.subList(0, pfs.size());\n- }\n- for (int i = 0; i < workers.size(); i++) {\n- workers.get(i).setFeatures(pfs.get(i));\n- workers.get(i).setLabels(pls.get(i));\n+ public Result(List<MatrixObject> pFeatures, List<MatrixObject> pLabels) {\n+ this.pFeatures = pFeatures;\n+ this.pLabels = pLabels;\n}\n}\n+\n+ public abstract Result doPartitioning(int workersNum, MatrixObject features, MatrixObject labels);\n+\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerDC.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerDC.java", "diff": "@@ -32,17 +32,10 @@ import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n* non-overlapping partitions of rows.\n*/\npublic class DataPartitionerDC extends DataPartitioner {\n- @Override\n- public void doPartitioning(List<LocalPSWorker> workers, MatrixObject features, MatrixObject labels) {\n- int workerNum = workers.size();\n- List<MatrixObject> pfs = doPartitioning(workerNum, features);\n- List<MatrixObject> pls = doPartitioning(workerNum, labels);\n- setPartitionedData(workers, pfs, pls);\n- }\nprivate List<MatrixObject> doPartitioning(int k, MatrixObject mo) {\nList<MatrixObject> list = new ArrayList<>();\n- long stepSize = (long) Math.ceil(mo.getNumRows() / k);\n+ long stepSize = (long) Math.ceil((double) mo.getNumRows() / k);\nlong begin = 1;\nwhile (begin < mo.getNumRows()) {\nlong end = Math.min(begin - 1 + stepSize, mo.getNumRows());\n@@ -52,4 +45,11 @@ public class DataPartitionerDC extends DataPartitioner {\n}\nreturn list;\n}\n+\n+ @Override\n+ public Result doPartitioning(int workersNum, MatrixObject features, MatrixObject labels) {\n+ List<MatrixObject> pfs = doPartitioning(workersNum, features);\n+ List<MatrixObject> pls = doPartitioning(workersNum, labels);\n+ return new Result(pfs, pls);\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerDR.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerDR.java", "diff": "@@ -34,22 +34,14 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n* i.e., sampling without replacement to ensure 
disjointness.\n*/\npublic class DataPartitionerDR extends DataPartitioner {\n- @Override\n- public void doPartitioning(List<LocalPSWorker> workers, MatrixObject features, MatrixObject labels) {\n- // Generate a single permutation matrix (workers use slices)\n- MatrixBlock permutation = ParamservUtils.generatePermutation((int)features.getNumRows());\n- List<MatrixObject> pfs = doPartitioning(workers.size(), features, permutation);\n- List<MatrixObject> pls = doPartitioning(workers.size(), labels, permutation);\n- setPartitionedData(workers, pfs, pls);\n- }\nprivate List<MatrixObject> doPartitioning(int k, MatrixObject mo, MatrixBlock permutation) {\nMatrixBlock data = mo.acquireRead();\n- int batchSize = (int) Math.ceil(mo.getNumRows() / k);\n+ int batchSize = (int) Math.ceil((double) mo.getNumRows() / k);\nList<MatrixObject> pMatrices = IntStream.range(0, k).mapToObj(i -> {\n- int begin = i * batchSize + 1;\n+ int begin = i * batchSize;\nint end = (int) Math.min((i + 1) * batchSize, mo.getNumRows());\n- MatrixBlock slicedPerm = permutation.slice(begin - 1, end - 1);\n+ MatrixBlock slicedPerm = permutation.slice(begin, end - 1);\nMatrixBlock output = slicedPerm.aggregateBinaryOperations(slicedPerm,\ndata, new MatrixBlock(), InstructionUtils.getMatMultOperator(k));\nMatrixObject result = ParamservUtils.newMatrixObject();\n@@ -60,4 +52,13 @@ public class DataPartitionerDR extends DataPartitioner {\nmo.release();\nreturn pMatrices;\n}\n+\n+ @Override\n+ public Result doPartitioning(int workersNum, MatrixObject features, MatrixObject labels) {\n+ // Generate a single permutation matrix (workers use slices)\n+ MatrixBlock permutation = ParamservUtils.generatePermutation((int)features.getNumRows());\n+ List<MatrixObject> pfs = doPartitioning(workersNum, features, permutation);\n+ List<MatrixObject> pls = doPartitioning(workersNum, labels, permutation);\n+ return new Result(pfs, pls);\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerDRR.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerDRR.java", "diff": "@@ -35,19 +35,11 @@ import org.apache.sysml.runtime.util.DataConverter;\n* (target=X, margin=rows, select=(seq(1,nrow(X))%%k)==id)\n*/\npublic class DataPartitionerDRR extends DataPartitioner {\n- @Override\n- public void doPartitioning(List<LocalPSWorker> workers, MatrixObject features, MatrixObject labels) {\n- List<MatrixObject> pfs = IntStream.range(0, workers.size())\n- .mapToObj(i -> removeEmpty(features, workers.size(), i)).collect(Collectors.toList());\n- List<MatrixObject> pls = IntStream.range(0, workers.size())\n- .mapToObj(i -> removeEmpty(labels, workers.size(), i)).collect(Collectors.toList());\n- setPartitionedData(workers, pfs, pls);\n- }\nprivate MatrixObject removeEmpty(MatrixObject mo, int k, int workerId) {\nMatrixObject result = ParamservUtils.newMatrixObject();\nMatrixBlock tmp = mo.acquireRead();\n- double[] data = LongStream.range(0, mo.getNumRows())\n+ double[] data = LongStream.range(1, mo.getNumRows() + 1)\n.mapToDouble(l -> l % k == workerId ? 
1 : 0).toArray();\nMatrixBlock select = DataConverter.convertToMatrixBlock(data, true);\nMatrixBlock resultMB = tmp.removeEmptyOperations(new MatrixBlock(), true, true, select);\n@@ -57,4 +49,13 @@ public class DataPartitionerDRR extends DataPartitioner {\nresult.enableCleanup(false);\nreturn result;\n}\n+\n+ @Override\n+ public Result doPartitioning(int workersNum, MatrixObject features, MatrixObject labels) {\n+ List<MatrixObject> pfs = IntStream.range(0, workersNum)\n+ .mapToObj(i -> removeEmpty(features, workersNum, i)).collect(Collectors.toList());\n+ List<MatrixObject> pls = IntStream.range(0, workersNum)\n+ .mapToObj(i -> removeEmpty(labels, workersNum, i)).collect(Collectors.toList());\n+ return new Result(pfs, pls);\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerOR.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/DataPartitionerOR.java", "diff": "@@ -33,20 +33,11 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n* where P is constructed for example with P=table(seq(1,nrow(X),sample(nrow(X), nrow(X))))\n*/\npublic class DataPartitionerOR extends DataPartitioner {\n- @Override\n- public void doPartitioning(List<LocalPSWorker> workers, MatrixObject features, MatrixObject labels) {\n- // Generate a different permutation matrix for each worker\n- List<MatrixBlock> permutation = IntStream.range(0, workers.size()).mapToObj(i ->\n- ParamservUtils.generatePermutation((int)features.getNumRows())).collect(Collectors.toList());\n- List<MatrixObject> pfs = doPartitioning(workers.size(), features, permutation);\n- List<MatrixObject> pls = doPartitioning(workers.size(), labels, permutation);\n- setPartitionedData(workers, pfs, pls);\n- }\n- private List<MatrixObject> doPartitioning(int k, MatrixObject mo, List<MatrixBlock> lpermutation) {\n+ private List<MatrixObject> doPartitioning(int k, MatrixObject mo, List<MatrixBlock> permutations) {\nMatrixBlock data = mo.acquireRead();\nList<MatrixObject> pMatrices = IntStream.range(0, k).mapToObj(i -> {\n- MatrixBlock permutation = lpermutation.get(i);\n+ MatrixBlock permutation = permutations.get(i);\nMatrixBlock output = permutation.aggregateBinaryOperations(permutation,\ndata, new MatrixBlock(), InstructionUtils.getMatMultOperator(k));\nMatrixObject result = ParamservUtils.newMatrixObject();\n@@ -57,4 +48,15 @@ public class DataPartitionerOR extends DataPartitioner {\nmo.release();\nreturn pMatrices;\n}\n+\n+ @Override\n+ public Result doPartitioning(int workersNum, MatrixObject features, MatrixObject labels) {\n+ // Generate a different permutation matrix for each worker\n+ List<MatrixBlock> permutations = IntStream.range(0, workersNum)\n+ .mapToObj(i -> ParamservUtils.generatePermutation((int)features.getNumRows()))\n+ .collect(Collectors.toList());\n+ List<MatrixObject> pfs = doPartitioning(workersNum, features, permutations);\n+ List<MatrixObject> pls = doPartitioning(workersNum, labels, permutations);\n+ return new Result(pfs, pls);\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "diff": "@@ -413,6 +413,19 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n}\nprivate void doDataPartitioning(DataPartitioner dp, MatrixObject features, MatrixObject labels, List<LocalPSWorker> workers) 
{\n- dp.doPartitioning(workers, features, labels);\n+ DataPartitioner.Result result = dp.doPartitioning(workers.size(), features, labels);\n+ List<MatrixObject> pfs = result.pFeatures;\n+ List<MatrixObject> pls = result.pLabels;\n+ if (pfs.size() < workers.size()) {\n+ if (LOG.isWarnEnabled()) {\n+ LOG.warn(String.format(\"There is only %d batches of data but has %d workers. \"\n+ + \"Hence, reset the number of workers with %d.\", pfs.size(), workers.size(), pfs.size()));\n+ }\n+ workers = workers.subList(0, pfs.size());\n+ }\n+ for (int i = 0; i < workers.size(); i++) {\n+ workers.get(i).setFeatures(pfs.get(i));\n+ workers.get(i).setLabels(pls.get(i));\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/paramserv/DataPartitionerTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/paramserv/DataPartitionerTest.java", "diff": "package org.apache.sysml.test.integration.functions.paramserv;\n-import java.util.ArrayList;\n+import java.util.HashMap;\nimport java.util.List;\n-import java.util.stream.Collectors;\n+import java.util.Map;\nimport java.util.stream.IntStream;\n-import org.apache.sysml.parser.DMLProgram;\n-import org.apache.sysml.parser.DataIdentifier;\n-import org.apache.sysml.parser.Expression;\n-import org.apache.sysml.parser.Statement;\n-import org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\n-import org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n-import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n-import org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysml.runtime.controlprogram.paramserv.DataPartitioner;\nimport org.apache.sysml.runtime.controlprogram.paramserv.DataPartitionerDC;\n+import org.apache.sysml.runtime.controlprogram.paramserv.DataPartitionerDR;\nimport org.apache.sysml.runtime.controlprogram.paramserv.DataPartitionerDRR;\n-import org.apache.sysml.runtime.controlprogram.paramserv.LocalPSWorker;\n+import org.apache.sysml.runtime.controlprogram.paramserv.DataPartitionerOR;\nimport org.apache.sysml.runtime.controlprogram.paramserv.ParamservUtils;\nimport org.apache.sysml.runtime.util.DataConverter;\nimport org.junit.Assert;\nimport org.junit.Test;\n-//TODO test data partitioning on defined API not internal methods,\n-// potentially remove workers from API to make the data partitioner independent\n-//TODO test expected behavior not the internal implementation against itself\n-// (e.g., for DR check that each row is in at most one partition and all rows are distributed)\n-\npublic class DataPartitionerTest {\n@Test\npublic void testDataPartitionerDC() {\nDataPartitioner dp = new DataPartitionerDC();\n- List<LocalPSWorker> workers = IntStream.range(0, 2).mapToObj(i -> new LocalPSWorker(i, \"updFunc\", Statement.PSFrequency.BATCH, 1, 64, null, null, createMockExecutionContext(), null)).collect(Collectors.toList());\ndouble[] df = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };\nMatrixObject features = ParamservUtils.newMatrixObject();\nfeatures.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n@@ -62,56 +49,80 @@ public class DataPartitionerTest {\nlabels.acquireModify(DataConverter.convertToMatrixBlock(df, true));\nlabels.refreshMetaData();\nlabels.release();\n- dp.doPartitioning(workers, features, labels);\n-\n- double[] expected1 = new double[] { 1, 2, 3, 4, 5 };\n- double[] realValue1 = 
workers.get(0).getFeatures().acquireRead().getDenseBlockValues();\n- double[] realValue2 = workers.get(0).getLabels().acquireRead().getDenseBlockValues();\n- Assert.assertArrayEquals(expected1, realValue1, 0);\n- Assert.assertArrayEquals(expected1, realValue2, 0);\n-\n- double[] expected2 = new double[] { 6, 7, 8, 9, 10 };\n- double[] realValue3 = workers.get(1).getFeatures().acquireRead().getDenseBlockValues();\n- double[] realValue4 = workers.get(1).getLabels().acquireRead().getDenseBlockValues();\n- Assert.assertArrayEquals(expected2, realValue3, 0);\n- Assert.assertArrayEquals(expected2, realValue4, 0);\n+ DataPartitioner.Result result = dp.doPartitioning(3, features, labels);\n+\n+ Assert.assertEquals(3, result.pFeatures.size());\n+ Assert.assertEquals(3, result.pLabels.size());\n+\n+ double[] expected1 = new double[] { 1, 2, 3, 4 };\n+ assertResult(result, 0, expected1);\n+\n+ double[] expected2 = new double[] { 5, 6, 7, 8 };\n+ assertResult(result, 1, expected2);\n+\n+ double[] expected3 = new double[] { 9, 10 };\n+ assertResult(result, 2, expected3);\n+ }\n+\n+ private void assertResult(DataPartitioner.Result result, int index, double[] expected) {\n+ List<MatrixObject> pfs = result.pFeatures;\n+ List<MatrixObject> pls = result.pLabels;\n+ double[] realValue1 = pfs.get(index).acquireRead().getDenseBlockValues();\n+ double[] realValue2 = pls.get(index).acquireRead().getDenseBlockValues();\n+ Assert.assertArrayEquals(expected, realValue1, 0);\n+ Assert.assertArrayEquals(expected, realValue2, 0);\n}\n@Test\npublic void testDataPartitionerDR() {\n-// DataPartitionerDR dp = new DataPartitionerDR();\n-// double[] df = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };\n-// MatrixObject features = ParamservUtils.newMatrixObject();\n-// features.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n-// features.refreshMetaData();\n-// features.release();\n-// MatrixObject labels = ParamservUtils.newMatrixObject();\n-// labels.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n-// labels.refreshMetaData();\n-// labels.release();\n-//\n-// MatrixBlock permutation = ParamservUtils.generatePermutation(df.length, df.length);\n-//\n-// List<MatrixObject> pfs = dp.doPartitioning(2, features, permutation);\n-// List<MatrixObject> pls = dp.doPartitioning(2, labels, permutation);\n-//\n-// double[] expected1 = IntStream.range(0, 5).mapToDouble(i -> permutation.getSparseBlock().get(i).indexes()[0] + 1).toArray();\n-// double[] realValue1 = pfs.get(0).acquireRead().getDenseBlockValues();\n-// double[] realValue2 = pls.get(0).acquireRead().getDenseBlockValues();\n-// Assert.assertArrayEquals(expected1, realValue1, 0);\n-// Assert.assertArrayEquals(expected1, realValue2, 0);\n-//\n-// double[] expected2 = IntStream.range(5, 10).mapToDouble(i -> permutation.getSparseBlock().get(i).indexes()[0] + 1).toArray();\n-// double[] realValue3 = pfs.get(1).acquireRead().getDenseBlockValues();\n-// double[] realValue4 = pls.get(1).acquireRead().getDenseBlockValues();\n-// Assert.assertArrayEquals(expected2, realValue3, 0);\n-// Assert.assertArrayEquals(expected2, realValue4, 0);\n+ DataPartitioner dp = new DataPartitionerDR();\n+ double[] df = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };\n+ MatrixObject features = ParamservUtils.newMatrixObject();\n+ features.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n+ features.refreshMetaData();\n+ features.release();\n+ MatrixObject labels = ParamservUtils.newMatrixObject();\n+ labels.acquireModify(DataConverter.convertToMatrixBlock(df, 
true));\n+ labels.refreshMetaData();\n+ labels.release();\n+\n+ DataPartitioner.Result result = dp.doPartitioning(4, features, labels);\n+\n+ Assert.assertEquals(4, result.pFeatures.size());\n+ Assert.assertEquals(4, result.pLabels.size());\n+\n+ // Ensure that the index is accorded between features and labels\n+ IntStream.range(0, result.pFeatures.size()).forEach(i -> {\n+ double[] f = result.pFeatures.get(i).acquireRead().getDenseBlockValues();\n+ double[] l = result.pLabels.get(i).acquireRead().getDenseBlockValues();\n+ Assert.assertArrayEquals(f, l, 0);\n+ });\n+\n+ assertPermutationDR(df, result.pFeatures);\n+ assertPermutationDR(df, result.pLabels);\n+ }\n+\n+ private void assertPermutationDR(double[] df, List<MatrixObject> list) {\n+ Map<Double, Integer> dict = new HashMap<>();\n+ for (double d : df) {\n+ dict.put(d, 0);\n+ }\n+ IntStream.range(0, list.size()).forEach(i -> {\n+ double[] f = list.get(i).acquireRead().getDenseBlockValues();\n+ for (double d : f) {\n+ dict.compute(d, (k, v) -> v + 1);\n+ }\n+ });\n+\n+ // check if all the occurence is equivalent to one\n+ for (Map.Entry<Double, Integer> e : dict.entrySet()) {\n+ Assert.assertEquals(1, (int) e.getValue());\n+ }\n}\n@Test\npublic void testDataPartitionerDRR() {\nDataPartitioner dp = new DataPartitionerDRR();\n- List<LocalPSWorker> workers = IntStream.range(0, 2).mapToObj(i -> new LocalPSWorker(i, \"updFunc\", Statement.PSFrequency.BATCH, 1, 64, null, null, createMockExecutionContext(), null)).collect(Collectors.toList());\ndouble[] df = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };\nMatrixObject features = ParamservUtils.newMatrixObject();\nfeatures.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n@@ -121,72 +132,61 @@ public class DataPartitionerTest {\nlabels.acquireModify(DataConverter.convertToMatrixBlock(df, true));\nlabels.refreshMetaData();\nlabels.release();\n- dp.doPartitioning(workers, features, labels);\n-\n- //TODO test against four not two workers\n- double[] expected1 = new double[] { 1, 3, 5, 7, 9 };\n- double[] realValue1 = workers.get(0).getFeatures().acquireRead().getDenseBlockValues();\n- double[] realValue2 = workers.get(0).getLabels().acquireRead().getDenseBlockValues();\n- Assert.assertArrayEquals(expected1, realValue1, 0);\n- Assert.assertArrayEquals(expected1, realValue2, 0);\n-\n- double[] expected2 = new double[] { 2, 4, 6, 8, 10 };\n- double[] realValue3 = workers.get(1).getFeatures().acquireRead().getDenseBlockValues();\n- double[] realValue4 = workers.get(1).getLabels().acquireRead().getDenseBlockValues();\n- Assert.assertArrayEquals(expected2, realValue3, 0);\n- Assert.assertArrayEquals(expected2, realValue4, 0);\n+ DataPartitioner.Result result = dp.doPartitioning(4, features, labels);\n+\n+ Assert.assertEquals(4, result.pFeatures.size());\n+ Assert.assertEquals(4, result.pLabels.size());\n+\n+ double[] expected1 = new double[] { 4, 8 };\n+ assertResult(result, 0, expected1);\n+\n+ double[] expected2 = new double[] { 1, 5, 9 };\n+ assertResult(result, 1, expected2);\n+\n+ double[] expected3 = new double[] { 2, 6, 10 };\n+ assertResult(result, 2, expected3);\n+\n+ double[] expected4 = new double[] { 3, 7 };\n+ assertResult(result, 3, expected4);\n}\n@Test\npublic void testDataPartitionerOR() {\n-// DataPartitionerOR dp = new DataPartitionerOR();\n-// double[] df = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };\n-// MatrixObject features = ParamservUtils.newMatrixObject();\n-// features.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n-// 
features.refreshMetaData();\n-// features.release();\n-// MatrixObject labels = ParamservUtils.newMatrixObject();\n-// labels.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n-// labels.refreshMetaData();\n-// labels.release();\n-//\n-// MatrixBlock permutation = ParamservUtils.generatePermutation(df.length, df.length);\n-//\n-// List<MatrixObject> pfs = dp.doPartitioning(1, features, permutation);\n-// List<MatrixObject> pls = dp.doPartitioning(1, labels, permutation);\n-//\n-// double[] expected1 = IntStream.range(0, 10).mapToDouble(i -> permutation.getSparseBlock().get(i).indexes()[0] + 1).toArray();\n-// double[] realValue1 = pfs.get(0).acquireRead().getDenseBlockValues();\n-// double[] realValue2 = pls.get(0).acquireRead().getDenseBlockValues();\n-// Assert.assertArrayEquals(expected1, realValue1, 0);\n-// Assert.assertArrayEquals(expected1, realValue2, 0);\n+ DataPartitioner dp = new DataPartitionerOR();\n+ double[] df = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };\n+ MatrixObject features = ParamservUtils.newMatrixObject();\n+ features.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n+ features.refreshMetaData();\n+ features.release();\n+ MatrixObject labels = ParamservUtils.newMatrixObject();\n+ labels.acquireModify(DataConverter.convertToMatrixBlock(df, true));\n+ labels.refreshMetaData();\n+ labels.release();\n+\n+ DataPartitioner.Result result = dp.doPartitioning(4, features, labels);\n+\n+ Assert.assertEquals(4, result.pFeatures.size());\n+ Assert.assertEquals(4, result.pLabels.size());\n+\n+ assertPermutationOR(df, result.pFeatures);\n+ assertPermutationOR(df, result.pLabels);\n}\n- private ExecutionContext createMockExecutionContext() {\n- Program prog = new Program();\n- ArrayList<DataIdentifier> inputs = new ArrayList<>();\n- DataIdentifier features = new DataIdentifier(\"features\");\n- features.setDataType(Expression.DataType.MATRIX);\n- features.setValueType(Expression.ValueType.DOUBLE);\n- inputs.add(features);\n- DataIdentifier labels = new DataIdentifier(\"labels\");\n- labels.setDataType(Expression.DataType.MATRIX);\n- labels.setValueType(Expression.ValueType.DOUBLE);\n- inputs.add(labels);\n- DataIdentifier model = new DataIdentifier(\"model\");\n- model.setDataType(Expression.DataType.LIST);\n- model.setValueType(Expression.ValueType.UNKNOWN);\n- inputs.add(model);\n-\n- ArrayList<DataIdentifier> outputs = new ArrayList<>();\n- DataIdentifier gradients = new DataIdentifier(\"gradients\");\n- gradients.setDataType(Expression.DataType.LIST);\n- gradients.setValueType(Expression.ValueType.UNKNOWN);\n- outputs.add(gradients);\n-\n- FunctionProgramBlock fpb = new FunctionProgramBlock(prog, inputs, outputs);\n- prog.addProgramBlock(fpb);\n- prog.addFunctionProgramBlock(DMLProgram.DEFAULT_NAMESPACE, \"updFunc\", fpb);\n- return ExecutionContextFactory.createContext(prog);\n+ private void assertPermutationOR(double[] df, List<MatrixObject> list) {\n+ for (MatrixObject mo : list) {\n+ Map<Double, Integer> dict = new HashMap<>();\n+ for (double d : df) {\n+ dict.put(d, 0);\n+ }\n+ double[] f = mo.acquireRead().getDenseBlockValues();\n+ for (double d : f) {\n+ dict.compute(d, (k, v) -> v + 1);\n+ }\n+ Assert.assertEquals(10, dict.size());\n+ // check if all the occurence is equivalent to one\n+ for (Map.Entry<Double, Integer> e : dict.entrySet()) {\n+ Assert.assertEquals(1, (int) e.getValue());\n+ }\n+ }\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2381] Rework paramserv data partitioner API and tests Closes #783.
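After the rework, partitioners no longer reach into workers: callers partition once and then assign the returned slices, capping the worker count at the number of produced partitions. A hedged usage sketch against the reworked API from the diff (the helper method itself is hypothetical):

```java
import java.util.List;
import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;
import org.apache.sysml.runtime.controlprogram.paramserv.DataPartitioner;
import org.apache.sysml.runtime.controlprogram.paramserv.DataPartitionerDRR;
import org.apache.sysml.runtime.controlprogram.paramserv.LocalPSWorker;

public class PartitionAssignDemo {
	// Partition once, then assign slices; cap workers at the number of
	// produced partitions, mirroring doDataPartitioning in the diff.
	static void assign(List<LocalPSWorker> workers,
			MatrixObject features, MatrixObject labels) {
		DataPartitioner dp = new DataPartitionerDRR();
		DataPartitioner.Result result = dp.doPartitioning(workers.size(), features, labels);
		int n = Math.min(workers.size(), result.pFeatures.size());
		for (int i = 0; i < n; i++) {
			workers.get(i).setFeatures(result.pFeatures.get(i));
			workers.get(i).setLabels(result.pLabels.get(i));
		}
	}
}
```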
49,760
13.06.2018 16:55:16
25,200
cc349dc88a8e72adc16a6048d6ee48a35834e9aa
Fix robustness bitset sparsity estimator for empty blocks Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -29,12 +29,11 @@ import org.apache.sysml.runtime.matrix.data.SparseBlock;\n/**\n* This estimator implements naive but rather common approach of boolean matrix\n- * multiplies which allows to infer the exact non-zero structure and thus is also\n- * useful for sparse result preallocation.\n+ * multiplies which allows to infer the exact non-zero structure and thus is\n+ * also useful for sparse result preallocation.\n*\n*/\n-public class EstimatorBitsetMM extends SparsityEstimator\n-{\n+public class EstimatorBitsetMM extends SparsityEstimator {\n@Override\npublic double estim(MMNode root) {\n// recursive density map computation of non-leaf nodes\n@@ -42,16 +41,15 @@ public class EstimatorBitsetMM extends SparsityEstimator\nestim(root.getLeft()); // obtain synopsis\nif (!root.getRight().isLeaf())\nestim(root.getLeft()); // obtain synopsis\n- BitsetMatrix m1Map = !root.getLeft().isLeaf() ?\n- (BitsetMatrix)root.getLeft().getSynopsis() : new BitsetMatrix(root.getLeft().getData());\n- BitsetMatrix m2Map = !root.getRight().isLeaf() ?\n- (BitsetMatrix)root.getRight().getSynopsis() : new BitsetMatrix(root.getRight().getData());\n+ BitsetMatrix m1Map = !root.getLeft().isLeaf() ? (BitsetMatrix) root.getLeft().getSynopsis()\n+ : new BitsetMatrix(root.getLeft().getData());\n+ BitsetMatrix m2Map = !root.getRight().isLeaf() ? (BitsetMatrix) root.getRight().getSynopsis()\n+ : new BitsetMatrix(root.getRight().getData());\n// estimate output density map and sparsity via boolean matrix mult\nBitsetMatrix outMap = m1Map.matMult(m2Map);\nroot.setSynopsis(outMap); // memoize boolean matrix\n- return OptimizerUtils.getSparsity(\n- outMap.getNumRows(), outMap.getNumColumns(), outMap.getNonZeros());\n+ return OptimizerUtils.getSparsity(outMap.getNumRows(), outMap.getNumColumns(), outMap.getNonZeros());\n}\n@Override\n@@ -102,10 +100,13 @@ public class EstimatorBitsetMM extends SparsityEstimator\n}\nprivate void init(MatrixBlock in) {\n+ if (in.isEmptyBlock(false))\n+ return;\nif (in.isInSparseFormat()) {\nSparseBlock sblock = in.getSparseBlock();\nfor (int i = 0; i < in.getNumRows(); i++) {\n- if(sblock.isEmpty(i)) continue;\n+ if (sblock.isEmpty(i))\n+ continue;\nBitSet lbs = _data[i];\nint alen = sblock.size(i);\nint apos = sblock.pos(i);\n@@ -113,8 +114,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\nfor (int k = apos; k < apos + alen; k++)\nlbs.set(aix[k]);\n}\n- }\n- else {\n+ } else {\nDenseBlock dblock = in.getDenseBlock();\nfor (int i = 0; i < in.getNumRows(); i++) {\nBitSet lbs = _data[i];\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2295] Fix robustness bitset sparsity estimator for empty blocks Closes #784.
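The guard added above matters because an empty MatrixBlock may carry neither an allocated sparse nor dense block, so the per-row scan would dereference null. A stand-alone sketch of the same early-return pattern, using hypothetical minimal types rather than the actual MatrixBlock API:

import java.util.BitSet;

class BitsetRowMapSketch {
    final BitSet[] rows;
    // 'dense' may be null for an all-zero (empty) input block.
    BitsetRowMapSketch(double[][] dense, int m, int n) {
        rows = new BitSet[m];
        for (int i = 0; i < m; i++)
            rows[i] = new BitSet(n);
        if (dense == null)
            return; // empty block: all bitsets stay unset, no NPE
        for (int i = 0; i < m; i++)
            for (int j = 0; j < n; j++)
                if (dense[i][j] != 0)
                    rows[i].set(j);
    }
}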
49,727
13.06.2018 17:00:55
25,200
ebfe327e4474d0692679efc847216131d38777e8
Fix paramserv calculation of iterations per worker Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "@@ -42,7 +42,7 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\npublic Void call() throws Exception {\ntry {\nlong dataSize = _features.getNumRows();\n- int totalIter = (int) Math.ceil(dataSize / _batchSize);\n+ int totalIter = (int) Math.ceil((double) dataSize / _batchSize);\nswitch (_freq) {\ncase BATCH:\n@@ -141,9 +141,10 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n_ec.setVariable(Statement.PS_LABELS, bLabels);\nif (LOG.isDebugEnabled()) {\n- LOG.debug(String.format(\"Local worker_%d: Got batch data [size:%d kb] of index from %d to %d. \"\n- + \"[Epoch:%d Total epoch:%d Iteration:%d Total iteration:%d]\", _workerID, bFeatures.getDataSize()\n- / 1024 + bLabels.getDataSize() / 1024, begin, end, i + 1, _epochs, j + 1, totalIter));\n+ LOG.debug(String.format(\"Local worker_%d: Got batch data [size:%d kb] of index from %d to %d [last index: %d]. \"\n+ + \"[Epoch:%d Total epoch:%d Iteration:%d Total iteration:%d]\", _workerID,\n+ bFeatures.getDataSize() / 1024 + bLabels.getDataSize() / 1024, begin, end, dataSize, i + 1, _epochs,\n+ j + 1, totalIter));\n}\n// Invoke the update function\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2389] Fix paramserv calculation of iterations per worker Closes #785.
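The one-character fix above addresses the classic integer-division pitfall: dataSize / _batchSize truncates before Math.ceil ever runs, so the final partial batch was silently dropped. A stand-alone illustration:

public class CeilDemo {
    public static void main(String[] args) {
        long dataSize = 10;
        long batchSize = 4;
        // integer division truncates first: 10/4 = 2, ceil(2.0) = 2 (one batch lost)
        int broken = (int) Math.ceil(dataSize / batchSize);
        // cast one operand to double before dividing: ceil(2.5) = 3 (correct)
        int fixed = (int) Math.ceil((double) dataSize / batchSize);
        System.out.println(broken + " vs " + fixed); // prints "2 vs 3"
    }
}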
49,738
14.06.2018 16:40:34
25,200
303a2d31d29f2379e6833894b5c20e9e86dfa19d
[MINOR] Improved tracking and reporting of function call line numbers
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassInlineFunctions.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassInlineFunctions.java", "diff": "@@ -74,6 +74,8 @@ public class IPAPassInlineFunctions extends IPAPass\nList<StatementBlock> fcallsSB = fgraph.getFunctionCallsSB(fkey);\nfor(int i=0; i<fcalls.size(); i++) {\nFunctionOp op = fcalls.get(i);\n+ if( LOG.isDebugEnabled() )\n+ LOG.debug(\"-- inline '\"+fkey+\"' at line \"+op.getBeginLine());\n//step 0: robustness for special cases\nif( op.getInput().size() != fstmt.getInputParams().size()\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -1338,11 +1338,8 @@ public class DMLTranslator\nFunctionOp fcall = (target == null) ?\nnew FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, new String[]{}, false) :\nnew FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, new String[]{target.getName()}, false);\n+ fcall.setParseInfo(fci);\noutput.add(fcall);\n-\n- //TODO function output dataops (phase 3)\n- //DataOp trFoutput = new DataOp(target.getName(), target.getDataType(), target.getValueType(), fcall, DataOpTypes.FUNCTIONOUTPUT, null);\n- //DataOp twFoutput = new DataOp(target.getName(), target.getDataType(), target.getValueType(), trFoutput, DataOpTypes.TRANSIENTWRITE, null);\n}\n}\n@@ -1370,13 +1367,8 @@ public class DMLTranslator\n.map(d -> d.getName()).toArray(String[]::new);\nFunctionType ftype = fsb.getFunctionOpType();\nFunctionOp fcall = new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, foutputs, false);\n+ fcall.setParseInfo(fci);\noutput.add(fcall);\n-\n- //TODO function output dataops (phase 3)\n- /*for ( DataIdentifier paramName : mas.getTargetList() ){\n- DataOp twFoutput = new DataOp(paramName.getName(), paramName.getDataType(), paramName.getValueType(), fcall, DataOpTypes.TRANSIENTWRITE, null);\n- output.add(twFoutput);\n- }*/\n}\nelse if ( source instanceof BuiltinFunctionExpression && ((BuiltinFunctionExpression)source).multipleReturns() ) {\n// construct input hops\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved tracking and reporting of function call line numbers
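The key change is calling setParseInfo(fci) on each constructed FunctionOp, so source positions survive translation and can be reported later (e.g., the new "-- inline ... at line" debug output). A minimal sketch of the general pattern, with hypothetical types standing in for SystemML's Hop/ParseInfo:

class SourcePos {
    int beginLine;
    SourcePos(int l) { beginLine = l; }
}

class FunctionCallNode {
    private int beginLine = -1; // -1 means "unknown position"
    void setParseInfo(SourcePos p) { beginLine = p.beginLine; }
    int getBeginLine() { return beginLine; }
}

class TranslatorSketch {
    static FunctionCallNode translate(SourcePos parsedCall) {
        FunctionCallNode op = new FunctionCallNode();
        op.setParseInfo(parsedCall); // without this, diagnostics report line -1
        return op;
    }
}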
49,738
15.06.2018 17:44:43
25,200
7b29464becbc0088a0d1834a7c7c2820a3a12242
Fix robustness list slice on temporary paramserv lists This patch fixes the robustness of list slice operations on temporary paramserv lists, which so far resulted in null pointer exceptions due to missing pinned states of inputs.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "diff": "@@ -65,6 +65,7 @@ public class ListObject extends Data {\npublic ListObject slice(int ix1, int ix2) {\nListObject ret = new ListObject(_data.subList(ix1, ix2 + 1),\n(_names != null) ? _names.subList(ix1, ix2 + 1) : null);\n+ if( _dataState != null )\nret.setStatus(Arrays.copyOfRange(_dataState, ix2, ix2 + 1));\nreturn ret;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2399] Fix robustness list slice on temporary paramserv lists This patch fixes the robustness of list slice operations on temporary paramserv lists, which so far resulted in null pointer exceptions due to missing pinned states of inputs.
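The fix simply skips the status copy when no pin-state array exists. A generic sketch of the same null-guarded sub-range copy (Arrays.copyOfRange's upper bound is exclusive, hence the + 1; names here are illustrative):

import java.util.Arrays;

class SliceStateSketch {
    // Copy pin states for indices ix1..ix2 if present; a null state stays null.
    static boolean[] sliceState(boolean[] state, int ix1, int ix2) {
        return (state == null) ? null : Arrays.copyOfRange(state, ix1, ix2 + 1);
    }

    public static void main(String[] args) {
        System.out.println(sliceState(null, 0, 2)); // null, no NullPointerException
        boolean[] s = { true, false, true, true };
        System.out.println(Arrays.toString(sliceState(s, 1, 2))); // [false, true]
    }
}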
49,738
16.06.2018 17:43:48
25,200
b420baa2e20dfa4566a6adf3497a9cf9d5f4bec3
Fix robustness codegen fusion multiple biasadd/biasmult This patch improves the robustness of codegen biasadd/biasmult for scenarios where the left-hand-side input is a side input itself (e.g., when fusing multiple biasadd/biasmult over different inputs into a shared fused operator).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "diff": "@@ -237,6 +237,7 @@ public class TemplateCell extends TemplateBase\n}\nelse if( HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT) ) {\nCNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nlong c = hop.getInput().get(0).getDim2() / hop.getInput().get(1).getDim1();\nCNode cdata3 = TemplateUtils.createCNodeData(new LiteralOp(c), true);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2404] Fix robustness codegen fusion multiple biasadd/biasmult This patch improves the robustness of codegen biasadd/biasmult for scenarios where the left-hand-side input is a side input itself (e.g., when fusing multiple biasadd/biasmult over different inputs into a shared fused operator).
49,738
16.06.2018 18:25:59
25,200
3705e78fbc56356d8762333159a3e00ef51c3d1a
[MINOR] Reduce code duplication DNN maxpool/avgpool, performance avgpool This patch removes unnecessarily duplicated code paths for max and avg pooling. Furthermore, this also includes a minor performance improvement for avg pooling by avoiding unnecessary multiply operations.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "diff": "@@ -23,6 +23,7 @@ import java.util.Arrays;\nimport java.util.concurrent.Callable;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.codegen.LibSpoofPrimitives;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN.PoolingType;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNNHelper.CellIndex3;\n@@ -122,6 +123,7 @@ public class LibMatrixDNNPooling {\ndouble[] out = _params.output.getDenseBlockValues();\ndouble minValForMaxPoolOperations = _poolingType == PoolingType.AVG ? 0 : _params.minValForMaxPoolOperations;\n+ boolean max = (_poolingType == PoolingType.MAX);\n//thread-local initialization of output block\nif( !(_params.isStride1Pad0() && _params.isAllOnes(P, Q, W)) )\n@@ -131,55 +133,33 @@ public class LibMatrixDNNPooling {\n//quick-path w/o materialized index arrays and\n//simplified inner loops for P = 1, Q = 1, W = 1\nint lenh = Math.min(R,H);\n- if(_poolingType == PoolingType.AVG) {\nfor(int i = _rl, oix=_rl*C; i < _ru; i++, oix+=C)\n- for (int c = 0, off=i*CHW; c < C; c++, off+=H)\n- out[oix+c] = avg_pool(minValForMaxPoolOperations, in, off, lenh, _poolingMultiplier);\n- }\n- else {\n- for(int i = _rl, oix=_rl*C; i < _ru; i++, oix+=C)\n- for (int c = 0, off=i*CHW; c < C; c++, off+=H)\n- out[oix+c] = max(minValForMaxPoolOperations, in, off, lenh);\n+ for (int c = 0, off=i*CHW; c < C; c++, off+=H) {\n+ out[oix+c] = max ? max(minValForMaxPoolOperations, in, off, lenh) :\n+ avg(minValForMaxPoolOperations, in, off, lenh, _poolingMultiplier);\n}\n}\nelse if( _params.isStride1Pad0() ) {\n- if(_poolingType == PoolingType.AVG) {\n- //quick-path w/o materialized index arrays\n- for(int i = _rl; i < _ru; i++)\n- for (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\n- for (int p = 0; p < P; p++, oix+=Q)\n- for (int h = p; h < Math.min(p+R,H); h++)\n- for (int q = 0, off2=off+h*W; q < Q; q++)\n- out[oix+q] = avg_pool(out[oix+q], in, off2+q, Math.min(S,W-q), _poolingMultiplier);\n- }\n- else {\n//quick-path w/o materialized index arrays\nfor(int i = _rl; i < _ru; i++)\nfor (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\nfor (int p = 0; p < P; p++, oix+=Q)\nfor (int h = p; h < Math.min(p+R,H); h++)\n- for (int q = 0, off2=off+h*W; q < Q; q++)\n- out[oix+q] = max(out[oix+q], in, off2+q, Math.min(S,W-q));\n+ for (int q = 0, off2=off+h*W; q < Q; q++) {\n+ out[oix+q] = max ? 
max(out[oix+q], in, off2+q, Math.min(S,W-q)) :\n+ avg(out[oix+q], in, off2+q, Math.min(S,W-q), _poolingMultiplier);\n}\n}\nelse { //general case\nint[] hl = _params.start_indexes_h, hu = _params.end_indexes_h;\nint[] wl = _params.start_indexes_w, wu = _params.end_indexes_w;\n- if(_poolingType == PoolingType.AVG) {\nfor(int i = _rl; i < _ru; i++)\nfor (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\nfor (int p = 0; p < P; p++, oix+=Q)\nfor (int h = hl[p]; h < hu[p]; h++)\n- for (int q = 0, off2=off+h*W; q < Q; q++)\n- out[oix+q] = avg_pool(out[oix+q], in, off2+wl[q], wu[q]-wl[q], _poolingMultiplier);\n- }\n- else {\n- for(int i = _rl; i < _ru; i++)\n- for (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\n- for (int p = 0; p < P; p++, oix+=Q)\n- for (int h = hl[p]; h < hu[p]; h++)\n- for (int q = 0, off2=off+h*W; q < Q; q++)\n- out[oix+q] = max(out[oix+q], in, off2+wl[q], wu[q]-wl[q]);\n+ for (int q = 0, off2=off+h*W; q < Q; q++) {\n+ out[oix+q] = max ? max(out[oix+q], in, off2+wl[q], wu[q]-wl[q]) :\n+ avg(out[oix+q], in, off2+wl[q], wu[q]-wl[q], _poolingMultiplier);\n}\n}\n@@ -655,11 +635,8 @@ public class LibMatrixDNNPooling {\n}\n}\n- private static double avg_pool(final double aval, double[] b, final int bi, final int len, final double poolingMultiplier) {\n- double ret = aval;\n- for( int i = bi; i < bi+len; i++ )\n- ret += poolingMultiplier*b[i];\n- return ret;\n+ private static double avg(final double aval, double[] b, final int bi, final int len, final double poolingMultiplier) {\n+ return LibSpoofPrimitives.vectSum(b, bi, len) * poolingMultiplier + aval;\n}\nprivate static double max(final double aval, double[] b, final int bi, final int len) {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Reduce code duplication DNN maxpool/avgpool, performance avgpool This patch removes unnecessarily duplicated code paths for max and avg pooling. Furthermore, this also includes a minor performance improvement for avg pooling by avoiding unnecessary multiply operations.
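The refactoring above collapses two near-identical loop nests into one by hoisting the pooling-type test into a boolean and selecting the kernel with a ternary; the avg kernel also now sums first and multiplies once per window instead of once per element. A compact stand-alone sketch of both ideas (illustrative 1D version, not the actual LibMatrixDNN code):

class PoolingSketch {
    static double max(double acc, double[] b, int bi, int len) {
        double r = acc;
        for (int i = bi; i < bi + len; i++)
            r = Math.max(r, b[i]);
        return r;
    }
    static double avg(double acc, double[] b, int bi, int len, double mult) {
        double sum = 0; // sum first ...
        for (int i = bi; i < bi + len; i++)
            sum += b[i];
        return acc + sum * mult; // ... multiply once per window
    }
    // one loop nest; the kernel is chosen by a single boolean, as in the patch
    // (note: real max pooling seeds the output with a minimum value, not 0)
    static double[] pool(boolean isMax, double[] in, int win, double mult) {
        double[] out = new double[in.length - win + 1];
        for (int q = 0; q < out.length; q++)
            out[q] = isMax ? max(out[q], in, q, win) : avg(out[q], in, q, win, mult);
        return out;
    }
    public static void main(String[] args) {
        double[] in = { 1, 3, 2, 5, 4 };
        System.out.println(java.util.Arrays.toString(pool(true, in, 2, 0)));    // [3.0, 3.0, 5.0, 5.0]
        System.out.println(java.util.Arrays.toString(pool(false, in, 2, 0.5))); // [2.0, 2.5, 3.5, 4.5]
    }
}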
49,738
16.06.2018 19:21:01
25,200
fff0aa469dc41fdd73b7c364095d596c6be9dd65
Support for as.matrix over lists of scalars This patch adds a convenience feature for creating matrices out of lists of scalars and necessary compiler/runtime extensions.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "diff": "@@ -684,16 +684,18 @@ public class UnaryOp extends MultiThreadedHop\n@Override\npublic void refreshSizeInformation()\n{\n+ Hop input = getInput().get(0);\nif ( getDataType() == DataType.SCALAR ) {\n//do nothing always known\n}\nelse if( (_op == OpOp1.CAST_AS_MATRIX || _op == OpOp1.CAST_AS_FRAME\n- || _op == OpOp1.CAST_AS_SCALAR) && getInput().get(0).getDataType()==DataType.LIST ){\n- setDim1( -1 );\n- setDim2( -1 );\n+ || _op == OpOp1.CAST_AS_SCALAR) && input.getDataType()==DataType.LIST ){\n+ //handle two cases of list of scalars or list of single matrix\n+ setDim1( input.getLength() > 1 ? input.getLength() : -1 );\n+ setDim2( input.getLength() > 1 ? 1 : -1 );\n}\nelse if( (_op == OpOp1.CAST_AS_MATRIX || _op == OpOp1.CAST_AS_FRAME)\n- && getInput().get(0).getDataType()==DataType.SCALAR )\n+ && input.getDataType()==DataType.SCALAR )\n{\n//prevent propagating 0 from scalar (which would be interpreted as unknown)\nsetDim1( 1 );\n@@ -703,7 +705,6 @@ public class UnaryOp extends MultiThreadedHop\n{\n// If output is a Matrix then this operation is of type (B = op(A))\n// Dimensions of B are same as that of A, and sparsity may/maynot change\n- Hop input = getInput().get(0);\nsetDim1( input.getDim1() );\nsetDim2( input.getDim2() );\n// cosh(0)=cos(0)=1, acos(0)=1.5707963267948966\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/recompile/LiteralReplacement.java", "new_path": "src/main/java/org/apache/sysml/hops/recompile/LiteralReplacement.java", "diff": "@@ -356,12 +356,14 @@ public class LiteralReplacement\nif( in.getDataType() == DataType.LIST\n&& HopRewriteUtils.isData(in, DataOpTypes.TRANSIENTREAD) ) {\nListObject list = (ListObject)vars.get(in.getName());\n+ if( list.getLength() == 1 ) {\nString varname = Dag.getNextUniqueVarname(DataType.MATRIX);\nMatrixObject mo = (MatrixObject) list.slice(0);\nvars.put(varname, mo);\nret = HopRewriteUtils.createTransientRead(varname, c);\n}\n}\n+ }\nreturn ret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "diff": "@@ -122,6 +122,10 @@ public class ListObject extends Data {\n.mapToLong(data -> ((CacheableData<?>) data).getDataSize()).sum();\n}\n+ public boolean checkAllDataTypes(DataType dt) {\n+ return _data.stream().allMatch(d -> d.getDataType()==dt);\n+ }\n+\n@Override\npublic String getDebugName() {\nreturn toString();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java", "diff": "@@ -600,7 +600,25 @@ public class VariableCPInstruction extends CPInstruction {\nelse if( getInput1().getDataType().isList() ) {\n//TODO handling of cleanup status, potentially new object\nListObject list = (ListObject)ec.getVariable(getInput1().getName());\n- ec.setVariable(output.getName(), list.slice(0));\n+ if( list.getLength() > 1 ) {\n+ if( !list.checkAllDataTypes(DataType.SCALAR) )\n+ throw new DMLRuntimeException(\"as.matrix over multi-entry list only allows scalars.\");\n+ MatrixBlock out = new MatrixBlock(list.getLength(), 1, false);\n+ for( int i=0; i<list.getLength(); i++ )\n+ out.quickSetValue(i, 0, 
((ScalarObject)list.slice(i)).getDoubleValue());\n+ ec.setMatrixOutput(output.getName(), out, getExtendedOpcode());\n+ }\n+ else {\n+ //pass through matrix input or create 1x1 matrix for scalar\n+ Data tmp = list.slice(0);\n+ if( tmp instanceof ScalarObject && tmp.getValueType()!=ValueType.STRING ) {\n+ MatrixBlock out = new MatrixBlock(((ScalarObject)tmp).getDoubleValue());\n+ ec.setMatrixOutput(output.getName(), out, getExtendedOpcode());\n+ }\n+ else {\n+ ec.setVariable(output.getName(), tmp);\n+ }\n+ }\n}\nelse {\nthrow new DMLRuntimeException(\"Unsupported data type \"\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "diff": "@@ -39,6 +39,7 @@ public class ListAndStructTest extends AutomatedTestBase\nprivate static final String TEST_NAME4 = \"ListNamedFun\";\nprivate static final String TEST_NAME5 = \"ListUnnamedParfor\";\nprivate static final String TEST_NAME6 = \"ListNamedParfor\";\n+ private static final String TEST_NAME7 = \"ListAsMatrix\";\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + ListAndStructTest.class.getSimpleName() + \"/\";\n@@ -52,6 +53,7 @@ public class ListAndStructTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME7, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME7, new String[] { \"R\" }) );\n}\n@Test\n@@ -114,6 +116,16 @@ public class ListAndStructTest extends AutomatedTestBase\nrunListStructTest(TEST_NAME6, true);\n}\n+ @Test\n+ public void testListAsMatrix() {\n+ runListStructTest(TEST_NAME7, false);\n+ }\n+\n+ @Test\n+ public void testListAsMatrixRewrites() {\n+ runListStructTest(TEST_NAME7, true);\n+ }\n+\nprivate void runListStructTest(String testname, boolean rewrites)\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListAsMatrix.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = list(1,3,7,5,4);\n+Y = as.matrix(unlist(X));\n+R = as.matrix(nrow(Y) * sum(Y) + ncol(Y));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListAsMatrix.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = list(1,3,7,5,4);\n+Y = as.matrix(X);\n+R = as.matrix(nrow(Y) * sum(Y) + ncol(Y));\n+\n+write(R, $1);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2405] Support for as.matrix over lists of scalars This patch adds a convenience feature for creating matrices out of lists of scalars and necessary compiler/runtime extensions.
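At runtime the new cast packs an all-scalar list column-wise into an n-by-1 matrix, as in the VariableCPInstruction change above. A plain-Java sketch of that packing, with a dense double[][] standing in for MatrixBlock:

import java.util.Arrays;
import java.util.List;

class AsMatrixSketch {
    // Pack an all-scalar list into an n x 1 column matrix.
    static double[][] asMatrix(List<Double> scalars) {
        double[][] out = new double[scalars.size()][1];
        for (int i = 0; i < scalars.size(); i++)
            out[i][0] = scalars.get(i); // mirrors quickSetValue(i, 0, value) in the patch
        return out;
    }

    public static void main(String[] args) {
        double[][] m = asMatrix(Arrays.asList(1d, 3d, 7d, 5d, 4d));
        System.out.println(m.length + " x " + m[0].length); // 5 x 1
    }
}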
49,738
16.06.2018 19:44:38
25,200
48bfc9e3034ec537574cc721471ccf6fdfbc015a
[MINOR] Cleanup bivariate stats algorithm script (list, ifelse, format) This patch makes a couple of minor cleanups of the bivariate statistics algorithm in order to use the new as.matrix(list) in practice. This significantly reduces the script size. Furthermore, we also use the new ifelse and fix the messy formatting.
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/bivar-stats.dml", "new_path": "scripts/algorithms/bivar-stats.dml", "diff": "# 3) index2 - Second attribute set {A_21, A_22, ... A_2n}\n# 4) types1 - kind for attributes in S1\n# 5) types2 - kind for attributes in S2\n-# kind=1 for scale, kind=2 for nominal, kind=3 for ordinal\n+# (kind=1 for scale, kind=2 for nominal, kind=3 for ordinal)\n#\n# One output:\n# 6) output directory in which following (maximum of) four statistics files are created\n# + bivar.nominal.scale.stats -\n# + bivar.ordinal.ordinal.stats -\n#\n-# hadoop jar SystemML.jar -f bivar-stats.dml -nvargs X=<Data>\n-# index1=<Feature Index Set 1>\n-# index2=<Feature Index Set 2>\n-# types1=<Feature Types 1>\n-# types2=<Feature Types 2>\n-# OUTDIR=<Output Location>\n+# hadoop jar SystemML.jar -f bivar-stats.dml -nvargs X=<Data> \\\n+# index1=<Feature Index Set 1> index2=<Feature Index Set 2> \\\n+# types1=<Feature Types 1> types2=<Feature Types 2> OUTDIR=<Output Location>\nD = read($X); # input data set\nS1 = read($index1); # attribute set 1\n@@ -79,10 +76,10 @@ for( i in 1:s1size, check=0) {\nif (pre_k1 == 1) {\nnum_scale_scale_tests = num_scale_scale_tests + 1\npair2row[pre_pairID,1] = num_scale_scale_tests\n- } else {\n+ }\n+ else {\nnum_nominal_nominal_tests = num_nominal_nominal_tests + 1\npair2row[pre_pairID,1] = num_nominal_nominal_tests\n-\nif ( pre_k1 == 3 ) {\nnum_ordinal_ordinal_tests = num_ordinal_ordinal_tests + 1\npair2row[pre_pairID, 2] = num_ordinal_ordinal_tests\n@@ -93,7 +90,8 @@ for( i in 1:s1size, check=0) {\nif (pre_k1 == 1 | pre_k2 == 1) {\nnum_nominal_scale_tests = num_nominal_scale_tests + 1\npair2row[pre_pairID,1] = num_nominal_scale_tests\n- } else {\n+ }\n+ else {\nnum_nominal_nominal_tests = num_nominal_nominal_tests + 1\npair2row[pre_pairID,1] = num_nominal_nominal_tests\n}\n@@ -117,7 +115,6 @@ basestats_nominal_scale = matrix(0, rows=11, cols=size_nominal_scale_tests)\n# and check if these cols have been recoded\ndebug_str = \"Stopping execution of DML script due to invalid input\";\n-\nerror_flag = FALSE;\nmaxs = colMaxs(D);\n@@ -125,19 +122,15 @@ mins = colMins(D)\nmaxDomainSize = -1.0;\nfor(k in 1:ncol(K1) ) {\ntype = as.scalar(K1[1,k]);\n-\nif ( type > 1) {\ncolID = as.scalar(S1[1,k]);\n-\ncolMaximum = as.scalar(maxs[1,colID]);\nif(maxDomainSize < colMaximum) maxDomainSize = colMaximum;\n-\ncolMinimum = as.scalar(mins[1,colID]);\nif(colMinimum < 1){\n- if(type == 2)\n- debug_str = append(debug_str, \"Column \" + colID + \" was declared as nominal but its minimum value is \" + colMinimum)\n- else\n- debug_str = append(debug_str, \"Column \" + colID + \" was declared as ordinal but its minimum value is \" + colMinimum)\n+ debug_str = ifelse(type == 2,\n+ append(debug_str, \"Column \" + colID + \" was declared as nominal but its minimum value is \" + colMinimum),\n+ append(debug_str, \"Column \" + colID + \" was declared as ordinal but its minimum value is \" + colMinimum));\nerror_flag = TRUE;\n}\n}\n@@ -145,38 +138,32 @@ for(k in 1:ncol(K1) ) {\nfor(k in 1:ncol(K2) ) {\ntype = as.scalar(K2[1,k]);\n-\nif ( type > 1) {\ncolID = as.scalar(S2[1,k]);\n-\ncolMaximum = as.scalar(maxs[1,colID]);\n- if(maxDomainSize < colMaximum) maxDomainSize = colMaximum;\n-\n+ maxDomainSize = max(maxDomainSize, colMaximum);\ncolMinimum = as.scalar(mins[1,colID]);\nif(colMinimum < 1){\n- if(type == 2)\n- debug_str = append(debug_str, \"Column \" + colID + \" was declared as nominal but its minimum value is \" + colMinimum)\n- else\n- debug_str = 
append(debug_str, \"Column \" + colID + \" was declared as ordinal but its minimum value is \" + colMinimum)\n+ debug_str = ifelse(type == 2,\n+ append(debug_str, \"Column \" + colID + \" was declared as nominal but its minimum value is \" + colMinimum),\n+ append(debug_str, \"Column \" + colID + \" was declared as ordinal but its minimum value is \" + colMinimum));\nerror_flag = TRUE;\n}\n}\n}\nmaxDomain = as.integer(maxDomainSize);\n-\n-if(error_flag) stop(debug_str);\n+if(error_flag)\n+ stop(debug_str);\nparfor( i in 1:s1size, check=0) {\na1 = as.scalar(S1[1,i]);\nk1 = as.scalar(K1[1,i]);\nA1 = D[,a1];\n-\nparfor( j in 1:s2size, check=0) {\npairID = (i-1)*s2size+j;\na2 = as.scalar(S2[1,j]);\nk2 = as.scalar(K2[1,j]);\nA2 = D[,a2];\n-\nrowid1 = as.scalar(pair2row[pairID, 1])\nrowid2 = as.scalar(pair2row[pairID, 2])\n@@ -185,92 +172,52 @@ parfor( i in 1:s1size, check=0) {\n# scale-scale\nprint(\"[\" + i + \",\" + j + \"] scale-scale\");\n[r, cov, sigma1, sigma2] = bivar_ss(A1,A2);\n-\n- basestats_scale_scale[1,rowid1] = a1;\n- basestats_scale_scale[2,rowid1] = a2;\n- basestats_scale_scale[3,rowid1] = r;\n- basestats_scale_scale[4,rowid1] = cov;\n- basestats_scale_scale[5,rowid1] = sigma1;\n- basestats_scale_scale[6,rowid1] = sigma2;\n- } else {\n+ basestats_scale_scale[1:6,rowid1] = as.matrix(list(a1,a2,r,cov,sigma1,sigma2));\n+ }\n+ else {\n# nominal-nominal or ordinal-ordinal\nprint(\"[\" + i + \",\" + j + \"] categorical-categorical\");\n[chisq, df, pval, cramersv] = bivar_cc(A1, A2, maxDomain);\n-\n- basestats_nominal_nominal[1,rowid1] = a1;\n- basestats_nominal_nominal[2,rowid1] = a2;\n- basestats_nominal_nominal[3,rowid1] = chisq;\n- basestats_nominal_nominal[4,rowid1] = df;\n- basestats_nominal_nominal[5,rowid1] = pval;\n- basestats_nominal_nominal[6,rowid1] = cramersv;\n-\n+ basestats_nominal_nominal[1:6,rowid1] = as.matrix(list(a1,a2,chisq,df,pval,cramersv));\nif ( k1 == 3 ) {\n# ordinal-ordinal\nprint(\"[\" + i + \",\" + j + \"] ordinal-ordinal\");\nsp = bivar_oo(A1, A2, maxDomain);\n-\n- basestats_ordinal_ordinal[1,rowid2] = a1;\n- basestats_ordinal_ordinal[2,rowid2] = a2;\n- basestats_ordinal_ordinal[3,rowid2] = sp;\n+ basestats_ordinal_ordinal[1:3,rowid2] = as.matrix(list(a1,a2,sp));\n+ }\n}\n}\n- } else {\n- if (k1 == 1 | k2 == 1) {\n+ else if (k1 == 1 | k2 == 1) {\n# Scale-nominal/ordinal\nprint(\"[\" + i + \",\" + j + \"] scale-categorical\");\n-\n- if ( k1 == 1 ) {\n+ if ( k1 == 1 )\n[eta, f, pval, bw_ss, within_ss, bw_df, within_df, bw_mean_square, within_mean_square] = bivar_sc(A1, A2, maxDomain);\n- } else {\n+ else\n[eta, f, pval, bw_ss, within_ss, bw_df, within_df, bw_mean_square, within_mean_square] = bivar_sc(A2, A1, maxDomain);\n+ basestats_nominal_scale[1:11,rowid1] = as.matrix(list(a1,a2,eta,f,pval,bw_ss,within_ss,bw_df,within_df,bw_mean_square,within_mean_square));\n}\n-\n- basestats_nominal_scale[1,rowid1] = a1;\n- basestats_nominal_scale[2,rowid1] = a2;\n- basestats_nominal_scale[3,rowid1] = eta;\n- basestats_nominal_scale[4,rowid1] = f;\n- basestats_nominal_scale[5,rowid1] = pval;\n- basestats_nominal_scale[6,rowid1] = bw_ss;\n- basestats_nominal_scale[7,rowid1] = within_ss;\n- basestats_nominal_scale[8,rowid1] = bw_df;\n- basestats_nominal_scale[9,rowid1] = within_df;\n- basestats_nominal_scale[10,rowid1] = bw_mean_square;\n- basestats_nominal_scale[11,rowid1] = within_mean_square;\n- } else {\n+ else {\n# nominal-ordinal or ordinal-nominal\nprint(\"[\" + i + \",\" + j + \"] categorical-categorical\");\n[chisq, df, pval, cramersv] = bivar_cc(A1, A2, 
maxDomain);\n-\n- basestats_nominal_nominal[1,rowid1] = a1;\n- basestats_nominal_nominal[2,rowid1] = a2;\n- basestats_nominal_nominal[3,rowid1] = chisq;\n- basestats_nominal_nominal[4,rowid1] = df;\n- basestats_nominal_nominal[5,rowid1] = pval;\n- basestats_nominal_nominal[6,rowid1] = cramersv;\n- }\n+ basestats_nominal_nominal[1:6,rowid1] = as.matrix(list(a1,a2,chisq,df,pval,cramersv));\n}\n}\n}\n-if(num_scale_scale_tests == size_scale_scale_tests){\n+if(num_scale_scale_tests == size_scale_scale_tests)\nwrite(basestats_scale_scale, $OUTDIR + \"/bivar.scale.scale.stats\");\n-}\n-\n-if(num_nominal_scale_tests == size_nominal_scale_tests){\n+if(num_nominal_scale_tests == size_nominal_scale_tests)\nwrite(basestats_nominal_scale, $OUTDIR + \"/bivar.nominal.scale.stats\");\n-}\n-\n-if(num_nominal_nominal_tests == size_nominal_nominal_tests){\n+if(num_nominal_nominal_tests == size_nominal_nominal_tests)\nwrite(basestats_nominal_nominal, $OUTDIR + \"/bivar.nominal.nominal.stats\");\n-}\n-\n-if(num_ordinal_ordinal_tests == size_ordinal_ordinal_tests){\n+if(num_ordinal_ordinal_tests == size_ordinal_ordinal_tests)\nwrite(basestats_ordinal_ordinal, $OUTDIR + \"/bivar.ordinal.ordinal.stats\");\n-}\n# -----------------------------------------------------------------------------------------------------------\n-bivar_cc = function(Matrix[Double] A, Matrix[Double] B, Double maxDomain) return (Double chisq, Double df, Double pval, Double cramersv) {\n+bivar_cc = function(Matrix[Double] A, Matrix[Double] B, Double maxDomain)\n+ return (Double chisq, Double df, Double pval, Double cramersv) {\n# Contingency Table\nF = table(A, B, maxDomain, maxDomain);\n@@ -324,7 +271,8 @@ bivar_ss = function(Matrix[Double] X, Matrix[Double] Y) return (Double R, Double\n# Y points to SCALE variable\n# A points to CATEGORICAL variable\nbivar_sc = function(Matrix[Double] Y, Matrix[Double] A, Double maxDomain)\n- return (Double Eta, Double AnovaF, Double pval, Double bw_ss, Double within_ss, Double bw_df, Double within_df, Double bw_mean_square, Double within_mean_square) {\n+ return (Double Eta, Double AnovaF, Double pval, Double bw_ss, Double within_ss,\n+ Double bw_df, Double within_df, Double bw_mean_square, Double within_mean_square) {\n# mean and variance in target variable\nW = nrow(A);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup bivariate stats algorithm script (list, ifelse, format) This patch makes a couple of minor cleanups of the bivariate statistics algorithm in order to use the new as.matrix(list) in practice. This significantly reduces the script size. Furthermore, we also use the new ifelse and fix the messy formatting.
49,738
17.06.2018 13:13:55
25,200
25be6a68628d02ef73a30e1a08394b1495d45c57
[MINOR] Cleanup univariate stats algorithm script (list, ifelse, format)
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/Univar-Stats.dml", "new_path": "scripts/algorithms/Univar-Stats.dml", "diff": "@@ -44,39 +44,28 @@ consoleOutput = ifdef($CONSOLE_OUTPUT, FALSE);\nA = read($X); # data file\nK = read($TYPES); # attribute kind file\n-\n-# number of features/attributes\n-n = ncol(A);\n-\n-# number of data records\n-m = nrow(A);\n-\n-# number of statistics\n-numBaseStats = 17; # (14 scale stats, 3 categorical stats)\n-\n+n = ncol(A); # number of features/attributes\n+m = nrow(A); # number of data records\n+numBaseStats = 17; # number of statistics (14 scale, 3 categorical)\nmax_kind = max(K);\n# matrices to store computed statistics\nbaseStats = matrix(0, rows=numBaseStats, cols=n);\n# Compute max domain size among all categorical attributes\n-maxs = colMaxs(A);\n-maxDomainSize = max( (K > 1) * maxs );\n-maxDomain = as.integer(maxDomainSize);\n+maxDomain = as.integer(max((K > 1) * colMaxs(A)));\nparfor(i in 1:n, check=0) {\n-\n# project out the i^th column\nF = A[,i];\nkind = as.scalar(K[1,i]);\n+ minF = min(F);\n+ maxF = max(F);\nif ( kind == 1 ) {\n- #print(\"[\" + i + \"] Scale\");\n# compute SCALE statistics on the projected column\n- minimum = min(F);\n- maximum = max(F);\n- rng = maximum - minimum;\n+ rng = maxF - minF;\nmu = mean(F);\nm2 = moment(F, 2);\n@@ -90,57 +79,28 @@ parfor(i in 1:n, check=0) {\ng1 = m3/(std_dev^3);\ng2 = m4/(std_dev^4) - 3;\n- #se_g1=sqrt( 6*m*(m-1.0) / ((m-2.0)*(m+1.0)*(m+3.0)) );\nse_g1=sqrt( (6/(m-2.0)) * (m/(m+1.0)) * ((m-1.0)/(m+3.0)) );\n-\n- #se_g2= sqrt( (4*(m^2-1)*se_g1^2)/((m+5.0)*(m-3.0)) );\nse_g2=sqrt( (4/(m+5.0)) * ((m^2-1)/(m-3.0)) * se_g1^2 );\n- md = median(F); #quantile(F, 0.5);\n+ md = median(F);\niqm = interQuartileMean(F);\n- # place the computed statistics in output matrices\n- baseStats[1,i] = minimum;\n- baseStats[2,i] = maximum;\n- baseStats[3,i] = rng;\n-\n- baseStats[4,i] = mu;\n- baseStats[5,i] = var;\n- baseStats[6,i] = std_dev;\n- baseStats[7,i] = se;\n- baseStats[8,i] = cv;\n-\n- baseStats[9,i] = g1;\n- baseStats[10,i] = g2;\n- baseStats[11,i] = se_g1;\n- baseStats[12,i] = se_g2;\n-\n- baseStats[13,i] = md;\n- baseStats[14,i] = iqm;\n+ baseStats[1:14,i] = as.matrix(list(minF, maxF, rng,\n+ mu, var, std_dev, se, cv, g1, g2, se_g1, se_g2, md, iqm));\n}\nelse {\nif (kind == 2 | kind == 3) {\n- #print(\"[\" + i + \"] Categorical\");\n-\n# check if the categorical column has valid values\n- minF = min(F);\nif( minF <= 0 ) {\nprint(\"ERROR: Categorical attributes can only take values starting from 1. 
Encountered a value \" + minF + \" in attribute \" + i);\n}\nelse {\n# compute CATEGORICAL statistics on the projected column\n- num_cat = max(F); # number of categories\ncat_counts = table(F,1, maxDomain, 1); # counts for each category\n+ mode = as.scalar(rowIndexMax(t(cat_counts)));\n+ numModes = sum(cat_counts == max(cat_counts));\n- mode = rowIndexMax(t(cat_counts));\n- mx = max(cat_counts)\n- modeArr = (cat_counts == mx)\n- numModes = sum(modeArr);\n-\n- # place the computed statistics in output matrices\n- baseStats[15,i] = num_cat;\n- baseStats[16,i] = mode;\n- baseStats[17,i] = numModes;\n+ baseStats[15:17,i] = as.matrix(list(maxF, mode, numModes));\n}\n}\n}\n@@ -166,19 +126,16 @@ if (consoleOutput == TRUE) {\nprint(\" (12) Std err of kurtosis | \" + as.scalar(baseStats[12,i]));\nprint(\" (13) Median | \" + as.scalar(baseStats[13,i]));\nprint(\" (14) Interquartile mean | \" + as.scalar(baseStats[14,i]));\n- } else {\n- if (kind == 2 | kind == 3) {\n- if (kind == 2) {\n- print(\"Feature [\" + i + \"]: Categorical (Nominal)\");\n- } else {\n- print(\"Feature [\" + i + \"]: Categorical (Ordinal)\");\n}\n+ else if (kind == 2 | kind == 3) {\n+ print(ifelse(kind == 2,\n+ \"Feature [\" + i + \"]: Categorical (Nominal)\",\n+ \"Feature [\" + i + \"]: Categorical (Ordinal)\"));\nprint(\" (15) Num of categories | \" + as.integer(as.scalar(baseStats[15,i])));\nprint(\" (16) Mode | \" + as.integer(as.scalar(baseStats[16,i])));\nprint(\" (17) Num of modes | \" + as.integer(as.scalar(baseStats[17,i])));\n}\n}\n}\n-}\nwrite(baseStats, $STATS);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup univariate stats algorithm script (list, ifelse, format)
49,727
17.06.2018 18:21:48
25,200
b06f390ecf75dcf24e8143aafdba533440326861
[SYSTEMML-2392/8,2401/2/6] Paramserv statistics and various fixes Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "package org.apache.sysml.runtime.controlprogram.paramserv;\nimport java.util.concurrent.Callable;\n+import java.util.stream.IntStream;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.parser.Statement;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.Timing;\n+import org.apache.sysml.runtime.functionobjects.Plus;\nimport org.apache.sysml.runtime.instructions.cp.ListObject;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\n+import org.apache.sysml.utils.Statistics;\npublic class LocalPSWorker extends PSWorker implements Callable<Void> {\n@@ -40,6 +47,9 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n@Override\npublic Void call() throws Exception {\n+ if (DMLScript.STATISTICS)\n+ Statistics.incWorkerNumber();\n+\ntry {\nlong dataSize = _features.getNumRows();\nint totalIter = (int) Math.ceil((double) dataSize / _batchSize);\n@@ -65,26 +75,28 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nprivate void computeEpoch(long dataSize, int totalIter) {\nfor (int i = 0; i < _epochs; i++) {\n// Pull the global parameters from ps\n- ListObject globalParams = pullModel();\n+ ListObject params = pullModel();\n+ ListObject accGradients = null;\nfor (int j = 0; j < totalIter; j++) {\n- _ec.setVariable(Statement.PS_MODEL, globalParams);\n+ _ec.setVariable(Statement.PS_MODEL, params);\nListObject gradients = computeGradients(dataSize, totalIter, i, j);\n- if (j == totalIter - 1) {\n- // Push the gradients to ps\n- pushGradients(gradients);\n- ParamservUtils.cleanupListObject(_ec, globalParams);\n- } else {\n+ // Accumulate the intermediate gradients\n+ accGradients = (accGradients==null) ?\n+ ParamservUtils.copyList(gradients) :\n+ accrueGradients(accGradients, gradients);\n+\n// Update the local model with gradients\n- globalParams = _ps.updateModel(gradients, globalParams);\n- if (LOG.isDebugEnabled()) {\n- LOG.debug(String.format(\"Local worker_%d: Local global parameter [size:%d kb] updated.\",\n- _workerID, globalParams.getDataSize()));\n- }\n- }\n+ if( j < totalIter - 1 )\n+ params = updateModel(params, gradients, i, j, totalIter);\n}\n+\n+ // Push the gradients to ps\n+ pushGradients(accGradients);\n+ ParamservUtils.cleanupListObject(_ec, Statement.PS_MODEL);\n+\nif (LOG.isDebugEnabled()) {\nLOG.debug(String.format(\"Local worker_%d: Finished %d epoch.\", _workerID, i + 1));\n}\n@@ -92,6 +104,22 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n}\n+ private ListObject updateModel(ListObject globalParams, ListObject gradients, int i, int j, int totalIter) {\n+ Timing tUpd = DMLScript.STATISTICS ? new Timing(true) : null;\n+\n+ globalParams = _ps.updateModel(gradients, globalParams);\n+\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSLocalModelUpdateTime((long) tUpd.stop());\n+\n+ if (LOG.isDebugEnabled()) {\n+ LOG.debug(String.format(\"Local worker_%d: Local global parameter [size:%d kb] updated. 
\"\n+ + \"[Epoch:%d Total epoch:%d Iteration:%d Total iteration:%d]\",\n+ _workerID, globalParams.getDataSize(), i + 1, _epochs, j + 1, totalIter));\n+ }\n+ return globalParams;\n+ }\n+\nprivate void computeBatch(long dataSize, int totalIter) {\nfor (int i = 0; i < _epochs; i++) {\nfor (int j = 0; j < totalIter; j++) {\n@@ -103,7 +131,7 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n// Push the gradients to ps\npushGradients(gradients);\n- ParamservUtils.cleanupListObject(_ec, globalParams);\n+ ParamservUtils.cleanupListObject(_ec, Statement.PS_MODEL);\n}\nif (LOG.isDebugEnabled()) {\nLOG.debug(String.format(\"Local worker_%d: Finished %d epoch.\", _workerID, i + 1));\n@@ -135,8 +163,12 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nlong end = Math.min((j + 1) * _batchSize, dataSize);\n// Get batch features and labels\n+ Timing tSlic = DMLScript.STATISTICS ? new Timing(true) : null;\nMatrixObject bFeatures = ParamservUtils.sliceMatrix(_features, begin, end);\nMatrixObject bLabels = ParamservUtils.sliceMatrix(_labels, begin, end);\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSBatchIndexingTime((long) tSlic.stop());\n+\n_ec.setVariable(Statement.PS_FEATURES, bFeatures);\n_ec.setVariable(Statement.PS_LABELS, bLabels);\n@@ -148,7 +180,10 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n}\n// Invoke the update function\n+ Timing tGrad = DMLScript.STATISTICS ? new Timing(true) : null;\n_inst.processInstruction(_ec);\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSGradientComputeTime((long) tGrad.stop());\n// Get the gradients\nListObject gradients = (ListObject) _ec.getVariable(_output.getName());\n@@ -157,4 +192,15 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nParamservUtils.cleanupData(bLabels);\nreturn gradients;\n}\n+\n+ private ListObject accrueGradients(ListObject accGradients, ListObject gradients) {\n+ IntStream.range(0, accGradients.getLength()).forEach(i -> {\n+ MatrixBlock mb1 = ((MatrixObject) accGradients.getData().get(i)).acquireRead();\n+ MatrixBlock mb2 = ((MatrixObject) gradients.getData().get(i)).acquireRead();\n+ mb1.binaryOperationsInPlace(new BinaryOperator(Plus.getPlusFnObject()), mb2);\n+ ((MatrixObject) accGradients.getData().get(i)).release();\n+ ((MatrixObject) gradients.getData().get(i)).release();\n+ });\n+ return accGradients;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/PSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/PSWorker.java", "diff": "package org.apache.sysml.runtime.controlprogram.paramserv;\n+import static org.apache.sysml.runtime.controlprogram.paramserv.ParamservUtils.UPDATE_FUNC_PREFIX;\n+\nimport java.util.ArrayList;\nimport java.util.stream.Collectors;\n@@ -71,7 +73,7 @@ public abstract class PSWorker {\nfuncNS = keys[0];\nfuncName = keys[1];\n}\n- FunctionProgramBlock func = ec.getProgram().getFunctionProgramBlock(funcNS, funcName);\n+ FunctionProgramBlock func = ec.getProgram().getFunctionProgramBlock(funcNS, UPDATE_FUNC_PREFIX + _workerID + \"_\" + funcName);\nArrayList<DataIdentifier> inputs = func.getInputParams();\nArrayList<DataIdentifier> outputs = func.getOutputParams();\nCPOperand[] boundInputs = inputs.stream()\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "new_path": 
"src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "diff": "package org.apache.sysml.runtime.controlprogram.paramserv;\n+import static org.apache.sysml.runtime.controlprogram.paramserv.ParamservUtils.AGG_FUNC_PREFIX;\n+\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.HashMap;\n@@ -37,6 +39,7 @@ import org.apache.commons.lang3.ArrayUtils;\nimport org.apache.commons.lang3.concurrent.BasicThreadFactory;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.DataIdentifier;\nimport org.apache.sysml.parser.Expression;\n@@ -44,10 +47,13 @@ import org.apache.sysml.parser.Statement;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.Timing;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.instructions.cp.Data;\nimport org.apache.sysml.runtime.instructions.cp.FunctionCallCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.ListObject;\n+import org.apache.sysml.utils.Statistics;\npublic abstract class ParamServer {\n@@ -73,8 +79,7 @@ public abstract class ParamServer {\nthrow new DMLRuntimeException(\"Param server: failed to broadcast the initial model.\", e);\n}\nBasicThreadFactory factory = new BasicThreadFactory.Builder()\n- .namingPattern(\"agg-service-pool-thread-%d\")\n- .build();\n+ .namingPattern(\"agg-service-pool-thread-%d\").build();\n_es = Executors.newSingleThreadExecutor(factory);\n}\n@@ -91,11 +96,17 @@ public abstract class ParamServer {\n}\npublic ListObject getResult() {\n+ // All the model updating work has terminated,\n+ // so we could return directly the result model\nreturn _model;\n}\npublic ListObject updateModel(ListObject gradients, ListObject model) {\n- return _aggService.updateModel(gradients, model);\n+ //note: we use a new execution context to allow for concurrent execution of ASP local updates;\n+ //otherwise synchronized on the aggService instance would serialize those\n+ ExecutionContext ec = ExecutionContextFactory.createContext(_aggService._ec.getProgram());\n+ ec.setVariable(Statement.PS_HYPER_PARAMS, _aggService._ec.getVariable(Statement.PS_HYPER_PARAMS));\n+ return _aggService.updateModel(ec, gradients, model);\n}\npublic static class Gradient {\n@@ -115,11 +126,11 @@ public abstract class ParamServer {\nprotected final Log LOG = LogFactory.getLog(AggregationService.class.getName());\n- protected ExecutionContext _ec;\n- private Statement.PSUpdateType _updateType;\n- private FunctionCallCPInstruction _inst;\n- private DataIdentifier _output;\n- private boolean[] _finishedStates; // Workers' finished states\n+ protected final ExecutionContext _ec;\n+ private final Statement.PSUpdateType _updateType;\n+ private final FunctionCallCPInstruction _inst;\n+ private final DataIdentifier _output;\n+ private final boolean[] _finishedStates; // Workers' finished states\nAggregationService(String aggFunc, Statement.PSUpdateType updateType, ExecutionContext ec, int workerNum) {\n_ec = ec;\n@@ -134,7 +145,7 @@ public abstract class ParamServer {\nfuncNS = keys[0];\nfuncName = keys[1];\n}\n- FunctionProgramBlock func = 
_ec.getProgram().getFunctionProgramBlock(funcNS, funcName);\n+ FunctionProgramBlock func = _ec.getProgram().getFunctionProgramBlock(funcNS, AGG_FUNC_PREFIX + funcName);\nArrayList<DataIdentifier> inputs = func.getInputParams();\nArrayList<DataIdentifier> outputs = func.getOutputParams();\n@@ -170,14 +181,24 @@ public abstract class ParamServer {\n}\nprivate void broadcastModel() throws InterruptedException {\n+ Timing tBroad = DMLScript.STATISTICS ? new Timing(true) : null;\n+\n//broadcast copy of the model to all workers, cleaned up by workers\nfor (BlockingQueue<ListObject> q : _modelMap.values())\nq.put(ParamservUtils.copyList(_model));\n+\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSModelBroadcastTime((long) tBroad.stop());\n}\nprivate void broadcastModel(int workerID) throws InterruptedException {\n+ Timing tBroad = DMLScript.STATISTICS ? new Timing(true) : null;\n+\n//broadcast copy of model to specific worker, cleaned up by worker\n_modelMap.get(workerID).put(ParamservUtils.copyList(_model));\n+\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSModelBroadcastTime((long) tBroad.stop());\n}\n@Override\n@@ -195,7 +216,10 @@ public abstract class ParamServer {\n}\n// Update and redistribute the model\n+ Timing tAgg = DMLScript.STATISTICS ? new Timing(true) : null;\n_model = updateModel(grad._gradients, _model);\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSAggregationTime((long) tAgg.stop());\n// Redistribute model according to update type\nswitch(_updateType) {\n@@ -231,19 +255,23 @@ public abstract class ParamServer {\n* @return A updated list object of model\n*/\nprivate synchronized ListObject updateModel(ListObject gradients, ListObject model) {\n+ return updateModel(_ec, gradients, model);\n+ }\n+\n+ private ListObject updateModel(ExecutionContext ec, ListObject gradients, ListObject model) {\n// Populate the variables table with the gradients and model\n- _ec.setVariable(Statement.PS_GRADIENTS, gradients);\n- _ec.setVariable(Statement.PS_MODEL, model);\n+ ec.setVariable(Statement.PS_GRADIENTS, gradients);\n+ ec.setVariable(Statement.PS_MODEL, model);\n// Invoke the aggregate function\n- _inst.processInstruction(_ec);\n+ _inst.processInstruction(ec);\n// Get the output\n- ListObject newModel = (ListObject) _ec.getVariable(_output.getName());\n+ ListObject newModel = (ListObject) ec.getVariable(_output.getName());\n// Update the model with the new output\n- ParamservUtils.cleanupListObject(_ec, model);\n- ParamservUtils.cleanupListObject(_ec, gradients);\n+ ParamservUtils.cleanupListObject(ec, Statement.PS_MODEL);\n+ ParamservUtils.cleanupListObject(ec, Statement.PS_GRADIENTS);\nreturn newModel;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "diff": "package org.apache.sysml.runtime.controlprogram.paramserv;\n+import java.io.IOException;\n+import java.util.ArrayList;\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.stream.Collectors;\nimport java.util.stream.IntStream;\n+import org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.MultiThreadedHop;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.hops.recompile.Recompiler;\n+import org.apache.sysml.parser.DMLProgram;\n+import org.apache.sysml.parser.DMLTranslator;\nimport org.apache.sysml.parser.Expression;\n+import org.apache.sysml.parser.StatementBlock;\nimport 
org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.ForProgramBlock;\n+import org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\n+import org.apache.sysml.runtime.controlprogram.IfProgramBlock;\n+import org.apache.sysml.runtime.controlprogram.ParForProgramBlock;\n+import org.apache.sysml.runtime.controlprogram.Program;\n+import org.apache.sysml.runtime.controlprogram.ProgramBlock;\n+import org.apache.sysml.runtime.controlprogram.WhileProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n+import org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\nimport org.apache.sysml.runtime.instructions.cp.Data;\nimport org.apache.sysml.runtime.instructions.cp.ListObject;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -41,6 +58,9 @@ import org.apache.sysml.runtime.matrix.data.OutputInfo;\npublic class ParamservUtils {\n+ public static final String UPDATE_FUNC_PREFIX = \"_worker_\";\n+ public static final String AGG_FUNC_PREFIX = \"_agg_\";\n+\n/**\n* Deep copy the list object\n*\n@@ -65,8 +85,8 @@ public class ParamservUtils {\nreturn new ListObject(newData, lo.getNames());\n}\n- public static void cleanupListObject(ExecutionContext ec, ListObject lo) {\n- ec.getVariables().removeAllIn(new HashSet<>(lo.getNames()));\n+ public static void cleanupListObject(ExecutionContext ec, String lName) {\n+ ListObject lo = (ListObject) ec.removeVariable(lName);\nlo.getData().forEach(ParamservUtils::cleanupData);\n}\n@@ -110,4 +130,112 @@ public class ParamservUtils {\nseq.ctableOperations(null, sample, 1.0, permutation);\nreturn permutation;\n}\n+\n+ public static ExecutionContext createExecutionContext(ExecutionContext ec, String updFunc, String aggFunc, int workerNum, int k) {\n+ FunctionProgramBlock updPB = getFunctionBlock(ec, updFunc);\n+ FunctionProgramBlock aggPB = getFunctionBlock(ec, aggFunc);\n+\n+ Program prog = ec.getProgram();\n+\n+ // 1. Recompile the internal program blocks\n+ recompileProgramBlocks(k, prog.getProgramBlocks());\n+ // 2. 
Recompile the imported function blocks\n+ prog.getFunctionProgramBlocks().forEach((fname, fvalue) -> recompileProgramBlocks(k, fvalue.getChildBlocks()));\n+\n+ // Copy function for workers\n+ IntStream.range(0, workerNum).forEach(i -> copyFunction(updFunc, updPB, prog, UPDATE_FUNC_PREFIX + i + \"_\"));\n+\n+ // Copy function for agg service\n+ copyFunction(aggFunc, aggPB, prog, AGG_FUNC_PREFIX);\n+\n+ return ExecutionContextFactory.createContext(prog);\n+ }\n+\n+ private static void copyFunction(String funcName, FunctionProgramBlock updPB, Program prog, String prefix) {\n+ String[] keys = DMLProgram.splitFunctionKey(funcName);\n+ String namespace = null;\n+ String func = keys[0];\n+ if (keys.length == 2) {\n+ namespace = keys[0];\n+ func = keys[1];\n+ }\n+ FunctionProgramBlock copiedFunc = ProgramConverter\n+ .createDeepCopyFunctionProgramBlock(updPB, new HashSet<>(), new HashSet<>());\n+ String fnameNew = prefix + func;\n+ prog.addFunctionProgramBlock(namespace, fnameNew, copiedFunc);\n+ }\n+\n+ private static void recompileProgramBlocks(int k, ArrayList<ProgramBlock> pbs) {\n+ // Reset the visit status from root\n+ for (ProgramBlock pb : pbs)\n+ DMLTranslator.resetHopsDAGVisitStatus(pb.getStatementBlock());\n+\n+ // Should recursively assign the level of parallelism\n+ // and recompile the program block\n+ try {\n+ rAssignParallelism(pbs, k, false);\n+ } catch (IOException e) {\n+ throw new DMLRuntimeException(e);\n+ }\n+ }\n+\n+ private static boolean rAssignParallelism(ArrayList<ProgramBlock> pbs, int k, boolean recompiled) throws IOException {\n+ for (ProgramBlock pb : pbs) {\n+ if (pb instanceof ParForProgramBlock) {\n+ ParForProgramBlock pfpb = (ParForProgramBlock) pb;\n+ pfpb.setDegreeOfParallelism(k);\n+ recompiled |= rAssignParallelism(pfpb.getChildBlocks(), 1, recompiled);\n+ } else if (pb instanceof ForProgramBlock) {\n+ recompiled |= rAssignParallelism(((ForProgramBlock) pb).getChildBlocks(), k, recompiled);\n+ } else if (pb instanceof WhileProgramBlock) {\n+ recompiled |= rAssignParallelism(((WhileProgramBlock) pb).getChildBlocks(), k, recompiled);\n+ } else if (pb instanceof FunctionProgramBlock) {\n+ recompiled |= rAssignParallelism(((FunctionProgramBlock) pb).getChildBlocks(), k, recompiled);\n+ } else if (pb instanceof IfProgramBlock) {\n+ IfProgramBlock ipb = (IfProgramBlock) pb;\n+ recompiled |= rAssignParallelism(ipb.getChildBlocksIfBody(), k, recompiled);\n+ if (ipb.getChildBlocksElseBody() != null)\n+ recompiled |= rAssignParallelism(ipb.getChildBlocksElseBody(), k, recompiled);\n+ } else {\n+ StatementBlock sb = pb.getStatementBlock();\n+ for (Hop hop : sb.getHops())\n+ recompiled |= rAssignParallelism(hop, k, recompiled);\n+ }\n+ // Recompile the program block\n+ if (recompiled) {\n+ Recompiler.recompileProgramBlockInstructions(pb);\n+ }\n+ }\n+ return recompiled;\n+ }\n+\n+ private static boolean rAssignParallelism(Hop hop, int k, boolean recompiled) {\n+ if (hop.isVisited()) {\n+ return recompiled;\n+ }\n+ if (hop instanceof MultiThreadedHop) {\n+ // Reassign the level of parallelism\n+ MultiThreadedHop mhop = (MultiThreadedHop) hop;\n+ mhop.setMaxNumThreads(k);\n+ recompiled = true;\n+ }\n+ ArrayList<Hop> inputs = hop.getInput();\n+ for (Hop h : inputs) {\n+ recompiled |= rAssignParallelism(h, k, recompiled);\n+ }\n+ hop.setVisited();\n+ return recompiled;\n+ }\n+\n+\n+ private static FunctionProgramBlock getFunctionBlock(ExecutionContext ec, String funcName) {\n+ String[] keys = DMLProgram.splitFunctionKey(funcName);\n+ String namespace = null;\n+ String func = 
keys[0];\n+ if (keys.length == 2) {\n+ namespace = keys[0];\n+ func = keys[1];\n+ }\n+ return ec.getProgram().getFunctionProgramBlock(namespace, func);\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "diff": "@@ -39,9 +39,6 @@ import static org.apache.sysml.parser.Statement.PS_UPDATE_TYPE;\nimport static org.apache.sysml.parser.Statement.PS_VAL_FEATURES;\nimport static org.apache.sysml.parser.Statement.PS_VAL_LABELS;\n-import java.io.IOException;\n-import java.util.ArrayList;\n-import java.util.HashSet;\nimport java.util.LinkedHashMap;\nimport java.util.List;\nimport java.util.concurrent.ExecutionException;\n@@ -56,21 +53,10 @@ import org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.log4j.Level;\nimport org.apache.log4j.Logger;\n-import org.apache.sysml.hops.Hop;\n-import org.apache.sysml.hops.MultiThreadedHop;\n-import org.apache.sysml.hops.recompile.Recompiler;\n-import org.apache.sysml.parser.DMLProgram;\n-import org.apache.sysml.parser.DMLTranslator;\n-import org.apache.sysml.parser.StatementBlock;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.controlprogram.ForProgramBlock;\n-import org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\n-import org.apache.sysml.runtime.controlprogram.IfProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\n-import org.apache.sysml.runtime.controlprogram.ParForProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.Program;\n-import org.apache.sysml.runtime.controlprogram.ProgramBlock;\n-import org.apache.sysml.runtime.controlprogram.WhileProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n@@ -82,9 +68,11 @@ import org.apache.sysml.runtime.controlprogram.paramserv.DataPartitionerOR;\nimport org.apache.sysml.runtime.controlprogram.paramserv.LocalPSWorker;\nimport org.apache.sysml.runtime.controlprogram.paramserv.LocalParamServer;\nimport org.apache.sysml.runtime.controlprogram.paramserv.ParamServer;\n-import org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\n+import org.apache.sysml.runtime.controlprogram.paramserv.ParamservUtils;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.Timing;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\n+import org.apache.sysml.utils.Statistics;\npublic class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruction {\n@@ -111,21 +99,26 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n@Override\npublic void processInstruction(ExecutionContext ec) {\n+ Timing tSetup = DMLScript.STATISTICS ? 
new Timing(true) : null;\n+\nPSModeType mode = getPSMode();\nint workerNum = getWorkerNum(mode);\nBasicThreadFactory factory = new BasicThreadFactory.Builder()\n- .namingPattern(\"workers-pool-thread-%d\")\n- .build();\n+ .namingPattern(\"workers-pool-thread-%d\").build();\nExecutorService es = Executors.newFixedThreadPool(workerNum, factory);\nString updFunc = getParam(PS_UPDATE_FUN);\nString aggFunc = getParam(PS_AGGREGATION_FUN);\n- // Create the workers' execution context\nint k = getParLevel(workerNum);\n- List<ExecutionContext> workerECs = createExecutionContext(ec, updFunc, workerNum, k);\n+\n+ // Get the compiled execution context\n+ ExecutionContext newEC = ParamservUtils.createExecutionContext(ec, updFunc, aggFunc, workerNum, k);\n+\n+ // Create workers' execution context\n+ List<ExecutionContext> workerECs = createExecutionContext(workerNum, ec, newEC.getProgram());\n// Create the agg service's execution context\n- ExecutionContext aggServiceEC = createExecutionContext(ec, aggFunc, 1, 1).get(0);\n+ ExecutionContext aggServiceEC = createExecutionContext(1, ec, newEC.getProgram()).get(0);\nPSFrequency freq = getFrequency();\nPSUpdateType updateType = getUpdateType();\n@@ -146,6 +139,9 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nPSScheme scheme = getScheme();\ndoDataPartitioning(scheme, ec, workers);\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSSetupTime((long) tSetup.stop());\n+\nif (LOG.isDebugEnabled()) {\nLOG.debug(String.format(\"\\nConfiguration of paramserv func: \"\n+ \"\\nmode: %s \\nworkerNum: %d \\nupdate frequency: %s \"\n@@ -169,6 +165,18 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n}\n}\n+ private List<ExecutionContext> createExecutionContext(int size, ExecutionContext ec, Program program) {\n+ return IntStream.range(0, size).mapToObj(i -> {\n+ // Put the hyperparam into the variables table\n+ LocalVariableMap varsMap = new LocalVariableMap();\n+ ListObject hyperParams = getHyperParams(ec);\n+ if (hyperParams != null) {\n+ varsMap.put(PS_HYPER_PARAMS, hyperParams);\n+ }\n+ return ExecutionContextFactory.createContext(varsMap, program);\n+ }).collect(Collectors.toList());\n+ }\n+\nprivate PSModeType getPSMode() {\nPSModeType mode;\ntry {\n@@ -194,106 +202,6 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nreturn Math.max((int)Math.ceil((double)getRemainingCores()/workerNum), 1);\n}\n- private List<ExecutionContext> createExecutionContext(ExecutionContext ec, String funcName, int workerNum, int k) {\n- // Fetch the target function\n- String[] keys = DMLProgram.splitFunctionKey(funcName);\n- String namespace = null;\n- String func = keys[0];\n- if (keys.length == 2) {\n- namespace = keys[0];\n- func = keys[1];\n- }\n- return createExecutionContext(ec, namespace, func, workerNum, k);\n- }\n-\n- private List<ExecutionContext> createExecutionContext(ExecutionContext ec, String namespace, String func,\n- int workerNum, int k) {\n- FunctionProgramBlock targetFunc = ec.getProgram().getFunctionProgramBlock(namespace, func);\n- return IntStream.range(0, workerNum).mapToObj(i -> {\n- // Put the hyperparam into the variables table\n- LocalVariableMap varsMap = new LocalVariableMap();\n- ListObject hyperParams = getHyperParams(ec);\n- if (hyperParams != null) {\n- varsMap.put(PS_HYPER_PARAMS, hyperParams);\n- }\n-\n- // Deep copy the target func\n- FunctionProgramBlock copiedFunc = ProgramConverter\n- .createDeepCopyFunctionProgramBlock(targetFunc, new 
HashSet<>(), new HashSet<>());\n-\n- // Reset the visit status from root\n- for( ProgramBlock pb : copiedFunc.getChildBlocks() )\n- DMLTranslator.resetHopsDAGVisitStatus(pb.getStatementBlock());\n-\n- // Should recursively assign the level of parallelism\n- // and recompile the program block\n- try {\n- rAssignParallelism(copiedFunc.getChildBlocks(), k, false);\n- } catch (IOException e) {\n- throw new DMLRuntimeException(e);\n- }\n-\n- Program prog = new Program();\n- prog.addProgramBlock(copiedFunc);\n- prog.addFunctionProgramBlock(namespace, func, copiedFunc);\n- return ExecutionContextFactory.createContext(varsMap, prog);\n-\n- }).collect(Collectors.toList());\n- }\n-\n- private boolean rAssignParallelism(ArrayList<ProgramBlock> pbs, int k, boolean recompiled) throws IOException {\n- for (ProgramBlock pb : pbs) {\n- if (pb instanceof ParForProgramBlock) {\n- ParForProgramBlock pfpb = (ParForProgramBlock) pb;\n- pfpb.setDegreeOfParallelism(k);\n- recompiled |= rAssignParallelism(pfpb.getChildBlocks(), 1, recompiled);\n- }\n- else if (pb instanceof ForProgramBlock) {\n- recompiled |= rAssignParallelism(((ForProgramBlock) pb).getChildBlocks(), k, recompiled);\n- }\n- else if (pb instanceof WhileProgramBlock) {\n- recompiled |= rAssignParallelism(((WhileProgramBlock) pb).getChildBlocks(), k, recompiled);\n- }\n- else if (pb instanceof FunctionProgramBlock) {\n- recompiled |= rAssignParallelism(((FunctionProgramBlock) pb).getChildBlocks(), k, recompiled);\n- }\n- else if (pb instanceof IfProgramBlock) {\n- IfProgramBlock ipb = (IfProgramBlock) pb;\n- recompiled |= rAssignParallelism(ipb.getChildBlocksIfBody(), k, recompiled);\n- if (ipb.getChildBlocksElseBody() != null)\n- recompiled |= rAssignParallelism(ipb.getChildBlocksElseBody(), k, recompiled);\n- }\n- else {\n- StatementBlock sb = pb.getStatementBlock();\n- for (Hop hop : sb.getHops())\n- recompiled |= rAssignParallelism(hop, k, recompiled);\n- }\n- // Recompile the program block\n- if (recompiled) {\n- Recompiler.recompileProgramBlockInstructions(pb);\n- }\n- }\n- return recompiled;\n- }\n-\n- private boolean rAssignParallelism(Hop hop, int k, boolean recompiled) {\n- if (hop.isVisited()) {\n- return recompiled;\n- }\n- if (hop instanceof MultiThreadedHop) {\n- // Reassign the level of parallelism\n- MultiThreadedHop mhop = (MultiThreadedHop) hop;\n- mhop.setMaxNumThreads(k);\n- recompiled = true;\n- }\n- ArrayList<Hop> inputs = hop.getInput();\n- for (Hop h : inputs) {\n- recompiled |= rAssignParallelism(h, k, recompiled);\n- }\n- hop.setVisited();\n- return recompiled;\n- }\n-\nprivate PSUpdateType getUpdateType() {\nPSUpdateType updType;\ntry {\n@@ -310,13 +218,12 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nif (!getParameterMap().containsKey(PS_FREQUENCY)) {\nreturn DEFAULT_UPDATE_FREQUENCY;\n}\n- PSFrequency freq;\ntry {\n- freq = PSFrequency.valueOf(getParam(PS_FREQUENCY));\n+ return PSFrequency.valueOf(getParam(PS_FREQUENCY));\n} catch (IllegalArgumentException e) {\n- throw new DMLRuntimeException(String.format(\"Paramserv function: not support '%s' update frequency.\", getParam(PS_FREQUENCY)));\n+ throw new DMLRuntimeException(String.format(\"Paramserv function: \"\n+ + \"not support '%s' update frequency.\", getParam(PS_FREQUENCY)));\n}\n- return freq;\n}\nprivate int getRemainingCores() {\n@@ -330,19 +237,16 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n* @return worker numbers\n*/\nprivate int getWorkerNum(PSModeType mode) {\n- int workerNum = 
-1;\nswitch (mode) {\ncase LOCAL:\n// default worker number: available cores - 1 (assign one process for agg service)\n- workerNum = getRemainingCores();\n- if (getParameterMap().containsKey(PS_PARALLELISM)) {\n- workerNum = Math.min(workerNum, Integer.valueOf(getParam(PS_PARALLELISM)));\n- }\n- break;\n- case REMOTE_SPARK:\n- throw new DMLRuntimeException(\"Do not support remote spark.\");\n- }\n+ int workerNum = getRemainingCores();\n+ if (getParameterMap().containsKey(PS_PARALLELISM))\n+ workerNum = Integer.valueOf(getParam(PS_PARALLELISM));\nreturn workerNum;\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported parameter server: \"+mode.name());\n+ }\n}\n/**\n@@ -351,15 +255,12 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n* @return parameter server\n*/\nprivate ParamServer createPS(PSModeType mode, String aggFunc, PSUpdateType updateType, int workerNum, ListObject model, ExecutionContext ec) {\n- ParamServer ps = null;\nswitch (mode) {\ncase LOCAL:\n- ps = new LocalParamServer(model, aggFunc, updateType, ec, workerNum);\n- break;\n- case REMOTE_SPARK:\n- throw new DMLRuntimeException(\"Do not support remote spark.\");\n+ return new LocalParamServer(model, aggFunc, updateType, ec, workerNum);\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported parameter server: \"+mode.name());\n}\n- return ps;\n}\nprivate long getBatchSize() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -107,6 +107,15 @@ public class Statistics\nprivate static final LongAdder sparkBroadcast = new LongAdder();\nprivate static final LongAdder sparkBroadcastCount = new LongAdder();\n+ // Paramserv function stats (time is in milli sec)\n+ private static final LongAdder psNumWorkers = new LongAdder();\n+ private static final LongAdder psSetupTime = new LongAdder();\n+ private static final LongAdder psGradientComputeTime = new LongAdder();\n+ private static final LongAdder psAggregationTime = new LongAdder();\n+ private static final LongAdder psLocalModelUpdateTime = new LongAdder();\n+ private static final LongAdder psModelBroadcastTime = new LongAdder();\n+ private static final LongAdder psBatchIndexTime = new LongAdder();\n+\n//PARFOR optimization stats (low frequency updates)\nprivate static long parforOptTime = 0; //in milli sec\nprivate static long parforOptCount = 0; //count\n@@ -517,6 +526,33 @@ public class Statistics\nsparkBroadcastCount.add(c);\n}\n+ public static void incWorkerNumber() {\n+ psNumWorkers.increment();\n+ }\n+\n+ public static void accPSSetupTime(long t) {\n+ psSetupTime.add(t);\n+ }\n+\n+ public static void accPSGradientComputeTime(long t) {\n+ psGradientComputeTime.add(t);\n+ }\n+\n+ public static void accPSAggregationTime(long t) {\n+ psAggregationTime.add(t);\n+ }\n+\n+ public static void accPSLocalModelUpdateTime(long t) {\n+ psLocalModelUpdateTime.add(t);\n+ }\n+\n+ public static void accPSModelBroadcastTime(long t) {\n+ psModelBroadcastTime.add(t);\n+ }\n+\n+ public static void accPSBatchIndexingTime(long t) {\n+ psBatchIndexTime.add(t);\n+ }\npublic static String getCPHeavyHitterCode( Instruction inst )\n{\n@@ -850,6 +886,15 @@ public class Statistics\n((double)sparkBroadcast.longValue())*1e-9,\n((double)sparkCollect.longValue())*1e-9));\n}\n+ if (psNumWorkers.longValue() > 0) {\n+ sb.append(String.format(\"Paramserv total num workers:\\t%d.\\n\", psNumWorkers.longValue()));\n+ 
sb.append(String.format(\"Paramserv setup time:\\t\\t%.3f secs.\\n\", psSetupTime.doubleValue() / 1000));\n+ sb.append(String.format(\"Paramserv grad compute time:\\t%.3f secs.\\n\", psGradientComputeTime.doubleValue() / 1000));\n+ sb.append(String.format(\"Paramserv model update time:\\t%.3f/%.3f secs.\\n\",\n+ psLocalModelUpdateTime.doubleValue() / 1000, psAggregationTime.doubleValue() / 1000));\n+ sb.append(String.format(\"Paramserv model broadcast time:\\t%.3f secs.\\n\", psModelBroadcastTime.doubleValue() / 1000));\n+ sb.append(String.format(\"Paramserv batch slice time:\\t%.3f secs.\\n\", psBatchIndexTime.doubleValue() / 1000));\n+ }\nif( parforOptCount>0 ){\nsb.append(\"ParFor loops optimized:\\t\\t\" + getParforOptCount() + \".\\n\");\nsb.append(\"ParFor optimize time:\\t\\t\" + String.format(\"%.3f\", ((double)getParforOptTime())/1000) + \" sec.\\n\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2392/8,2401/2/6] Paramserv statistics and various fixes Closes #787.
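The statistics changes in the diff above follow one pattern: each paramserv phase is timed (via a `Timing` object) and accumulated into a thread-safe `LongAdder`, so concurrent workers can report without locking, and reporting converts milliseconds to seconds. A minimal, self-contained Java sketch of that pattern; the class name and timing stand-in here are illustrative assumptions, not the SystemML API:

```java
import java.util.concurrent.atomic.LongAdder;

public class PSStatsSketch {
    // accumulated setup time in milliseconds, safe for concurrent adds
    private static final LongAdder psSetupTime = new LongAdder();

    public static void accPSSetupTime(long t) {
        psSetupTime.add(t);
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime(); // stand-in for Timing(true)
        Thread.sleep(5);                // stand-in for the actual setup work
        accPSSetupTime((System.nanoTime() - start) / 1_000_000);
        // report in seconds, matching the "Paramserv setup time" output above
        System.out.printf("Paramserv setup time:\t%.3f secs.%n",
            psSetupTime.doubleValue() / 1000);
    }
}
```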
49,738
18.06.2018 14:15:08
25,200
2982c73fd94eda3b9669a2fb843d1395602af8bf
[MINOR] Refactoring maxpool/avgpool operation for external calls
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "diff": "@@ -97,6 +97,36 @@ public class LibMatrixDNNPooling {\nreturn ret;\n}\n+ public static void poolingDenseStride1Pad0(PoolingType pType, double minVal, double pFact,\n+ double[] in, double[] out, int rl, int ru, int C, int P, int Q, int R, int S, int H, int W) {\n+ boolean max = (pType == PoolingType.MAX);\n+ int CHW = C * H * W;\n+\n+ if( P == 1 && Q == 1 && W == 1 ) {\n+ //quick-path w/o materialized index arrays and\n+ //simplified inner loops for P = 1, Q = 1, W = 1\n+ int lenh = Math.min(R,H);\n+ for(int i = rl, oix=rl*C; i < ru; i++, oix+=C)\n+ for (int c = 0, off=i*CHW; c < C; c++, off+=H) {\n+ out[oix+c] = max ? max(minVal, in, off, lenh) :\n+ avg(minVal, in, off, lenh, pFact);\n+ }\n+ }\n+ else {\n+ int CPQ = C * P * Q, HW = H * W;\n+ Arrays.fill(out, rl*CPQ, ru*CPQ, minVal);\n+ //quick-path w/o materialized index arrays\n+ for(int i = rl; i < ru; i++)\n+ for (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\n+ for (int p = 0; p < P; p++, oix+=Q)\n+ for (int h = p; h < Math.min(p+R,H); h++)\n+ for (int q = 0, off2=off+h*W; q < Q; q++) {\n+ out[oix+q] = max ? max(out[oix+q], in, off2+q, Math.min(S,W-q)) :\n+ avg(out[oix+q], in, off2+q, Math.min(S,W-q), pFact);\n+ }\n+ }\n+ }\n+\nprivate static class DensePooling implements Callable<Long>\n{\nprivate final int _rl, _ru;\n@@ -125,32 +155,14 @@ public class LibMatrixDNNPooling {\ndouble minValForMaxPoolOperations = _poolingType == PoolingType.AVG ? 0 : _params.minValForMaxPoolOperations;\nboolean max = (_poolingType == PoolingType.MAX);\n+ if( _params.isStride1Pad0() ) {\n+ poolingDenseStride1Pad0(_poolingType, minValForMaxPoolOperations,\n+ _poolingMultiplier, in, out, _rl, _ru, C, P, Q, R, S, H, W);\n+ }\n+ else { //general case\n//thread-local initialization of output block\n- if( !(_params.isStride1Pad0() && _params.isAllOnes(P, Q, W)) )\nArrays.fill(out, _rl*CPQ, _ru*CPQ, minValForMaxPoolOperations);\n- if( _params.isStride1Pad0() && _params.isAllOnes(P, Q, W) ) {\n- //quick-path w/o materialized index arrays and\n- //simplified inner loops for P = 1, Q = 1, W = 1\n- int lenh = Math.min(R,H);\n- for(int i = _rl, oix=_rl*C; i < _ru; i++, oix+=C)\n- for (int c = 0, off=i*CHW; c < C; c++, off+=H) {\n- out[oix+c] = max ? max(minValForMaxPoolOperations, in, off, lenh) :\n- avg(minValForMaxPoolOperations, in, off, lenh, _poolingMultiplier);\n- }\n- }\n- else if( _params.isStride1Pad0() ) {\n- //quick-path w/o materialized index arrays\n- for(int i = _rl; i < _ru; i++)\n- for (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\n- for (int p = 0; p < P; p++, oix+=Q)\n- for (int h = p; h < Math.min(p+R,H); h++)\n- for (int q = 0, off2=off+h*W; q < Q; q++) {\n- out[oix+q] = max ? max(out[oix+q], in, off2+q, Math.min(S,W-q)) :\n- avg(out[oix+q], in, off2+q, Math.min(S,W-q), _poolingMultiplier);\n- }\n- }\n- else { //general case\nint[] hl = _params.start_indexes_h, hu = _params.end_indexes_h;\nint[] wl = _params.start_indexes_w, wu = _params.end_indexes_w;\nfor(int i = _rl; i < _ru; i++)\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Refactoring maxpool/avgpool operation for external calls
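The refactoring above only extracts the stride-1/pad-0 pooling loops into a static method so external callers can reach them; the loop structure itself is unchanged. A hedged, self-contained Java sketch of that loop shape for max pooling; the flat row-major layout and method signature are assumptions for illustration, not the LibMatrixDNNPooling API:

```java
import java.util.Arrays;

public class MaxPoolSketch {
    // in: one H x W input image (row-major); out: P x Q pooled output with
    // P = H-R+1, Q = W-S+1 for an R x S window, stride 1, no padding
    public static void maxPoolStride1Pad0(double[] in, double[] out,
            int H, int W, int R, int S) {
        int P = H - R + 1, Q = W - S + 1;
        Arrays.fill(out, Double.NEGATIVE_INFINITY);
        for (int p = 0; p < P; p++)
            for (int q = 0; q < Q; q++)
                for (int h = p; h < p + R; h++)     // window rows
                    for (int w = q; w < q + S; w++) // window cols
                        out[p * Q + q] = Math.max(out[p * Q + q], in[h * W + w]);
    }

    public static void main(String[] args) {
        double[] in = {1, 2, 3, 4, 5, 6, 7, 8, 9}; // 3 x 3 input
        double[] out = new double[4];              // 2 x 2 output for R = S = 2
        maxPoolStride1Pad0(in, out, 3, 3, 2, 2);
        System.out.println(Arrays.toString(out));  // [5.0, 6.0, 8.0, 9.0]
    }
}
```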
49,738
18.06.2018 17:41:27
25,200
51db735ebb9c7d183c02446b5328f18007bfec7e
Fix size inference reshape w/ zero rows or columns This patch improves the robustness of size inference and update statistics for reshapes w/ zero rows or columns, which previously led to arithmetic exceptions due to division by zero.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java", "diff": "@@ -461,13 +461,16 @@ public class ReorgOp extends MultiThreadedHop\n}\ncase RESHAPE:\n{\n- // input is a [k1,k2] matrix and output is a [k3,k4] matrix with k1*k2=k3*k4\n+ // input is a [k1,k2] matrix and output is a [k3,k4] matrix with k1*k2=k3*k4, except for\n+ // special cases where an input or output dimension is zero (i.e., 0x5 -> 1x0 is valid)\n// #nnz in output is exactly the same as in input\nif( mc.dimsKnown() ) {\n- if( _dim1 >= 0 )\n+ if( _dim1 > 0 )\nret = new long[]{_dim1, mc.getRows()*mc.getCols()/_dim1, mc.getNonZeros()};\n- else if( _dim2 >= 0 )\n+ else if( _dim2 > 0 )\nret = new long[]{mc.getRows()*mc.getCols()/_dim2, _dim2, mc.getNonZeros()};\n+ else if( _dim1 >= 0 && _dim2 >= 0 )\n+ ret = new long[]{_dim1, _dim2, -1};\n}\nbreak;\n}\n@@ -595,9 +598,9 @@ public class ReorgOp extends MultiThreadedHop\nrefreshColsParameterInformation(input3); //refresh cols\nsetNnz(input1.getNnz());\nif( !dimsKnown() && input1.dimsKnown() ) { //reshape allows to infer dims, if input and 1 dim known\n- if(_dim1 >= 0)\n+ if(_dim1 > 0)\n_dim2 = (input1._dim1*input1._dim2)/_dim1;\n- else if(_dim2 >= 0)\n+ else if(_dim2 > 0)\n_dim1 = (input1._dim1*input1._dim2)/_dim2;\n}\nbreak;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "diff": "@@ -31,6 +31,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nprivate final static String TEST_NAME1 = \"FunPotpourriNoReturn\";\nprivate final static String TEST_NAME2 = \"FunPotpourriComments\";\nprivate final static String TEST_NAME3 = \"FunPotpourriNoReturn2\";\n+ private final static String TEST_NAME4 = \"FunPotpourriEval\";\nprivate final static String TEST_DIR = \"functions/misc/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FunctionPotpourriTest.class.getSimpleName() + \"/\";\n@@ -41,6 +42,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n}\n@Test\n@@ -58,6 +60,11 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nrunFunctionTest( TEST_NAME3, false );\n}\n+ @Test\n+ public void testFunctionEval() {\n+ runFunctionTest( TEST_NAME4, false );\n+ }\n+\nprivate void runFunctionTest(String testName, boolean error) {\nTestConfiguration config = getTestConfiguration(testName);\nloadTestConfiguration(config);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/FunPotpourriEval.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo = function(Matrix[Double] weights, Matrix[Double] X, Integer p, Integer P, Integer q, Integer Q, Integer s) return(matrix[double] grad) {\n+ combined_weights = rbind (weights, matrix(2, p*P, 1))\n+ res_A = matrix(1, rows=p+P, cols=1)\n+ grad = matrix(0, rows=p+P, cols=1)\n+ if (p > 0) grad[1:p,] = res_A[1:p,]\n+ if (P > 0) grad[p+1:p+P,] = res_A[p+1:p+P,]\n+ if (p>0 & P>0){\n+ res_A = res_A[p+P+1:nrow(res_A),]\n+ for(i in seq(1, p, 1)){\n+ permut = matrix(0, rows=p, cols=P)\n+ permut[i,] = t(combined_weights[p+1:p+P,])\n+ grad[i,1] = grad[i,1] + sum(res_A * matrix(permut, rows=p*P, cols=1))\n+ }\n+ for(i in seq(1, P, 1)){\n+ permut = matrix(0, rows=p, cols=P)\n+ permut[,i] = combined_weights[1:p,]\n+ grad[p+i,1] = grad[p+i,1] + sum(res_A * matrix(permut, rows=p*P, cols=1))\n+ }\n+ }\n+}\n+\n+best_point = eval (\"foo\", matrix(1, 2, 1),matrix(0, 998, 3), 2, 0, 0, 0, 10)\n+print(toString(best_point))\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2407] Fix size inference reshape w/ zero rows or columns This patch fixes the robustness of size inference and update statistics for reshapes w/ zero rows or columns, which led to arithmetic exceptions due to divide by zero.
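The core of the fix above is a guard: the unknown reshape dimension may only be derived by division when the known dimension is strictly positive, while fully specified zero-dimension shapes (e.g., 0x5 -> 1x0) pass through unchanged. A small standalone Java sketch of that rule; the method name and `{rows, cols}` return convention are assumed for illustration:

```java
import java.util.Arrays;

public class ReshapeInferSketch {
    // returns the inferred {rows, cols} of reshape, or null if not inferable
    public static long[] inferDims(long inRows, long inCols, long dim1, long dim2) {
        if (dim1 > 0)       // divide only by a strictly positive known dim
            return new long[] {dim1, inRows * inCols / dim1};
        else if (dim2 > 0)
            return new long[] {inRows * inCols / dim2, dim2};
        else if (dim1 >= 0 && dim2 >= 0) // zero-dim shapes stay valid as given
            return new long[] {dim1, dim2};
        return null;
    }

    public static void main(String[] args) {
        // 0x5 input reshaped to 1 row: no division by zero, result is 1x0
        System.out.println(Arrays.toString(inferDims(0, 5, 1, -1))); // [1, 0]
    }
}
```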
49,738
18.06.2018 18:50:50
25,200
c61b94c975ac9019c9e9f0187dda5b23dbac61e7
Improved size inference nary ops (cbind/rbind/min/max) This patch improves the size propagation for nary ops by (1) adding the missing worst-case size inference, and (2) computing the nnz for exact size propagation.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/NaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/NaryOp.java", "diff": "@@ -25,6 +25,7 @@ import org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.lops.Nary;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n/**\n* The NaryOp Hop allows for a variable number of operands. Functionality\n@@ -180,7 +181,26 @@ public class NaryOp extends Hop {\n}\n@Override\n+ @SuppressWarnings(\"incomplete-switch\")\nprotected long[] inferOutputCharacteristics(MemoTable memo) {\n+ if( !getDataType().isScalar() ) {\n+ MatrixCharacteristics[] mc = memo.getAllInputStats(getInput());\n+\n+ switch( _op ) {\n+ case CBIND: return new long[]{\n+ HopRewriteUtils.getMaxInputDim(mc, true),\n+ HopRewriteUtils.getSumValidInputDims(mc, false),\n+ HopRewriteUtils.getSumValidInputNnz(mc, true)};\n+ case RBIND: return new long[]{\n+ HopRewriteUtils.getSumValidInputDims(mc, true),\n+ HopRewriteUtils.getMaxInputDim(mc, false),\n+ HopRewriteUtils.getSumValidInputNnz(mc, true)};\n+ case MIN:\n+ case MAX: return new long[]{\n+ HopRewriteUtils.getMaxInputDim(this, true),\n+ HopRewriteUtils.getMaxInputDim(this, false), -1};\n+ }\n+ }\nreturn null; //do nothing\n}\n@@ -190,10 +210,12 @@ public class NaryOp extends Hop {\ncase CBIND:\nsetDim1(HopRewriteUtils.getMaxInputDim(this, true));\nsetDim2(HopRewriteUtils.getSumValidInputDims(this, false));\n+ setNnz(HopRewriteUtils.getSumValidInputNnz(this));\nbreak;\ncase RBIND:\nsetDim1(HopRewriteUtils.getSumValidInputDims(this, true));\nsetDim2(HopRewriteUtils.getMaxInputDim(this, false));\n+ setNnz(HopRewriteUtils.getSumValidInputNnz(this));\nbreak;\ncase MIN:\ncase MAX:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -73,6 +73,7 @@ import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObjectFactory;\nimport org.apache.sysml.runtime.instructions.cp.StringInitCPInstruction;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -1372,6 +1373,44 @@ public class HopRewriteUtils\nh -> dim1 ? h.rowsKnown() : h.colsKnown());\n}\n+ public static long getSumValidInputNnz(Hop hop) {\n+ if( !hasValidInputNnz(hop) )\n+ return -1;\n+ return hop.getInput().stream().mapToLong(h -> h.getNnz()).sum();\n+ }\n+\n+ public static boolean hasValidInputNnz(Hop hop) {\n+ return hop.getInput().stream().allMatch(h -> h.getNnz() >= 0);\n+ }\n+\n+ public static long getMaxInputDim(MatrixCharacteristics[] mc, boolean dim1) {\n+ return Arrays.stream(mc).mapToLong(\n+ h -> (dim1 ? h.getRows() : h.getRows())).max().orElse(-1);\n+ }\n+\n+ public static long getSumValidInputDims(MatrixCharacteristics[] mc, boolean dim1) {\n+ if( !hasValidInputDims(mc, dim1) )\n+ return -1;\n+ return Arrays.stream(mc).mapToLong(\n+ h -> (dim1 ? h.getRows() : h.getCols())).sum();\n+ }\n+\n+ public static boolean hasValidInputDims(MatrixCharacteristics[] mc, boolean dim1) {\n+ return Arrays.stream(mc).allMatch(\n+ h -> dim1 ? 
h.rowsKnown() : h.colsKnown());\n+ }\n+\n+ public static long getSumValidInputNnz(MatrixCharacteristics[] mc, boolean worstcase) {\n+ if( !hasValidInputNnz(mc, worstcase) )\n+ return -1;\n+ return Arrays.stream(mc).mapToLong(h -> h.nnzKnown() ?\n+ h.getNonZeros() : h.getLength()).sum();\n+ }\n+\n+ public static boolean hasValidInputNnz(MatrixCharacteristics[] mc, boolean worstcase) {\n+ return Arrays.stream(mc).allMatch(h -> h.nnzKnown() || (worstcase && h.dimsKnown()));\n+ }\n+\npublic static boolean containsSecondOrderBuiltin(ArrayList<Hop> roots) {\nHop.resetVisitStatus(roots);\nreturn roots.stream().anyMatch(r -> containsSecondOrderBuiltin(r));\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2375] Improved size inference nary ops (cbind/rbind/min/max) This patch improves the size propagation for nary ops by (1) adding the missing worst-case size inference, and (2) computing the nnz for exact size propagation.
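The worst-case inference added above for n-ary cbind reduces to three aggregates over the input characteristics: the maximum row count, the sum of column counts, and the sum of non-zeros, with -1 propagated whenever an input is unknown. A compact stand-in illustrating the stream-based computation; plain `{rows, cols, nnz}` arrays are used here instead of the MatrixCharacteristics objects:

```java
import java.util.Arrays;

public class NaryCbindInferSketch {
    public static void main(String[] args) {
        long[][] in = { {100, 10, 50}, {100, 20, 200}, {100, 5, 25} }; // {rows, cols, nnz}

        long rows = Arrays.stream(in).mapToLong(m -> m[0]).max().orElse(-1);
        boolean valid = Arrays.stream(in).allMatch(m -> m[1] >= 0 && m[2] >= 0);
        long cols = valid ? Arrays.stream(in).mapToLong(m -> m[1]).sum() : -1; // -1 = unknown
        long nnz  = valid ? Arrays.stream(in).mapToLong(m -> m[2]).sum() : -1;

        System.out.println(rows + " x " + cols + ", nnz=" + nnz); // 100 x 35, nnz=275
    }
}
```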
49,738
18.06.2018 21:10:54
25,200
7f05d04a746f4e0ca9bb18de9fb5eda1ec43832d
[MINOR] Fix consistency explain output to stdout (recompile rt/hops)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "diff": "@@ -28,8 +28,6 @@ import java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\n-import org.apache.commons.logging.Log;\n-import org.apache.commons.logging.LogFactory;\nimport org.apache.hadoop.fs.FileSystem;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.wink.json4j.JSONObject;\n@@ -120,8 +118,6 @@ import org.apache.sysml.utils.JSONHelper;\n*/\npublic class Recompiler\n{\n- private static final Log LOG = LogFactory.getLog(Recompiler.class.getName());\n-\n//Max threshold for in-memory reblock of text input [in bytes]\n//reason: single-threaded text read at 20MB/s, 1GB input -> 50s (should exploit parallelism)\n//note that we scale this threshold up by the degree of available parallelism\n@@ -401,20 +397,20 @@ public class Recompiler\nprivate static void logExplainDAG(StatementBlock sb, ArrayList<Hop> hops, ArrayList<Instruction> inst) {\nif( DMLScript.EXPLAIN == ExplainType.RECOMPILE_HOPS ) {\n- LOG.info(\"EXPLAIN RECOMPILE \\nGENERIC (lines \"+sb.getBeginLine()+\"-\"+sb.getEndLine()+\"):\\n\" +\n+ System.out.println(\"EXPLAIN RECOMPILE \\nGENERIC (lines \"+sb.getBeginLine()+\"-\"+sb.getEndLine()+\"):\\n\" +\nExplain.explainHops(hops, 1));\n}\nif( DMLScript.EXPLAIN == ExplainType.RECOMPILE_RUNTIME ) {\n- LOG.info(\"EXPLAIN RECOMPILE \\nGENERIC (lines \"+sb.getBeginLine()+\"-\"+sb.getEndLine()+\"):\\n\" +\n+ System.out.println(\"EXPLAIN RECOMPILE \\nGENERIC (lines \"+sb.getBeginLine()+\"-\"+sb.getEndLine()+\"):\\n\" +\nExplain.explain(inst, 1));\n}\n}\nprivate static void logExplainPred(Hop hops, ArrayList<Instruction> inst) {\nif( DMLScript.EXPLAIN == ExplainType.RECOMPILE_HOPS )\n- LOG.info(\"EXPLAIN RECOMPILE \\nPRED (line \"+hops.getBeginLine()+\"):\\n\" + Explain.explain(hops,1));\n+ System.out.println(\"EXPLAIN RECOMPILE \\nPRED (line \"+hops.getBeginLine()+\"):\\n\" + Explain.explain(hops,1));\nif( DMLScript.EXPLAIN == ExplainType.RECOMPILE_RUNTIME )\n- LOG.info(\"EXPLAIN RECOMPILE \\nPRED (line \"+hops.getBeginLine()+\"):\\n\" + Explain.explain(inst,1));\n+ System.out.println(\"EXPLAIN RECOMPILE \\nPRED (line \"+hops.getBeginLine()+\"):\\n\" + Explain.explain(inst,1));\n}\npublic static void recompileProgramBlockHierarchy( ArrayList<ProgramBlock> pbs, LocalVariableMap vars, long tid, ResetType resetRecompile ) {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix consistency explain output to stdout (recompile rt/hops)
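The change above is purely about where recompile-time explain output goes: it is printed to stdout, as compile-time explain already is, instead of through the logger. A toy stand-in for the gated printing; the enum and predicate text are simplified assumptions:

```java
public class ExplainSketch {
    enum ExplainType { NONE, RECOMPILE_HOPS, RECOMPILE_RUNTIME }
    static ExplainType EXPLAIN = ExplainType.RECOMPILE_HOPS; // stand-in for DMLScript.EXPLAIN

    // recompile-time explain now goes to stdout, consistent with compile-time explain
    static void logExplainPred(String hopsExplain, int beginLine) {
        if (EXPLAIN == ExplainType.RECOMPILE_HOPS)
            System.out.println("EXPLAIN RECOMPILE \nPRED (line " + beginLine + "):\n" + hopsExplain);
    }

    public static void main(String[] args) {
        logExplainPred("b(>) (1.0, 2.0)", 7); // hypothetical predicate explain text
    }
}
```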
49,738
18.06.2018 23:38:41
25,200
9de00dbb2d7441ed694ec4d092a9269b6f7ccccc
Fix correctness fused mmchain XtXvy w/ minus weights This patch fixes result correctness issues of the fused mmchain operator of type XtXvy (e.g., t(X) %*% (X %*% v - y)), whose handling of an empty input v was only valid for multiply weights but not for minus.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -242,7 +242,8 @@ public class LibMatrixMult\n*/\npublic static void matrixMultChain(MatrixBlock mX, MatrixBlock mV, MatrixBlock mW, MatrixBlock ret, ChainType ct) {\n//check inputs / outputs (after that mV and mW guaranteed to be dense)\n- if( mX.isEmptyBlock(false) || mV.isEmptyBlock(false) || (mW !=null && mW.isEmptyBlock(false)) ) {\n+ if( mX.isEmptyBlock(false) || (mV.isEmptyBlock(false) && ct!=ChainType.XtXvy)\n+ || (mW !=null && mW.isEmptyBlock(false)) ) {\nret.examSparsity(); //turn empty dense into sparse\nreturn;\n}\n@@ -283,7 +284,8 @@ public class LibMatrixMult\n*/\npublic static void matrixMultChain(MatrixBlock mX, MatrixBlock mV, MatrixBlock mW, MatrixBlock ret, ChainType ct, int k) {\n//check inputs / outputs (after that mV and mW guaranteed to be dense)\n- if( mX.isEmptyBlock(false) || mV.isEmptyBlock(false) || (mW !=null && mW.isEmptyBlock(false)) ) {\n+ if( mX.isEmptyBlock(false) || (mV.isEmptyBlock(false) && ct!=ChainType.XtXvy)\n+ || (mW !=null && mW.isEmptyBlock(false)) ) {\nret.examSparsity(); //turn empty dense into sparse\nreturn;\n}\n@@ -1614,7 +1616,8 @@ public class LibMatrixMult\nfor( int i=rl; i < rl+bn; i++ ) {\ndouble[] avals = a.values(i);\nint aix = a.pos(i);\n- double val = dotProduct(avals, b, aix, 0, cd);\n+ double val = (b == null) ? 0 :\n+ dotProduct(avals, b, aix, 0, cd);\nval *= (weights) ? w[i] : 1;\nval -= (weights2) ? w[i] : 0;\nvectMultiplyAdd(val, avals, c, aix, 0, cd);\n@@ -1625,11 +1628,13 @@ public class LibMatrixMult\n{\n//compute 1st matrix-vector for row block\nArrays.fill(tmp, 0);\n+ if( b != null ) {\nfor( int bj=0; bj<cd; bj+=blocksizeJ ) {\nint bjmin = Math.min(cd-bj, blocksizeJ);\nfor( int i=0; i < blocksizeI; i++ )\ntmp[i] += dotProduct(a.values(bi+i), b, a.pos(bi+i,bj), bj, bjmin);\n}\n+ }\n//multiply/subtract weights (in-place), if required\nif( weights )\n@@ -1673,7 +1678,8 @@ public class LibMatrixMult\ndouble[] avals = a.values(i);\n//compute 1st matrix-vector dot product\n- double val = dotProduct(avals, b, aix, apos, 0, alen);\n+ double val = (b == null) ? 0 :\n+ dotProduct(avals, b, aix, apos, 0, alen);\n//multiply/subtract weights, if required\nval *= (weights) ? w[i] : 1;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2408] Fix correctness fused mmchain XtXvy w/ minus weights This patch fixes result correctness issues of the fused mmchain operator of type XtXvy (e.g., t(X) %*% (X %*% v - y)) which empty input handling of v was only valid for multiply weights but not minus.
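The bug above is easiest to see from the chain itself: for XtXvy the result is t(X) %*% (X %*% v - y), so even an all-zero v yields t(X) %*% (-y), and the empty-v early-out that is safe for multiply weights (XtwXv) must not fire here. A self-contained toy computation demonstrating this; naive dense loops are used here as an assumption, not the cache-blocked LibMatrixMult kernels:

```java
import java.util.Arrays;

public class MMChainSketch {
    // computes t(X) %*% (X %*% v - y) with plain loops
    public static double[] xtXvy(double[][] X, double[] v, double[] y) {
        int m = X.length, n = X[0].length;
        double[] tmp = new double[m], out = new double[n];
        for (int i = 0; i < m; i++) {   // tmp = X %*% v - y
            double dot = 0;
            for (int j = 0; j < n; j++)
                dot += X[i][j] * v[j];
            tmp[i] = dot - y[i];
        }
        for (int i = 0; i < m; i++)     // out = t(X) %*% tmp
            for (int j = 0; j < n; j++)
                out[j] += X[i][j] * tmp[i];
        return out;
    }

    public static void main(String[] args) {
        double[][] X = {{1, 2}, {3, 4}};
        // empty (all-zero) v still produces a non-zero result: t(X) %*% (-y)
        System.out.println(Arrays.toString(
            xtXvy(X, new double[]{0, 0}, new double[]{1, 1}))); // [-4.0, -6.0]
    }
}
```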
49,738
19.06.2018 17:49:19
25,200
87a0c0bd485ee5255412b24c9230e46ae7de71ad
[MINOR] Cleanup Kmeans algorithm script (table padding/truncating) This patch simplifies the kmeans script by folding separate padding and truncating of table outputs into the table operation itself. This also improved performance of Kmeans over Mnist80m w/ 20 iterations, 5 centroids and codegen enabled from 581s to 341s (2,164s w/o codegen).
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/Kmeans.dml", "new_path": "scripts/algorithms/Kmeans.dml", "diff": "@@ -102,17 +102,13 @@ for (i in 1 : num_centroids)\ncentroid_ids = t(colSums (cdf_min_distances < threshold_matrix)) + 1;\n# Place the selected centroids together, one per run, into a matrix:\n- centroid_placer = matrix (0, rows = num_runs, cols = (sample_block_size * num_runs));\n- centroid_placer_raw =\n- table (seq (1, num_runs, 1), sample_block_size * seq (0, num_runs - 1, 1) + centroid_ids);\n- centroid_placer [, 1 : ncol (centroid_placer_raw)] = centroid_placer_raw;\n+ centroid_placer = table (seq (1, num_runs),\n+ sample_block_size * seq (0, num_runs - 1) + centroid_ids, num_runs, sample_block_size * num_runs);\ncentroids = centroid_placer %*% X_samples;\n# Place the selected centroids into their appropriate slots in All_Centroids:\n- centroid_placer = matrix (0, rows = nrow (All_Centroids), cols = num_runs);\n- centroid_placer_raw =\n- table (seq (i, num_centroids * (num_runs - 1) + i, num_centroids), seq (1, num_runs, 1));\n- centroid_placer [1 : nrow (centroid_placer_raw), ] = centroid_placer_raw;\n+ centroid_placer = table (seq (i, num_centroids * (num_runs - 1) + i, num_centroids),\n+ seq (1, num_runs, 1), nrow (All_Centroids), num_runs);\nAll_Centroids = All_Centroids + centroid_placer %*% centroids;\n# Update min_distances to preserve the loop invariant:\n@@ -250,14 +246,7 @@ get_sample_maps = function (int num_records, int num_samples, int approx_sample_\n# Use contingency table to create the \"sample_maps\" matrix that is a vertical concatenation\n# of 0-1-matrices, one per sample, each with 1s at (i, sample_record[i]) and 0s elsewhere:\n- sample_maps_raw = table (seq (1, num_rows), sample_rec_ids);\n- max_rec_id = ncol (sample_maps_raw);\n- if (max_rec_id >= num_records) {\n- sample_maps = sample_maps_raw [, 1 : num_records];\n- } else {\n- sample_maps = matrix (0, rows = num_rows, cols = num_records);\n- sample_maps [, 1 : max_rec_id] = sample_maps_raw;\n- }\n+ sample_maps = table (seq (1, num_rows), sample_rec_ids, num_rows, num_records);\n# Create a 0-1-matrix that maps each sample column ID into all row positions of the\n# corresponding sample; map out-of-sample-range positions to row id = num_rows + 1:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup Kmeans algorithm script (table padding/truncating) This patch simplifies the kmeans script by folding separate padding and truncating of table outputs into the table operation itself. This also improved performance of Kmeans over Mnist80m w/ 20 iterations, 5 centroids and codegen enabled from 581s to 341s (2,164s w/o codegen).
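The script simplification above relies on table() accepting explicit output dimensions, which pads missing cells with zeros and drops out-of-range ids in a single step. A plain-Java stand-in for the table(seq(1, n), ids, rows, cols) pattern behind the centroid placer; this is toy dense code under assumed semantics, not the DML runtime:

```java
import java.util.Arrays;

public class TableDimsSketch {
    // one 1 per row i at (1-based) column ids[i], in a fixed rows x cols output;
    // with seq row ids every cell count is 0 or 1
    public static double[][] table(int[] ids, int rows, int cols) {
        double[][] out = new double[rows][cols]; // implicit zero padding up to cols
        for (int i = 0; i < rows; i++)
            if (ids[i] <= cols)                  // truncate out-of-range column ids
                out[i][ids[i] - 1] = 1;
        return out;
    }

    public static void main(String[] args) {
        double[][] M = table(new int[]{2, 5, 1}, 3, 4); // id 5 is truncated
        for (double[] row : M)
            System.out.println(Arrays.toString(row));
        // [0.0, 1.0, 0.0, 0.0] / [0.0, 0.0, 0.0, 0.0] / [1.0, 0.0, 0.0, 0.0]
    }
}
```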
49,760
20.06.2018 18:56:35
25,200
3bba0318403bbc8232d4806e151df24951603992
Improved matrix histogram via num non-empty rows/cols Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "package org.apache.sysml.hops.estim;\n+import java.util.Arrays;\n+\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\n@@ -118,34 +120,38 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnnz = (long)(spOut * mnOut);\n}\n+ //exploit upper bound on nnz based on non-empty rows/cols\n+ nnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ?\n+ Math.min((long)h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n+\n//compute final sparsity\nreturn OptimizerUtils.getSparsity(\nh1.getRows(), h2.getCols(), nnz);\n}\nprivate static class MatrixHistogram {\n- private final int[] rNnz; //row nnz counts\n- private int[] rNnz1e = null; //row nnz counts for cols w/ <= 1 non-zeros\n- private final int[] cNnz; //column nnz counts\n- private int[] cNnz1e = null; //column nnz counts for rows w/ <= 1 non-zeros\n- private int rMaxNnz = 0;\n- private int cMaxNnz = 0;\n+ private final int[] rNnz; //nnz per row\n+ private int[] rNnz1e = null; //nnz per row for cols w/ <= 1 non-zeros\n+ private final int[] cNnz; //nnz per col\n+ private int[] cNnz1e = null; //nnz per col for rows w/ <= 1 non-zeros\n+ private final int rMaxNnz; //max nnz per row\n+ private final int cMaxNnz; //max nnz per col\n+ private final int rNonEmpty; //number of non-empty rows (an empty row has nnz=0)\n+ private final int cNonEmpty; //number of non-empty cols (an empty col has nnz=0)\npublic MatrixHistogram(MatrixBlock in, boolean useExcepts) {\n- //allocate basic synopsis\n+ // 1) allocate basic synopsis\nrNnz = new int[in.getNumRows()];\ncNnz = new int[in.getNumColumns()];\n- if( in.isEmptyBlock(false) )\n- return;\n- //compute basic synopsis details\n+ // 2) compute basic synopsis details\n+ if( !in.isEmpty() ) {\nif( in.isInSparseFormat() ) {\nSparseBlock sblock = in.getSparseBlock();\nfor( int i=0; i<in.getNumRows(); i++ ) {\nif( sblock.isEmpty(i) ) continue;\nint alen = sblock.size(i);\nrNnz[i] = alen;\n- rMaxNnz = Math.max(rMaxNnz, alen);\nLibMatrixAgg.countAgg(sblock.values(i),\ncNnz, sblock.indexes(i), sblock.pos(i), alen);\n}\n@@ -162,13 +168,18 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n}\nrNnz[i] = lnnz;\n- rMaxNnz = Math.max(rMaxNnz, lnnz);\n}\n}\n- cMaxNnz = max(cNnz, 0, in.getNumColumns());\n+ }\n- //compute exception details if necessary (optional)\n- if( useExcepts && (rMaxNnz > 1 || cMaxNnz > 1) ) {\n+ // 3) compute meta data synopsis\n+ rMaxNnz = Arrays.stream(rNnz).max().orElse(0);\n+ cMaxNnz = Arrays.stream(cNnz).max().orElse(0);\n+ rNonEmpty = (int) Arrays.stream(rNnz).filter(v-> v!=0).count();\n+ cNonEmpty = (int) Arrays.stream(cNnz).filter(v-> v!=0).count();\n+\n+ // 4) compute exception details if necessary (optional)\n+ if( useExcepts & !in.isEmpty() && (rMaxNnz > 1 || cMaxNnz > 1) ) {\nrNnz1e = new int[in.getNumRows()];\ncNnz1e = new int[in.getNumColumns()];\n@@ -210,6 +221,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncNnz1e = c1e;\nrMaxNnz = rmax;\ncMaxNnz = cmax;\n+ rNonEmpty = cNonEmpty = -1;\n}\npublic int getRows() {\n@@ -222,8 +234,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\npublic static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n//get input/output nnz for 
scaling\n- long nnz1 = sum(h1.rNnz, 0, h1.getRows());\n- long nnz2 = sum(h2.cNnz, 0, h2.getCols());\n+ long nnz1 = Arrays.stream(h1.rNnz).sum();\n+ long nnz2 = Arrays.stream(h2.cNnz).sum();\ndouble nnzOut = spOut * h1.getRows() * h2.getCols();\n//propagate h1.r and h2.c to output via simple scaling\n@@ -243,19 +255,5 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//construct new histogram object\nreturn new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n}\n-\n- private static int max(int[] a, int ai, int alen) {\n- int ret = Integer.MIN_VALUE;\n- for(int i=ai; i<ai+alen; i++)\n- ret = Math.max(ret, a[i]);\n- return ret;\n- }\n-\n- private static long sum(int[] a, int ai, int alen) {\n- int ret = 0;\n- for(int i=ai; i<ai+alen; i++)\n- ret += a[i];\n- return ret;\n- }\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2409] Improved matrix histogram via num non-empty rows/cols Closes #788.
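The new bound above is a structural argument: every non-zero of A %*% B lies in a non-empty row of A and a non-empty column of B, so nnz(AB) <= rNonEmpty(A) * cNonEmpty(B), and the histogram estimate can be capped accordingly. A small numeric illustration with toy values, not the full histogram synopsis:

```java
import java.util.Arrays;

public class NnzBoundSketch {
    public static void main(String[] args) {
        int[] rNnzA = {3, 0, 2, 0}; // nnz per row of A -> 2 non-empty rows
        int[] cNnzB = {1, 0, 4};    // nnz per col of B -> 2 non-empty cols

        long rNonEmpty = Arrays.stream(rNnzA).filter(v -> v != 0).count();
        long cNonEmpty = Arrays.stream(cNnzB).filter(v -> v != 0).count();

        long nnzEst = 9; // hypothetical estimate from the histogram model
        long nnz = Math.min(rNonEmpty * cNonEmpty, nnzEst);
        System.out.println(nnz); // 4, i.e., the structural bound tightens the estimate
    }
}
```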
49,738
20.06.2018 21:16:05
25,200
08f9e3e47836c729e91940bcb65c90780d25649b
Support for left indexing on list data types This patch adds support for left indexing operations over both unnamed and named list data types. We allow right-hand-side inputs of types list and scalar with both named and positional indexing expressions.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/LeftIndexingOp.java", "new_path": "src/main/java/org/apache/sysml/hops/LeftIndexingOp.java", "diff": "@@ -388,6 +388,9 @@ public class LeftIndexingOp extends Hop\ncheckAndSetInvalidCPDimsAndSize();\n}\n+ if( getInput().get(0).getDataType()==DataType.LIST )\n+ _etype = ExecType.CP;\n+\n//mark for recompile (forever)\nsetRequiresRecompileIfNecessary();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/IndexedIdentifier.java", "new_path": "src/main/java/org/apache/sysml/parser/IndexedIdentifier.java", "diff": "@@ -107,8 +107,7 @@ public class IndexedIdentifier extends DataIdentifier\n// valid lower row bound value\nisConst_rowLowerBound = true;\n}\n-\n- else if (_rowLowerBound instanceof ConstIdentifier) {\n+ else if (_rowLowerBound instanceof ConstIdentifier && !getDataType().isList() ) {\nraiseValidateError(\"assign lower-bound row index for Indexed Identifier \" + this.toString() + \" the non-numeric value \" + _rowLowerBound.toString(), conditional);\n}\n@@ -192,7 +191,7 @@ public class IndexedIdentifier extends DataIdentifier\n}\nisConst_rowUpperBound = true;\n}\n- else if (_rowUpperBound instanceof ConstIdentifier){\n+ else if (_rowUpperBound instanceof ConstIdentifier && !getDataType().isList()){\nraiseValidateError(\"assign upper-bound row index for \" + this.toString() + \" the non-numeric value \" + _rowUpperBound.toString(), conditional);\n}\n@@ -268,7 +267,7 @@ public class IndexedIdentifier extends DataIdentifier\nisConst_colLowerBound = true;\n}\n- else if (_colLowerBound instanceof ConstIdentifier) {\n+ else if (_colLowerBound instanceof ConstIdentifier && !getDataType().isList()) {\nraiseValidateError(\"assign lower-bound column index for Indexed Identifier \" + this.toString() + \" the non-numeric value \" + _colLowerBound.toString(), conditional);\n}\n@@ -352,7 +351,7 @@ public class IndexedIdentifier extends DataIdentifier\nisConst_colUpperBound = true;\n}\n- else if (_colUpperBound instanceof ConstIdentifier){\n+ else if (_colUpperBound instanceof ConstIdentifier && !getDataType().isList()){\nraiseValidateError(\"assign upper-bound column index for \" + this.toString() + \" the non-numeric value \" + _colUpperBound.toString(), conditional);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/IndexingCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/IndexingCPInstruction.java", "diff": "@@ -76,7 +76,7 @@ public abstract class IndexingCPInstruction extends UnaryCPInstruction {\nelse if( in.getDataType() == DataType.LIST )\nreturn new ListIndexingCPInstruction(in, rl, ru, cl, cu, out, opcode, str);\nelse\n- throw new DMLRuntimeException(\"Can index only on Frames or Matrices\");\n+ throw new DMLRuntimeException(\"Can index only on matrices, frames, and lists.\");\n}\nelse {\nthrow new DMLRuntimeException(\"Invalid number of operands in instruction: \" + str);\n@@ -96,8 +96,10 @@ public abstract class IndexingCPInstruction extends UnaryCPInstruction {\nreturn new MatrixIndexingCPInstruction(lhsInput, rhsInput, rl, ru, cl, cu, out, opcode, str);\nelse if (lhsInput.getDataType() == DataType.FRAME)\nreturn new FrameIndexingCPInstruction(lhsInput, rhsInput, rl, ru, cl, cu, out, opcode, str);\n+ else if( lhsInput.getDataType() == DataType.LIST )\n+ return new ListIndexingCPInstruction(lhsInput, rhsInput, rl, ru, cl, cu, out, opcode, str);\nelse\n- throw new DMLRuntimeException(\"Can 
index only on Frames or Matrices\");\n+ throw new DMLRuntimeException(\"Can index only on matrices, frames, and lists.\");\n}\nelse {\nthrow new DMLRuntimeException(\"Invalid number of operands in instruction: \" + str);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListIndexingCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListIndexingCPInstruction.java", "diff": "@@ -59,27 +59,27 @@ public final class ListIndexingCPInstruction extends IndexingCPInstruction {\n}\n//left indexing\nelse if ( opcode.equalsIgnoreCase(LeftIndex.OPCODE)) {\n-// FrameBlock lin = ec.getFrameInput(input1.getName());\n-// FrameBlock out = null;\n-//\n-// if(input2.getDataType() == DataType.FRAME) { //FRAME<-FRAME\n-// FrameBlock rin = ec.getFrameInput(input2.getName());\n-// out = lin.leftIndexingOperations(rin, ixrange, new FrameBlock());\n-// ec.releaseFrameInput(input2.getName());\n-// }\n-// else { //FRAME<-SCALAR\n-// if(!ixrange.isScalar())\n-// throw new DMLRuntimeException(\"Invalid index range of scalar leftindexing: \"+ixrange.toString()+\".\" );\n-// ScalarObject scalar = ec.getScalarInput(input2.getName(), input2.getValueType(), input2.isLiteral());\n-// out = new FrameBlock(lin);\n-// out.set((int)ixrange.rowStart, (int)ixrange.colStart, scalar.getStringValue());\n-// }\n-//\n-// //unpin lhs input\n-// ec.releaseFrameInput(input1.getName());\n-//\n-// //unpin output\n-// ec.setFrameOutput(output.getName(), out);\n+ ListObject lin = (ListObject) ec.getVariable(input1.getName());\n+\n+ //execute right indexing operation and set output\n+ if( input2.getDataType().isList() ) { //LIST <- LIST\n+ ListObject rin = (ListObject) ec.getVariable(input2.getName());\n+ if( rl.getValueType()==ValueType.STRING || ru.getValueType()==ValueType.STRING )\n+ ec.setVariable(output.getName(), lin.copy().set(rl.getStringValue(), ru.getStringValue(), rin));\n+ else\n+ ec.setVariable(output.getName(), lin.copy().set((int)rl.getLongValue()-1, (int)ru.getLongValue()-1, rin));\n+ }\n+ else if( input2.getDataType().isScalar() ) { //LIST <- SCALAR\n+ ScalarObject scalar = ec.getScalarInput(input2);\n+ if( rl.getValueType()==ValueType.STRING )\n+ ec.setVariable(output.getName(), lin.copy().set(rl.getStringValue(), scalar));\n+ else\n+ ec.setVariable(output.getName(), lin.copy().set((int)rl.getLongValue()-1, scalar));\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Unsupported list \"\n+ + \"left indexing rhs type: \"+input2.getDataType().name());\n+ }\n}\nelse\nthrow new DMLRuntimeException(\"Invalid opcode (\" + opcode +\") encountered in ListIndexingCPInstruction.\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "diff": "package org.apache.sysml.runtime.instructions.cp;\n+import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\n@@ -30,9 +31,9 @@ import org.apache.sysml.runtime.controlprogram.caching.CacheableData;\npublic class ListObject extends Data {\nprivate static final long serialVersionUID = 3652422061598967358L;\n- private final List<String> _names;\nprivate final List<Data> _data;\nprivate boolean[] _dataState = null;\n+ private List<String> _names = null;\npublic ListObject(List<Data> data) {\nsuper(DataType.LIST, ValueType.UNKNOWN);\n@@ -58,6 +59,31 @@ public class ListObject extends Data {\nreturn _data.size();\n}\n+ public List<String> 
getNames() {\n+ return _names;\n+ }\n+\n+ public String getName(int ix) {\n+ return (_names == null) ? null : _names.get(ix);\n+ }\n+\n+ public boolean isNamedList() {\n+ return _names != null;\n+ }\n+\n+ public List<Data> getData() {\n+ return _data;\n+ }\n+\n+ public long getDataSize() {\n+ return _data.stream().filter(data -> data instanceof CacheableData)\n+ .mapToLong(data -> ((CacheableData<?>) data).getDataSize()).sum();\n+ }\n+\n+ public boolean checkAllDataTypes(DataType dt) {\n+ return _data.stream().allMatch(d -> d.getDataType()==dt);\n+ }\n+\npublic Data slice(int ix) {\nreturn _data.get(ix);\n}\n@@ -71,59 +97,89 @@ public class ListObject extends Data {\n}\npublic Data slice(String name) {\n- //check for existing named list\n- if (_names == null)\n- throw new DMLRuntimeException(\"Invalid lookup by name\" + \" in unnamed list: \" + name + \".\");\n-\n- //find position and check for existing entry\n- int pos = _names.indexOf(name);\n- if (pos < 0 || pos >= _data.size())\n- throw new DMLRuntimeException(\"List lookup returned no entry for name='\" + name + \"'\");\n+ //lookup position by name, incl error handling\n+ int pos = getPosForName(name);\n//return existing entry\nreturn slice(pos);\n}\npublic ListObject slice(String name1, String name2) {\n- //check for existing named list\n- if (_names == null)\n- throw new DMLRuntimeException(\"Invalid lookup by name\" + \" in unnamed list: \" + name1 + \", \" + name2 + \".\");\n-\n- //find position and check for existing entry\n- int pos1 = _names.indexOf(name1);\n- int pos2 = _names.indexOf(name2);\n- if (pos1 < 0 || pos1 >= _data.size())\n- throw new DMLRuntimeException(\"List lookup returned no entry for name='\" + name1 + \"'\");\n- if (pos2 < 0 || pos2 >= _data.size())\n- throw new DMLRuntimeException(\"List lookup returned no entry for name='\" + name2 + \"'\");\n+ //lookup positions by name, incl error handling\n+ int pos1 = getPosForName(name1);\n+ int pos2 = getPosForName(name2);\n//return list object\nreturn slice(pos1, pos2);\n}\n- public List<String> getNames() {\n- return _names;\n+ public ListObject copy() {\n+ ListObject ret = isNamedList() ?\n+ new ListObject(new ArrayList<>(getData()), new ArrayList<>(getNames())) :\n+ new ListObject(new ArrayList<>(getData()));\n+ ret.setStatus(Arrays.copyOf(getStatus(), getLength()));\n+ return ret;\n}\n- public String getName(int ix) {\n- return (_names == null) ? 
null : _names.get(ix);\n+ public ListObject set(int ix, Data data) {\n+ _data.set(ix, data);\n+ return this;\n}\n- public boolean isNamedList() {\n- return _names != null;\n+ public ListObject set(int ix1, int ix2, ListObject data) {\n+ int range = ix2 - ix1 + 1;\n+ if( range != data.getLength() || range > getLength() ) {\n+ throw new DMLRuntimeException(\"List leftindexing size mismatch: length(lhs)=\"\n+ +getLength()+\", range=[\"+ix1+\":\"+ix2+\"], legnth(rhs)=\"+data.getLength());\n}\n- public List<Data> getData() {\n- return _data;\n+ //copy rhs list object including meta data\n+ if( range == getLength() ) {\n+ //overwrite all entries in left hand side\n+ _data.clear(); _data.addAll(data.getData());\n+ System.arraycopy(data.getStatus(), 0, _dataState, 0, range);\n+ if( data.isNamedList() )\n+ _names = new ArrayList<>(data.getNames());\n+ }\n+ else {\n+ //overwrite entries of subrange in left hand side\n+ for( int i=ix1; i<=ix2; i++ ) {\n+ set(i, data.slice(i-ix1));\n+ _dataState[i] = data._dataState[i-ix1];\n+ if( isNamedList() && data.isNamedList() )\n+ _names.set(i, data.getName(i-ix1));\n+ }\n+ }\n+ return this;\n}\n- public long getDataSize() {\n- return _data.stream().filter(data -> data instanceof CacheableData)\n- .mapToLong(data -> ((CacheableData<?>) data).getDataSize()).sum();\n+ public Data set(String name, Data data) {\n+ //lookup position by name, incl error handling\n+ int pos = getPosForName(name);\n+\n+ //set entry into position\n+ return set(pos, data);\n}\n- public boolean checkAllDataTypes(DataType dt) {\n- return _data.stream().allMatch(d -> d.getDataType()==dt);\n+ public ListObject set(String name1, String name2, ListObject data) {\n+ //lookup positions by name, incl error handling\n+ int pos1 = getPosForName(name1);\n+ int pos2 = getPosForName(name2);\n+\n+ //set list into position range\n+ return set(pos1, pos2, data);\n+ }\n+\n+ private int getPosForName(String name) {\n+ //check for existing named list\n+ if (_names == null)\n+ throw new DMLRuntimeException(\"Invalid indexing by name\" + \" in unnamed list: \" + name + \".\");\n+\n+ //find position and check for existing entry\n+ int pos = _names.indexOf(name);\n+ if (pos < 0 || pos >= _data.size())\n+ throw new DMLRuntimeException(\"List indexing returned no entry for name='\" + name + \"'\");\n+ return pos;\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/ListAndStructTest.java", "diff": "@@ -40,6 +40,8 @@ public class ListAndStructTest extends AutomatedTestBase\nprivate static final String TEST_NAME5 = \"ListUnnamedParfor\";\nprivate static final String TEST_NAME6 = \"ListNamedParfor\";\nprivate static final String TEST_NAME7 = \"ListAsMatrix\";\n+ private static final String TEST_NAME8 = \"ListUnnamedRix\";\n+ private static final String TEST_NAME9 = \"ListNamedRix\";\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + ListAndStructTest.class.getSimpleName() + \"/\";\n@@ -54,6 +56,8 @@ public class ListAndStructTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME7, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME7, new String[] { \"R\" }) 
);\n+ addTestConfiguration( TEST_NAME8, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME8, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME9, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME9, new String[] { \"R\" }) );\n}\n@Test\n@@ -126,6 +130,26 @@ public class ListAndStructTest extends AutomatedTestBase\nrunListStructTest(TEST_NAME7, true);\n}\n+ @Test\n+ public void testListRix() {\n+ runListStructTest(TEST_NAME8, false);\n+ }\n+\n+ @Test\n+ public void testListRixRewrites() {\n+ runListStructTest(TEST_NAME8, true);\n+ }\n+\n+ @Test\n+ public void testListNamedRix() {\n+ runListStructTest(TEST_NAME9, false);\n+ }\n+\n+ @Test\n+ public void testListNamedRixRewrites() {\n+ runListStructTest(TEST_NAME9, true);\n+ }\n+\nprivate void runListStructTest(String testname, boolean rewrites)\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListNamedRix.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+#X = list(1,3,7,5,4);\n+X = list(1,0,0,0,0);\n+X[2:4] = list(3,7,5);\n+X[5] = 4;\n+Y = as.matrix(unlist(X));\n+R = as.matrix(nrow(Y) * sum(Y) + ncol(Y));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListNamedRix.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = list(a=0,b=0,c=0,d=0,e=0);\n+X[2:4] = list(3,7,5);\n+X[\"e\"] = 4;\n+X[1] = list(f=2);\n+X[\"f\"] = 1;\n+\n+Y = as.matrix(X);\n+R = as.matrix(nrow(Y) * sum(Y) + ncol(Y));\n+\n+write(R, $1);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListUnnamedRix.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+#X = list(1,3,7,5,4);\n+X = list(1,0,0,0,0);\n+X[2:4] = list(3,7,5);\n+X[5] = 4;\n+Y = as.matrix(unlist(X));\n+R = as.matrix(nrow(Y) * sum(Y) + ncol(Y));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/ListUnnamedRix.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#X = list(1,3,7,5,4);\n+X = list(1,0,0,0,0);\n+X[2:4] = list(3,7,5);\n+X[5] = 4;\n+Y = as.matrix(X);\n+R = as.matrix(nrow(Y) * sum(Y) + ncol(Y));\n+\n+write(R, $1);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2415] Support for left indexing on list data types This patch adds support for left indexing operations over both unnamed and named list data types. We allow right-hand-side inputs of types list and scalar with both named and positional indexing expressions.
49,738
21.06.2018 17:40:36
25,200
36c217d38efe8ce031b568a799a9b796a64a1968
[MINOR] Performance binary in-place operations, especially plus This patch makes a minor performance improvement to binary in-place operations for dense-dense and dense-sparse ops, especially plus. In a paramserv ASP scenario (10 epochs, 80 workers), this patch improved the end-to-end runtime from 212s to 202s.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "diff": "@@ -1082,10 +1082,12 @@ public class LibMatrixBincell\nprivate static void safeBinaryInPlace(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op) {\n//early abort on skip and empty\n- if( m1ret.isEmptyBlock(false) && m2.isEmptyBlock(false) )\n+ if( (m1ret.isEmpty() && m2.isEmpty() )\n+ || (op.fn instanceof Plus && m2.isEmpty())\n+ || (op.fn instanceof Minus && m2.isEmpty()))\nreturn; // skip entire empty block\n//special case: start aggregation\n- else if( op.fn instanceof Plus && m1ret.isEmptyBlock(false) ){\n+ else if( op.fn instanceof Plus && m1ret.isEmpty() ){\nm1ret.copy(m2);\nreturn;\n}\n@@ -1094,6 +1096,8 @@ public class LibMatrixBincell\nsafeBinaryInPlaceSparse(m1ret, m2, op);\nelse if(!m1ret.sparse && !m2.sparse)\nsafeBinaryInPlaceDense(m1ret, m2, op);\n+ else if(m2.sparse && (op.fn instanceof Plus || op.fn instanceof Minus))\n+ safeBinaryInPlaceDenseSparseAdd(m1ret, m2, op);\nelse //GENERIC\nsafeBinaryInPlaceGeneric(m1ret, m2, op);\n}\n@@ -1191,6 +1195,14 @@ public class LibMatrixBincell\n}\n}\n}\n+ else if( op.fn instanceof Plus ) {\n+ for(int r=0; r<rlen; r++) {\n+ int aix = a.pos(r), bix = b.pos(r);\n+ double[] avals = a.values(r), bvals = b.values(r);\n+ LibMatrixMult.vectAdd(bvals, avals, bix, aix, clen);\n+ lnnz += UtilFunctions.computeNnz(avals, aix, clen);\n+ }\n+ }\nelse {\nfor(int r=0; r<rlen; r++) {\ndouble[] avals = a.values(r), bvals = b.values(r);\n@@ -1204,28 +1216,31 @@ public class LibMatrixBincell\nm1ret.setNonZeros(lnnz);\n}\n- private static void safeBinaryInPlaceGeneric(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op) {\n+ private static void safeBinaryInPlaceDenseSparseAdd(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op) {\nfinal int rlen = m1ret.rlen;\n- final int clen = m1ret.clen;\n-\n- if( m2.sparse && (op.fn instanceof Plus || op.fn instanceof Minus) ) {\n- if( m2.isEmptyBlock(false) )\n- return;\n+ DenseBlock a = m1ret.denseBlock;\nSparseBlock b = m2.sparseBlock;\n+ long nnz = m1ret.getNonZeros();\nfor(int r=0; r<rlen; r++) {\nif( b.isEmpty(r) ) continue;\n- int bpos = b.pos(r);\n+ int apos = a.pos(r), bpos = b.pos(r);\nint blen = b.size(r);\nint[] bix = b.indexes(r);\n- double[] bvals = b.values(r);\n+ double[] avals = a.values(r), bvals = b.values(r);\nfor(int k = bpos; k<bpos+blen; k++) {\n- double vold = m1ret.quickGetValue(r, bix[k]);\n+ double vold = avals[apos+bix[k]];\ndouble vnew = op.fn.execute(vold, bvals[k]);\n- m1ret.quickSetValue(r, bix[k], vnew);\n+ nnz += (vold == 0 && vnew != 0) ? 1 :\n+ (vold != 0 && vnew ==0) ? -1 : 0;\n+ avals[apos+bix[k]] = vnew;\n}\n}\n+ m1ret.setNonZeros(nnz);\n}\n- else {\n+\n+ private static void safeBinaryInPlaceGeneric(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op) {\n+ final int rlen = m1ret.rlen;\n+ final int clen = m1ret.clen;\nfor(int r=0; r<rlen; r++)\nfor(int c=0; c<clen; c++) {\ndouble thisvalue = m1ret.quickGetValue(r, c);\n@@ -1234,7 +1249,6 @@ public class LibMatrixBincell\nm1ret.quickSetValue(r, c, resultvalue);\n}\n}\n- }\nprivate static void unsafeBinaryInPlace(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op)\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance binary in-place operations, especially plus This patch makes a minor performance improvement to binary in-place operations for dense-dense and dense-sparse ops, especially plus. In a paramserv ASP scenario (10 epochs, 80 workers), this patch improved the end-to-end runtime from 212s to 202s.
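The core of the dense-dense fast path above is to replace cell-wise quickGet/quickSet calls with a tight per-row loop plus a per-row nnz recount. A minimal, self-contained sketch of that pattern follows; the class name and array-of-arrays representation are simplified stand-ins for SystemML's DenseBlock/LibMatrixMult APIs, not the actual signatures.

```java
// Hypothetical simplification of the dense-dense in-place plus path:
// add row b into row a with a JIT-friendly loop, then recount non-zeros.
public class InPlaceAddSketch {
    public static long addInPlace(double[][] a, double[][] b) {
        long nnz = 0;
        for (int r = 0; r < a.length; r++) {
            double[] arow = a[r], brow = b[r];
            for (int c = 0; c < arow.length; c++)
                arow[c] += brow[c];      // vectorizable tight loop, no per-cell calls
            for (double v : arow)
                nnz += (v != 0) ? 1 : 0; // nnz maintained per row, not per cell
        }
        return nnz;
    }
    public static void main(String[] args) {
        double[][] a = {{1, 0}, {0, 2}};
        double[][] b = {{0, 3}, {0, -2}};
        System.out.println(addInPlace(a, b)); // 2: the 2 + (-2) cell cancels to zero
    }
}
```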
49,738
21.06.2018 21:36:31
25,200
0e01af5244c6ea562a3a31bafacfd91e0aff07a0
Performance density map sparsity estimator This patch makes two minor performance improvements to density maps. First, for fully dense inputs, we now directly set the entire density map to 1 instead of computing the histogram and sparsity. Second, we also avoid unnecessary computation for sparse density maps with zero cells.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -91,11 +91,22 @@ public class EstimatorDensityMap extends SparsityEstimator\nint rlen = (int)Math.ceil((double)in.getNumRows()/_b);\nint clen = (int)Math.ceil((double)in.getNumColumns()/_b);\nMatrixBlock out = new MatrixBlock(rlen, clen, false);\n+\n+ //fast-path empty input\nif( in.isEmptyBlock(false) )\nreturn out;\n- //compute nnz histogram\n+ //allocate dense output block\nDenseBlock c = out.allocateBlock().getDenseBlock();\n+\n+ //fast-path fully dense input\n+ if( in.getLength() == in.getNonZeros() ) {\n+ c.set(1); //set sparsity 1.0 into all cells\n+ out.setNonZeros(in.getLength());\n+ return out;\n+ }\n+\n+ //compute nnz histogram\nif( in.isInSparseFormat() ) {\nSparseBlock sblock = in.getSparseBlock();\nfor(int i=0; i<in.getNumRows(); i++) {\n@@ -121,8 +132,10 @@ public class EstimatorDensityMap extends SparsityEstimator\nfor(int i=0; i<rlen; i++){\nint lrlen = UtilFunctions.computeBlockSize(in.getNumRows(), i+1, _b);\nfor(int j=0; j<clen; j++) {\n+ double cval = c.get(i, j);\n+ if( cval == 0 ) continue;\nint lclen = UtilFunctions.computeBlockSize(in.getNumColumns(), j+1, _b);\n- c.set(i, j, c.get(i, j)/lrlen/lclen);\n+ c.set(i, j, cval/lrlen/lclen);\n}\n}\nout.recomputeNonZeros();\n@@ -154,8 +167,10 @@ public class EstimatorDensityMap extends SparsityEstimator\nfor(int k=0; k<cd; k++) {\nint lbk = UtilFunctions.computeBlockSize(cdOrig, k+1, _b);\ndouble sp1 = m1Map.quickGetValue(i, k);\n+ if( sp1 == 0 ) continue;\nfor(int j=0; j<n; j++) {\ndouble sp2 = m2Map.quickGetValue(k, j);\n+ if( sp2 == 0 ) continue;\n//custom multiply for scalar sparsity\ndouble tmp1 = 1 - Math.pow(1-sp1*sp2, lbk);\n//custom add for scalar sparsity\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2417] Performance density map sparsity estimator This patch makes two minor performance improvements to density maps. First, for fully dense inputs, we now directly set the entire density map to 1 instead of computing the histogram and sparsity. Second, we also avoid unnecessary computation for sparse density maps with zero cells.
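The estimator's per-block propagation can be read off the diff above: a k-block of size lbk contributes sparsity 1 - (1 - spA*spB)^lbk to an output block, and the contributions of different k-blocks are combined. Below is a minimal sketch, assuming the combine is the standard probabilistic OR (the exact combine in EstimatorDensityMap may differ); all names are illustrative.

```java
// Condensed sketch of density-map sparsity propagation for C = A %*% B.
public class DensityMapSketch {
    // probability that an output cell is non-zero for one k-block of size lbk
    static double mult(double spA, double spB, int lbk) {
        return 1 - Math.pow(1 - spA * spB, lbk);
    }
    // probabilistic OR across k-blocks (assumption, see lead-in)
    static double add(double spC, double spNew) {
        return spC + spNew - spC * spNew;
    }
    public static void main(String[] args) {
        double sp = 0;
        sp = add(sp, mult(0.10, 0.20, 256)); // first k-block
        sp = add(sp, mult(0.05, 0.30, 256)); // second k-block
        System.out.println(sp);              // estimated density of the output block
    }
}
```

The commit's zero-cell shortcut is visible here: mult returns 0 whenever spA or spB is 0, and add(sp, 0) leaves sp unchanged, so those cells can be skipped outright.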
49,738
22.06.2018 15:44:15
25,200
e9268d9e7d2f7558a09593aa0d196482c74177b8
[HOTFIX] Fix shallow copy in ternary axpy operations (restricted cp right)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "diff": "@@ -197,7 +197,7 @@ public class LibMatrixBincell\n|| isSparseSafeDivide(op, m2) );\nboolean copyLeftRightEmpty = (op.fn instanceof Plus || op.fn instanceof Minus\n|| op.fn instanceof PlusMultiply || op.fn instanceof MinusMultiply);\n- boolean copyRightLeftEmpty = (op.fn instanceof Plus || op.fn instanceof PlusMultiply);\n+ boolean copyRightLeftEmpty = (op.fn instanceof Plus);\n//skip empty blocks (since sparse-safe)\nif( m1.isEmptyBlock(false) && m2.isEmptyBlock(false)\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix shallow copy ternary axpy operations (restricted cp right)
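The one-line restriction above matters because the shallow-copy shortcut is only value-preserving for plain plus. A tiny illustration, with scalars standing in for matrix blocks:

```java
public class AxpyCopySketch {
    public static void main(String[] args) {
        double s = 2.0, y = 3.0;
        // plus with an empty (all-zero) left input: 0 + y == y,
        // so returning a shallow copy of the right input is valid
        System.out.println(0 + y);     // 3.0
        // plus-multiply (axpy): 0 + s*y != y for s != 1,
        // so the right input must not be returned as-is
        System.out.println(0 + s * y); // 6.0
    }
}
```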
49,727
22.06.2018 22:29:49
25,200
e7fccd1c764ec470f6460e4e3cec90913e606798
Simplified paramserv aggregation service Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "@@ -107,7 +107,7 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nprivate ListObject updateModel(ListObject globalParams, ListObject gradients, int i, int j, int totalIter) {\nTiming tUpd = DMLScript.STATISTICS ? new Timing(true) : null;\n- globalParams = _ps.updateModel(_ec, gradients, globalParams);\n+ globalParams = _ps.updateLocalModel(_ec, gradients, globalParams);\nif (DMLScript.STATISTICS)\nStatistics.accPSLocalModelUpdateTime((long) tUpd.stop());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalParamServer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalParamServer.java", "diff": "package org.apache.sysml.runtime.controlprogram.paramserv;\n-import java.util.concurrent.ExecutionException;\n-\nimport org.apache.sysml.parser.Statement;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n@@ -35,16 +33,7 @@ public class LocalParamServer extends ParamServer {\n@Override\npublic void push(int workerID, ListObject gradients) {\n- try {\n- _gradientsQueue.put(new Gradient(workerID, gradients));\n- } catch (InterruptedException e) {\n- throw new DMLRuntimeException(e);\n- }\n- try {\n- launchService();\n- } catch (ExecutionException | InterruptedException e) {\n- throw new DMLRuntimeException(\"Aggregate service: some error occurred: \", e);\n- }\n+ updateGlobalModel(workerID, gradients);\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "diff": "@@ -27,16 +27,10 @@ import java.util.HashMap;\nimport java.util.Map;\nimport java.util.concurrent.ArrayBlockingQueue;\nimport java.util.concurrent.BlockingQueue;\n-import java.util.concurrent.Callable;\n-import java.util.concurrent.ExecutionException;\n-import java.util.concurrent.ExecutorService;\n-import java.util.concurrent.Executors;\n-import java.util.concurrent.LinkedBlockingDeque;\nimport java.util.stream.Collectors;\nimport java.util.stream.IntStream;\nimport org.apache.commons.lang3.ArrayUtils;\n-import org.apache.commons.lang3.concurrent.BasicThreadFactory;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\n@@ -53,85 +47,34 @@ import org.apache.sysml.runtime.instructions.cp.FunctionCallCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.ListObject;\nimport org.apache.sysml.utils.Statistics;\n-public abstract class ParamServer {\n+public abstract class ParamServer\n+{\n+ protected final Log LOG = LogFactory.getLog(ParamServer.class.getName());\n- final BlockingQueue<Gradient> _gradientsQueue;\n- final Map<Integer, BlockingQueue<ListObject>> _modelMap;\n- private final AggregationService _aggService;\n- private final ExecutorService _es;\n+ // worker input queues and global model\n+ protected final Map<Integer, BlockingQueue<ListObject>> _modelMap;\nprivate ListObject _model;\n- ParamServer(ListObject model, String aggFunc, Statement.PSUpdateType updateType, ExecutionContext ec, int workerNum) {\n- _gradientsQueue = new 
LinkedBlockingDeque<>();\n+ //aggregation service\n+ protected final ExecutionContext _ec;\n+ private final Statement.PSUpdateType _updateType;\n+ private final FunctionCallCPInstruction _inst;\n+ private final String _outputName;\n+ private final boolean[] _finishedStates; // Workers' finished states\n+\n+ protected ParamServer(ListObject model, String aggFunc, Statement.PSUpdateType updateType, ExecutionContext ec, int workerNum) {\n+ // init worker queues and global model\n_modelMap = new HashMap<>(workerNum);\nIntStream.range(0, workerNum).forEach(i -> {\n// Create a single element blocking queue for workers to receive the broadcasted model\n_modelMap.put(i, new ArrayBlockingQueue<>(1));\n});\n_model = model;\n- _aggService = new AggregationService(aggFunc, updateType, ec, workerNum);\n- try {\n- _aggService.broadcastModel();\n- }\n- catch (InterruptedException e) {\n- throw new DMLRuntimeException(\"Param server: failed to broadcast the initial model.\", e);\n- }\n- BasicThreadFactory factory = new BasicThreadFactory.Builder()\n- .namingPattern(\"agg-service-pool-thread-%d\").build();\n- _es = Executors.newSingleThreadExecutor(factory);\n- }\n-\n- public abstract void push(int workerID, ListObject value);\n-\n- public abstract Data pull(int workerID);\n- void launchService() throws ExecutionException, InterruptedException {\n- _es.submit(_aggService).get();\n- }\n-\n- public void shutdown() {\n- _es.shutdownNow();\n- }\n-\n- public ListObject getResult() {\n- // All the model updating work has terminated,\n- // so we could return directly the result model\n- return _model;\n- }\n-\n- public ListObject updateModel(ExecutionContext ec, ListObject gradients, ListObject model) {\n- return _aggService.updateModel(ec, gradients, model);\n- }\n-\n- public static class Gradient {\n- final int _workerID;\n- final ListObject _gradients;\n-\n- public Gradient(int workerID, ListObject gradients) {\n- _workerID = workerID;\n- _gradients = gradients;\n- }\n- }\n-\n- /**\n- * Inner aggregation service which is for updating the model\n- */\n- private class AggregationService implements Callable<Void> {\n-\n- protected final Log LOG = LogFactory.getLog(AggregationService.class.getName());\n-\n- protected final ExecutionContext _ec;\n- private final Statement.PSUpdateType _updateType;\n- private final FunctionCallCPInstruction _inst;\n- private final DataIdentifier _output;\n- private final boolean[] _finishedStates; // Workers' finished states\n-\n- AggregationService(String aggFunc, Statement.PSUpdateType updateType, ExecutionContext ec, int workerNum) {\n+ // init aggregation service\n_ec = ec;\n_updateType = updateType;\n_finishedStates = new boolean[workerNum];\n-\n- // Fetch the aggregation function\nString[] cfn = ParamservUtils.getCompleteFuncName(aggFunc, PS_FUNC_PREFIX);\nString ns = cfn[0];\nString fname = cfn[1];\n@@ -146,7 +89,7 @@ public abstract class ParamServer {\nif (outputs.get(0).getDataType() != Expression.DataType.LIST) {\nthrow new DMLRuntimeException(String.format(\"The output of the '%s' function should be type of list.\", aggFunc));\n}\n- _output = outputs.get(0);\n+ _outputName = outputs.get(0).getName();\nCPOperand[] boundInputs = inputs.stream()\n.map(input -> new CPOperand(input.getName(), input.getValueType(), input.getDataType()))\n@@ -156,65 +99,43 @@ public abstract class ParamServer {\nArrayList<String> outputNames = outputs.stream().map(DataIdentifier::getName)\n.collect(Collectors.toCollection(ArrayList::new));\n_inst = new FunctionCallCPInstruction(ns, fname, 
boundInputs, inputNames, outputNames, \"aggregate function\");\n- }\n-\n- private boolean allFinished() {\n- return !ArrayUtils.contains(_finishedStates, false);\n- }\n- private void resetFinishedStates() {\n- Arrays.fill(_finishedStates, false);\n+ // broadcast initial model\n+ try {\n+ broadcastModel();\n}\n-\n- private void setFinishedState(int workerID) {\n- _finishedStates[workerID] = true;\n+ catch (InterruptedException e) {\n+ throw new DMLRuntimeException(\"Param server: failed to broadcast the initial model.\", e);\n}\n-\n- private void broadcastModel() throws InterruptedException {\n- Timing tBroad = DMLScript.STATISTICS ? new Timing(true) : null;\n-\n- //broadcast copy of the model to all workers, cleaned up by workers\n- for (BlockingQueue<ListObject> q : _modelMap.values())\n- q.put(ParamservUtils.copyList(_model));\n-\n- if (DMLScript.STATISTICS)\n- Statistics.accPSModelBroadcastTime((long) tBroad.stop());\n}\n- private void broadcastModel(int workerID) throws InterruptedException {\n- Timing tBroad = DMLScript.STATISTICS ? new Timing(true) : null;\n+ public abstract void push(int workerID, ListObject value);\n- //broadcast copy of model to specific worker, cleaned up by worker\n- _modelMap.get(workerID).put(ParamservUtils.copyList(_model));\n+ public abstract Data pull(int workerID);\n- if (DMLScript.STATISTICS)\n- Statistics.accPSModelBroadcastTime((long) tBroad.stop());\n+ public ListObject getResult() {\n+ // All the model updating work has terminated,\n+ // so we could return directly the result model\n+ return _model;\n}\n- @Override\n- public Void call() throws Exception {\n- try {\n- Gradient grad;\n+ protected synchronized void updateGlobalModel(int workerID, ListObject gradients) {\ntry {\n- grad = _gradientsQueue.take();\n- } catch (InterruptedException e) {\n- throw new DMLRuntimeException(\"Aggregation service: error when waiting for the coming gradients.\", e);\n- }\nif (LOG.isDebugEnabled()) {\nLOG.debug(String.format(\"Successfully pulled the gradients [size:%d kb] of worker_%d.\",\n- grad._gradients.getDataSize() / 1024, grad._workerID));\n+ gradients.getDataSize() / 1024, workerID));\n}\n// Update and redistribute the model\nTiming tAgg = DMLScript.STATISTICS ? 
new Timing(true) : null;\n- _model = updateModel(grad._gradients, _model);\n+ _model = updateLocalModel(_ec, gradients, _model);\nif (DMLScript.STATISTICS)\nStatistics.accPSAggregationTime((long) tAgg.stop());\n// Redistribute model according to update type\nswitch(_updateType) {\ncase BSP: {\n- setFinishedState(grad._workerID);\n+ setFinishedState(workerID);\nif (allFinished()) {\n// Broadcast the updated model\nresetFinishedStates();\n@@ -225,7 +146,7 @@ public abstract class ParamServer {\nbreak;\n}\ncase ASP: {\n- broadcastModel(grad._workerID);\n+ broadcastModel(workerID);\nbreak;\n}\ndefault:\n@@ -235,17 +156,17 @@ public abstract class ParamServer {\ncatch (Exception e) {\nthrow new DMLRuntimeException(\"Aggregation service failed: \", e);\n}\n- return null;\n- }\n-\n- private ListObject updateModel(ListObject gradients, ListObject model) {\n- return updateModel(_ec, gradients, model);\n}\n/**\n* A service method for updating model with gradients\n+ *\n+ * @param ec execution context\n+ * @param gradients list of gradients\n+ * @param model old model\n+ * @return new model\n*/\n- private ListObject updateModel(ExecutionContext ec, ListObject gradients, ListObject model) {\n+ protected ListObject updateLocalModel(ExecutionContext ec, ListObject gradients, ListObject model) {\n// Populate the variables table with the gradients and model\nec.setVariable(Statement.PS_GRADIENTS, gradients);\nec.setVariable(Statement.PS_MODEL, model);\n@@ -254,12 +175,44 @@ public abstract class ParamServer {\n_inst.processInstruction(ec);\n// Get the output\n- ListObject newModel = (ListObject) ec.getVariable(_output.getName());\n+ ListObject newModel = (ListObject) ec.getVariable(_outputName);\n// Update the model with the new output\nParamservUtils.cleanupListObject(ec, Statement.PS_MODEL);\nParamservUtils.cleanupListObject(ec, Statement.PS_GRADIENTS);\nreturn newModel;\n}\n+\n+ private boolean allFinished() {\n+ return !ArrayUtils.contains(_finishedStates, false);\n+ }\n+\n+ private void resetFinishedStates() {\n+ Arrays.fill(_finishedStates, false);\n+ }\n+\n+ private void setFinishedState(int workerID) {\n+ _finishedStates[workerID] = true;\n+ }\n+\n+ private void broadcastModel() throws InterruptedException {\n+ Timing tBroad = DMLScript.STATISTICS ? new Timing(true) : null;\n+\n+ //broadcast copy of the model to all workers, cleaned up by workers\n+ for (BlockingQueue<ListObject> q : _modelMap.values())\n+ q.put(ParamservUtils.copyList(_model));\n+\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSModelBroadcastTime((long) tBroad.stop());\n+ }\n+\n+ private void broadcastModel(int workerID) throws InterruptedException {\n+ Timing tBroad = DMLScript.STATISTICS ? new Timing(true) : null;\n+\n+ //broadcast copy of model to specific worker, cleaned up by worker\n+ _modelMap.get(workerID).put(ParamservUtils.copyList(_model));\n+\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSModelBroadcastTime((long) tBroad.stop());\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "diff": "@@ -160,8 +160,6 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nthrow new DMLRuntimeException(\"ParamservBuiltinCPInstruction: some error occurred: \", e);\n} finally {\nes.shutdownNow();\n- // Should shutdown the thread pool in param server\n- ps.shutdown();\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2416] Simplified paramserv aggregation service Closes #790.
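The net effect of the refactoring is that workers now invoke a synchronized update on the server directly, rather than enqueuing Gradient objects for a dedicated single-threaded aggregation service. A minimal sketch of the resulting control flow, with a double standing in for the model ListObject and simple addition standing in for the user-defined aggregate function:

```java
// Sketch of the simplified parameter server: one synchronized entry point
// replaces the gradients queue plus aggregator thread.
public class PsSketch {
    private double model = 0; // stand-in for the global model (ListObject)

    public synchronized void push(int workerId, double gradient) {
        model += gradient;    // stand-in for the aggregate function call
    }
    public synchronized double pull(int workerId) {
        return model;         // workers fetch the latest model
    }
    public static void main(String[] args) {
        PsSketch ps = new PsSketch();
        ps.push(0, 0.5);
        ps.push(1, -0.2);
        System.out.println(ps.pull(0)); // ~0.3
    }
}
```

Serializing updates on the server's monitor gives the same ordering guarantees the single-threaded service provided, without the queue hand-off or the thread-pool shutdown logic.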
49,738
24.06.2018 00:58:58
25,200
1608528ddc346b37987ec75ff24d6cba45f03d7a
[MINOR] Performance paramserv data partitioning via permutations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "diff": "@@ -2200,7 +2200,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n@Override\npublic void ctableOperations(Operator op, double scalar,\nMatrixValue that, CTableMap resultMap, MatrixBlock resultBlock) {\n- printDecompressWarning(\"ternaryOperations\");\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock left = isCompressed() ? decompress() : this;\nMatrixBlock right = getUncompressed(that);\nleft.ctableOperations(op, scalar, right, resultMap, resultBlock);\n@@ -2209,7 +2209,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n@Override\npublic void ctableOperations(Operator op, double scalar,\ndouble scalar2, CTableMap resultMap, MatrixBlock resultBlock) {\n- printDecompressWarning(\"ternaryOperations\");\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock tmp = isCompressed() ? decompress() : this;\ntmp.ctableOperations(op, scalar, scalar2, resultMap, resultBlock);\n}\n@@ -2218,7 +2218,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\npublic void ctableOperations(Operator op, MatrixIndexes ix1,\ndouble scalar, boolean left, int brlen, CTableMap resultMap,\nMatrixBlock resultBlock) {\n- printDecompressWarning(\"ternaryOperations\");\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock tmp = isCompressed() ? decompress() : this;\ntmp.ctableOperations(op, ix1, scalar, left, brlen, resultMap, resultBlock);\n}\n@@ -2227,24 +2227,23 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\npublic void ctableOperations(Operator op, MatrixValue that,\ndouble scalar, boolean ignoreZeros, CTableMap resultMap,\nMatrixBlock resultBlock) {\n- printDecompressWarning(\"ternaryOperations\");\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock left = isCompressed() ? decompress() : this;\nMatrixBlock right = getUncompressed(that);\nleft.ctableOperations(op, right, scalar, ignoreZeros, resultMap, resultBlock);\n}\n@Override\n- public void ctableOperations(Operator op, MatrixValue that, double scalar, MatrixBlock resultBlock) {\n- printDecompressWarning(\"ternaryOperations\");\n- MatrixBlock left = isCompressed() ? decompress() : this;\n+ public MatrixBlock ctableSeqOperations(MatrixValue that, double scalar, MatrixBlock resultBlock) {\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock right = getUncompressed(that);\n- left.ctableOperations(op, right, scalar, resultBlock);\n+ return this.ctableSeqOperations(right, scalar, resultBlock);\n}\n@Override\npublic void ctableOperations(Operator op, MatrixValue that,\nMatrixValue that2, CTableMap resultMap) {\n- printDecompressWarning(\"ternaryOperations\");\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock left = isCompressed() ? decompress() : this;\nMatrixBlock right1 = getUncompressed(that);\nMatrixBlock right2 = getUncompressed(that2);\n@@ -2254,7 +2253,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n@Override\npublic void ctableOperations(Operator op, MatrixValue that,\nMatrixValue that2, CTableMap resultMap, MatrixBlock resultBlock) {\n- printDecompressWarning(\"ternaryOperations\");\n+ printDecompressWarning(\"ctableOperations\");\nMatrixBlock left = isCompressed() ? 
decompress() : this;\nMatrixBlock right1 = getUncompressed(that);\nMatrixBlock right2 = getUncompressed(that2);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "diff": "@@ -123,13 +123,13 @@ public class ParamservUtils {\npublic static MatrixBlock generatePermutation(int numEntries) {\n// Create a sequence and sample w/o replacement\n- MatrixBlock seq = MatrixBlock.seqOperations(1, numEntries, 1);\n+ // (no need to materialize the sequence because ctable only uses its meta data)\n+ MatrixBlock seq = new MatrixBlock(numEntries, 1, false);\nMatrixBlock sample = MatrixBlock.sampleOperations(numEntries, numEntries, false, -1);\n// Combine the sequence and sample as a table\n- MatrixBlock permutation = new MatrixBlock(numEntries, numEntries, true);\n- seq.ctableOperations(null, sample, 1.0, permutation);\n- return permutation;\n+ return seq.ctableSeqOperations(sample, 1.0,\n+ new MatrixBlock(numEntries, numEntries, true));\n}\npublic static String[] getCompleteFuncName(String funcName, String prefix) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/CtableCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/CtableCPInstruction.java", "diff": "@@ -134,7 +134,7 @@ public class CtableCPInstruction extends ComputationCPInstruction {\nmatBlock2 = ec.getMatrixInput(input2.getName(), getExtendedOpcode());\ncst1 = ec.getScalarInput(input3.getName(), input3.getValueType(), input3.isLiteral()).getDoubleValue();\n// only resultBlock.rlen known, resultBlock.clen set in operation\n- matBlock1.ctableOperations((SimpleOperator)_optr, matBlock2, cst1, resultBlock);\n+ matBlock1.ctableSeqOperations(matBlock2, cst1, resultBlock);\nbreak;\ncase CTABLE_TRANSFORM_HISTOGRAM: //(VECTOR)\n// F=ctable(A,1) or F = ctable(A,1,1)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -5236,12 +5236,12 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n* (i1,j1,v2) from input2 (that)\n* (w) from scalar_input3 (scalarThat2)\n*\n- * @param op operator\n* @param thatMatrix matrix value\n* @param thatScalar scalar double\n* @param resultBlock result matrix block\n+ * @return resultBlock\n*/\n- public void ctableOperations(Operator op, MatrixValue thatMatrix, double thatScalar, MatrixBlock resultBlock) {\n+ public MatrixBlock ctableSeqOperations(MatrixValue thatMatrix, double thatScalar, MatrixBlock resultBlock) {\nMatrixBlock that = checkType(thatMatrix);\nCTable ctable = CTable.getCTableFnObject();\ndouble w = thatScalar;\n@@ -5259,6 +5259,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//update meta data (initially unknown number of columns)\n//note: nnz maintained in ctable (via quickset)\nresultBlock.clen = maxCol;\n+ return resultBlock;\n}\n/**\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance paramserv data partitioning via permutations
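The permutation trick above relies on ctable only touching the meta data of its sequence input: sampling n positions without replacement and scattering a 1 at (i, sample[i]) yields a random permutation matrix. An illustrative sketch with plain arrays, where Collections.shuffle plays the role of sampleOperations:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Sketch: build an n x n permutation matrix from a sample w/o replacement.
public class PermutationSketch {
    public static int[][] generatePermutation(int n) {
        List<Integer> sample = new ArrayList<>();
        for (int i = 0; i < n; i++) sample.add(i);
        Collections.shuffle(sample);     // sample without replacement
        int[][] p = new int[n][n];
        for (int i = 0; i < n; i++)
            p[i][sample.get(i)] = 1;     // ctable-style scatter of 1s
        return p;
    }
    public static void main(String[] args) {
        for (int[] row : generatePermutation(4))
            System.out.println(Arrays.toString(row)); // exactly one 1 per row and column
    }
}
```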
49,738
24.06.2018 01:15:54
25,200
80dcb52808c9a5cf6903c1044abfd33d94ab3123
[MINOR] Add missing support for ternary ops over compressed matrices
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "diff": "@@ -90,6 +90,7 @@ import org.apache.sysml.runtime.matrix.operators.Operator;\nimport org.apache.sysml.runtime.matrix.operators.QuaternaryOperator;\nimport org.apache.sysml.runtime.matrix.operators.ReorgOperator;\nimport org.apache.sysml.runtime.matrix.operators.ScalarOperator;\n+import org.apache.sysml.runtime.matrix.operators.TernaryOperator;\nimport org.apache.sysml.runtime.matrix.operators.UnaryOperator;\nimport org.apache.sysml.runtime.util.CommonThreadPool;\nimport org.apache.sysml.runtime.util.IndexRange;\n@@ -2260,6 +2261,15 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nleft.ctableOperations(op, right1, right2, resultMap, resultBlock);\n}\n+ @Override\n+ public MatrixBlock ternaryOperations(TernaryOperator op, MatrixBlock m2, MatrixBlock m3, MatrixBlock ret) {\n+ printDecompressWarning(\"ternaryOperations\");\n+ MatrixBlock left = isCompressed() ? decompress() : this;\n+ MatrixBlock right1 = getUncompressed(m2);\n+ MatrixBlock right2 = getUncompressed(m3);\n+ return left.ternaryOperations(op, right1, right2, ret);\n+ }\n+\n@Override\npublic MatrixBlock quaternaryOperations(QuaternaryOperator qop,\nMatrixBlock um, MatrixBlock vm, MatrixBlock wm, MatrixBlock out) {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add missing support for ternary ops over compressed matrices
49,736
06.07.2018 09:53:22
25,200
b56612f0231c7fe7abc100bf6f296bdb393aa971
Fixed a memory leak in the GPU lstm builtin function and added a developer utility to debug such bugs in the future.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java", "diff": "@@ -299,7 +299,7 @@ public class CSRPointer {\ncusparseSetPointerMode(handle, cusparsePointerMode.CUSPARSE_POINTER_MODE_HOST);\n//cudaDeviceSynchronize;\n// Do not increment the cudaCount of allocations on GPU\n- C.rowPtr = gCtx.allocate(getIntSizeOf((long) rowsC + 1));\n+ C.rowPtr = gCtx.allocate(null, getIntSizeOf((long) rowsC + 1));\n}\n/**\n@@ -413,7 +413,7 @@ public class CSRPointer {\n}\nprivate Pointer allocate(long size) {\n- return getGPUContext().allocate(size);\n+ return getGPUContext().allocate(null, size);\n}\nprivate GPUContext getGPUContext() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -182,16 +182,6 @@ public class GPUContext {\ninitializeCudaLibraryHandles();\n}\n- /**\n- * Convenience method for {@link #allocate(String, long)}.\n- *\n- * @param size size of data (in bytes) to allocate\n- * @return jcuda pointer\n- */\n- public Pointer allocate(long size) {\n- return memoryManager.malloc(null, size);\n- }\n-\n/**\n* Invokes memory manager's malloc method\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -28,6 +28,7 @@ import java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\n+import java.util.Map.Entry;\nimport java.util.Set;\nimport java.util.concurrent.atomic.LongAdder;\nimport java.util.stream.Collectors;\n@@ -50,6 +51,11 @@ import jcuda.Pointer;\npublic class GPUMemoryManager {\nprotected static final Log LOG = LogFactory.getLog(GPUMemoryManager.class.getName());\n+ // Developer flag: Use this flag to check for GPU memory leak in SystemML.\n+ // This has an additional overhead of maintaining stack trace of all the allocated GPU pointers via PointerInfo class.\n+ private static final boolean DEBUG_MEMORY_LEAK = false;\n+ private static final int [] DEBUG_MEMORY_LEAK_STACKTRACE_DEPTH = {5, 6, 7, 8, 9, 10}; // Avoids printing too much text while debuggin\n+\n/*****************************************************************************************/\n// GPU Memory is divided into three major sections:\n// 1. Matrix Memory: Memory allocated to matrices in SystemML and addressable by GPUObjects.\n@@ -109,7 +115,7 @@ public class GPUMemoryManager {\nprivate long sizeInBytes;\nprivate StackTraceElement[] stackTraceElements;\npublic PointerInfo(long sizeInBytes) {\n- if(DMLScript.PRINT_GPU_MEMORY_INFO) {\n+ if(DEBUG_MEMORY_LEAK) {\nthis.stackTraceElements = Thread.currentThread().getStackTrace();\n}\nthis.sizeInBytes = sizeInBytes;\n@@ -196,6 +202,7 @@ public class GPUMemoryManager {\n}\n}\n+\n/**\n* Allocate pointer of the given size in bytes.\n*\n@@ -207,6 +214,10 @@ public class GPUMemoryManager {\nif(size < 0) {\nthrow new DMLRuntimeException(\"Cannot allocate memory of size \" + byteCountToDisplaySize(size));\n}\n+ if(DEBUG_MEMORY_LEAK) {\n+ LOG.info(\"GPU Memory info during malloc:\" + toString());\n+ }\n+\nlong t0 = DMLScript.STATISTICS ? 
System.nanoTime() : 0;\nlong mallocStart = 0;\n// Step 1: First try reusing exact match in rmvarGPUPointers to avoid holes in the GPU memory\n@@ -395,25 +406,23 @@ public class GPUMemoryManager {\n}\n// --------------- Developer Utilities to debug potential memory leaks ------------------------\n- @SuppressWarnings(\"unused\")\n- private void printPointers(List<PointerInfo> pointers) {\n- for(PointerInfo ptrInfo : pointers) {\n- System.out.println(\">>\" +\n- // getCallerInfo(ptrInfo.stackTraceElements, 5) + getCallerInfo(ptrInfo.stackTraceElements, 6) + getCallerInfo(ptrInfo.stackTraceElements, 7) +\n- getCallerInfo(ptrInfo.stackTraceElements, 8) + getCallerInfo(ptrInfo.stackTraceElements, 9) + getCallerInfo(ptrInfo.stackTraceElements, 10));\n- }\n- }\n-\n- @SuppressWarnings(\"unused\")\nprivate void printPointers(Set<Pointer> pointers, StringBuilder sb) {\n+ HashMap<String, Integer> frequency = new HashMap<>();\nfor(Pointer ptr : pointers) {\nPointerInfo ptrInfo = allPointers.get(ptr);\n- sb.append(\">>\");\n- // getCallerInfo(ptrInfo.stackTraceElements, 5) + getCallerInfo(ptrInfo.stackTraceElements, 6) + getCallerInfo(ptrInfo.stackTraceElements, 7) +\n- sb.append(getCallerInfo(ptrInfo.stackTraceElements, 8));\n- sb.append(getCallerInfo(ptrInfo.stackTraceElements, 9));\n- sb.append(getCallerInfo(ptrInfo.stackTraceElements, 10));\n- sb.append(\"\\n\");\n+ String key = \"\";\n+ for(int index : DEBUG_MEMORY_LEAK_STACKTRACE_DEPTH) {\n+ key += getCallerInfo(ptrInfo.stackTraceElements, index);\n+ }\n+ if(frequency.containsKey(key)) {\n+ frequency.put(key, frequency.get(key)+1);\n+ }\n+ else {\n+ frequency.put(key, 1);\n+ }\n+ }\n+ for(Entry<String, Integer> kv : frequency.entrySet()) {\n+ sb.append(\">>\" + kv.getKey() + \" => \" + kv.getValue() + \"\\n\");\n}\n}\n// --------------------------------------------------------------------------------------------\n@@ -566,6 +575,7 @@ public class GPUMemoryManager {\n/**\n* Print debugging information\n*/\n+ @SuppressWarnings(\"unused\")\npublic String toString() {\nlong sizeOfLockedGPUObjects = 0; int numLockedGPUObjects = 0; int numLockedPointers = 0;\nlong sizeOfUnlockedDirtyGPUObjects = 0; int numUnlockedDirtyGPUObjects = 0; int numUnlockedDirtyPointers = 0;\n@@ -605,12 +615,10 @@ public class GPUMemoryManager {\ntotalSizePotentiallyLeakyPointers += size;\n}\nStringBuilder ret = new StringBuilder();\n- //if(DMLScript.PRINT_GPU_MEMORY_INFO) {\n- // if(potentiallyLeakyPointers.size() > 0) {\n- // ret.append(\"Non-matrix pointers were allocated by:\\n\");\n- // printPointers(potentiallyLeakyPointers, ret);\n- // }\n- //}\n+ if(DEBUG_MEMORY_LEAK && potentiallyLeakyPointers.size() > 0) {\n+ ret.append(\"Non-matrix pointers were allocated by:\\n\");\n+ printPointers(potentiallyLeakyPointers, ret);\n+ }\nret.append(\"\\n====================================================\\n\");\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"\",\n\"Num Objects\", \"Num Pointers\", \"Size\"));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -183,7 +183,7 @@ public class GPUObject {\n}\nprivate Pointer allocate(long size) {\n- return getGPUContext().allocate(size);\n+ return getGPUContext().allocate(null, size);\n}\nprivate void cudaFreeHelper(Pointer toFree) throws DMLRuntimeException {\n@@ -212,7 +212,7 @@ public class GPUObject {\nPointer alpha = LibMatrixCUDA.one();\nPointer 
beta = LibMatrixCUDA.zero();\nPointer A = densePtr;\n- Pointer C = gCtx.allocate(((long) m) * getDatatypeSizeOf(n));\n+ Pointer C = gCtx.allocate(null, ((long) m) * getDatatypeSizeOf(n));\n// Transpose the matrix to get a dense matrix\nLibMatrixCUDA.cudaSupportFunctions.cublasgeam(gCtx.getCublasHandle(), CUBLAS_OP_T, CUBLAS_OP_T, m, n, alpha, A, lda, beta, new Pointer(),\n@@ -240,8 +240,8 @@ public class GPUObject {\nPointer nnzPerRowPtr = null;\nPointer nnzTotalDevHostPtr = null;\n- nnzPerRowPtr = gCtx.allocate(getIntSizeOf(rows));\n- nnzTotalDevHostPtr = gCtx.allocate(getIntSizeOf(1));\n+ nnzPerRowPtr = gCtx.allocate(null, getIntSizeOf(rows));\n+ nnzTotalDevHostPtr = gCtx.allocate(null, getIntSizeOf(1));\n// Output is in dense vector format, convert it to CSR\nLibMatrixCUDA.cudaSupportFunctions.cusparsennz(cusparseHandle, cusparseDirection.CUSPARSE_DIRECTION_ROW, rows, cols, matDescr, densePtr, rows,\n@@ -532,8 +532,8 @@ public class GPUObject {\nint cols = toIntExact(mat.getNumColumns());\nPointer nnzPerRowPtr = null;\nPointer nnzTotalDevHostPtr = null;\n- nnzPerRowPtr = gCtx.allocate(getIntSizeOf(rows));\n- nnzTotalDevHostPtr = gCtx.allocate(getIntSizeOf(1));\n+ nnzPerRowPtr = gCtx.allocate(instName, getIntSizeOf(rows));\n+ nnzTotalDevHostPtr = gCtx.allocate(instName, getIntSizeOf(1));\nLibMatrixCUDA.cudaSupportFunctions.cusparsennz(cusparseHandle, cusparseDirection.CUSPARSE_DIRECTION_ROW, rows, cols, matDescr, getDensePointer(), rows,\nnnzPerRowPtr, nnzTotalDevHostPtr);\nint[] nnzC = { -1 };\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -2454,7 +2454,7 @@ public class LibMatrixCUDA {\n// step 4: compute QR factorization\nPointer work = gCtx.allocate(instName, lwork[0] * sizeOfDataType);\nPointer tau = gCtx.allocate(instName, m * sizeOfDataType);\n- Pointer devInfo = gCtx.allocate(Sizeof.INT);\n+ Pointer devInfo = gCtx.allocate(instName, Sizeof.INT);\nif (DMLScript.FINEGRAINED_STATISTICS) t0 = System.nanoTime();\ncudaSupportFunctions.cusolverDngeqrf(gCtx.getCusolverDnHandle(), m, n, A, m, tau, work, lwork[0], devInfo);\nif (DMLScript.FINEGRAINED_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_QR, System.nanoTime() - t0);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -436,7 +436,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\ntry(LibMatrixCuDNNInputRowFetcher imgFetcher = new LibMatrixCuDNNInputRowFetcher(gCtx, instName, image);\nLibMatrixCuDNNInputRowFetcher doutFetcher = new LibMatrixCuDNNInputRowFetcher(gCtx, instName, dout)) {\n// Perform one-input conv2dBackwardFilter\n- Pointer tempdwPointer = gCtx.allocate(KCRS*sizeOfDataType);\n+ Pointer tempdwPointer = gCtx.allocate(instName, KCRS*sizeOfDataType);\nfor(int n = 0; n < N; n++) {\nlong t0 = DMLScript.FINEGRAINED_STATISTICS ? 
System.nanoTime() : 0;\ncudaMemset(tempdwPointer, 0, KCRS*sizeOfDataType);\n@@ -754,7 +754,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nif(!isMaxPoolOutputProvided) {\nif (DMLScript.FINEGRAINED_STATISTICS) t1 = System.nanoTime();\nlong numBytes = N*C*P*Q*sizeOfDataType;\n- y = gCtx.allocate(numBytes);\n+ y = gCtx.allocate(instName, numBytes);\nif (DMLScript.FINEGRAINED_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_CUDNN_INIT, System.nanoTime() - t1);\nif (DMLScript.FINEGRAINED_STATISTICS) t2 = System.nanoTime();\nstatus = cudnnPoolingForward(getCudnnHandle(gCtx), desc.poolingDesc, one(), desc.xDesc, x, zero(), desc.yDesc, y);\n@@ -976,6 +976,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nExecutionConfig.getConfigForSimpleVectorOperations(N*T*D),\nsmlDx, cudnnDx, N, D, T*D, N*T*D);\nec.releaseMatrixOutputForGPUInstruction(dxName);\n+ gCtx.cudaFreeHelper(instName, cudnnDx, DMLScript.EAGER_CUDA_FREE);\n// -------------------------------------------------------------------------------------------\nPointer cudnnDwPointer = gCtx.allocate(instName, (D+M+2)*(4*M)*LibMatrixCUDA.sizeOfDataType);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java", "diff": "@@ -141,7 +141,7 @@ public class LibMatrixCuDNNConvolutionAlgorithm implements java.lang.AutoCloseab\njcuda.jcudnn.JCudnn.cudnnGetConvolutionForwardWorkspaceSize(LibMatrixCuDNN.getCudnnHandle(gCtx),\nret.nchwTensorDesc, ret.filterDesc, ret.convDesc, ret.nkpqTensorDesc, algos[0], sizeInBytesArray);\nif (sizeInBytesArray[0] != 0)\n- ret.workSpace = gCtx.allocate(sizeInBytesArray[0]);\n+ ret.workSpace = gCtx.allocate(instName, sizeInBytesArray[0]);\nret.sizeInBytes = sizeInBytesArray[0];\nret.algo = algos[0];\nif (DMLScript.FINEGRAINED_STATISTICS)\n@@ -186,7 +186,7 @@ public class LibMatrixCuDNNConvolutionAlgorithm implements java.lang.AutoCloseab\njcuda.jcudnn.JCudnn.cudnnGetConvolutionBackwardFilterWorkspaceSize(LibMatrixCuDNN.getCudnnHandle(gCtx),\nret.nchwTensorDesc, ret.nkpqTensorDesc, ret.convDesc, ret.filterDesc, algos[0], sizeInBytesArray);\nif (sizeInBytesArray[0] != 0)\n- ret.workSpace = gCtx.allocate(sizeInBytesArray[0]);\n+ ret.workSpace = gCtx.allocate(instName, sizeInBytesArray[0]);\nret.sizeInBytes = sizeInBytesArray[0];\nret.algo = algos[0];\n@@ -239,7 +239,7 @@ public class LibMatrixCuDNNConvolutionAlgorithm implements java.lang.AutoCloseab\njcuda.jcudnn.JCudnn.cudnnGetConvolutionBackwardDataWorkspaceSize(LibMatrixCuDNN.getCudnnHandle(gCtx),\nret.filterDesc, ret.nkpqTensorDesc, ret.convDesc, ret.nchwTensorDesc, algos[0], sizeInBytesArray);\nif (sizeInBytesArray[0] != 0)\n- ret.workSpace = gCtx.allocate(sizeInBytesArray[0]);\n+ ret.workSpace = gCtx.allocate(instName, sizeInBytesArray[0]);\nret.sizeInBytes = sizeInBytesArray[0];\nret.algo = algos[0];\nif (DMLScript.FINEGRAINED_STATISTICS)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNInputRowFetcher.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNInputRowFetcher.java", "diff": "@@ -49,7 +49,7 @@ public class LibMatrixCuDNNInputRowFetcher extends LibMatrixCUDA implements java\nnumColumns = LibMatrixCUDA.toInt(image.getNumColumns());\nisInputInSparseFormat = LibMatrixCUDA.isInSparseFormat(gCtx, image);\ninPointer = isInputInSparseFormat ? 
LibMatrixCUDA.getSparsePointer(gCtx, image, instName) : LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, image, instName);\n- outPointer = gCtx.allocate(numColumns*sizeOfDataType);\n+ outPointer = gCtx.allocate(instName, numColumns*sizeOfDataType);\n}\n/**\n* Copy the nth row and return the dense pointer\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNRnnAlgorithm.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNRnnAlgorithm.java", "diff": "@@ -55,6 +55,7 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\ncudnnFilterDescriptor dwDesc;\nlong sizeInBytes; Pointer workSpace;\nlong reserveSpaceSizeInBytes; Pointer reserveSpace;\n+ long dropOutSizeInBytes; Pointer dropOutStateSpace;\npublic LibMatrixCuDNNRnnAlgorithm(ExecutionContext ec, GPUContext gCtx, String instName,\nString rnnMode, int N, int T, int M, int D, boolean isTraining, Pointer w) throws DMLRuntimeException {\nthis.gCtx = gCtx;\n@@ -83,12 +84,13 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\n// Initial dropout descriptor\ndropoutDesc = new cudnnDropoutDescriptor();\nJCudnn.cudnnCreateDropoutDescriptor(dropoutDesc);\n- long [] dropOutSizeInBytes = {-1};\n- JCudnn.cudnnDropoutGetStatesSize(gCtx.getCudnnHandle(), dropOutSizeInBytes);\n- Pointer dropOutStateSpace = new Pointer();\n- if (dropOutSizeInBytes[0] != 0)\n- dropOutStateSpace = gCtx.allocate(dropOutSizeInBytes[0]);\n- JCudnn.cudnnSetDropoutDescriptor(dropoutDesc, gCtx.getCudnnHandle(), 0, dropOutStateSpace, dropOutSizeInBytes[0], 12345);\n+ long [] _dropOutSizeInBytes = {-1};\n+ JCudnn.cudnnDropoutGetStatesSize(gCtx.getCudnnHandle(), _dropOutSizeInBytes);\n+ dropOutSizeInBytes = _dropOutSizeInBytes[0];\n+ dropOutStateSpace = new Pointer();\n+ if (dropOutSizeInBytes != 0)\n+ dropOutStateSpace = gCtx.allocate(instName, dropOutSizeInBytes);\n+ JCudnn.cudnnSetDropoutDescriptor(dropoutDesc, gCtx.getCudnnHandle(), 0, dropOutStateSpace, dropOutSizeInBytes, 12345);\n// Initialize RNN descriptor\nrnnDesc = new cudnnRNNDescriptor();\n@@ -109,18 +111,14 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\nworkSpace = new Pointer(); reserveSpace = new Pointer();\nsizeInBytes = getWorkspaceSize(T);\nif(sizeInBytes != 0)\n- workSpace = gCtx.allocate(sizeInBytes);\n+ workSpace = gCtx.allocate(instName, sizeInBytes);\nreserveSpaceSizeInBytes = 0;\nif(isTraining) {\nreserveSpaceSizeInBytes = getReservespaceSize(T);\nif (reserveSpaceSizeInBytes != 0) {\n- reserveSpace = gCtx.allocate(reserveSpaceSizeInBytes);\n+ reserveSpace = gCtx.allocate(instName, reserveSpaceSizeInBytes);\n}\n}\n- if (reserveSpaceSizeInBytes == 0) {\n- reserveSpace = gCtx.allocate(reserveSpaceSizeInBytes);\n- }\n-\n/*\nint numLinearLayers = getNumLinearLayers(rnnMode);\nfor(int i = 0; i < numLinearLayers; i++) {\n@@ -308,6 +306,7 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\nthrow new RuntimeException(e);\n}\n}\n+ workSpace = null;\nif(reserveSpaceSizeInBytes != 0) {\ntry {\ngCtx.cudaFreeHelper(instName, reserveSpace, DMLScript.EAGER_CUDA_FREE);\n@@ -315,5 +314,13 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\nthrow new RuntimeException(e);\n}\n}\n+ reserveSpace = null;\n+ if(dropOutSizeInBytes != 0) {\n+ try {\n+ gCtx.cudaFreeHelper(instName, dropOutStateSpace, DMLScript.EAGER_CUDA_FREE);\n+ } catch (DMLRuntimeException e) {\n+ throw new RuntimeException(e);\n+ }\n+ 
}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuMatMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuMatMult.java", "diff": "@@ -269,7 +269,7 @@ public class LibMatrixCuMatMult extends LibMatrixCUDA {\n// t(C) = t(B) %*% t(A)\nPointer output = null;\nif (outRLen != 1 && outCLen != 1) {\n- output = gCtx.allocate(outRLen * outCLen * sizeOfDataType);\n+ output = gCtx.allocate(instName, outRLen * outCLen * sizeOfDataType);\n} else {\n// no transpose required for vector output\noutput = C;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SinglePrecisionCudaSupportFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SinglePrecisionCudaSupportFunctions.java", "diff": "@@ -179,7 +179,7 @@ public class SinglePrecisionCudaSupportFunctions implements CudaSupportFunctions\n// during eviction: `evict -> devictToHost -> float2double -> allocate -> ensureFreeSpace -> evict`.\n// To avoid this recursion, it is necessary to perform this conversion in host.\nif(PERFORM_CONVERSION_ON_DEVICE && !isEviction) {\n- Pointer deviceDoubleData = gCtx.allocate(((long)dest.length)*Sizeof.DOUBLE);\n+ Pointer deviceDoubleData = gCtx.allocate(instName, ((long)dest.length)*Sizeof.DOUBLE);\nLibMatrixCUDA.float2double(gCtx, src, deviceDoubleData, dest.length);\ncudaMemcpy(Pointer.to(dest), deviceDoubleData, ((long)dest.length)*Sizeof.DOUBLE, cudaMemcpyDeviceToHost);\ngCtx.cudaFreeHelper(instName, deviceDoubleData, DMLScript.EAGER_CUDA_FREE);\n@@ -205,7 +205,7 @@ public class SinglePrecisionCudaSupportFunctions implements CudaSupportFunctions\n// TODO: Perform conversion on GPU using double2float and float2double kernels\nlong t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\nif(PERFORM_CONVERSION_ON_DEVICE) {\n- Pointer deviceDoubleData = gCtx.allocate(((long)src.length)*Sizeof.DOUBLE);\n+ Pointer deviceDoubleData = gCtx.allocate(instName, ((long)src.length)*Sizeof.DOUBLE);\ncudaMemcpy(deviceDoubleData, Pointer.to(src), ((long)src.length)*Sizeof.DOUBLE, cudaMemcpyHostToDevice);\nLibMatrixCUDA.double2float(gCtx, deviceDoubleData, dest, src.length);\ngCtx.cudaFreeHelper(instName, deviceDoubleData, DMLScript.EAGER_CUDA_FREE);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Fixed a memory leak in the GPU lstm builtin function and added a developer utility to debug such bugs in the future.
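The debugging utility boils down to: capture a stack trace per live allocation, key it by a fixed window of frames, and report live-pointer counts per call site, where high counts point at the leak. A minimal sketch of that idea with hypothetical names (long ids instead of jcuda Pointers):

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of allocation-site tracking for leak debugging.
public class LeakTrackerSketch {
    private final Map<Long, String> live = new HashMap<>(); // ptr id -> call site key
    private long next = 0;

    public long malloc() {
        StackTraceElement[] st = Thread.currentThread().getStackTrace();
        StringBuilder key = new StringBuilder();
        // fixed window of frames, analogous to DEBUG_MEMORY_LEAK_STACKTRACE_DEPTH
        for (int d = 2; d < Math.min(5, st.length); d++)
            key.append(st[d]).append(" <- ");
        long ptr = next++;
        live.put(ptr, key.toString());
        return ptr;
    }
    public void free(long ptr) { live.remove(ptr); }

    public void report() { // frequency per allocation site, like printPointers
        Map<String, Integer> freq = new HashMap<>();
        for (String site : live.values()) freq.merge(site, 1, Integer::sum);
        freq.forEach((site, n) -> System.out.println(">>" + site + " => " + n));
    }
    public static void main(String[] args) {
        LeakTrackerSketch t = new LeakTrackerSketch();
        long p = t.malloc();
        t.malloc();  // never freed -> surfaces in the report
        t.free(p);
        t.report();
    }
}
```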
49,738
07.07.2018 15:14:09
25,200
eb179b151b3dadf818caf92aefdc48d92a4454ba
[MINOR] Fix warnings memory profiling (imports, raw types)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "new_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "diff": "@@ -30,7 +30,6 @@ import org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.ConfigurableAPI;\nimport org.apache.sysml.api.DMLException;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -136,7 +136,7 @@ public class LocalVariableMap implements Cloneable\nint hash = System.identityHashCode(e.getValue());\nif( !dict.containsKey(hash) && e.getValue() instanceof CacheableData ) {\ndict.put(hash, e.getValue());\n- double size = ((CacheableData) e.getValue()).getDataSize();\n+ double size = ((CacheableData<?>) e.getValue()).getDataSize();\nif ((DMLScript.JMLC_MEMORY_STATISTICS) && (DMLScript.FINEGRAINED_STATISTICS))\nStatistics.maintainCPHeavyHittersMem(e.getKey(), size);\ntotal += size;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -791,6 +791,7 @@ public class Statistics\nreturn sb.toString();\n}\n+ @SuppressWarnings(\"unchecked\")\npublic static String getCPHeavyHittersMem(int num) {\nint n = _cpMemObjs.size();\nif ((n <= 0) || (num <= 0))\n@@ -815,7 +816,6 @@ public class Statistics\nres.append(String.format(\" %-\" + numPadLen + \"s\" + \" %-\" + maxNameLength + \"s\" + \" %s\\n\",\n\"#\", \"Object\", \"Memory\"));\n- // lots of futzing around to format strings...\nfor (int ix = 1; ix <= numHittersToDisplay; ix++) {\nString objName = entries[ix-1].getKey();\nString objSize = byteCountToDisplaySize(entries[ix-1].getValue());\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix warnings memory profiling (imports, raw types)
49,727
07.07.2018 18:40:25
25,200
63a1e2ac59f3201ab99a6e5e71636133eec96b1b
Fix accuracy issue paramserv BSP batch updates Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "package org.apache.sysml.runtime.controlprogram.paramserv;\nimport java.util.concurrent.Callable;\n-import java.util.stream.IntStream;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -30,10 +29,7 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.Timing;\n-import org.apache.sysml.runtime.functionobjects.Plus;\nimport org.apache.sysml.runtime.instructions.cp.ListObject;\n-import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n-import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\nimport org.apache.sysml.utils.Statistics;\npublic class LocalPSWorker extends PSWorker implements Callable<Void> {\n@@ -84,13 +80,12 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nListObject gradients = computeGradients(dataSize, totalIter, i, j);\n// Accumulate the intermediate gradients\n- accGradients = (accGradients==null) ?\n- ParamservUtils.copyList(gradients) :\n- accrueGradients(accGradients, gradients);\n+ accGradients = ParamservUtils.accrueGradients(accGradients, gradients);\n// Update the local model with gradients\nif( j < totalIter - 1 )\nparams = updateModel(params, gradients, i, j, totalIter);\n+ ParamservUtils.cleanupListObject(gradients);\n}\n// Push the gradients to ps\n@@ -193,14 +188,4 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nreturn gradients;\n}\n- private ListObject accrueGradients(ListObject accGradients, ListObject gradients) {\n- IntStream.range(0, accGradients.getLength()).forEach(i -> {\n- MatrixBlock mb1 = ((MatrixObject) accGradients.getData().get(i)).acquireRead();\n- MatrixBlock mb2 = ((MatrixObject) gradients.getData().get(i)).acquireRead();\n- mb1.binaryOperationsInPlace(new BinaryOperator(Plus.getPlusFnObject()), mb2);\n- ((MatrixObject) accGradients.getData().get(i)).release();\n- ((MatrixObject) gradients.getData().get(i)).release();\n- });\n- return accGradients;\n- }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "diff": "@@ -49,7 +49,8 @@ import org.apache.sysml.utils.Statistics;\npublic abstract class ParamServer\n{\n- protected final Log LOG = LogFactory.getLog(ParamServer.class.getName());\n+ protected static final Log LOG = LogFactory.getLog(ParamServer.class.getName());\n+ protected static final boolean ACCRUE_BSP_GRADIENTS = true;\n// worker input queues and global model\nprotected final Map<Integer, BlockingQueue<ListObject>> _modelMap;\n@@ -61,6 +62,7 @@ public abstract class ParamServer\nprivate final FunctionCallCPInstruction _inst;\nprivate final String _outputName;\nprivate final boolean[] _finishedStates; // Workers' finished states\n+ private ListObject _accGradients = null;\nprotected ParamServer(ListObject model, String aggFunc, Statement.PSUpdateType updateType, ExecutionContext ec, int workerNum) {\n// init worker queues and global model\n@@ -126,17 +128,25 @@ public abstract class ParamServer\ngradients.getDataSize() / 1024, 
workerID));\n}\n- // Update and redistribute the model\n- Timing tAgg = DMLScript.STATISTICS ? new Timing(true) : null;\n- _model = updateLocalModel(_ec, gradients, _model);\n- if (DMLScript.STATISTICS)\n- Statistics.accPSAggregationTime((long) tAgg.stop());\n-\n- // Redistribute model according to update type\nswitch(_updateType) {\ncase BSP: {\nsetFinishedState(workerID);\n+\n+ // Accumulate the intermediate gradients\n+ if( ACCRUE_BSP_GRADIENTS )\n+ _accGradients = ParamservUtils.accrueGradients(\n+ _accGradients, gradients, true);\n+ else\n+ updateGlobalModel(gradients);\n+ ParamservUtils.cleanupListObject(gradients);\n+\nif (allFinished()) {\n+ // Update the global model with accrued gradients\n+ if( ACCRUE_BSP_GRADIENTS ) {\n+ updateGlobalModel(_accGradients);\n+ _accGradients = null;\n+ }\n+\n// Broadcast the updated model\nresetFinishedStates();\nbroadcastModel();\n@@ -146,6 +156,7 @@ public abstract class ParamServer\nbreak;\n}\ncase ASP: {\n+ updateGlobalModel(gradients);\nbroadcastModel(workerID);\nbreak;\n}\n@@ -158,6 +169,13 @@ public abstract class ParamServer\n}\n}\n+ private void updateGlobalModel(ListObject gradients) {\n+ Timing tAgg = DMLScript.STATISTICS ? new Timing(true) : null;\n+ _model = updateLocalModel(_ec, gradients, _model);\n+ if (DMLScript.STATISTICS)\n+ Statistics.accPSAggregationTime((long) tAgg.stop());\n+ }\n+\n/**\n* A service method for updating model with gradients\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "diff": "@@ -50,6 +50,7 @@ import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\n+import org.apache.sysml.runtime.functionobjects.Plus;\nimport org.apache.sysml.runtime.instructions.cp.Data;\nimport org.apache.sysml.runtime.instructions.cp.ListObject;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -57,6 +58,7 @@ import org.apache.sysml.runtime.matrix.MetaDataFormat;\nimport org.apache.sysml.runtime.matrix.data.InputInfo;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\npublic class ParamservUtils {\n@@ -88,6 +90,10 @@ public class ParamservUtils {\npublic static void cleanupListObject(ExecutionContext ec, String lName) {\nListObject lo = (ListObject) ec.removeVariable(lName);\n+ cleanupListObject(lo);\n+ }\n+\n+ public static void cleanupListObject(ListObject lo) {\nlo.getData().forEach(ParamservUtils::cleanupData);\n}\n@@ -258,4 +264,22 @@ public class ParamservUtils {\nString fname = cfn[1];\nreturn ec.getProgram().getFunctionProgramBlock(ns, fname);\n}\n+\n+ public static ListObject accrueGradients(ListObject accGradients, ListObject gradients) {\n+ return accrueGradients(accGradients, gradients, false);\n+ }\n+\n+ public static ListObject accrueGradients(ListObject accGradients, ListObject gradients, boolean par) {\n+ if (accGradients == null)\n+ return ParamservUtils.copyList(gradients);\n+ IntStream range = IntStream.range(0, accGradients.getLength());\n+ (par ? 
range.parallel() : range).forEach(i -> {\n+ MatrixBlock mb1 = ((MatrixObject) accGradients.getData().get(i)).acquireRead();\n+ MatrixBlock mb2 = ((MatrixObject) gradients.getData().get(i)).acquireRead();\n+ mb1.binaryOperationsInPlace(new BinaryOperator(Plus.getPlusFnObject()), mb2);\n+ ((MatrixObject) accGradients.getData().get(i)).release();\n+ ((MatrixObject) gradients.getData().get(i)).release();\n+ });\n+ return accGradients;\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2403] Fix accuracy issue paramserv BSP batch updates Closes #791.
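The core of this fix is the centralized gradient accumulation in ParamservUtils.accrueGradients, which adds each worker gradient into an accumulator matrix element-wise and in place. Below is a minimal sketch of that single step, assuming SystemML on the classpath and using toy 2x2 blocks in place of real gradient matrices; the class name is made up.

import org.apache.sysml.runtime.functionobjects.Plus;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.matrix.operators.BinaryOperator;

public class AccrueSketch {
  public static void main(String[] args) {
    // dense 2x2 stand-ins for an accumulator and one new gradient
    MatrixBlock acc = new MatrixBlock(2, 2, false);
    acc.quickSetValue(0, 0, 1.0);
    MatrixBlock grad = new MatrixBlock(2, 2, false);
    grad.quickSetValue(0, 0, 2.0);
    // in-place element-wise plus, as in ParamservUtils.accrueGradients
    acc.binaryOperationsInPlace(new BinaryOperator(Plus.getPlusFnObject()), grad);
    System.out.println(acc.quickGetValue(0, 0)); // 3.0
  }
}

Accumulating first and applying the aggregation function once after all workers have finished (the ACCRUE_BSP_GRADIENTS path) trades a little extra memory for a single global model update per BSP superstep instead of one per worker push.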
49,738
08.07.2018 12:54:39
25,200
e83ae65349d8f479f7bad60f551cd145527b6071
Fix IPA for function calls w/ unknown partial binding This patch fixes the robustness of IPA for handling function calls with unknown statistics and partial output bindings (i.e., more function output parameters than bound function call outputs).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java", "diff": "@@ -634,28 +634,21 @@ public class InterProceduralAnalysis\n}\n}\n- private static void extractFunctionCallUnknownReturnStatistics( FunctionStatement fstmt, FunctionOp fop, LocalVariableMap callVars )\n- {\n+ private static void extractFunctionCallUnknownReturnStatistics(FunctionStatement fstmt, FunctionOp fop, LocalVariableMap callVars) {\nArrayList<DataIdentifier> foutputOps = fstmt.getOutputParams();\nString[] outputVars = fop.getOutputVariableNames();\nString fkey = fop.getFunctionKey();\n-\n- try\n- {\n- for( int i=0; i<foutputOps.size(); i++ )\n- {\n+ try {\n+ //robustness for subset of bound output variables\n+ int olen = Math.min(foutputOps.size(), outputVars.length);\n+ for( int i=0; i<olen; i++ ) {\nDataIdentifier di = foutputOps.get(i);\nString pvarname = outputVars[i]; //name in calling program\n-\nif( di.getDataType()==DataType.MATRIX )\n- {\n- MatrixObject moOut = createOutputMatrix(-1, -1, -1);\n- callVars.put(pvarname, moOut);\n+ callVars.put(pvarname, createOutputMatrix(-1, -1, -1));\n}\n}\n- }\n- catch( Exception ex )\n- {\n+ catch( Exception ex ) {\nthrow new HopsException( \"Failed to extract output statistics of function \"+fkey+\".\", ex);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "diff": "@@ -32,6 +32,8 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nprivate final static String TEST_NAME2 = \"FunPotpourriComments\";\nprivate final static String TEST_NAME3 = \"FunPotpourriNoReturn2\";\nprivate final static String TEST_NAME4 = \"FunPotpourriEval\";\n+ private final static String TEST_NAME5 = \"FunPotpourriSubsetReturn\";\n+\nprivate final static String TEST_DIR = \"functions/misc/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FunctionPotpourriTest.class.getSimpleName() + \"/\";\n@@ -43,6 +45,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"R\" }) );\n}\n@Test\n@@ -65,6 +68,11 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nrunFunctionTest( TEST_NAME4, false );\n}\n+ @Test\n+ public void testFunctionSubsetReturn() {\n+ runFunctionTest( TEST_NAME5, false );\n+ }\n+\nprivate void runFunctionTest(String testName, boolean error) {\nTestConfiguration config = getTestConfiguration(testName);\nloadTestConfiguration(config);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/FunPotpourriSubsetReturn.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+arima_residuals = function(Matrix[Double] weights, Matrix[Double] X, Integer p, Integer P, Integer q, Integer Q, Integer s, String solver) return (Matrix[Double] errs, Matrix[Double] combined_weights){\n+ combined_weights = weights\n+ if (p>0 & P>0)\n+ combined_weights = rbind(combined_weights, matrix(weights[1:p,] %*% t(weights[p+1:p+P,]), rows=p*P, cols=1))\n+ b = X[,2:ncol(X)]%*%combined_weights\n+ errs = X[,1] - b\n+}\n+\n+X = matrix(1, 1000, 1)\n+p = 2\n+d = 0\n+q = 0\n+P = 0\n+D = 0\n+Q = 0\n+s = 0\n+totparamcols = p+P+Q+q+p*P\n+num_rows = nrow(X)\n+\n+if(num_rows <= d)\n+ print(\"non-seasonal differencing order should be smaller than length of the time-series\")\n+if(num_rows <= s*D)\n+ print(\"seasonal differencing order should be smaller than number of observations divided by length of season\")\n+\n+Z = cbind (X[1:nrow(X),], matrix(0, nrow(X), totparamcols))\n+weights = matrix(\"0.459982 0.673987\", 2, 1)\n+\n+f1 = arima_residuals(weights, Z, p, P, q, Q, s, \"\")\n+f2 = arima_residuals(weights, Z, p, P, q, Q, s, \"\")\n+print(\"out: \" + sum(f1-f2))\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2428] Fix IPA for function calls w/ unknown partial binding This patch fixes the robustness of IPA for handling function calls with unknown statistics and partial output bindings (i.e., more function output parameters than bound function call outputs).
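The essence of the fix is visible in the new test: arima_residuals declares two outputs (errs, combined_weights) while the call site binds only f1, so IPA must iterate over the minimum of declared and bound outputs. A standalone illustration of that guard follows; the output names are taken from the test script, everything else is made up for the example.

public class PartialBindingSketch {
  public static void main(String[] args) {
    String[] declared = {"errs", "combined_weights"}; // function output params
    String[] bound = {"f1"}; // the call site binds only the first output
    // robustness for subset of bound output variables, as in the patch
    int olen = Math.min(declared.length, bound.length);
    for (int i = 0; i < olen; i++)
      System.out.println(declared[i] + " -> " + bound[i]);
  }
}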
49,738
09.07.2018 20:30:49
25,200
a04261d3571b9177ab638c3724886e232e54f190
[MINOR] Improved average-case sparsity estimates of binary operations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -1085,10 +1085,12 @@ public class OptimizerUtils\ncase MIN:\ncase MAX:\ncase OR:\n- ret = Math.min(1, sp1 + sp2); break;\n+ ret = worstcase ? Math.min(1, sp1 + sp2) :\n+ sp1 + sp2 - sp1 * sp2; break;\ncase MULT:\ncase AND:\n- ret = Math.min(sp1, sp2); break;\n+ ret = worstcase ? Math.min(sp1, sp2) :\n+ sp1 * sp2; break;\ncase DIV:\nret = Math.min(1, sp1 + (1-sp2)); break;\ncase MODULUS:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved average-case sparsity estimates of binary operations
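Under the assumption of independently placed non-zeros, P(a or b non-zero) = sp1 + sp2 - sp1*sp2 and P(a and b non-zero) = sp1*sp2, which is exactly what the rewrite plugs in for min/max/or and mult/and. A quick worked comparison against the retained worst-case bounds, with arbitrarily chosen input sparsities:

public class SparsityEstimates {
  public static void main(String[] args) {
    double sp1 = 0.1, sp2 = 0.2;
    // average case under independence
    double spOr  = sp1 + sp2 - sp1 * sp2; // min/max/or: 0.28
    double spAnd = sp1 * sp2;             // mult/and:   0.02
    // worst case, still used when worstcase == true
    double spOrWc  = Math.min(1, sp1 + sp2); // 0.30
    double spAndWc = Math.min(sp1, sp2);     // 0.10
    System.out.println(spOr + " " + spAnd + " " + spOrWc + " " + spAndWc);
  }
}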
49,760
11.07.2018 20:53:05
25,200
58ab127619549b39a91480a79b087033a3f48b3a
Initial sparsity estimator based on layered graphs Closes
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.hops.estim;\n+\n+import org.apache.commons.lang.NotImplementedException;\n+import org.apache.commons.math3.distribution.ExponentialDistribution;\n+import org.apache.commons.math3.random.Well1024a;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.List;\n+\n+public class EstimatorLayeredGraph extends SparsityEstimator {\n+\n+ private static final int ROUNDS = 128;\n+ private final int _rounds;\n+\n+ public EstimatorLayeredGraph() {\n+ this(ROUNDS);\n+ }\n+\n+ public EstimatorLayeredGraph(int rounds) {\n+ _rounds = rounds;\n+ }\n+\n+ @Override\n+ public double estim(MMNode root) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m1, MatrixBlock m2){\n+ int layer = 3;\n+ LayeredGraph LGraph = new LayeredGraph(m1, m2);\n+ //lambda is not the mean, if lambda is 2 hand in 1/2\n+ ExponentialDistribution random = new ExponentialDistribution(new Well1024a(), 1);\n+ for (int h = 0; h < LGraph.nodes.size(); h++) {\n+ if (LGraph.nodes.get(h).getY() == 1) {\n+ double[] doubArray = new double[_rounds];\n+ for (int g = 0; g < _rounds; g++)\n+ doubArray[g] = random.sample();\n+ LGraph.nodes.get(h).setVector(doubArray);\n+ }\n+ }\n+ // get r for nodes of upper layer\n+ for (int h = 0; h < LGraph.nodes.size(); h++) {\n+ if (LGraph.nodes.get(h).getY() == layer) {\n+ double[] ret = recr(_rounds, LGraph.nodes.get(h));\n+ if(ret != null)\n+ LGraph.nodes.get(h).setVector(ret);\n+ LGraph.nodes.get(h).setValue(\n+ calcNNZ(LGraph.nodes.get(h).getVector(), _rounds));\n+ }\n+ }\n+ //calc final sparsity\n+ double nnz = LGraph.nodes.stream().filter(n -> n.getY()==layer)\n+ .mapToDouble(n -> n.getValue()).sum();\n+ return nnz / m1.getNumRows() / m2.getNumColumns();\n+ }\n+\n+\n+ public double[] recr(int numr, Node tempnode) {\n+ if (tempnode.getInput().isEmpty())\n+ return (tempnode.getY() == 1) ? 
tempnode.getVector() : null;\n+ else if (tempnode.getInput().size() == 1)\n+ return recr(numr, tempnode.getInput().get(0));\n+ else {\n+ return tempnode.getInput().stream()\n+ .map(n -> recr(numr, n)).filter(v -> v != null)\n+ .reduce((v1,v2) -> min(v1,v2)).get();\n+ }\n+ }\n+\n+ private double[] min(double[] v1, double[] v2) {\n+ double[] ret = new double[v1.length];\n+ for(int i=0; i<v1.length; i++)\n+ ret[i] = Math.min(v1[i], v2[i]);\n+ return ret;\n+ }\n+\n+ public double calcNNZ(double[] inpvec, int numr) {\n+ return (inpvec != null && inpvec.length > 0) ?\n+ (numr - 1) / Arrays.stream(inpvec).sum() : 0;\n+ }\n+\n+ private class LayeredGraph {\n+ List<Node> nodes = new ArrayList<>();\n+\n+ public LayeredGraph(MatrixBlock m1, MatrixBlock m2) {\n+ createNodes(m1, 1, nodes);\n+ createNodes(m2, 2, nodes);\n+ }\n+ }\n+\n+ public void createNodes(MatrixBlock m, int mpos, List<Node> nodes) {\n+ if( m.isEmpty() )\n+ return;\n+\n+ Node nodeout = null;\n+ Node nodein = null;\n+ //TODO perf: separate handling sparse and dense\n+ //TODO perf: hash lookups for existing nodes\n+ for (int i = 0; i < m.getNumRows(); i++) {\n+ for (int j = 0; j < m.getNumColumns(); j++) {\n+ if (m.getValue(i, j) == 0) continue;\n+ boolean alreadyExists = false;\n+ boolean alreadyExists2 = false;\n+ for (int k = 0; k < nodes.size(); k++) {\n+ if (nodes.get(k).getX() == i && nodes.get(k).getY() == mpos) {\n+ alreadyExists = true;\n+ }\n+ }\n+ if (!alreadyExists) {\n+ nodein = new Node(i, mpos);\n+ nodes.add(nodein);\n+ } else {\n+ for (int k = 0; k < nodes.size(); k++) {\n+ if (nodes.get(k).getX() == i && nodes.get(k).getY() == mpos) {\n+ nodein = nodes.get(k);\n+ }\n+ }\n+ }\n+ for (int k = 0; k < nodes.size(); k++) {\n+ if (nodes.get(k).getX() == j && nodes.get(k).getY() == mpos + 1) {\n+ alreadyExists2 = true;\n+ }\n+ }\n+ if (!alreadyExists2) {\n+ nodeout = new Node(j, mpos + 1);\n+ nodes.add(nodeout);\n+\n+ } else {\n+ for (int k = 0; k < nodes.size(); k++) {\n+ if (nodes.get(k).getX() == j && nodes.get(k).getY() == mpos + 1) {\n+ nodeout = nodes.get(k);\n+ }\n+ }\n+ }\n+ nodeout.addnz(nodein);\n+ }\n+ }\n+ }\n+\n+ private static class Node {\n+ int xpos;\n+ int ypos;\n+ double[] r_vector;\n+ List<Node> input = new ArrayList<>();\n+ double value;\n+\n+ public Node(int x, int y) {\n+ xpos = x;\n+ ypos = y;\n+ }\n+\n+ public void setValue(double inp) {\n+ value = inp;\n+ }\n+\n+ public double getValue() {\n+ return value;\n+ }\n+\n+ public List<Node> getInput() {\n+ return input;\n+ }\n+\n+ public int getX() {\n+ return xpos;\n+ }\n+\n+ public int getY() {\n+ return ypos;\n+ }\n+\n+ public double[] getVector() {\n+ return r_vector;\n+ }\n+\n+ public void setVector(double[] r_input) {\n+ r_vector = r_input;\n+ }\n+\n+ public void addnz(Node dest) {\n+ input.add(dest);\n+ }\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2291] Initial sparsity estimator based on layered graphs Closes #796.
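The estimator's core idea, isolated from the graph bookkeeping: each leaf node draws r exponential samples, inner nodes combine their inputs by component-wise minimum, and a node's non-zero contribution is estimated as (r - 1) / sum(vector), matching calcNNZ above. A toy sketch with r = 3 and fixed samples in place of random draws:

import java.util.Arrays;

public class LayeredGraphSketch {
  public static void main(String[] args) {
    // fixed stand-ins for two leaf r-vectors (normally exponential samples)
    double[] v1 = {0.5, 1.2, 0.3};
    double[] v2 = {0.7, 0.4, 0.9};
    // component-wise minimum, as in the estimator's reduce step
    double[] min = new double[v1.length];
    for (int i = 0; i < v1.length; i++)
      min[i] = Math.min(v1[i], v2[i]);
    // estimated count of reachable leaves: (r - 1) / sum(min)
    double est = (min.length - 1) / Arrays.stream(min).sum();
    System.out.println(est); // (3 - 1) / 1.2 = 1.666...
  }
}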
49,738
12.07.2018 15:54:29
25,200
b429551dbd9917746f0001c74c16afbdb8231592
[MINOR] Simplify and cleanup GPU-specific rewrites (rewrite utils)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "diff": "@@ -24,7 +24,6 @@ import java.util.HashMap;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.hops.AggUnaryOp;\n-import org.apache.sysml.hops.BinaryOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\n@@ -35,8 +34,6 @@ import org.apache.sysml.hops.Hop.ReOrgOp;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.DnnOp;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.hops.ReorgOp;\n-import org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\n/*\n@@ -97,8 +94,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\nreturn;\n//recursively process children\n- for( int i=0; i<hop.getInput().size(); i++)\n- {\n+ for( int i=0; i<hop.getInput().size(); i++) {\nHop hi = hop.getInput().get(i);\n//process childs recursively first (to allow roll-up)\n@@ -116,11 +112,11 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n}\nprivate static boolean isBiasAdd(Hop h) {\n- return h instanceof DnnOp && ((DnnOp) h).getOp() == OpOpDnn.BIASADD;\n+ return HopRewriteUtils.isDnn(h, OpOpDnn.BIASADD);\n}\nprivate static boolean isBiasMultiply(Hop h) {\n- return h instanceof DnnOp && ((DnnOp) h).getOp() == OpOpDnn.BIASMULT;\n+ return HopRewriteUtils.isDnn(h, OpOpDnn.BIASMULT);\n}\nprivate static boolean fitsOnGPU(Hop h, double multiplier) {\n@@ -168,24 +164,22 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n}\nprivate static boolean isUnaryMinus(Hop h) {\n- return h instanceof BinaryOp && ((BinaryOp)h).getOp() == OpOp2.MINUS\n- && Hop.computeSizeInformation(h.getInput().get(0)) == 0;\n+ return HopRewriteUtils.isBinary(h, OpOp2.MINUS)\n+ && HopRewriteUtils.isLiteralOfValue(h.getInput().get(0), 0);\n}\nprivate static boolean isOneDivideBySqrt(Hop h) {\n- return h instanceof BinaryOp && ((BinaryOp)h).getOp() == OpOp2.DIV\n- && h.getInput().get(1) instanceof UnaryOp\n- && ((UnaryOp)h.getInput().get(1)).getOp() == OpOp1.SQRT\n- && Hop.computeSizeInformation(h.getInput().get(0)) == 1;\n+ return HopRewriteUtils.isBinary(h, OpOp2.DIV)\n+ && HopRewriteUtils.isUnary(h.getInput().get(1), OpOp1.SQRT)\n+ && HopRewriteUtils.isLiteralOfValue(h.getInput().get(0), 1);\n}\n- private static Hop channelSums(Hop parent, Hop hi, int pos)\n- {\n+ private static Hop channelSums(Hop parent, Hop hi, int pos) {\nif(hi instanceof AggUnaryOp) {\nAggUnaryOp hop = (AggUnaryOp) hi;\n// output = rowSums(matrix(colSums(x), rows=numChannels, cols=imgSize*imgSize))\nif( hop.getOp() == AggOp.SUM && hop.getDirection() == Direction.Row\n- && hop.getInput().get(0) instanceof ReorgOp && ((ReorgOp)hop.getInput().get(0)).getOp() == ReOrgOp.RESHAPE) {\n+ && HopRewriteUtils.isReorg(hop.getInput().get(0), ReOrgOp.RESHAPE) ) {\nHop colSumsInput = hop.getInput().get(0).getInput().get(0);\nif(colSumsInput instanceof AggUnaryOp && ((AggUnaryOp)colSumsInput).getOp() == AggOp.SUM && ((AggUnaryOp)colSumsInput).getDirection() == Direction.Col) {\nArrayList<Hop> inHops = new ArrayList<Hop>();\n@@ -206,8 +200,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\nreturn hi;\n}\n- private static Hop batchNormTest(Hop parent, Hop hi, int pos)\n- {\n+ private static Hop batchNormTest(Hop parent, Hop hi, int pos) {\n// norm = 
bias_multiply(bias_add(X, -mean), 1/sqrt(var+eps))\n// hi = bias_add(bias_multiply(norm, gamma), beta)\n// 2x for input and output and 1x for overhead\n@@ -218,7 +211,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n&& isOneDivideBySqrt(getSecondInput(norm))) {\ndouble eps = 0;\nHop var = getFirstInput(getSecondInput(getSecondInput(norm)));\n- if(var instanceof BinaryOp && ((BinaryOp) var).getOp() == OpOp2.PLUS &&\n+ if( HopRewriteUtils.isBinary(var, OpOp2.PLUS) &&\n(getFirstInput(var) instanceof LiteralOp || getSecondInput(var) instanceof LiteralOp)) {\n// eps + ema_var\nif(getFirstInput(var) instanceof LiteralOp) {\n@@ -253,5 +246,4 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\nreturn hi;\n}\n-\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Simplify and cleanup GPU-specific rewrites (rewrite utils)
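The pattern behind the cleanup, shown standalone: one shared predicate replaces scattered instanceof-plus-getOp checks, so rewrites like isUnaryMinus read as one-liners. This mirrors the HopRewriteUtils helpers used in the patch and assumes the SystemML hops package on the classpath; it is a sketch of the pattern, not the utility's actual code.

import org.apache.sysml.hops.BinaryOp;
import org.apache.sysml.hops.Hop;
import org.apache.sysml.hops.Hop.OpOp2;
import org.apache.sysml.hops.rewrite.HopRewriteUtils;

public class RewritePredicates {
  // shared predicate instead of repeated instanceof checks
  static boolean isBinary(Hop h, OpOp2 type) {
    return h instanceof BinaryOp && ((BinaryOp) h).getOp() == type;
  }
  // unary minus recognized as (0 - x), via the consolidated helpers
  static boolean isUnaryMinus(Hop h) {
    return isBinary(h, OpOp2.MINUS)
      && HopRewriteUtils.isLiteralOfValue(h.getInput().get(0), 0);
  }
}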
49,738
12.07.2018 17:11:14
25,200
d065c3d13c15f8a8bb4c5e882856e6b0d648675d
Codegen support for rowMeans in row templates This patch adds codegen support for unary aggregate rowMeans to the codegen row templates. In detail, this includes extended compiler support for the necessary dense/sparse vector primitives. Furthermore, this also cleans up some convenience methods for full aggregates on matrix blocks.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -102,7 +102,7 @@ public class SpoofCompiler\nprivate static final Log LOG = LogFactory.getLog(SpoofCompiler.class.getName());\n//internal configuration flags\n- public static final boolean LDEBUG = false;\n+ public static final boolean LDEBUG = true;\npublic static CompilerType JAVA_COMPILER = CompilerType.JANINO;\npublic static PlanSelector PLAN_SEL_POLICY = PlanSelector.FUSE_COST_BASED_V2;\npublic static final IntegrationType INTEGRATION = IntegrationType.RUNTIME;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "diff": "@@ -32,7 +32,8 @@ public class CNodeUnary extends CNode\n{\npublic enum UnaryType {\nLOOKUP_R, LOOKUP_C, LOOKUP_RC, LOOKUP0, //codegen specific\n- ROW_SUMS, ROW_SUMSQS, ROW_MINS, ROW_MAXS, ROW_COUNTNNZS, //codegen specific\n+ ROW_SUMS, ROW_SUMSQS, ROW_COUNTNNZS, //codegen specific\n+ ROW_MEANS, ROW_MINS, ROW_MAXS,\nVECT_EXP, VECT_POW2, VECT_MULT2, VECT_SQRT, VECT_LOG,\nVECT_ABS, VECT_ROUND, VECT_CEIL, VECT_FLOOR, VECT_SIGN,\nVECT_SIN, VECT_COS, VECT_TAN, VECT_ASIN, VECT_ACOS, VECT_ATAN,\n@@ -54,6 +55,7 @@ public class CNodeUnary extends CNode\ncase ROW_SUMSQS:\ncase ROW_MINS:\ncase ROW_MAXS:\n+ case ROW_MEANS:\ncase ROW_COUNTNNZS: {\nString vectName = StringUtils.capitalize(name().substring(4, name().length()-1).toLowerCase());\nreturn sparse ? \" double %TMP% = LibSpoofPrimitives.vect\"+vectName+\"(%IN1v%, %IN1i%, %POS1%, alen, len);\\n\":\n@@ -249,6 +251,7 @@ public class CNodeUnary extends CNode\ncase ROW_SUMSQS: return \"u(Rsq+)\";\ncase ROW_MINS: return \"u(Rmin)\";\ncase ROW_MAXS: return \"u(Rmax)\";\n+ case ROW_MEANS: return \"u(Rmean)\";\ncase ROW_COUNTNNZS: return \"u(Rnnz)\";\ncase VECT_EXP:\ncase VECT_POW2:\n@@ -319,6 +322,7 @@ public class CNodeUnary extends CNode\ncase ROW_SUMSQS:\ncase ROW_MINS:\ncase ROW_MAXS:\n+ case ROW_MEANS:\ncase ROW_COUNTNNZS:\ncase EXP:\ncase LOOKUP_R:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -67,7 +67,7 @@ import org.apache.sysml.runtime.matrix.data.Pair;\npublic class TemplateRow extends TemplateBase\n{\n- private static final Hop.AggOp[] SUPPORTED_ROW_AGG = new AggOp[]{AggOp.SUM, AggOp.MIN, AggOp.MAX};\n+ private static final Hop.AggOp[] SUPPORTED_ROW_AGG = new AggOp[]{AggOp.SUM, AggOp.MIN, AggOp.MAX, AggOp.MEAN};\nprivate static final Hop.OpOp1[] SUPPORTED_VECT_UNARY = new OpOp1[]{\nOpOp1.EXP, OpOp1.SQRT, OpOp1.LOG, OpOp1.ABS, OpOp1.ROUND, OpOp1.CEIL, OpOp1.FLOOR, OpOp1.SIGN,\nOpOp1.SIN, OpOp1.COS, OpOp1.TAN, OpOp1.ASIN, OpOp1.ACOS, OpOp1.ATAN, OpOp1.SINH, OpOp1.COSH, OpOp1.TANH,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java", "diff": "@@ -340,6 +340,14 @@ public class LibSpoofPrimitives\nreturn alen;\n}\n+ public static double vectMean(double[] a, int ai, int len) {\n+ return vectSum(a, ai, len) / len;\n+ }\n+\n+ public static double vectMean(double[] avals, int[] aix, int ai, int alen, int len) {\n+ return vectSum(avals, aix, ai, alen, len) / len;\n+ }\n+\n//custom 
vector div\npublic static void vectDivAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -64,6 +64,7 @@ import org.apache.sysml.runtime.functionobjects.RevIndex;\nimport org.apache.sysml.runtime.functionobjects.SortIndex;\nimport org.apache.sysml.runtime.functionobjects.SwapIndex;\nimport org.apache.sysml.runtime.functionobjects.TernaryValueFunction.ValueFunctionWithConstant;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.CM_COV_Object;\nimport org.apache.sysml.runtime.instructions.cp.KahanObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n@@ -792,18 +793,27 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nreturn min;\n}\n+ /**\n+ * Wrapper method for reduceall-mean of a matrix.\n+ *\n+ * @return ?\n+ */\n+ public double mean() {\n+ MatrixBlock out = new MatrixBlock(1, 3, false);\n+ LibMatrixAgg.aggregateUnaryMatrix(this, out,\n+ InstructionUtils.parseBasicAggregateUnaryOperator(\"uamean\", 1));\n+ return out.quickGetValue(0, 0);\n+ }\n+\n/**\n* Wrapper method for reduceall-min of a matrix.\n*\n* @return ?\n*/\npublic double min() {\n- //construct operator\n- AggregateOperator aop = new AggregateOperator(Double.POSITIVE_INFINITY, Builtin.getBuiltinFnObject(\"min\"));\n- AggregateUnaryOperator auop = new AggregateUnaryOperator( aop, ReduceAll.getReduceAllFnObject());\n- //execute operation\nMatrixBlock out = new MatrixBlock(1, 1, false);\n- LibMatrixAgg.aggregateUnaryMatrix(this, out, auop);\n+ LibMatrixAgg.aggregateUnaryMatrix(this, out,\n+ InstructionUtils.parseBasicAggregateUnaryOperator(\"uamin\", 1));\nreturn out.quickGetValue(0, 0);\n}\n@@ -813,12 +823,9 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n* @return ?\n*/\npublic double max() {\n- //construct operator\n- AggregateOperator aop = new AggregateOperator(Double.NEGATIVE_INFINITY, Builtin.getBuiltinFnObject(\"max\"));\n- AggregateUnaryOperator auop = new AggregateUnaryOperator( aop, ReduceAll.getReduceAllFnObject());\n- //execute operation\nMatrixBlock out = new MatrixBlock(1, 1, false);\n- LibMatrixAgg.aggregateUnaryMatrix(this, out, auop);\n+ LibMatrixAgg.aggregateUnaryMatrix(this, out,\n+ InstructionUtils.parseBasicAggregateUnaryOperator(\"uamax\", 1));\nreturn out.quickGetValue(0, 0);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CPlanVectorPrimitivesTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CPlanVectorPrimitivesTest.java", "diff": "@@ -89,6 +89,16 @@ public class CPlanVectorPrimitivesTest extends AutomatedTestBase\ntestVectorAggPrimitive(UnaryType.ROW_MAXS, InputType.VECTOR_SPARSE);\n}\n+ @Test\n+ public void testVectorMeansDense() {\n+ testVectorAggPrimitive(UnaryType.ROW_MEANS, InputType.VECTOR_DENSE);\n+ }\n+\n+ @Test\n+ public void testVectorMeansSparse() {\n+ testVectorAggPrimitive(UnaryType.ROW_MEANS, InputType.VECTOR_SPARSE);\n+ }\n+\n//support unary vector primitives (pow2/mult2 current excluded because not unary)\n@Test\n@@ -716,7 +726,8 @@ public class CPlanVectorPrimitivesTest extends AutomatedTestBase\nMatrixBlock in = MatrixBlock.randOperations(m, n, sparsity, -1, 1, \"uniform\", 7);\n//get vector primitive via 
reflection\n- String meName = \"vect\"+StringUtils.camelize(aggtype.name().split(\"_\")[1].substring(0, 3));\n+ String tmp = StringUtils.camelize(aggtype.name().split(\"_\")[1]);\n+ String meName = \"vect\"+tmp.substring(0, tmp.length()-1);\nMethod me = (type1 == InputType.VECTOR_DENSE) ?\nLibSpoofPrimitives.class.getMethod(meName, new Class[]{double[].class, int.class, int.class}) :\nLibSpoofPrimitives.class.getMethod(meName, new Class[]{double[].class, int[].class, int.class, int.class, int.class});\n@@ -735,6 +746,7 @@ public class CPlanVectorPrimitivesTest extends AutomatedTestBase\ncase ROW_SUMS: ret2 = in2.sum(); break;\ncase ROW_MAXS: ret2 = in2.max(); break;\ncase ROW_MINS: ret2 = in2.min(); break;\n+ case ROW_MEANS: ret2 = in2.mean(); break;\n}\n//compare results\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "diff": "@@ -80,6 +80,7 @@ public class RowAggTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME41 = TEST_NAME+\"41\"; //X*rowSums(X/seq(1,N)+t(seq(M,1)))\nprivate static final String TEST_NAME42 = TEST_NAME+\"42\"; //X/rowSums(min(X, Y, Z))\nprivate static final String TEST_NAME43 = TEST_NAME+\"43\"; //bias_add(X,B) + bias_mult(X,B)\n+ private static final String TEST_NAME44 = TEST_NAME+\"44\"; //maxpool(X - mean(X));\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -91,7 +92,7 @@ public class RowAggTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for(int i=1; i<=43; i++)\n+ for(int i=1; i<=44; i++)\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i, new String[] { String.valueOf(i) }) );\n}\n@@ -740,6 +741,21 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME43, false, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenRowAggRewrite44CP() {\n+ testCodegenIntegration( TEST_NAME44, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg44CP() {\n+ testCodegenIntegration( TEST_NAME44, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg44SP() {\n+ testCodegenIntegration( TEST_NAME44, false, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/rowAggPattern44.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+imgSize=8\n+numImg=16\n+numChannels=4\n+poolSize1=imgSize*imgSize\n+poolSize2=1\n+stride=1\n+pad=0\n+\n+X = matrix(seq(1, numImg*numChannels*imgSize*imgSize), numImg, numChannels*imgSize*imgSize, byrow=TRUE)\n+X = X - rowMeans(X)\n+\n+pad_image <- function(img, Hin, Win, padh, padw){\n+ C = nrow(img)\n+ img_padded = matrix(0, C, (Hin+2*padh)*(Win+2*padw)) # zeros\n+ for (c in 1:C) {\n+ img_slice = matrix(img[c,], Hin, Win, byrow=TRUE) # depth slice C reshaped\n+ img_padded_slice = matrix(0, Hin+2*padh, Win+2*padw)\n+ img_padded_slice[(padh+1):(padh+Hin), (padw+1):(padw+Win)] = img_slice\n+ img_padded[c,] = matrix(t(img_padded_slice), 1, (Hin+2*padh)*(Win+2*padw)) # reshape\n+ }\n+ img_padded\n+}\n+\n+im2col <- function(img, Hin, Win, Hf, Wf, strideh, stridew) {\n+ C = nrow(img)\n+ Hout = as.integer((Hin - Hf) / strideh + 1)\n+ Wout = as.integer((Win - Wf) / stridew + 1)\n+\n+ img_cols = matrix(0, C*Hf*Wf, Hout*Wout, byrow=TRUE) # zeros\n+ for (hout in 1:Hout) { # all output rows\n+ hin = (hout-1) * strideh + 1\n+ for (wout in 1:Wout) { # all output columns\n+ win = (wout-1) * stridew + 1\n+ # Extract a local patch of the input image corresponding spatially to the filter sizes.\n+ img_patch = matrix(0, C, Hf*Wf, byrow=TRUE) # zeros\n+ for (c in 1:C) { # all channels\n+ img_slice = matrix(img[c,], Hin, Win, byrow=TRUE) # reshape\n+ img_patch[c,] = matrix(t(img_slice[hin:(hin+Hf-1), win:(win+Wf-1)]), 1, Hf*Wf)\n+ }\n+ img_cols[,(hout-1)*Wout + wout] = matrix(t(img_patch), C*Hf*Wf, 1) # reshape\n+ }\n+ }\n+ img_cols\n+}\n+\n+max_pool <- function(X, N, C, Hin, Win, Hf, Wf,\n+ strideh, stridew) {\n+ Hout = as.integer((Hin - Hf) / strideh + 1)\n+ Wout = as.integer((Win - Wf) / stridew + 1)\n+\n+ # Create output volume\n+ out = matrix(0, N, C*Hout*Wout, byrow=TRUE)\n+\n+ # Max pooling - im2col implementation\n+ for (n in 1:N) { # all examples\n+ img = matrix(X[n,], C, Hin*Win, byrow=TRUE) # reshape\n+ img_maxes = matrix(0, C, Hout*Wout, byrow=TRUE) # zeros\n+\n+ for (c in 1:C) { # all channels\n+ # Extract local image slice patches into columns with im2col, of shape (Hf*Wf, Hout*Wout)\n+ img_slice_cols = im2col(matrix(t(img[c,]), 1, Hin*Win) , Hin, Win, Hf, Wf, strideh, stridew)\n+\n+ # Max pooling on patches\n+ img_maxes[c,] = colMaxs(img_slice_cols)\n+ }\n+\n+ out[n,] = matrix(t(img_maxes), 1, C*Hout*Wout)\n+ }\n+\n+ out\n+}\n+\n+R = max_pool(X, numImg, numChannels, imgSize*imgSize, 1, poolSize1, poolSize2, stride, stride)\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"))\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/rowAggPattern44.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+imgSize=8\n+numImg=16\n+numChannels=4\n+poolSize1=imgSize*imgSize\n+poolSize2=1\n+stride=1\n+pad=0\n+\n+X = matrix(seq(1, numImg*numChannels*imgSize*imgSize), rows=numImg, cols=numChannels*imgSize*imgSize);\n+while(FALSE){}\n+\n+X = X - rowMeans(X);\n+R = max_pool(X, stride=[stride, stride], padding=[pad, pad], input_shape=[numImg, numChannels, imgSize*imgSize, 1], pool_size=[poolSize1, poolSize2]);\n+\n+write(R, $1, format=\"text\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2430] Codegen support for rowMeans in row templates This patch adds codegen support for unary aggregate rowMeans to the codegen row templates. In detail, this includes extended compiler support for the necessary dense/sparse vector primitives. Furthermore, this also cleans up some convenience methods for full aggregates on matrix blocks.
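The new mean primitives are just sum divided by length; the detail worth calling out is that the sparse variant divides by the logical row length len rather than the number of stored values, since zeros add nothing to the sum but do count toward the mean. A toy check of that behavior, with plain arrays in place of the LibSpoofPrimitives call:

public class VectMeanSketch {
  public static void main(String[] args) {
    // sparse row: values {2, 4} at columns {1, 3} of a length-4 row
    double[] avals = {2.0, 4.0};
    int len = 4;
    double sum = 0;
    for (double v : avals) sum += v;
    // divide by the full logical length, as in vectMean
    System.out.println(sum / len); // (2 + 4) / 4 = 1.5
  }
}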
49,738
12.07.2018 17:46:30
25,200
f1bf97baf342035764c676b50d361e36e2bbae62
Fix codegen multi-agg compilation w/ interleaved MMs This patch fixes special cases of compiling code generation plans of multi-aggregates with interleaved matrix multiplications (e.g., t(X)%*%X, t(X)%*%Y, t(Y)%*%Y) over transient reads which require dedicated handling of data operators.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "diff": "@@ -188,11 +188,17 @@ public class TemplateCell extends TemplateBase\n&& (me.type!=TemplateType.MAGG || memo.contains(c.getHopID(), TemplateType.CELL)))\nrConstructCplan(c, memo, tmp, inHops, compileLiterals);\nelse if( me!=null && (me.type==TemplateType.MAGG || me.type==TemplateType.CELL)\n- && HopRewriteUtils.isMatrixMultiply(hop) && i==0 ) //skip transpose\n+ && HopRewriteUtils.isMatrixMultiply(hop) && i==0 ) { //skip transpose\n+ if( c.getInput().get(0) instanceof DataOp ) {\n+ tmp.put(c.getInput().get(0).getHopID(),\n+ TemplateUtils.createCNodeData(c.getInput().get(0), compileLiterals));\n+ inHops.add(c.getInput().get(0));\n+ }\n+ else\nrConstructCplan(c.getInput().get(0), memo, tmp, inHops, compileLiterals);\n+ }\nelse {\n- CNodeData cdata = TemplateUtils.createCNodeData(c, compileLiterals);\n- tmp.put(c.getHopID(), cdata);\n+ tmp.put(c.getHopID(), TemplateUtils.createCNodeData(c, compileLiterals));\ninHops.add(c);\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2431] Fix codegen multi-agg compilation w/ interleaved MMs This patch fixes special cases of compiling code generation plans of multi-aggregates with interleaved matrix multiplications (e.g., t(X)%*%X, t(X)%*%Y, t(Y)%*%Y) over transient reads which require dedicated handling of data operators.
49,741
12.07.2018 21:30:22
25,200
6bb136911b290b8c75ee16ea7da199bcc9dd0ba6
[MINOR] Improved JMLC memory profiling, command line arg -mem Closes
[ { "change_type": "MODIFY", "old_path": "docs/jmlc.md", "new_path": "docs/jmlc.md", "diff": "@@ -55,12 +55,8 @@ JMLC can be configured to gather runtime statistics, as in the MLContext API, by\nmethod with a value of `true`. JMLC can also be configured to gather statistics on the memory used by matrices and\nframes in the DML script. To enable collection of memory statistics, call Connection's `gatherMemStats()` method\nwith a value of `true`. When finegrained statistics are enabled in `SystemML.conf`, JMLC will also report the variables\n-in the DML script which used the most memory. By default, the memory use reported will be an overestimte of the actual\n-memory required to run the program. When finegrained statistics are enabled, JMLC will gather more accurate statistics\n-by keeping track of garbage collection events and reducing the memory estimate accordingly. The most accurate way to\n-determine the memory required by a script is to run the script in a single thread and enable finegrained statistics.\n-\n-An example showing how to enable statistics in JMLC is presented in the section below.\n+in the DML script which used the most memory. An example showing how to enable statistics in JMLC is presented in the\n+section below.\n---\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -131,6 +131,7 @@ public class DMLScript\npublic boolean clean = false; // Whether to clean up all SystemML working directories (FS, DFS)\npublic boolean stats = false; // Whether to record and print the statistics\npublic int statsCount = 10; // Default statistics count\n+ public boolean memStats = false; // max memory statistics\npublic Explain.ExplainType explainType = Explain.ExplainType.NONE; // Whether to print the \"Explain\" and if so, what type\npublic DMLScript.RUNTIME_PLATFORM execMode = OptimizerUtils.getDefaultExecutionMode(); // Execution mode standalone, MR, Spark or a hybrid\npublic boolean gpu = false; // Whether to use the GPU\n@@ -151,6 +152,7 @@ public class DMLScript\n\", clean=\" + clean +\n\", stats=\" + stats +\n\", statsCount=\" + statsCount +\n+ \", memStats=\" + memStats +\n\", explainType=\" + explainType +\n\", execMode=\" + execMode +\n\", gpu=\" + gpu +\n@@ -167,7 +169,7 @@ public class DMLScript\npublic static RUNTIME_PLATFORM rtplatform = DMLOptions.defaultOptions.execMode; // the execution mode\npublic static boolean STATISTICS = DMLOptions.defaultOptions.stats; // whether to print statistics\npublic static boolean FINEGRAINED_STATISTICS = false; // whether to print fine-grained statistics\n- public static boolean JMLC_MEMORY_STATISTICS = false; // whether to gather memory use stats in JMLC\n+ public static boolean JMLC_MEM_STATISTICS = false; // whether to gather memory use stats in JMLC\npublic static int STATISTICS_COUNT = DMLOptions.defaultOptions.statsCount; // statistics maximum heavy hitter count\npublic static int STATISTICS_MAX_WRAP_LEN = 30; // statistics maximum wrap length\npublic static boolean ENABLE_DEBUG_MODE = DMLOptions.defaultOptions.debug; // debug mode\n@@ -315,6 +317,7 @@ public class DMLScript\n}\n}\n}\n+ dmlOptions.memStats = line.hasOption(\"mem\");\ndmlOptions.clean = line.hasOption(\"clean\");\n@@ -390,9 +393,11 @@ public class DMLScript\nOption cleanOpt = OptionBuilder.withDescription(\"cleans up all SystemML working directories (FS, DFS); all other flags are ignored in this mode. 
\\n\")\n.create(\"clean\");\nOption statsOpt = OptionBuilder.withArgName(\"count\")\n- .withDescription(\"monitors and reports caching/recompilation statistics; heavy hitter <count> is 10 unless overridden; default off\")\n+ .withDescription(\"monitors and reports summary execution statistics; heavy hitter <count> is 10 unless overridden; default off\")\n.hasOptionalArg()\n.create(\"stats\");\n+ Option memOpt = OptionBuilder.withDescription(\"monitors and reports max memory consumption in CP; default off\")\n+ .create(\"mem\");\nOption explainOpt = OptionBuilder.withArgName(\"level\")\n.withDescription(\"explains plan levels; can be 'hops' / 'runtime'[default] / 'recompile_hops' / 'recompile_runtime'\")\n.hasOptionalArg()\n@@ -436,6 +441,7 @@ public class DMLScript\noptions.addOption(configOpt);\noptions.addOption(cleanOpt);\noptions.addOption(statsOpt);\n+ options.addOption(memOpt);\noptions.addOption(explainOpt);\noptions.addOption(execOpt);\noptions.addOption(gpuOpt);\n@@ -465,11 +471,9 @@ public class DMLScript\n{\nDMLOptions dmlOptions = parseCLArguments(args, options);\n- // String[] scriptArgs = null; //optional script arguments\n- // boolean namedScriptArgs = false;\n-\nSTATISTICS = dmlOptions.stats;\nSTATISTICS_COUNT = dmlOptions.statsCount;\n+ JMLC_MEM_STATISTICS = dmlOptions.memStats;\nUSE_ACCELERATOR = dmlOptions.gpu;\nFORCE_ACCELERATOR = dmlOptions.forceGPU;\nEXPLAIN = dmlOptions.explainType;\n@@ -517,35 +521,26 @@ public class DMLScript\nelse {\nexecute(dmlScriptStr, fnameOptConfig, argVals, args, SCRIPT_TYPE);\n}\n-\n}\n- catch(AlreadySelectedException e)\n- {\n+ catch(AlreadySelectedException e) {\nSystem.err.println(\"Mutually exclusive options were selected. \" + e.getMessage());\nHelpFormatter formatter = new HelpFormatter();\nformatter.printHelp( \"systemml\", options );\nreturn false;\n}\n- catch(org.apache.commons.cli.ParseException e)\n- {\n+ catch(org.apache.commons.cli.ParseException e) {\nSystem.err.println(e.getMessage());\nHelpFormatter formatter = new HelpFormatter();\nformatter.printHelp( \"systemml\", options );\n}\n- catch (ParseException pe) {\n- throw pe;\n- }\n- catch (DMLScriptException e) {\n- //rethrow DMLScriptException to propagate stop call\n+ catch (ParseException | DMLScriptException e) {\nthrow e;\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nLOG.error(\"Failed to execute DML script.\", ex);\nthrow new DMLException(ex);\n}\n- finally\n- {\n+ finally {\n//reset runtime platform and visualize flag\nrtplatform = oldrtplatform;\nEXPLAIN = oldexplain;\n@@ -626,22 +621,17 @@ public class DMLScript\n}\n- private static void setLoggingProperties( Configuration conf )\n- {\n+ private static void setLoggingProperties( Configuration conf ) {\nString debug = conf.get(\"systemml.logging\");\n-\nif (debug == null)\ndebug = System.getProperty(\"systemml.logging\");\n-\nif (debug != null){\n- if (debug.equalsIgnoreCase(\"debug\")){\n+ if (debug.equalsIgnoreCase(\"debug\"))\nLogger.getLogger(\"org.apache.sysml\").setLevel((Level) Level.DEBUG);\n- }\n- else if (debug.equalsIgnoreCase(\"trace\")){\n+ else if (debug.equalsIgnoreCase(\"trace\"))\nLogger.getLogger(\"org.apache.sysml\").setLevel((Level) Level.TRACE);\n}\n}\n- }\n///////////////////////////////\n// private internal interface\n@@ -871,8 +861,7 @@ public class DMLScript\n+ MRConfigurationNames.DFS_PERMISSIONS_ENABLED + \" = \" + perm );\n//print warning if permission issues possible\n- if( flagDiffUser && ( flagLocalFS || flagSecurity ) )\n- {\n+ if( flagDiffUser && ( flagLocalFS || 
flagSecurity ) ) {\nLOG.warn(\"Cannot run map/reduce tasks as user '\"+userName+\"'. Using tasktracker group '\"+ttGroupName+\"'.\");\n}\n}\n@@ -895,19 +884,13 @@ public class DMLScript\n//this implementation does not create job specific sub directories)\nJobConf job = new JobConf(ConfigurationManager.getCachedJobConf());\nif( InfrastructureAnalyzer.isLocalMode(job) ) {\n- try\n- {\n- LocalFileUtils.deleteFileIfExists( DMLConfig.LOCAL_MR_MODE_STAGING_DIR + //staging dir (for local mode only)\n- dirSuffix );\n- LocalFileUtils.deleteFileIfExists( MRJobConfiguration.getLocalWorkingDirPrefix(job) + //local dir\n- dirSuffix );\n- MapReduceTool.deleteFileIfExistOnHDFS( MRJobConfiguration.getSystemWorkingDirPrefix(job) + //system dir\n- dirSuffix );\n- MapReduceTool.deleteFileIfExistOnHDFS( MRJobConfiguration.getStagingWorkingDirPrefix(job) + //staging dir\n- dirSuffix );\n- }\n- catch(Exception ex)\n- {\n+ try {\n+ LocalFileUtils.deleteFileIfExists( DMLConfig.LOCAL_MR_MODE_STAGING_DIR + dirSuffix );\n+ LocalFileUtils.deleteFileIfExists( MRJobConfiguration.getLocalWorkingDirPrefix(job) + dirSuffix );\n+ MapReduceTool.deleteFileIfExistOnHDFS( MRJobConfiguration.getSystemWorkingDirPrefix(job) + dirSuffix );\n+ MapReduceTool.deleteFileIfExistOnHDFS( MRJobConfiguration.getStagingWorkingDirPrefix(job) + dirSuffix );\n+ }\n+ catch(Exception ex) {\n//we give only a warning because those directories are written by the mapred deamon\n//and hence, execution can still succeed\nLOG.warn(\"Unable to cleanup hadoop working dirs: \"+ex.getMessage());\n@@ -924,12 +907,10 @@ public class DMLScript\n// private internal helper functionalities\n////////\n- private static void printInvocationInfo(String fnameScript, String fnameOptConfig, Map<String,String> argVals)\n- {\n+ private static void printInvocationInfo(String fnameScript, String fnameOptConfig, Map<String,String> argVals) {\nLOG.debug(\"****** args to DML Script ******\\n\" + \"UUID: \" + getUUID() + \"\\n\" + \"SCRIPT PATH: \" + fnameScript + \"\\n\"\n+ \"RUNTIME: \" + rtplatform + \"\\n\" + \"BUILTIN CONFIG: \" + DMLConfig.DEFAULT_SYSTEMML_CONFIG_FILEPATH + \"\\n\"\n+ \"OPTIONAL CONFIG: \" + fnameOptConfig + \"\\n\");\n-\nif( !argVals.isEmpty() ) {\nLOG.debug(\"Script arguments are: \\n\");\nfor (int i=1; i<= argVals.size(); i++)\n@@ -937,27 +918,23 @@ public class DMLScript\n}\n}\n- private static void printStartExecInfo(String dmlScriptString)\n- {\n+ private static void printStartExecInfo(String dmlScriptString) {\nLOG.info(\"BEGIN DML run \" + getDateTime());\nLOG.debug(\"DML script: \\n\" + dmlScriptString);\n-\nif (rtplatform == RUNTIME_PLATFORM.HADOOP || rtplatform == RUNTIME_PLATFORM.HYBRID) {\nString hadoop_home = System.getenv(\"HADOOP_HOME\");\nLOG.info(\"HADOOP_HOME: \" + hadoop_home);\n}\n}\n- private static String getDateTime()\n- {\n+ private static String getDateTime() {\nDateFormat dateFormat = new SimpleDateFormat(\"MM/dd/yyyy HH:mm:ss\");\nDate date = new Date();\nreturn dateFormat.format(date);\n}\nprivate static void cleanSystemMLWorkspace() {\n- try\n- {\n+ try {\n//read the default config\nDMLConfig conf = DMLConfig.readConfigurationFile(null);\n@@ -974,8 +951,7 @@ public class DMLScript\nif( localtmp != null )\nLocalFileUtils.cleanupRcWorkingDirectory(localtmp);\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nthrow new DMLException(\"Failed to run SystemML workspace cleanup.\", ex);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java", "new_path": 
"src/main/java/org/apache/sysml/api/jmlc/Connection.java", "diff": "@@ -194,7 +194,7 @@ public class Connection implements Closeable\n*/\npublic void gatherMemStats(boolean stats) {\nDMLScript.STATISTICS = stats || DMLScript.STATISTICS;\n- DMLScript.JMLC_MEMORY_STATISTICS = stats;\n+ DMLScript.JMLC_MEM_STATISTICS = stats;\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -137,7 +137,7 @@ public class LocalVariableMap implements Cloneable\nif( !dict.containsKey(hash) && e.getValue() instanceof CacheableData ) {\ndict.put(hash, e.getValue());\ndouble size = ((CacheableData<?>) e.getValue()).getDataSize();\n- if ((DMLScript.JMLC_MEMORY_STATISTICS) && (DMLScript.FINEGRAINED_STATISTICS))\n+ if (DMLScript.JMLC_MEM_STATISTICS && DMLScript.FINEGRAINED_STATISTICS)\nStatistics.maintainCPHeavyHittersMem(e.getKey(), size);\ntotal += size;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "diff": "@@ -259,7 +259,7 @@ public class ProgramBlock implements ParseInfo\nStatistics.maintainCPHeavyHitters(\ntmp.getExtendedOpcode(), System.nanoTime()-t0);\n}\n- if ((DMLScript.JMLC_MEMORY_STATISTICS) && (DMLScript.FINEGRAINED_STATISTICS))\n+ if (DMLScript.JMLC_MEM_STATISTICS && DMLScript.FINEGRAINED_STATISTICS)\nec.getVariables().getPinnedDataSize();\n// optional trace information (instruction and runtime)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "diff": "@@ -487,6 +487,8 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nif( DMLScript.STATISTICS ){\nlong t1 = System.nanoTime();\nCacheStatistics.incrementAcquireMTime(t1-t0);\n+ if (DMLScript.JMLC_MEM_STATISTICS)\n+ Statistics.addCPMemObject(System.identityHashCode(this), getDataSize());\n}\nreturn ret;\n@@ -505,9 +507,6 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nsetDirty(true);\n_isAcquireFromEmpty = false;\n- if (DMLScript.JMLC_MEMORY_STATISTICS)\n- Statistics.addCPMemObject(newData);\n-\n//set references to new data\nif (newData == null)\nthrow new DMLRuntimeException(\"acquireModify with empty cache block.\");\n@@ -574,11 +573,6 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n_requiresLocalWrite = false;\n}\n- if ((DMLScript.JMLC_MEMORY_STATISTICS) && (this._data != null)) {\n- int hash = System.identityHashCode(this._data);\n- Statistics.removeCPMemObject(hash);\n- }\n-\n//create cache\ncreateCache();\n_data = null;\n@@ -608,10 +602,6 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n||(_data!=null && !isCachingActive()) )) //additional condition for JMLC\nfreeEvictedBlob();\n- if ((DMLScript.JMLC_MEMORY_STATISTICS) && (this._data != null)) {\n- int hash = System.identityHashCode(this._data);\n- Statistics.removeCPMemObject(hash);\n- }\n// clear the in-memory data\n_data = null;\nclearCache();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "diff": "@@ 
-59,6 +59,7 @@ import org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.runtime.matrix.data.Pair;\nimport org.apache.sysml.runtime.util.MapReduceTool;\nimport org.apache.sysml.utils.GPUStatistics;\n+import org.apache.sysml.utils.Statistics;\npublic class ExecutionContext {\n@@ -600,6 +601,8 @@ public class ExecutionContext {\n}\npublic void cleanupCacheableData(CacheableData<?> mo) {\n+ if (DMLScript.JMLC_MEM_STATISTICS)\n+ Statistics.removeCPMemObject(System.identityHashCode(mo));\n//early abort w/o scan of symbol table if no cleanup required\nboolean fileExists = (mo.isHDFSFileExists() && mo.getFileName() != null);\nif( !CacheableData.isCachingActive() && !fileExists )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "diff": "@@ -1097,6 +1097,9 @@ public class SparkExecutionContext extends ExecutionContext\n//and hence is transparently used by rmvar instructions and other users. The\n//core difference is the lineage-based cleanup of RDD and broadcast variables.\n+ if (DMLScript.JMLC_MEM_STATISTICS)\n+ Statistics.removeCPMemObject(System.identityHashCode(mo));\n+\nif( !mo.isCleanupEnabled() )\nreturn;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -22,7 +22,6 @@ package org.apache.sysml.utils;\nimport java.lang.management.CompilationMXBean;\nimport java.lang.management.GarbageCollectorMXBean;\nimport java.lang.management.ManagementFactory;\n-import java.lang.ref.SoftReference;\nimport java.text.DecimalFormat;\nimport java.util.Arrays;\nimport java.util.Comparator;\n@@ -36,7 +35,6 @@ import java.util.concurrent.atomic.LongAdder;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheBlock;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.instructions.Instruction;\n@@ -81,12 +79,6 @@ public class Statistics\nprivate static final ConcurrentHashMap<String,Double> _cpMemObjs = new ConcurrentHashMap<>();\nprivate static final ConcurrentHashMap<Integer,Double> _currCPMemObjs = new ConcurrentHashMap<>();\n- // this hash map maintains soft references to the cache blocks in memory. It is periodically scanned to check for\n- // objects which have been garbage collected. This enables more accurate memory statistics. 
Relying on rmvar\n- // instructions to determine when an object has been de-allocated results in a substantial underestimate to memory\n- // use by the program since garbage collection will not occur immediately.\n- private static final ConcurrentHashMap<Integer,SoftReference<CacheBlock>> _liveObjects = new ConcurrentHashMap<>();\n-\n//JVM stats (low frequency updates)\nprivate static long jitCompileTime = 0; //in milli sec\nprivate static long jvmGCTime = 0; //in milli sec\n@@ -601,33 +593,11 @@ public class Statistics\nreturn opcode;\n}\n- public static void addCPMemObject(CacheBlock data) {\n- int hash = System.identityHashCode(data);\n- double sizeof = data.getInMemorySize();\n-\n+ public static void addCPMemObject(int hash, double sizeof) {\ndouble sizePrev = _currCPMemObjs.getOrDefault(hash, 0.0);\n_currCPMemObjs.put(hash, sizeof);\nsizeofPinnedObjects.add(sizeof - sizePrev);\n- if (DMLScript.FINEGRAINED_STATISTICS)\n- _liveObjects.putIfAbsent(hash, new SoftReference<>(data));\nmaintainMemMaxStats();\n- checkForDeadBlocks();\n- }\n-\n- /**\n- * If finegrained statistics are enabled searches through a map of soft references to find objects\n- * which have been garbage collected. This results in more accurate statistics on memory use but\n- * introduces overhead so is only enabled with finegrained stats and when running in JMLC\n- */\n- public static void checkForDeadBlocks() {\n- if (!DMLScript.FINEGRAINED_STATISTICS)\n- return;\n- for (Entry<Integer,SoftReference<CacheBlock>> e : _liveObjects.entrySet()) {\n- if (e.getValue().get() == null) {\n- removeCPMemObject(e.getKey());\n- _liveObjects.remove(e.getKey());\n- }\n- }\n}\n/**\n@@ -994,8 +964,8 @@ public class Statistics\nsb.append(\"Cache hits (Mem, WB, FS, HDFS):\\t\" + CacheStatistics.displayHits() + \".\\n\");\nsb.append(\"Cache writes (WB, FS, HDFS):\\t\" + CacheStatistics.displayWrites() + \".\\n\");\nsb.append(\"Cache times (ACQr/m, RLS, EXP):\\t\" + CacheStatistics.displayTime() + \" sec.\\n\");\n- if (DMLScript.JMLC_MEMORY_STATISTICS)\n- sb.append(\"Max size of objects in CP memory:\\t\" + byteCountToDisplaySize(getSizeofPinnedObjects()) + \" (\" + getNumPinnedObjects() + \" total objects)\" + \"\\n\");\n+ if (DMLScript.JMLC_MEM_STATISTICS)\n+ sb.append(\"Max size of live objects:\\t\" + byteCountToDisplaySize(getSizeofPinnedObjects()) + \" (\" + getNumPinnedObjects() + \" total objects)\" + \"\\n\");\nsb.append(\"HOP DAGs recompiled (PRED, SB):\\t\" + getHopRecompiledPredDAGs() + \"/\" + getHopRecompiledSBDAGs() + \".\\n\");\nsb.append(\"HOP DAGs recompile time:\\t\" + String.format(\"%.3f\", ((double)getHopRecompileTime())/1000000000) + \" sec.\\n\");\nif( getFunRecompiles()>0 ) {\n@@ -1047,7 +1017,7 @@ public class Statistics\nsb.append(\"Total JVM GC time:\\t\\t\" + ((double)getJVMgcTime())/1000 + \" sec.\\n\");\nLibMatrixDNN.appendStatistics(sb);\nsb.append(\"Heavy hitter instructions:\\n\" + getHeavyHitters(maxHeavyHitters));\n- if ((DMLScript.JMLC_MEMORY_STATISTICS) && (DMLScript.FINEGRAINED_STATISTICS))\n+ if (DMLScript.JMLC_MEM_STATISTICS && DMLScript.FINEGRAINED_STATISTICS)\nsb.append(\"Heavy hitter objects:\\n\" + getCPHeavyHittersMem(maxHeavyHitters));\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved JMLC memory profiling, command line arg -mem Closes #797.
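Below is a minimal JMLC snippet exercising the new memory statistics switch; the DML string is a made-up toy, and per the patched Connection, gatherMemStats(true) also turns on summary statistics. On the command line, the equivalent is the new -mem flag next to -stats.

import org.apache.sysml.api.jmlc.Connection;
import org.apache.sysml.api.jmlc.PreparedScript;

public class JmlcMemStats {
  public static void main(String[] args) throws Exception {
    try (Connection conn = new Connection()) {
      conn.gatherMemStats(true); // max-memory tracking (implies stats)
      PreparedScript ps = conn.prepareScript(
        "X = rand(rows=100, cols=100); print(sum(X));",
        new String[]{}, new String[]{});
      ps.executeScript();
    }
  }
}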
49,738
13.07.2018 12:37:37
25,200
2d0102853805d533c0db4ec420c304555b2e4fb5
Fix robustness of codegen optimizer for MLogreg This patch fixes special cases of the cost-based codegen optimizer to be robust against unavailable computation costs, e.g., when descending to data nodes.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "diff": "@@ -951,6 +951,7 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\n}\n//add compute costs of current operator to costs vector\n+ if( computeCosts.containsKey(currentHopId) )\ncostVect.computeCosts += computeCosts.get(currentHopId);\n//process children recursively\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2439] Fix robustness of codegen optimizer for MLogreg This patch fixes special cases of the cost-based codegen optimizer to be robust against unavailable computation costs, e.g., when descending to data nodes.
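The guard itself generalizes to any cost propagation that may descend to nodes without recorded compute costs, such as data nodes. A tiny standalone illustration, including the getOrDefault equivalent of the patched containsKey check; all names here are invented for the example.

import java.util.HashMap;
import java.util.Map;

public class CostLookupGuard {
  public static void main(String[] args) {
    Map<Long, Double> computeCosts = new HashMap<>();
    computeCosts.put(1L, 7.0); // only hop 1 has recorded costs
    long currentHopId = 2L;    // e.g., a data node without costs
    double costs = 0;
    if (computeCosts.containsKey(currentHopId)) // the patched guard
      costs += computeCosts.get(currentHopId);
    // equivalent: costs += computeCosts.getOrDefault(currentHopId, 0d);
    System.out.println(costs); // 0.0 instead of a NullPointerException
  }
}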
49,738
13.07.2018 12:55:41
25,200
c13a1b04c90198ce934888daddb0c54324c46d72
Fix correctness matrix compression group partitioning The recently improved bin-packing-based column group partitioning on matrix compression returned padded bins leading to duplicated column 0 and thus incorrect results.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/ColumnGroupPartitionerBinPacking.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/ColumnGroupPartitionerBinPacking.java", "diff": "@@ -77,7 +77,7 @@ public class ColumnGroupPartitionerBinPacking extends ColumnGroupPartitioner\n}\n//extract native int arrays for individual bins\n- return bins.stream().map(b -> b.extractValues())\n+ return bins.stream().map(b -> b.extractValues(true))\n.collect(Collectors.toList());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/utils/IntArrayList.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/utils/IntArrayList.java", "diff": "@@ -86,6 +86,12 @@ public class IntArrayList\nreturn _data;\n}\n+ public int[] extractValues(boolean trim) {\n+ int[] ret = extractValues();\n+ return (trim && _size < ret.length) ?\n+ Arrays.copyOfRange(ret, 0, _size) : ret;\n+ }\n+\nprivate void resize() {\n// check for integer overflow on resize\nif( _data.length > Integer.MAX_VALUE / RESIZE_FACTOR )\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2438] Fix correctness matrix compression group partitioning The recently improved bin-packing-based column group partitioning on matrix compression returned padded bins leading to duplicated column 0 and thus incorrect results.
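The root cause above is worth spelling out: `extractValues()` without trimming hands out the padded backing array of the int list, and the zero-valued padding slots read as extra references to column 0. A self-contained sketch of the failure mode and of the `copyOfRange` trim that the new `extractValues(true)` applies:

```java
import java.util.Arrays;

// Demonstrates why returning a padded backing array duplicates column 0
// when callers interpret every array entry as a column index.
public class PaddedListDemo {
    public static void main(String[] args) {
        int[] backing = new int[4]; // capacity 4, like a growable int list
        int size = 0;
        backing[size++] = 3;        // logical content: {3, 7}
        backing[size++] = 7;

        System.out.println(Arrays.toString(backing)); // [3, 7, 0, 0] -> bogus col-0 entries
        // the fix: trim to the logical size before exposing the array
        int[] trimmed = Arrays.copyOfRange(backing, 0, size);
        System.out.println(Arrays.toString(trimmed)); // [3, 7]
    }
}
```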
49,738
13.07.2018 18:06:13
25,200
f2a413a0279d19a3abc4734f0d0902b92fa937ea
New tests for all matrix market formats/fields/symmetry This patch is a preparation step for supporting matrix market in the general case, that is all formats (arrays, coordinate), fields (real, integer, pattern), and symmetry (general, symmetric, skew-symmetric).
[ { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/data/MatrixMarketFormatTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.data;\n+\n+import org.junit.Test;\n+\n+import java.io.BufferedWriter;\n+import java.io.IOException;\n+import java.io.OutputStreamWriter;\n+import java.util.Iterator;\n+\n+import org.apache.commons.lang.NotImplementedException;\n+import org.apache.hadoop.fs.FileSystem;\n+import org.apache.hadoop.fs.Path;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.io.IOUtilFunctions;\n+import org.apache.sysml.runtime.matrix.data.IJV;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class MatrixMarketFormatTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME = \"MatrixMarketFormat\";\n+ private final static String TEST_DIR = \"functions/data/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + MatrixMarketFormatTest.class.getSimpleName() + \"/\";\n+\n+ private final static int dim = 1200;\n+ private final static double sparsity = 0.1;\n+\n+ private enum MMFormat {\n+ COORDINATE,\n+ ARRAY,\n+ }\n+\n+ private enum MMField {\n+ REAL,\n+ INTEGER,\n+ COMPLEX,\n+ PATTERN,\n+ }\n+\n+ private enum MMSymmetry {\n+ GENERAL,\n+ SYMMETRIC,\n+ SKEW_SYMMETRIC, //- instead _\n+ }\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] { \"R\", \"C\" }) );\n+ }\n+\n+ @Test\n+ public void testMMCooRealGeneralCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.GENERAL, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooRealGeneralSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.GENERAL, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooRealGeneralMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.GENERAL, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooRealSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooRealSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooRealSymmetricMR() 
{\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooRealSkewSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.SKEW_SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooRealSkewSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.SKEW_SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooRealSkewSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.REAL, MMSymmetry.SKEW_SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerGeneralCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.GENERAL, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerGeneralSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.GENERAL, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerGeneralMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.GENERAL, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerSkewSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.SKEW_SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerSkewSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.SKEW_SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooIntegerSkewSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.INTEGER, MMSymmetry.SKEW_SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooPatternGeneralCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.PATTERN, MMSymmetry.GENERAL, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooPatternGeneralSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.PATTERN, MMSymmetry.GENERAL, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooPatternGeneralMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.PATTERN, MMSymmetry.GENERAL, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMCooPatternSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.PATTERN, MMSymmetry.SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMCooPatternSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.PATTERN, MMSymmetry.SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMCooPatternSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.COORDINATE, MMField.PATTERN, MMSymmetry.SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMArrRealGeneralCP() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.GENERAL, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMArrRealGeneralSp() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.GENERAL, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMArrRealGeneralMR() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, 
MMField.REAL, MMSymmetry.GENERAL, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMArrRealSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMArrRealSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMArrRealSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMArrRealSkewSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.SKEW_SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMArrRealSkewSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.SKEW_SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMArrRealSkewSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.REAL, MMSymmetry.SKEW_SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerGeneralCP() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.GENERAL, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerGeneralSp() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.GENERAL, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerGeneralMR() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.GENERAL, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.SYMMETRIC, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerSkewSymmetricCP() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.SKEW_SYMMETRIC, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerSkewSymmetricSp() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.SKEW_SYMMETRIC, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testMMArrIntegerSkewSymmetricMR() {\n+ runMatrixMarketFormatTest(MMFormat.ARRAY, MMField.INTEGER, MMSymmetry.SKEW_SYMMETRIC, ExecType.MR);\n+ }\n+\n+ private void runMatrixMarketFormatTest(MMFormat fmt, MMField field, MMSymmetry symmetry, ExecType et)\n+ {\n+ //rtplatform for MR\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.SINGLE_NODE; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ TestConfiguration config = getTestConfiguration(TEST_NAME);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", input(\"X\"), output(\"R\"), output(\"C\") };\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" +\n+ input(\"X\") + \" \" + expected(\"R\") + \" \" + expected(\"C\");\n+\n+ generateAndWriteMMInput(input(\"X\"), 
fmt, field, symmetry);\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare row and column aggregates\n+ TestUtils.compareMatrices(readDMLMatrixFromHDFS(\"R\"),\n+ readRMatrixFromFS(\"R\"), 1e-10, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(readDMLMatrixFromHDFS(\"C\"),\n+ readRMatrixFromFS(\"C\"), 1e-10, \"Stat-DML\", \"Stat-R\");\n+ }\n+ catch (IOException e) {\n+ throw new RuntimeException(e);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ }\n+ }\n+\n+ private void generateAndWriteMMInput(String fname, MMFormat fmt, MMField field, MMSymmetry symmetry)\n+ throws IOException\n+ {\n+ int rows = dim;\n+ int cols = (symmetry==MMSymmetry.GENERAL) ? dim/3 : dim;\n+ MatrixBlock tmp = MatrixBlock.randOperations(\n+ rows, cols, sparsity, -10, 10, \"uniform\", 7);\n+\n+ String header = \"%%MatrixMarket matrix \"\n+ + fmt.name().toLowerCase() + \" \"\n+ + field.name().toLowerCase() + \" \"\n+ + symmetry.name().toLowerCase().replace(\"_\", \"-\") + \"\\n\";\n+ String meta = rows + \" \" + cols + ((fmt == MMFormat.COORDINATE) ?\n+ \" \" + tmp.getNonZeros() : \"\") + \"\\n\";\n+\n+ Path path = new Path( fname );\n+ FileSystem fs = IOUtilFunctions.getFileSystem(path);\n+\n+ try( BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(path,true))) )\n+ {\n+ br.write(header);\n+ br.write(meta);\n+\n+ if( fmt == MMFormat.ARRAY ) {\n+ for(int j=0; j<tmp.getNumColumns(); j++) {\n+ int bi = (symmetry == MMSymmetry.GENERAL) ? 0 :\n+ (symmetry == MMSymmetry.SYMMETRIC) ? j : j+1;\n+ for(int i=bi; i<tmp.getNumRows(); i++) {\n+ double val = tmp.quickGetValue(i, j);\n+ br.write(String.valueOf((field == MMField.INTEGER) ?\n+ (int) val : val) + \"\\n\" );\n+ }\n+ }\n+ }\n+ else { //COORDINATE\n+ if( tmp.isInSparseFormat() ) {\n+ StringBuilder sb = new StringBuilder();\n+ Iterator<IJV> iter = tmp.getSparseBlockIterator();\n+ while( iter.hasNext() ) {\n+ IJV cell = iter.next();\n+ if( (symmetry == MMSymmetry.SYMMETRIC && cell.getJ() > cell.getI())\n+ || (symmetry == MMSymmetry.SKEW_SYMMETRIC && cell.getJ() >= cell.getI()))\n+ continue;\n+ sb.append(cell.getI()+1);\n+ sb.append(' ');\n+ sb.append(cell.getJ()+1);\n+ if( field != MMField.PATTERN ) {\n+ sb.append(' ');\n+ sb.append(String.valueOf((field == MMField.INTEGER) ?\n+ (int) cell.getV() : cell.getV()));\n+ }\n+ sb.append('\\n');\n+ br.write( sb.toString() ); //same as append\n+ sb.setLength(0);\n+ }\n+ }\n+ else {\n+ //always sparse in above used setup\n+ throw new NotImplementedException();\n+ }\n+ }\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/data/MatrixMarketFormat.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = as.matrix(readMM(args[1]))\n+R = as.matrix(rowSums(A));\n+C = t(as.matrix(colSums(A)));\n+writeMM(as(R,\"CsparseMatrix\"), args[2])\n+writeMM(as(C,\"CsparseMatrix\"), args[3])\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/data/MatrixMarketFormat.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+R = rowSums(A);\n+C = colSums(A);\n+write(R, $2);\n+write(C, $3);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2436] New tests for all matrix market formats/fields/symmetry This patch is a preparation step for supporting matrix market in the general case, that is all formats (arrays, coordinate), fields (real, integer, pattern), and symmetry (general, symmetric, skew-symmetric).
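For reference, the header and size-line layout that `generateAndWriteMMInput` in the test above emits can be summarized in a few lines; the concrete values below are illustrative:

```java
// MatrixMarket banner: format, field, and symmetry on one line, followed
// by a size line that carries an nnz count only in coordinate format.
public class MMHeaderDemo {
    public static void main(String[] args) {
        String fmt = "coordinate";          // or "array"
        String field = "real";              // or "integer", "pattern", "complex"
        String symmetry = "skew-symmetric"; // or "general", "symmetric"
        int rows = 1200, cols = 1200;
        long nnz = 144000;

        String banner = "%%MatrixMarket matrix " + fmt + " " + field + " " + symmetry;
        String size = fmt.equals("coordinate")
            ? rows + " " + cols + " " + nnz
            : rows + " " + cols;
        System.out.println(banner + "\n" + size);
    }
}
```

For `symmetric` and `skew-symmetric` files only the lower triangle is stored, which is why the generator above skips cells with j > i (or j >= i in the skew-symmetric case) when writing coordinate entries.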
49,738
16.07.2018 13:51:02
25,200
56c81cbde3a71ccc49505d2f4f5a89bb0661fd9b
[MINOR] Various script simplifications Kmeans predict
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/Kmeans-predict.dml", "new_path": "scripts/algorithms/Kmeans-predict.dml", "diff": "@@ -121,7 +121,6 @@ if (fileC != \" \") {\nprint (\"Reading C...\");\nC = read (fileC);\nnum_clusters = nrow (C);\n- ones_C = matrix (1, rows = num_clusters, cols = 1);\nprint (\"Computing the predicted Y...\");\nD = -2 * (X %*% t(C)) + t(rowSums (C ^ 2));\nprY = rowIndexMin (D);\n@@ -133,22 +132,19 @@ if (fileC != \" \") {\nprint (\"Reading the predicted Y...\");\nprY = read (filePrY);\nnum_clusters = max (prY);\n- ones_C = matrix (1, rows = num_clusters, cols = 1);\n}\nif (fileX != \" \") {\nprint (\"Computing the WCSS...\");\n# Compute projection matrix from clusters to records\n- P = matrix (0, rows = nrow (X), cols = num_clusters);\n- P [, 1 : max (prY)] = table (seq (1, nrow (X), 1), prY);\n+ P = table (seq (1, nrow (X), 1), prY, nrow(X), num_clusters);\n# Compute the means, as opposed to the centroids\ncluster_sizes = t(colSums (P));\n- record_of_ones = matrix (1, rows = 1, cols = ncol (X));\n- M = (t(P) %*% X) / ((cluster_sizes + (cluster_sizes == 0)) %*% record_of_ones);\n+ M = (t(P) %*% X) / (cluster_sizes + (cluster_sizes == 0));\n# Compute the WCSS for the means\nwcss_means = sum ((X - P %*% M) ^ 2);\nwcss_means_pc = 100.0 * wcss_means / total_ss;\n- bcss_means = sum (cluster_sizes * rowSums ((M - ones_C %*% total_mean) ^ 2));\n+ bcss_means = sum (cluster_sizes * rowSums ((M - total_mean) ^ 2));\nbcss_means_pc = 100.0 * bcss_means / total_ss;\n# Output results\nprint (\"Total Sum of Squares (TSS) = \" + total_ss);\n@@ -166,7 +162,7 @@ if (fileC != \" \") {\n# Compute the WCSS for the centroids\nwcss_centroids = sum ((X - P %*% C) ^ 2);\nwcss_centroids_pc = 100.0 * wcss_centroids / total_ss;\n- bcss_centroids = sum (cluster_sizes * rowSums ((C - ones_C %*% total_mean) ^ 2));\n+ bcss_centroids = sum (cluster_sizes * rowSums ((C - total_mean) ^ 2));\nbcss_centroids_pc = 100.0 * bcss_centroids / total_ss;\n# Output results\nprint (\"WCSS for centroids: \" + (round (10000.0 * wcss_centroids_pc) / 10000.0) + \"% of TSS = \" + wcss_centroids);\n@@ -323,15 +319,12 @@ return (Matrix[double] row_ids, Matrix[double] col_ids, Matrix[double] margins,\nMatrix[double] max_counts, Matrix[double] rounded_percentages)\n{\nmargins = rowSums (counts);\n- select_positive = diag (margins > 0);\n- select_positive = removeEmpty (target = select_positive, margin = \"rows\");\n+ select_positive = removeEmpty (target = diag (margins > 0), margin = \"rows\");\nrow_ids = select_positive %*% seq (1, nrow (margins), 1);\npos_counts = select_positive %*% counts;\npos_margins = select_positive %*% margins;\nmax_counts = rowMaxs (pos_counts);\n- one_per_column = matrix (1, rows = 1, cols = ncol (pos_counts));\n- max_counts_ppred = max_counts %*% one_per_column;\n- is_max_count = (pos_counts == max_counts_ppred);\n+ is_max_count = (pos_counts == max_counts);\naggr_is_max_count = t(cumsum (t(is_max_count)));\ncol_ids = rowSums (aggr_is_max_count == 0) + 1;\nrounded_percentages = round (1000000.0 * max_counts / pos_margins) / 10000.0;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Various script simplifications Kmeans predict
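Most of the simplifications above drop explicit ones-vector outer products such as `ones_C %*% total_mean` in favor of implicit row-vector broadcasting in the `-`, `/`, and `==` operators. A small Java sketch of what that broadcasting computes; this is illustrative, not SystemML's actual kernel:

```java
// Subtracting a 1 x n row vector from every row of an m x n matrix,
// which is what makes the explicit ones-vector expansion unnecessary.
public class BroadcastDemo {
    static double[][] subRowVector(double[][] M, double[] v) {
        double[][] out = new double[M.length][v.length];
        for (int i = 0; i < M.length; i++)
            for (int j = 0; j < v.length; j++)
                out[i][j] = M[i][j] - v[j]; // v is reused for every row
        return out;
    }

    public static void main(String[] args) {
        double[][] M = {{1, 2}, {3, 4}};
        double[] mean = {2, 3};
        double[][] R = subRowVector(M, mean); // {{-1, -1}, {1, 1}}
        System.out.println(java.util.Arrays.deepToString(R));
    }
}
```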
49,738
17.07.2018 15:23:13
25,200
34482f8e4fcf48e36c4c87f409e93ecc02c0979e
[MINOR] Fix test utils matrix market reader for 'pattern symmetric'
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java", "new_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java", "diff": "@@ -437,8 +437,11 @@ public class TestUtils\nif ( matrixType == 2 )\nexpectedValues.put(new CellIndex(j, i), v);\n}\n- else\n+ else { //pattern\nexpectedValues.put(new CellIndex(i, j), 1.0);\n+ if ( matrixType == 2 )\n+ expectedValues.put(new CellIndex(j, i), 1.0);\n+ }\n}\n}\ncatch (IOException e) {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix test utils matrix market reader for 'pattern symmetric'
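The bug fixed above: for `pattern symmetric` inputs the test reader materialized only the stored lower triangle and never mirrored it. A minimal sketch of the mirroring rule, using an illustrative string-keyed cell map in place of the test's CellIndex:

```java
import java.util.HashMap;
import java.util.Map;

// In a symmetric MatrixMarket file only the lower triangle is stored, so
// every off-diagonal cell (i, j) must also be materialized at (j, i);
// for 'pattern' fields the value is implicitly 1.0.
public class SymmetricPatternDemo {
    public static void main(String[] args) {
        Map<String, Double> cells = new HashMap<>();
        int[][] storedLowerTriangle = {{2, 1}, {3, 1}, {3, 3}};
        for (int[] c : storedLowerTriangle) {
            cells.put(c[0] + "," + c[1], 1.0);     // pattern: value is implicit
            if (c[0] != c[1])
                cells.put(c[1] + "," + c[0], 1.0); // mirror off-diagonal entries
        }
        System.out.println(cells); // contains both (2,1) and (1,2), etc.
    }
}
```

The diagonal guard is optional when filling a map, since re-putting (i, i) is an idempotent overwrite; the actual fix mirrors unconditionally for the same reason.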
49,738
17.07.2018 16:49:00
25,200
7f54592feda197d80752ec1b1b5d2054bef8d952
[HOTFIX] Fix javadoc and logging issues dmloptions and codegen
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLOptions.java", "new_path": "src/main/java/org/apache/sysml/api/DMLOptions.java", "diff": "@@ -87,12 +87,10 @@ public class DMLOptions {\n'}';\n}\n-\n/**\n* Parses command line arguments to create a {@link DMLOptions} instance with the correct options\n* @param args arguments from the command line\n- * @param options an {@link Options} instance containing the options that need to be parsed\n- * @return an instance of {@link Options} that contain the correct {@link Option}s.\n+ * @return an instance of {@link DMLOptions} that contain the correct {@link Option}s.\n* @throws org.apache.commons.cli.ParseException if there is an incorrect option specified in the CLI\n*/\npublic static DMLOptions parseCLArguments(String[] args)\n@@ -198,14 +196,6 @@ public class DMLOptions {\nreturn dmlOptions;\n}\n- /**\n- * Creates an {@link Options} instance for the command line parameters\n- * As of SystemML 0.13, Apache Commons CLI 1.2 is transitively in the classpath\n- * However the most recent version of Apache Commons CLI is 1.4\n- * Creating CLI options is done using Static methods. Instead of {@link OptionBuilder},\n- * CLI 1.4 uses Option.Builder which has non-static methods.\n- * @return an appropriate instance of {@link Options}\n- */\n@SuppressWarnings(\"static-access\")\nprivate static Options createCLIOptions() {\nOptions options = new Options();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -102,7 +102,7 @@ public class SpoofCompiler\nprivate static final Log LOG = LogFactory.getLog(SpoofCompiler.class.getName());\n//internal configuration flags\n- public static final boolean LDEBUG = true;\n+ public static final boolean LDEBUG = false;\npublic static CompilerType JAVA_COMPILER = CompilerType.JANINO;\npublic static PlanSelector PLAN_SEL_POLICY = PlanSelector.FUSE_COST_BASED_V2;\npublic static final IntegrationType INTEGRATION = IntegrationType.RUNTIME;\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix javadoc and logging issues dmloptions and codegen
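Beyond the javadoc cleanup, the substantive change above resets `LDEBUG` to `false`. The idiom it relies on: a `static final boolean` is a compile-time constant, so code guarded by it is eliminated as a constant branch and costs nothing when disabled. A minimal sketch:

```java
// Constant debug-flag guard: with LDEBUG == false the guarded block is
// dead code and is compiled away, unlike a runtime-configurable flag.
public class DebugFlagDemo {
    private static final boolean LDEBUG = false; // flip only for local debugging

    public static void main(String[] args) {
        if (LDEBUG) {
            System.out.println("verbose internal state ...");
        }
        System.out.println("normal output");
    }
}
```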
49,760
17.07.2018 20:24:47
25,200
070f93976d5ca0d97e1b537d5322018556626349
Exploit lower bound in MNC sparsity estimator
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -124,6 +124,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ?\nMath.min((long)h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n+ //exploit lower bound on nnz based on half-full rows/cols\n+ nnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ?\n+ Math.max(h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n+\n//compute final sparsity\nreturn OptimizerUtils.getSparsity(\nh1.getRows(), h2.getCols(), nnz);\n@@ -138,6 +142,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nprivate final int cMaxNnz; //max nnz per col\nprivate final int rNonEmpty; //number of non-empty rows (an empty row has nnz=0)\nprivate final int cNonEmpty; //number of non-empty cols (an empty col has nnz=0)\n+ private final int rNdiv2; //number of rows with nnz > #cols/2\n+ private final int cNdiv2; //number of cols with nnz > #rows/2\npublic MatrixHistogram(MatrixBlock in, boolean useExcepts) {\n// 1) allocate basic synopsis\n@@ -177,6 +183,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncMaxNnz = Arrays.stream(cNnz).max().orElse(0);\nrNonEmpty = (int) Arrays.stream(rNnz).filter(v-> v!=0).count();\ncNonEmpty = (int) Arrays.stream(cNnz).filter(v-> v!=0).count();\n+ rNdiv2 = (int) Arrays.stream(rNnz).filter(item -> item > getCols()/2).count();\n+ cNdiv2 = (int) Arrays.stream(cNnz).filter(item -> item > getRows()/2).count();\n// 4) compute exception details if necessary (optional)\nif( useExcepts & !in.isEmpty() && (rMaxNnz > 1 || cMaxNnz > 1) ) {\n@@ -222,6 +230,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nrMaxNnz = rmax;\ncMaxNnz = cmax;\nrNonEmpty = cNonEmpty = -1;\n+ rNdiv2 = cNdiv2 = -1;\n}\npublic int getRows() {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2409] Exploit lower bound in MNC sparsity estimator Closes #801.
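The bound added above rests on a pigeonhole argument: with common dimension n, a row of the left input holding more than n/2 nonzeros must overlap every column of the right input that also holds more than n/2 nonzeros in at least one index, so each such row/column pair yields an output nonzero (assuming no cancellation, as sparsity estimators typically do). A self-contained sketch with illustrative counts:

```java
// Lower bound on output nnz of a matrix product: count rows of A with
// nnz > n/2 and columns of B with nnz > n/2; every such pair must share
// an index, so their dot product is (generically) nonzero.
public class MncLowerBoundDemo {
    public static void main(String[] args) {
        int n = 10; // common dimension (cols of A == rows of B)
        int[] rowNnzA = {6, 9, 2, 7}; // per-row nnz of A
        int[] colNnzB = {8, 3, 6};    // per-column nnz of B

        long rNdiv2 = java.util.Arrays.stream(rowNnzA).filter(v -> v > n / 2).count();
        long cNdiv2 = java.util.Arrays.stream(colNnzB).filter(v -> v > n / 2).count();
        long nnzLowerBound = rNdiv2 * cNdiv2; // 3 rows * 2 cols = 6 output cells
        System.out.println(nnzLowerBound);
    }
}
```

This mirrors the new `rNdiv2`/`cNdiv2` fields in the patch, which are computed once per histogram and combined as `max(h1.rNdiv2 * h2.cNdiv2, nnz)` during estimation.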