Column            Type           Values
author            int64          658 to 755k
date              stringlengths  19 to 19
timezone          int64          -46,800 to 43.2k
hash              stringlengths  40 to 40
message           stringlengths  5 to 490
mods              list
language          stringclasses  20 values
license           stringclasses  3 values
repo              stringlengths  5 to 68
original_message  stringlengths  12 to 491
49,738
18.07.2018 18:23:51
25,200
c89d3be80c13d47c2545840ce5b33e7debec60a5
Fix codegen binary outer operation handling So far we generated invalid codegen plans for binary outer vector operations leading to incorrect results. This patch effectively disables such outer vector operations (which anyway have dedicated physical operators that change their asymptotic behavior) in all codegen templates. Furthermore, this also includes related tests.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java", "diff": "@@ -555,7 +555,7 @@ public class AggUnaryOp extends MultiThreadedHop\nboolean ret = false;\nHop input = getInput().get(0);\n- if( input instanceof BinaryOp && ((BinaryOp)input).isOuterVectorOperator() )\n+ if( input instanceof BinaryOp && ((BinaryOp)input).isOuter() )\n{\n//for special cases, we need to hold the broadcast twice in order to allow for\n//an efficient binary search over a plain java array\n@@ -592,7 +592,7 @@ public class AggUnaryOp extends MultiThreadedHop\nboolean ret = false;\nHop input = getInput().get(0);\n- if( input instanceof BinaryOp && ((BinaryOp)input).isOuterVectorOperator() )\n+ if( input instanceof BinaryOp && ((BinaryOp)input).isOuter() )\n{\n//note: both cases (partitioned matrix, and sorted double array), require to\n//fit the broadcast twice into the local memory budget. Also, the memory\n@@ -634,16 +634,13 @@ public class AggUnaryOp extends MultiThreadedHop\n*\n* @return true if unary aggregate outer\n*/\n- private boolean isUnaryAggregateOuterCPRewriteApplicable()\n- {\n+ private boolean isUnaryAggregateOuterCPRewriteApplicable() {\nboolean ret = false;\nHop input = getInput().get(0);\n-\n- if(( input instanceof BinaryOp && ((BinaryOp)input).isOuterVectorOperator() )\n+ if(( input instanceof BinaryOp && ((BinaryOp)input).isOuter() )\n&& (_op == AggOp.MAXINDEX || _op == AggOp.MININDEX || _op == AggOp.SUM)\n&& (isCompareOperator(((BinaryOp)input).getOp())))\nret = true;\n-\nreturn ret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "diff": "@@ -125,7 +125,7 @@ public class BinaryOp extends MultiThreadedHop\nouter = flag;\n}\n- public boolean isOuterVectorOperator(){\n+ public boolean isOuter(){\nreturn outer;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "diff": "@@ -336,18 +336,15 @@ public class TemplateCell extends TemplateBase\nboolean isBinaryMatrixScalar = false;\nboolean isBinaryMatrixVector = false;\nboolean isBinaryMatrixMatrix = false;\n- if( hop instanceof BinaryOp && hop.getDataType().isMatrix() ) {\n+ if( hop instanceof BinaryOp && hop.getDataType().isMatrix() && !((BinaryOp)hop).isOuter() ) {\nHop left = hop.getInput().get(0);\nHop right = hop.getInput().get(1);\n- DataType ldt = left.getDataType();\n- DataType rdt = right.getDataType();\n-\n- isBinaryMatrixScalar = (ldt.isScalar() || rdt.isScalar());\n+ isBinaryMatrixScalar = (left.getDataType().isScalar() || right.getDataType().isScalar());\nisBinaryMatrixVector = hop.dimsKnown()\n- && ((ldt.isMatrix() && TemplateUtils.isVectorOrScalar(right))\n- || (rdt.isMatrix() && TemplateUtils.isVectorOrScalar(left)) );\n+ && ((left.getDataType().isMatrix() && TemplateUtils.isVectorOrScalar(right))\n+ || (right.getDataType().isMatrix() && TemplateUtils.isVectorOrScalar(left)) );\nisBinaryMatrixMatrix = hop.dimsKnown() && HopRewriteUtils.isEqualSize(left, right)\n- && ldt.isMatrix() && rdt.isMatrix();\n+ && left.getDataType().isMatrix() && right.getDataType().isMatrix();\n}\n//prepare indicators for ternary operations\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": 
"src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -142,7 +142,7 @@ public class TemplateUtils\npublic static boolean isOperationSupported(Hop h) {\nif(h instanceof UnaryOp)\nreturn UnaryType.contains(((UnaryOp)h).getOp().name());\n- else if(h instanceof BinaryOp)\n+ else if(h instanceof BinaryOp && !((BinaryOp)h).isOuter())\nreturn BinType.contains(((BinaryOp)h).getOp().name());\nelse if(h instanceof TernaryOp)\nreturn TernaryType.contains(((TernaryOp)h).getOp().name());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassRemoveConstantBinaryOps.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassRemoveConstantBinaryOps.java", "diff": "@@ -137,7 +137,7 @@ public class IPAPassRemoveConstantBinaryOps extends IPAPass\nreturn;\nif( hop instanceof BinaryOp && ((BinaryOp)hop).getOp()==OpOp2.MULT\n- && !((BinaryOp) hop).isOuterVectorOperator()\n+ && !((BinaryOp) hop).isOuter()\n&& hop.getInput().get(0).getDataType()==DataType.MATRIX\n&& hop.getInput().get(1) instanceof DataOp\n&& mOnes.containsKey(hop.getInput().get(1).getName()) )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "diff": "@@ -2186,7 +2186,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nprivate static Hop fuseAxpyBinaryOperationChain(Hop parent, Hop hi, int pos)\n{\n//patterns: (a) X + s*Y -> X +* sY, (b) s*Y+X -> X +* sY, (c) X - s*Y -> X -* sY\n- if( hi instanceof BinaryOp && !((BinaryOp) hi).isOuterVectorOperator()\n+ if( hi instanceof BinaryOp && !((BinaryOp) hi).isOuter()\n&& (((BinaryOp)hi).getOp()==OpOp2.PLUS || ((BinaryOp)hi).getOp()==OpOp2.MINUS) )\n{\nBinaryOp bop = (BinaryOp) hi;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -1775,7 +1775,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\n//pattern: outer(v, t(seq(1,m)), \"==\") -> rexpand(v, max=m, dir=row, ignore=true, cast=false)\n//note: this rewrite supports both left/right sequence\n- if( HopRewriteUtils.isBinary(hi, OpOp2.EQUAL) && ((BinaryOp)hi).isOuterVectorOperator() )\n+ if( HopRewriteUtils.isBinary(hi, OpOp2.EQUAL) && ((BinaryOp)hi).isOuter() )\n{\nif( ( HopRewriteUtils.isTransposeOperation(hi.getInput().get(1)) //pattern a: outer(v, t(seq(1,m)), \"==\")\n&& HopRewriteUtils.isBasic1NSequence(hi.getInput().get(1).getInput().get(0)))\n@@ -1833,7 +1833,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nelse {\nOpOp2 optr = bop2.getComplementPPredOperation();\nBinaryOp tmp = HopRewriteUtils.createBinary(bop2.getInput().get(0),\n- bop2.getInput().get(1), optr, bop2.isOuterVectorOperator());\n+ bop2.getInput().get(1), optr, bop2.isOuter());\nHopRewriteUtils.replaceChildReference(parent, bop, tmp, pos);\nHopRewriteUtils.cleanupUnreferenced(bop, bop2);\nhi = tmp;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java", "diff": "@@ -62,6 +62,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String 
TEST_NAME24 = TEST_NAME+24; //min(X, Y, Z, 3, 7)\nprivate static final String TEST_NAME25 = TEST_NAME+25; //bias_add\nprivate static final String TEST_NAME26 = TEST_NAME+26; //bias_mult\n+ private static final String TEST_NAME27 = TEST_NAME+27; //outer < +7 negative\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -74,7 +75,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=26; i++ ) {\n+ for( int i=1; i<=27; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -447,6 +448,20 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME26, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite27() {\n+ testCodegenIntegration( TEST_NAME27, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenCellwise27() {\n+ testCodegenIntegration( TEST_NAME27, false, ExecType.CP );\n+ }\n+\n+ public void testCodegenCellwiseRewrite27_sp() {\n+ testCodegenIntegration( TEST_NAME27, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldRewrites = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n@@ -498,7 +513,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\n}\nif( !(rewrites && (testname.equals(TEST_NAME2)\n- || testname.equals(TEST_NAME19))) ) //sigmoid\n+ || testname.equals(TEST_NAME19))) && !testname.equals(TEST_NAME27) )\nAssert.assertTrue(heavyHittersContainsSubString(\n\"spoofCell\", \"sp_spoofCell\", \"spoofMA\", \"sp_spoofMA\"));\nif( testname.equals(TEST_NAME7) ) //ensure matrix mult is fused\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/cellwisetmpl27.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = seq(17,1,-1);\n+C = outer(A, t(A), \"<\")+7;\n+S = matrix(as.matrix(C), nrow=17, ncol=17, byrow=FALSE);\n+\n+writeMM(as(S, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n+\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/cellwisetmpl27.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. 
See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = seq(17,1,-1);\n+C = outer(A, t(A), \"<\")+7;\n+write(C, $1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2454] Fix codegen binary outer operation handling So far we generated invalid codegen plans for binary outer vector operations leading to incorrect results. This patch effectively disables such outer vector operations (which anyway have dedicated physical operators that change their asymptotic behavior) in all codegen templates. Furthermore, this also includes related tests.
49,698
18.07.2018 18:47:49
25,200
1049f5e56070b648d2f1fa54f1af658206f0114e
[MINOR] Remove unused argument from fm.init dml function Closes
[ { "change_type": "MODIFY", "old_path": "scripts/nn/layers/fm.dml", "new_path": "scripts/nn/layers/fm.dml", "diff": "@@ -109,7 +109,7 @@ backward = function(matrix[double] dout, matrix[double] X, matrix[double] w0, ma\n# dV = mean(dout) * (t(X) %*% X %*%V) - g_V2\n}\n-init = function(int n, int d, int k)\n+init = function(int d, int k)\nreturn (matrix[double] w0, matrix[double] W, matrix[double] V) {\n/*\n* This function initializes the parameters.\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/test/grad_check.dml", "new_path": "scripts/nn/test/grad_check.dml", "diff": "@@ -1142,7 +1142,7 @@ fm = function() {\nk = 2 # factorization dimensionality\nX = rand(rows=n, cols=d)\ny = rand(rows=n, cols=1)\n- [w0, W, V] = fm::init(n, d, k)\n+ [w0, W, V] = fm::init(d, k)\n# Compute analytical gradients of loss wrt parameters\nout = fm::forward(X, w0, W, V)\n" }, { "change_type": "MODIFY", "old_path": "scripts/staging/fm-binclass.dml", "new_path": "scripts/staging/fm-binclass.dml", "diff": "@@ -56,7 +56,7 @@ train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matri\nk = 2; # factorization dimensionality, only(=2) possible for now.\n# 1.initialize fm core\n- [w0, W, V] = fm::init(n, d, k);\n+ [w0, W, V] = fm::init(d, k);\n# 2.initialize adam optimizer\n## Default values for some parameters\n" }, { "change_type": "MODIFY", "old_path": "scripts/staging/fm-regression.dml", "new_path": "scripts/staging/fm-regression.dml", "diff": "@@ -55,7 +55,7 @@ train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matri\n# only (=2) possible\n# 1.initialize fm core\n- [w0, W, V] = fm::init(n, d, k);\n+ [w0, W, V] = fm::init(d, k);\n# 2.initialize adam optimizer\n## Default values for some parameters\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Remove unused argument from fm.init dml function Closes #804.
49,727
18.07.2018 22:16:34
25,200
bca1f1c758b076ceb39febe3c4a6f8757655005d
Fix paramserv model list cleanup for partial updates Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "@@ -85,7 +85,7 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n// Update the local model with gradients\nif( j < totalIter - 1 )\nparams = updateModel(params, gradients, i, j, totalIter);\n- ParamservUtils.cleanupListObject(gradients);\n+ ParamservUtils.cleanupListObject(_ec, gradients);\n}\n// Push the gradients to ps\n@@ -183,8 +183,8 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n// Get the gradients\nListObject gradients = (ListObject) _ec.getVariable(_output.getName());\n- ParamservUtils.cleanupData(bFeatures);\n- ParamservUtils.cleanupData(bLabels);\n+ ParamservUtils.cleanupData(_ec, bFeatures);\n+ ParamservUtils.cleanupData(_ec, bLabels);\nreturn gradients;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamServer.java", "diff": "@@ -138,7 +138,7 @@ public abstract class ParamServer\n_accGradients, gradients, true);\nelse\nupdateGlobalModel(gradients);\n- ParamservUtils.cleanupListObject(gradients);\n+ ParamservUtils.cleanupListObject(_ec, gradients);\nif (allFinished()) {\n// Update the global model with accrued gradients\n@@ -192,11 +192,11 @@ public abstract class ParamServer\n// Invoke the aggregate function\n_inst.processInstruction(ec);\n- // Get the output\n+ // Get the new model\nListObject newModel = (ListObject) ec.getVariable(_outputName);\n- // Update the model with the new output\n- ParamservUtils.cleanupListObject(ec, Statement.PS_MODEL);\n+ // Clean up the list according to the data referencing status\n+ ParamservUtils.cleanupListObject(ec, Statement.PS_MODEL, newModel.getStatus());\nParamservUtils.cleanupListObject(ec, Statement.PS_GRADIENTS);\nreturn newModel;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "diff": "@@ -101,21 +101,45 @@ public class ParamservUtils {\nreturn new ListObject(newData, lo.getNames());\n}\n+ /**\n+ * Clean up the list object according to its own data status\n+ * @param ec execution context\n+ * @param lName list var name\n+ */\npublic static void cleanupListObject(ExecutionContext ec, String lName) {\nListObject lo = (ListObject) ec.removeVariable(lName);\n- cleanupListObject(lo);\n+ cleanupListObject(ec, lo, lo.getStatus());\n+ }\n+\n+ /**\n+ * Clean up the list object according to the given array of data status (i.e., false => not be removed)\n+ * @param ec execution context\n+ * @param lName list var name\n+ * @param status data status\n+ */\n+ public static void cleanupListObject(ExecutionContext ec, String lName, boolean[] status) {\n+ ListObject lo = (ListObject) ec.removeVariable(lName);\n+ cleanupListObject(ec, lo, status);\n}\n- public static void cleanupListObject(ListObject lo) {\n- lo.getData().forEach(ParamservUtils::cleanupData);\n+ public static void cleanupListObject(ExecutionContext ec, ListObject lo) {\n+ cleanupListObject(ec, lo, lo.getStatus());\n+ }\n+\n+ public static void cleanupListObject(ExecutionContext ec, ListObject lo, boolean[] status) {\n+ for (int i = 0; i < lo.getLength(); 
i++) {\n+ if (status != null && !status[i])\n+ continue; // data ref by other object must not be cleaned up\n+ ParamservUtils.cleanupData(ec, lo.getData().get(i));\n+ }\n}\n- public static void cleanupData(Data data) {\n+ public static void cleanupData(ExecutionContext ec, Data data) {\nif (!(data instanceof CacheableData))\nreturn;\nCacheableData<?> cd = (CacheableData<?>) data;\ncd.enableCleanup(true);\n- cd.clearData();\n+ ec.cleanupCacheableData(cd);\n}\npublic static MatrixObject newMatrixObject(MatrixBlock mb) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2446] Fix paramserv model list cleanup for partial updates Closes #802.
49,738
21.07.2018 01:18:16
25,200
44a1a67df134ccc6194ade35f51fe9736717434c
New estimation util for analyzing self-product NNZs This patch adds a new estimation utility for analyzing the exact output number of non-zeros of self matrix products without the need to materialize the output matrix.
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimationUtils.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.hops.estim;\n+\n+import java.util.Arrays;\n+\n+import org.apache.sysml.runtime.matrix.data.DenseBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.SparseBlock;\n+import org.apache.sysml.runtime.matrix.data.SparseRowVector;\n+import org.apache.sysml.runtime.util.UtilFunctions;\n+\n+public abstract class EstimationUtils\n+{\n+ /**\n+ * This utility function computes the exact output nnz\n+ * of a self matrix product without need to materialize\n+ * the output.\n+ *\n+ * @param m dense or sparse input matrix\n+ * @return exact output number of non-zeros.\n+ */\n+ public static long getSelfProductOutputNnz(MatrixBlock m1) {\n+ final int m = m1.getNumRows();\n+ final int n = m1.getNumColumns();\n+ long retNnz = 0;\n+\n+ if( m1.isInSparseFormat() ) {\n+ SparseBlock a = m1.getSparseBlock();\n+ SparseRowVector tmpS = new SparseRowVector(1024);\n+ double[] tmpD = null;\n+\n+ for( int i=0; i<m; i++ ) {\n+ if( a.isEmpty(i) ) continue;\n+ int alen = a.size(i);\n+ int apos = a.pos(i);\n+ int[] aix = a.indexes(i);\n+ double[] avals = a.values(i);\n+\n+ //compute number of aggregated non-zeros for input row\n+ int nnz1 = (int) Math.min(UtilFunctions.computeNnz(a, aix, apos, alen), n);\n+ boolean ldense = nnz1 > n / 128;\n+\n+ //perform vector-matrix multiply w/ dense or sparse output\n+ if( ldense ) { //init dense tmp row\n+ tmpD = (tmpD == null) ? new double[n] : tmpD;\n+ Arrays.fill(tmpD, 0);\n+ }\n+ else {\n+ tmpS.setSize(0);\n+ }\n+ for( int k=apos; k<apos+alen; k++ ) {\n+ if( a.isEmpty(aix[k]) ) continue;\n+ int blen = a.size(aix[k]);\n+ int bpos = a.pos(aix[k]);\n+ int[] bix = a.indexes(aix[k]);\n+ double aval = avals[k];\n+ double[] bvals = a.values(aix[k]);\n+ if( ldense ) { //dense aggregation\n+ for( int j=bpos; j<bpos+blen; j++ )\n+ tmpD[bix[j]] += aval * bvals[j];\n+ }\n+ else { //sparse aggregation\n+ for( int j=bpos; j<bpos+blen; j++ )\n+ tmpS.add(bix[j], aval * bvals[j]);\n+ }\n+ }\n+ retNnz += !ldense ? 
tmpS.size() :\n+ UtilFunctions.computeNnz(tmpD, 0, n);\n+ }\n+ }\n+ else { //dense\n+ DenseBlock a = m1.getDenseBlock();\n+ double[] tmp = new double[n];\n+ for( int i=0; i<m; i++ ) {\n+ double[] avals = a.values(i);\n+ int aix = a.pos(i);\n+ Arrays.fill(tmp, 0); //reset\n+ for( int k=aix; k<aix+n; k++ ) {\n+ double aval = avals[k];\n+ if( aval == 0 ) continue;\n+ double[] bvals = a.values(k);\n+ int bix = a.pos(k);\n+ for( int j=0; j<n; j++ )\n+ tmp[j] += aval * bvals[bix+j];\n+ }\n+ retNnz += UtilFunctions.computeNnz(tmp, 0, n);\n+ }\n+ }\n+ return retNnz;\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SelfProductTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Test;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.hops.estim.EstimationUtils;\n+import org.apache.sysml.hops.estim.EstimatorBasicAvg;\n+import org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorBitsetMM;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.EstimatorSample;\n+import org.apache.sysml.hops.estim.SparsityEstimator;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class SelfProductTest extends AutomatedTestBase\n+{\n+ private final static int m = 2500;\n+ private final static double sparsity1 = 0.0001;\n+ private final static double sparsity2 = 0.000001;\n+ private final static double eps1 = 0.05;\n+ private final static double eps2 = 1e-4;\n+ private final static double eps3 = 0;\n+\n+\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+\n+ @Test\n+ public void testBasicAvgCase1() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testBasicAvgCase2() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, sparsity2);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testDensityMapCase2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, sparsity2);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case1() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testDensityMap7Case2() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(7), m, sparsity2);\n+ }\n+\n+ 
@Test\n+ public void testBitsetMatrixCase1() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testBitsetMatrixCase2() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMatrixHistogramCase1() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(false), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMatrixHistogramCase2() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(false), m, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMatrixHistogramExceptCase1() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(true), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMatrixHistogramExceptCase2() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(true), m, sparsity2);\n+ }\n+\n+ @Test\n+ public void testSamplingDefCase1() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testSamplingDefCase2() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, sparsity2);\n+ }\n+\n+ @Test\n+ public void testSampling20Case1() {\n+ runSparsityEstimateTest(new EstimatorSample(0.2), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testSampling20Case2() {\n+ runSparsityEstimateTest(new EstimatorSample(0.2), m, sparsity2);\n+ }\n+\n+ private void runSparsityEstimateTest(SparsityEstimator estim, int n, double sp) {\n+ MatrixBlock m1 = MatrixBlock.randOperations(m, n, sp, 1, 1, \"uniform\", 3);\n+ MatrixBlock m3 = m1.aggregateBinaryOperations(m1, m1,\n+ new MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n+ double spExact = OptimizerUtils.getSparsity(m, m,\n+ EstimationUtils.getSelfProductOutputNnz(m1));\n+\n+ //compare estimated and real sparsity\n+ double est = estim.estim(m1, m1);\n+ TestUtils.compareScalars(est, m3.getSparsity(),\n+ (estim instanceof EstimatorBitsetMM) ? eps3 : //exact\n+ (estim instanceof EstimatorBasicWorst) ? eps1 : eps2);\n+ TestUtils.compareScalars(m3.getSparsity(), spExact, eps3);\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "diff": "@@ -27,6 +27,7 @@ import org.junit.runners.Suite;\n@RunWith(Suite.class)\[email protected]({\nOuterProductTest.class,\n+ SelfProductTest.class,\nSquaredProductChainTest.class,\nSquaredProductTest.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2461] New estimation util for analyzing self-product NNZs This patch adds a new estimation utility for analyzing the exact output number of non-zeros of self matrix products without the need to materialize the output matrix.
49,738
21.07.2018 16:13:38
25,200
dac22aae7d244dd608911fc7760408a2ecbf5a4a
Fix integer overflows in MNC sparsity estimator This patch fixes int overflows when using the MNC sparsity estimator over matrices with large dimensions whose product exceed max int.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -100,7 +100,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//note: normally h1.getRows()*h2.getCols() would define mnOut\n//but by leveraging the knowledge of rows/cols w/ <=1 nnz, we account\n//that exact and approximate fractions touch different areas\n- int mnOut = (h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1);\n+ long mnOut = (h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1);\ndouble spOutRest = 0;\nfor( int j=0; j<h1.getCols(); j++ ) {\n//exact fractions, w/o double counting\n@@ -115,7 +115,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n//general case with approximate output\nelse {\n- int mnOut = h1.getRows()*h2.getCols();\n+ long mnOut = h1.getRows()*h2.getCols();\ndouble spOut = 0;\nfor( int j=0; j<h1.getCols(); j++ ) {\ndouble lsp = (double) h1.cNnz[j] * h2.rNnz[j] / mnOut;\n@@ -130,7 +130,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//exploit lower bound on nnz based on half-full rows/cols\nnnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ?\n- Math.max(h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n+ Math.max((long)h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n//compute final sparsity\nreturn OptimizerUtils.getSparsity(\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2296] Fix integer overflows in MNC sparsity estimator This patch fixes int overflows when using the MNC sparsity estimator over matrices with large dimensions whose product exceed max int.
49,738
24.07.2018 18:32:50
25,200
99b1c2e252f935bbce5f53574d3e245221da3e68
Codegen support for maxpool/avgpool DNN operations This patch adds code generation support for maxpool and avgpool DNN operations to the codegen row-template. This way often entire joins of conv/maxpool/relu can be executed as fused operators without parallelization barriers per operator.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "new_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "diff": "@@ -217,6 +217,12 @@ public class DnnOp extends MultiThreadedHop\nisEqualAndKnown(param1.H, param2.H) && isEqualAndKnown(param1.W, param2.W);\n}\n+ public boolean isStride1Pad0() {\n+ DnnParameters tmp = parseInput();\n+ return tmp.stride_h == 1 && tmp.stride_w == 1\n+ && tmp.pad_h == 0 && tmp.pad_w == 0;\n+ }\n+\nprivate static boolean isEqualAndKnown(int val1, int val2) {\nreturn val1 >= 0 && val2 >= 0 && val1 == val2;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNode.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNode.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops.codegen.cplan;\nimport java.util.ArrayList;\n+import org.apache.sysml.hops.codegen.template.TemplateUtils;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDSequence;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -204,4 +205,27 @@ public abstract class CNode\n&& _dataType == cthat._dataType\n&& _literal == cthat._literal;\n}\n+\n+ protected String replaceUnaryPlaceholders(String tmp, String varj, boolean vectIn) {\n+ //replace sparse and dense inputs\n+ tmp = tmp.replace(\"%IN1v%\", varj+\"vals\");\n+ tmp = tmp.replace(\"%IN1i%\", varj+\"ix\");\n+ tmp = tmp.replace(\"%IN1%\",\n+ (vectIn && TemplateUtils.isMatrix(_inputs.get(0))) ? varj + \".values(rix)\" :\n+ (vectIn && TemplateUtils.isRowVector(_inputs.get(0)) ? varj + \".values(0)\" : varj));\n+\n+ //replace start position of main input\n+ String spos = (_inputs.get(0) instanceof CNodeData\n+ && _inputs.get(0).getDataType().isMatrix()) ? !varj.startsWith(\"b\") ?\n+ varj+\"i\" : TemplateUtils.isMatrix(_inputs.get(0)) ? varj + \".pos(rix)\" : \"0\" : \"0\";\n+\n+ tmp = tmp.replace(\"%POS1%\", spos);\n+ tmp = tmp.replace(\"%POS2%\", spos);\n+\n+ //replace length\n+ if( _inputs.get(0).getDataType().isMatrix() )\n+ tmp = tmp.replace(\"%LEN%\", _inputs.get(0).getVectorLength());\n+\n+ return tmp;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeNary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeNary.java", "diff": "package org.apache.sysml.hops.codegen.cplan;\nimport java.util.ArrayList;\n+import java.util.List;\n+import org.apache.commons.lang3.StringUtils;\nimport org.apache.sysml.hops.codegen.template.TemplateUtils;\nimport org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.runtime.util.DnnUtils;\nimport org.apache.sysml.runtime.util.UtilFunctions;\npublic class CNodeNary extends CNode\n{\npublic enum NaryType {\n- VECT_CBIND;\n+ VECT_CBIND,\n+ VECT_MAX_POOL,\n+ VECT_AVG_POOL;\n+\npublic static boolean contains(String value) {\nfor( NaryType bt : values() )\nif( bt.name().equals(value) )\n@@ -56,12 +62,19 @@ public class CNodeNary extends CNode\noff += input._cols;\n}\nreturn sb.toString();\n+ case VECT_MAX_POOL:\n+ case VECT_AVG_POOL:\n+ String vectName = (this==VECT_MAX_POOL) ? 
\"Maxpool\" : \"Avgpool\";\n+ String paramStr = getPoolingParameterString(inputs);\n+ return sparseGen ?\n+ \" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1v%, %IN1i%, %POS1%, alen, len, \"+paramStr+\");\\n\" :\n+ \" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1%, %POS1%, %LEN%, \"+paramStr+\");\\n\";\ndefault:\nthrow new RuntimeException(\"Invalid nary type: \"+this.toString());\n}\n}\npublic boolean isVectorPrimitive() {\n- return this == VECT_CBIND;\n+ return this == VECT_CBIND || this == VECT_MAX_POOL || this == VECT_AVG_POOL;\n}\n}\n@@ -90,10 +103,17 @@ public class CNodeNary extends CNode\nsb.append(in.codegen(sparse));\n//generate nary operation (use sparse template, if data input)\n+ boolean lsparse = sparse && (_inputs.get(0) instanceof CNodeData\n+ && _inputs.get(0).getVarname().startsWith(\"a\")\n+ && !_inputs.get(0).isLiteral());\nString var = createVarname();\n- String tmp = _type.getTemplate(sparse, _cols, _inputs);\n+ String tmp = _type.getTemplate(lsparse, _cols, _inputs);\ntmp = tmp.replace(\"%TMP%\", var);\n+ //replace sparse and dense inputs\n+ String varj = _inputs.get(0).getVarname();\n+ tmp = replaceUnaryPlaceholders(tmp, varj, false);\n+\nsb.append(tmp);\n//mark as generated\n@@ -106,6 +126,8 @@ public class CNodeNary extends CNode\npublic String toString() {\nswitch(_type) {\ncase VECT_CBIND: return \"n(cbind)\";\n+ case VECT_MAX_POOL: return \"n(maxpool)\";\n+ case VECT_AVG_POOL: return \"n(avgpool)\";\ndefault:\nreturn \"m(\"+_type.name().toLowerCase()+\")\";\n}\n@@ -121,6 +143,19 @@ public class CNodeNary extends CNode\n_cols += in._cols;\n_dataType = DataType.MATRIX;\nbreak;\n+ case VECT_MAX_POOL:\n+ case VECT_AVG_POOL: //only stride 1, pad 0\n+ int C = Integer.parseInt(_inputs.get(6).getVarname());\n+ int H = Integer.parseInt(_inputs.get(7).getVarname());\n+ int W = Integer.parseInt(_inputs.get(8).getVarname());\n+ int R = Integer.parseInt(_inputs.get(11).getVarname());\n+ int S = Integer.parseInt(_inputs.get(12).getVarname());\n+ long P = DnnUtils.getP(H, R, 1, 0);\n+ long Q = DnnUtils.getQ(W, S, 1, 0);\n+ _rows = _inputs.get(0)._rows;\n+ _cols = C * P * Q;\n+ _dataType = DataType.MATRIX;\n+ break;\n}\n}\n@@ -142,4 +177,19 @@ public class CNodeNary extends CNode\nreturn super.equals(that)\n&& _type == that._type;\n}\n+\n+ private static String getPoolingParameterString(List<CNode> inputs) {\n+ //extract and derive individual parameters\n+ int C = Integer.parseInt(inputs.get(6).getVarname());\n+ int H = Integer.parseInt(inputs.get(7).getVarname());\n+ int W = Integer.parseInt(inputs.get(8).getVarname());\n+ int R = Integer.parseInt(inputs.get(11).getVarname());\n+ int S = Integer.parseInt(inputs.get(12).getVarname());\n+ int P = (int) DnnUtils.getP(H, R, 1, 0);\n+ int Q = (int) DnnUtils.getQ(W, S, 1, 0);\n+\n+ //construct parameter string\n+ return \"rix, \" + StringUtils.join(\n+ new int[]{C, P, Q, R, S, H, W}, ',');\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "diff": "@@ -23,7 +23,6 @@ import java.util.Arrays;\nimport org.apache.commons.lang.ArrayUtils;\nimport org.apache.commons.lang.StringUtils;\n-import org.apache.sysml.hops.codegen.template.TemplateUtils;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -214,27 +213,10 @@ public class CNodeUnary extends CNode\nString tmp = 
_type.getTemplate(lsparse);\ntmp = tmp.replace(\"%TMP%\", var);\n- String varj = _inputs.get(0).getVarname();\n-\n//replace sparse and dense inputs\n+ String varj = _inputs.get(0).getVarname();\nboolean vectIn = varj.startsWith(\"b\") && !_type.isScalarLookup();\n- tmp = tmp.replace(\"%IN1v%\", varj+\"vals\");\n- tmp = tmp.replace(\"%IN1i%\", varj+\"ix\");\n- tmp = tmp.replace(\"%IN1%\",\n- (vectIn && TemplateUtils.isMatrix(_inputs.get(0))) ? varj + \".values(rix)\" :\n- (vectIn && TemplateUtils.isRowVector(_inputs.get(0)) ? varj + \".values(0)\" : varj));\n-\n- //replace start position of main input\n- String spos = (_inputs.get(0) instanceof CNodeData\n- && _inputs.get(0).getDataType().isMatrix()) ? !varj.startsWith(\"b\") ?\n- varj+\"i\" : TemplateUtils.isMatrix(_inputs.get(0)) ? varj + \".pos(rix)\" : \"0\" : \"0\";\n-\n- tmp = tmp.replace(\"%POS1%\", spos);\n- tmp = tmp.replace(\"%POS2%\", spos);\n-\n- //replace length\n- if( _inputs.get(0).getDataType().isMatrix() )\n- tmp = tmp.replace(\"%LEN%\", _inputs.get(0).getVectorLength());\n+ tmp = replaceUnaryPlaceholders(tmp, varj, vectIn);\nsb.append(tmp);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -115,7 +115,9 @@ public class TemplateRow extends TemplateBase\n&& HopRewriteUtils.isColumnRangeIndexing((IndexingOp)hop))\n|| (HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT)\n&& hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown()\n- && hop.getInput().get(0).getDim2()>1);\n+ && hop.getInput().get(0).getDim2()>1)\n+ || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL)\n+ && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0());\n}\n@Override\n@@ -140,6 +142,8 @@ public class TemplateRow extends TemplateBase\n|| (HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT)\n&& hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown()\n&& hop.getInput().get(0).getDim2()>1)\n+ || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL)\n+ && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0())\n|| isPartOfValidCumAggChain(hop) //cum* with transpose\n|| isPartOfValidTransposeMMChain(hop)); //t(f(X))%*%X\n}\n@@ -156,6 +160,8 @@ public class TemplateRow extends TemplateBase\n|| (HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT)\n&& hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown()\n&& hop.getInput().get(0).getDim2()>1 )\n+ || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL)\n+ && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0())\n|| (HopRewriteUtils.isDataGenOpWithLiteralInputs(input, DataGenMethod.SEQ)\n&& HopRewriteUtils.hasOnlyUnaryBinaryParents(input, false))\n|| (hop instanceof AggBinaryOp\n@@ -476,6 +482,12 @@ public class TemplateRow extends TemplateBase\nout = new CNodeBinary(cdata1, cdata2,\nBinType.valueOf(\"VECT_\"+((DnnOp)hop).getOp().name()));\n}\n+ else if( HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL) ) {\n+ CNode[] in = hop.getInput().stream().map(h ->\n+ tmp.get(h.getHopID())).toArray(CNode[]::new);\n+ out = new CNodeNary(in, CNodeNary.NaryType\n+ .valueOf(\"VECT_\"+((DnnOp)hop).getOp().name()));\n+ }\nelse if( hop instanceof NaryOp ) {\nCNode[] inputs = new CNode[hop.getInput().size()];\nfor( int i=0; i<hop.getInput().size(); i++ ) {\n" }, { "change_type": "MODIFY", "old_path": 
"src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java", "diff": "@@ -26,7 +26,9 @@ import org.apache.sysml.runtime.functionobjects.BitwAnd;\nimport org.apache.sysml.runtime.functionobjects.IntegerDivide;\nimport org.apache.sysml.runtime.functionobjects.Modulus;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixDNNPooling;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixMult;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixDNN.PoolingType;\n/**\n* This library contains all vector primitives that are used in\n@@ -2053,6 +2055,44 @@ public class LibSpoofPrimitives\nreturn c;\n}\n+ //maxpool\n+\n+ public static double[] vectMaxpoolWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ double[] c = allocVector(C*P*Q, true);\n+ LibMatrixDNNPooling.poolingDenseStride1Pad0(PoolingType.MAX,\n+ -Double.MAX_VALUE, 1, a, c, rix, rix+1, ai, 0, C, P, Q, R, S, H, W);\n+ return c;\n+ }\n+\n+ public static double[] vectMaxpoolWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ double[] a = allocVector(len, true);\n+ double[] c = allocVector(C*P*Q, true);\n+ for(int k=ai; k<ai+alen; k++)\n+ a[aix[k]] = avals[k];\n+ LibMatrixDNNPooling.poolingDenseStride1Pad0(PoolingType.MAX,\n+ -Double.MAX_VALUE, 1, a, c, rix, rix+1, 0, 0, C, P, Q, R, S, H, W);\n+ return c;\n+ }\n+\n+ //avgpool\n+\n+ public static double[] vectAvgpoolWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ double[] c = allocVector(C*P*Q, true);\n+ LibMatrixDNNPooling.poolingDenseStride1Pad0(PoolingType.AVG,\n+ 0, 1/(R*S), a, c, rix, rix+1, ai, 0, C, P, Q, R, S, H, W);\n+ return c;\n+ }\n+\n+ public static double[] vectAvgpoolWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ double[] a = allocVector(len, true);\n+ double[] c = allocVector(C*P*Q, true);\n+ for(int k=ai; k<ai+alen; k++)\n+ a[aix[k]] = avals[k];\n+ LibMatrixDNNPooling.poolingDenseStride1Pad0(PoolingType.AVG,\n+ 0, 1/(R*S), a, c, rix, rix+1, 0, 0, C, P, Q, R, S, H, W);\n+ return c;\n+ }\n+\n//complex builtin functions that are not directly generated\n//(included here in order to reduce the number of imports)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "diff": "@@ -97,8 +97,8 @@ public class LibMatrixDNNPooling {\nreturn ret;\n}\n- public static void poolingDenseStride1Pad0(PoolingType pType, double minVal, double pFact,\n- double[] in, double[] out, int rl, int ru, int C, int P, int Q, int R, int S, int H, int W) {\n+ public static void poolingDenseStride1Pad0(PoolingType pType, double minVal, double pFact, double[] in,\n+ double[] out, int rl, int ru, int ii, int oi, int C, int P, int Q, int R, int S, int H, int W) {\nboolean max = (pType == PoolingType.MAX);\nint CHW = C * H * W;\n@@ -106,9 +106,9 @@ public class LibMatrixDNNPooling {\n//quick-path w/o materialized index arrays and\n//simplified inner loops for P = 1, Q = 1, W = 1\nint lenh = Math.min(R,H);\n- for(int i = rl, oix=rl*C; i < ru; i++, oix+=C)\n- for (int c = 0, off=i*CHW; c < C; c++, off+=H) {\n- out[oix+c] = max ? 
max(minVal, in, off, lenh) :\n+ for(int i = rl; i < ru; i++, oi+=C)\n+ for (int c = 0, off=ii+(i-rl)*CHW; c < C; c++, off+=H) {\n+ out[oi+c] = max ? max(minVal, in, off, lenh) :\navg(minVal, in, off, lenh, pFact);\n}\n}\n@@ -117,7 +117,7 @@ public class LibMatrixDNNPooling {\nArrays.fill(out, rl*CPQ, ru*CPQ, minVal);\n//quick-path w/o materialized index arrays\nfor(int i = rl; i < ru; i++)\n- for (int c = 0, off=i*CHW, oix=i*CPQ; c < C; c++, off+=HW)\n+ for (int c = 0, off=ii+(i-rl)*CHW, oix=oi; c < C; c++, off+=HW)\nfor (int p = 0; p < P; p++, oix+=Q)\nfor (int h = p; h < Math.min(p+R,H); h++)\nfor (int q = 0, off2=off+h*W; q < Q; q++) {\n@@ -139,7 +139,7 @@ public class LibMatrixDNNPooling {\n_rl = rl; _ru = ru;\n_params = params;\n_poolingType = poolingType;\n- _poolingMultiplier = Math.pow(params.R*params.S, -1);\n+ _poolingMultiplier = 1/(params.R*params.S);\n}\n@Override\n@@ -157,7 +157,7 @@ public class LibMatrixDNNPooling {\nif( _params.isStride1Pad0() ) {\npoolingDenseStride1Pad0(_poolingType, minValForMaxPoolOperations,\n- _poolingMultiplier, in, out, _rl, _ru, C, P, Q, R, S, H, W);\n+ _poolingMultiplier, in, out, _rl, _ru, _rl*CHW, _rl*CPQ, C, P, Q, R, S, H, W);\n}\nelse { //general case\n//thread-local initialization of output block\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "diff": "@@ -80,7 +80,7 @@ public class RowAggTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME41 = TEST_NAME+\"41\"; //X*rowSums(X/seq(1,N)+t(seq(M,1)))\nprivate static final String TEST_NAME42 = TEST_NAME+\"42\"; //X/rowSums(min(X, Y, Z))\nprivate static final String TEST_NAME43 = TEST_NAME+\"43\"; //bias_add(X,B) + bias_mult(X,B)\n- private static final String TEST_NAME44 = TEST_NAME+\"44\"; //maxpool(X - mean(X));\n+ private static final String TEST_NAME44 = TEST_NAME+\"44\"; //maxpool(X - mean(X)) + 7;\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -817,6 +817,10 @@ public class RowAggTmplTest extends AutomatedTestBase\nif( testname.equals(TEST_NAME42) )\nAssert.assertTrue(!heavyHittersContainsSubString(\"min\",\"nmin\")\n&& !heavyHittersContainsSubString(\"spoof\", 2));\n+ if( testname.equals(TEST_NAME44) )\n+ Assert.assertTrue(!heavyHittersContainsSubString(\"maxpooling\")\n+ && !heavyHittersContainsSubString(\"spoof\", 2));\n+\n}\nfinally {\nrtplatform = platformOld;\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/codegen/rowAggPattern44.R", "new_path": "src/test/scripts/functions/codegen/rowAggPattern44.R", "diff": "@@ -95,5 +95,6 @@ max_pool <- function(X, N, C, Hin, Win, Hf, Wf,\n}\nR = max_pool(X, numImg, numChannels, imgSize*imgSize, 1, poolSize1, poolSize2, stride, stride)\n+R = R + 7;\nwriteMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"))\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/codegen/rowAggPattern44.dml", "new_path": "src/test/scripts/functions/codegen/rowAggPattern44.dml", "diff": "@@ -31,5 +31,6 @@ while(FALSE){}\nX = X - rowMeans(X);\nR = max_pool(X, stride=[stride, stride], padding=[pad, pad], input_shape=[numImg, numChannels, imgSize*imgSize, 1], pool_size=[poolSize1, poolSize2]);\n+R = R + 7;\nwrite(R, $1, format=\"text\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2109] Codegen support for maxpool/avgpool DNN operations This patch adds code generation support for maxpool and avgpool DNN operations to the codegen row-template. This way often entire joins of conv/maxpool/relu can be executed as fused operators without parallelization barriers per operator.
49,738
24.07.2018 23:08:58
25,200
fb675b82cfce6912cccbee1875d5df259c44f9ed
[HOTFIX] Fix maxpool/avgpool refactoring (start offsets and init)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPooling.java", "diff": "@@ -117,7 +117,7 @@ public class LibMatrixDNNPooling {\nArrays.fill(out, rl*CPQ, ru*CPQ, minVal);\n//quick-path w/o materialized index arrays\nfor(int i = rl; i < ru; i++)\n- for (int c = 0, off=ii+(i-rl)*CHW, oix=oi; c < C; c++, off+=HW)\n+ for (int c = 0, off=ii+(i-rl)*CHW, oix=oi+(i-rl)*CPQ; c < C; c++, off+=HW)\nfor (int p = 0; p < P; p++, oix+=Q)\nfor (int h = p; h < Math.min(p+R,H); h++)\nfor (int q = 0, off2=off+h*W; q < Q; q++) {\n@@ -139,7 +139,7 @@ public class LibMatrixDNNPooling {\n_rl = rl; _ru = ru;\n_params = params;\n_poolingType = poolingType;\n- _poolingMultiplier = 1/(params.R*params.S);\n+ _poolingMultiplier = 1d/(params.R*params.S);\n}\n@Override\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix maxpool/avgpool refactoring (start offsets and init)
49,738
25.07.2018 18:03:57
25,200
db9da28551bd85f234c196ac8fd7ea25cccc8543
Fix paramserv tests (incorrect named argument usage) With the recently added support for named function arguments various places in SystemML check the validity of used named arguments. This makes the existing paramserv tests fail because they use incorrect name bindings that have been ignored so far.
[ { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml", "new_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml", "diff": "@@ -157,14 +157,12 @@ gradients = function(matrix[double] features,\n[outc1, Houtc1, Woutc1] = conv2d::forward(features, W1, b1, C, Hin, Win, Hf, Wf,\nstride, stride, pad, pad)\noutr1 = relu::forward(outc1)\n- [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)\n## layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,\nstride, stride, pad, pad)\noutr2 = relu::forward(outc2)\n- [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)\n## layer 3: affine3 -> relu3 -> dropout\nouta3 = affine::forward(outp2, W3, b3)\noutr3 = relu::forward(outa3)\n@@ -184,14 +182,12 @@ gradients = function(matrix[double] features,\ndouta3 = relu::backward(doutr3, outa3)\n[doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)\n## layer 2: conv2 -> relu2 -> pool2\n- doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)\ndoutc2 = relu::backward(doutr2, outc2)\n[doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,\nHoutp1, Woutp1, Hf, Wf, stride, stride, pad, pad)\n## layer 1: conv1 -> relu1 -> pool1\n- doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)\ndoutc1 = relu::backward(doutr1, outc1)\n[dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, features, W1, b1, C, Hin, Win,\nHf, Wf, stride, stride, pad, pad)\n@@ -314,14 +310,12 @@ predict = function(matrix[double] X, int C, int Hin, int Win, int batch_size,\n[outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride,\npad, pad)\noutr1 = relu::forward(outc1)\n- [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)\n## layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,\nstride, stride, pad, pad)\noutr2 = relu::forward(outc2)\n- [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)\n## layer 3: affine3 -> relu3\nouta3 = affine::forward(outp2, W3, b3)\noutr3 = relu::forward(outa3)\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml", "new_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml", "diff": "@@ -151,14 +151,12 @@ gradients = function(matrix[double] features,\n[outc1, Houtc1, Woutc1] = conv2d::forward(features, W1, b1, C, Hin, Win, Hf, Wf,\nstride, stride, 
pad, pad)\noutr1 = relu::forward(outc1)\n- [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)\n## layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,\nstride, stride, pad, pad)\noutr2 = relu::forward(outc2)\n- [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)\n## layer 3: affine3 -> relu3 -> dropout\nouta3 = affine::forward(outp2, W3, b3)\noutr3 = relu::forward(outa3)\n@@ -178,14 +176,12 @@ gradients = function(matrix[double] features,\ndouta3 = relu::backward(doutr3, outa3)\n[doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)\n## layer 2: conv2 -> relu2 -> pool2\n- doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)\ndoutc2 = relu::backward(doutr2, outc2)\n[doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,\nHoutp1, Woutp1, Hf, Wf, stride, stride, pad, pad)\n## layer 1: conv1 -> relu1 -> pool1\n- doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)\ndoutc1 = relu::backward(doutr1, outc1)\n[dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, features, W1, b1, C, Hin, Win,\nHf, Wf, stride, stride, pad, pad)\n@@ -307,14 +303,12 @@ predict = function(matrix[double] X, int C, int Hin, int Win, int batch_size,\n[outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride,\npad, pad)\noutr1 = relu::forward(outc1)\n- [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0)\n## layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,\nstride, stride, pad, pad)\noutr2 = relu::forward(outc2)\n- [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, 2, 2, 2, 2, 0, 0)\n## layer 3: affine3 -> relu3\nouta3 = affine::forward(outp2, W3, b3)\noutr3 = relu::forward(outa3)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2463] Fix paramserv tests (incorrect named argument usage) With the recently added support for named function arguments various places in SystemML check the validity of used named arguments. This makes the existing paramserv tests fail because they use incorrect name bindings that have been ignored so far.
49,738
25.07.2018 19:14:34
25,200
07e65189ef8a2b9d15f17ae7f502bfd2d7588933
Fix IPA robustness for permuted named function args This patch fixes incorrect size propagation issues when using the recently introduced named function arguments. Specifically, we properly propagate statistics according to the given input-name binding.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java", "diff": "@@ -520,14 +520,16 @@ public class InterProceduralAnalysis\nprivate static void populateLocalVariableMapForFunctionCall( FunctionStatement fstmt, FunctionOp fop, LocalVariableMap callvars, LocalVariableMap vars, FunctionCallSizeInfo fcallSizes )\n{\n- ArrayList<DataIdentifier> inputVars = fstmt.getInputParams();\n+ //note: due to arbitrary binding sequences of named function arguments,\n+ //we cannot use the sequence as defined in the function signature\n+ String[] funArgNames = fop.getInputVariableNames();\nArrayList<Hop> inputOps = fop.getInput();\nString fkey = fop.getFunctionKey();\n- for( int i=0; i<inputVars.size(); i++ )\n+ for( int i=0; i<funArgNames.length; i++ )\n{\n//create mapping between input hops and vars\n- DataIdentifier dat = inputVars.get(i);\n+ DataIdentifier dat = fstmt.getInputParam(funArgNames[i]);\nHop input = inputOps.get(i);\nif( input.getDataType()==DataType.MATRIX )\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/FunctionPotpourriTest.java", "diff": "@@ -39,6 +39,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nprivate final static String TEST_NAME9 = \"FunPotpourriNamedArgsPartial\";\nprivate final static String TEST_NAME10 = \"FunPotpourriNamedArgsUnknown1\";\nprivate final static String TEST_NAME11 = \"FunPotpourriNamedArgsUnknown2\";\n+ private final static String TEST_NAME12 = \"FunPotpourriNamedArgsIPA\";\nprivate final static String TEST_DIR = \"functions/misc/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FunctionPotpourriTest.class.getSimpleName() + \"/\";\n@@ -57,6 +58,7 @@ public class FunctionPotpourriTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME9, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME9, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME10, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME10, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME11, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME11, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME12, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME12, new String[] { \"R\" }) );\n}\n@Test\n@@ -124,6 +126,11 @@ public class FunctionPotpourriTest extends AutomatedTestBase\nrunFunctionTest( TEST_NAME11, true );\n}\n+ @Test\n+ public void testFunctionNamedArgsIPA() {\n+ runFunctionTest( TEST_NAME12, false );\n+ }\n+\nprivate void runFunctionTest(String testName, boolean error) {\nTestConfiguration config = getTestConfiguration(testName);\nloadTestConfiguration(config);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/FunPotpourriNamedArgsIPA.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo2 = function(Matrix[Double] A, Matrix[Double] B) return (Matrix[Double] C){\n+ A = A[,1:100]; # check non-applied rewrite\n+ B = B[1:100,]; # check non-applied rewrite\n+ C = A %*% B + 7;\n+ while(FALSE){} #no inlining\n+}\n+\n+X1 = matrix(1, 100, 101)\n+X2 = matrix(2, 101, 100)\n+\n+C = foo2(B=X2, A=X1);\n+\n+if( nrow(C) != 100 | ncol(C) != 100 )\n+ C = X1 %*% X1; # cause error\n+\n+print(sum(C));\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2467] Fix IPA robustness for permuted named function args This patch fixes incorrect size propagation issues when using the recently introduced named function arguments. Specifically, we properly propagate statistics according to the given input-name binding.
49,760
25.07.2018 20:38:55
25,200
eb182010ba69a645b11f9a8bc18f2722dc7e64d6
Improved MNC sketch propagation (probabilistic rounding) Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "package org.apache.sysml.hops.estim;\nimport java.util.Arrays;\n+import java.util.Random;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -258,18 +259,26 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//(this implies 0s propagate and distribution is preserved)\nint rMaxNnz = 0, cMaxNnz = 0;\nint[] rNnz = new int[h1.getRows()];\n+ Random rn = new Random();\nfor( int i=0; i<h1.getRows(); i++ ) {\n- rNnz[i] = (int) Math.round(nnzOut/nnz1 * h1.rNnz[i]);\n+ rNnz[i] = probRound(nnzOut/nnz1 * h1.rNnz[i], rn);\nrMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n}\nint[] cNnz = new int[h2.getCols()];\nfor( int i=0; i<h2.getCols(); i++ ) {\n- cNnz[i] = (int) Math.round(nnzOut/nnz2 * h2.cNnz[i]);\n+ cNnz[i] = probRound(nnzOut/nnz2 * h2.cNnz[i], rn);\ncMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n}\n//construct new histogram object\nreturn new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n}\n+\n+ private static int probRound(double inNnz, Random rand) {\n+ double temp = Math.floor(inNnz);\n+ double f = inNnz - temp; //non-int fraction [0,1)\n+ double randf = rand.nextDouble(); //uniform [0,1)\n+ return (int)((f > randf) ? temp+1 : temp);\n+ }\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2468] Improved MNC sketch propagation (probabilistic rounding) Closes #807
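The rationale for the probabilistic rounding introduced in this commit: rounding x up with probability equal to its fractional part keeps the expected value at x, so scaled row/column nnz counts do not drift the way deterministic Math.round does. A minimal stand-alone Java sketch (class and method names are illustrative, not part of SystemML):

    import java.util.Random;

    public class ProbRoundSketch {
      // round x up with probability equal to its fractional part
      static int probRound(double x, Random rand) {
        double floor = Math.floor(x);
        double frac = x - floor; // in [0,1)
        return (int) (rand.nextDouble() < frac ? floor + 1 : floor);
      }

      public static void main(String[] args) {
        Random rand = new Random(7);
        double x = 3.3; // e.g., a scaled count nnzOut/nnz1 * rNnz[i]
        double sum = 0;
        int n = 1_000_000;
        for (int i = 0; i < n; i++)
          sum += probRound(x, rand);
        System.out.println(sum / n); // approaches 3.3; Math.round(x) is always 3
      }
    }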
49,738
27.07.2018 17:18:58
25,200
dfa27ba22e3e64a253012f7f77eb918be5e0ef1a
[MINOR] Fix paramserv accumulator handling (uninitialized stats)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/spark/SparkPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/spark/SparkPSWorker.java", "diff": "@@ -26,7 +26,6 @@ import java.util.Map;\nimport org.apache.spark.SparkConf;\nimport org.apache.spark.api.java.function.VoidFunction;\nimport org.apache.spark.util.LongAccumulator;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.parser.Statement;\nimport org.apache.sysml.runtime.codegen.CodegenUtils;\nimport org.apache.sysml.runtime.controlprogram.paramserv.LocalPSWorker;\n@@ -84,7 +83,7 @@ public class SparkPSWorker extends LocalPSWorker implements VoidFunction<Tuple2<\n@Override\npublic void call(Tuple2<Integer, Tuple2<MatrixBlock, MatrixBlock>> input) throws Exception {\n- Timing tSetup = DMLScript.STATISTICS ? new Timing(true) : null;\n+ Timing tSetup = new Timing(true);\nconfigureWorker(input);\naccSetupTime(tSetup);\n@@ -130,43 +129,36 @@ public class SparkPSWorker extends LocalPSWorker implements VoidFunction<Tuple2<\n@Override\n- public void incWorkerNumber() {\n- if (DMLScript.STATISTICS)\n+ protected void incWorkerNumber() {\n_aWorker.add(1);\n}\n@Override\n- public void accLocalModelUpdateTime(Timing time) {\n- if (DMLScript.STATISTICS)\n+ protected void accLocalModelUpdateTime(Timing time) {\n_aUpdate.add((long) time.stop());\n}\n@Override\n- public void accBatchIndexingTime(Timing time) {\n- if (DMLScript.STATISTICS)\n+ protected void accBatchIndexingTime(Timing time) {\n_aIndex.add((long) time.stop());\n}\n@Override\n- public void accGradientComputeTime(Timing time) {\n- if (DMLScript.STATISTICS)\n+ protected void accGradientComputeTime(Timing time) {\n_aGrad.add((long) time.stop());\n}\n@Override\nprotected void accNumEpochs(int n) {\n- if (DMLScript.STATISTICS)\n_nEpochs.add(n);\n}\n@Override\nprotected void accNumBatches(int n) {\n- if (DMLScript.STATISTICS)\n_nBatches.add(n);\n}\nprivate void accSetupTime(Timing tSetup) {\n- if (DMLScript.STATISTICS)\n_aSetup.add((long) tSetup.stop());\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "diff": "@@ -184,12 +184,12 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n// Accumulate the statistics for remote workers\nif (DMLScript.STATISTICS) {\n- Statistics.accPSSetupTime(aSetup.sum());\n- Statistics.incWorkerNumber(aWorker.sum());\n- Statistics.accPSLocalModelUpdateTime(aUpdate.sum());\n- Statistics.accPSBatchIndexingTime(aIndex.sum());\n- Statistics.accPSGradientComputeTime(aGrad.sum());\n- Statistics.accPSRpcRequestTime(aRPC.sum());\n+ Statistics.accPSSetupTime(aSetup.value().longValue());\n+ Statistics.incWorkerNumber(aWorker.value().longValue());\n+ Statistics.accPSLocalModelUpdateTime(aUpdate.value().longValue());\n+ Statistics.accPSBatchIndexingTime(aIndex.value().longValue());\n+ Statistics.accPSGradientComputeTime(aGrad.value().longValue());\n+ Statistics.accPSRpcRequestTime(aRPC.value().longValue());\n}\n// Fetch the final model from ps\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix paramserv accumulator handling (uninitialized stats)
49,727
28.07.2018 20:26:53
25,200
b586d16913196276d5bbd0c0828389aed7e4d9e3
Performance distributed paramserv (partition/serialize) Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "@@ -191,8 +191,8 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\n// Get the gradients\nListObject gradients = (ListObject) _ec.getVariable(_output.getName());\n- ParamservUtils.cleanupData(_ec, bFeatures);\n- ParamservUtils.cleanupData(_ec, bLabels);\n+ ParamservUtils.cleanupData(_ec, Statement.PS_FEATURES);\n+ ParamservUtils.cleanupData(_ec, Statement.PS_LABELS);\nreturn gradients;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/ParamservUtils.java", "diff": "@@ -94,14 +94,12 @@ public class ParamservUtils {\n}\nList<Data> newData = IntStream.range(0, lo.getLength()).mapToObj(i -> {\nData oldData = lo.slice(i);\n- if (oldData instanceof MatrixObject) {\n- MatrixObject mo = (MatrixObject) oldData;\n- return sliceMatrix(mo, 1, mo.getNumRows());\n- } else if (oldData instanceof ListObject || oldData instanceof FrameObject) {\n+ if (oldData instanceof MatrixObject)\n+ return createShallowCopy((MatrixObject) oldData);\n+ else if (oldData instanceof ListObject || oldData instanceof FrameObject)\nthrow new DMLRuntimeException(\"Copy list: does not support list or frame.\");\n- } else {\n+ else\nreturn oldData;\n- }\n}).collect(Collectors.toList());\nreturn new ListObject(newData, lo.getNames());\n}\n@@ -145,14 +143,14 @@ public class ParamservUtils {\nCacheableData<?> cd = (CacheableData<?>) data;\ncd.enableCleanup(true);\nec.cleanupCacheableData(cd);\n- if (LOG.isDebugEnabled()) {\n- LOG.debug(String.format(\"%s has been deleted.\", cd.getFileName()));\n}\n+\n+ public static void cleanupData(ExecutionContext ec, String varName) {\n+ cleanupData(ec, ec.removeVariable(varName));\n}\n- public static void cleanupMatrixObject(ExecutionContext ec, MatrixObject mo) {\n- mo.enableCleanup(true);\n- ec.cleanupCacheableData(mo);\n+ public static void cleanupListObject(ListObject lo) {\n+ cleanupListObject(ExecutionContextFactory.createContext(), lo);\n}\npublic static MatrixObject newMatrixObject(MatrixBlock mb) {\n@@ -169,6 +167,10 @@ public class ParamservUtils {\nreturn result;\n}\n+ public static MatrixObject createShallowCopy(MatrixObject mo) {\n+ return newMatrixObject(mo.acquireReadAndRelease(), false);\n+ }\n+\n/**\n* Slice the matrix\n*\n@@ -178,11 +180,8 @@ public class ParamservUtils {\n* @return new sliced matrix\n*/\npublic static MatrixObject sliceMatrix(MatrixObject mo, long rl, long rh) {\n- MatrixBlock mb = mo.acquireRead();\n- MatrixObject result = newMatrixObject(sliceMatrixBlock(mb, rl, rh));\n- result.enableCleanup(false);\n- mo.release();\n- return result;\n+ MatrixBlock mb = mo.acquireReadAndRelease();\n+ return newMatrixObject(sliceMatrixBlock(mb, rl, rh), false);\n}\n/**\n@@ -335,33 +334,21 @@ public class ParamservUtils {\n/**\n* Assemble the matrix of features and labels according to the rowID\n*\n- * @param numRows row size of the data\n* @param featuresRDD indexed features matrix block\n* @param labelsRDD indexed labels matrix block\n* @return Assembled rdd with rowID as key while matrix of features and labels as value (rowID -> features, labels)\n*/\n- public static JavaPairRDD<Long, Tuple2<MatrixBlock, MatrixBlock>> assembleTrainingData(long numRows, 
JavaPairRDD<MatrixIndexes, MatrixBlock> featuresRDD, JavaPairRDD<MatrixIndexes, MatrixBlock> labelsRDD) {\n- JavaPairRDD<Long, MatrixBlock> fRDD = groupMatrix(numRows, featuresRDD);\n- JavaPairRDD<Long, MatrixBlock> lRDD = groupMatrix(numRows, labelsRDD);\n+ public static JavaPairRDD<Long, Tuple2<MatrixBlock, MatrixBlock>> assembleTrainingData(JavaPairRDD<MatrixIndexes, MatrixBlock> featuresRDD, JavaPairRDD<MatrixIndexes, MatrixBlock> labelsRDD) {\n+ JavaPairRDD<Long, MatrixBlock> fRDD = groupMatrix(featuresRDD);\n+ JavaPairRDD<Long, MatrixBlock> lRDD = groupMatrix(labelsRDD);\n//TODO Add an additional physical operator which broadcasts the labels directly (broadcast join with features) if certain memory budgets are satisfied\nreturn fRDD.join(lRDD);\n}\n- private static JavaPairRDD<Long, MatrixBlock> groupMatrix(long numRows, JavaPairRDD<MatrixIndexes, MatrixBlock> rdd) {\n+ private static JavaPairRDD<Long, MatrixBlock> groupMatrix(JavaPairRDD<MatrixIndexes, MatrixBlock> rdd) {\n//TODO could use join and aggregation to avoid unnecessary shuffle introduced by reduceByKey\nreturn rdd.mapToPair(input -> new Tuple2<>(input._1.getRowIndex(), new Tuple2<>(input._1.getColumnIndex(), input._2)))\n.aggregateByKey(new LinkedList<Tuple2<Long, MatrixBlock>>(),\n- new Partitioner() {\n- private static final long serialVersionUID = -7032660778344579236L;\n- @Override\n- public int getPartition(Object rblkID) {\n- return Math.toIntExact((Long) rblkID);\n- }\n- @Override\n- public int numPartitions() {\n- return Math.toIntExact(numRows);\n- }\n- },\n(list, input) -> {\nlist.add(input);\nreturn list;\n@@ -392,7 +379,7 @@ public class ParamservUtils {\nDataPartitionerSparkMapper mapper = new DataPartitionerSparkMapper(scheme, workerNum, sec, (int) features.getNumRows());\nJavaPairRDD<Integer, Tuple2<MatrixBlock, MatrixBlock>> result = ParamservUtils\n- .assembleTrainingData(features.getNumRows(), featuresRDD, labelsRDD) // Combine features and labels into a pair (rowBlockID => (features, labels))\n+ .assembleTrainingData(featuresRDD, labelsRDD) // Combine features and labels into a pair (rowBlockID => (features, labels))\n.flatMapToPair(mapper) // Do the data partitioning on spark (workerID => (rowBlockID, (single row features, single row labels))\n// Aggregate the partitioned matrix according to rowID for each worker\n// i.e. 
(workerID => ordered list[(rowBlockID, (single row features, single row labels)]\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/spark/rpc/PSRpcObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/spark/rpc/PSRpcObject.java", "diff": "@@ -46,30 +46,33 @@ public abstract class PSRpcObject {\n/**\n* Deep serialize and write of a list object (currently only support list containing matrices)\n* @param lo a list object containing only matrices\n- * @param dos output data to write to\n+ * @param output output data to write to\n*/\n- protected void serializeAndWriteListObject(ListObject lo, DataOutput dos) throws IOException {\n+ protected void serializeAndWriteListObject(ListObject lo, DataOutput output) throws IOException {\nvalidateListObject(lo);\n- dos.writeInt(lo.getLength()); //write list length\n- dos.writeBoolean(lo.isNamedList()); //write list named\n+ output.writeInt(lo.getLength()); //write list length\n+ output.writeBoolean(lo.isNamedList()); //write list named\nfor (int i = 0; i < lo.getLength(); i++) {\nif (lo.isNamedList())\n- dos.writeUTF(lo.getName(i)); //write name\n+ output.writeUTF(lo.getName(i)); //write name\n((MatrixObject) lo.getData().get(i))\n- .acquireReadAndRelease().write(dos); //write matrix\n+ .acquireReadAndRelease().write(output); //write matrix\n}\n+ // Cleanup the list object\n+ // because it is transferred to remote worker in binary format\n+ ParamservUtils.cleanupListObject(lo);\n}\n- protected ListObject readAndDeserialize(DataInput dis) throws IOException {\n- int listLen = dis.readInt();\n+ protected ListObject readAndDeserialize(DataInput input) throws IOException {\n+ int listLen = input.readInt();\nList<Data> data = new ArrayList<>();\n- List<String> names = dis.readBoolean() ?\n+ List<String> names = input.readBoolean() ?\nnew ArrayList<>() : null;\nfor(int i=0; i<listLen; i++) {\nif( names != null )\n- names.add(dis.readUTF());\n+ names.add(input.readUTF());\nMatrixBlock mb = new MatrixBlock();\n- mb.readFields(dis);\n+ mb.readFields(input);\ndata.add(ParamservUtils.newMatrixObject(mb, false));\n}\nreturn new ListObject(data, names);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/paramserv/RpcObjectTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/paramserv/RpcObjectTest.java", "diff": "@@ -32,27 +32,27 @@ import org.junit.Test;\npublic class RpcObjectTest {\n- @Test\n- public void testPSRpcCall() throws IOException {\n+ private ListObject generateData() {\nMatrixObject mo1 = SerializationTest.generateDummyMatrix(10);\nMatrixObject mo2 = SerializationTest.generateDummyMatrix(20);\n- ListObject lo = new ListObject(Arrays.asList(mo1, mo2));\n- PSRpcCall expected = new PSRpcCall(PSRpcObject.PUSH, 1, lo);\n+ return new ListObject(Arrays.asList(mo1, mo2));\n+ }\n+\n+ @Test\n+ public void testPSRpcCall() throws IOException {\n+ PSRpcCall expected = new PSRpcCall(PSRpcObject.PUSH, 1, generateData());\nPSRpcCall actual = new PSRpcCall(expected.serialize());\nAssert.assertTrue(Arrays.equals(\n- expected.serialize().array(),\n+ new PSRpcCall(PSRpcObject.PUSH, 1, generateData()).serialize().array(),\nactual.serialize().array()));\n}\n@Test\npublic void testPSRpcResponse() throws IOException {\n- MatrixObject mo1 = SerializationTest.generateDummyMatrix(10);\n- MatrixObject mo2 = SerializationTest.generateDummyMatrix(20);\n- ListObject lo = new ListObject(Arrays.asList(mo1, 
mo2));\n- PSRpcResponse expected = new PSRpcResponse(PSRpcResponse.Type.SUCCESS, lo);\n+ PSRpcResponse expected = new PSRpcResponse(PSRpcResponse.Type.SUCCESS, generateData());\nPSRpcResponse actual = new PSRpcResponse(expected.serialize());\nAssert.assertTrue(Arrays.equals(\n- expected.serialize().array(),\n+ new PSRpcResponse(PSRpcResponse.Type.SUCCESS, generateData()).serialize().array(),\nactual.serialize().array()));\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2469] Performance distributed paramserv (partition/serialize) Closes #809.
49,738
30.07.2018 14:34:49
25,200
50ddddb90b28c6e28e97195dded9696edcdc3b45
Fix distributed spark cumsumprod (aggregate 1st pass) This patch fixes result correctness issues of distributed spark operations of the new cumulative aggregate cumsumprod. In detail, we now use cumsumprod(AB)[n] instead of sum(AB) as aggregation function during the forward pass of the generic two-pass algorithm.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CumulativeAggregateSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CumulativeAggregateSPInstruction.java", "diff": "@@ -27,6 +27,7 @@ import scala.Tuple2;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.functionobjects.Builtin;\nimport org.apache.sysml.runtime.functionobjects.PlusMultiply;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\n@@ -36,6 +37,7 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.matrix.data.OperationsOnMatrixValues;\nimport org.apache.sysml.runtime.matrix.operators.AggregateUnaryOperator;\n+import org.apache.sysml.runtime.matrix.operators.UnaryOperator;\npublic class CumulativeAggregateSPInstruction extends AggregateUnarySPInstruction {\n@@ -79,10 +81,11 @@ public class CumulativeAggregateSPInstruction extends AggregateUnarySPInstructio\n{\nprivate static final long serialVersionUID = 11324676268945117L;\n- private AggregateUnaryOperator _op = null;\n- private long _rlen = -1;\n- private int _brlen = -1;\n- private int _bclen = -1;\n+ private final AggregateUnaryOperator _op;\n+ private UnaryOperator _uop = null;\n+ private final long _rlen;\n+ private final int _brlen;\n+ private final int _bclen;\npublic RDDCumAggFunction( AggregateUnaryOperator op, long rlen, int brlen, int bclen ) {\n_op = op;\n@@ -105,10 +108,12 @@ public class CumulativeAggregateSPInstruction extends AggregateUnarySPInstructio\nAggregateUnaryOperator aop = (AggregateUnaryOperator)_op;\nif( aop.aggOp.increOp.fn instanceof PlusMultiply ) { //cumsumprod\naop.indexFn.execute(ixIn, ixOut);\n- MatrixBlock t1 = blkIn.slice(0, blkIn.getNumRows()-1, 0, 0, new MatrixBlock());\n+ if( _uop == null )\n+ _uop = new UnaryOperator(Builtin.getBuiltinFnObject(\"ucumk+*\"));\n+ MatrixBlock t1 = (MatrixBlock) blkIn.unaryOperations(_uop, new MatrixBlock());\nMatrixBlock t2 = blkIn.slice(0, blkIn.getNumRows()-1, 1, 1, new MatrixBlock());\nblkOut.reset(1, 2);\n- blkOut.quickSetValue(0, 0, t1.sum());\n+ blkOut.quickSetValue(0, 0, t1.quickGetValue(t1.getNumRows()-1, 0));\nblkOut.quickSetValue(0, 1, t2.prod());\n}\nelse { //general case\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/unary/matrix/FullCumsumprodTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/unary/matrix/FullCumsumprodTest.java", "diff": "@@ -111,8 +111,7 @@ public class FullCumsumprodTest extends AutomatedTestBase\nString.valueOf(reverse).toUpperCase(), output(\"C\") };\ndouble[][] A = getRandomMatrix(rows, 1, -10, 10, sparsity, 3);\n- double[][] B = getRandomMatrix(rows, 1, -1, 1, 0.1, 7);\n- //FIXME double[][] B = getRandomMatrix(rows, 1, -1, 1, 0.9, 7);\n+ double[][] B = getRandomMatrix(rows, 1, -1, 1, 0.9, 7);\nwriteInputMatrixWithMTD(\"A\", A, false);\nwriteInputMatrixWithMTD(\"B\", B, false);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2470] Fix distributed spark cumsumprod (aggregate 1st pass) This patch fixes result correctness issues of distributed spark operations of the new cumulative aggregate cumsumprod. In detail, we now use cumsumprod(AB)[n] instead of sum(AB) as aggregation function during the forward pass of the generic two-pass algorithm.
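Cumsumprod follows the recurrence c[i] = a[i] + b[i] * c[i-1], so the effect of a row block on all later rows is captured by the last cumulative value of that block, not by its sum — which is what the corrected first aggregation pass now carries per block (together with prod(B)). A small Java sketch of the recurrence and the block split (illustrative names only, not the SystemML kernel):

    public class CumsumprodSketch {
      // c[i] = a[i] + b[i] * c[i-1], with carry-in 'prev' as c[-1]
      static double[] cumsumprod(double[] a, double[] b, double prev) {
        double[] c = new double[a.length];
        for (int i = 0; i < a.length; i++)
          c[i] = a[i] + b[i] * (i == 0 ? prev : c[i - 1]);
        return c;
      }

      public static void main(String[] args) {
        double[] a = {1, 2, 3, 4}, b = {0.5, 0.5, 0.5, 0.5};
        double[] full = cumsumprod(a, b, 0);
        // split into two row blocks: the carry handed to block 2 is the last
        // cumulative value of block 1 (cumsumprod(AB)[n]), not sum(AB)
        double carry = cumsumprod(new double[]{1, 2}, new double[]{0.5, 0.5}, 0)[1];
        double[] blk2 = cumsumprod(new double[]{3, 4}, new double[]{0.5, 0.5}, carry);
        System.out.println(full[3] + " == " + blk2[1]); // 6.125 == 6.125
      }
    }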
49,738
30.07.2018 18:55:13
25,200
827d73bd58087f2c774697bb6114ccf23d768bb1
Fix robustness IPA wrt updates of function call graph
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/FunctionCallGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/FunctionCallGraph.java", "diff": "@@ -179,7 +179,9 @@ public class FunctionCallGraph\n* @param sb source statement block\n*/\npublic void removeFunctionCall(String fkey, FunctionOp fop, StatementBlock sb) {\n+ if( _fCalls.containsKey(fkey) )\n_fCalls.get(fkey).remove(fop);\n+ if( _fCallsSB.containsKey(fkey) )\n_fCallsSB.get(fkey).remove(sb);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java", "diff": "@@ -136,9 +136,12 @@ public class InterProceduralAnalysis\n_passes.add(new IPAPassRemoveUnnecessaryCheckpoints());\n_passes.add(new IPAPassRemoveConstantBinaryOps());\n_passes.add(new IPAPassPropagateReplaceLiterals());\n- _passes.add(new IPAPassApplyStaticHopRewrites());\n_passes.add(new IPAPassInlineFunctions());\n_passes.add(new IPAPassEliminateDeadCode());\n+ //note: apply rewrites last because statement block rewrites\n+ //might merge relevant statement blocks in special cases, which\n+ //would require an update of the function call graph\n+ _passes.add(new IPAPassApplyStaticHopRewrites());\n}\npublic InterProceduralAnalysis(StatementBlock sb) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2473] Fix robustness IPA wrt updates of function call graph
49,738
30.07.2018 21:58:37
25,200
f2c0d13e2d785ae9143f38cc59b06f14b7dd4fc8
Fix matrix/frame left indexing into list data types This patch fixes the missing left indexing support for frames and matrices into lists. Furthermore, this also includes a robustness fix for inferring the output data type of builtin functions when the target is a list left indexing (which propagated incorrectly to the source).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/PartialAggregate.java", "new_path": "src/main/java/org/apache/sysml/lops/PartialAggregate.java", "diff": "@@ -377,6 +377,14 @@ public class PartialAggregate extends Lop\n}\n}\n+ case SumProduct: {\n+ switch( dir ) {\n+ case RowCol: return \"ua+*\";\n+ case Row: return \"uar+*\";\n+ case Col: return \"uac+*\";\n+ }\n+ }\n+\ncase Max: {\nif( dir == DirectionTypes.RowCol )\nreturn \"uamax\";\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -2325,10 +2325,7 @@ public class DMLTranslator\n}\nHop currBuiltinOp = null;\n-\n- if (target == null) {\n- target = createTarget(source);\n- }\n+ target = (target == null) ? createTarget(source) : target;\n// Construct the hop based on the type of Builtin function\nswitch (source.getOpCode()) {\n@@ -2344,15 +2341,15 @@ public class DMLTranslator\ncase COLMEAN:\ncase COLPROD:\ncase COLVAR:\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.MATRIX, target.getValueType(),\nAggOp.valueOf(source.getOpCode().name().substring(3)), Direction.Col, expr);\nbreak;\ncase COLSD:\n// colStdDevs = sqrt(colVariances)\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.MATRIX,\ntarget.getValueType(), AggOp.VAR, Direction.Col, expr);\n- currBuiltinOp = new UnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new UnaryOp(target.getName(), DataType.MATRIX,\ntarget.getValueType(), Hop.OpOp1.SQRT, currBuiltinOp);\nbreak;\n@@ -2362,25 +2359,25 @@ public class DMLTranslator\ncase ROWMEAN:\ncase ROWPROD:\ncase ROWVAR:\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.MATRIX, target.getValueType(),\nAggOp.valueOf(source.getOpCode().name().substring(3)), Direction.Row, expr);\nbreak;\ncase ROWINDEXMAX:\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(), AggOp.MAXINDEX,\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.MATRIX, target.getValueType(), AggOp.MAXINDEX,\nDirection.Row, expr);\nbreak;\ncase ROWINDEXMIN:\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(), AggOp.MININDEX,\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.MATRIX, target.getValueType(), AggOp.MININDEX,\nDirection.Row, expr);\nbreak;\ncase ROWSD:\n// rowStdDevs = sqrt(rowVariances)\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.MATRIX,\ntarget.getValueType(), AggOp.VAR, Direction.Row, expr);\n- currBuiltinOp = new UnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new UnaryOp(target.getName(), DataType.MATRIX,\ntarget.getValueType(), Hop.OpOp1.SQRT, currBuiltinOp);\nbreak;\n@@ -2409,38 +2406,38 @@ public class DMLTranslator\nbreak;\ncase EXISTS:\n- currBuiltinOp = new UnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new UnaryOp(target.getName(), DataType.SCALAR,\ntarget.getValueType(), Hop.OpOp1.EXISTS, expr);\nbreak;\ncase SUM:\ncase PROD:\ncase VAR:\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(),\n+ 
currBuiltinOp = new AggUnaryOp(target.getName(), DataType.SCALAR, target.getValueType(),\nAggOp.valueOf(source.getOpCode().name()), Direction.RowCol, expr);\nbreak;\ncase MEAN:\nif ( expr2 == null ) {\n// example: x = mean(Y);\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(), AggOp.MEAN,\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.SCALAR, target.getValueType(), AggOp.MEAN,\nDirection.RowCol, expr);\n}\nelse {\n// example: x = mean(Y,W);\n// stable weighted mean is implemented by using centralMoment with order = 0\nHop orderHop = new LiteralOp(0);\n- currBuiltinOp=new TernaryOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp=new TernaryOp(target.getName(), DataType.SCALAR, target.getValueType(),\nHop.OpOp3.MOMENT, expr, expr2, orderHop);\n}\nbreak;\ncase SD:\n// stdDev = sqrt(variance)\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.SCALAR,\ntarget.getValueType(), AggOp.VAR, Direction.RowCol, expr);\nHopRewriteUtils.setOutputParametersForScalar(currBuiltinOp);\n- currBuiltinOp = new UnaryOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new UnaryOp(target.getName(), DataType.SCALAR,\ntarget.getValueType(), Hop.OpOp1.SQRT, currBuiltinOp);\nbreak;\n@@ -2448,7 +2445,7 @@ public class DMLTranslator\ncase MAX:\n//construct AggUnary for min(X) but BinaryOp for min(X,Y) and NaryOp for min(X,Y,Z)\ncurrBuiltinOp = (expr2 == null) ?\n- new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(),\n+ new AggUnaryOp(target.getName(), DataType.SCALAR, target.getValueType(),\nAggOp.valueOf(source.getOpCode().name()), Direction.RowCol, expr) :\n(source.getAllExpr().length == 2) ?\nnew BinaryOp(target.getName(), target.getDataType(), target.getValueType(),\n@@ -2480,14 +2477,14 @@ public class DMLTranslator\nbreak;\ncase TRACE:\n- currBuiltinOp = new AggUnaryOp(target.getName(), target.getDataType(), target.getValueType(), AggOp.TRACE,\n+ currBuiltinOp = new AggUnaryOp(target.getName(), DataType.SCALAR, target.getValueType(), AggOp.TRACE,\nDirection.RowCol, expr);\nbreak;\ncase TRANS:\ncase DIAG:\ncase REV:\n- currBuiltinOp = new ReorgOp(target.getName(), target.getDataType(),\n+ currBuiltinOp = new ReorgOp(target.getName(), DataType.MATRIX,\ntarget.getValueType(), ReOrgOp.valueOf(source.getOpCode().name()), expr);\nbreak;\n@@ -2725,7 +2722,7 @@ public class DMLTranslator\nif( op == null )\nthrow new HopsException(\"Unsupported outer vector binary operation: \"+((LiteralOp)expr3).getStringValue());\n- currBuiltinOp = new BinaryOp(target.getName(), target.getDataType(), target.getValueType(), op, expr, expr2);\n+ currBuiltinOp = new BinaryOp(target.getName(), DataType.MATRIX, target.getValueType(), op, expr, expr2);\n((BinaryOp)currBuiltinOp).setOuterVectorOperation(true); //flag op as specific outer vector operation\ncurrBuiltinOp.refreshSizeInformation(); //force size reevaluation according to 'outer' flag otherwise danger of incorrect dims\nbreak;\n@@ -2735,21 +2732,21 @@ public class DMLTranslator\nArrayList<Hop> inHops1 = new ArrayList<>();\ninHops1.add(expr);\ninHops1.add(expr2);\n- currBuiltinOp = new DnnOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp = new DnnOp(target.getName(), DataType.MATRIX, target.getValueType(),\nOpOpDnn.valueOf(source.getOpCode().name()), inHops1);\nsetBlockSizeAndRefreshSizeInfo(expr, currBuiltinOp);\nbreak;\n}\ncase AVG_POOL:\ncase 
MAX_POOL: {\n- currBuiltinOp = new DnnOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp = new DnnOp(target.getName(), DataType.MATRIX, target.getValueType(),\nOpOpDnn.valueOf(source.getOpCode().name()), getALHopsForPoolingForwardIM2COL(expr, source, 1, hops));\nsetBlockSizeAndRefreshSizeInfo(expr, currBuiltinOp);\nbreak;\n}\ncase AVG_POOL_BACKWARD:\ncase MAX_POOL_BACKWARD: {\n- currBuiltinOp = new DnnOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp = new DnnOp(target.getName(), DataType.MATRIX, target.getValueType(),\nOpOpDnn.valueOf(source.getOpCode().name()), getALHopsForConvOpPoolingCOL2IM(expr, source, 1, hops));\nsetBlockSizeAndRefreshSizeInfo(expr, currBuiltinOp);\nbreak;\n@@ -2757,7 +2754,7 @@ public class DMLTranslator\ncase CONV2D:\ncase CONV2D_BACKWARD_FILTER:\ncase CONV2D_BACKWARD_DATA: {\n- currBuiltinOp = new DnnOp(target.getName(), target.getDataType(), target.getValueType(),\n+ currBuiltinOp = new DnnOp(target.getName(), DataType.MATRIX, target.getValueType(),\nOpOpDnn.valueOf(source.getOpCode().name()), getALHopsForConvOp(expr, source, 1, hops));\nsetBlockSizeAndRefreshSizeInfo(expr, currBuiltinOp);\nbreak;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "diff": "@@ -225,6 +225,10 @@ public class ExecutionContext {\nreturn (FrameObject) dat;\n}\n+ public CacheableData<?> getCacheableData(CPOperand input) {\n+ return getCacheableData(input.getName());\n+ }\n+\npublic CacheableData<?> getCacheableData(String varname) {\nData dat = getVariable(varname);\n//error handling if non existing or no matrix\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListIndexingCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListIndexingCPInstruction.java", "diff": "@@ -23,6 +23,7 @@ import org.apache.sysml.lops.LeftIndex;\nimport org.apache.sysml.lops.RightIndex;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\npublic final class ListIndexingCPInstruction extends IndexingCPInstruction {\n@@ -76,6 +77,14 @@ public final class ListIndexingCPInstruction extends IndexingCPInstruction {\nelse\nec.setVariable(output.getName(), lin.copy().set((int)rl.getLongValue()-1, scalar));\n}\n+ else if( input2.getDataType().isMatrix() ) { //LIST <- MATRIX/FRAME\n+ CacheableData<?> dat = ec.getCacheableData(input2);\n+ dat.enableCleanup(false);\n+ if( rl.getValueType()==ValueType.STRING )\n+ ec.setVariable(output.getName(), lin.copy().set(rl.getStringValue(), dat));\n+ else\n+ ec.setVariable(output.getName(), lin.copy().set((int)rl.getLongValue()-1, dat));\n+ }\nelse {\nthrow new DMLRuntimeException(\"Unsupported list \"\n+ \"left indexing rhs type: \"+input2.getDataType().name());\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2475] Fix matrix/frame left indexing into list data types This patch fixes the missing left indexing support for frames and matrices into lists. Furthermore, this also includes a robustness fix for inferring the output data type of builtin functions when the target is a list left indexing (which propagated incorrectly to the source).
49,738
30.07.2018 22:06:49
25,200
430c04d5988cb542b2037938b239f9d69706d7c5
Support for list result variables in parfor This patch introduces support for list data types as parfor result variables. In detail, this includes (1) a generalized parfor dependency analysis for lists, (2) a hardened parfor optimizer for list data types, and (3) a dedicated result merge procedure for lists.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "diff": "@@ -686,10 +686,10 @@ public class ParForStatementBlock extends ForStatementBlock\nfor(DataIdentifier write : datsUpdated) {\nif( !c._var.equals( write.getName() ) ) continue;\n- if( cdt != DataType.MATRIX ) {\n+ if( cdt != DataType.MATRIX && cdt != DataType.LIST ) {\n//cannot infer type, need to exit (conservative approach)\n- throw new LanguageException(\"PARFOR loop dependency analysis: \"\n- + \"cannot check for dependencies due to unknown datatype of var '\"+c._var+\"'.\");\n+ throw new LanguageException(\"PARFOR loop dependency analysis: cannot check \"\n+ + \"for dependencies due to unknown datatype of var '\"+c._var+\"': \"+cdt.name()+\".\");\n}\nDataIdentifier dat2 = write;\n@@ -724,7 +724,8 @@ public class ParForStatementBlock extends ForStatementBlock\nif( ABORT_ON_FIRST_DEPENDENCY )\nreturn;\n}\n- else if( cdt == DataType.MATRIX && dat2dt == DataType.MATRIX )\n+ else if( (cdt == DataType.MATRIX && dat2dt == DataType.MATRIX)\n+ || (cdt == DataType.LIST && dat2dt == DataType.LIST ) )\n{\nboolean invalid = false;\nif( runEqualsCheck(c._dat, dat2) )\n@@ -746,8 +747,8 @@ public class ParForStatementBlock extends ForStatementBlock\n}\nelse { //if( c._dat.getDataType() == DataType.UNKNOWN )\n//cannot infer type, need to exit (conservative approach)\n- throw new LanguageException(\"PARFOR loop dependency analysis: \"\n- + \"cannot check for dependencies due to unknown datatype of var '\"+c._var+\"'.\");\n+ throw new LanguageException(\"PARFOR loop dependency analysis: cannot check \"\n+ + \"for dependencies due to unknown datatype of var '\"+c._var+\"': \"+cdt.name()+\".\");\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -1670,19 +1670,17 @@ public class ParForProgramBlock extends ForProgramBlock\nfor( ResultVar var : _resultVars ) //foreach non-local write\n{\nData dat = ec.getVariable(var._name);\n+\nif( dat instanceof MatrixObject ) //robustness scalars\n{\nMatrixObject out = (MatrixObject) dat;\n- MatrixObject[] in = new MatrixObject[ results.length ];\n- for( int i=0; i< results.length; i++ )\n- in[i] = (MatrixObject) results[i].get( var._name );\n+ MatrixObject[] in = Arrays.stream(results).map(vars ->\n+ vars.get(var._name)).toArray(MatrixObject[]::new);\nString fname = constructResultMergeFileName();\nResultMerge rm = createResultMerge(_resultMerge, out, in, fname, var._isAccum, ec);\n- MatrixObject outNew = null;\n- if( USE_PARALLEL_RESULT_MERGE )\n- outNew = rm.executeParallelMerge( _numThreads );\n- else\n- outNew = rm.executeSerialMerge();\n+ MatrixObject outNew = USE_PARALLEL_RESULT_MERGE ?\n+ rm.executeParallelMerge(_numThreads) :\n+ rm.executeSerialMerge();\n//cleanup existing var\nData exdata = ec.removeVariable(var._name);\n@@ -1695,6 +1693,27 @@ public class ParForProgramBlock extends ForProgramBlock\n//set merged result variable\nec.setVariable(var._name, outNew);\n}\n+ else if(dat instanceof ListObject) {\n+ ListObject oldList = (ListObject) dat;\n+ ListObject newList = new ListObject(oldList);\n+ ListObject[] in = Arrays.stream(results).map(vars ->\n+ vars.get(var._name)).toArray(ListObject[]::new);\n+\n+ //merge modified list entries into result\n+ for(int i=0; 
i<oldList.getLength(); i++) {\n+ Data compare = oldList.slice(i);\n+ for( int j=0; j<in.length; j++ ) {\n+ Data tmp = in[j].slice(i);\n+ if( compare != tmp ) {\n+ newList.set(i, tmp);\n+ break; //inner for loop\n+ }\n+ }\n+ }\n+\n+ //set merged result variable\n+ ec.setVariable(var._name, newList);\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -417,7 +417,8 @@ public class OptimizerRuleBased extends Optimizer\ndouble mem = getMemoryEstimate(c, vars);\nif( dpf != PartitionFormat.NONE\n&& dpf._dpf != PDataPartitionFormat.BLOCK_WISE_M_N\n- && (constrained || (mem > _lm/2 && mem > _rm/2)) ) {\n+ && (constrained || (mem > _lm/2 && mem > _rm/2))\n+ && !vars.get(c).getDataType().isList() ) {\ncand2.put( c, dpf );\n}\n}\n@@ -2039,7 +2040,6 @@ public class OptimizerRuleBased extends Optimizer\nif( dat instanceof MatrixObject && ((MatrixObject)dat).getNnz()!=0 //subject to result merge with compare\n&& n.hasOnlySimpleChilds() //guaranteed no conditional indexing\n&& rContainsResultFullReplace(n, rvar._name, itervar, (MatrixObject)dat) //guaranteed full matrix replace\n- //&& !pfsb.variablesRead().containsVariable(rvar) //never read variable in loop body\n&& !rIsReadInRightIndexing(n, rvar._name) //never read variable in loop body\n&& ((MatrixObject)dat).getNumRows()<=Integer.MAX_VALUE\n&& ((MatrixObject)dat).getNumColumns()<=Integer.MAX_VALUE )\n@@ -2334,10 +2334,11 @@ public class OptimizerRuleBased extends Optimizer\nLeftIndexingOp hop = (LeftIndexingOp) OptTreeConverter.getAbstractPlanMapping().getMappedHop(n.getID());\n//check agains set of varname\nString varName = hop.getInput().get(0).getName();\n- if( ResultVar.contains(resultVars, varName) && vars.keySet().contains(varName) )\n- {\n+ if( ResultVar.contains(resultVars, varName) && vars.keySet().contains(varName) ) {\n+ Data dat = vars.get(hop.getInput().get(0).getName());\n//dims of result vars must be known at this point in time\n- MatrixObject mo = (MatrixObject) vars.get( hop.getInput().get(0).getName() );\n+ if( dat instanceof MatrixObject ) {\n+ MatrixObject mo = (MatrixObject) dat;\nlong rows = mo.getNumRows();\nlong cols = mo.getNumColumns();\ndouble memBudget = inLocal ? 
OptimizerUtils.getLocalMemBudget() :\n@@ -2346,6 +2347,7 @@ public class OptimizerRuleBased extends Optimizer\n}\n}\n}\n+ }\nelse\n{\nfor( OptNode c : n.getChilds() )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "diff": "@@ -47,6 +47,13 @@ public class ListObject extends Data {\n_names = names;\n}\n+ public ListObject(ListObject that) {\n+ this(new ArrayList<>(that._data), (that._names != null) ?\n+ new ArrayList<>(that._names) : null);\n+ if( that._dataState != null )\n+ _dataState = Arrays.copyOf(that._dataState, getLength());\n+ }\n+\npublic void setStatus(boolean[] status) {\n_dataState = status;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForDependencyAnalysisTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForDependencyAnalysisTest.java", "diff": "@@ -67,7 +67,7 @@ import org.junit.Test;\n* * accumulators\n* 53a: no, 53b dep, 53c dep, 53d dep, 53e dep\n* * lists\n- * 54a: no, 54b: dep, 54c: dep\n+ * 54a: no, 54b: no, 54c: dep, 54d: dep\n*/\npublic class ParForDependencyAnalysisTest extends AutomatedTestBase\n{\n@@ -76,9 +76,7 @@ public class ParForDependencyAnalysisTest extends AutomatedTestBase\nprivate static final String TEST_CLASS_DIR = TEST_DIR + ParForDependencyAnalysisTest.class.getSimpleName() + \"/\";\n@Override\n- public void setUp() {\n-\n- }\n+ public void setUp() {}\n@Test\npublic void testDependencyAnalysis1() { runTest(\"parfor1.dml\", false); }\n@@ -322,11 +320,13 @@ public class ParForDependencyAnalysisTest extends AutomatedTestBase\npublic void testDependencyAnalysis54a() { runTest(\"parfor54a.dml\", false); }\n@Test\n- public void testDependencyAnalysis54b() { runTest(\"parfor54b.dml\", true); }\n+ public void testDependencyAnalysis54b() { runTest(\"parfor54b.dml\", false); }\n@Test\npublic void testDependencyAnalysis54c() { runTest(\"parfor54c.dml\", true); }\n+ @Test\n+ public void testDependencyAnalysis54d() { runTest(\"parfor54d.dml\", true); }\nprivate void runTest( String scriptFilename, boolean expectedException ) {\nboolean raisedException = false;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForListResultVarsTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.parfor;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+\n+public class ParForListResultVarsTest extends AutomatedTestBase\n+{\n+ private final static String TEST_DIR = \"functions/parfor/\";\n+ private final static String TEST_NAME1 = \"parfor_listResults\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + ParForListResultVarsTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME1,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testParForListResult1a() {\n+ runListResultVarTest(TEST_NAME1, 2, 1);\n+ }\n+\n+ @Test\n+ public void testParForListResult1b() {\n+ runListResultVarTest(TEST_NAME1, 35, 10);\n+ }\n+\n+ private void runListResultVarTest(String testName, int rows, int cols) {\n+ loadTestConfiguration(getTestConfiguration(testName));\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\n+ programArgs = new String[]{\"-explain\",\"-args\",\n+ String.valueOf(rows), String.valueOf(cols), output(\"R\") };\n+\n+ runTest(true, false, null, -1);\n+ Assert.assertEquals(new Double(7),\n+ readDMLMatrixFromHDFS(\"R\").get(new CellIndex(1,1)));\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor54d.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+A = matrix(7, rows=2, cols=2);\n+B = matrix(3, rows=2, cols=2);\n+C = list(A, B, A);\n+parfor( i in 2:3 )\n+ C[i] = as.matrix(C[i-1])+7;\n+print(sum(as.matrix(C[1])));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor_listResults.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = matrix(0, 1, $1*$2);\n+L = list(A+1,A+2,A+3,A+4,A+5,A+6,A+7);\n+\n+parfor(i in 1:length(L))\n+ L[i] = rowMeans(as.matrix(L[i]));\n+\n+R1 = matrix(0,0,1)\n+for(i in 1:length(L))\n+ R1 = rbind(R1, as.matrix(L[i]));\n+\n+R = as.matrix(sum(R1==seq(1,7)));\n+write(R, $3);\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/parfor/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/parfor/ZPackageSuite.java", "diff": "@@ -34,6 +34,7 @@ import org.junit.runners.Suite;\nParForDataPartitionExecuteTest.class,\nParForDataPartitionLeftIndexingTest.class,\nParForDependencyAnalysisTest.class,\n+ ParForListResultVarsTest.class,\nParForFunctionSerializationTest.class,\nParForMultipleDataPartitioningTest.class,\nParForNaNResultMergeTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2474] Support for list result variables in parfor This patch introduces support for list data types as parfor result variables. In detail, this includes (1) a generalized parfor dependency analysis for lists, (2) a hardened parfor optimizer for list data types, and (3) a dedicated result merge procedure for lists.
49,760
31.07.2018 21:25:18
25,200
5dee6c7edd8b43e14ce1ee280863df5011a989cf
Extended MNC sketch propagation (track full diags) Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -149,11 +149,13 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nprivate final int rN1, cN1; //number of rows/cols with nnz=1\nprivate final int rNonEmpty, cNonEmpty; //number of non-empty rows/cols (w/ empty is nnz=0)\nprivate final int rNdiv2, cNdiv2; //number of rows/cols with nnz > #cols/2 and #rows/2\n+ private boolean fullDiag; //true if there exists a full diagonal of nonzeros\npublic MatrixHistogram(MatrixBlock in, boolean useExcepts) {\n// 1) allocate basic synopsis\nrNnz = new int[in.getNumRows()];\ncNnz = new int[in.getNumColumns()];\n+ fullDiag = in.getNumRows() == in.getNonZeros();\n// 2) compute basic synopsis details\nif( !in.isEmpty() ) {\n@@ -161,10 +163,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nSparseBlock sblock = in.getSparseBlock();\nfor( int i=0; i<in.getNumRows(); i++ ) {\nif( sblock.isEmpty(i) ) continue;\n+ int apos = sblock.pos(i);\nint alen = sblock.size(i);\n+ int[] aix = sblock.indexes(i);\nrNnz[i] = alen;\n- LibMatrixAgg.countAgg(sblock.values(i),\n- cNnz, sblock.indexes(i), sblock.pos(i), alen);\n+ LibMatrixAgg.countAgg(sblock.values(i), cNnz, aix, apos, alen);\n+ fullDiag &= aix[apos] == i;\n}\n}\nelse {\n@@ -174,6 +178,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nint lnnz = 0, aix = dblock.pos(i);\nfor( int j=0; j<in.getNumColumns(); j++ ) {\nif( avals[aix+j] != 0 ) {\n+ fullDiag &= (i == j);\ncNnz[j] ++;\nlnnz ++;\n}\n@@ -250,6 +255,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\npublic static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n+ //exact propagation if lhs or rhs full diag\n+ if( h1.fullDiag ) return h2;\n+ if( h2.fullDiag ) return h1;\n+\n//get input/output nnz for scaling\nlong nnz1 = Arrays.stream(h1.rNnz).sum();\nlong nnz2 = Arrays.stream(h2.cNnz).sum();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2468] Extended MNC sketch propagation (track full diags) Closes #810.
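The case detected by the fullDiag flag above: if one operand of a matrix product is a full diagonal (exactly one nonzero per row, on the diagonal), the product has the same nonzero structure as the other operand, so that operand's histogram is an exact output sketch. A self-contained Java check of the column-count invariant (illustrative only, ignores accidental cancellation):

    import java.util.Arrays;

    public class FullDiagSketch {
      // column nnz counts of C = A %*% B for small dense inputs
      static int[] colNnz(double[][] A, double[][] B) {
        int n = A.length, k = B.length, m = B[0].length;
        int[] cnt = new int[m];
        for (int j = 0; j < m; j++)
          for (int i = 0; i < n; i++) {
            double v = 0;
            for (int l = 0; l < k; l++)
              v += A[i][l] * B[l][j];
            if (v != 0) cnt[j]++;
          }
        return cnt;
      }

      public static void main(String[] args) {
        double[][] D = {{2,0,0},{0,3,0},{0,0,4}}; // full diagonal: nnz == #rows
        double[][] A = {{1,0},{0,5},{7,0}};
        // prints [2, 1], identical to A's own per-column counts,
        // so the MNC sketch of A can be propagated unchanged
        System.out.println(Arrays.toString(colNnz(D, A)));
      }
    }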
49,738
31.07.2018 22:32:53
25,200
4916d454ba370f76abacf3c6bdd6e2c526917fd9
Extended literal replacement for scalar list lookups This patch extends the literal replacement rewrites during dynamic recompilation to also handle scalar list lookups (scalar cast over list lookup) which is important for hyper parameters that affect sizes of subsequent operations.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/recompile/LiteralReplacement.java", "new_path": "src/main/java/org/apache/sysml/hops/recompile/LiteralReplacement.java", "diff": "@@ -36,7 +36,6 @@ import org.apache.sysml.hops.Hop.OpOp1;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.lops.compile.Dag;\nimport org.apache.sysml.parser.Expression.DataType;\n-import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n@@ -78,6 +77,7 @@ public class LiteralReplacement\nlit = (lit==null) ? replaceLiteralFullUnaryAggregateRightIndexing(c, vars) : lit;\nlit = (lit==null) ? replaceTReadMatrixFromList(c, vars) : lit;\nlit = (lit==null) ? replaceTReadMatrixLookupFromList(c, vars) : lit;\n+ lit = (lit==null) ? replaceTReadScalarLookupFromList(c, vars) : lit;\n}\n//replace hop w/ literal on demand\n@@ -368,7 +368,7 @@ public class LiteralReplacement\nListObject list = (ListObject)vars.get(ixIn.getName());\nString varname = Dag.getNextUniqueVarname(DataType.MATRIX);\nLiteralOp lit = (LiteralOp) ix.getInput().get(1);\n- MatrixObject mo = (MatrixObject) ((lit.getValueType() == ValueType.STRING) ?\n+ MatrixObject mo = (MatrixObject) (!lit.getValueType().isNumeric() ?\nlist.slice(lit.getName()) : list.slice((int)lit.getLongValue()-1));\nvars.put(varname, mo);\nret = HopRewriteUtils.createTransientRead(varname, c);\n@@ -377,6 +377,27 @@ public class LiteralReplacement\nreturn ret;\n}\n+ private static LiteralOp replaceTReadScalarLookupFromList( Hop c, LocalVariableMap vars ) {\n+ //pattern: as.scalar(X[i:i]) or as.scalar(X['a','a']) with X being a list\n+ if( HopRewriteUtils.isUnary(c, OpOp1.CAST_AS_SCALAR)\n+ && c.getInput().get(0) instanceof IndexingOp ) {\n+ Hop ix = c.getInput().get(0);\n+ Hop ixIn = c.getInput().get(0).getInput().get(0);\n+ if( ixIn.getDataType() == DataType.LIST\n+ && HopRewriteUtils.isData(ixIn, DataOpTypes.TRANSIENTREAD)\n+ && ix.getInput().get(1) instanceof LiteralOp\n+ && ix.getInput().get(2) instanceof LiteralOp\n+ && ix.getInput().get(1) == ix.getInput().get(2) ) {\n+ ListObject list = (ListObject)vars.get(ixIn.getName());\n+ LiteralOp lit = (LiteralOp) ix.getInput().get(1);\n+ ScalarObject so = (ScalarObject) (!lit.getValueType().isNumeric() ?\n+ list.slice(lit.getName()) : list.slice((int)lit.getLongValue()-1));\n+ return ScalarObjectFactory.createLiteralOp(so);\n+ }\n+ }\n+ return null;\n+ }\n+\n///////////////////////////////\n// Utility functions\n///////////////////////////////\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -91,26 +91,20 @@ public class HopRewriteUtils\n// literal handling\npublic static boolean getBooleanValue( LiteralOp op ) {\n- switch( op.getValueType() )\n- {\n+ switch( op.getValueType() ) {\ncase DOUBLE: return op.getDoubleValue() != 0;\ncase INT: return op.getLongValue() != 0;\ncase BOOLEAN: return op.getBooleanValue();\n-\ndefault: throw new HopsException(\"Invalid boolean value: \"+op.getValueType());\n}\n}\n- public static boolean getBooleanValueSafe( LiteralOp op )\n- {\n- try\n- {\n- switch( op.getValueType() )\n- {\n+ public static boolean getBooleanValueSafe( LiteralOp op ) {\n+ try {\n+ switch( op.getValueType() ) {\ncase DOUBLE: return 
op.getDoubleValue() != 0;\ncase INT: return op.getLongValue() != 0;\ncase BOOLEAN: return op.getBooleanValue();\n-\ndefault: throw new HopsException(\"Invalid boolean value: \"+op.getValueType());\n}\n}\n@@ -123,6 +117,7 @@ public class HopRewriteUtils\npublic static double getDoubleValue( LiteralOp op ) {\nswitch( op.getValueType() ) {\n+ case STRING:\ncase DOUBLE: return op.getDoubleValue();\ncase INT: return op.getLongValue();\ncase BOOLEAN: return op.getBooleanValue() ? 1 : 0;\n@@ -152,6 +147,7 @@ public class HopRewriteUtils\npublic static long getIntValue( LiteralOp op ) {\nswitch( op.getValueType() ) {\ncase DOUBLE: return UtilFunctions.toLong(op.getDoubleValue());\n+ case STRING:\ncase INT: return op.getLongValue();\ncase BOOLEAN: return op.getBooleanValue() ? 1 : 0;\ndefault: throw new HopsException(\"Invalid int value: \"+op.getValueType());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/Expression.java", "new_path": "src/main/java/org/apache/sysml/parser/Expression.java", "diff": "@@ -205,7 +205,10 @@ public abstract class Expression implements ParseInfo\n* Value types (int, double, string, boolean, object, unknown).\n*/\npublic enum ValueType {\n- INT, DOUBLE, STRING, BOOLEAN, OBJECT, UNKNOWN\n+ INT, DOUBLE, STRING, BOOLEAN, OBJECT, UNKNOWN;\n+ public boolean isNumeric() {\n+ return this == INT || this == DOUBLE;\n+ }\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml", "new_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml", "diff": "@@ -132,7 +132,7 @@ gradients = function(matrix[double] features,\n# PB: not be able to get scalar from list\n- C = 1\n+ C = as.scalar(hyperparams[\"C\"])\nHin = 28\nWin = 28\nHf = 5\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2476] Extended literal replacement for scalar list lookups This patch extends the literal replacement rewrites during dynamic recompilation to also handle scalar list lookups (scalar cast over list lookup), which is important for hyperparameters that affect the sizes of subsequent operations.
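A hedged sketch of the fold enabled by this patch, using the classes it touches (variable names illustrative; assumes the SystemML runtime classes changed above are on the classpath): a scalar list lookup such as as.scalar(hyperparams["C"]) is resolved against the live ListObject during dynamic recompilation and replaced by a plain literal, so the value can drive size propagation of subsequent operations.

    // vars is the LocalVariableMap available during recompilation
    ListObject hyperparams = (ListObject) vars.get("hyperparams"); // bound list variable
    ScalarObject so = (ScalarObject) hyperparams.slice("C");       // lookup by name; numeric keys use slice(index-1)
    LiteralOp lit = ScalarObjectFactory.createLiteralOp(so);       // literal that replaces as.scalar(hyperparams["C"])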
49,760
01.08.2018 18:45:50
25,200
31610e36db121bcfa289210fdff900682a6b96ec
Extended MNC estimator for other operations, part 1 Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n/**\n@@ -41,14 +41,18 @@ public class EstimatorBasicAvg extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- return estim(m1.getMatrixCharacteristics(), m2.getMatrixCharacteristics());\n+ return estimIntern(m1.getSparsity(), m2.getSparsity(),\n+ m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n- return estimIntern(\n- OptimizerUtils.getSparsity(mc1), OptimizerUtils.getSparsity(mc2),\n- mc1.getRows(), mc1.getCols(), mc2.getCols());\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m, OpCode op) {\n+ throw new NotImplementedException();\n}\nprivate double estimIntern(double sp1, double sp2, long m, long k, long n) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n/**\n@@ -45,14 +45,18 @@ public class EstimatorBasicWorst extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- return estim(m1.getMatrixCharacteristics(), m2.getMatrixCharacteristics());\n+ return estimIntern(m1.getSparsity(), m2.getSparsity(),\n+ m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n- return estimIntern(\n- OptimizerUtils.getSparsity(mc1), OptimizerUtils.getSparsity(mc2),\n- mc1.getRows(), mc1.getCols(), mc2.getCols());\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m, OpCode op) {\n+ throw new NotImplementedException();\n}\nprivate double estimIntern(double sp1, double sp2, long m, long k, long n) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -21,8 +21,8 @@ package org.apache.sysml.hops.estim;\nimport java.util.BitSet;\n+import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -68,9 +68,13 @@ public class EstimatorBitsetMM extends SparsityEstimator {\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n- LOG.warn(\"Meta-data-only estimates not supported in 
EstimatorBitsetMM, falling back to EstimatorBasicAvg.\");\n- return new EstimatorBasicAvg().estim(mc1, mc2);\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m, OpCode op) {\n+ throw new NotImplementedException();\n}\nprivate static class BitsetMatrix {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -83,9 +83,13 @@ public class EstimatorDensityMap extends SparsityEstimator\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n- LOG.warn(\"Meta-data-only estimates not supported in EstimatorDensityMap, falling back to EstimatorBasicAvg.\");\n- return new EstimatorBasicAvg().estim(mc1, mc2);\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m, OpCode op) {\n+ throw new NotImplementedException();\n}\nprivate MatrixBlock computeDensityMap(MatrixBlock in) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -22,7 +22,6 @@ import org.apache.commons.lang.NotImplementedException;\nimport org.apache.commons.math3.distribution.ExponentialDistribution;\nimport org.apache.commons.math3.random.Well1024a;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -58,7 +57,12 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m, OpCode op) {\nthrow new NotImplementedException();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -21,9 +21,10 @@ package org.apache.sysml.hops.estim;\nimport java.util.Arrays;\nimport java.util.Random;\n+import java.util.stream.IntStream;\n+import org.apache.directory.api.util.exception.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixAgg;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -65,7 +66,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnew MatrixHistogram(root.getRight().getData(), _useExcepts);\n//estimate output sparsity 
based on input histograms\n- double ret = estimIntern(h1, h2);\n+ double ret = estimIntern(h1, h2, OpCode.MM);\n//derive and memoize output histogram\nroot.setSynopsis(MatrixHistogram.deriveOutputHistogram(h1, h2, ret));\n@@ -75,20 +76,64 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n+ return estim(m1, m2, OpCode.MM);\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\nMatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\nMatrixHistogram h2 = (m1 == m2) ? //self product\nh1 : new MatrixHistogram(m2, _useExcepts);\n- return estimIntern(h1, h2);\n+ return estimIntern(h1, h2, op);\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n- LOG.warn(\"Meta-data-only estimates not supported in \"\n- + \"EstimatorMatrixHistogram, falling back to EstimatorBasicAvg.\");\n- return new EstimatorBasicAvg().estim(mc1, mc2);\n+ public double estim(MatrixBlock m1, OpCode op) {\n+ MatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\n+ return estimIntern(h1, null, op);\n+ }\n+\n+ private double estimIntern(MatrixHistogram h1, MatrixHistogram h2, OpCode op) {\n+ double msize = h1.getRows()*h1.getCols();\n+\n+ switch (op) {\n+ case MM:\n+ return estimInternMM(h1, h2);\n+ case MULT:\n+ return Math.min(\n+ IntStream.range(0, h1.getRows()).mapToDouble(i -> (double)h1.rNnz[i]/msize * (double)h2.rNnz[i]/msize).sum(),\n+ IntStream.range(0, h1.getCols()).mapToDouble(i -> (double)h1.cNnz[i]/msize * (double)h2.cNnz[i]/msize).sum());\n+ case PLUS:\n+ return Math.min(\n+ IntStream.range(0, h1.getRows()).mapToDouble(i -> (double)h1.rNnz[i]/msize\n+ + (double)h2.rNnz[i]/msize - (double)h1.rNnz[i]/msize * (double)h2.rNnz[i]/msize).sum(),\n+ IntStream.range(0, h1.getCols()).mapToDouble(i -> (double)h1.cNnz[i]/msize\n+ + (double)h2.cNnz[i]/msize - (double)h1.cNnz[i]/msize * (double)h2.cNnz[i]/msize).sum());\n+ case EQZERO:\n+ return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(),\n+ (long)h1.getRows() * h1.getCols() - h1.getNonZeros());\n+ case DIAG:\n+ return (h1.getCols()==1) ?\n+ OptimizerUtils.getSparsity(h1.getRows(), h1.getRows(), h1.getNonZeros()) :\n+ OptimizerUtils.getSparsity(h1.getRows(), 1, Math.min(h1.getRows(), h1.getNonZeros()));\n+ //binary operations that preserve sparsity exactly\n+ case CBIND:\n+ return OptimizerUtils.getSparsity(h1.getRows(),\n+ h1.getCols()+h2.getCols(), h1.getNonZeros() + h2.getNonZeros());\n+ case RBIND:\n+ return OptimizerUtils.getSparsity(h1.getRows()+h2.getRows(),\n+ h1.getCols(), h1.getNonZeros() + h2.getNonZeros());\n+ //unary operation that preserve sparsity exactly\n+ case NEQZERO:\n+ case TRANS:\n+ case RESHAPE:\n+ return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\n+ default:\n+ throw new NotImplementedException();\n+ }\n}\n- private double estimIntern(MatrixHistogram h1, MatrixHistogram h2) {\n+ private double estimInternMM(MatrixHistogram h1, MatrixHistogram h2) {\nlong nnz = 0;\n//special case, with exact sparsity estimate, where the dot product\n//dot(h1.cNnz,h2rNnz) gives the exact number of non-zeros in the output\n@@ -254,6 +299,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nreturn cNnz.length;\n}\n+ public long getNonZeros() {\n+ return getRows() < getCols() ?\n+ IntStream.range(0, getRows()).mapToLong(i-> rNnz[i]).sum() :\n+ IntStream.range(0, getRows()).mapToLong(i-> cNnz[i]).sum();\n+ }\n+\npublic static MatrixHistogram 
deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n//exact propagation if lhs or rhs full diag\nif( h1.fullDiag ) return h2;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixAgg;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -76,9 +76,13 @@ public class EstimatorSample extends SparsityEstimator\n}\n@Override\n- public double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2) {\n- LOG.warn(\"Meta-data-only estimates not supported by EstimatorSample, falling back to EstimatorBasicAvg.\");\n- return new EstimatorBasicAvg().estim(mc1, mc2);\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ throw new NotImplementedException();\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m, OpCode op) {\n+ throw new NotImplementedException();\n}\nprivate int[] computeColumnNnz(MatrixBlock in, int[] ix) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "diff": "@@ -21,13 +21,19 @@ package org.apache.sysml.hops.estim;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\npublic abstract class SparsityEstimator\n{\nprotected static final Log LOG = LogFactory.getLog(SparsityEstimator.class.getName());\n+ public static enum OpCode {\n+ MM,\n+ MULT, PLUS, EQZERO, NEQZERO,\n+ CBIND, RBIND,\n+ TRANS, DIAG, RESHAPE;\n+ }\n+\n/**\n* Estimates the output sparsity of a DAG of matrix multiplications\n* for the given operator graph of a single root node.\n@@ -38,8 +44,7 @@ public abstract class SparsityEstimator\npublic abstract double estim(MMNode root);\n/**\n- * Estimates the output sparsity of a single matrix multiplication\n- * for the two given matrices.\n+ * Estimates the output sparsity for a single matrix multiplication.\n*\n* @param m1 left-hand-side operand\n* @param m2 right-hand-side operand\n@@ -48,13 +53,21 @@ public abstract class SparsityEstimator\npublic abstract double estim(MatrixBlock m1, MatrixBlock m2);\n/**\n- * Estimates the output sparsity of a single matrix multiplication\n- * for the two given matrices represented by meta data only.\n+ * Estimates the output sparsity for a given binary operation.\n*\n- * @param mc1 left-hand-side operand\n- * @param mc2 right-hand-side operand\n+ * @param m1 left-hand-side operand\n+ * @param m2 right-hand-side operand\n+ * @param op operator code\n* @return sparsity\n*/\n- public abstract double estim(MatrixCharacteristics mc1, MatrixCharacteristics mc2);\n+ public abstract double estim(MatrixBlock m1, MatrixBlock m2, OpCode op);\n+ /**\n+ * Estimates the output sparsity for a given unary operation.\n+ *\n+ * @param m1 left-hand-side operand\n+ * @param op operator code\n+ * @return sparsity\n+ */\n+ public abstract double estim(MatrixBlock m, OpCode op);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended MNC estimator for other operations, part 1 Closes #811.
49,738
01.08.2018 22:21:57
25,200
51154f17b316df932dd147d86b740edc4358914d
[MINOR] Fix integer overflow in MNC sparsity estimator (other ops)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -94,7 +94,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\nprivate double estimIntern(MatrixHistogram h1, MatrixHistogram h2, OpCode op) {\n- double msize = h1.getRows()*h1.getCols();\n+ double msize = (double)h1.getRows()*h1.getCols();\nswitch (op) {\ncase MM:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix integer overflow in MNC sparsity estimator (other ops)
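For context on the one-line fix above: both histogram dimensions are int, so rows*cols was evaluated in 32-bit arithmetic and silently wrapped for large matrices before the implicit widening to double. A minimal standalone illustration (dimensions hypothetical):

    public class MsizeOverflow {
        public static void main(String[] args) {
            int rows = 100_000, cols = 100_000;
            double wrapped = rows * cols;          // int multiply overflows first: 1.410065408E9
            double correct = (double) rows * cols; // widen one operand, as in the patch: 1.0E10
            System.out.println(wrapped + " vs " + correct);
        }
    }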
49,738
02.08.2018 00:03:42
25,200
aed66df1360f5a74833bd64d457e26050b35164b
Rework cleanup of lists and matrices/frames in lists This patch contains a major rework of the cleanup of lists as well as any cacheable data (i.e., matrices and frames) in list objects. Specifically, we now ensure full consistency with the behavior without lists, which prevents missing cleanups and thus unnecessary evictions.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -31,6 +31,7 @@ import org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.util.ProgramConverter;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDSequence;\nimport org.apache.sysml.runtime.instructions.cp.Data;\n+import org.apache.sysml.runtime.instructions.cp.ListObject;\nimport org.apache.sysml.utils.Statistics;\n/**\n@@ -113,7 +114,8 @@ public class LocalVariableMap implements Cloneable\n}\npublic boolean hasReferences( Data d ) {\n- return localMap.containsValue(d);\n+ return localMap.values().stream().anyMatch(e -> (e instanceof ListObject) ?\n+ ((ListObject)e).getData().contains(d) : e == d);\n}\npublic void setRegisteredOutputs(HashSet<String> outputs) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -1684,8 +1684,8 @@ public class ParForProgramBlock extends ForProgramBlock\n//cleanup existing var\nData exdata = ec.removeVariable(var._name);\n- if( exdata != null && exdata != outNew && exdata instanceof MatrixObject )\n- ec.cleanupCacheableData((MatrixObject)exdata);\n+ if( exdata != null && exdata != outNew )\n+ ec.cleanupDataObject(exdata);\n//cleanup of intermediate result variables\ncleanWorkerResultVariables( ec, out, in );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "diff": "@@ -539,21 +539,40 @@ public class ExecutionContext {\n*/\npublic boolean[] pinVariables(List<String> varList)\n{\n+ //analyze list variables\n+ int nlist = 0;\n+ int nlistItems = 0;\n+ for( int i=0; i<varList.size(); i++ ) {\n+ Data dat = _variables.get(varList.get(i));\n+ if( dat instanceof ListObject ) {\n+ nlistItems += ((ListObject)dat).getNumCacheableData();\n+ nlist++;\n+ }\n+ }\n+\n//2-pass approach since multiple vars might refer to same matrix object\n- boolean[] varsState = new boolean[varList.size()];\n+ boolean[] varsState = new boolean[varList.size()-nlist+nlistItems];\n//step 1) get current information\n- for( int i=0; i<varList.size(); i++ ) {\n+ for( int i=0, pos=0; i<varList.size(); i++ ) {\nData dat = _variables.get(varList.get(i));\n- if( dat instanceof MatrixObject )\n- varsState[i] = ((MatrixObject)dat).isCleanupEnabled();\n+ if( dat instanceof CacheableData<?> )\n+ varsState[pos++] = ((CacheableData<?>)dat).isCleanupEnabled();\n+ else if( dat instanceof ListObject )\n+ for( Data dat2 : ((ListObject)dat).getData() )\n+ if( dat2 instanceof CacheableData<?> )\n+ varsState[pos++] = ((CacheableData<?>)dat2).isCleanupEnabled();\n}\n//step 2) pin variables\nfor( int i=0; i<varList.size(); i++ ) {\nData dat = _variables.get(varList.get(i));\n- if( dat instanceof MatrixObject )\n- ((MatrixObject)dat).enableCleanup(false);\n+ if( dat instanceof CacheableData<?> )\n+ ((CacheableData<?>)dat).enableCleanup(false);\n+ else if( dat instanceof ListObject )\n+ for( Data dat2 : ((ListObject)dat).getData() )\n+ if( dat2 instanceof CacheableData<?> )\n+ ((CacheableData<?>)dat2).enableCleanup(false);\n}\nreturn varsState;\n@@ -576,10 +595,14 @@ public class 
ExecutionContext {\n* @param varsState variable state\n*/\npublic void unpinVariables(List<String> varList, boolean[] varsState) {\n- for( int i=0; i<varList.size(); i++ ) {\n+ for( int i=0, pos=0; i<varList.size(); i++ ) {\nData dat = _variables.get(varList.get(i));\n- if( dat instanceof MatrixObject )\n- ((MatrixObject)dat).enableCleanup(varsState[i]);\n+ if( dat instanceof CacheableData<?> )\n+ ((CacheableData<?>)dat).enableCleanup(varsState[pos++]);\n+ else if( dat instanceof ListObject )\n+ for( Data dat2 : ((ListObject)dat).getData() )\n+ if( dat2 instanceof CacheableData<?> )\n+ ((CacheableData<?>)dat2).enableCleanup(varsState[pos++]);\n}\n}\n@@ -608,6 +631,16 @@ public class ExecutionContext {\nreturn ret;\n}\n+ public final void cleanupDataObject(Data dat) {\n+ if( dat == null ) return;\n+ if ( dat instanceof CacheableData )\n+ cleanupCacheableData( (CacheableData<?>)dat );\n+ else if( dat instanceof ListObject )\n+ for( Data dat2 : ((ListObject)dat).getData() )\n+ if( dat2 instanceof CacheableData<?> )\n+ cleanupCacheableData( (CacheableData<?>)dat2 );\n+ }\n+\npublic void cleanupCacheableData(CacheableData<?> mo) {\nif (DMLScript.JMLC_MEM_STATISTICS)\nStatistics.removeCPMemObject(System.identityHashCode(mo));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/FunctionCallCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/FunctionCallCPInstruction.java", "diff": "@@ -32,7 +32,6 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.DMLScriptException;\nimport org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysml.runtime.instructions.Instruction;\n@@ -181,9 +180,7 @@ public class FunctionCallCPInstruction extends CPInstruction {\nif( expectRetVars.contains(varName) )\ncontinue;\n//cleanup unexpected return values to avoid leaks\n- Data var = fn_ec.removeVariable(varName);\n- if( var instanceof CacheableData )\n- fn_ec.cleanupCacheableData((CacheableData<?>)var);\n+ fn_ec.cleanupDataObject(fn_ec.removeVariable(varName));\n}\n// Unpin the pinned variables\n@@ -200,9 +197,8 @@ public class FunctionCallCPInstruction extends CPInstruction {\n//cleanup existing data bound to output variable name\nData exdata = ec.removeVariable(boundVarName);\n- if ( exdata != null && exdata instanceof CacheableData && exdata != boundValue ) {\n- ec.cleanupCacheableData( (CacheableData<?>)exdata );\n- }\n+ if( exdata != boundValue )\n+ ec.cleanupDataObject(exdata);\n//add/replace data in symbol table\nec.setVariable(boundVarName, boundValue);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ListObject.java", "diff": "@@ -34,17 +34,18 @@ public class ListObject extends Data {\nprivate final List<Data> _data;\nprivate boolean[] _dataState = null;\nprivate List<String> _names = null;\n+ private int _nCacheable;\npublic ListObject(List<Data> data) {\n- super(DataType.LIST, ValueType.UNKNOWN);\n- _data = data;\n- _names = null;\n+ this(data, null);\n}\npublic ListObject(List<Data> data, List<String> names) {\nsuper(DataType.LIST, 
ValueType.UNKNOWN);\n_data = data;\n_names = names;\n+ _nCacheable = (int) data.stream().filter(\n+ d -> d instanceof CacheableData).count();\n}\npublic ListObject(ListObject that) {\n@@ -66,6 +67,10 @@ public class ListObject extends Data {\nreturn _data.size();\n}\n+ public int getNumCacheableData() {\n+ return _nCacheable;\n+ }\n+\npublic List<String> getNames() {\nreturn _names;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java", "diff": "@@ -344,9 +344,7 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\n//create list object over all inputs\nListObject list = new ListObject(data, names);\n-\n- //disable cleanup of individual objects and store cleanup state\n- list.setStatus(ec.pinVariables(new ArrayList<>(params.values())));\n+ list.setStatus(new boolean[params.size()]);\nec.setVariable(output.getName(), list);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ScalarBuiltinNaryCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ScalarBuiltinNaryCPInstruction.java", "diff": "@@ -97,10 +97,7 @@ public class ScalarBuiltinNaryCPInstruction extends BuiltinNaryCPInstruction {\n//create list object over all inputs\nListObject list = new ListObject(data);\n-\n- //disable cleanup of individual objects and store cleanup state\n- list.setStatus(ec.pinVariables(Arrays.stream(inputs)\n- .map(in -> in.getName()).collect(Collectors.toList())));\n+ list.setStatus(new boolean[data.size()]);\nec.setVariable(output.getName(), list);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java", "diff": "@@ -738,9 +738,8 @@ public class VariableCPInstruction extends CPInstruction {\nData tgt = ec.removeVariable(getInput2().getName());\n//cleanup matrix data on fs/hdfs (if necessary)\n- if ( tgt != null && tgt instanceof CacheableData ) {\n- ec.cleanupCacheableData((CacheableData<?>) tgt);\n- }\n+ if( tgt != null )\n+ ec.cleanupDataObject(tgt);\n}\n// do the actual move\n@@ -788,9 +787,8 @@ public class VariableCPInstruction extends CPInstruction {\nData input2_data = ec.removeVariable(getInput2().getName());\n//cleanup matrix data on fs/hdfs (if necessary)\n- if ( input2_data != null && input2_data instanceof CacheableData ) {\n- ec.cleanupCacheableData((CacheableData<?>) input2_data);\n- }\n+ if( input2_data != null )\n+ ec.cleanupDataObject(input2_data);\n// do the actual copy!\nec.setVariable(getInput2().getName(), dd);\n@@ -844,9 +842,8 @@ public class VariableCPInstruction extends CPInstruction {\n// remove variable from symbol table\nData dat = ec.removeVariable(varname);\n//cleanup matrix data on fs/hdfs (if necessary)\n- if ( dat != null && dat instanceof CacheableData ) {\n- ec.cleanupCacheableData((CacheableData<?>) dat);\n- }\n+ if( dat != null )\n+ ec.cleanupDataObject(dat);\n}\n/**\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2481] Rework cleanup of lists and matrices/frames in lists This patch contains a major rework of the cleanup of lists as well as any cacheable data (i.e., matrices and frames) in list objects. Specifically, we now ensure full consistency with the behavior without lists, which prevents missing cleanups and thus unnecessary evictions.
49,736
02.08.2018 16:20:08
25,200
180c4f281153e80fad6340d8bf0d31454d0cb0a0
Added support for sysml.gpu.memory.util.factor property
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<!-- the floating point precision. supported values are double, single -->\n<sysml.floating.point.precision>double</sysml.floating.point.precision>\n- <!-- the eviction policy for the GPU bufferpool. supported values are lru, mru, lfu, min_evict, align_memory -->\n+ <!-- the eviction policy for the GPU bufferpool. Supported values are lru, mru, lfu, min_evict, align_memory -->\n<sysml.gpu.eviction.policy>align_memory</sysml.gpu.eviction.policy>\n<!-- maximum wrap length for instruction and miscellaneous timer column of statistics -->\n<!-- Advanced optimization: fraction of driver memory to use for GPU shadow buffer. This optimization is ignored for double precision.\nBy default, it is disabled (hence set to 0.0). If you intend to train network larger than GPU memory size, consider using single precision and setting this to 0.1 -->\n<sysml.gpu.eviction.shadow.bufferSize>0.0</sysml.gpu.eviction.shadow.bufferSize>\n+\n+ <!-- Fraction of available GPU memory to use. This is similar to TensorFlow's per_process_gpu_memory_fraction configuration property. (default: 0.9) -->\n+ <sysml.gpu.memory.util.factor>0.9</sysml.gpu.memory.util.factor>\n+\n</root>\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -124,6 +124,7 @@ public class DMLScript\npublic static boolean PRINT_GPU_MEMORY_INFO = false; // whether to print GPU memory-related information\npublic static long EVICTION_SHADOW_BUFFER_MAX_BYTES = 0; // maximum number of bytes to use for shadow buffer\npublic static long EVICTION_SHADOW_BUFFER_CURR_BYTES = 0; // number of bytes to use for shadow buffer\n+ public static double GPU_MEMORY_UTILIZATION_FACTOR = 0.9; // fraction of available GPU memory to use\n/**\n* Global variable indicating the script type (DML or PYDML). 
Can be used\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -78,16 +78,22 @@ public class ScriptExecutorUtils {\n// Whether extra statistics useful for developers and others interested\n// in digging into performance problems are recorded and displayed\nDMLScript.FINEGRAINED_STATISTICS = DMLScript.STATISTICS && dmlconf.getBooleanValue(DMLConfig.EXTRA_FINEGRAINED_STATS);\n- DMLScript.PRINT_GPU_MEMORY_INFO = dmlconf.getBooleanValue(DMLConfig.PRINT_GPU_MEMORY_INFO);\n- DMLScript.SYNCHRONIZE_GPU = dmlconf.getBooleanValue(DMLConfig.SYNCHRONIZE_GPU);\nCacheableData.CACHING_BUFFER_SIZE = dmlconf.getDoubleValue(DMLConfig.CACHING_BUFFER_SIZE);\nif(CacheableData.CACHING_BUFFER_SIZE < 0 || CacheableData.CACHING_BUFFER_SIZE > 1)\nthrow new RuntimeException(\"Incorrect value (\" + CacheableData.CACHING_BUFFER_SIZE + \") for the configuration \" + DMLConfig.CACHING_BUFFER_SIZE);\n- DMLScript.EAGER_CUDA_FREE = dmlconf.getBooleanValue(DMLConfig.EAGER_CUDA_FREE);\n+\nDMLScript.STATISTICS_MAX_WRAP_LEN = dmlconf.getIntValue(DMLConfig.STATS_MAX_WRAP_LEN);\nNativeHelper.initialize(dmlconf.getTextValue(DMLConfig.NATIVE_BLAS_DIR), dmlconf.getTextValue(DMLConfig.NATIVE_BLAS).trim());\nif(DMLScript.USE_ACCELERATOR) {\n+ DMLScript.SYNCHRONIZE_GPU = dmlconf.getBooleanValue(DMLConfig.SYNCHRONIZE_GPU);\n+ DMLScript.EAGER_CUDA_FREE = dmlconf.getBooleanValue(DMLConfig.EAGER_CUDA_FREE);\n+ DMLScript.PRINT_GPU_MEMORY_INFO = dmlconf.getBooleanValue(DMLConfig.PRINT_GPU_MEMORY_INFO);\n+ DMLScript.GPU_MEMORY_UTILIZATION_FACTOR = dmlconf.getDoubleValue(DMLConfig.GPU_MEMORY_UTILIZATION_FACTOR);\n+ if(DMLScript.GPU_MEMORY_UTILIZATION_FACTOR < 0 || DMLScript.GPU_MEMORY_UTILIZATION_FACTOR > 1) {\n+ throw new RuntimeException(\"Incorrect value (\" + DMLScript.GPU_MEMORY_UTILIZATION_FACTOR + \") for the configuration:\" + DMLConfig.GPU_MEMORY_UTILIZATION_FACTOR);\n+ }\n+\nDMLScript.FLOATING_POINT_PRECISION = dmlconf.getTextValue(DMLConfig.FLOATING_POINT_PRECISION);\norg.apache.sysml.runtime.matrix.data.LibMatrixCUDA.resetFloatingPointPrecision();\nif(DMLScript.FLOATING_POINT_PRECISION.equals(\"double\")) {\n@@ -96,7 +102,7 @@ public class ScriptExecutorUtils {\nelse {\ndouble shadowBufferSize = dmlconf.getDoubleValue(DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\nif(shadowBufferSize < 0 || shadowBufferSize > 1)\n- throw new RuntimeException(\"Incorrect value (\" + shadowBufferSize + \") for the configuration \" + DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\n+ throw new RuntimeException(\"Incorrect value (\" + shadowBufferSize + \") for the configuration:\" + DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\nDMLScript.EVICTION_SHADOW_BUFFER_MAX_BYTES = (long) (((double)InfrastructureAnalyzer.getLocalMaxMemory())*shadowBufferSize);\nif(DMLScript.EVICTION_SHADOW_BUFFER_MAX_BYTES > 0 &&\nDMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES > DMLScript.EVICTION_SHADOW_BUFFER_MAX_BYTES) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -37,8 +37,6 @@ import java.util.stream.Collectors;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\n-import org.apache.sysml.conf.ConfigurationManager;\n-import org.apache.sysml.conf.DMLConfig;\nimport 
org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\n@@ -130,11 +128,6 @@ public class GPUMemoryManager {\n// This often happens if user tries to use both TF and SystemML, and TF grabs onto 90% of the memory ahead of time.\nprivate static final double WARN_UTILIZATION_FACTOR = 0.7;\n- // Invoke cudaMemGetInfo to get available memory information. Useful if GPU is shared among multiple application.\n- public double GPU_MEMORY_UTILIZATION_FACTOR = ConfigurationManager.getDMLConfig()\n- .getDoubleValue(DMLConfig.GPU_MEMORY_UTILIZATION_FACTOR);\n-\n-\npublic GPUMemoryManager(GPUContext gpuCtx) {\nmatrixMemoryManager = new GPUMatrixMemoryManager(this);\nlazyCudaFreeMemoryManager = new GPULazyCudaFreeMemoryManager(this);\n@@ -603,7 +596,7 @@ public class GPUMemoryManager {\nlong free[] = { 0 };\nlong total[] = { 0 };\ncudaMemGetInfo(free, total);\n- return (long) (free[0] * GPU_MEMORY_UTILIZATION_FACTOR);\n+ return (long) (free[0] * DMLScript.GPU_MEMORY_UTILIZATION_FACTOR);\n}\nprivate static class CustomPointer extends Pointer {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Added support for sysml.gpu.memory.util.factor property
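A small worked example of how the new property is applied (numbers hypothetical): cudaMemGetInfo reports the currently free device memory, and the budget handed to the GPU bufferpool is capped at the configured fraction, mirroring the patched getAvailableMemory:

    long free = 12L << 30;                 // e.g., 12 GB reported free by cudaMemGetInfo
    double factor = 0.9;                   // sysml.gpu.memory.util.factor (default 0.9)
    long budget = (long) (free * factor);  // ~10.8 GB usable by the GPU bufferpool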
49,738
02.08.2018 19:45:37
25,200
b3b50c0047b95143719411b3937d9f5ab42c7397
Fix codegen register allocation special cases This patch fixes special cases of codegen register allocation, which (1) computed an incorrect initial count due to missing support for Nary operations, and (2) incorrectly reported valid configurations for probes of count 1 and 0 because the sequence generator does not support these small cycles.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -47,6 +47,7 @@ import org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary.BinType;\nimport org.apache.sysml.hops.codegen.cplan.CNodeData;\n+import org.apache.sysml.hops.codegen.cplan.CNodeNary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTernary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary.UnaryType;\n@@ -409,7 +410,9 @@ public class TemplateUtils\npublic static boolean isUnaryOperatorPipeline(CNode node) {\nif( node.isVisited() ) {\n//second reference to vector intermediate invalidates a unary pipeline\n- return !(node instanceof CNodeBinary && ((CNodeBinary)node).getType().isVectorPrimitive());\n+ return !((node instanceof CNodeBinary && ((CNodeBinary)node).getType().isVectorPrimitive())\n+ || (node instanceof CNodeTernary && ((CNodeTernary)node).getType().isVectorPrimitive())\n+ || (node instanceof CNodeNary && ((CNodeNary)node).getType().isVectorPrimitive()));\n}\nboolean ret = true;\nfor( CNode input : node.getInput() )\n@@ -452,7 +455,9 @@ public class TemplateUtils\n&& ((CNodeUnary)node).getType().isVectorScalarPrimitive()) ? 1 : 0;\nint cntTn = (node instanceof CNodeTernary\n&& ((CNodeTernary)node).getType().isVectorPrimitive()) ? 1 : 0;\n- return ret + cntBin + cntUn + cntTn;\n+ int cntNn = (node instanceof CNodeNary\n+ && ((CNodeNary)node).getType().isVectorPrimitive()) ? 1 : 0;\n+ return ret + cntBin + cntUn + cntTn + cntNn;\n}\npublic static int getMaxLiveVectorIntermediates(CNode node, CNode main, Map<Long, Set<Long>> parents, Set<Pair<Long, Long>> stack) {\n@@ -479,6 +484,7 @@ public class TemplateUtils\n}\npublic static boolean isValidNumVectorIntermediates(CNode node, CNode main, Map<Long, Set<Long>> parents, Map<Long, Pair<Long, MutableInt>> inUse, Set<Long> inUse2, int count) {\n+ if( count <= 1 ) return false;\nIDSequence buff = new IDSequence(true, count-1); //zero based\ninUse.clear(); inUse2.clear();\nnode.resetVisitStatus();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2480] Fix codegen register allocation special cases This patch fixes special cases of codegen register allocation, which (1) computed an incorrect initial count due to missing support for Nary operations, and (2) incorrectly reported valid configurations for probes of count 1 and 0 because the sequence generator does not support these small cycles.
49,738
02.08.2018 22:16:23
25,200
fce9d978d00bf5d42d6be9a475dee346e1844e00
Codegen support for im2col/conv2d DNN operations This patch adds codegen support in row templates for DNN conv2d operations. Specifically, we generate row-wise im2col and conv2d-mm operations, which allows for CSE of im2col if multiple conv2d operations are fused into the same row-wise operator.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "new_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "diff": "@@ -601,15 +601,13 @@ public class DnnOp extends MultiThreadedHop\n|| op == OpOpDnn.CONV2D\n|| op == OpOpDnn.CONV2D_BACKWARD_FILTER\n|| op == OpOpDnn.CONV2D_BACKWARD_DATA) {\n- imageHeightHop = getInput().get(8);\n- filterHeightHop = getInput().get(12);\n_cachedParams.setIfUnknown(\ngetInput().get(6), // N\ngetInput().get(7), // C\n- imageHeightHop, // H\n+ getInput().get(8), // H\ngetInput().get(9), // W\ngetInput().get(10), // K\n- filterHeightHop, // R\n+ getInput().get(12), // R\ngetInput().get(13), // S\ngetInput().get(2), // stride_h\ngetInput().get(3), // stride_w\n@@ -617,15 +615,13 @@ public class DnnOp extends MultiThreadedHop\ngetInput().get(5), _maxNumThreads);\n}\nelse {\n- imageHeightHop = getInput().get(7);\n- filterHeightHop = getInput().get(11);\n_cachedParams.setIfUnknown(\ngetInput().get(5),\ngetInput().get(6),\n- imageHeightHop,\n+ getInput().get(7),\ngetInput().get(8),\ngetInput().get(9),\n- filterHeightHop,\n+ getInput().get(11),\ngetInput().get(12),\ngetInput().get(1),\ngetInput().get(2),\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeNary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeNary.java", "diff": "@@ -33,7 +33,9 @@ public class CNodeNary extends CNode\npublic enum NaryType {\nVECT_CBIND,\nVECT_MAX_POOL,\n- VECT_AVG_POOL;\n+ VECT_AVG_POOL,\n+ VECT_IM2COL,\n+ VECT_CONV2DMM;\npublic static boolean contains(String value) {\nfor( NaryType bt : values() )\n@@ -63,18 +65,30 @@ public class CNodeNary extends CNode\n}\nreturn sb.toString();\ncase VECT_MAX_POOL:\n- case VECT_AVG_POOL:\n+ case VECT_AVG_POOL: {\nString vectName = (this==VECT_MAX_POOL) ? 
\"Maxpool\" : \"Avgpool\";\n- String paramStr = getPoolingParameterString(inputs);\n+ String paramStr = getDnnParameterString(inputs, true);\nreturn sparseGen ?\n\" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1v%, %IN1i%, %POS1%, alen, len, \"+paramStr+\");\\n\" :\n\" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1%, %POS1%, %LEN%, \"+paramStr+\");\\n\";\n+ }\n+ case VECT_IM2COL: {\n+ String paramStr = getDnnParameterString(inputs, true);\n+ return sparseGen ?\n+ \" double[] %TMP% = LibSpoofPrimitives.vectIm2colWrite(%IN1v%, %IN1i%, %POS1%, alen, len, \"+paramStr+\");\\n\" :\n+ \" double[] %TMP% = LibSpoofPrimitives.vectIm2colWrite(%IN1%, %POS1%, %LEN%, \"+paramStr+\");\\n\";\n+ }\n+ case VECT_CONV2DMM: {\n+ return \" double[] %TMP% = LibSpoofPrimitives.vectConv2dmmWrite(%IN2%, %IN1%, %POS2%, %POS1%, %LEN%, \"\n+ + getDnnParameterString(inputs, false) +\");\\n\";\n+ }\ndefault:\nthrow new RuntimeException(\"Invalid nary type: \"+this.toString());\n}\n}\npublic boolean isVectorPrimitive() {\n- return this == VECT_CBIND || this == VECT_MAX_POOL || this == VECT_AVG_POOL;\n+ return this == VECT_CBIND || this == VECT_MAX_POOL || this == VECT_AVG_POOL\n+ || this == VECT_IM2COL || this == NaryType.VECT_CONV2DMM;\n}\n}\n@@ -111,8 +125,11 @@ public class CNodeNary extends CNode\ntmp = tmp.replace(\"%TMP%\", var);\n//replace sparse and dense inputs\n- String varj = _inputs.get(0).getVarname();\n- tmp = replaceUnaryPlaceholders(tmp, varj, false);\n+ String varj1 = _inputs.get(0).getVarname();\n+ String varj2 = _inputs.get(1).getVarname();\n+ tmp = (_type == NaryType.VECT_CONV2DMM) ?\n+ replaceBinaryPlaceholders(tmp, new String[]{varj1,varj2}, false) :\n+ replaceUnaryPlaceholders(tmp, varj1, false);\nsb.append(tmp);\n@@ -128,6 +145,8 @@ public class CNodeNary extends CNode\ncase VECT_CBIND: return \"n(cbind)\";\ncase VECT_MAX_POOL: return \"n(maxpool)\";\ncase VECT_AVG_POOL: return \"n(avgpool)\";\n+ case VECT_IM2COL: return \"n(im2col)\";\n+ case VECT_CONV2DMM: return \"n(conv2dmm)\";\ndefault:\nreturn \"m(\"+_type.name().toLowerCase()+\")\";\n}\n@@ -144,7 +163,7 @@ public class CNodeNary extends CNode\n_dataType = DataType.MATRIX;\nbreak;\ncase VECT_MAX_POOL:\n- case VECT_AVG_POOL: //only stride 1, pad 0\n+ case VECT_AVG_POOL: { //only stride 1, pad 0\nint C = Integer.parseInt(_inputs.get(6).getVarname());\nint H = Integer.parseInt(_inputs.get(7).getVarname());\nint W = Integer.parseInt(_inputs.get(8).getVarname());\n@@ -152,11 +171,29 @@ public class CNodeNary extends CNode\nint S = Integer.parseInt(_inputs.get(12).getVarname());\nlong P = DnnUtils.getP(H, R, 1, 0);\nlong Q = DnnUtils.getQ(W, S, 1, 0);\n- _rows = _inputs.get(0)._rows;\n+ _rows = _inputs.get(0)._rows; //N\n_cols = C * P * Q;\n_dataType = DataType.MATRIX;\nbreak;\n}\n+ case VECT_IM2COL:\n+ _rows = 1;\n+ _cols = -1;\n+ _dataType = DataType.MATRIX;\n+ break;\n+ case VECT_CONV2DMM: {\n+ int H = Integer.parseInt(_inputs.get(8).getVarname());\n+ int W = Integer.parseInt(_inputs.get(9).getVarname());\n+ int K = Integer.parseInt(_inputs.get(10).getVarname());\n+ int R = Integer.parseInt(_inputs.get(12).getVarname());\n+ int S = Integer.parseInt(_inputs.get(13).getVarname());\n+ long P = DnnUtils.getP(H, R, 1, 0);\n+ long Q = DnnUtils.getQ(W, S, 1, 0);\n+ _rows = _inputs.get(0)._rows; //N\n+ _cols = K * P * Q;\n+ _dataType = DataType.MATRIX;\n+ }\n+ }\n}\n@Override\n@@ -178,18 +215,46 @@ public class CNodeNary extends CNode\n&& _type == that._type;\n}\n- private static String 
getPoolingParameterString(List<CNode> inputs) {\n+ private static String getDnnParameterString(List<CNode> inputs, boolean unary) {\n+ int off = unary ? 0 : 1;\n+\n//extract and derive individual parameters\n- int C = Integer.parseInt(inputs.get(6).getVarname());\n- int H = Integer.parseInt(inputs.get(7).getVarname());\n- int W = Integer.parseInt(inputs.get(8).getVarname());\n- int R = Integer.parseInt(inputs.get(11).getVarname());\n- int S = Integer.parseInt(inputs.get(12).getVarname());\n+ int C = Integer.parseInt(inputs.get(off+6).getVarname());\n+ int H = Integer.parseInt(inputs.get(off+7).getVarname());\n+ int W = Integer.parseInt(inputs.get(off+8).getVarname());\n+ int K = Integer.parseInt(inputs.get(off+9).getVarname());\n+ int R = Integer.parseInt(inputs.get(off+11).getVarname());\n+ int S = Integer.parseInt(inputs.get(off+12).getVarname());\nint P = (int) DnnUtils.getP(H, R, 1, 0);\nint Q = (int) DnnUtils.getQ(W, S, 1, 0);\n//construct parameter string\nreturn \"rix, \" + StringUtils.join(\n- new int[]{C, P, Q, R, S, H, W}, ',');\n+ new int[]{C, P, Q, K, R, S, H, W}, ',');\n+ }\n+\n+\n+ private String replaceBinaryPlaceholders(String tmp, String[] vars, boolean vectIn) {\n+ //replace sparse and dense inputs\n+ for( int j=0; j<2; j++ ) {\n+ String varj = vars[j];\n+\n+ //replace sparse and dense inputs\n+ tmp = tmp.replace(\"%IN\"+(j+1)+\"v%\", varj+\"vals\");\n+ tmp = tmp.replace(\"%IN\"+(j+1)+\"i%\", varj+\"ix\");\n+ tmp = tmp.replace(\"%IN\"+(j+1)+\"%\",\n+ varj.startsWith(\"b\") ? varj + \".values(rix)\" : varj );\n+\n+ //replace start position of main input\n+ tmp = tmp.replace(\"%POS\"+(j+1)+\"%\", (_inputs.get(j) instanceof CNodeData\n+ && _inputs.get(j).getDataType().isMatrix()) ? !varj.startsWith(\"b\") ? varj+\"i\" :\n+ (TemplateUtils.isMatrix(_inputs.get(j)) && _type!=NaryType.VECT_CONV2DMM) ? 
varj + \".pos(rix)\" : \"0\" : \"0\");\n+ }\n+\n+ //replace length\n+ if( _inputs.get(0).getDataType().isMatrix() )\n+ tmp = tmp.replace(\"%LEN%\", _inputs.get(0).getVectorLength());\n+\n+ return tmp;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -116,8 +116,9 @@ public class TemplateRow extends TemplateBase\n|| (HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT)\n&& hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown()\n&& hop.getInput().get(0).getDim2()>1)\n- || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL)\n- && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0());\n+ || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL, OpOpDnn.CONV2D)\n+ && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0()\n+ && hop.getInput().get(1).dimsKnown()); //for conv2d\n}\n@Override\n@@ -142,8 +143,9 @@ public class TemplateRow extends TemplateBase\n|| (HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT)\n&& hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown()\n&& hop.getInput().get(0).getDim2()>1)\n- || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL)\n- && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0())\n+ || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL, OpOpDnn.CONV2D)\n+ && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0()\n+ && hop.getInput().get(1).dimsKnown() && hop.getInput().get(1)!=input) //for conv2d\n|| isPartOfValidCumAggChain(hop) //cum* with transpose\n|| isPartOfValidTransposeMMChain(hop)); //t(f(X))%*%X\n}\n@@ -160,8 +162,9 @@ public class TemplateRow extends TemplateBase\n|| (HopRewriteUtils.isDnn(hop, OpOpDnn.BIASADD, OpOpDnn.BIASMULT)\n&& hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown()\n&& hop.getInput().get(0).getDim2()>1 )\n- || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL)\n- && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0())\n+ || (HopRewriteUtils.isDnn(hop, OpOpDnn.MAX_POOL, OpOpDnn.AVG_POOL, OpOpDnn.CONV2D)\n+ && hop.getInput().get(0).dimsKnown() && ((DnnOp)hop).isStride1Pad0()\n+ && hop.getInput().get(1).dimsKnown() && hop.getInput().get(1)!=input) //for conv2d\n|| (HopRewriteUtils.isDataGenOpWithLiteralInputs(input, DataGenMethod.SEQ)\n&& HopRewriteUtils.hasOnlyUnaryBinaryParents(input, false))\n|| (hop instanceof AggBinaryOp\n@@ -488,6 +491,14 @@ public class TemplateRow extends TemplateBase\nout = new CNodeNary(in, CNodeNary.NaryType\n.valueOf(\"VECT_\"+((DnnOp)hop).getOp().name()));\n}\n+ else if( HopRewriteUtils.isDnn(hop, OpOpDnn.CONV2D) ) {\n+ CNode[] in1 = hop.getInput().stream().filter(h -> h!=hop.getInput().get(1))\n+ .map(h ->tmp.get(h.getHopID())).toArray(CNode[]::new);\n+ CNode im2col = new CNodeNary(in1, CNodeNary.NaryType.VECT_IM2COL);\n+ CNode[] in2 = hop.getInput().stream().map(h -> (h==hop.getInput().get(0)) ?\n+ im2col : tmp.get(h.getHopID())).toArray(CNode[]::new);\n+ out = new CNodeNary(in2, CNodeNary.NaryType.VECT_CONV2DMM);\n+ }\nelse if( hop instanceof NaryOp ) {\nCNode[] inputs = new CNode[hop.getInput().size()];\nfor( int i=0; i<hop.getInput().size(); i++ ) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java", "new_path": 
"src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java", "diff": "@@ -25,10 +25,12 @@ import org.apache.commons.math3.util.FastMath;\nimport org.apache.sysml.runtime.functionobjects.BitwAnd;\nimport org.apache.sysml.runtime.functionobjects.IntegerDivide;\nimport org.apache.sysml.runtime.functionobjects.Modulus;\n+import org.apache.sysml.runtime.matrix.data.DenseBlockDRB;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNNPooling;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixMult;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN.PoolingType;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixDNNIm2Col;\n/**\n* This library contains all vector primitives that are used in\n@@ -2057,14 +2059,14 @@ public class LibSpoofPrimitives\n//maxpool\n- public static double[] vectMaxpoolWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ public static double[] vectMaxpoolWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\ndouble[] c = allocVector(C*P*Q, true);\nLibMatrixDNNPooling.poolingDenseStride1Pad0(PoolingType.MAX,\n-Double.MAX_VALUE, 1, a, c, rix, rix+1, ai, 0, C, P, Q, R, S, H, W);\nreturn c;\n}\n- public static double[] vectMaxpoolWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ public static double[] vectMaxpoolWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\ndouble[] a = allocVector(len, true);\ndouble[] c = allocVector(C*P*Q, true);\nfor(int k=ai; k<ai+alen; k++)\n@@ -2076,14 +2078,14 @@ public class LibSpoofPrimitives\n//avgpool\n- public static double[] vectAvgpoolWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ public static double[] vectAvgpoolWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\ndouble[] c = allocVector(C*P*Q, true);\nLibMatrixDNNPooling.poolingDenseStride1Pad0(PoolingType.AVG,\n0, 1/(R*S), a, c, rix, rix+1, ai, 0, C, P, Q, R, S, H, W);\nreturn c;\n}\n- public static double[] vectAvgpoolWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int R, int S, int H, int W) {\n+ public static double[] vectAvgpoolWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\ndouble[] a = allocVector(len, true);\ndouble[] c = allocVector(C*P*Q, true);\nfor(int k=ai; k<ai+alen; k++)\n@@ -2093,6 +2095,34 @@ public class LibSpoofPrimitives\nreturn c;\n}\n+ //im2col\n+\n+ public static double[] vectIm2colWrite(double[] a, int ai, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\n+ double[] c = allocVector(C*R*S * P*Q, true);\n+ LibMatrixDNNIm2Col.im2colDenseStride1Pad0(a, c, ai, C, R, S, H, W, P, Q);\n+ return c;\n+ }\n+\n+ public static double[] vectIm2colWrite(double[] avals, int[] aix, int ai, int alen, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\n+ double[] a = allocVector(len, true);\n+ double[] c = allocVector(C*R*S * P*Q, true);\n+ for(int k=ai; k<ai+alen; k++)\n+ a[aix[k]] = avals[k];\n+ LibMatrixDNNIm2Col.im2colDenseStride1Pad0(a, c, ai, C, R, S, H, W, P, Q);\n+ return c;\n+ }\n+\n+ //conv2d matrix mult\n+\n+ public static double[] vectConv2dmmWrite(double[] a, double[] b, 
int ai, int bi, int len, int rix, int C, int P, int Q, int K, int R, int S, int H, int W) {\n+ double[] c = allocVector(K*P*Q, true);\n+ int CRS = C*R*S, PQ = P*Q;\n+ LibMatrixMult.matrixMultDenseDenseMM(\n+ new DenseBlockDRB(a, K, CRS), new DenseBlockDRB(b, CRS, PQ),\n+ new DenseBlockDRB(c, K, PQ), PQ, CRS, 0, K, 0, PQ);\n+ return c;\n+ }\n+\n//complex builtin functions that are not directly generated\n//(included here in order to reduce the number of imports)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNIm2Col.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNIm2Col.java", "diff": "@@ -41,7 +41,7 @@ public class LibMatrixDNNIm2Col\n//dense and sparse operation dispatch\nif( !in.sparse && stride1Pad0 && !trans )\nim2colDenseStride1Pad0(in.getDenseBlockValues(),\n- out.getDenseBlockValues(), r, C, R, S, H, W, P, Q);\n+ out.getDenseBlockValues(), r*C*H*W, C, R, S, H, W, P, Q);\nelse if( !in.sparse )\nim2colDense(in.getDenseBlockValues(), out.getDenseBlockValues(),\nr, C, R, S, H, W, P, Q, stride_h, stride_w, pad_h, pad_w, trans);\n@@ -50,8 +50,7 @@ public class LibMatrixDNNIm2Col\nstride_h, stride_w, pad_h, pad_w, trans);\n}\n- public static void im2colDenseStride1Pad0(double[] in, double[] out, int r, int C, int R, int S, int H, int W, int P, int Q) {\n- int nOffset = r * C * H * W;\n+ public static void im2colDenseStride1Pad0(double[] in, double[] out, int ai, int C, int R, int S, int H, int W, int P, int Q) {\nint CRS = C * R * S;\nfor (int c = 0; c < CRS; ++c) {\nint wOffset = c % S;\n@@ -60,7 +59,7 @@ public class LibMatrixDNNIm2Col\nfor (int h = 0; h < P; ++h) {\nint hPadded = h + hOffset;\nint outOffset = (c * P + h) * Q;\n- int inputOffset = nOffset + (cInput * H + hPadded) * W;\n+ int inputOffset = ai + (cInput * H + hPadded) * W;\nSystem.arraycopy(in, inputOffset + wOffset, out, outOffset, Q);\nint w = Q - 1;\nint wPadded = w + wOffset;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -1046,7 +1046,8 @@ public class LibMatrixMult\n}\n}\n- private static void matrixMultDenseDenseMM(DenseBlock a, DenseBlock b, DenseBlock c, int n, int cd, int rl, int ru, int cl, int cu) {\n+ //note: public for use by codegen for consistency\n+ public static void matrixMultDenseDenseMM(DenseBlock a, DenseBlock b, DenseBlock c, int n, int cd, int rl, int ru, int cl, int cu) {\n//1) Unrolled inner loop (for better instruction-level parallelism)\n//2) Blocked execution (for less cache trashing in parallel exec)\n//3) Asymmetric block sizes (for less misses in inner loop, yet blocks in L1/L2)\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "diff": "@@ -82,6 +82,7 @@ public class RowAggTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME43 = TEST_NAME+\"43\"; //bias_add(X,B) + bias_mult(X,B)\nprivate static final String TEST_NAME44 = TEST_NAME+\"44\"; //maxpool(X - mean(X)) + 7;\nprivate static final String TEST_NAME45 = TEST_NAME+\"45\"; //vector allocation;\n+ private static final String TEST_NAME46 = TEST_NAME+\"46\"; //conv2d(X - mean(X), F1) + conv2d(X - mean(X), F2);\nprivate static final String TEST_DIR = 
\"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -93,7 +94,7 @@ public class RowAggTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for(int i=1; i<=45; i++)\n+ for(int i=1; i<=46; i++)\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i, new String[] { String.valueOf(i) }) );\n}\n@@ -772,6 +773,21 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME45, false, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenRowAggRewrite46CP() {\n+ testCodegenIntegration( TEST_NAME46, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg46CP() {\n+ testCodegenIntegration( TEST_NAME46, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg46SP() {\n+ testCodegenIntegration( TEST_NAME46, false, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n@@ -793,7 +809,7 @@ public class RowAggTmplTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{\"-explain\", \"recompile_runtime\", \"-stats\", \"-args\", output(\"S\") };\n+ programArgs = new String[]{\"-explain\", \"-stats\", \"-args\", output(\"S\") };\nfullRScriptName = HOME + testname + \".R\";\nrCmd = getRCmd(inputDir(), expectedDir());\n@@ -836,6 +852,9 @@ public class RowAggTmplTest extends AutomatedTestBase\nif( testname.equals(TEST_NAME44) )\nAssert.assertTrue(!heavyHittersContainsSubString(\"maxpooling\")\n&& !heavyHittersContainsSubString(\"spoof\", 2));\n+ if( testname.equals(TEST_NAME46) )\n+ Assert.assertTrue(!heavyHittersContainsSubString(\"conv2d\")\n+ && !heavyHittersContainsSubString(\"spoof\", 2));\n}\nfinally {\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowConv2DOperationsTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowConv2DOperationsTest.java", "diff": "@@ -29,6 +29,7 @@ import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\nimport org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Assert;\nimport org.junit.Test;\npublic class RowConv2DOperationsTest extends AutomatedTestBase\n@@ -111,8 +112,8 @@ public class RowConv2DOperationsTest extends AutomatedTestBase\nHashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\nHashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"B\");\nTestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n- //Assert.assertTrue(heavyHittersContainsSubString(\"spoofRA\")\n- // || heavyHittersContainsSubString(\"sp_spoofRA\"));\n+ Assert.assertTrue(heavyHittersContainsSubString(\"spoofRA\")\n+ || heavyHittersContainsSubString(\"sp_spoofRA\"));\n}\nfinally {\nrtplatform = platformOld;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/rowAggPattern46.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. 
See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+pad_image <- function(img, Hin, Win, padh, padw){\n+ C = nrow(img)\n+ img_padded = matrix(0, C, (Hin+2*padh)*(Win+2*padw), byrow=TRUE) # zeros\n+ for (c in 1:C) {\n+ img_slice = matrix(img[c,], Hin, Win, byrow=TRUE) # depth slice C reshaped\n+ img_padded_slice = matrix(0, Hin+2*padh, Win+2*padw)\n+ img_padded_slice[(padh+1):(padh+Hin), (padw+1):(padw+Win)] = img_slice\n+ img_padded[c,] = matrix(t(img_padded_slice), 1, (Hin+2*padh)*(Win+2*padw)) # reshape\n+ }\n+ img_padded\n+}\n+\n+im2col <- function(img, Hin, Win, Hf, Wf, strideh, stridew) {\n+ C = nrow(img)\n+ Hout = as.integer((Hin - Hf) / strideh + 1)\n+ Wout = as.integer((Win - Wf) / stridew + 1)\n+\n+ img_cols = matrix(0, C*Hf*Wf, Hout*Wout, byrow=TRUE) # zeros\n+ for (hout in 1:Hout) { # all output rows\n+ hin = (hout-1) * strideh + 1\n+ for (wout in 1:Wout) { # all output columns\n+ win = (wout-1) * stridew + 1\n+ # Extract a local patch of the input image corresponding spatially to the filter sizes.\n+ img_patch = matrix(0, C, Hf*Wf, byrow=TRUE) # zeros\n+ for (c in 1:C) { # all channels\n+ img_slice = matrix(img[c,], Hin, Win, byrow=TRUE) # reshape\n+ img_patch[c,] = matrix(t(img_slice[hin:(hin+Hf-1), win:(win+Wf-1)]), 1, Hf*Wf)\n+ }\n+ img_cols[,(hout-1)*Wout + wout] = matrix(t(img_patch), C*Hf*Wf, 1) # reshape\n+ }\n+ }\n+ img_cols\n+}\n+\n+conv2d <- function(X, W, C, Hin, Win, Hf, Wf, strideh, stridew, padh, padw) {\n+ N = nrow(X)\n+ F = nrow(W)\n+ Hout = as.integer((Hin + 2 * padh - Hf) / strideh + 1)\n+ Wout = as.integer((Win + 2 * padw - Wf) / stridew + 1)\n+\n+ # Create output volume\n+ out = matrix(0, N, F*Hout*Wout, byrow=TRUE)\n+\n+ # Convolution - im2col implementation\n+ for (n in 1:N) { # all examples\n+ Xn = matrix(X[n,], C, Hin*Win, byrow=TRUE) # reshape\n+\n+ # Pad image\n+ Xn_padded = pad_image(Xn, Hin, Win, padh, padw) # shape (C, (Hin+2*padh)*(Win+2*padw))\n+\n+ # Extract local image patches into columns with im2col, of shape (C*Hf*Wf, Hout*Wout)\n+ Xn_padded_cols = im2col(Xn_padded, Hin+2*padh, Win+2*padw, Hf, Wf, strideh, stridew)\n+\n+ # Convolve patches with filters\n+ outn = W %*% Xn_padded_cols # shape (F, Hout*Wout)\n+ out[n,] = matrix(t(outn), 1, F*Hout*Wout) # reshape\n+ }\n+\n+ out\n+}\n+\n+imgSize=8\n+numImg=16\n+numChannels=4\n+numFilters=3\n+filterSize=4\n+stride=1\n+pad=0\n+\n+Hout = as.integer((imgSize + 2 * pad - filterSize) / stride + 1)\n+\n+X = matrix(seq(1, numImg*numChannels*imgSize*imgSize), numImg, numChannels*imgSize*imgSize, byrow=TRUE);\n+W1 = matrix(seq(1, numFilters*numChannels*filterSize*filterSize), numFilters, numChannels*filterSize*filterSize, byrow=TRUE)\n+W2 = matrix(seq(1, numFilters*numChannels*filterSize*filterSize)+7, numFilters, 
numChannels*filterSize*filterSize, byrow=TRUE)\n+b = matrix(seq(1, numFilters), numFilters, 1, byrow=TRUE)\n+\n+X = X - rowMeans(X)\n+\n+R1 = conv2d(X, W1, numChannels, imgSize, imgSize, filterSize, filterSize, stride, stride, pad, pad);\n+R2 = conv2d(X, W2, numChannels, imgSize, imgSize, filterSize, filterSize, stride, stride, pad, pad);\n+R = R1 + R2;\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"))\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/rowAggPattern46.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+imgSize=8\n+numImg=16\n+numChannels=4\n+numFilters=3\n+filterSize=4\n+stride=1\n+pad=0\n+\n+X = matrix(seq(1, numImg*numChannels*imgSize*imgSize), rows=numImg, cols=numChannels*imgSize*imgSize);\n+W1 = matrix(seq(1, numFilters*numChannels*filterSize*filterSize), rows=numFilters, cols=numChannels*filterSize*filterSize)\n+W2 = matrix(seq(1, numFilters*numChannels*filterSize*filterSize)+7, rows=numFilters, cols=numChannels*filterSize*filterSize)\n+b = matrix(seq(1, numFilters), rows=numFilters, cols=1)\n+\n+while(FALSE){}\n+\n+X = X - rowMeans(X);\n+\n+R1 = conv2d(X, W1, padding=[pad, pad], stride=[stride, stride], input_shape=[numImg, numChannels, imgSize, imgSize], filter_shape=[numFilters, numChannels, filterSize, filterSize])\n+R2 = conv2d(X, W2, padding=[pad, pad], stride=[stride, stride], input_shape=[numImg, numChannels, imgSize, imgSize], filter_shape=[numFilters, numChannels, filterSize, filterSize])\n+R = R1 + R2;\n+\n+write(R, $1, format=\"text\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2067] Codegen support for im2col/conv2d DNN operations This patch adds codegen support in row templates for DNN conv2d operations. Specifically, we generate row-wise im2col and conv2d-mm operations, which allows for CSE of im2col if multiple conv2d operations are fused into the same row-wise operator.
49,738
02.08.2018 22:53:30
25,200
8fbeca142c1fbb8f2786a1097fb9840be5ca8005
[MINOR] Performance / cleanup conv2d R test script (vectorized biasadd)
[ { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/tensor/Conv2DTest.R", "new_path": "src/test/scripts/functions/tensor/Conv2DTest.R", "diff": "@@ -105,19 +105,14 @@ conv2d <- function(X, W, C, Hin, Win, Hf, Wf, strideh, stridew, padh, padw) {\nout\n}\n-output = conv2d(x, w, numChannels, imgSize, imgSize, filterSize, filterSize, stride, stride, pad, pad);\n+R = conv2d(x, w, numChannels, imgSize, imgSize, filterSize, filterSize, stride, stride, pad, pad);\nHout = as.integer((imgSize + 2 * pad - filterSize) / stride + 1)\nWout = Hout\nb = matrix(seq(1, numFilters), numFilters, 1, byrow=TRUE)\nfor(k in 0:(numFilters-1)) {\n- for(i in 1:nrow(output)) {\n- start = k*Hout*Hout;\n- for(j in 1:(Hout*Hout)) {\n- output[i,start+j] = output[i,start+j] + b[k+1,1]\n+ start = k*Hout^2;\n+ R[,(start+1):(start+Hout^2)] = R[,(start+1):(start+Hout^2)] + b[k+1,1]\n}\n- }\n-}\n-\n-writeMM(as(output,\"CsparseMatrix\"), paste(args[8], \"B\", sep=\"\"))\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[8], \"B\", sep=\"\"))\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance / cleanup conv2d R test script (vectorized biasadd)
49,738
03.08.2018 12:36:30
25,200
93ebb3822034af2c4e29b66238f18df852ad05a3
Fix text-binary reblock for matrices w/ zero rows/cols This patch fixes a special case for distributed textcell or matrix market to binary block reblocks of matrices with zero rows or columns, which so far led to an invalid allocated buffer size of zero.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/ReblockBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/ReblockBuffer.java", "diff": "@@ -42,29 +42,21 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic class ReblockBuffer\n{\n-\n- //default buffer size: 5M -> 5M * 3x8B = 120MB\n- public static final int DEFAULT_BUFFER_SIZE = 5000000;\n-\n- //buffer <long rowindex, long colindex, long value>\n- //(pure long buffer for sort on flush)\n- private long[][] _buff = null;\n-\n- private int _bufflen = -1;\n- private int _count = -1;\n-\n- private long _rlen = -1;\n- private long _clen = -1;\n- private int _brlen = -1;\n- private int _bclen = -1;\n-\n- public ReblockBuffer( int buffersize, long rlen, long clen, int brlen, int bclen )\n- {\n- _bufflen = buffersize;\n+ public static final int DEFAULT_BUFFER_SIZE = 5000000; //5M x 3x8B = 120MB\n+\n+ //buffer <long rowindex, long colindex, long value> (long for sort on flush)\n+ private final long[][] _buff;\n+ private final int _bufflen;\n+ private int _count;\n+ private final long _rlen;\n+ private final long _clen;\n+ private final int _brlen;\n+ private final int _bclen;\n+\n+ public ReblockBuffer( int buffersize, long rlen, long clen, int brlen, int bclen ) {\n+ _bufflen = Math.max(buffersize, 16);\n_count = 0;\n-\n_buff = new long[ _bufflen ][3];\n-\n_rlen = rlen;\n_clen = clen;\n_brlen = brlen;\n@@ -85,8 +77,7 @@ public class ReblockBuffer\nif( inBlk.isInSparseFormat() ) //SPARSE\n{\nIterator<IJV> iter = inBlk.getSparseBlockIterator();\n- while( iter.hasNext() )\n- {\n+ while( iter.hasNext() ) {\nIJV cell = iter.next();\nlong tmp = Double.doubleToRawLongBits(cell.getV());\n_buff[_count][0] = r_offset + cell.getI();\n@@ -105,11 +96,9 @@ public class ReblockBuffer\nint rlen = inBlk.getNumRows();\nint clen = inBlk.getNumColumns();\nfor( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble val = inBlk.getValueDenseUnsafe(i, j);\n- if( val !=0 )\n- {\n+ if( val !=0 ) {\nlong tmp = Double.doubleToRawLongBits(val);\n_buff[_count][0] = r_offset + i;\n_buff[_count][1] = c_offset + j;\n@@ -124,13 +113,11 @@ public class ReblockBuffer\n}\n}\n- public int getSize()\n- {\n+ public int getSize() {\nreturn _count;\n}\n- public int getCapacity()\n- {\n+ public int getCapacity() {\nreturn _bufflen;\n}\n@@ -329,16 +316,13 @@ public class ReblockBuffer\n* compute the block indexes on-the-fly based on the given cell indexes.\n*\n*/\n- private class ReblockBufferComparator implements Comparator<long[]>\n- {\n+ private class ReblockBufferComparator implements Comparator<long[]> {\n@Override\n- public int compare(long[] arg0, long[] arg1)\n- {\n+ public int compare(long[] arg0, long[] arg1) {\nlong bi0 = UtilFunctions.computeBlockIndex( arg0[0], _brlen );\nlong bj0 = UtilFunctions.computeBlockIndex( arg0[1], _bclen );\nlong bi1 = UtilFunctions.computeBlockIndex( arg1[0], _brlen );\nlong bj1 = UtilFunctions.computeBlockIndex( arg1[1], _bclen );\n-\nreturn ( bi0 < bi1 || (bi0 == bi1 && bj0 < bj1) ) ? -1 :\n(( bi0 == bi1 && bj0 == bj1)? 0 : 1);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2483] Fix text-binary reblock for matrices w/ zero rows/cols This patch fixes a special case for distributed textcell or matrix market to binary block reblocks of matrices with zero rows or columns, which so far led to an invalid allocated buffer size of zero.
49,738
03.08.2018 13:41:51
25,200
5d675151e38b161abfeccfdcd042e655e5ed1a4f
Fix codegen support for vector axpy in row templates This patch fixes a very specific case of vector axpy in row templates. So far we mistakenly compiled scalar axpy operations for both axpy inputs with 1 or 2 columns, although these scalar operations only apply to column vectors.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -464,11 +464,11 @@ public class TemplateRow extends TemplateBase\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nCNode cdata3 = tmp.get(hop.getInput().get(2).getHopID());\n- if( hop.getDim2() > 2 ) { //row vectors\n+ if( hop.getDim2() >= 2 ) { //matrices\nout = new CNodeBinary(cdata1, new CNodeBinary(cdata2, cdata3, BinType.VECT_MULT_SCALAR),\ntop.getOp()==OpOp3.PLUS_MULT? BinType.VECT_PLUS : BinType.VECT_MINUS);\n}\n- else {\n+ else { //column vectors\n//add lookups if required\ncdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\ncdata2 = TemplateUtils.wrapLookupIfNecessary(cdata2, hop.getInput().get(1));\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2484] Fix codegen support for vector axpy in row templates This patch fixes a very specific case of vector axpy in row templates. So far we mistakenly compiled scalar axpy operations for both axpy inputs with 1 or 2 columns, although these scalar operations only apply to column vectors.
49,738
03.08.2018 14:39:15
25,200
c1917d84fc2691e9e1b6da983b9a9f950844a285
Fix parfor optimizer robustness for non-existing vars This patch fixes the robustness of the parfor optimizer to correctly handle non-existing parent variables. The non-guaranteed existence of these variables is due to their conditional creation.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -418,6 +418,7 @@ public class OptimizerRuleBased extends Optimizer\nif( dpf != PartitionFormat.NONE\n&& dpf._dpf != PDataPartitionFormat.BLOCK_WISE_M_N\n&& (constrained || (mem > _lm/2 && mem > _rm/2))\n+ && vars.get(c) != null //robustness non-existing vars\n&& !vars.get(c).getDataType().isList() ) {\ncand2.put( c, dpf );\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2485] Fix parfor optimizer robustness for non-existing vars This patch fixes the robustness of the parfor optimizer to correctly handle non-existing parent variables. The non-guaranteed existence of these variables is due to their conditional creation.
49,738
03.08.2018 15:41:34
25,200
c98e815811f49df36c510446802b7c60681f3786
Fix build/estimate of layered graph sparsity estimator This patch fixes the build of the layered graph from dense matrix blocks as well as issues with sparse layered graphs where certain rows/columns are not represented at all.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -123,7 +123,7 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\nfor (int i=0; i<m; i++) {\ndouble[] avals = a.values(i);\nint aix = a.pos(i);\n- for (int j=0; j<m; j++)\n+ for (int j=0; j<n; j++)\nif( avals[aix+j] != 0 )\ncols[j].addInput(rows[i]);\n}\n@@ -159,6 +159,7 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\nreturn _input;\n}\n+ @SuppressWarnings(\"unused\")\npublic double[] getVector() {\nreturn _rvect;\n}\n@@ -183,8 +184,8 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\nreturn _rvect = ltmp.get(0);\nelse {\ndouble[] tmp = ltmp.get(0).clone();\n- for(int i=1; i<_input.size(); i++) {\n- double[] v2 = _input.get(i).getVector();\n+ for(int i=1; i<ltmp.size(); i++) {\n+ double[] v2 = ltmp.get(i);\nfor(int j=0; j<rounds; j++)\ntmp[j] = Math.min(tmp[j], v2[j]);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2291] Fix build/estimate of layered graph sparsity estimator This patch fixes the build of the layered graph from dense matrix blocks as well as issues with sparse layered graphs where certain rows/columns are not represented at all.
49,760
03.08.2018 17:41:22
25,200
f74f5ad4bf27606a5cb5e27a16eceb65c0bd5f62
Extended AVG estimator for other operations Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "diff": "@@ -27,8 +27,7 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n* Basic average case estimator for matrix sparsity:\n* sp = 1 - Math.pow(1-sp1*sp2, k)\n*/\n-public class EstimatorBasicAvg extends SparsityEstimator\n-{\n+public class EstimatorBasicAvg extends SparsityEstimator {\n@Override\npublic double estim(MMNode root) {\n// recursive sparsity evaluation of non-leaf nodes\n@@ -36,26 +35,61 @@ public class EstimatorBasicAvg extends SparsityEstimator\nOptimizerUtils.getSparsity(root.getLeft().getMatrixCharacteristics());\ndouble sp2 = !root.getRight().isLeaf() ? estim(root.getRight()) :\nOptimizerUtils.getSparsity(root.getRight().getMatrixCharacteristics());\n- return estimIntern(sp1, sp2, root.getRows(), root.getLeft().getCols(), root.getCols());\n+ return estimInternMM(sp1, sp2, root.getRows(), root.getLeft().getCols(), root.getCols());\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- return estimIntern(m1.getSparsity(), m2.getSparsity(),\n+ return estimInternMM(m1.getSparsity(), m2.getSparsity(),\nm1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n- throw new NotImplementedException();\n+ return estimIntern(m1, m2, op);\n}\n@Override\npublic double estim(MatrixBlock m, OpCode op) {\n+ return estimIntern(m, null, op);\n+ }\n+\n+ private double estimIntern(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ switch (op) {\n+ case MM:\n+ return estimInternMM(m1.getSparsity(), m2.getSparsity(),\n+ m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n+ case MULT:\n+ return m1.getSparsity() * m2.getSparsity();\n+ case PLUS:\n+ return m1.getSparsity() + m2.getSparsity() - m1.getSparsity() * m2.getSparsity();\n+ case EQZERO:\n+ return OptimizerUtils.getSparsity(m1.getNumRows(), m1.getNumColumns(),\n+ (long) m1.getNumRows() * m1.getNumColumns() - m1.getNonZeros());\n+ case DIAG:\n+ return (m1.getNumColumns() == 1) ?\n+ OptimizerUtils.getSparsity(m1.getNumRows(), m1.getNumRows(), m1.getNonZeros()) :\n+ OptimizerUtils.getSparsity(m1.getNumRows(), 1, Math.min(m1.getNumRows(), m1.getNonZeros()));\n+ // binary operations that preserve sparsity exactly\n+ case CBIND:\n+ return OptimizerUtils.getSparsity(m1.getNumRows(),\n+ m1.getNumColumns() + m1.getNumColumns(), m1.getNonZeros() + m2.getNonZeros());\n+ case RBIND:\n+ return OptimizerUtils.getSparsity(m1.getNumRows() + m2.getNumRows(),\n+ m1.getNumColumns(), m1.getNonZeros() + m2.getNonZeros());\n+ // unary operation that preserve sparsity exactly\n+ case NEQZERO:\n+ return m1.getSparsity();\n+ case TRANS:\n+ return m1.getSparsity();\n+ case RESHAPE:\n+ return m1.getSparsity();\n+ default:\nthrow new NotImplementedException();\n}\n+ }\n- private double estimIntern(double sp1, double sp2, long m, long k, long n) {\n+ private double estimInternMM(double sp1, double sp2, long m, long k, long n) {\nreturn OptimizerUtils.getMatMultSparsity(sp1, sp2, m, k, n, false);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -31,13 +31,11 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n/**\n- * This 
estimator implements a remarkably simple yet effective\n- * approach for incorporating structural properties into sparsity\n- * estimation. The key idea is to maintain row and column nnz per\n- * matrix, along with additional meta data.\n+ * This estimator implements a remarkably simple yet effective approach for\n+ * incorporating structural properties into sparsity estimation. The key idea is\n+ * to maintain row and column nnz per matrix, along with additional meta data.\n*/\n-public class EstimatorMatrixHistogram extends SparsityEstimator\n-{\n+public class EstimatorMatrixHistogram extends SparsityEstimator {\n// internal configurations\nprivate static final boolean DEFAULT_USE_EXCEPTS = true;\n@@ -58,12 +56,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nestim(root.getLeft()); // obtain synopsis\nif (!root.getRight().isLeaf())\nestim(root.getLeft()); // obtain synopsis\n- MatrixHistogram h1 = !root.getLeft().isLeaf() ?\n- (MatrixHistogram)root.getLeft().getSynopsis() :\n- new MatrixHistogram(root.getLeft().getData(), _useExcepts);\n- MatrixHistogram h2 = !root.getRight().isLeaf() ?\n- (MatrixHistogram)root.getRight().getSynopsis() :\n- new MatrixHistogram(root.getRight().getData(), _useExcepts);\n+ MatrixHistogram h1 = !root.getLeft().isLeaf() ? (MatrixHistogram) root.getLeft().getSynopsis()\n+ : new MatrixHistogram(root.getLeft().getData(), _useExcepts);\n+ MatrixHistogram h2 = !root.getRight().isLeaf() ? (MatrixHistogram) root.getRight().getSynopsis()\n+ : new MatrixHistogram(root.getRight().getData(), _useExcepts);\n// estimate output sparsity based on input histograms\ndouble ret = estimIntern(h1, h2, OpCode.MM);\n@@ -101,31 +97,35 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nreturn estimInternMM(h1, h2);\ncase MULT:\nreturn Math.min(\n- IntStream.range(0, h1.getRows()).mapToDouble(i -> (double)h1.rNnz[i]/msize * (double)h2.rNnz[i]/msize).sum(),\n- IntStream.range(0, h1.getCols()).mapToDouble(i -> (double)h1.cNnz[i]/msize * (double)h2.cNnz[i]/msize).sum());\n+ IntStream.range(0, h1.getRows()).mapToDouble(i -> h1.rNnz[i] / msize * h2.rNnz[i] / msize).sum(),\n+ IntStream.range(0, h1.getCols()).mapToDouble(i -> h1.cNnz[i] / msize * h2.cNnz[i] / msize).sum());\ncase PLUS:\nreturn Math.min(\n- IntStream.range(0, h1.getRows()).mapToDouble(i -> (double)h1.rNnz[i]/msize\n- + (double)h2.rNnz[i]/msize - (double)h1.rNnz[i]/msize * (double)h2.rNnz[i]/msize).sum(),\n- IntStream.range(0, h1.getCols()).mapToDouble(i -> (double)h1.cNnz[i]/msize\n- + (double)h2.cNnz[i]/msize - (double)h1.cNnz[i]/msize * (double)h2.cNnz[i]/msize).sum());\n+ IntStream.range(0, h1.getRows())\n+ .mapToDouble(i -> h1.rNnz[i] / msize + h2.rNnz[i] / msize\n+ - h1.rNnz[i] / msize * h2.rNnz[i] / msize)\n+ .sum(),\n+ IntStream.range(0, h1.getCols()).mapToDouble(\n+ i -> h1.cNnz[i] / msize + h2.cNnz[i] / msize - h1.cNnz[i] / msize * h2.cNnz[i] / msize)\n+ .sum());\ncase EQZERO:\nreturn OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(),\n(long) h1.getRows() * h1.getCols() - h1.getNonZeros());\ncase DIAG:\n- return (h1.getCols()==1) ?\n- OptimizerUtils.getSparsity(h1.getRows(), h1.getRows(), h1.getNonZeros()) :\n- OptimizerUtils.getSparsity(h1.getRows(), 1, Math.min(h1.getRows(), h1.getNonZeros()));\n+ return (h1.getCols() == 1) ? 
OptimizerUtils.getSparsity(h1.getRows(), h1.getRows(), h1.getNonZeros())\n+ : OptimizerUtils.getSparsity(h1.getRows(), 1, Math.min(h1.getRows(), h1.getNonZeros()));\n// binary operations that preserve sparsity exactly\ncase CBIND:\n- return OptimizerUtils.getSparsity(h1.getRows(),\n- h1.getCols()+h2.getCols(), h1.getNonZeros() + h2.getNonZeros());\n+ return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols() + h2.getCols(),\n+ h1.getNonZeros() + h2.getNonZeros());\ncase RBIND:\n- return OptimizerUtils.getSparsity(h1.getRows()+h2.getRows(),\n- h1.getCols(), h1.getNonZeros() + h2.getNonZeros());\n+ return OptimizerUtils.getSparsity(h1.getRows() + h2.getRows(), h1.getCols(),\n+ h1.getNonZeros() + h2.getNonZeros());\n// unary operation that preserve sparsity exactly\ncase NEQZERO:\n+ return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\ncase TRANS:\n+ return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\ncase RESHAPE:\nreturn OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\ndefault:\n@@ -144,7 +144,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n// special case, with hybrid exact and approximate output\nelse if (h1.cNnz1e != null && h2.rNnz1e != null) {\n// note: normally h1.getRows()*h2.getCols() would define mnOut\n- //but by leveraging the knowledge of rows/cols w/ <=1 nnz, we account\n+ // but by leveraging the knowledge of rows/cols w/ <=1 nnz, we\n+ // account\n// that exact and approximate fractions touch different areas\nlong mnOut = (h1.rNonEmpty - h1.rN1) * (h2.cNonEmpty - h2.cN1);\ndouble spOutRest = 0;\n@@ -153,8 +154,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnnz += h1.cNnz1e[j] * h2.rNnz[j];\nnnz += (h1.cNnz[j] - h1.cNnz1e[j]) * h2.rNnz1e[j];\n// approximate fraction, w/o double counting\n- double lsp = (double)(h1.cNnz[j]-h1.cNnz1e[j])\n- * (h2.rNnz[j]-h2.rNnz1e[j]) / mnOut;\n+ double lsp = (double) (h1.cNnz[j] - h1.cNnz1e[j]) * (h2.rNnz[j] - h2.rNnz1e[j]) / mnOut;\nspOutRest = spOutRest + lsp - spOutRest * lsp;\n}\nnnz += (long) (spOutRest * mnOut);\n@@ -171,16 +171,13 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n// exploit upper bound on nnz based on non-empty rows/cols\n- nnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ?\n- Math.min((long)h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n+ nnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ? Math.min((long) h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n// exploit lower bound on nnz based on half-full rows/cols\n- nnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ?\n- Math.max((long)h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n+ nnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ? 
Math.max((long) h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n// compute final sparsity\n- return OptimizerUtils.getSparsity(\n- h1.getRows(), h2.getCols(), nnz);\n+ return OptimizerUtils.getSparsity(h1.getRows(), h2.getCols(), nnz);\n}\nprivate static class MatrixHistogram {\n@@ -192,9 +189,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n// additional summary statistics\nprivate final int rMaxNnz, cMaxNnz; // max nnz per row/row\nprivate final int rN1, cN1; // number of rows/cols with nnz=1\n- private final int rNonEmpty, cNonEmpty; //number of non-empty rows/cols (w/ empty is nnz=0)\n- private final int rNdiv2, cNdiv2; //number of rows/cols with nnz > #cols/2 and #rows/2\n- private boolean fullDiag; //true if there exists a full diagonal of nonzeros\n+ private final int rNonEmpty, cNonEmpty; // number of non-empty rows/cols\n+ // (w/ empty is nnz=0)\n+ private final int rNdiv2, cNdiv2; // number of rows/cols with nnz >\n+ // #cols/2 and #rows/2\n+ private boolean fullDiag; // true if there exists a full diagonal of\n+ // nonzeros\npublic MatrixHistogram(MatrixBlock in, boolean useExcepts) {\n// 1) allocate basic synopsis\n@@ -207,7 +207,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nif (in.isInSparseFormat()) {\nSparseBlock sblock = in.getSparseBlock();\nfor (int i = 0; i < in.getNumRows(); i++) {\n- if( sblock.isEmpty(i) ) continue;\n+ if (sblock.isEmpty(i))\n+ continue;\nint apos = sblock.pos(i);\nint alen = sblock.size(i);\nint[] aix = sblock.indexes(i);\n@@ -215,8 +216,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nLibMatrixAgg.countAgg(sblock.values(i), cNnz, aix, apos, alen);\nfullDiag &= aix[apos] == i;\n}\n- }\n- else {\n+ } else {\nDenseBlock dblock = in.getDenseBlock();\nfor (int i = 0; i < in.getNumRows(); i++) {\ndouble[] avals = dblock.values(i);\n@@ -251,7 +251,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nif (in.isInSparseFormat()) {\nSparseBlock sblock = in.getSparseBlock();\nfor (int i = 0; i < in.getNumRows(); i++) {\n- if( sblock.isEmpty(i) ) continue;\n+ if (sblock.isEmpty(i))\n+ continue;\nint alen = sblock.size(i);\nint apos = sblock.pos(i);\nint[] aix = sblock.indexes(i);\n@@ -261,8 +262,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nfor (int k = apos; k < apos + alen; k++)\ncNnz1e[aix[k]]++;\n}\n- }\n- else {\n+ } else {\nDenseBlock dblock = in.getDenseBlock();\nfor (int i = 0; i < in.getNumRows(); i++) {\ndouble[] avals = dblock.values(i);\n@@ -300,15 +300,16 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\npublic long getNonZeros() {\n- return getRows() < getCols() ?\n- IntStream.range(0, getRows()).mapToLong(i-> rNnz[i]).sum() :\n- IntStream.range(0, getRows()).mapToLong(i-> cNnz[i]).sum();\n+ return getRows() < getCols() ? IntStream.range(0, getRows()).mapToLong(i -> rNnz[i]).sum()\n+ : IntStream.range(0, getRows()).mapToLong(i -> cNnz[i]).sum();\n}\npublic static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n// exact propagation if lhs or rhs full diag\n- if( h1.fullDiag ) return h2;\n- if( h2.fullDiag ) return h1;\n+ if (h1.fullDiag)\n+ return h2;\n+ if (h2.fullDiag)\n+ return h1;\n// get input/output nnz for scaling\nlong nnz1 = Arrays.stream(h1.rNnz).sum();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended AVG estimator for other operations Closes #813.
49,760
03.08.2018 18:46:49
25,200
e11ae6af3c09678a5ab0241407e552bdfaa897c0
Improved MNC estimator for element-wise multiply Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -31,11 +31,13 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n/**\n- * This estimator implements a remarkably simple yet effective approach for\n- * incorporating structural properties into sparsity estimation. The key idea is\n- * to maintain row and column nnz per matrix, along with additional meta data.\n+ * This estimator implements a remarkably simple yet effective\n+ * approach for incorporating structural properties into sparsity\n+ * estimation. The key idea is to maintain row and column nnz per\n+ * matrix, along with additional meta data.\n*/\n-public class EstimatorMatrixHistogram extends SparsityEstimator {\n+public class EstimatorMatrixHistogram extends SparsityEstimator\n+{\n//internal configurations\nprivate static final boolean DEFAULT_USE_EXCEPTS = true;\n@@ -56,10 +58,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\nestim(root.getLeft()); //obtain synopsis\nif( !root.getRight().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\n- MatrixHistogram h1 = !root.getLeft().isLeaf() ? (MatrixHistogram) root.getLeft().getSynopsis()\n- : new MatrixHistogram(root.getLeft().getData(), _useExcepts);\n- MatrixHistogram h2 = !root.getRight().isLeaf() ? (MatrixHistogram) root.getRight().getSynopsis()\n- : new MatrixHistogram(root.getRight().getData(), _useExcepts);\n+ MatrixHistogram h1 = !root.getLeft().isLeaf() ?\n+ (MatrixHistogram)root.getLeft().getSynopsis() :\n+ new MatrixHistogram(root.getLeft().getData(), _useExcepts);\n+ MatrixHistogram h2 = !root.getRight().isLeaf() ?\n+ (MatrixHistogram)root.getRight().getSynopsis() :\n+ new MatrixHistogram(root.getRight().getData(), _useExcepts);\n//estimate output sparsity based on input histograms\ndouble ret = estimIntern(h1, h2, OpCode.MM);\n@@ -96,36 +100,35 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\ncase MM:\nreturn estimInternMM(h1, h2);\ncase MULT:\n- return Math.min(\n- IntStream.range(0, h1.getRows()).mapToDouble(i -> h1.rNnz[i] / msize * h2.rNnz[i] / msize).sum(),\n- IntStream.range(0, h1.getCols()).mapToDouble(i -> h1.cNnz[i] / msize * h2.cNnz[i] / msize).sum());\n+ final long N1 = h1.getNonZeros();\n+ final long N2 = h2.getNonZeros();\n+ final long scale = IntStream.range(0, h1.getCols())\n+ .mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ return IntStream.range(0, h1.getRows()).mapToLong(\n+ i -> (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2).sum() / msize;\ncase PLUS:\nreturn Math.min(\n- IntStream.range(0, h1.getRows())\n- .mapToDouble(i -> h1.rNnz[i] / msize + h2.rNnz[i] / msize\n- - h1.rNnz[i] / msize * h2.rNnz[i] / msize)\n- .sum(),\n- IntStream.range(0, h1.getCols()).mapToDouble(\n- i -> h1.cNnz[i] / msize + h2.cNnz[i] / msize - h1.cNnz[i] / msize * h2.cNnz[i] / msize)\n- .sum());\n+ IntStream.range(0, h1.getRows()).mapToDouble(i -> h1.rNnz[i]/msize\n+ + h2.rNnz[i]/msize - h1.rNnz[i]/msize * h2.rNnz[i]/msize).sum(),\n+ IntStream.range(0, h1.getCols()).mapToDouble(i -> h1.cNnz[i]/msize\n+ + h2.cNnz[i]/msize - h1.cNnz[i]/msize * h2.cNnz[i]/msize).sum());\ncase EQZERO:\nreturn OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(),\n(long)h1.getRows() * h1.getCols() - h1.getNonZeros());\ncase DIAG:\n- return (h1.getCols() == 1) ? 
OptimizerUtils.getSparsity(h1.getRows(), h1.getRows(), h1.getNonZeros())\n- : OptimizerUtils.getSparsity(h1.getRows(), 1, Math.min(h1.getRows(), h1.getNonZeros()));\n+ return (h1.getCols()==1) ?\n+ OptimizerUtils.getSparsity(h1.getRows(), h1.getRows(), h1.getNonZeros()) :\n+ OptimizerUtils.getSparsity(h1.getRows(), 1, Math.min(h1.getRows(), h1.getNonZeros()));\n//binary operations that preserve sparsity exactly\ncase CBIND:\n- return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols() + h2.getCols(),\n- h1.getNonZeros() + h2.getNonZeros());\n+ return OptimizerUtils.getSparsity(h1.getRows(),\n+ h1.getCols()+h2.getCols(), h1.getNonZeros() + h2.getNonZeros());\ncase RBIND:\n- return OptimizerUtils.getSparsity(h1.getRows() + h2.getRows(), h1.getCols(),\n- h1.getNonZeros() + h2.getNonZeros());\n+ return OptimizerUtils.getSparsity(h1.getRows()+h2.getRows(),\n+ h1.getCols(), h1.getNonZeros() + h2.getNonZeros());\n//unary operation that preserve sparsity exactly\ncase NEQZERO:\n- return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\ncase TRANS:\n- return OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\ncase RESHAPE:\nreturn OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(), h1.getNonZeros());\ndefault:\n@@ -144,8 +147,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\n//special case, with hybrid exact and approximate output\nelse if(h1.cNnz1e!=null && h2.rNnz1e != null) {\n//note: normally h1.getRows()*h2.getCols() would define mnOut\n- // but by leveraging the knowledge of rows/cols w/ <=1 nnz, we\n- // account\n+ //but by leveraging the knowledge of rows/cols w/ <=1 nnz, we account\n//that exact and approximate fractions touch different areas\nlong mnOut = (h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1);\ndouble spOutRest = 0;\n@@ -154,7 +156,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\nnnz += h1.cNnz1e[j] * h2.rNnz[j];\nnnz += (h1.cNnz[j]-h1.cNnz1e[j]) * h2.rNnz1e[j];\n//approximate fraction, w/o double counting\n- double lsp = (double) (h1.cNnz[j] - h1.cNnz1e[j]) * (h2.rNnz[j] - h2.rNnz1e[j]) / mnOut;\n+ double lsp = (double)(h1.cNnz[j]-h1.cNnz1e[j])\n+ * (h2.rNnz[j]-h2.rNnz1e[j]) / mnOut;\nspOutRest = spOutRest + lsp - spOutRest*lsp;\n}\nnnz += (long)(spOutRest * mnOut);\n@@ -171,13 +174,16 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\n}\n//exploit upper bound on nnz based on non-empty rows/cols\n- nnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ? Math.min((long) h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n+ nnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ?\n+ Math.min((long)h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n//exploit lower bound on nnz based on half-full rows/cols\n- nnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ? 
Math.max((long) h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n+ nnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ?\n+ Math.max((long)h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n//compute final sparsity\n- return OptimizerUtils.getSparsity(h1.getRows(), h2.getCols(), nnz);\n+ return OptimizerUtils.getSparsity(\n+ h1.getRows(), h2.getCols(), nnz);\n}\nprivate static class MatrixHistogram {\n@@ -189,12 +195,9 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\n// additional summary statistics\nprivate final int rMaxNnz, cMaxNnz; //max nnz per row/row\nprivate final int rN1, cN1; //number of rows/cols with nnz=1\n- private final int rNonEmpty, cNonEmpty; // number of non-empty rows/cols\n- // (w/ empty is nnz=0)\n- private final int rNdiv2, cNdiv2; // number of rows/cols with nnz >\n- // #cols/2 and #rows/2\n- private boolean fullDiag; // true if there exists a full diagonal of\n- // nonzeros\n+ private final int rNonEmpty, cNonEmpty; //number of non-empty rows/cols (w/ empty is nnz=0)\n+ private final int rNdiv2, cNdiv2; //number of rows/cols with nnz > #cols/2 and #rows/2\n+ private boolean fullDiag; //true if there exists a full diagonal of nonzeros\npublic MatrixHistogram(MatrixBlock in, boolean useExcepts) {\n// 1) allocate basic synopsis\n@@ -207,8 +210,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\nif( in.isInSparseFormat() ) {\nSparseBlock sblock = in.getSparseBlock();\nfor( int i=0; i<in.getNumRows(); i++ ) {\n- if (sblock.isEmpty(i))\n- continue;\n+ if( sblock.isEmpty(i) ) continue;\nint apos = sblock.pos(i);\nint alen = sblock.size(i);\nint[] aix = sblock.indexes(i);\n@@ -216,7 +218,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\nLibMatrixAgg.countAgg(sblock.values(i), cNnz, aix, apos, alen);\nfullDiag &= aix[apos] == i;\n}\n- } else {\n+ }\n+ else {\nDenseBlock dblock = in.getDenseBlock();\nfor( int i=0; i<in.getNumRows(); i++ ) {\ndouble[] avals = dblock.values(i);\n@@ -251,8 +254,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\nif( in.isInSparseFormat() ) {\nSparseBlock sblock = in.getSparseBlock();\nfor( int i=0; i<in.getNumRows(); i++ ) {\n- if (sblock.isEmpty(i))\n- continue;\n+ if( sblock.isEmpty(i) ) continue;\nint alen = sblock.size(i);\nint apos = sblock.pos(i);\nint[] aix = sblock.indexes(i);\n@@ -262,7 +264,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\nfor( int k=apos; k<apos+alen; k++ )\ncNnz1e[aix[k]]++;\n}\n- } else {\n+ }\n+ else {\nDenseBlock dblock = in.getDenseBlock();\nfor( int i=0; i<in.getNumRows(); i++ ) {\ndouble[] avals = dblock.values(i);\n@@ -300,16 +303,15 @@ public class EstimatorMatrixHistogram extends SparsityEstimator {\n}\npublic long getNonZeros() {\n- return getRows() < getCols() ? IntStream.range(0, getRows()).mapToLong(i -> rNnz[i]).sum()\n- : IntStream.range(0, getRows()).mapToLong(i -> cNnz[i]).sum();\n+ return getRows() < getCols() ?\n+ IntStream.range(0, getRows()).mapToLong(i-> rNnz[i]).sum() :\n+ IntStream.range(0, getRows()).mapToLong(i-> cNnz[i]).sum();\n}\npublic static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n//exact propagation if lhs or rhs full diag\n- if (h1.fullDiag)\n- return h2;\n- if (h2.fullDiag)\n- return h1;\n+ if( h1.fullDiag ) return h2;\n+ if( h2.fullDiag ) return h1;\n//get input/output nnz for scaling\nlong nnz1 = Arrays.stream(h1.rNnz).sum();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Improved MNC estimator for element-wise multiply Closes #815.
49,727
05.08.2018 14:37:59
25,200
fb90e3bff41e9c0d58a80867243641faec437c09
Language documentation for paramserv builtin function Closes
[ { "change_type": "MODIFY", "old_path": "docs/dml-language-reference.md", "new_path": "docs/dml-language-reference.md", "diff": "@@ -53,6 +53,7 @@ limitations under the License.\n* [Read/Write Built-In Functions](dml-language-reference.html#readwrite-built-in-functions)\n* [Data Pre-Processing Built-In Functions](dml-language-reference.html#data-pre-processing-built-in-functions)\n* [Deep Learning Built-In Functions](dml-language-reference.html#deep-learning-built-in-functions)\n+ * [Parameter Server Built-In Function](dml-language-reference.html#parameter-server-built-in-function)\n* [Other Built-In Functions](dml-language-reference.html#other-built-in-functions)\n* [Frames](dml-language-reference.html#frames)\n* [Creating Frames](dml-language-reference.html#creating-frames)\n@@ -1536,6 +1537,82 @@ Examples:\n| bias_add | | `ones = matrix(1, rows=1, cols=height*width); output = input + matrix(bias %*% ones, rows=1, cols=numChannels*height*width)` |\n| bias_multiply | | `ones = matrix(1, rows=1, cols=height*width); output = input * matrix(bias %*% ones, rows=1, cols=numChannels*height*width)` |\n+### Parameter Server Built-in Function\n+Apart from data-parallel operations and task-parallel parfor loops, SystemML also supports a **data-parallel Parameter Server** via a built-in function **paramserv**. Currently both local multi-threaded and spark distributed backend are supported to execute the **paramserv** function. So far we only support a single parameter server with N workers as well as synchronous and asynchronous model updates per batch or epoch. For example, in order to train a model in local backend with update strategy BSP, 10 epochs, 64 batchsize, 10 workers, **paramserv** function should look like this:\n+\n+\n+ resultModel=paramserv(model=initModel, features=X, labels=Y,\n+ upd=\"fun1\", agg=\"fun2\", epochs=10, k=10, hyperparams=hParams)\n+\n+\n+**Table**: Inputs of paramserv function\n+\n+Parameters | Description | Type | Mandatory | Options\n+-------- | ----------- | ---------- | ---------- | -------\n+model | All the parameters (e.g., the weight and bias matrices) | list | yes |\n+features | Training features | matrix | yes\n+labels | Training labels | matrix | yes\n+val_features | Validation features | matrix | no\n+val_labels | Validation labels | matrix | no\n+upd | Physical name of gradient calculation function. The format should be \"related path:func name\". For example, \"./mnist_lenet_paramserv_sgd.dml::gradients\". | string | yes\n+agg | Physical name of gradient aggregation function. The format should be \"related path:func name\". For example, \"./mnist_lenet_paramserv_sgd.dml::aggregation\". 
| string | yes\n+mode | Execution backend for data partitioning and worker execution | string | no | \"LOCAL\"(default), \"REMOTE_SPARK\"\n+utype | Update strategy | string | no | \"ASP\"(default), \"BSP\"\n+freq | Frequency of model updating | string | no | \"EPOCH\"(default), \"BATCH\"\n+epochs | Number of epochs, where an epoch is a full scan over the data | integer | yes |\n+batchsize | Size of a mini-batch (number of rows) | integer | no | 64(default)\n+k | Number of workers | integer | no | Number of vcores(default)\n+scheme | Scheme of data partition, i.e., how the data is distributed across workers | string | no | \"DISJOINT_CONTIGUOUS\"(default), \"DISJOINT_ROUND_ROBIN\", \"DISJOINT_RANDOM\", \"OVERLAP_RESHUFFLE\"\n+hyperparams | Additional hyper parameters, e.g., learning rate, momentum | list | yes |\n+checkpointing | Checkpoint strategy, currently not supported | string | no |\n+\n+**Table**: Output of paramserv function\n+\n+Output | Description | Type\n+-------- | ----------- | ----------\n+model | Trained model | list\n+\n+**Update function:**\n+\n+The update function calculates the gradients for a single mini-batch and the given model (e.g., via a forward and backward pass through a neural network). The implementation of this function should be based on a function signature like this: (i.e., **the input parameter including both type and name should be exactly the same as the below, except that the output name could be different**)\n+\n+```sh\n+gradients = function(list[unknown] model, list[unknown] hyperparams,\n+ matrix[double] features, matrix[double] labels)\n+ return (list[unknown] gradients)\n+ # the output name can be something else than \"gradients\" but should always return a list\n+```\n+\n+**Aggregate function:**\n+\n+The aggregate function then takes the computed or accrued gradients and updates the model via some optimizer such as Adagrad or Adam. The implementation of this function should be based on a function signature like this: (i.e., **the input parameter including both type and name should be exactly the same as the below, except that the output name could be different**)\n+\n+```sh\n+aggregation = function(list[unknown] model, list[unknown] hyperparams,\n+ list[unknown] gradients)\n+ return (list[unknown] modelResult)\n+ # the output name can be something else than \"modelResult\" but should always return a list\n+```\n+\n+**Update strategy:**\n+\n+Currently, two types of update strategy, **ASP** and **BSP**, are supported. **ASP**, a.k.a. _Asynchronous Parallel_, means that the model updates will be completed in an asynchronous manner. The parameter server updates the model and broadcasts the updated model immediately with the fresh gradients pushed by the worker and then the worker is able to pull the new updated model. This push-and-pull process is done asynchronously across workers. While **BSP**, a.k.a. _Bulk Synchronous Parallel_, the server will update the global model until having received all the gradients sent by workers in one iteration and then workers could move into the next iteration. Hence, the overall performance is affected by stragglers (i.e., the slowest worker).\n+\n+**Update frequency:**\n+\n+When pushing the gradients from workers to server for updating the model, we could determine how often this push-and-pull process will be done. Currently, two types of update frequency, **EPOCH** and **BATCH** are supported. When setting to **EPOCH**, the generated gradients of each mini-batch are accumulated locally in each worker. 
The accrued gradients are then pushed to the server whenever a worker finished an epoch. While setting to **BATCH**, the generated gradients of each mini-batch are pushed to server immediately to launch the push-and-pull process.\n+\n+**Data partition schemes:**\n+\n+Before launching the data-parallel parameter server, the original data will be partitioned across workers according to some schemes. Currently, four types of schemes are supported, Disjoint_Contigous, Disjoint_Round_Robin, Disjoint_Random, Overlap_Reshuffle.\n+\n+Scheme | Definition\n+-------- | -----------\n+Disjoint_Contiguous | For each worker, use a right indexing operation X[beg:end,] to obtain contiguous, non-overlapping partitions of rows\n+Disjoint_Round_Robin | For each worker, use a permutation multiply or simpler a removeEmpty such as removeEmpty(target=X, margin=rows, select=(seq(1,nrow(X))%%k)==id)\n+Disjoint_Random | For each worker, use a permutation multiply P[beg:end,] %*% X, where P is constructed for example with P=table(seq(1,nrow(X),sample(nrow(X), nrow(X)))), i.e., sampling without replacement to ensure disjointness\n+Overlap_Reshuffle | Similar to the above, except to create a new permutation matrix for each worker and without the indexing on P\n+\n### Other Built-In Functions\n**Table 16**: Other Built-In Functions\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2090] Language documentation for paramserv builtin function Closes #816.
49,738
05.08.2018 17:45:44
25,200
19b310c6b526f45789b242050c2f643a2e3ccf47
[MINOR] Fix warnings GPU backend (unused imports and constructors)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "@@ -27,7 +27,6 @@ import java.util.Set;\nimport org.apache.commons.lang3.StringUtils;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLOptions;\n-import org.apache.sysml.api.DMLScript.EvictionPolicy;\nimport org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.api.mlcontext.MLContext.ExecutionType;\n@@ -51,7 +50,6 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n-import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.utils.Explain;\nimport org.apache.sysml.utils.Explain.ExplainCounts;\nimport org.apache.sysml.utils.Explain.ExplainType;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -614,16 +614,12 @@ public class GPUMemoryManager {\nsuper(p);\n}\n- public CustomPointer() {\n- super();\n- }\n-\n@Override\npublic long getNativePointer() {\nreturn super.getNativePointer();\n}\n-\n}\n+\n/**\n* Class that governs the eviction policy\n*/\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix warnings GPU backend (unused imports and constructors)
49,727
05.08.2018 14:36:09
-7,200
78e9d836ea16296fcf3bbd647b60638ce2bc24c3
Cleanup paramserv language API, incl defaults Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ParameterizedBuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/ParameterizedBuiltinFunctionExpression.java", "diff": "@@ -330,12 +330,12 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\ncheckDataType(fname, Statement.PS_MODEL, DataType.LIST, conditional); // check the model which is the only non-parameterized argument\ncheckDataType(fname, Statement.PS_FEATURES, DataType.MATRIX, conditional);\ncheckDataType(fname, Statement.PS_LABELS, DataType.MATRIX, conditional);\n- checkDataType(fname, Statement.PS_VAL_FEATURES, DataType.MATRIX, conditional);\n- checkDataType(fname, Statement.PS_VAL_LABELS, DataType.MATRIX, conditional);\n+ checkDataValueType(true, fname, Statement.PS_VAL_FEATURES, DataType.MATRIX, ValueType.DOUBLE, conditional);\n+ checkDataValueType(true, fname, Statement.PS_VAL_LABELS, DataType.MATRIX, ValueType.DOUBLE, conditional);\ncheckDataValueType(false, fname, Statement.PS_UPDATE_FUN, DataType.SCALAR, ValueType.STRING, conditional);\ncheckDataValueType(false, fname, Statement.PS_AGGREGATION_FUN, DataType.SCALAR, ValueType.STRING, conditional);\n- checkStringParam(false, fname, Statement.PS_MODE, conditional);\n- checkStringParam(false, fname, Statement.PS_UPDATE_TYPE, conditional);\n+ checkStringParam(true, fname, Statement.PS_MODE, conditional);\n+ checkStringParam(true, fname, Statement.PS_UPDATE_TYPE, conditional);\ncheckStringParam(true, fname, Statement.PS_FREQUENCY, conditional);\ncheckDataValueType(false, fname, Statement.PS_EPOCHS, DataType.SCALAR, ValueType.INT, conditional);\ncheckDataValueType(true, fname, Statement.PS_BATCH_SIZE, DataType.SCALAR, ValueType.INT, conditional);\n@@ -860,7 +860,7 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\nif (optional) {\nreturn;\n}\n- raiseValidateError(String.format(\"Named parameter '%s' is missing. Please specify the input.\", fname),\n+ raiseValidateError(String.format(\"Named parameter '%s' is missing. 
Please specify the input.\", pname),\nconditional, LanguageErrorCodes.INVALID_PARAMETERS);\n} else if (data.getOutput().getDataType() != dt || data.getOutput().getValueType() != vt)\nraiseValidateError(String.format(\"Input to %s::%s must be of type '%s', '%s'.It should not be of type '%s', '%s'.\",\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/LocalPSWorker.java", "diff": "@@ -39,9 +39,8 @@ public class LocalPSWorker extends PSWorker implements Callable<Void> {\nprotected LocalPSWorker() {}\n- public LocalPSWorker(int workerID, String updFunc, Statement.PSFrequency freq, int epochs, long batchSize,\n- MatrixObject valFeatures, MatrixObject valLabels, ExecutionContext ec, ParamServer ps) {\n- super(workerID, updFunc, freq, epochs, batchSize, valFeatures, valLabels, ec, ps);\n+ public LocalPSWorker(int workerID, String updFunc, Statement.PSFrequency freq, int epochs, long batchSize, ExecutionContext ec, ParamServer ps) {\n+ super(workerID, updFunc, freq, epochs, batchSize, ec, ps);\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/PSWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/paramserv/PSWorker.java", "diff": "@@ -50,23 +50,17 @@ public abstract class PSWorker implements Serializable\nprotected FunctionCallCPInstruction _inst;\nprotected MatrixObject _features;\nprotected MatrixObject _labels;\n-\n- protected MatrixObject _valFeatures;\n- protected MatrixObject _valLabels;\nprotected String _updFunc;\nprotected Statement.PSFrequency _freq;\nprotected PSWorker() {}\n- protected PSWorker(int workerID, String updFunc, Statement.PSFrequency freq, int epochs, long batchSize,\n- MatrixObject valFeatures, MatrixObject valLabels, ExecutionContext ec, ParamServer ps) {\n+ protected PSWorker(int workerID, String updFunc, Statement.PSFrequency freq, int epochs, long batchSize, ExecutionContext ec, ParamServer ps) {\n_workerID = workerID;\n_updFunc = updFunc;\n_freq = freq;\n_epochs = epochs;\n_batchSize = batchSize;\n- _valFeatures = valFeatures;\n- _valLabels = valLabels;\n_ec = ec;\n_ps = ps;\nsetupUpdateFunction(updFunc, ec);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParamservBuiltinCPInstruction.java", "diff": "@@ -36,8 +36,6 @@ import static org.apache.sysml.parser.Statement.PS_PARALLELISM;\nimport static org.apache.sysml.parser.Statement.PS_SCHEME;\nimport static org.apache.sysml.parser.Statement.PS_UPDATE_FUN;\nimport static org.apache.sysml.parser.Statement.PS_UPDATE_TYPE;\n-import static org.apache.sysml.parser.Statement.PS_VAL_FEATURES;\n-import static org.apache.sysml.parser.Statement.PS_VAL_LABELS;\nimport java.util.HashMap;\nimport java.util.HashSet;\n@@ -65,14 +63,14 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n-import org.apache.sysml.runtime.controlprogram.paramserv.dp.DataPartitionLocalScheme;\n-import org.apache.sysml.runtime.controlprogram.paramserv.dp.LocalDataPartitioner;\nimport 
org.apache.sysml.runtime.controlprogram.paramserv.LocalPSWorker;\nimport org.apache.sysml.runtime.controlprogram.paramserv.LocalParamServer;\nimport org.apache.sysml.runtime.controlprogram.paramserv.ParamServer;\nimport org.apache.sysml.runtime.controlprogram.paramserv.ParamservUtils;\nimport org.apache.sysml.runtime.controlprogram.paramserv.SparkPSBody;\nimport org.apache.sysml.runtime.controlprogram.paramserv.SparkPSWorker;\n+import org.apache.sysml.runtime.controlprogram.paramserv.dp.DataPartitionLocalScheme;\n+import org.apache.sysml.runtime.controlprogram.paramserv.dp.LocalDataPartitioner;\nimport org.apache.sysml.runtime.controlprogram.paramserv.rpc.PSRpcFactory;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.Timing;\n@@ -83,8 +81,10 @@ import org.apache.sysml.utils.Statistics;\npublic class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruction {\nprivate static final int DEFAULT_BATCH_SIZE = 64;\n- private static final PSFrequency DEFAULT_UPDATE_FREQUENCY = PSFrequency.BATCH;\n+ private static final PSFrequency DEFAULT_UPDATE_FREQUENCY = PSFrequency.EPOCH;\nprivate static final PSScheme DEFAULT_SCHEME = PSScheme.DISJOINT_CONTIGUOUS;\n+ private static final PSModeType DEFAULT_MODE = PSModeType.LOCAL;\n+ private static final PSUpdateType DEFAULT_TYPE = PSUpdateType.ASP;\n//internal local debug level\nprivate static final boolean LDEBUG = false;\n@@ -113,13 +113,14 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\ncase REMOTE_SPARK:\nrunOnSpark((SparkExecutionContext) ec, mode);\nbreak;\n+ default:\n+ throw new DMLRuntimeException(String.format(\"Paramserv func: not support mode %s\", mode));\n}\n}\nprivate void runOnSpark(SparkExecutionContext sec, PSModeType mode) {\nTiming tSetup = DMLScript.STATISTICS ? 
new Timing(true) : null;\n- PSScheme scheme = getScheme();\nint workerNum = getWorkerNum(mode);\nString updFunc = getParam(PS_UPDATE_FUN);\nString aggFunc = getParam(PS_AGGREGATION_FUN);\n@@ -129,9 +130,6 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n// Level of par is 1 in spark backend because one worker will be launched per task\nExecutionContext newEC = ParamservUtils.createExecutionContext(sec, newVarsMap, updFunc, aggFunc, 1);\n- MatrixObject features = sec.getMatrixObject(getParam(PS_FEATURES));\n- MatrixObject labels = sec.getMatrixObject(getParam(PS_LABELS));\n-\n// Create the agg service's execution context\nExecutionContext aggServiceEC = ParamservUtils.copyExecutionContext(newEC, 1).get(0);\n@@ -172,24 +170,25 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nif (DMLScript.STATISTICS)\nStatistics.accPSSetupTime((long) tSetup.stop());\n+ MatrixObject features = sec.getMatrixObject(getParam(PS_FEATURES));\n+ MatrixObject labels = sec.getMatrixObject(getParam(PS_LABELS));\ntry {\n- ParamservUtils.doPartitionOnSpark(sec, features, labels, scheme, workerNum) // Do data partitioning\n+ ParamservUtils.doPartitionOnSpark(sec, features, labels, getScheme(), workerNum) // Do data partitioning\n.foreach(worker); // Run remote workers\n} catch (Exception e) {\nthrow new DMLRuntimeException(\"Paramserv function failed: \", e);\n} finally {\n- // Stop the netty server\n- server.close();\n+ server.close(); // Stop the netty server\n}\n// Accumulate the statistics for remote workers\nif (DMLScript.STATISTICS) {\n- Statistics.accPSSetupTime(aSetup.value().longValue());\n- Statistics.incWorkerNumber(aWorker.value().longValue());\n- Statistics.accPSLocalModelUpdateTime(aUpdate.value().longValue());\n- Statistics.accPSBatchIndexingTime(aIndex.value().longValue());\n- Statistics.accPSGradientComputeTime(aGrad.value().longValue());\n- Statistics.accPSRpcRequestTime(aRPC.value().longValue());\n+ Statistics.accPSSetupTime(aSetup.value());\n+ Statistics.incWorkerNumber(aWorker.value());\n+ Statistics.accPSLocalModelUpdateTime(aUpdate.value());\n+ Statistics.accPSBatchIndexingTime(aIndex.value());\n+ Statistics.accPSGradientComputeTime(aGrad.value());\n+ Statistics.accPSRpcRequestTime(aRPC.value());\n}\n// Fetch the final model from ps\n@@ -205,11 +204,9 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nString updFunc = getParam(PS_UPDATE_FUN);\nString aggFunc = getParam(PS_AGGREGATION_FUN);\n- int k = getParLevel(workerNum);\n-\n// Get the compiled execution context\nLocalVariableMap newVarsMap = createVarsMap(ec);\n- ExecutionContext newEC = ParamservUtils.createExecutionContext(ec, newVarsMap, updFunc, aggFunc, k);\n+ ExecutionContext newEC = ParamservUtils.createExecutionContext(ec, newVarsMap, updFunc, aggFunc, getParLevel(workerNum));\n// Create workers' execution context\nList<ExecutionContext> workerECs = ParamservUtils.copyExecutionContext(newEC, workerNum);\n@@ -219,17 +216,14 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nPSFrequency freq = getFrequency();\nPSUpdateType updateType = getUpdateType();\n- int epochs = getEpochs();\n// Create the parameter server\nListObject model = ec.getListObject(getParam(PS_MODEL));\nParamServer ps = createPS(mode, aggFunc, updateType, workerNum, model, aggServiceEC);\n// Create the local workers\n- MatrixObject valFeatures = ec.getMatrixObject(getParam(PS_VAL_FEATURES));\n- MatrixObject valLabels = 
ec.getMatrixObject(getParam(PS_VAL_LABELS));\nList<LocalPSWorker> workers = IntStream.range(0, workerNum)\n- .mapToObj(i -> new LocalPSWorker(i, updFunc, freq, epochs, getBatchSize(), valFeatures, valLabels, workerECs.get(i), ps))\n+ .mapToObj(i -> new LocalPSWorker(i, updFunc, freq, getEpochs(), getBatchSize(), workerECs.get(i), ps))\n.collect(Collectors.toList());\n// Do data partition\n@@ -251,8 +245,7 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nfor (Future<Void> ret : es.invokeAll(workers))\nret.get(); //error handling\n// Fetch the final model from ps\n- ListObject result = ps.getResult();\n- ec.setVariable(output.getName(), result);\n+ ec.setVariable(output.getName(), ps.getResult());\n} catch (InterruptedException | ExecutionException e) {\nthrow new DMLRuntimeException(\"ParamservBuiltinCPInstruction: some error occurred: \", e);\n} finally {\n@@ -271,6 +264,9 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n}\nprivate PSModeType getPSMode() {\n+ if (!getParameterMap().containsKey(PS_MODE)) {\n+ return DEFAULT_MODE;\n+ }\nPSModeType mode;\ntry {\nmode = PSModeType.valueOf(getParam(PS_MODE));\n@@ -294,6 +290,9 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n}\nprivate PSUpdateType getUpdateType() {\n+ if (!getParameterMap().containsKey(PS_UPDATE_TYPE)) {\n+ return DEFAULT_TYPE;\n+ }\nPSUpdateType updType;\ntry {\nupdType = PSUpdateType.valueOf(getParam(PS_UPDATE_TYPE));\n@@ -301,7 +300,7 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nthrow new DMLRuntimeException(String.format(\"Paramserv function: not support update type '%s'.\", getParam(PS_UPDATE_TYPE)));\n}\nif (updType == PSUpdateType.SSP)\n- throw new DMLRuntimeException(\"Not support update type SSP.\");\n+ throw new DMLRuntimeException(\"Paramserv function: Not support update type SSP.\");\nreturn updType;\n}\n@@ -318,7 +317,7 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\n}\nprivate int getRemainingCores() {\n- return InfrastructureAnalyzer.getLocalParallelism() - 1;\n+ return InfrastructureAnalyzer.getLocalParallelism();\n}\n/**\n@@ -330,7 +329,6 @@ public class ParamservBuiltinCPInstruction extends ParameterizedBuiltinCPInstruc\nprivate int getWorkerNum(PSModeType mode) {\nswitch (mode) {\ncase LOCAL:\n- // default worker number: available cores - 1 (assign one process for agg service)\nreturn getParameterMap().containsKey(PS_PARALLELISM) ?\nInteger.valueOf(getParam(PS_PARALLELISM)) : getRemainingCores();\ncase REMOTE_SPARK:\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml", "new_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv.dml", "diff": "@@ -123,10 +123,10 @@ train = function(matrix[double] X, matrix[double] Y,\n# Should always use 'features' (batch features), 'labels' (batch labels),\n# 'hyperparams', 'model' as the arguments\n# and return the gradients of type list\n-gradients = function(matrix[double] features,\n- matrix[double] labels,\n+gradients = function(list[unknown] model,\nlist[unknown] hyperparams,\n- list[unknown] model)\n+ matrix[double] features,\n+ matrix[double] labels)\nreturn (list[unknown] gradients) {\nC = as.integer(as.scalar(hyperparams[\"C\"]))\n@@ -205,8 +205,8 @@ gradients = function(matrix[double] features,\n# Should use the arguments named 'model', 'gradients', 'hyperparams'\n# and return always a model of type 
list\naggregation = function(list[unknown] model,\n- list[unknown] gradients,\n- list[unknown] hyperparams)\n+ list[unknown] hyperparams,\n+ list[unknown] gradients)\nreturn (list[unknown] modelResult) {\nW1 = as.matrix(model[1])\nW2 = as.matrix(model[2])\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml", "new_path": "src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml", "diff": "@@ -107,7 +107,7 @@ train = function(matrix[double] X, matrix[double] Y,\nparams = list(lr=lr, mu=mu, decay=decay, C=C, Hin=Hin, Win=Win, Hf=Hf, Wf=Wf, stride=stride, pad=pad, lambda=lambda, F1=F1, F2=F2, N3=N3)\n# Use paramserv function\n- modelList2 = paramserv(model=modelList, features=X, labels=Y, val_features=X_val, val_labels=Y_val, upd=\"./src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml::gradients\", agg=\"./src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml::aggregation\", mode=\"LOCAL\", utype=\"BSP\", epochs=epochs, hyperparams=params)\n+ modelList2 = paramserv(model=modelList, features=X, labels=Y, upd=\"./src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml::gradients\", agg=\"./src/test/scripts/functions/paramserv/mnist_lenet_paramserv_minimum_version.dml::aggregation\", epochs=epochs, hyperparams=params)\nW1 = as.matrix(modelList2[1])\nW2 = as.matrix(modelList2[2])\n@@ -120,10 +120,10 @@ train = function(matrix[double] X, matrix[double] Y,\n}\n-gradients = function(matrix[double] features,\n- matrix[double] labels,\n+gradients = function(list[unknown] model,\nlist[unknown] hyperparams,\n- list[unknown] model)\n+ matrix[double] features,\n+ matrix[double] labels)\nreturn (list[unknown] gradients) {\nC = as.integer(as.scalar(hyperparams[\"C\"]))\n@@ -200,8 +200,8 @@ gradients = function(matrix[double] features,\n}\naggregation = function(list[unknown] model,\n- list[unknown] gradients,\n- list[unknown] hyperparams)\n+ list[unknown] hyperparams,\n+ list[unknown] gradients)\nreturn (list[unknown] modelResult) {\nW1 = as.matrix(model[1])\nW2 = as.matrix(model[2])\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/paramserv/paramserv-without-optional-args.dml", "new_path": "src/test/scripts/functions/paramserv/paramserv-without-optional-args.dml", "diff": "@@ -38,6 +38,18 @@ e2 = \"element2\"\nparams = list(e2=e2)\n# Use paramserv function\n+# Remove the optional \"val_features\" and \"val_labels\"\n+modelList2 = paramserv(model=modelList, features=X, labels=Y, upd=\"gradients\", agg=\"aggregation\", mode=\"REMOTE_SPARK\", utype=\"BSP\", freq=\"EPOCH\", epochs=100, batchsize=64, k=7, scheme=\"DISJOINT_ROUND_ROBIN\", checkpointing=\"EPOCH\")\n+\n+# Remove the optional \"mode\"\n+modelList2 = paramserv(model=modelList, features=X, labels=Y, upd=\"gradients\", agg=\"aggregation\", utype=\"BSP\", freq=\"EPOCH\", epochs=100, batchsize=64, k=7, scheme=\"DISJOINT_ROUND_ROBIN\", checkpointing=\"EPOCH\")\n+\n+# Remove the optional \"utype\"\n+modelList2 = paramserv(model=modelList, features=X, labels=Y, upd=\"gradients\", agg=\"aggregation\", epochs=100, freq=\"EPOCH\", batchsize=64, k=7, scheme=\"DISJOINT_ROUND_ROBIN\", checkpointing=\"EPOCH\")\n+\n+# Remove the optional \"freq\"\n+modelList2 = paramserv(model=modelList, features=X, labels=Y, upd=\"gradients\", agg=\"aggregation\", utype=\"BSP\", epochs=100, batchsize=64, k=7, scheme=\"DISJOINT_ROUND_ROBIN\", checkpointing=\"EPOCH\")\n+\n# Remove the 
optional \"hyperparams\"\nmodelList2 = paramserv(model=modelList, features=X, labels=Y, val_features=X_val, val_labels=Y_val, upd=\"gradients\", agg=\"aggregation\", mode=\"REMOTE_SPARK\", utype=\"BSP\", freq=\"EPOCH\", epochs=100, batchsize=64, k=7, scheme=\"DISJOINT_ROUND_ROBIN\", checkpointing=\"EPOCH\")\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2299] Cleanup paramserv language API, incl defaults Closes #817.
49,736
06.08.2018 09:40:08
25,200
a11933002bfa8ba4d3e50b16f69c60bb36a270f6
Refactored the shadow buffer and added documentation for newly added features Refactored the shadow buffer logic from GPUObject to ShadowBuffer class for maintenance. Added an additional timer to measure shadow buffer time. Updated the gpu documentation
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<sysml.floating.point.precision>double</sysml.floating.point.precision>\n<!-- the eviction policy for the GPU bufferpool. Supported values are lru, mru, lfu, min_evict, align_memory -->\n- <sysml.gpu.eviction.policy>align_memory</sysml.gpu.eviction.policy>\n+ <sysml.gpu.eviction.policy>min_evict</sysml.gpu.eviction.policy>\n<!-- maximum wrap length for instruction and miscellaneous timer column of statistics -->\n<sysml.stats.maxWrapLength>30</sysml.stats.maxWrapLength>\n" }, { "change_type": "MODIFY", "old_path": "docs/gpu.md", "new_path": "docs/gpu.md", "diff": "@@ -92,3 +92,29 @@ num_cores=`grep -c ^processor /proc/cpuinfo`\nmake -j $num_cores\nsudo make install\n```\n+\n+# Advanced Configuration\n+\n+## Using single precision\n+\n+By default, SystemML uses double precision to store its matrices in the GPU memory.\n+To use single precision, the user needs to set the configuration property 'sysml.floating.point.precision'\n+to 'single'. However, with exception of BLAS operations, SystemML always performs all CPU operations\n+in double precision.\n+\n+## Training very deep network\n+\n+### Shadow buffer\n+To train very deep network with double precision, no additional configurations are necessary.\n+But to train very deep network with single precision, the user can speed up the eviction by\n+using shadow buffer. The fraction of the driver memory to be allocated to the shadow buffer can\n+be set by using the configuration property 'sysml.gpu.eviction.shadow.bufferSize'.\n+In the current version, the shadow buffer is currently not guarded by SystemML\n+and can potentially lead to OOM if the network is deep as well as wide.\n+\n+### Unified memory allocator\n+\n+By default, SystemML uses CUDA's memory allocator and performs on-demand eviction\n+using the eviction policy set by the configuration property 'sysml.gpu.eviction.policy'.\n+To use CUDA's unified memory allocator that performs page-level eviction instead,\n+please set the configuration property 'sysml.gpu.memory.allocator' to 'unified_memory'.\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -121,7 +121,7 @@ public class DMLScript\npublic static ExplainType EXPLAIN = DMLOptions.defaultOptions.explainType; // explain type\npublic static String DML_FILE_PATH_ANTLR_PARSER = DMLOptions.defaultOptions.filePath; // filename of dml/pydml script\npublic static String FLOATING_POINT_PRECISION = \"double\"; // data type to use internally\n- public static EvictionPolicy GPU_EVICTION_POLICY = EvictionPolicy.ALIGN_MEMORY; // currently employed GPU eviction policy\n+ public static EvictionPolicy GPU_EVICTION_POLICY = EvictionPolicy.MIN_EVICT; // currently employed GPU eviction policy\npublic static boolean PRINT_GPU_MEMORY_INFO = false; // whether to print GPU memory-related information\npublic static long EVICTION_SHADOW_BUFFER_MAX_BYTES = 0; // maximum number of bytes to use for shadow buffer\npublic static long EVICTION_SHADOW_BUFFER_CURR_BYTES = 0; // number of bytes to use for shadow buffer\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -143,7 +143,7 @@ public class DMLConfig\n_defaultVals.put(GPU_MEMORY_UTILIZATION_FACTOR, 
\"0.9\" );\n_defaultVals.put(GPU_MEMORY_ALLOCATOR, \"cuda\");\n_defaultVals.put(AVAILABLE_GPUS, \"-1\");\n- _defaultVals.put(GPU_EVICTION_POLICY, \"align_memory\");\n+ _defaultVals.put(GPU_EVICTION_POLICY, \"min_evict\");\n_defaultVals.put(SYNCHRONIZE_GPU, \"false\" );\n_defaultVals.put(CACHING_BUFFER_SIZE, \"0.15\" );\n_defaultVals.put(EAGER_CUDA_FREE, \"false\" );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMatrixMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMatrixMemoryManager.java", "diff": "@@ -52,7 +52,7 @@ public class GPUMatrixMemoryManager {\nlong getWorstCaseContiguousMemorySize(GPUObject gpuObj) {\nlong ret = 0;\nif(!gpuObj.isDensePointerNull()) {\n- if(gpuObj.shadowPointer == null)\n+ if(!gpuObj.shadowBuffer.isBuffered())\nret = gpuManager.allPointers.get(gpuObj.getDensePointer()).getSizeInBytes();\nelse\nret = 0; // evicted hence no contiguous memory on GPU\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -43,7 +43,6 @@ import org.apache.sysml.runtime.matrix.data.SparseBlockMCSR;\nimport org.apache.sysml.utils.GPUStatistics;\nimport jcuda.Pointer;\n-import jcuda.Sizeof;\nimport jcuda.jcusparse.cusparseDirection;\nimport jcuda.jcusparse.cusparseHandle;\nimport jcuda.jcusparse.cusparseMatDescr;\n@@ -63,7 +62,7 @@ public class GPUObject {\n/**\n* Pointer to the underlying dense matrix block on GPU\n*/\n- private Pointer jcudaDenseMatrixPtr = null;\n+ Pointer jcudaDenseMatrixPtr = null;\n/**\n* Pointer to the underlying sparse matrix block on GPU\n@@ -98,19 +97,12 @@ public class GPUObject {\n/**\n* Enclosing {@link MatrixObject} instance\n*/\n- protected MatrixObject mat = null;\n+ MatrixObject mat = null;\n- float[] shadowPointer = null;\n- private static boolean _warnedAboutShadowBuffer = false;\n- public boolean canFitIntoShadowBuffer() {\n- int numBytes = toIntExact(mat.getNumRows()*mat.getNumColumns())*Sizeof.FLOAT;\n- boolean ret = DMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES + numBytes <= DMLScript.EVICTION_SHADOW_BUFFER_MAX_BYTES;\n- if(!ret && !_warnedAboutShadowBuffer) {\n- LOG.warn(\"Shadow buffer is full, so using CP bufferpool instead. 
Consider increasing sysml.gpu.eviction.shadow.bufferSize.\");\n- _warnedAboutShadowBuffer = true;\n- }\n- return ret;\n- }\n+ /**\n+ * Shadow buffer instance\n+ */\n+ final ShadowBuffer shadowBuffer;\n// ----------------------------------------------------------------------\n// Methods used to access, set and check jcudaDenseMatrixPtr\n@@ -121,11 +113,8 @@ public class GPUObject {\n* @return a pointer to the dense matrix\n*/\npublic Pointer getDensePointer() {\n- if(jcudaDenseMatrixPtr == null && shadowPointer != null && getJcudaSparseMatrixPtr() == null) {\n- long numBytes = shadowPointer.length*LibMatrixCUDA.sizeOfDataType;\n- jcudaDenseMatrixPtr = gpuContext.allocate(null, numBytes);\n- cudaMemcpy(jcudaDenseMatrixPtr, Pointer.to(shadowPointer), numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\n- clearShadowPointer();\n+ if(jcudaDenseMatrixPtr == null && shadowBuffer.isBuffered() && getJcudaSparseMatrixPtr() == null) {\n+ shadowBuffer.moveToDevice();\n}\nreturn jcudaDenseMatrixPtr;\n}\n@@ -144,17 +133,7 @@ public class GPUObject {\n*/\npublic void clearDensePointer() {\njcudaDenseMatrixPtr = null;\n- clearShadowPointer();\n- }\n-\n- /**\n- * Removes shadow pointer\n- */\n- public void clearShadowPointer() {\n- if(shadowPointer != null) {\n- DMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES -= shadowPointer.length*Sizeof.FLOAT;\n- }\n- shadowPointer = null;\n+ shadowBuffer.clearShadowPointer();\n}\n@@ -221,7 +200,7 @@ public class GPUObject {\ngetGPUContext().cudaFreeHelper(null, toFree, DMLScript.EAGER_CUDA_FREE);\n}\n- private GPUContext getGPUContext() {\n+ GPUContext getGPUContext() {\nreturn gpuContext;\n}\n@@ -322,7 +301,7 @@ public class GPUObject {\n}\nthis.jcudaSparseMatrixPtr = sparseMatrixPtr;\nthis.isSparse = true;\n- if (!isDensePointerNull() && shadowPointer == null) {\n+ if (!isDensePointerNull() && !shadowBuffer.isBuffered()) {\ncudaFreeHelper(getDensePointer());\nclearDensePointer();\n}\n@@ -344,7 +323,7 @@ public class GPUObject {\nint rows = toIntExact(mat.getNumRows());\nint cols = toIntExact(mat.getNumColumns());\n- if ((isDensePointerNull() && shadowPointer == null) || !isAllocated())\n+ if ((isDensePointerNull() && !shadowBuffer.isBuffered()) || !isAllocated())\nthrow new DMLRuntimeException(\"Expected allocated dense matrix before denseToSparse() call\");\ndenseRowMajorToColumnMajor();\n@@ -462,6 +441,7 @@ public class GPUObject {\nGPUObject(GPUContext gCtx, MatrixObject mat2) {\ngpuContext = gCtx;\nthis.mat = mat2;\n+ this.shadowBuffer = new ShadowBuffer(this);\n}\npublic boolean isSparse() {\n@@ -477,7 +457,7 @@ public class GPUObject {\n}\npublic boolean isAllocated() {\n- boolean eitherAllocated = shadowPointer != null || !isDensePointerNull() || getJcudaSparseMatrixPtr() != null;\n+ boolean eitherAllocated = shadowBuffer.isBuffered() || !isDensePointerNull() || getJcudaSparseMatrixPtr() != null;\nreturn eitherAllocated;\n}\n@@ -939,7 +919,7 @@ public class GPUObject {\nif(LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : copyFromDeviceToHost, on \" + this + \", GPUContext=\" + getGPUContext());\n}\n- if(shadowPointer != null) {\n+ if(shadowBuffer.isBuffered()) {\nif(isEviction) {\n// If already copied to shadow buffer as part of previous eviction, do nothing.\nreturn;\n@@ -947,44 +927,13 @@ public class GPUObject {\nelse {\n// If already copied to shadow buffer as part of previous eviction and this is not an eviction (i.e. 
bufferpool call for subsequent CP/Spark instruction),\n// then copy from shadow buffer to MatrixObject.\n- long start = DMLScript.STATISTICS ? System.nanoTime() : 0;\n- MatrixBlock tmp = new MatrixBlock(toIntExact(mat.getNumRows()), toIntExact(mat.getNumColumns()), false);\n- tmp.allocateDenseBlock();\n- double [] tmpArr = tmp.getDenseBlockValues();\n- for(int i = 0; i < shadowPointer.length; i++) {\n- tmpArr[i] = shadowPointer[i];\n- }\n- mat.acquireModify(tmp);\n- mat.release();\n- clearShadowPointer();\n- dirty = false;\n- if (DMLScript.STATISTICS) {\n- long totalTime = System.nanoTime() - start;\n- GPUStatistics.cudaFromShadowToHostTime.add(totalTime);\n- GPUStatistics.cudaFromShadowToHostCount.increment();\n- // Part of dev -> host, not eviction\n- GPUStatistics.cudaFromDevTime.add(totalTime);\n- GPUStatistics.cudaFromDevCount.increment();\n- }\n+ shadowBuffer.moveToHost();\nreturn;\n}\n}\n- else if(LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.FLOAT && isEviction && eagerDelete && !isDensePointerNull() && canFitIntoShadowBuffer()) {\n+ else if(shadowBuffer.isEligibleForBuffering(isEviction, eagerDelete)) {\n// Perform shadow buffering if (1) single precision, (2) during eviction, (3) for dense matrices, and (4) if the given matrix can fit into the shadow buffer.\n- long start = DMLScript.STATISTICS ? System.nanoTime() : 0;\n- int numElems = toIntExact(mat.getNumRows()*mat.getNumColumns());\n- shadowPointer = new float[numElems];\n- DMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES += shadowPointer.length*Sizeof.FLOAT;\n- cudaMemcpy(Pointer.to(shadowPointer), jcudaDenseMatrixPtr, numElems*LibMatrixCUDA.sizeOfDataType, jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\n- getGPUContext().cudaFreeHelper(instName, jcudaDenseMatrixPtr, eagerDelete);\n- jcudaDenseMatrixPtr = null;\n- if (DMLScript.STATISTICS) {\n- // Eviction time measure in malloc\n- long totalTime = System.nanoTime() - start;\n- GPUStatistics.cudaFromDevToShadowTime.add(totalTime);\n- GPUStatistics.cudaFromDevToShadowCount.increment();\n-\n- }\n+ shadowBuffer.moveFromDevice(instName);\nreturn;\n}\nelse if (isDensePointerNull() && getJcudaSparseMatrixPtr() == null) {\n@@ -1059,7 +1008,7 @@ public class GPUObject {\ngetJcudaSparseMatrixPtr().deallocate(eager);\n}\nclearDensePointer();\n- clearShadowPointer();\n+ shadowBuffer.clearShadowPointer();\njcudaSparseMatrixPtr = null;\nresetReadWriteLock();\ngetGPUContext().getMemoryManager().removeGPUObject(this);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.runtime.instructions.gpu.context;\n+\n+import static jcuda.runtime.JCuda.cudaMemcpy;\n+\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.utils.GPUStatistics;\n+\n+import jcuda.Pointer;\n+import jcuda.Sizeof;\n+\n+public class ShadowBuffer {\n+ private static final Log LOG = LogFactory.getLog(ShadowBuffer.class.getName());\n+\n+ GPUObject gpuObj;\n+ float[] shadowPointer = null;\n+ private static boolean _warnedAboutShadowBuffer = false;\n+\n+ public ShadowBuffer(GPUObject gpuObj) {\n+ this.gpuObj = gpuObj;\n+ }\n+\n+ /**\n+ * Check if the gpu object is shadow buffered\n+ *\n+ * @return true if the gpu object is shadow buffered\n+ */\n+ public boolean isBuffered() {\n+ return shadowPointer != null;\n+ }\n+\n+ /**\n+ * Move the data from GPU to shadow buffer\n+ * @param instName name of the instruction\n+ */\n+ public void moveFromDevice(String instName) {\n+ long start = DMLScript.STATISTICS ? System.nanoTime() : 0;\n+ int numElems = GPUObject.toIntExact(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\n+ shadowPointer = new float[numElems];\n+ DMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES += shadowPointer.length*Sizeof.FLOAT;\n+ cudaMemcpy(Pointer.to(shadowPointer), gpuObj.jcudaDenseMatrixPtr, numElems*LibMatrixCUDA.sizeOfDataType, jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\n+ gpuObj.getGPUContext().cudaFreeHelper(instName, gpuObj.jcudaDenseMatrixPtr, true);\n+ gpuObj.jcudaDenseMatrixPtr = null;\n+ if (DMLScript.STATISTICS) {\n+ // Eviction time measure in malloc\n+ long totalTime = System.nanoTime() - start;\n+ GPUStatistics.cudaFromDevToShadowTime.add(totalTime);\n+ GPUStatistics.cudaFromDevToShadowCount.increment();\n+\n+ }\n+ }\n+\n+ /**\n+ * Move the data from shadow buffer to Matrix object\n+ */\n+ public void moveToHost() {\n+ long start = DMLScript.STATISTICS ? System.nanoTime() : 0;\n+ MatrixBlock tmp = new MatrixBlock(GPUObject.toIntExact(gpuObj.mat.getNumRows()), GPUObject.toIntExact(gpuObj.mat.getNumColumns()), false);\n+ tmp.allocateDenseBlock();\n+ double [] tmpArr = tmp.getDenseBlockValues();\n+ for(int i = 0; i < shadowPointer.length; i++) {\n+ tmpArr[i] = shadowPointer[i];\n+ }\n+ gpuObj.mat.acquireModify(tmp);\n+ gpuObj.mat.release();\n+ clearShadowPointer();\n+ gpuObj.dirty = false;\n+ if (DMLScript.STATISTICS) {\n+ long totalTime = System.nanoTime() - start;\n+ GPUStatistics.cudaFromShadowToHostTime.add(totalTime);\n+ GPUStatistics.cudaFromShadowToHostCount.increment();\n+ // Part of dev -> host, not eviction\n+ GPUStatistics.cudaFromDevTime.add(totalTime);\n+ GPUStatistics.cudaFromDevCount.increment();\n+ }\n+ }\n+\n+ /**\n+ * Move the data from shadow buffer to GPU\n+ */\n+ public void moveToDevice() {\n+ long start = DMLScript.STATISTICS ? 
System.nanoTime() : 0;\n+ long numBytes = shadowPointer.length*LibMatrixCUDA.sizeOfDataType;\n+ gpuObj.jcudaDenseMatrixPtr = gpuObj.getGPUContext().allocate(null, numBytes);\n+ cudaMemcpy(gpuObj.jcudaDenseMatrixPtr, Pointer.to(shadowPointer), numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\n+ clearShadowPointer();\n+ if (DMLScript.STATISTICS) {\n+ long totalTime = System.nanoTime() - start;\n+ GPUStatistics.cudaFromShadowToDevTime.add(totalTime);\n+ GPUStatistics.cudaFromShadowToDevCount.increment();\n+ }\n+ }\n+\n+ /**\n+ * Checks if the GPU object is eligible for shadow buffering\n+ *\n+ * @param isEviction true if this method is called during eviction\n+ * @param eagerDelete true if the data on device has to be eagerly deleted\n+ * @return true if the given GPU object is eligible to be shadow buffered\n+ */\n+ public boolean isEligibleForBuffering(boolean isEviction, boolean eagerDelete) {\n+ if(LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.FLOAT && isEviction && eagerDelete && !gpuObj.isDensePointerNull()) {\n+ int numBytes = GPUObject.toIntExact(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns())*Sizeof.FLOAT;\n+ boolean ret = DMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES + numBytes <= DMLScript.EVICTION_SHADOW_BUFFER_MAX_BYTES;\n+ if(!ret && !_warnedAboutShadowBuffer) {\n+ LOG.warn(\"Shadow buffer is full, so using CP bufferpool instead. Consider increasing sysml.gpu.eviction.shadow.bufferSize.\");\n+ _warnedAboutShadowBuffer = true;\n+ }\n+ return ret;\n+ }\n+ else {\n+ return false;\n+ }\n+ }\n+\n+ /**\n+ * Removes the content from shadow buffer\n+ */\n+ public void clearShadowPointer() {\n+ if(shadowPointer != null) {\n+ DMLScript.EVICTION_SHADOW_BUFFER_CURR_BYTES -= shadowPointer.length*Sizeof.FLOAT;\n+ }\n+ shadowPointer = null;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "new_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "diff": "@@ -54,6 +54,7 @@ public class GPUStatistics {\npublic static LongAdder cudaToDevTime = new LongAdder(); // time spent in copying data from host (CPU) to device (GPU) memory\npublic static LongAdder cudaFromDevTime = new LongAdder(); // time spent in copying data from device to host\npublic static LongAdder cudaFromShadowToHostTime = new LongAdder(); // time spent in copying data from shadow to host\n+ public static LongAdder cudaFromShadowToDevTime = new LongAdder(); // time spent in copying data from shadow to host\npublic static LongAdder cudaFromDevToShadowTime = new LongAdder(); // time spent in copying data from device to shadow\npublic static LongAdder cudaEvictTime = new LongAdder(); // time spent in eviction\npublic static LongAdder cudaEvictSizeTime = new LongAdder(); // time spent in eviction\n@@ -68,6 +69,7 @@ public class GPUStatistics {\npublic static LongAdder cudaToDevCount = new LongAdder();\npublic static LongAdder cudaFromDevCount = new LongAdder();\npublic static LongAdder cudaFromShadowToHostCount = new LongAdder();\n+ public static LongAdder cudaFromShadowToDevCount = new LongAdder();\npublic static LongAdder cudaFromDevToShadowCount = new LongAdder();\npublic static LongAdder cudaEvictCount = new LongAdder();\npublic static LongAdder cudaEvictSizeCount = new LongAdder();\n@@ -104,6 +106,7 @@ public class GPUStatistics {\ncudaToDevTime.reset();\ncudaFromDevTime.reset();\ncudaFromShadowToHostTime.reset();\n+ cudaFromShadowToDevTime.reset();\ncudaFromDevToShadowTime.reset();\ncudaEvictTime.reset();\ncudaEvictSizeTime.reset();\n@@ 
-118,6 +121,7 @@ public class GPUStatistics {\ncudaToDevCount.reset();\ncudaFromDevCount.reset();\ncudaFromShadowToHostCount.reset();\n+ cudaFromShadowToDevCount.reset();\ncudaFromDevToShadowCount.reset();\ncudaEvictCount.reset();\ncudaEvictSizeCount.reset();\n@@ -238,18 +242,20 @@ public class GPUStatistics {\n+ cudaAllocReuseCount.longValue() +\") / \"\n+ cudaDeAllocCount.longValue() + \" / \"\n+ cudaMemSet0Count.longValue() + \".\\n\");\n- sb.append(\"GPU mem tx time (toDev(d2f) / fromDev(f2d/s2h) / evict(d2s/size)):\\t\"\n+ sb.append(\"GPU mem tx time (toDev(d2f/s2d) / fromDev(f2d/s2h) / evict(d2s/size)):\\t\"\n+ String.format(\"%.3f\", cudaToDevTime.longValue()*1e-9) + \"(\"\n- + String.format(\"%.3f\", cudaDouble2FloatTime.longValue()*1e-9)+ \") / \"\n+ + String.format(\"%.3f\", cudaDouble2FloatTime.longValue()*1e-9)+ \"/\"\n+ + String.format(\"%.3f\", cudaFromShadowToDevTime.longValue()*1e-9) + \") / \"\n+ String.format(\"%.3f\", cudaFromDevTime.longValue()*1e-9) + \"(\"\n+ String.format(\"%.3f\", cudaFloat2DoubleTime.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaFromShadowToHostTime.longValue()*1e-9) + \") / \"\n+ String.format(\"%.3f\", cudaEvictTime.longValue()*1e-9) + \"(\"\n+ String.format(\"%.3f\", cudaFromDevToShadowTime.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaEvictSizeTime.longValue()*1e-9) + \") sec.\\n\");\n- sb.append(\"GPU mem tx count (toDev(d2f) / fromDev(f2d/s2h) / evict(d2s/size)):\\t\"\n+ sb.append(\"GPU mem tx count (toDev(d2f/s2d) / fromDev(f2d/s2h) / evict(d2s/size)):\\t\"\n+ cudaToDevCount.longValue() + \"(\"\n- + cudaDouble2FloatCount.longValue() + \") / \"\n+ + cudaDouble2FloatCount.longValue() + \"/\"\n+ + cudaFromShadowToDevCount.longValue() + \") / \"\n+ cudaFromDevCount.longValue() + \"(\"\n+ cudaFloat2DoubleCount.longValue() + \"/\"\n+ cudaFromShadowToHostCount.longValue() + \") / \"\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Refactored the shadow buffer and added documentation for newly added features - Refactored the shadow buffer logic from GPUObject to ShadowBuffer class for maintenance. - Added an additional timer to measure shadow buffer time. - Updated the gpu documentation
49,760
06.08.2018 16:05:26
25,200
bc7b4961a20c47c8064bc9ce7d30ba071b44a748
Finalized AVG and WC sparsity estimators, API cleanups Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -970,6 +970,10 @@ public class OptimizerUtils\n// Sparsity Estimates //\n////////////////////////\n+ public static long getMatMultNnz(double sp1, double sp2, long m, long k, long n, boolean worstcase) {\n+ return getNnz( m, n, getMatMultSparsity(sp1, sp2, m, k, n, worstcase));\n+ }\n+\n/**\n* Estimates the result sparsity for Matrix Multiplication A %*% B.\n*\n@@ -981,8 +985,7 @@ public class OptimizerUtils\n* @param worstcase true if worst case\n* @return the sparsity\n*/\n- public static double getMatMultSparsity(double sp1, double sp2, long m, long k, long n, boolean worstcase)\n- {\n+ public static double getMatMultSparsity(double sp1, double sp2, long m, long k, long n, boolean worstcase) {\nif( worstcase ){\ndouble nnz1 = sp1 * m * k;\ndouble nnz2 = sp2 * k * n;\n@@ -1159,6 +1162,10 @@ public class OptimizerUtils\n}\n}\n+ public static long getNnz(long dim1, long dim2, double sp) {\n+ return (long) Math.round(sp * dim1 * dim2);\n+ }\n+\npublic static double getSparsity( MatrixCharacteristics mc ) {\nreturn getSparsity(mc.getRows(), mc.getCols(), mc.getNonZeros());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "diff": "@@ -21,75 +21,75 @@ package org.apache.sysml.hops.estim;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n/**\n* Basic average case estimator for matrix sparsity:\n* sp = 1 - Math.pow(1-sp1*sp2, k)\n*/\n-public class EstimatorBasicAvg extends SparsityEstimator {\n+public class EstimatorBasicAvg extends SparsityEstimator\n+{\n@Override\n- public double estim(MMNode root) {\n- // recursive sparsity evaluation of non-leaf nodes\n- double sp1 = !root.getLeft().isLeaf() ? estim(root.getLeft()) :\n- OptimizerUtils.getSparsity(root.getLeft().getMatrixCharacteristics());\n- double sp2 = !root.getRight().isLeaf() ? 
estim(root.getRight()) :\n- OptimizerUtils.getSparsity(root.getRight().getMatrixCharacteristics());\n- return estimInternMM(sp1, sp2, root.getRows(), root.getLeft().getCols(), root.getCols());\n+ public MatrixCharacteristics estim(MMNode root) {\n+ MatrixCharacteristics mc1 = !root.getLeft().isLeaf() ?\n+ estim(root.getLeft()) : root.getLeft().getMatrixCharacteristics();\n+ MatrixCharacteristics mc2 = !root.getRight().isLeaf() ?\n+ estim(root.getRight()) : root.getRight().getMatrixCharacteristics();\n+ return root.setMatrixCharacteristics(\n+ estimIntern(mc1, mc2, root.getOp()));\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- return estimInternMM(m1.getSparsity(), m2.getSparsity(),\n- m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n+ return estim(m1, m2, OpCode.MM);\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n- return estimIntern(m1, m2, op);\n+ return estimIntern(m1.getMatrixCharacteristics(), m2.getMatrixCharacteristics(), op).getSparsity();\n}\n@Override\npublic double estim(MatrixBlock m, OpCode op) {\n- return estimIntern(m, null, op);\n+ return estimIntern(m.getMatrixCharacteristics(), null, op).getSparsity();\n}\n- private double estimIntern(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ private MatrixCharacteristics estimIntern(MatrixCharacteristics mc1, MatrixCharacteristics mc2, OpCode op) {\nswitch (op) {\ncase MM:\n- return estimInternMM(m1.getSparsity(), m2.getSparsity(),\n- m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n+ return new MatrixCharacteristics(mc1.getRows(), mc2.getCols(),\n+ OptimizerUtils.getMatMultNnz(mc1.getSparsity(), mc2.getSparsity(),\n+ mc1.getRows(), mc1.getCols(), mc2.getCols(), false));\ncase MULT:\n- return m1.getSparsity() * m2.getSparsity();\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ OptimizerUtils.getNnz(mc1.getRows(), mc1.getCols(),\n+ mc1.getSparsity() * mc2.getSparsity()));\ncase PLUS:\n- return m1.getSparsity() + m2.getSparsity() - m1.getSparsity() * m2.getSparsity();\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ OptimizerUtils.getNnz(mc1.getRows(), mc1.getCols(),\n+ mc1.getSparsity() + mc2.getSparsity() - mc1.getSparsity() * mc2.getSparsity()));\ncase EQZERO:\n- return OptimizerUtils.getSparsity(m1.getNumRows(), m1.getNumColumns(),\n- (long) m1.getNumRows() * m1.getNumColumns() - m1.getNonZeros());\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ (long) mc1.getRows() * mc1.getCols() - mc1.getNonZeros());\ncase DIAG:\n- return (m1.getNumColumns() == 1) ?\n- OptimizerUtils.getSparsity(m1.getNumRows(), m1.getNumRows(), m1.getNonZeros()) :\n- OptimizerUtils.getSparsity(m1.getNumRows(), 1, Math.min(m1.getNumRows(), m1.getNonZeros()));\n+ return (mc1.getCols() == 1) ?\n+ new MatrixCharacteristics(mc1.getRows(), mc1.getRows(), mc1.getNonZeros()) :\n+ new MatrixCharacteristics(mc1.getRows(), 1, Math.min(mc1.getRows(), mc1.getNonZeros()));\n// binary operations that preserve sparsity exactly\ncase CBIND:\n- return OptimizerUtils.getSparsity(m1.getNumRows(),\n- m1.getNumColumns() + m1.getNumColumns(), m1.getNonZeros() + m2.getNonZeros());\n+ return new MatrixCharacteristics(mc1.getRows(),\n+ mc1.getCols() + mc2.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\ncase RBIND:\n- return OptimizerUtils.getSparsity(m1.getNumRows() + m2.getNumRows(),\n- m1.getNumColumns(), m1.getNonZeros() + m2.getNonZeros());\n+ return new MatrixCharacteristics(mc1.getRows() + mc2.getRows(),\n+ mc1.getCols(), 
mc1.getNonZeros() + mc2.getNonZeros());\n// unary operation that preserve sparsity exactly\ncase NEQZERO:\n- return m1.getSparsity();\ncase TRANS:\n- return m1.getSparsity();\ncase RESHAPE:\n- return m1.getSparsity();\n+ return mc1;\ndefault:\nthrow new NotImplementedException();\n}\n}\n-\n- private double estimInternMM(double sp1, double sp2, long m, long k, long n) {\n- return OptimizerUtils.getMatMultSparsity(sp1, sp2, m, k, n, false);\n- }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops.estim;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n/**\n@@ -34,32 +35,65 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\npublic class EstimatorBasicWorst extends SparsityEstimator\n{\n@Override\n- public double estim(MMNode root) {\n- //recursive sparsity evaluation of non-leaf nodes\n- double sp1 = !root.getLeft().isLeaf() ? estim(root.getLeft()) :\n- OptimizerUtils.getSparsity(root.getLeft().getMatrixCharacteristics());\n- double sp2 = !root.getRight().isLeaf() ? estim(root.getRight()) :\n- OptimizerUtils.getSparsity(root.getRight().getMatrixCharacteristics());\n- return estimIntern(sp1, sp2, root.getRows(), root.getLeft().getCols(), root.getCols());\n+ public MatrixCharacteristics estim(MMNode root) {\n+ MatrixCharacteristics mc1 = !root.getLeft().isLeaf() ?\n+ estim(root.getLeft()) : root.getLeft().getMatrixCharacteristics();\n+ MatrixCharacteristics mc2 = !root.getRight().isLeaf() ?\n+ estim(root.getRight()) : root.getRight().getMatrixCharacteristics();\n+ return root.setMatrixCharacteristics(\n+ estimIntern(mc1, mc2, root.getOp()));\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- return estimIntern(m1.getSparsity(), m2.getSparsity(),\n- m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n+ return estim(m1, m2, OpCode.MM);\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n- throw new NotImplementedException();\n+ return estimIntern(m1.getMatrixCharacteristics(), m2.getMatrixCharacteristics(), op).getSparsity();\n}\n@Override\npublic double estim(MatrixBlock m, OpCode op) {\n- throw new NotImplementedException();\n+ return estimIntern(m.getMatrixCharacteristics(), null, op).getSparsity();\n}\n- private double estimIntern(double sp1, double sp2, long m, long k, long n) {\n- return OptimizerUtils.getMatMultSparsity(sp1, sp2, m, k, n, true);\n+ private MatrixCharacteristics estimIntern(MatrixCharacteristics mc1, MatrixCharacteristics mc2, OpCode op) {\n+ switch (op) {\n+ case MM:\n+ return new MatrixCharacteristics(mc1.getRows(), mc2.getCols(),\n+ OptimizerUtils.getMatMultNnz(mc1.getSparsity(), mc2.getSparsity(),\n+ mc1.getRows(), mc1.getCols(), mc2.getCols(), true));\n+ case MULT:\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ OptimizerUtils.getNnz(mc1.getRows(), mc1.getCols(),\n+ Math.min(mc1.getSparsity(), mc2.getSparsity())));\n+ case PLUS:\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ OptimizerUtils.getNnz(mc1.getRows(), mc1.getCols(),\n+ Math.min(mc1.getSparsity() + mc2.getSparsity(), 1)));\n+ case EQZERO:\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ (long) mc1.getRows() 
* mc1.getCols() - mc1.getNonZeros());\n+ case DIAG:\n+ return (mc1.getCols() == 1) ?\n+ new MatrixCharacteristics(mc1.getRows(), mc1.getRows(), mc1.getNonZeros()) :\n+ new MatrixCharacteristics(mc1.getRows(), 1, Math.min(mc1.getRows(), mc1.getNonZeros()));\n+ // binary operations that preserve sparsity exactly\n+ case CBIND:\n+ return new MatrixCharacteristics(mc1.getRows(),\n+ mc1.getCols() + mc2.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n+ case RBIND:\n+ return new MatrixCharacteristics(mc1.getRows() + mc2.getRows(),\n+ mc1.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n+ // unary operation that preserve sparsity exactly\n+ case NEQZERO:\n+ case TRANS:\n+ case RESHAPE:\n+ return mc1;\n+ default:\n+ throw new NotImplementedException();\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -25,6 +25,7 @@ import java.util.stream.IntStream;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -42,7 +43,7 @@ import org.apache.sysml.runtime.matrix.data.SparseBlock;\n*/\npublic class EstimatorBitsetMM extends SparsityEstimator {\n@Override\n- public double estim(MMNode root) {\n+ public MatrixCharacteristics estim(MMNode root) {\n// recursive density map computation of non-leaf nodes\nif (!root.getLeft().isLeaf())\nestim(root.getLeft()); // obtain synopsis\n@@ -56,7 +57,8 @@ public class EstimatorBitsetMM extends SparsityEstimator {\n// estimate output density map and sparsity via boolean matrix mult\nBitsetMatrix outMap = m1Map.matMult(m2Map);\nroot.setSynopsis(outMap); // memoize boolean matrix\n- return OptimizerUtils.getSparsity(outMap.getNumRows(), outMap.getNumColumns(), outMap.getNonZeros());\n+ return root.setMatrixCharacteristics(new MatrixCharacteristics(\n+ outMap.getNumRows(), outMap.getNumColumns(), outMap.getNonZeros()));\n}\n@Override\n@@ -277,7 +279,7 @@ public class EstimatorBitsetMM extends SparsityEstimator {\nc[ci+0] |= b[bi+0]; c[ci+1] |= b[bi+1];\nc[ci+2] |= b[bi+2]; c[ci+3] |= b[bi+3];\nc[ci+4] |= b[bi+4]; c[ci+5] |= b[bi+5];\n- c[ci+6] |= b[bi+4]; c[ci+7] |= b[bi+7];\n+ c[ci+6] |= b[bi+6]; c[ci+7] |= b[bi+7];\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops.estim;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -52,7 +53,7 @@ public class EstimatorDensityMap extends SparsityEstimator\n}\n@Override\n- public double estim(MMNode root) {\n+ public MatrixCharacteristics estim(MMNode root) {\n//recursive density map computation of non-leaf nodes\nif( !root.getLeft().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\n@@ -67,8 +68,8 
@@ public class EstimatorDensityMap extends SparsityEstimator\nMatrixBlock outMap = estimIntern(m1Map, m2Map,\nfalse, root.getRows(), root.getLeft().getCols(), root.getCols());\nroot.setSynopsis(outMap); //memoize density map\n- return OptimizerUtils.getSparsity( //aggregate output histogram\n- root.getRows(), root.getCols(), (long)outMap.sum());\n+ return root.setMatrixCharacteristics(new MatrixCharacteristics(\n+ root.getLeft().getRows(), root.getRight().getCols(), (long)outMap.sum()));\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -22,6 +22,7 @@ import org.apache.commons.lang.NotImplementedException;\nimport org.apache.commons.math3.distribution.ExponentialDistribution;\nimport org.apache.commons.math3.random.Well1024a;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -52,7 +53,7 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\n}\n@Override\n- public double estim(MMNode root) {\n+ public MatrixCharacteristics estim(MMNode root) {\nthrow new NotImplementedException();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -25,6 +25,7 @@ import java.util.stream.IntStream;\nimport org.apache.directory.api.util.exception.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixAgg;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -52,7 +53,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n@Override\n- public double estim(MMNode root) {\n+ public MatrixCharacteristics estim(MMNode root) {\n//recursive histogram computation of non-leaf nodes\nif( !root.getLeft().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\n@@ -69,9 +70,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ndouble ret = estimIntern(h1, h2, OpCode.MM);\n//derive and memoize output histogram\n- root.setSynopsis(MatrixHistogram.deriveOutputHistogram(h1, h2, ret));\n-\n- return ret;\n+ MatrixHistogram outMap = MatrixHistogram.deriveOutputHistogram(h1, h2, ret);\n+ root.setSynopsis(outMap);\n+ return root.setMatrixCharacteristics(new MatrixCharacteristics(\n+ outMap.getRows(), outMap.getCols(), outMap.getNonZeros()));\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.hops.estim;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixAgg;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -55,7 +56,7 @@ public 
class EstimatorSample extends SparsityEstimator\n}\n@Override\n- public double estim(MMNode root) {\n+ public MatrixCharacteristics estim(MMNode root) {\nLOG.warn(\"Recursive estimates not supported by EstimatorSample, falling back to EstimatorBasicAvg.\");\nreturn new EstimatorBasicAvg().estim(root);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -33,20 +34,22 @@ public class MMNode\nprivate final MatrixBlock _data;\nprivate final MatrixCharacteristics _mc;\nprivate Object _synops = null;\n+ private final OpCode _op;\npublic MMNode(MatrixBlock in) {\n_m1 = null;\n_m2 = null;\n_data = in;\n_mc = in.getMatrixCharacteristics();\n+ _op = null;\n}\n- public MMNode(MMNode left, MMNode right) {\n+ public MMNode(MMNode left, MMNode right, OpCode op) {\n_m1 = left;\n_m2 = right;\n_data = null;\n- _mc = new MatrixCharacteristics(\n- _m1.getRows(), _m2.getCols(), -1, -1);\n+ _mc = new MatrixCharacteristics(-1, -1, -1, -1);\n+ _op = op;\n}\npublic int getRows() {\n@@ -61,6 +64,10 @@ public class MMNode\nreturn _mc;\n}\n+ public MatrixCharacteristics setMatrixCharacteristics(MatrixCharacteristics mc) {\n+ return _mc.set(mc); //implicit copy\n+ }\n+\npublic MMNode getLeft() {\nreturn _m1;\n}\n@@ -84,4 +91,8 @@ public class MMNode\npublic Object getSynopsis() {\nreturn _synops;\n}\n+\n+ public OpCode getOp() {\n+ return _op;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops.estim;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\npublic abstract class SparsityEstimator\n@@ -46,7 +47,8 @@ public abstract class SparsityEstimator\n* @param root\n* @return\n*/\n- public abstract double estim(MMNode root);\n+ public abstract MatrixCharacteristics estim(MMNode root);\n+\n/**\n* Estimates the output sparsity for a single matrix multiplication.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/MatrixCharacteristics.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/MatrixCharacteristics.java", "diff": "@@ -81,6 +81,10 @@ public class MatrixCharacteristics implements Serializable\npublic MatrixCharacteristics() {}\n+ public MatrixCharacteristics(long nr, long nc, long nnz) {\n+ set(nr, nc, -1, -1, nnz);\n+ }\n+\npublic MatrixCharacteristics(long nr, long nc, int bnr, int bnc) {\nset(nr, nc, bnr, bnc);\n}\n@@ -93,29 +97,32 @@ public class MatrixCharacteristics implements Serializable\nset(that.numRows, that.numColumns, that.numRowsPerBlock, that.numColumnsPerBlock, that.nonZero);\n}\n- public void set(long nr, long nc, int bnr, int bnc) {\n+ public MatrixCharacteristics set(long nr, long nc, int bnr, int bnc) {\nnumRows = nr;\nnumColumns = nc;\nnumRowsPerBlock = bnr;\nnumColumnsPerBlock = bnc;\n+ return this;\n}\n- public void set(long nr, long nc, int bnr, int bnc, long nnz) {\n+ public MatrixCharacteristics set(long nr, long nc, int bnr, 
int bnc, long nnz) {\nnumRows = nr;\nnumColumns = nc;\nnumRowsPerBlock = bnr;\nnumColumnsPerBlock = bnc;\nnonZero = nnz;\nubNnz = false;\n+ return this;\n}\n- public void set(MatrixCharacteristics that) {\n+ public MatrixCharacteristics set(MatrixCharacteristics that) {\nnumRows = that.numRows;\nnumColumns = that.numColumns;\nnumRowsPerBlock = that.numRowsPerBlock;\nnumColumnsPerBlock = that.numColumnsPerBlock;\nnonZero = that.nonZero;\nubNnz = that.ubNnz;\n+ return this;\n}\npublic long getRows(){\n@@ -207,6 +214,10 @@ public class MatrixCharacteristics implements Serializable\nreturn nonZero;\n}\n+ public double getSparsity() {\n+ return OptimizerUtils.getSparsity(this);\n+ }\n+\npublic boolean dimsKnown() {\nreturn ( numRows >= 0 && numColumns >= 0 );\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "diff": "package org.apache.sysml.test.integration.functions.estim;\n-import org.junit.Test;\nimport org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\nimport org.apache.sysml.hops.estim.MMNode;\n+import org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n/**\n* This is a basic sanity check for all estimator, which need\n@@ -135,8 +136,8 @@ public class SquaredProductChainTest extends AutomatedTestBase\nnew MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n//compare estimated and real sparsity\n- double est = estim.estim(new MMNode(\n- new MMNode(new MMNode(m1), new MMNode(m2)), new MMNode(m3)));\n+ double est = estim.estim(new MMNode(new MMNode(new MMNode(m1), new MMNode(m2),\n+ OpCode.MM), new MMNode(m3), OpCode.MM)).getSparsity();\nTestUtils.compareScalars(est, m5.getSparsity(),\n(estim instanceof EstimatorBitsetMM) ? eps3 : //exact\n(estim instanceof EstimatorBasicWorst) ? eps1 : eps2);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Finalized AVG and WC sparsity estimators, API cleanups Closes #818.
49,760
07.08.2018 20:44:09
25,200
e0187028e43b5fbe884e795e2d0742280634ffa7
Extended MNC sparsity estimator for other operations Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "package org.apache.sysml.hops.estim;\n-import java.util.Arrays;\nimport java.util.Random;\nimport java.util.stream.IntStream;\n+import org.apache.commons.lang.ArrayUtils;\nimport org.apache.directory.api.util.exception.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -67,13 +67,13 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnew MatrixHistogram(root.getRight().getData(), _useExcepts);\n//estimate output sparsity based on input histograms\n- double ret = estimIntern(h1, h2, OpCode.MM);\n+ double ret = estimIntern(h1, h2, root.getOp());\n- //derive and memoize output histogram\n- MatrixHistogram outMap = MatrixHistogram.deriveOutputHistogram(h1, h2, ret);\n+ MatrixHistogram outMap = MatrixHistogram.deriveOutputHistogram(h1, h2, ret, root.getOp());\nroot.setSynopsis(outMap);\nreturn root.setMatrixCharacteristics(new MatrixCharacteristics(\noutMap.getRows(), outMap.getCols(), outMap.getNonZeros()));\n+\n}\n@Override\n@@ -304,14 +304,27 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nIntStream.range(0, getRows()).mapToLong(i-> cNnz[i]).sum();\n}\n- public static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n+ public static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut, OpCode op) {\n+ switch(op) {\n+ case MM: return deriveMMHistogram(h1, h2, spOut);\n+ case MULT: return deriveMultHistogram(h1, h2);\n+ case PLUS: return derivePlusHistogram(h1, h2);\n+ case RBIND: return deriveRbindHistogram(h1, h2);\n+ case CBIND: return deriveCbindHistogram(h1, h2);\n+ //TODO add missing unary operations\n+ default:\n+ throw new NotImplementedException();\n+ }\n+ }\n+\n+ private static MatrixHistogram deriveMMHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n//exact propagation if lhs or rhs full diag\nif( h1.fullDiag ) return h2;\nif( h2.fullDiag ) return h1;\n//get input/output nnz for scaling\n- long nnz1 = Arrays.stream(h1.rNnz).sum();\n- long nnz2 = Arrays.stream(h2.cNnz).sum();\n+ long nnz1 = h1.getNonZeros();\n+ long nnz2 = h2.getNonZeros();\ndouble nnzOut = spOut * h1.getRows() * h2.getCols();\n//propagate h1.r and h2.c to output via simple scaling\n@@ -333,6 +346,69 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nreturn new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n}\n+ private static MatrixHistogram deriveMultHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n+ final long N1 = h1.getNonZeros();\n+ final long N2 = h2.getNonZeros();\n+ final long scaler = IntStream.range(0, h1.getCols())\n+ .mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ final long scalec = IntStream.range(0, h1.getRows())\n+ .mapToLong(j -> (long)h1.rNnz[j] * h2.rNnz[j]).sum();\n+ int rMaxNnz = 0, cMaxNnz = 0;\n+ Random rn = new Random();\n+ int[] rNnz = new int[h1.getRows()];\n+ for(int i=0; i<h1.getRows(); i++) {\n+ rNnz[i] = probRound(h1.rNnz[i] * h2.rNnz[i] * scaler / N1 / N2, rn);\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n+ }\n+ int[] cNnz = new int[h1.getCols()];\n+ for(int i=0; i<h1.getCols(); i++) {\n+ cNnz[i] = probRound(h1.cNnz[i] * h2.cNnz[i] * scalec / N1 / N2, rn);\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n+ }\n+ return new 
MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n+ }\n+\n+ private static MatrixHistogram derivePlusHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n+ double msize = (double)h1.getRows()*h1.getCols();\n+ int rMaxNnz = 0, cMaxNnz = 0;\n+ Random rn = new Random();\n+ int[] rNnz = new int[h1.getRows()];\n+ for(int i=0; i<h1.getRows(); i++) {\n+ rNnz[i] = probRound(h1.rNnz[i]/msize + h2.rNnz[i]/msize - h1.rNnz[i]/msize * h2.rNnz[i]/msize, rn);\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n+ }\n+ int[] cNnz = new int[h1.getCols()];\n+ for(int i=0; i<h1.getCols(); i++) {\n+ cNnz[i] = probRound(h1.cNnz[i]/msize + h2.cNnz[i]/msize - h1.cNnz[i]/msize * h2.cNnz[i]/msize, rn);\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n+ }\n+ return new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n+ }\n+\n+ private static MatrixHistogram deriveRbindHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n+ int[] rNnz = ArrayUtils.addAll(h1.rNnz, h2.rNnz);\n+ int rMaxNnz = Math.max(h1.rMaxNnz, h2.rMaxNnz);\n+ int[] cNnz = new int[h1.getCols()];\n+ int cMaxNnz = 0;\n+ for(int i=0; i<h1.getCols(); i++) {\n+ cNnz[i] = h1.cNnz[i] + h2.cNnz[i];\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n+ }\n+ return new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n+ }\n+\n+ private static MatrixHistogram deriveCbindHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n+ int[] rNnz = new int[h1.getRows()];\n+ int rMaxNnz = 0;\n+ for(int i=0; i<h1.getRows(); i++) {\n+ rNnz[i] = h1.rNnz[i] + h2.rNnz[i];\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n+ }\n+ int[] cNnz = ArrayUtils.addAll(h1.cNnz, h2.cNnz);\n+ int cMaxNnz = Math.max(h1.cMaxNnz, h2.cMaxNnz);\n+ return new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n+ }\n+\nprivate static int probRound(double inNnz, Random rand) {\ndouble temp = Math.floor(inNnz);\ndouble f = inNnz - temp; //non-int fraction [0,1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended MNC sparsity estimator for other operations Closes #820.
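The deriveRbindHistogram and deriveCbindHistogram cases added in this commit are exact rather than probabilistic: stacking two matrices concatenates the nnz counts along the appended dimension and sums them along the other. A minimal standalone restatement of the rbind case, where the class and helper names are illustrative and not part of the commit:

public class RbindHistogramSketch {
	// Derives the row/column nnz counts of rbind(A, B) from the counts of A and B.
	// rNnz*/cNnz* are per-row and per-column non-zero counts of the two inputs,
	// which must have the same number of columns.
	static int[][] rbindCounts(int[] rNnz1, int[] cNnz1, int[] rNnz2, int[] cNnz2) {
		// Row counts: plain concatenation, because the rows of A and B are stacked unchanged.
		int[] rNnz = new int[rNnz1.length + rNnz2.length];
		System.arraycopy(rNnz1, 0, rNnz, 0, rNnz1.length);
		System.arraycopy(rNnz2, 0, rNnz, rNnz1.length, rNnz2.length);
		// Column counts: element-wise sums, because each output column contains both parts.
		int[] cNnz = new int[cNnz1.length];
		for (int j = 0; j < cNnz.length; j++)
			cNnz[j] = cNnz1[j] + cNnz2[j];
		return new int[][] {rNnz, cNnz};
	}
}

The cbind case is symmetric: the row counts are summed element-wise and the column counts are concatenated.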
49,738
07.08.2018 17:42:29
25,200
1d13c8bbce6980f3244d3a624bfe380df10f0af0
[MINOR] Improved error reporting of dev sparse row validation
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1231,16 +1231,23 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nthrow new RuntimeException(\"Number of non zeros incorrect: \"+nnzBefore+\" vs \"+nnzAfter);\n}\n+ public void checkSparseRows() {\n+ checkSparseRows(0, rlen);\n+ }\n+\n/**\n* Basic debugging primitive to check sparse block column ordering.\n* This method is not intended for production use.\n+ *\n+ * @param rl row lower bound (inclusive)\n+ * @param ru row upper bound (exclusive)\n*/\n- public void checkSparseRows() {\n+ public void checkSparseRows(int rl, int ru) {\nif( !sparse || sparseBlock == null )\nreturn;\n//check ordering of column indexes per sparse row\n- for( int i=0; i<rlen; i++ )\n+ for( int i=rl; i<ru; i++ )\nif( !sparseBlock.isEmpty(i) ) {\nint apos = sparseBlock.pos(i);\nint alen = sparseBlock.size(i);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockMCSR.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockMCSR.java", "diff": "@@ -188,8 +188,8 @@ public class SparseBlockMCSR extends SparseBlock\ndouble[] avals = values(i);\nfor (int k = apos + 1; k < apos + alen; k++) {\nif (aix[k-1] >= aix[k])\n- throw new RuntimeException(\"Wrong sparse row ordering, at row: \"\n- + k + \"with \" + aix[k-1] + \">=\" + aix[k]);\n+ throw new RuntimeException(\"Wrong sparse row ordering, at row=\"+i+\", pos=\"+k\n+ + \" with column indexes \" + aix[k-1] + \">=\" + aix[k]);\nif (avals[k] == 0)\nthrow new RuntimeException(\"The values are expected to be non zeros \"\n+ \"but zero at row: \"+ i + \", col pos: \" + k);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved error reporting of dev sparse row validation
49,760
07.08.2018 22:24:59
25,200
f35cb6005b81d0360defd30fe154afbe2190b734
Rework DensityMap sparsity estimator and mult/plus ops Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -59,43 +59,217 @@ public class EstimatorDensityMap extends SparsityEstimator\nestim(root.getLeft()); //obtain synopsis\nif( !root.getRight().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\n- MatrixBlock m1Map = !root.getLeft().isLeaf() ?\n- (MatrixBlock)root.getLeft().getSynopsis() : computeDensityMap(root.getLeft().getData());\n- MatrixBlock m2Map = !root.getRight().isLeaf() ?\n- (MatrixBlock)root.getRight().getSynopsis() : computeDensityMap(root.getRight().getData());\n+ DensityMap m1Map = !root.getLeft().isLeaf() ?\n+ (DensityMap)root.getLeft().getSynopsis() :\n+ new DensityMap(root.getLeft().getData(), _b);\n+ DensityMap m2Map = !root.getRight().isLeaf() ?\n+ (DensityMap)root.getRight().getSynopsis() :\n+ new DensityMap(root.getRight().getData(), _b);\n//estimate output density map and sparsity\n- MatrixBlock outMap = estimIntern(m1Map, m2Map,\n- false, root.getRows(), root.getLeft().getCols(), root.getCols());\n+ DensityMap outMap = estimIntern(m1Map, m2Map, root.getOp());\nroot.setSynopsis(outMap); //memoize density map\nreturn root.setMatrixCharacteristics(new MatrixCharacteristics(\n- root.getLeft().getRows(), root.getRight().getCols(), (long)outMap.sum()));\n+ root.getLeft().getRows(), root.getRight().getCols(), outMap.getNonZeros()));\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- MatrixBlock m1Map = computeDensityMap(m1);\n- MatrixBlock m2Map = (m1 == m2) ? //self product\n- m1Map : computeDensityMap(m2);\n- MatrixBlock outMap = estimIntern(m1Map, m2Map,\n- true, m1.getNumRows(), m1.getNumColumns(), m2.getNumColumns());\n- return OptimizerUtils.getSparsity( //aggregate output histogram\n- m1.getNumRows(), m2.getNumColumns(), (long)outMap.sum());\n+ return estim(m1, m2, OpCode.MM);\n}\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n- throw new NotImplementedException();\n+ DensityMap m1Map = new DensityMap(m1, _b);\n+ DensityMap m2Map = (m1 == m2) ? 
//self product\n+ m1Map : new DensityMap(m2, _b);\n+ DensityMap outMap = estimIntern(m1Map, m2Map, OpCode.MM);\n+ return OptimizerUtils.getSparsity( //aggregate output histogram\n+ outMap.getNumRowsOrig(), outMap.getNumColumnsOrig(), outMap.getNonZeros());\n}\n@Override\npublic double estim(MatrixBlock m, OpCode op) {\n+ return estim(m, null, op);\n+ }\n+\n+ /**\n+ * Computes the output density map given the density maps of the input operands.\n+ *\n+ * @param m1Map density map left-hand-side operand\n+ * @param m2Map density map right-hand-side operand\n+ * @return density map\n+ */\n+ private DensityMap estimIntern(DensityMap m1Map, DensityMap m2Map, OpCode op) {\n+ switch(op) {\n+ case MM: return estimInternMM(m1Map, m2Map);\n+ case MULT: return estimInternMult(m1Map, m2Map);\n+ case PLUS: return estimInternPlus(m1Map, m2Map);\n+\n+ case RBIND:\n+ case CBIND:\n+ //TODO simple append not possible due to partial blocks at end of m1Map\n+\n+ case TRANS:\n+ case DIAG:\n+ case RESHAPE:\n+ //TODO add missing estimators\n+ default:\nthrow new NotImplementedException();\n}\n+ }\n- private MatrixBlock computeDensityMap(MatrixBlock in) {\n- int rlen = (int)Math.ceil((double)in.getNumRows()/_b);\n- int clen = (int)Math.ceil((double)in.getNumColumns()/_b);\n+ private DensityMap estimInternMM(DensityMap m1Map, DensityMap m2Map) {\n+ final int m = m1Map.getNumRows();\n+ final int cd = m1Map.getNumColumns();\n+ final int n = m2Map.getNumColumns();\n+ MatrixBlock out = new MatrixBlock(m1Map.getNumRows(), m2Map.getNumColumns(), false);\n+ DenseBlock c = out.allocateBlock().getDenseBlock();\n+ m1Map.toSparsity();\n+ m2Map.toSparsity();\n+ for(int i=0; i<m; i++) {\n+ for(int k=0; k<cd; k++) {\n+ int lbk = m1Map.getColBlockize(k);\n+ double sp1 = m1Map.get(i, k);\n+ if( sp1 == 0 ) continue;\n+ for(int j=0; j<n; j++) {\n+ double sp2 = m2Map.get(k, j);\n+ if( sp2 == 0 ) continue;\n+ //custom multiply for scalar sparsity\n+ double tmp1 = 1 - Math.pow(1-sp1*sp2, lbk);\n+ //custom add for scalar sparsity\n+ double tmp2 = c.get(i, j);\n+ c.set(i, j, tmp1+tmp2 - tmp1*tmp2);\n+ }\n+ }\n+ }\n+ out.recomputeNonZeros();\n+ return new DensityMap(out, m1Map.getNumRowsOrig(),\n+ m2Map.getNumColumnsOrig(), _b, true);\n+ }\n+\n+ private DensityMap estimInternMult(DensityMap m1Map, DensityMap m2Map) {\n+ MatrixBlock out = new MatrixBlock(m1Map.getNumRows(), m1Map.getNumColumns(), false);\n+ DenseBlock c = out.allocateBlock().getDenseBlock();\n+ m1Map.toSparsity();\n+ m2Map.toSparsity();\n+ for(int i=0; i<m1Map.getNumRows(); i++)\n+ for(int j=0; j<m1Map.getNumColumns(); j++)\n+ c.set(i, j, m1Map.get(i, j) * m2Map.get(i, j));\n+ out.recomputeNonZeros();\n+ return new DensityMap(out, m1Map.getNumRowsOrig(),\n+ m1Map.getNumColumnsOrig(), _b, true);\n+ }\n+\n+ private DensityMap estimInternPlus(DensityMap m1Map, DensityMap m2Map) {\n+ MatrixBlock out = new MatrixBlock(m1Map.getNumRows(), m1Map.getNumColumns(), false);\n+ DenseBlock c = out.allocateBlock().getDenseBlock();\n+ m1Map.toSparsity();\n+ m2Map.toSparsity();\n+ for(int i=0; i<m1Map.getNumRows(); i++)\n+ for(int j=0; j<m1Map.getNumColumns(); j++) {\n+ double sp1 = m1Map.get(i, j);\n+ double sp2 = m2Map.get(i, j);\n+ c.set(i, j, sp1 + sp2 - sp1 * sp2);\n+ }\n+ out.recomputeNonZeros();\n+ return new DensityMap(out, m1Map.getNumRowsOrig(),\n+ m1Map.getNumColumnsOrig(), _b, true);\n+ }\n+\n+ private static class DensityMap {\n+ private final MatrixBlock _map;\n+ private final int _rlen;\n+ private final int _clen;\n+ private final int _b;\n+ private boolean _scaled; 
//false->nnz, true->sp\n+\n+ public DensityMap(MatrixBlock in, int b) {\n+ _rlen = in.getNumRows();\n+ _clen = in.getNumColumns();\n+ _b = b;\n+ _map = init(in);\n+ _scaled = false;\n+ }\n+\n+ public DensityMap(MatrixBlock map, int rlenOrig, int clenOrig, int b, boolean scaled) {\n+ _rlen = rlenOrig;\n+ _clen = clenOrig;\n+ _b = b;\n+ _map = map;\n+ _scaled = scaled;\n+ }\n+\n+ public int getNumRows() {\n+ return _map.getNumRows();\n+ }\n+\n+ public int getNumColumns() {\n+ return _map.getNumColumns();\n+ }\n+\n+ public int getNumRowsOrig() {\n+ return _rlen;\n+ }\n+\n+ public int getNumColumnsOrig() {\n+ return _clen;\n+ }\n+\n+ public long getNonZeros() {\n+ if( _scaled ) toNnz();\n+ return (long)Math.round(_map.sum());\n+ }\n+\n+ public int getRowBlockize(int r) {\n+ return UtilFunctions.computeBlockSize(_rlen, r+1, _b);\n+ }\n+\n+ public int getColBlockize(int c) {\n+ return UtilFunctions.computeBlockSize(_clen, c+1, _b);\n+ }\n+\n+ public double get(int r, int c) {\n+ return _map.quickGetValue(r, c);\n+ }\n+\n+ public void toSparsity() {\n+ if( _scaled ) return;\n+ //scale histogram by block size, w/ awareness of boundary blocks\n+ int rlen = _map.getNumRows();\n+ int clen = _map.getNumColumns();\n+ DenseBlock c = _map.getDenseBlock();\n+ for(int i=0; i<rlen; i++){\n+ int lrlen = getRowBlockize(i);\n+ for(int j=0; j<clen; j++) {\n+ double cval = c.get(i, j);\n+ if( cval == 0 ) continue;\n+ c.set(i, j, cval/lrlen/getColBlockize(j));\n+ }\n+ }\n+ _scaled = true;\n+ }\n+\n+ public void toNnz() {\n+ if( !_scaled ) return;\n+ //scale histogram by block size, w/ awareness of boundary blocks\n+ int rlen = _map.getNumRows();\n+ int clen = _map.getNumColumns();\n+ DenseBlock c = _map.getDenseBlock();\n+ for(int i=0; i<rlen; i++){\n+ int lrlen = getRowBlockize(i);\n+ for(int j=0; j<clen; j++) {\n+ double cval = c.get(i, j);\n+ if( cval == 0 ) continue;\n+ c.set(i, j, cval * lrlen * getColBlockize(j));\n+ }\n+ }\n+ _scaled = false;\n+ }\n+\n+ private MatrixBlock init(MatrixBlock in) {\n+ int rlen = (int)Math.ceil((double)_rlen/_b);\n+ int clen = (int)Math.ceil((double)_clen/_b);\nMatrixBlock out = new MatrixBlock(rlen, clen, false);\n//fast-path empty input\n@@ -125,75 +299,16 @@ public class EstimatorDensityMap extends SparsityEstimator\n}\n}\nelse {\n- for(int i=0; i<in.getNumRows(); i++) {\n- for(int j=0; j<in.getNumColumns(); j++) {\n+ for(int i=0; i<_rlen; i++) {\n+ for(int j=0; j<_clen; j++) {\ndouble aval = in.quickGetValue(i, j);\nif( aval != 0 )\nc.incr(i/_b, j/_b);\n}\n}\n}\n-\n- //scale histogram by block size, w/ awareness of boundary blocks\n- for(int i=0; i<rlen; i++){\n- int lrlen = UtilFunctions.computeBlockSize(in.getNumRows(), i+1, _b);\n- for(int j=0; j<clen; j++) {\n- double cval = c.get(i, j);\n- if( cval == 0 ) continue;\n- int lclen = UtilFunctions.computeBlockSize(in.getNumColumns(), j+1, _b);\n- c.set(i, j, cval/lrlen/lclen);\n- }\n- }\nout.recomputeNonZeros();\nreturn out;\n}\n-\n- /**\n- * Computes the output density map given the density maps of the input operands.\n- *\n- * @param m1Map density map left-hand-side operand\n- * @param m2Map density map right-hand-side operand\n- * @param retNnz return number of non-zeros instead of sparsity per cell\n- * @param mOrig number of rows of output matrix, required for returning nnz\n- * @param cdOrig common dimension of original matrix multiply\n- * @param nOrig number of columns of output matrix, required for returning nnz\n- * @return density map\n- */\n- private MatrixBlock estimIntern(MatrixBlock m1Map, MatrixBlock 
m2Map, boolean retNnz, int mOrig, int cdOrig, int nOrig) {\n- final int m = m1Map.getNumRows();\n- final int cd = m1Map.getNumColumns();\n- final int n = m2Map.getNumColumns();\n- MatrixBlock out = new MatrixBlock(m, n, false);\n- if( m1Map.isEmptyBlock(false) || m2Map.isEmptyBlock(false) )\n- return out;\n-\n- //compute output density map with IKJ schedule\n- DenseBlock c = out.allocateBlock().getDenseBlock();\n- for(int i=0; i<m; i++) {\n- for(int k=0; k<cd; k++) {\n- int lbk = UtilFunctions.computeBlockSize(cdOrig, k+1, _b);\n- double sp1 = m1Map.quickGetValue(i, k);\n- if( sp1 == 0 ) continue;\n- for(int j=0; j<n; j++) {\n- double sp2 = m2Map.quickGetValue(k, j);\n- if( sp2 == 0 ) continue;\n- //custom multiply for scalar sparsity\n- double tmp1 = 1 - Math.pow(1-sp1*sp2, lbk);\n- //custom add for scalar sparsity\n- double tmp2 = c.get(i, j);\n- c.set(i, j, tmp1+tmp2 - tmp1*tmp2);\n- }\n- }\n- //scale to non-zeros instead of sparsity if needed\n- if( retNnz ) {\n- int lbm = UtilFunctions.computeBlockSize(mOrig, i+1, _b);\n- for( int j=0; j<n; j++ ) {\n- int lbn = UtilFunctions.computeBlockSize(nOrig, j+1, _b);\n- c.set(i, j, c.get(i, j) * lbm * lbn);\n- }\n- }\n- }\n- out.recomputeNonZeros();\n- return out;\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Rework DensityMap sparsity estimator and mult/plus ops Closes #821.
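The estimInternMM loop in this rework combines two block-level sparsities sp1 and sp2 into 1-(1-sp1*sp2)^b, i.e. the probability that at least one of the b scalar products in a common-dimension block is non-zero, and accumulates contributions across blocks with a probabilistic union. A compact standalone sketch of that propagation, ignoring the boundary-block sizes for brevity; class and method names are illustrative, not from the commit:

public class DensityMapMMSketch {
	// m1[i][k] and m2[k][j] hold block-level sparsities of the two inputs;
	// b is the (interior) block size along the common dimension.
	static double[][] propagateMM(double[][] m1, double[][] m2, int b) {
		int m = m1.length, cd = m1[0].length, n = m2[0].length;
		double[][] out = new double[m][n]; // initialized to 0, i.e. empty output blocks
		for (int i = 0; i < m; i++)
			for (int k = 0; k < cd; k++) {
				double sp1 = m1[i][k];
				if (sp1 == 0) continue;
				for (int j = 0; j < n; j++) {
					double sp2 = m2[k][j];
					if (sp2 == 0) continue;
					// probability that at least one of the b scalar products is non-zero
					double tmp1 = 1 - Math.pow(1 - sp1 * sp2, b);
					// "sparsity add": union of independent non-zero probabilities across k
					out[i][j] = tmp1 + out[i][j] - tmp1 * out[i][j];
				}
			}
		return out;
	}
}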
49,736
08.08.2018 13:34:18
25,200
7fb38b9b08155b2466e9711ec87a3a3a26256c32
Turn off batchnorm rewrite for mode="train"
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "diff": "@@ -120,7 +120,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\nrule_GPUKernels(roots, hi, descendFirst); //see below\nif(roots != null) {\n- hi = batchNormTrain(roots, hop, hi, i);\n+ //hi = batchNormTrain(roots, hop, hi, i);\n}\nhi = batchNormTest(hop, hi, i);\nhi = channelSums(hop, hi, i);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/BatchNormTest.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/BatchNormTest.java", "diff": "@@ -73,7 +73,7 @@ public class BatchNormTest extends GPUTests {\n}\n}\nelse {\n- assertHeavyHitterPresent(\"gpu_batch_norm2d_train\");\n+ //assertHeavyHitterPresent(\"gpu_batch_norm2d_train\");\ndouble [] threshold = new double[outputs.size()];\nArrays.fill(threshold, getTHRESHOLD());\n// Handle loss of precision in CuDNN kernel\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Turn off batchnorm rewrite for mode="train"
49,760
08.08.2018 23:13:19
25,200
b7f569bd001c0799430857e4d00af69da93c691d
Fix MNC estimator (nnz, op, plus), incl various tests Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -83,7 +83,7 @@ public class EstimatorDensityMap extends SparsityEstimator\nDensityMap m1Map = new DensityMap(m1, _b);\nDensityMap m2Map = (m1 == m2) ? //self product\nm1Map : new DensityMap(m2, _b);\n- DensityMap outMap = estimIntern(m1Map, m2Map, OpCode.MM);\n+ DensityMap outMap = estimIntern(m1Map, m2Map, op);\nreturn OptimizerUtils.getSparsity( //aggregate output histogram\noutMap.getNumRowsOrig(), outMap.getNumColumnsOrig(), outMap.getNonZeros());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -97,23 +97,28 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nprivate double estimIntern(MatrixHistogram h1, MatrixHistogram h2, OpCode op) {\ndouble msize = (double)h1.getRows()*h1.getCols();\n-\nswitch (op) {\ncase MM:\nreturn estimInternMM(h1, h2);\n- case MULT:\n+ case MULT: {\nfinal long N1 = h1.getNonZeros();\nfinal long N2 = h2.getNonZeros();\nfinal long scale = IntStream.range(0, h1.getCols())\n.mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n- return IntStream.range(0, h1.getRows()).mapToLong(\n- i -> (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2).sum() / msize;\n- case PLUS:\n- return Math.min(\n- IntStream.range(0, h1.getRows()).mapToDouble(i -> h1.rNnz[i]/msize\n- + h2.rNnz[i]/msize - h1.rNnz[i]/msize * h2.rNnz[i]/msize).sum(),\n- IntStream.range(0, h1.getCols()).mapToDouble(i -> h1.cNnz[i]/msize\n- + h2.cNnz[i]/msize - h1.cNnz[i]/msize * h2.cNnz[i]/msize).sum());\n+ return IntStream.range(0, h1.getRows())\n+ .mapToLong(i -> (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2) //collisions\n+ .sum() / msize;\n+ }\n+ case PLUS: {\n+ final long N1 = h1.getNonZeros();\n+ final long N2 = h2.getNonZeros();\n+ final long scale = IntStream.range(0, h1.getCols())\n+ .mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ return IntStream.range(0, h1.getRows())\n+ .mapToLong(i -> (long)h1.rNnz[i] + h2.rNnz[i] //all minus collisions\n+ - (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2)\n+ .sum() / msize;\n+ }\ncase EQZERO:\nreturn OptimizerUtils.getSparsity(h1.getRows(), h1.getCols(),\n(long)h1.getRows() * h1.getCols() - h1.getNonZeros());\n@@ -301,7 +306,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\npublic long getNonZeros() {\nreturn getRows() < getCols() ?\nIntStream.range(0, getRows()).mapToLong(i-> rNnz[i]).sum() :\n- IntStream.range(0, getRows()).mapToLong(i-> cNnz[i]).sum();\n+ IntStream.range(0, getCols()).mapToLong(i-> cNnz[i]).sum();\n}\npublic static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut, OpCode op) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "diff": "@@ -566,6 +566,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n* @param pos position\n* @return a new FunctionOp or hi\n*/\n+ @SuppressWarnings(\"unused\")\nprivate static Hop batchNormTrain(ArrayList<Hop> roots, Hop parent, Hop hi, int pos)\n{\n// norm = bias_multiply(bias_add(X, -mean), 1/sqrt(var+eps))\n" }, { "change_type": "ADD", "old_path": null, "new_path": 
"src/test/java/org/apache/sysml/test/integration/functions/estim/OpBindTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Test;\n+import org.apache.commons.lang.NotImplementedException;\n+import org.apache.sysml.hops.estim.EstimatorBasicAvg;\n+import org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.SparsityEstimator;\n+import org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+/**\n+ * this is the basic operation check for all estimators with single operations\n+ */\n+public class OpBindTest extends AutomatedTestBase\n+{\n+ private final static int m = 600;\n+ private final static int k = 300;\n+ private final static int n = 100;\n+ private final static double[] sparsity = new double[]{0.2, 0.4};\n+// private final static OpCode mult = OpCode.MULT;\n+// private final static OpCode plus = OpCode.PLUS;\n+ private final static OpCode rbind = OpCode.RBIND;\n+ private final static OpCode cbind = OpCode.CBIND;\n+// private final static OpCode eqzero = OpCode.EQZERO;\n+// private final static OpCode diag = OpCode.DIAG;\n+// private final static OpCode neqzero = OpCode.NEQZERO;\n+// private final static OpCode trans = OpCode.TRANS;\n+// private final static OpCode reshape = OpCode.RESHAPE;\n+\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+\n+ //Average Case\n+ @Test\n+ public void testAvgRbind() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testAvgCbind() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, n, sparsity, cbind);\n+ }\n+\n+ //Worst Case\n+ @Test\n+ public void testWorstRbind() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testWorstCbind() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, n, sparsity, cbind);\n+ }\n+\n+ //DensityMap\n+ /*@Test\n+ public void testDMCaserbind() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testDMCasecbind() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, n, sparsity, cbind);\n+ }*/\n+\n+ //MNC\n+ @Test\n+ public void testMNCRbind() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testMNCCbind() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(), m, k, n, sparsity, cbind);\n+ 
}\n+\n+ //Bitset\n+ /*@Test\n+ public void testBitsetCaserbind() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testBitsetCasecbind() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, sparsity, cbind);\n+ }\n+\n+ //Layered Graph\n+ @Test\n+ public void testLGCaserbind() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testLGCasecbind() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, sparsity, cbind);\n+ }\n+\n+ //Sample\n+ @Test\n+ public void testSampleCaserbind() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, sparsity, rbind);\n+ }\n+\n+ @Test\n+ public void testSampleCasecbind() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, sparsity, cbind);\n+ }*/\n+\n+\n+ private void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp, OpCode op) {\n+ MatrixBlock m1;\n+ MatrixBlock m2;\n+ MatrixBlock m3 = new MatrixBlock();\n+ double est = 0;\n+ switch(op) {\n+ case RBIND:\n+ m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\n+ m2 = MatrixBlock.randOperations(n, k, sp[1], 1, 1, \"uniform\", 3);\n+ m1.append(m2, m3, false);\n+ est = estim.estim(m1, m2, op);\n+ break;\n+ case CBIND:\n+ m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\n+ m2 = MatrixBlock.randOperations(m, n, sp[1], 1, 1, \"uniform\", 3);\n+ m1.append(m2, m3);\n+ est = estim.estim(m1, m2, op);\n+ break;\n+ default:\n+ throw new NotImplementedException();\n+ }\n+ //compare estimated and real sparsity\n+ TestUtils.compareScalars(est, m3.getSparsity(), (estim instanceof EstimatorBasicWorst) ? 5e-1 : 1e-2);\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Test;\n+import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\n+import org.apache.commons.lang.NotImplementedException;\n+import org.apache.sysml.hops.estim.EstimatorBasicAvg;\n+import org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.SparsityEstimator;\n+import org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\n+import org.apache.sysml.runtime.functionobjects.Multiply;\n+import org.apache.sysml.runtime.functionobjects.Plus;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+/**\n+ * this is the basic operation check for all estimators with single operations\n+ */\n+public class OpElemWTest extends AutomatedTestBase\n+{\n+ //TODO experiment with m>2n for MNC (currently suboptimal accuracy)\n+ private final static int m = 600;\n+ private final static int n = 700;\n+ private final static double[] sparsity = new double[]{0.1, 0.04};\n+ private final static OpCode mult = OpCode.MULT;\n+ private final static OpCode plus = OpCode.PLUS;\n+// private final static OpCode rbind = OpCode.RBIND;\n+// private final static OpCode cbind = OpCode.CBIND;\n+// private final static OpCode eqzero = OpCode.EQZERO;\n+// private final static OpCode diag = OpCode.DIAG;\n+// private final static OpCode neqzero = OpCode.NEQZERO;\n+// private final static OpCode trans = OpCode.TRANS;\n+// private final static OpCode reshape = OpCode.RESHAPE;\n+\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+ //Average Case\n+ @Test\n+ public void testAvgMult() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testAvgPlus() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, n, sparsity, plus);\n+ }\n+\n+ //Worst Case\n+ @Test\n+ public void testWorstMult() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testWorstPlus() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, n, sparsity, plus);\n+ }\n+\n+ //DensityMap\n+ @Test\n+ public void testDMMult() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testDMPlus() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, n, sparsity, plus);\n+ }\n+\n+ //MNC\n+ @Test\n+ public void testMNCMult() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(), m, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testMNCPlus() {\n+ runSparsityEstimateTest(new EstimatorMatrixHistogram(), m, n, sparsity, plus);\n+ }\n+\n+ //Bitset\n+ /*@Test\n+ public void testBitsetCasemult() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testBitsetCaseplus() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, n, sparsity, plus);\n+ }\n+\n+ //Layered Graph\n+ @Test\n+ public void testLGCasemult() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testLGCaseplus() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, sparsity, plus);\n+ }\n+\n+ //Sample\n+ @Test\n+ 
public void testSampleCasemult() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, sparsity, mult);\n+ }\n+\n+ @Test\n+ public void testSampleCaseplus() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, n, sparsity, plus);\n+ }*/\n+\n+\n+ private void runSparsityEstimateTest(SparsityEstimator estim, int m, int n, double[] sp, OpCode op) {\n+ MatrixBlock m1 = MatrixBlock.randOperations(m, n, sp[0], 1, 1, \"uniform\", 3);\n+ MatrixBlock m2 = MatrixBlock.randOperations(m, n, sp[1], 1, 1, \"uniform\", 3);\n+ MatrixBlock m3 = new MatrixBlock();\n+ BinaryOperator bOp;\n+ double est = 0;\n+ switch(op) {\n+ case MULT:\n+ bOp = new BinaryOperator(Multiply.getMultiplyFnObject());\n+ m1.binaryOperations(bOp, m2, m3);\n+ est = estim.estim(m1, m2, op);\n+ break;\n+ case PLUS:\n+ bOp = new BinaryOperator(Plus.getPlusFnObject());\n+ m1.binaryOperations(bOp, m2, m3);\n+ est = estim.estim(m1, m2, op);\n+ break;\n+ default:\n+ throw new NotImplementedException();\n+ }\n+ //compare estimated and real sparsity\n+ TestUtils.compareScalars(est, m3.getSparsity(), (estim instanceof EstimatorBasicWorst) ? 5e-1 : 1e-3);\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpSingle.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Test;\n+import org.apache.sysml.hops.estim.EstimatorBasicAvg;\n+import org.apache.sysml.hops.estim.EstimatorBasicWorst;\n+import org.apache.sysml.hops.estim.EstimatorBitsetMM;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorLayeredGraph;\n+import org.apache.sysml.hops.estim.EstimatorSample;\n+import org.apache.sysml.hops.estim.SparsityEstimator;\n+import org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+\n+/**\n+ * this is the basic operation check for all estimators with single operations\n+ */\n+public class OpSingle extends AutomatedTestBase\n+{\n+ private final static int m = 600;\n+ private final static int k = 300;\n+ private final static double sparsity = 0.2;\n+// private final static OpCode mult = OpCode.MULT;\n+// private final static OpCode plus = OpCode.PLUS;\n+// private final static OpCode rbind = OpCode.RBIND;\n+// private final static OpCode cbind = OpCode.CBIND;\n+ private final static OpCode eqzero = OpCode.EQZERO;\n+ private final static OpCode diag = OpCode.DIAG;\n+ private final static OpCode neqzero = OpCode.NEQZERO;\n+ private final static OpCode trans = OpCode.TRANS;\n+ private final static OpCode reshape = OpCode.RESHAPE;\n+\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+\n+ //Average Case\n+ @Test\n+ public void testAvgCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testAvgCasediag() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testAvgCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testAvgCasetrans() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testAvgCasereshape() {\n+ runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, reshape);\n+ }\n+\n+ //Worst Case\n+ @Test\n+ public void testWCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testWCasediag() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testWCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testWCasetrans() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testWCasereshape() {\n+ runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, reshape);\n+ }\n+\n+ //DensityMap\n+ @Test\n+ public void testDMCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testDMCasediag() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testDMCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testDMCasetrans() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testDMCasereshape() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, 
reshape);\n+ }\n+\n+ //MNC\n+ @Test\n+ public void testMNCCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testMNCCasediag() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testMNCCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testMNCCasetrans() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testMNCCasereshape() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, reshape);\n+ }\n+\n+ //Bitset\n+ @Test\n+ public void testBitsetCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testBitsetCasediag() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testBitsetCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testBitsetCasetrans() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testBitsetCasereshape() {\n+ runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, reshape);\n+ }\n+\n+ //Layered Graph\n+ @Test\n+ public void testLGCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testLGCasediag() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testLGCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testLGCasetans() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testLGCasereshape() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, reshape);\n+ }\n+\n+ //Sample\n+ @Test\n+ public void testSampleCaseeqzero() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, eqzero);\n+ }\n+\n+ @Test\n+ public void testSampleCasediag() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, diag);\n+ }\n+\n+ @Test\n+ public void testSampleCaseneqzero() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testSampleCasetrans() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, trans);\n+ }\n+\n+ @Test\n+ public void testSampleCasereshape() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, reshape);\n+ }\n+\n+ private void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, double sp, OpCode op) {\n+// MatrixBlock m1 = MatrixBlock.randOperations(m, k, sp, 1, 1, \"uniform\", 3);\n+// MatrixBlock m2 = null;\n+// double est = 0;\n+// switch(op) {\n+// case EQZERO:\n+// case DIAG:\n+// case NEQZERO:\n+// case TRANS:\n+// case RESHAPE:\n+// }\n+// //compare estimated and real sparsity\n+// TestUtils.compareScalars(est, m2.getSparsity(), (estim instanceof EstimatorBasicWorst) ? 
5e-1 : 1e-2);\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "diff": "@@ -26,10 +26,13 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n+ OpBindTest.class,\n+ OpElemWTest.class,\nOuterProductTest.class,\nSelfProductTest.class,\nSquaredProductChainTest.class,\nSquaredProductTest.class,\n+ //OpSingle.class\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Fix MNC estimator (nnz, op, plus), incl various tests Closes #823.
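The fixed PLUS case in this commit estimates nnz(A+B) as nnz(A)+nnz(B) minus expected collisions, where the collision term for row i is rNnz1[i]*rNnz2[i] scaled by the dot product of the column counts over nnz(A)*nnz(B). A standalone sketch of exactly that arithmetic; the class name and the toy inputs in main are illustrative, not part of the commit:

import java.util.stream.IntStream;

public class MncPlusSketch {
	// rNnz*/cNnz* are per-row and per-column non-zero counts of two m x n inputs.
	static double estimPlusSparsity(int[] rNnz1, int[] cNnz1, int[] rNnz2, int[] cNnz2, int m, int n) {
		final long N1 = IntStream.range(0, m).mapToLong(i -> rNnz1[i]).sum(); // nnz(A)
		final long N2 = IntStream.range(0, m).mapToLong(i -> rNnz2[i]).sum(); // nnz(B)
		// column-wise collision weight, as in the patched PLUS case
		final long scale = IntStream.range(0, n).mapToLong(j -> (long) cNnz1[j] * cNnz2[j]).sum();
		double msize = (double) m * n;
		// nnz(A) + nnz(B) minus expected overlaps, scaled to a sparsity in [0,1]
		return IntStream.range(0, m)
			.mapToLong(i -> (long) rNnz1[i] + rNnz2[i] - (long) rNnz1[i] * rNnz2[i] * scale / N1 / N2)
			.sum() / msize;
	}

	public static void main(String[] args) {
		// toy 2x2 example: A has one non-zero per row, B has both non-zeros in row 0
		int[] rNnz1 = {1, 1}, cNnz1 = {1, 1};
		int[] rNnz2 = {2, 0}, cNnz2 = {1, 1};
		System.out.println(estimPlusSparsity(rNnz1, cNnz1, rNnz2, cNnz2, 2, 2)); // 0.75
	}
}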
49,736
09.08.2018 09:45:42
25,200
5ca8706e98ba8de7418a24405d9d3bb600dfe468
Added rshape operator for the GPU backend. This leads to 1.2x speedup for ResNet200 with batch size of 32 by reducing the number of host-to-device transfers. Also, added GPU tests for this operator.

[ { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.cu", "new_path": "src/main/cpp/kernels/SystemML.cu", "diff": "@@ -2247,3 +2247,35 @@ extern \"C\" __global__ void prepare_lstm_dinput_f(float* smlInput, float* cudnnIn\nprepare_lstm_dinput(smlInput, cudnnInput, N, D, TD, size);\n}\n+\n+/**\n+ * Do an log over all the elements of a matrix\n+ * @param A the input matrix (of length = size)\n+ * @param C the pre-allocated output matrix (of length = size)\n+ * @param size the length of the input and output matrices\n+ */\n+template <typename T>\n+__device__ void colwise_reshape(T *A, T *C, unsigned int size,\n+ unsigned int inRows, unsigned int inCols,\n+ unsigned int outRows, unsigned int outCols) {\n+ int index = blockIdx.x * blockDim.x + threadIdx.x;\n+ if (index < size) {\n+ int i = index / outCols;\n+ int j = index % outCols;\n+ int k = (outRows*j+i) % inRows;\n+ int l = (outRows*j+i) / inRows;\n+ C[index] = A[k*inCols+l];\n+ }\n+}\n+\n+extern \"C\" __global__ void colwise_reshape_d(double *A, double *C, unsigned int size,\n+ unsigned int inRows, unsigned int inCols,\n+ unsigned int outRows, unsigned int outCols) {\n+ colwise_reshape(A, C, size, inRows, inCols, outRows, outCols);\n+}\n+\n+extern \"C\" __global__ void colwise_reshape_f(float *A, float *C, unsigned int size,\n+ unsigned int inRows, unsigned int inCols,\n+ unsigned int outRows, unsigned int outCols) {\n+ colwise_reshape(A, C, size, inRows, inCols, outRows, outCols);\n+}\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.ptx", "new_path": "src/main/cpp/kernels/SystemML.ptx", "diff": "@@ -12877,12 +12877,112 @@ BB111_2:\nret;\n}\n+ // .globl colwise_reshape_d\n+.visible .entry colwise_reshape_d(\n+ .param .u64 colwise_reshape_d_param_0,\n+ .param .u64 colwise_reshape_d_param_1,\n+ .param .u32 colwise_reshape_d_param_2,\n+ .param .u32 colwise_reshape_d_param_3,\n+ .param .u32 colwise_reshape_d_param_4,\n+ .param .u32 colwise_reshape_d_param_5,\n+ .param .u32 colwise_reshape_d_param_6\n+)\n+{\n+ .reg .pred %p<2>;\n+ .reg .b32 %r<16>;\n+ .reg .f64 %fd<2>;\n+ .reg .b64 %rd<9>;\n+\n+\n+ ld.param.u64 %rd1, [colwise_reshape_d_param_0];\n+ ld.param.u64 %rd2, [colwise_reshape_d_param_1];\n+ ld.param.u32 %r6, [colwise_reshape_d_param_2];\n+ ld.param.u32 %r2, [colwise_reshape_d_param_3];\n+ ld.param.u32 %r3, [colwise_reshape_d_param_4];\n+ ld.param.u32 %r4, [colwise_reshape_d_param_5];\n+ ld.param.u32 %r5, [colwise_reshape_d_param_6];\n+ mov.u32 %r7, %ctaid.x;\n+ mov.u32 %r8, %ntid.x;\n+ mov.u32 %r9, %tid.x;\n+ mad.lo.s32 %r1, %r8, %r7, %r9;\n+ setp.ge.u32 %p1, %r1, %r6;\n+ @%p1 bra BB112_2;\n+\n+ cvta.to.global.u64 %rd3, %rd1;\n+ rem.u32 %r10, %r1, %r5;\n+ div.u32 %r11, %r1, %r5;\n+ mad.lo.s32 %r12, %r10, %r4, %r11;\n+ rem.u32 %r13, %r12, %r2;\n+ div.u32 %r14, %r12, %r2;\n+ mad.lo.s32 %r15, %r13, %r3, %r14;\n+ mul.wide.u32 %rd4, %r15, 8;\n+ add.s64 %rd5, %rd3, %rd4;\n+ ld.global.f64 %fd1, [%rd5];\n+ cvta.to.global.u64 %rd6, %rd2;\n+ mul.wide.s32 %rd7, %r1, 8;\n+ add.s64 %rd8, %rd6, %rd7;\n+ st.global.f64 [%rd8], %fd1;\n+\n+BB112_2:\n+ ret;\n+}\n+\n+ // .globl colwise_reshape_f\n+.visible .entry colwise_reshape_f(\n+ .param .u64 colwise_reshape_f_param_0,\n+ .param .u64 colwise_reshape_f_param_1,\n+ .param .u32 colwise_reshape_f_param_2,\n+ .param .u32 colwise_reshape_f_param_3,\n+ .param .u32 colwise_reshape_f_param_4,\n+ .param .u32 colwise_reshape_f_param_5,\n+ .param .u32 colwise_reshape_f_param_6\n+)\n+{\n+ .reg .pred %p<2>;\n+ .reg .f32 %f<2>;\n+ .reg .b32 
%r<16>;\n+ .reg .b64 %rd<9>;\n+\n+\n+ ld.param.u64 %rd1, [colwise_reshape_f_param_0];\n+ ld.param.u64 %rd2, [colwise_reshape_f_param_1];\n+ ld.param.u32 %r6, [colwise_reshape_f_param_2];\n+ ld.param.u32 %r2, [colwise_reshape_f_param_3];\n+ ld.param.u32 %r3, [colwise_reshape_f_param_4];\n+ ld.param.u32 %r4, [colwise_reshape_f_param_5];\n+ ld.param.u32 %r5, [colwise_reshape_f_param_6];\n+ mov.u32 %r7, %ctaid.x;\n+ mov.u32 %r8, %ntid.x;\n+ mov.u32 %r9, %tid.x;\n+ mad.lo.s32 %r1, %r8, %r7, %r9;\n+ setp.ge.u32 %p1, %r1, %r6;\n+ @%p1 bra BB113_2;\n+\n+ cvta.to.global.u64 %rd3, %rd1;\n+ rem.u32 %r10, %r1, %r5;\n+ div.u32 %r11, %r1, %r5;\n+ mad.lo.s32 %r12, %r10, %r4, %r11;\n+ rem.u32 %r13, %r12, %r2;\n+ div.u32 %r14, %r12, %r2;\n+ mad.lo.s32 %r15, %r13, %r3, %r14;\n+ mul.wide.u32 %rd4, %r15, 4;\n+ add.s64 %rd5, %rd3, %rd4;\n+ ld.global.f32 %f1, [%rd5];\n+ cvta.to.global.u64 %rd6, %rd2;\n+ mul.wide.s32 %rd7, %r1, 4;\n+ add.s64 %rd8, %rd6, %rd7;\n+ st.global.f32 [%rd8], %f1;\n+\n+BB113_2:\n+ ret;\n+}\n+\n.func (.param .b64 func_retval0) __internal_trig_reduction_slowpathd(\n.param .b64 __internal_trig_reduction_slowpathd_param_0,\n.param .b64 __internal_trig_reduction_slowpathd_param_1\n)\n{\n- .local .align 8 .b8 __local_depot112[40];\n+ .local .align 8 .b8 __local_depot114[40];\n.reg .b64 %SP;\n.reg .b64 %SPL;\n.reg .pred %p<9>;\n@@ -12891,7 +12991,7 @@ BB111_2:\n.reg .b64 %rd<102>;\n- mov.u64 %rd101, __local_depot112;\n+ mov.u64 %rd101, __local_depot114;\ncvta.local.u64 %SP, %rd101;\nld.param.f64 %fd4, [__internal_trig_reduction_slowpathd_param_0];\nld.param.u64 %rd37, [__internal_trig_reduction_slowpathd_param_1];\n@@ -12905,7 +13005,7 @@ BB111_2:\nshr.u32 %r3, %r1, 20;\nbfe.u32 %r4, %r1, 20, 11;\nsetp.eq.s32 %p1, %r4, 2047;\n- @%p1 bra BB112_13;\n+ @%p1 bra BB114_13;\nadd.s32 %r15, %r4, -1024;\nshr.u32 %r16, %r15, 6;\n@@ -12918,7 +13018,7 @@ BB111_2:\nmov.u64 %rd94, 0;\nsetp.ge.s32 %p2, %r5, %r6;\nmov.u64 %rd93, %rd1;\n- @%p2 bra BB112_4;\n+ @%p2 bra BB114_4;\nmov.b64 %rd41, %fd4;\nshl.b64 %rd42, %rd41, 11;\n@@ -12935,7 +13035,7 @@ BB111_2:\nmov.u64 %rd91, %rd1;\nmov.u32 %r39, %r5;\n-BB112_3:\n+BB114_3:\n.pragma \"nounroll\";\nld.const.u64 %rd47, [%rd89];\n// inline asm\n@@ -12965,15 +13065,15 @@ BB112_3:\nadd.s64 %rd93, %rd93, 8;\nadd.s64 %rd89, %rd89, 8;\nsetp.lt.s32 %p3, %r39, %r6;\n- @%p3 bra BB112_3;\n+ @%p3 bra BB114_3;\n-BB112_4:\n+BB114_4:\nst.local.u64 [%rd93], %rd94;\nld.local.u64 %rd95, [%rd1+16];\nld.local.u64 %rd96, [%rd1+24];\nand.b32 %r9, %r3, 63;\nsetp.eq.s32 %p4, %r9, 0;\n- @%p4 bra BB112_6;\n+ @%p4 bra BB114_6;\nmov.u32 %r27, 64;\nsub.s32 %r28, %r27, %r9;\n@@ -12985,7 +13085,7 @@ BB112_4:\nshr.u64 %rd55, %rd54, %r28;\nor.b64 %rd95, %rd55, %rd53;\n-BB112_6:\n+BB114_6:\ncvta.to.local.u64 %rd56, %rd37;\nshr.u64 %rd57, %rd96, 62;\ncvt.u32.u64 %r29, %rd57;\n@@ -13002,7 +13102,7 @@ BB112_6:\nselp.b32 %r34, %r32, %r33, %p5;\nst.local.u32 [%rd56], %r34;\nsetp.eq.s32 %p6, %r31, 0;\n- @%p6 bra BB112_8;\n+ @%p6 bra BB114_8;\nmov.u64 %rd64, 0;\n// inline asm\n@@ -13022,10 +13122,10 @@ BB112_6:\n// inline asm\nxor.b32 %r40, %r40, -2147483648;\n-BB112_8:\n+BB114_8:\nclz.b64 %r41, %rd98;\nsetp.eq.s32 %p7, %r41, 0;\n- @%p7 bra BB112_10;\n+ @%p7 bra BB114_10;\nshl.b64 %rd67, %rd98, %r41;\nmov.u32 %r35, 64;\n@@ -13033,7 +13133,7 @@ BB112_8:\nshr.u64 %rd68, %rd97, %r36;\nor.b64 %rd98, %rd68, %rd67;\n-BB112_10:\n+BB114_10:\nmov.u64 %rd72, -3958705157555305931;\n// inline asm\n{\n@@ -13054,7 +13154,7 @@ BB112_10:\n}\n// inline asm\nsetp.lt.s64 %p8, %rd100, 1;\n- @%p8 bra BB112_12;\n+ @%p8 bra 
BB114_12;\n// inline asm\n{\n@@ -13073,7 +13173,7 @@ BB112_10:\n// inline asm\nadd.s32 %r41, %r41, 1;\n-BB112_12:\n+BB114_12:\ncvt.u64.u32 %rd79, %r40;\nshl.b64 %rd80, %rd79, 32;\nmov.u32 %r37, 1022;\n@@ -13088,7 +13188,7 @@ BB112_12:\nor.b64 %rd88, %rd87, %rd80;\nmov.b64 %fd4, %rd88;\n-BB112_13:\n+BB114_13:\nst.param.f64 [func_retval0+0], %fd4;\nret;\n}\n@@ -13116,7 +13216,7 @@ BB112_13:\n}\nshr.u32 %r51, %r50, 20;\nsetp.ne.s32 %p1, %r51, 0;\n- @%p1 bra BB113_2;\n+ @%p1 bra BB115_2;\nmul.f64 %fd14, %fd12, 0d4350000000000000;\n{\n@@ -13130,13 +13230,13 @@ BB112_13:\nshr.u32 %r16, %r50, 20;\nadd.s32 %r51, %r16, -54;\n-BB113_2:\n+BB115_2:\nadd.s32 %r52, %r51, -1023;\nand.b32 %r17, %r50, -2146435073;\nor.b32 %r18, %r17, 1072693248;\nmov.b64 %fd135, {%r49, %r18};\nsetp.lt.u32 %p2, %r18, 1073127583;\n- @%p2 bra BB113_4;\n+ @%p2 bra BB115_4;\n{\n.reg .b32 %temp;\n@@ -13150,7 +13250,7 @@ BB113_2:\nmov.b64 %fd135, {%r19, %r21};\nadd.s32 %r52, %r51, -1022;\n-BB113_4:\n+BB115_4:\nadd.f64 %fd15, %fd135, 0d3FF0000000000000;\nrcp.approx.ftz.f64 %fd16, %fd15;\nneg.f64 %fd17, %fd15;\n@@ -13313,13 +13413,13 @@ BB113_4:\nmov.b32 %f2, %r35;\nabs.f32 %f1, %f2;\nsetp.lt.f32 %p4, %f1, 0f4086232B;\n- @%p4 bra BB113_7;\n+ @%p4 bra BB115_7;\nsetp.lt.f64 %p5, %fd4, 0d0000000000000000;\nadd.f64 %fd129, %fd4, 0d7FF0000000000000;\nselp.f64 %fd136, 0d0000000000000000, %fd129, %p5;\nsetp.geu.f32 %p6, %f1, 0f40874800;\n- @%p6 bra BB113_7;\n+ @%p6 bra BB115_7;\nmov.f64 %fd134, 0d4338000000000000;\nmov.f64 %fd133, 0d3FF71547652B82FE;\n@@ -13341,26 +13441,26 @@ BB113_4:\nmov.b64 %fd131, {%r44, %r43};\nmul.f64 %fd136, %fd130, %fd131;\n-BB113_7:\n+BB115_7:\n{\n.reg .b32 %temp;\nmov.b64 {%temp, %r45}, %fd136;\n}\nand.b32 %r46, %r45, 2147483647;\nsetp.ne.s32 %p7, %r46, 2146435072;\n- @%p7 bra BB113_9;\n+ @%p7 bra BB115_9;\n{\n.reg .b32 %temp;\nmov.b64 {%r47, %temp}, %fd136;\n}\nsetp.eq.s32 %p8, %r47, 0;\n- @%p8 bra BB113_10;\n+ @%p8 bra BB115_10;\n-BB113_9:\n+BB115_9:\nfma.rn.f64 %fd136, %fd136, %fd5, %fd136;\n-BB113_10:\n+BB115_10:\nst.param.f64 [func_retval0+0], %fd136;\nret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java", "diff": "@@ -132,9 +132,11 @@ public class ReorgOp extends MultiThreadedHop\nreturn true;\n}\n}\n+ case RESHAPE: {\n+ return true;\n+ }\ncase DIAG:\ncase REV:\n- case RESHAPE:\ncase SORT:\nreturn false;\ndefault:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -30,6 +30,7 @@ import org.apache.sysml.runtime.instructions.gpu.DnnGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.MatrixIndexingGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.MatrixMatrixAxpyGPUInstruction;\n+import org.apache.sysml.runtime.instructions.gpu.MatrixReshapeGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction.GPUINSTRUCTION_TYPE;\nimport org.apache.sysml.runtime.instructions.gpu.MMTSJGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.RelationalBinaryGPUInstruction;\n@@ -69,6 +70,7 @@ public class GPUInstructionParser extends InstructionParser\n// Reorg/Transpose\nString2GPUInstructionType.put( \"r'\", GPUINSTRUCTION_TYPE.Reorg);\n+ String2GPUInstructionType.put( 
\"rshape\",GPUINSTRUCTION_TYPE.MatrixReshape);\n// Matrix Manipulation\nString2GPUInstructionType.put( \"append\", GPUINSTRUCTION_TYPE.Append);\n@@ -193,6 +195,9 @@ public class GPUInstructionParser extends InstructionParser\ncase Reorg:\nreturn ReorgGPUInstruction.parseInstruction(str);\n+ case MatrixReshape:\n+ return MatrixReshapeGPUInstruction.parseInstruction(str);\n+\ncase ArithmeticBinary:\nString opcode = InstructionUtils.getOpCode(str);\nif( opcode.equals(\"+*\") || opcode.equals(\"-*\") )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java", "diff": "@@ -41,6 +41,7 @@ public abstract class GPUInstruction extends Instruction {\nDnn,\nMMTSJ,\nReorg,\n+ MatrixReshape,\nAppend,\nArithmeticBinary,\nBuiltinUnary,\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixReshapeGPUInstruction.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.runtime.instructions.gpu;\n+\n+import org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.functionobjects.SwapIndex;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.instructions.cp.BooleanObject;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.instructions.gpu.context.ExecutionConfig;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\n+import org.apache.sysml.runtime.matrix.operators.Operator;\n+import org.apache.sysml.runtime.matrix.operators.ReorgOperator;\n+import org.apache.sysml.utils.GPUStatistics;\n+\n+import jcuda.Pointer;\n+\n+public class MatrixReshapeGPUInstruction extends GPUInstruction {\n+\n+ private final CPOperand _input;\n+ private final CPOperand _output;\n+ private final CPOperand _opRows;\n+ private final CPOperand _opCols;\n+ private final CPOperand _opByRow;\n+\n+ protected MatrixReshapeGPUInstruction(Operator op, String opcode, String istr,\n+ CPOperand in1, CPOperand in2, CPOperand in3, CPOperand in4, CPOperand out) {\n+ super(op, opcode, istr);\n+ _input = in1;\n+ _opRows = in2;\n+ _opCols = in3;\n+ _opByRow = in4;\n+ _output = out;\n+ }\n+\n+ public static MatrixReshapeGPUInstruction parseInstruction ( String str ) {\n+ String[] parts = 
InstructionUtils.getInstructionPartsWithValueType(str);\n+ InstructionUtils.checkNumFields( parts, 5 );\n+ String opcode = parts[0];\n+ CPOperand in1 = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand in3 = new CPOperand(parts[3]);\n+ CPOperand in4 = new CPOperand(parts[4]);\n+ CPOperand out = new CPOperand(parts[5]);\n+ if(!opcode.equalsIgnoreCase(\"rshape\"))\n+ throw new DMLRuntimeException(\"Unknown opcode while parsing an MatrixReshapeGPUInstruction: \" + str);\n+ else\n+ return new MatrixReshapeGPUInstruction(new ReorgOperator(SwapIndex.getSwapIndexFnObject()), opcode, str, in1, in2, in3, in4, out);\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec) {\n+ int rows = (int)ec.getScalarInput(_opRows.getName(), _opRows.getValueType(), _opRows.isLiteral()).getLongValue(); //save cast\n+ int cols = (int)ec.getScalarInput(_opCols.getName(), _opCols.getValueType(), _opCols.isLiteral()).getLongValue(); //save cast\n+ BooleanObject byRow = (BooleanObject) ec.getScalarInput(_opByRow.getName(), ValueType.BOOLEAN, _opByRow.isLiteral());\n+\n+ GPUStatistics.incrementNoOfExecutedGPUInst();\n+ String instName = getExtendedOpcode();\n+ GPUContext gCtx = ec.getGPUContext(0);\n+ MatrixObject mat = getMatrixInputForGPUInstruction(ec, _input.getName());\n+ if(rows*cols != mat.getNumRows()*mat.getNumColumns()) {\n+ throw new DMLRuntimeException(\"Incorrect number of rows and cols in rshape instruction\");\n+ }\n+ // We currently support only dense rshape\n+ Pointer inPtr = LibMatrixCUDA.getDensePointer(gCtx, mat, instName);\n+ MatrixObject out = LibMatrixCUDA.getDenseMatrixOutputForGPUInstruction(ec, instName, _output.getName(), rows, cols);\n+ Pointer outPtr = LibMatrixCUDA.getDensePointer(gCtx, out, instName);\n+ if(byRow.getBooleanValue()) {\n+ // byrow = TRUE is simple memcpy and metadata update\n+ LibMatrixCUDA.deviceCopy(instName, inPtr, outPtr, LibMatrixCUDA.toInt(mat.getNumRows()), LibMatrixCUDA.toInt(mat.getNumColumns()));\n+ }\n+ else {\n+ // byrow = FALSE uses a custom kernel to perform rshape\n+ LibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"colwise_reshape\",\n+ ExecutionConfig.getConfigForSimpleVectorOperations(LibMatrixCUDA.toInt(rows*cols)),\n+ inPtr, outPtr, LibMatrixCUDA.toInt(rows*cols),\n+ LibMatrixCUDA.toInt(mat.getNumRows()), LibMatrixCUDA.toInt(mat.getNumColumns()),\n+ rows, cols);\n+ }\n+ ec.releaseMatrixInputForGPUInstruction(_input.getName());\n+ ec.releaseMatrixOutputForGPUInstruction(_output.getName());\n+ }\n+\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -1542,7 +1542,7 @@ public class LibMatrixCUDA {\n* @param rlen number of rows\n* @param clen number of columns\n*/\n- private static void deviceCopy(String instName, Pointer src, Pointer dest, int rlen, int clen) {\n+ public static void deviceCopy(String instName, Pointer src, Pointer dest, int rlen, int clen) {\nlong t0=0;\nif (DMLScript.FINEGRAINED_STATISTICS) t0 = System.nanoTime();\nint size = rlen * clen * sizeOfDataType;\n@@ -2512,7 +2512,7 @@ public class LibMatrixCUDA {\n* @param numCols number of columns of output matrix object\n* @return the matrix object\n*/\n- protected static MatrixObject getDenseMatrixOutputForGPUInstruction(ExecutionContext ec, String instName, String name, long numRows, long numCols) {\n+ public static MatrixObject 
getDenseMatrixOutputForGPUInstruction(ExecutionContext ec, String instName, String name, long numRows, long numCols) {\nlong t0=0;\nif (DMLScript.FINEGRAINED_STATISTICS) t0 = System.nanoTime();\nPair<MatrixObject, Boolean> mb = ec.getDenseMatrixOutputForGPUInstruction(name, numRows, numCols);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/gpu/ReshapeTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.gpu;\n+\n+import java.util.Arrays;\n+import java.util.HashMap;\n+import java.util.List;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n+\n+/**\n+ * Tests gpu reshape\n+ */\n+public class ReshapeTest extends GPUTests {\n+\n+ private final static String TEST_NAME = \"ReshapeTests\";\n+ private final int seed = 42;\n+\n+ @Override\n+ public void setUp() {\n+ super.setUp();\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_DIR, TEST_NAME);\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ }\n+\n+ @Test\n+ public void testDenseReshape1() {\n+ testReshape(1, 10, 10, 1, true, 0.9);\n+ }\n+\n+ @Test\n+ public void testDenseReshape2() {\n+ testReshape(1, 10, 10, 1, false, 0.9);\n+ }\n+\n+ @Test\n+ public void testDenseReshape5() {\n+ testReshape(10, 3, 3, 10, true, 0.9);\n+ }\n+\n+ @Test\n+ public void testDenseReshape6() {\n+ testReshape(10, 3, 3, 10, false, 0.9);\n+ }\n+\n+ @Test\n+ public void testDenseReshape3() {\n+ testReshape(10, 3, 15, 2, true, 0.9);\n+ }\n+\n+ @Test\n+ public void testDenseReshape4() {\n+ testReshape(10, 3, 15, 2, false, 0.9);\n+ }\n+\n+ @Test\n+ public void testSparseReshape7() {\n+ testReshape(10, 3, 15, 2, true, 0.1);\n+ }\n+\n+ @Test\n+ public void testSparseReshape8() {\n+ testReshape(10, 3, 15, 2, false, 0.1);\n+ }\n+\n+ private void testReshape(int inRows, int inCols, int outRows, int outCols, boolean byrow, double sparsity) {\n+ System.out.println(\"Starting testReshape:\" + inRows + \" \" + inCols + \" \" + outRows + \" \" + outCols + \" \" + byrow + \" \" + sparsity);\n+ String scriptStr = \"output = matrix(x, rows=\" + outRows + \", cols=\" + outCols + \", byrow=\" + (byrow ? \"TRUE\" : \"FALSE\") + \");\" ;\n+ HashMap<String, Object> inputs = new HashMap<>();\n+ inputs.put(\"x\", generateInputMatrix(spark, inRows, inCols, 0, 10, sparsity, seed));\n+ List<String> outputs = Arrays.asList(\"output\");\n+ List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, outputs);\n+ List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, outputs);\n+ assertHeavyHitterPresent(\"gpu_rshape\");\n+ assertEqualObjects(outCPU.get(0), outGPU.get(0));\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Added rshape operator for the GPU backend - This leads to 1.2x speedup for ResNet200 with batch size of 32 by reducing the number of host-to-device transfers. - Also, added GPU tests for this operator.
49,736
09.08.2018 21:00:21
25,200
04bc667f3650d57c0bc9de20e46e7624205cc1e6
Added SGD Nesterov update operator via rewrite for the GPU backend. This leads to 10-15% speedup for ResNet200 with batch size of 32. Also, added GPU tests for this operator.
[ { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.cu", "new_path": "src/main/cpp/kernels/SystemML.cu", "diff": "@@ -2248,12 +2248,6 @@ extern \"C\" __global__ void prepare_lstm_dinput_f(float* smlInput, float* cudnnIn\n}\n-/**\n- * Do an log over all the elements of a matrix\n- * @param A the input matrix (of length = size)\n- * @param C the pre-allocated output matrix (of length = size)\n- * @param size the length of the input and output matrices\n- */\ntemplate <typename T>\n__device__ void colwise_reshape(T *A, T *C, unsigned int size,\nunsigned int inRows, unsigned int inCols,\n@@ -2279,3 +2273,20 @@ extern \"C\" __global__ void colwise_reshape_f(float *A, float *C, unsigned int si\nunsigned int outRows, unsigned int outCols) {\ncolwise_reshape(A, C, size, inRows, inCols, outRows, outCols);\n}\n+\n+// Performs the operation: out = X - mu*v_prev + (1+mu)*v\n+template <typename T>\n+__device__ void update_nesterov_x(T *X, T *v, T *v_prev, double mu, T *out, unsigned int size) {\n+ int index = blockIdx.x * blockDim.x + threadIdx.x;\n+ if (index < size) {\n+ out[index] = X[index] - mu*v_prev[index] + (1+mu)*v[index];\n+ }\n+}\n+\n+extern \"C\" __global__ void update_nesterov_x_d(double *X, double *v, double *v_prev, double mu, double *out, unsigned int size) {\n+ update_nesterov_x(X, v, v_prev, mu, out, size);\n+}\n+\n+extern \"C\" __global__ void update_nesterov_x_f(float *X, float *v, float *v_prev, double mu, float *out, unsigned int size) {\n+ update_nesterov_x(X, v, v_prev, mu, out, size);\n+}\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.ptx", "new_path": "src/main/cpp/kernels/SystemML.ptx", "diff": "@@ -12977,12 +12977,119 @@ BB113_2:\nret;\n}\n+ // .globl update_nesterov_x_d\n+.visible .entry update_nesterov_x_d(\n+ .param .u64 update_nesterov_x_d_param_0,\n+ .param .u64 update_nesterov_x_d_param_1,\n+ .param .u64 update_nesterov_x_d_param_2,\n+ .param .f64 update_nesterov_x_d_param_3,\n+ .param .u64 update_nesterov_x_d_param_4,\n+ .param .u32 update_nesterov_x_d_param_5\n+)\n+{\n+ .reg .pred %p<2>;\n+ .reg .b32 %r<6>;\n+ .reg .f64 %fd<9>;\n+ .reg .b64 %rd<14>;\n+\n+\n+ ld.param.u64 %rd1, [update_nesterov_x_d_param_0];\n+ ld.param.u64 %rd2, [update_nesterov_x_d_param_1];\n+ ld.param.u64 %rd3, [update_nesterov_x_d_param_2];\n+ ld.param.f64 %fd1, [update_nesterov_x_d_param_3];\n+ ld.param.u64 %rd4, [update_nesterov_x_d_param_4];\n+ ld.param.u32 %r2, [update_nesterov_x_d_param_5];\n+ mov.u32 %r3, %ctaid.x;\n+ mov.u32 %r4, %ntid.x;\n+ mov.u32 %r5, %tid.x;\n+ mad.lo.s32 %r1, %r4, %r3, %r5;\n+ setp.ge.u32 %p1, %r1, %r2;\n+ @%p1 bra BB114_2;\n+\n+ cvta.to.global.u64 %rd5, %rd1;\n+ mul.wide.s32 %rd6, %r1, 8;\n+ add.s64 %rd7, %rd5, %rd6;\n+ cvta.to.global.u64 %rd8, %rd3;\n+ add.s64 %rd9, %rd8, %rd6;\n+ ld.global.f64 %fd2, [%rd9];\n+ mul.f64 %fd3, %fd2, %fd1;\n+ ld.global.f64 %fd4, [%rd7];\n+ sub.f64 %fd5, %fd4, %fd3;\n+ cvta.to.global.u64 %rd10, %rd2;\n+ add.s64 %rd11, %rd10, %rd6;\n+ ld.global.f64 %fd6, [%rd11];\n+ add.f64 %fd7, %fd1, 0d3FF0000000000000;\n+ fma.rn.f64 %fd8, %fd7, %fd6, %fd5;\n+ cvta.to.global.u64 %rd12, %rd4;\n+ add.s64 %rd13, %rd12, %rd6;\n+ st.global.f64 [%rd13], %fd8;\n+\n+BB114_2:\n+ ret;\n+}\n+\n+ // .globl update_nesterov_x_f\n+.visible .entry update_nesterov_x_f(\n+ .param .u64 update_nesterov_x_f_param_0,\n+ .param .u64 update_nesterov_x_f_param_1,\n+ .param .u64 update_nesterov_x_f_param_2,\n+ .param .f64 update_nesterov_x_f_param_3,\n+ .param .u64 update_nesterov_x_f_param_4,\n+ 
.param .u32 update_nesterov_x_f_param_5\n+)\n+{\n+ .reg .pred %p<2>;\n+ .reg .f32 %f<5>;\n+ .reg .b32 %r<6>;\n+ .reg .f64 %fd<9>;\n+ .reg .b64 %rd<14>;\n+\n+\n+ ld.param.u64 %rd1, [update_nesterov_x_f_param_0];\n+ ld.param.u64 %rd2, [update_nesterov_x_f_param_1];\n+ ld.param.u64 %rd3, [update_nesterov_x_f_param_2];\n+ ld.param.f64 %fd1, [update_nesterov_x_f_param_3];\n+ ld.param.u64 %rd4, [update_nesterov_x_f_param_4];\n+ ld.param.u32 %r2, [update_nesterov_x_f_param_5];\n+ mov.u32 %r3, %ctaid.x;\n+ mov.u32 %r4, %ntid.x;\n+ mov.u32 %r5, %tid.x;\n+ mad.lo.s32 %r1, %r4, %r3, %r5;\n+ setp.ge.u32 %p1, %r1, %r2;\n+ @%p1 bra BB115_2;\n+\n+ cvta.to.global.u64 %rd5, %rd1;\n+ mul.wide.s32 %rd6, %r1, 4;\n+ add.s64 %rd7, %rd5, %rd6;\n+ ld.global.f32 %f1, [%rd7];\n+ cvt.f64.f32 %fd2, %f1;\n+ cvta.to.global.u64 %rd8, %rd3;\n+ add.s64 %rd9, %rd8, %rd6;\n+ ld.global.f32 %f2, [%rd9];\n+ cvt.f64.f32 %fd3, %f2;\n+ mul.f64 %fd4, %fd3, %fd1;\n+ sub.f64 %fd5, %fd2, %fd4;\n+ cvta.to.global.u64 %rd10, %rd2;\n+ add.s64 %rd11, %rd10, %rd6;\n+ ld.global.f32 %f3, [%rd11];\n+ cvt.f64.f32 %fd6, %f3;\n+ add.f64 %fd7, %fd1, 0d3FF0000000000000;\n+ fma.rn.f64 %fd8, %fd7, %fd6, %fd5;\n+ cvt.rn.f32.f64 %f4, %fd8;\n+ cvta.to.global.u64 %rd12, %rd4;\n+ add.s64 %rd13, %rd12, %rd6;\n+ st.global.f32 [%rd13], %f4;\n+\n+BB115_2:\n+ ret;\n+}\n+\n.func (.param .b64 func_retval0) __internal_trig_reduction_slowpathd(\n.param .b64 __internal_trig_reduction_slowpathd_param_0,\n.param .b64 __internal_trig_reduction_slowpathd_param_1\n)\n{\n- .local .align 8 .b8 __local_depot114[40];\n+ .local .align 8 .b8 __local_depot116[40];\n.reg .b64 %SP;\n.reg .b64 %SPL;\n.reg .pred %p<9>;\n@@ -12991,7 +13098,7 @@ BB113_2:\n.reg .b64 %rd<102>;\n- mov.u64 %rd101, __local_depot114;\n+ mov.u64 %rd101, __local_depot116;\ncvta.local.u64 %SP, %rd101;\nld.param.f64 %fd4, [__internal_trig_reduction_slowpathd_param_0];\nld.param.u64 %rd37, [__internal_trig_reduction_slowpathd_param_1];\n@@ -13005,7 +13112,7 @@ BB113_2:\nshr.u32 %r3, %r1, 20;\nbfe.u32 %r4, %r1, 20, 11;\nsetp.eq.s32 %p1, %r4, 2047;\n- @%p1 bra BB114_13;\n+ @%p1 bra BB116_13;\nadd.s32 %r15, %r4, -1024;\nshr.u32 %r16, %r15, 6;\n@@ -13018,7 +13125,7 @@ BB113_2:\nmov.u64 %rd94, 0;\nsetp.ge.s32 %p2, %r5, %r6;\nmov.u64 %rd93, %rd1;\n- @%p2 bra BB114_4;\n+ @%p2 bra BB116_4;\nmov.b64 %rd41, %fd4;\nshl.b64 %rd42, %rd41, 11;\n@@ -13035,7 +13142,7 @@ BB113_2:\nmov.u64 %rd91, %rd1;\nmov.u32 %r39, %r5;\n-BB114_3:\n+BB116_3:\n.pragma \"nounroll\";\nld.const.u64 %rd47, [%rd89];\n// inline asm\n@@ -13065,15 +13172,15 @@ BB114_3:\nadd.s64 %rd93, %rd93, 8;\nadd.s64 %rd89, %rd89, 8;\nsetp.lt.s32 %p3, %r39, %r6;\n- @%p3 bra BB114_3;\n+ @%p3 bra BB116_3;\n-BB114_4:\n+BB116_4:\nst.local.u64 [%rd93], %rd94;\nld.local.u64 %rd95, [%rd1+16];\nld.local.u64 %rd96, [%rd1+24];\nand.b32 %r9, %r3, 63;\nsetp.eq.s32 %p4, %r9, 0;\n- @%p4 bra BB114_6;\n+ @%p4 bra BB116_6;\nmov.u32 %r27, 64;\nsub.s32 %r28, %r27, %r9;\n@@ -13085,7 +13192,7 @@ BB114_4:\nshr.u64 %rd55, %rd54, %r28;\nor.b64 %rd95, %rd55, %rd53;\n-BB114_6:\n+BB116_6:\ncvta.to.local.u64 %rd56, %rd37;\nshr.u64 %rd57, %rd96, 62;\ncvt.u32.u64 %r29, %rd57;\n@@ -13102,7 +13209,7 @@ BB114_6:\nselp.b32 %r34, %r32, %r33, %p5;\nst.local.u32 [%rd56], %r34;\nsetp.eq.s32 %p6, %r31, 0;\n- @%p6 bra BB114_8;\n+ @%p6 bra BB116_8;\nmov.u64 %rd64, 0;\n// inline asm\n@@ -13122,10 +13229,10 @@ BB114_6:\n// inline asm\nxor.b32 %r40, %r40, -2147483648;\n-BB114_8:\n+BB116_8:\nclz.b64 %r41, %rd98;\nsetp.eq.s32 %p7, %r41, 0;\n- @%p7 bra BB114_10;\n+ @%p7 bra BB116_10;\nshl.b64 %rd67, %rd98, 
%r41;\nmov.u32 %r35, 64;\n@@ -13133,7 +13240,7 @@ BB114_8:\nshr.u64 %rd68, %rd97, %r36;\nor.b64 %rd98, %rd68, %rd67;\n-BB114_10:\n+BB116_10:\nmov.u64 %rd72, -3958705157555305931;\n// inline asm\n{\n@@ -13154,7 +13261,7 @@ BB114_10:\n}\n// inline asm\nsetp.lt.s64 %p8, %rd100, 1;\n- @%p8 bra BB114_12;\n+ @%p8 bra BB116_12;\n// inline asm\n{\n@@ -13173,7 +13280,7 @@ BB114_10:\n// inline asm\nadd.s32 %r41, %r41, 1;\n-BB114_12:\n+BB116_12:\ncvt.u64.u32 %rd79, %r40;\nshl.b64 %rd80, %rd79, 32;\nmov.u32 %r37, 1022;\n@@ -13188,7 +13295,7 @@ BB114_12:\nor.b64 %rd88, %rd87, %rd80;\nmov.b64 %fd4, %rd88;\n-BB114_13:\n+BB116_13:\nst.param.f64 [func_retval0+0], %fd4;\nret;\n}\n@@ -13216,7 +13323,7 @@ BB114_13:\n}\nshr.u32 %r51, %r50, 20;\nsetp.ne.s32 %p1, %r51, 0;\n- @%p1 bra BB115_2;\n+ @%p1 bra BB117_2;\nmul.f64 %fd14, %fd12, 0d4350000000000000;\n{\n@@ -13230,13 +13337,13 @@ BB114_13:\nshr.u32 %r16, %r50, 20;\nadd.s32 %r51, %r16, -54;\n-BB115_2:\n+BB117_2:\nadd.s32 %r52, %r51, -1023;\nand.b32 %r17, %r50, -2146435073;\nor.b32 %r18, %r17, 1072693248;\nmov.b64 %fd135, {%r49, %r18};\nsetp.lt.u32 %p2, %r18, 1073127583;\n- @%p2 bra BB115_4;\n+ @%p2 bra BB117_4;\n{\n.reg .b32 %temp;\n@@ -13250,7 +13357,7 @@ BB115_2:\nmov.b64 %fd135, {%r19, %r21};\nadd.s32 %r52, %r51, -1022;\n-BB115_4:\n+BB117_4:\nadd.f64 %fd15, %fd135, 0d3FF0000000000000;\nrcp.approx.ftz.f64 %fd16, %fd15;\nneg.f64 %fd17, %fd15;\n@@ -13413,13 +13520,13 @@ BB115_4:\nmov.b32 %f2, %r35;\nabs.f32 %f1, %f2;\nsetp.lt.f32 %p4, %f1, 0f4086232B;\n- @%p4 bra BB115_7;\n+ @%p4 bra BB117_7;\nsetp.lt.f64 %p5, %fd4, 0d0000000000000000;\nadd.f64 %fd129, %fd4, 0d7FF0000000000000;\nselp.f64 %fd136, 0d0000000000000000, %fd129, %p5;\nsetp.geu.f32 %p6, %f1, 0f40874800;\n- @%p6 bra BB115_7;\n+ @%p6 bra BB117_7;\nmov.f64 %fd134, 0d4338000000000000;\nmov.f64 %fd133, 0d3FF71547652B82FE;\n@@ -13441,26 +13548,26 @@ BB115_4:\nmov.b64 %fd131, {%r44, %r43};\nmul.f64 %fd136, %fd130, %fd131;\n-BB115_7:\n+BB117_7:\n{\n.reg .b32 %temp;\nmov.b64 {%temp, %r45}, %fd136;\n}\nand.b32 %r46, %r45, 2147483647;\nsetp.ne.s32 %p7, %r46, 2146435072;\n- @%p7 bra BB115_9;\n+ @%p7 bra BB117_9;\n{\n.reg .b32 %temp;\nmov.b64 {%r47, %temp}, %fd136;\n}\nsetp.eq.s32 %p8, %r47, 0;\n- @%p8 bra BB115_10;\n+ @%p8 bra BB117_10;\n-BB115_9:\n+BB117_9:\nfma.rn.f64 %fd136, %fd136, %fd5, %fd136;\n-BB115_10:\n+BB117_10:\nst.param.f64 [func_retval0+0], %fd136;\nret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "new_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "diff": "@@ -136,6 +136,7 @@ public class DnnOp extends MultiThreadedHop\n}\ncase BATCH_NORM2D_TEST:\ncase CHANNEL_SUMS:\n+ case UPDATE_NESTEROV_X:\n{\nif(et == ExecType.GPU) {\nsetLops(constructDnnLops(et, inputs));\n@@ -175,6 +176,8 @@ public class DnnOp extends MultiThreadedHop\nreturn 6;\ncase CHANNEL_SUMS:\nreturn 3;\n+ case UPDATE_NESTEROV_X:\n+ return 4;\ndefault:\nreturn 13;\n}\n@@ -528,7 +531,9 @@ public class DnnOp extends MultiThreadedHop\n// [numRows, numCols, NNZ]\nlong[] ret = new long[3];\n- if(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST) {\n+ if(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST ||\n+ op == OpOpDnn.UPDATE_NESTEROV_X) {\n+ // Same dimension as the first input\nMatrixCharacteristics[] mc = memo.getAllInputStats(getInput());\nret[0] = mc[0].rowsKnown() ? mc[0].getRows() : -1;\nret[1] = mc[0].colsKnown() ? 
mc[0].getCols() : -1;\n@@ -734,7 +739,8 @@ public class DnnOp extends MultiThreadedHop\n@Override\npublic void refreshSizeInformation()\n{\n- if(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST) {\n+ if(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST || op == OpOpDnn.UPDATE_NESTEROV_X) {\n+ // Same dimension as the first input\nHop input1 = getInput().get(0);\nsetDim1(input1.getDim1());\nsetDim2(input1.getDim2());\n@@ -840,8 +846,9 @@ public class DnnOp extends MultiThreadedHop\n* @return either -1 or value associated with the dimString\n*/\nprivate long getDim(String dimString) {\n- if(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST || op == OpOpDnn.CHANNEL_SUMS) {\n- throw new RuntimeException(\"getDim method should not be invoked for batch_norm_test, channel_sums, bias_add and bias_multiply\");\n+ if(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST || op == OpOpDnn.CHANNEL_SUMS ||\n+ op == OpOpDnn.UPDATE_NESTEROV_X) {\n+ throw new RuntimeException(\"getDim method should not be invoked for \" + op.name());\n}\ntry {\nparseInput();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -1099,7 +1099,8 @@ public abstract class Hop implements ParseInfo\npublic enum OpOpDnn {\nMAX_POOL, MAX_POOL_BACKWARD, AVG_POOL, AVG_POOL_BACKWARD,\nCONV2D, CONV2D_BACKWARD_FILTER, CONV2D_BACKWARD_DATA,\n- BIASADD, BIASMULT, BATCH_NORM2D_TEST, CHANNEL_SUMS\n+ BIASADD, BIASMULT, BATCH_NORM2D_TEST, CHANNEL_SUMS,\n+ UPDATE_NESTEROV_X\n}\npublic enum DataGenMethod {\n@@ -1174,6 +1175,7 @@ public abstract class Hop implements ParseInfo\nHopsConv2Lops.put(OpOpDnn.CONV2D_BACKWARD_DATA, org.apache.sysml.lops.DnnTransform.OperationTypes.CONV2D_BACKWARD_DATA);\nHopsConv2Lops.put(OpOpDnn.BATCH_NORM2D_TEST, org.apache.sysml.lops.DnnTransform.OperationTypes.BATCH_NORM2D_TEST);\nHopsConv2Lops.put(OpOpDnn.CHANNEL_SUMS, org.apache.sysml.lops.DnnTransform.OperationTypes.CHANNEL_SUMS);\n+ HopsConv2Lops.put(OpOpDnn.UPDATE_NESTEROV_X, org.apache.sysml.lops.DnnTransform.OperationTypes.UPDATE_NESTEROV_X);\n}\nprotected static final HashMap<Hop.Direction, org.apache.sysml.lops.PartialAggregate.DirectionTypes> HopsDirection2Lops;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "diff": "@@ -124,6 +124,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n}\nhi = batchNormTest(hop, hi, i);\nhi = channelSums(hop, hi, i);\n+ hi = updateNesterovX(hop, hi, i);\nif( !descendFirst )\nrule_GPUKernels(roots, hi, descendFirst);\n@@ -281,6 +282,11 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n&& getFirstInput(h).getDataType() == DataType.MATRIX && getSecondInput(h).getDataType() == DataType.MATRIX;\n}\n+ private static boolean isBinaryMMMinus(Hop h) {\n+ return h instanceof BinaryOp && ((BinaryOp)h).getOp() == OpOp2.MINUS\n+ && getFirstInput(h).getDataType() == DataType.MATRIX && getSecondInput(h).getDataType() == DataType.MATRIX;\n+ }\n+\nprivate static boolean isBinaryMSMult(Hop h, double expectedValue) {\nreturn h instanceof BinaryOp && ((BinaryOp)h).getOp() == OpOp2.MULT\n&& getFirstInput(h).getDataType() == DataType.MATRIX && getSecondInput(h).getDataType() == DataType.SCALAR\n@@ -323,6 +329,16 @@ public class 
RewriteGPUSpecificOps extends HopRewriteRule {\n&& getSecondInput(h).getDataType() == DataType.MATRIX && getFirstInput(h).getDataType() == DataType.SCALAR;\n}\n+ private static boolean isBinarySMMult(Hop h, double expectedVal) {\n+ return h instanceof BinaryOp && ((BinaryOp)h).getOp() == OpOp2.MULT\n+ && getSecondInput(h).getDataType() == DataType.MATRIX && getFirstInput(h).getDataType() == DataType.SCALAR\n+ && getValue(getFirstInput(h)) == expectedVal;\n+ }\n+\n+ private static double getValue(Hop h) {\n+ return OptimizerUtils.rEvalSimpleDoubleExpression(h, new HashMap<>());\n+ }\n+\n/**\n* Checks if the \"mean\" hop is a moving average of mean in batch normalization layer.\n*\n@@ -703,6 +719,51 @@ public class RewriteGPUSpecificOps extends HopRewriteRule {\n}\n// ------------------------------------------------------------\n+ /**\n+ * Checks for the nesterov_update_x pattern (X = X - mu*v_prev + (1+mu)*v)\n+ * and returns a new DnnOp if matched\n+ *\n+ * @param parent parent of the input\n+ * @param hi input to be matched\n+ * @param pos position\n+ * @return a new DnnOp or hi\n+ */\n+ private static Hop updateNesterovX(Hop parent, Hop hi, int pos) {\n+ if(fitsOnGPU(hi, 4) && isBinaryMMAdd(hi) && isBinaryMMMinus(getFirstInput(hi))\n+ && isBinarySMMult(getSecondInput(getFirstInput(hi)))\n+ && isBinarySMMult(getSecondInput(hi))) {\n+ Hop onePlusMu = getFirstInput(getSecondInput(hi));\n+ Hop tmp = getSecondInput(getFirstInput(hi));\n+ Hop mu = getFirstInput(tmp);\n+ if(isOnePlusMu(onePlusMu, mu)) {\n+ Hop v_prev = getSecondInput(tmp);\n+ Hop v = getSecondInput(getSecondInput(hi));\n+ Hop X = getFirstInput(getFirstInput(hi));\n+ if(hasSameDimensions(X, v) && hasSameDimensions(X, v_prev)) {\n+ ArrayList<Hop> inHops = new ArrayList<Hop>();\n+ inHops.add(X);\n+ inHops.add(v);\n+ inHops.add(v_prev);\n+ inHops.add(mu);\n+ LOG.debug(\"Applied updateNesterovX rewrite.\");\n+ Hop newHop = new DnnOp(hi.getName(), hi.getDataType(), hi.getValueType(),\n+ OpOpDnn.UPDATE_NESTEROV_X, inHops);\n+ return HopRewriteUtils.rewireAllParentChildReferences(hi, newHop);\n+ }\n+ }\n+ }\n+ return hi;\n+ }\n+\n+ private static boolean hasSameDimensions(Hop x, Hop y) {\n+ return x.dimsKnown() && y.dimsKnown() && (x.getDim1() == y.getDim1()) && (x.getDim2() == y.getDim2());\n+ }\n+\n+ private static boolean isOnePlusMu(Hop onePlusMu, Hop mu) {\n+ return (isBinarySMMult(onePlusMu, 1.0) && getSecondInput(onePlusMu) == mu) ||\n+ getValue(onePlusMu) == getValue(mu) + 1;\n+ }\n+\n/**\n* Checks for the batch norm (mode=\"test\") pattern using the helper isBatchNormTrainMean and isBatchNormTrainVar\n* and returns a new DnnOp if matched\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/DnnTransform.java", "new_path": "src/main/java/org/apache/sysml/lops/DnnTransform.java", "diff": "@@ -31,7 +31,8 @@ public class DnnTransform extends Lop\nMAX_POOL, MAX_POOL_BACKWARD, AVG_POOL, AVG_POOL_BACKWARD,\nRELU_MAX_POOLING, RELU_MAX_POOLING_BACKWARD, RELU_BACKWARD,\nCONV2D, CONV2D_BACKWARD_FILTER, CONV2D_BACKWARD_DATA,\n- BIAS_ADD, CONV2D_BIAS_ADD, BIAS_MULTIPLY, CHANNEL_SUMS, BATCH_NORM2D_TEST\n+ BIAS_ADD, CONV2D_BIAS_ADD, BIAS_MULTIPLY, CHANNEL_SUMS, BATCH_NORM2D_TEST,\n+ UPDATE_NESTEROV_X\n}\nprivate OperationTypes operation;\n@@ -166,6 +167,9 @@ public class DnnTransform extends Lop\ncase CHANNEL_SUMS:\nreturn \"channel_sums\";\n+ case UPDATE_NESTEROV_X:\n+ return \"update_nesterov_x\";\n+\ncase BATCH_NORM2D_TEST:\nreturn \"batch_norm2d_test\";\n@@ -231,6 +235,33 @@ public class DnnTransform extends 
Lop\n}\n}\n+ @Override\n+ public String getInstructions(String input1, String input2, String input3, String input4, String output) {\n+ if(operation == OperationTypes.UPDATE_NESTEROV_X) {\n+ StringBuilder sb = new StringBuilder();\n+ sb.append( getExecType() );\n+\n+ sb.append( OPERAND_DELIMITOR );\n+ sb.append( getOpcode() );\n+ sb.append( OPERAND_DELIMITOR );\n+ sb.append( getInputs().get(0).prepInputOperand(input1));\n+ sb.append( OPERAND_DELIMITOR );\n+ sb.append( getInputs().get(1).prepInputOperand(input2));\n+ sb.append( OPERAND_DELIMITOR );\n+ sb.append( getInputs().get(2).prepInputOperand(input3));\n+ sb.append( OPERAND_DELIMITOR );\n+ sb.append( getInputs().get(3).prepInputOperand(input4));\n+ //output\n+ sb.append( OPERAND_DELIMITOR );\n+ sb.append( this.prepOutputOperand(output));\n+\n+ return sb.toString();\n+ }\n+ else {\n+ throw new LopsException(\"The operation is not supported with three operands:\" + operation.name());\n+ }\n+ }\n+\n@Override\npublic String getInstructions(String[] inputs, String output) {\nStringBuilder sb = new StringBuilder();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -63,6 +63,7 @@ public class GPUInstructionParser extends InstructionParser\nString2GPUInstructionType.put( \"batch_norm2d_backward\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"batch_norm2d_test\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"batch_norm2d_train\", GPUINSTRUCTION_TYPE.Dnn);\n+ String2GPUInstructionType.put( \"update_nesterov_x\", GPUINSTRUCTION_TYPE.Dnn);\n// Matrix Multiply Operators\nString2GPUInstructionType.put( \"ba+*\", GPUINSTRUCTION_TYPE.AggregateBinary);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "diff": "@@ -124,6 +124,21 @@ public class DnnGPUInstruction extends GPUInstruction {\n_intermediateMemoryBudget = intermediateMemoryBudget;\n}\n+ public DnnGPUInstruction(CPOperand in1, CPOperand in2, CPOperand in3, CPOperand in4, CPOperand out, String opcode, String istr,\n+ double intermediateMemoryBudget) throws DMLRuntimeException {\n+ super(new ReorgOperator(SwapIndex.getSwapIndexFnObject()), opcode, istr);\n+ if( !opcode.equals(\"update_nesterov_x\") ) {\n+ throw new DMLRuntimeException(\"Incorrect opcode: \" + opcode);\n+ }\n+ _input1 = in1;\n+ _input2 = in2;\n+ _input3 = in3;\n+ _input4 = in4;\n+ _gputype = GPUINSTRUCTION_TYPE.Dnn;\n+ _output = out;\n+ _intermediateMemoryBudget = intermediateMemoryBudget;\n+ }\n+\npublic DnnGPUInstruction(CPOperand in1, CPOperand in2, CPOperand in3, CPOperand out, String opcode,\nString istr, ArrayList<CPOperand> stride,\nArrayList<CPOperand> padding, ArrayList<CPOperand> input_shape,\n@@ -298,6 +313,15 @@ public class DnnGPUInstruction extends GPUInstruction {\nCPOperand out = new CPOperand(parts[4]);\nreturn new DnnGPUInstruction(in, in2, in3, out, opcode, str, 0);\n}\n+ else if (opcode.equalsIgnoreCase(\"update_nesterov_x\")) {\n+ InstructionUtils.checkNumFields(parts, 5);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand in3 = new CPOperand(parts[3]);\n+ CPOperand in4 = new CPOperand(parts[4]);\n+ CPOperand out = new CPOperand(parts[5]);\n+ return new DnnGPUInstruction(in, in2, in3, 
in4, out, opcode, str, 0);\n+ }\nelse if (opcode.equalsIgnoreCase(\"lstm\")) {\nInstructionUtils.checkNumFields(parts, 8);\nCPOperand in1 = new CPOperand(parts[1]);\n@@ -552,6 +576,34 @@ public class DnnGPUInstruction extends GPUInstruction {\nec.releaseMatrixOutputForGPUInstruction(_output.getName());\n}\n+ private void processNesterovUpdateInstruction(ExecutionContext ec) {\n+ GPUStatistics.incrementNoOfExecutedGPUInst();;\n+ MatrixObject input = getMatrixInputForGPUInstruction(ec, _input1.getName());\n+ MatrixObject v = getMatrixInputForGPUInstruction(ec, _input2.getName());\n+ MatrixObject v_prev = getMatrixInputForGPUInstruction(ec, _input3.getName());\n+ double mu = (int) ec.getScalarInput(_input4.getName(), _input4.getValueType(), _input4.isLiteral()).getDoubleValue();\n+ int rows = LibMatrixCUDA.toInt(input.getNumRows());\n+ int cols = LibMatrixCUDA.toInt(input.getNumColumns());\n+ MatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, _output.getName(), rows, cols);\n+\n+ GPUContext gCtx = ec.getGPUContext(0);\n+ String instName = getExtendedOpcode();\n+ LibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"update_nesterov_x\",\n+ ExecutionConfig.getConfigForSimpleVectorOperations(LibMatrixCUDA.toInt(rows*cols)),\n+ LibMatrixCUDA.getDensePointer(gCtx, input, instName),\n+ LibMatrixCUDA.getDensePointer(gCtx, v, instName),\n+ LibMatrixCUDA.getDensePointer(gCtx, v_prev, instName),\n+ mu,\n+ LibMatrixCUDA.getDensePointer(gCtx, out, instName),\n+ rows*cols);\n+\n+ // release inputs/outputs\n+ ec.releaseMatrixInputForGPUInstruction(_input1.getName());\n+ ec.releaseMatrixInputForGPUInstruction(_input2.getName());\n+ ec.releaseMatrixInputForGPUInstruction(_input3.getName());\n+ ec.releaseMatrixOutputForGPUInstruction(_output.getName());\n+ }\n+\nprivate static int toInt(long num) throws DMLRuntimeException {\nif(num >= Integer.MAX_VALUE || num <= Integer.MIN_VALUE) {\nthrow new DMLRuntimeException(\"GPU : Exceeded supported size \" + num);\n@@ -697,6 +749,10 @@ public class DnnGPUInstruction extends GPUInstruction {\nprocessChannelSumsInstruction(ec);\nreturn;\n}\n+ else if (instOpcode.equalsIgnoreCase(\"update_nesterov_x\")) {\n+ processNesterovUpdateInstruction(ec);\n+ return;\n+ }\nelse if (instOpcode.equalsIgnoreCase(\"lstm\")) {\nprocessLstmInstruction(ec);\nreturn;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -405,7 +405,8 @@ public class GPUMemoryManager {\nallPointers.remove(toFree);\nlazyCudaFreeMemoryManager.removeIfPresent(size, toFree);\nallocator.free(toFree);\n- // JCuda.cudaDeviceSynchronize(); // Force a device synchronize after free-ing the pointer for debugging\n+ if(DMLScript.SYNCHRONIZE_GPU)\n+ jcuda.runtime.JCuda.cudaDeviceSynchronize(); // Force a device synchronize after free-ing the pointer for debugging\n}\nelse {\nthrow new RuntimeException(\"Attempting to free an unaccounted pointer:\" + toFree);\n@@ -447,7 +448,7 @@ public class GPUMemoryManager {\npublic void removeGPUObject(GPUObject gpuObj) {\nif(LOG.isDebugEnabled())\nLOG.debug(\"Removing the GPU object: \" + gpuObj);\n- matrixMemoryManager.gpuObjects.removeIf(a -> a.equals(gpuObj));\n+ matrixMemoryManager.gpuObjects.remove(gpuObj);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/GPUTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/GPUTests.java", 
"diff": "@@ -312,6 +312,16 @@ public abstract class GPUTests extends AutomatedTestBase {\nAssert.assertTrue(heavyHitterOpCodes.contains(heavyHitterOpCode));\n}\n+ /**\n+ * asserts that the expected op was executed\n+ *\n+ * @param heavyHitterOpCode opcode of the heavy hitter for the unary op\n+ */\n+ protected void assertHeavyHitterNotPresent(String heavyHitterOpCode) {\n+ Set<String> heavyHitterOpCodes = Statistics.getCPHeavyHitterOpCodes();\n+ Assert.assertTrue(!heavyHitterOpCodes.contains(heavyHitterOpCode));\n+ }\n+\n/**\n* Runs a program on the CPU\n*\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/gpu/SGDUpdate.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.gpu;\n+\n+import java.util.Arrays;\n+import java.util.HashMap;\n+import java.util.List;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n+\n+/**\n+ * Tests update rewrites for SGD\n+ */\n+public class SGDUpdate extends GPUTests {\n+\n+ private final static String TEST_NAME = \"SGDUpdateTests\";\n+ private final int seed = 42;\n+\n+ @Override\n+ public void setUp() {\n+ super.setUp();\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_DIR, TEST_NAME);\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ }\n+\n+ @Test\n+ public void testNesterovRewrite() {\n+ String scriptStr = \"mu=0.99; output = x - mu*v_prev + (1+mu)*v;\" ;\n+ int inRows = 10;\n+ int inCols = 30;\n+ HashMap<String, Object> inputs = new HashMap<>();\n+ inputs.put(\"x\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ inputs.put(\"v_prev\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ inputs.put(\"v\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ List<String> outputs = Arrays.asList(\"output\");\n+ List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, outputs);\n+ List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, outputs);\n+ assertHeavyHitterPresent(\"gpu_update_nesterov_x\");\n+ assertEqualObjects(outCPU.get(0), outGPU.get(0));\n+ }\n+\n+ @Test\n+ public void testNoNesterovRewrite1() {\n+ String scriptStr = \"mu=0.99; output = x - mu*v_prev + (1+mu)*v;\" ;\n+ int inRows = 10;\n+ int inCols = 30;\n+ HashMap<String, Object> inputs = new HashMap<>();\n+ inputs.put(\"x\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ inputs.put(\"v_prev\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ inputs.put(\"v\", generateInputMatrix(spark, inRows, 1, 0, 10, 0.9, seed));\n+ List<String> outputs = Arrays.asList(\"output\");\n+ List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, outputs);\n+ List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, 
outputs);\n+ assertHeavyHitterNotPresent(\"gpu_update_nesterov_x\");\n+ assertEqualObjects(outCPU.get(0), outGPU.get(0));\n+ }\n+\n+ @Test\n+ public void testNoNesterovRewrite2() {\n+ String scriptStr = \"mu=0.99; output = x - mu*v_prev + mu*v;\" ;\n+ int inRows = 10;\n+ int inCols = 30;\n+ HashMap<String, Object> inputs = new HashMap<>();\n+ inputs.put(\"x\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ inputs.put(\"v_prev\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ inputs.put(\"v\", generateInputMatrix(spark, inRows, inCols, 0, 10, 0.9, seed));\n+ List<String> outputs = Arrays.asList(\"output\");\n+ List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, outputs);\n+ List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, outputs);\n+ assertHeavyHitterNotPresent(\"gpu_update_nesterov_x\");\n+ assertEqualObjects(outCPU.get(0), outGPU.get(0));\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Added SGD Nesterov update operator via rewrite for the GPU backend - This leads to 10-15% speedup for ResNet200 with batch size of 32. - Also, added GPU tests for this operator.
49,760
09.08.2018 19:12:28
25,200
ff4dbb3ee893b2609fa8111717d71f1bbfd46fa2
Extended bitset estimator for rbind, various cleanups Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "diff": "@@ -70,24 +70,13 @@ public class EstimatorBasicAvg extends SparsityEstimator\nOptimizerUtils.getNnz(mc1.getRows(), mc1.getCols(),\nmc1.getSparsity() + mc2.getSparsity() - mc1.getSparsity() * mc2.getSparsity()));\ncase EQZERO:\n- return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n- (long) mc1.getRows() * mc1.getCols() - mc1.getNonZeros());\ncase DIAG:\n- return (mc1.getCols() == 1) ?\n- new MatrixCharacteristics(mc1.getRows(), mc1.getRows(), mc1.getNonZeros()) :\n- new MatrixCharacteristics(mc1.getRows(), 1, Math.min(mc1.getRows(), mc1.getNonZeros()));\n- // binary operations that preserve sparsity exactly\ncase CBIND:\n- return new MatrixCharacteristics(mc1.getRows(),\n- mc1.getCols() + mc2.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\ncase RBIND:\n- return new MatrixCharacteristics(mc1.getRows() + mc2.getRows(),\n- mc1.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n- // unary operation that preserve sparsity exactly\ncase NEQZERO:\ncase TRANS:\ncase RESHAPE:\n- return mc1;\n+ return estimExactMetaData(mc1, mc2, op);\ndefault:\nthrow new NotImplementedException();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "diff": "@@ -74,24 +74,13 @@ public class EstimatorBasicWorst extends SparsityEstimator\nOptimizerUtils.getNnz(mc1.getRows(), mc1.getCols(),\nMath.min(mc1.getSparsity() + mc2.getSparsity(), 1)));\ncase EQZERO:\n- return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n- (long) mc1.getRows() * mc1.getCols() - mc1.getNonZeros());\ncase DIAG:\n- return (mc1.getCols() == 1) ?\n- new MatrixCharacteristics(mc1.getRows(), mc1.getRows(), mc1.getNonZeros()) :\n- new MatrixCharacteristics(mc1.getRows(), 1, Math.min(mc1.getRows(), mc1.getNonZeros()));\n- // binary operations that preserve sparsity exactly\ncase CBIND:\n- return new MatrixCharacteristics(mc1.getRows(),\n- mc1.getCols() + mc2.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\ncase RBIND:\n- return new MatrixCharacteristics(mc1.getRows() + mc2.getRows(),\n- mc1.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n- // unary operation that preserve sparsity exactly\ncase NEQZERO:\ncase TRANS:\ncase RESHAPE:\n- return mc1;\n+ return estimExactMetaData(mc1, mc2, op);\ndefault:\nthrow new NotImplementedException();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -80,6 +80,9 @@ public class EstimatorDensityMap extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ if( isExactMetadataOp(op) )\n+ return estimExactMetaData(m1.getMatrixCharacteristics(),\n+ m2.getMatrixCharacteristics(), op).getSparsity();\nDensityMap m1Map = new DensityMap(m1, _b);\nDensityMap m2Map = (m1 == m2) ? 
//self product\nm1Map : new DensityMap(m2, _b);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -68,7 +68,6 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//estimate output sparsity based on input histograms\ndouble ret = estimIntern(h1, h2, root.getOp());\n-\nMatrixHistogram outMap = MatrixHistogram.deriveOutputHistogram(h1, h2, ret, root.getOp());\nroot.setSynopsis(outMap);\nreturn root.setMatrixCharacteristics(new MatrixCharacteristics(\n@@ -83,6 +82,9 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ if( isExactMetadataOp(op) )\n+ return estimExactMetaData(m1.getMatrixCharacteristics(),\n+ m2.getMatrixCharacteristics(), op).getSparsity();\nMatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\nMatrixHistogram h2 = (m1 == m2) ? //self product\nh1 : new MatrixHistogram(m2, _useExcepts);\n@@ -91,6 +93,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, OpCode op) {\n+ if( isExactMetadataOp(op) )\n+ return estimExactMetaData(m1.getMatrixCharacteristics(), null, op).getSparsity();\nMatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\nreturn estimIntern(h1, null, op);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "diff": "package org.apache.sysml.hops.estim;\n+import org.apache.commons.lang.ArrayUtils;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -33,6 +35,11 @@ public abstract class SparsityEstimator\npublic static boolean MULTI_THREADED_ESTIM = false;\npublic static final int MIN_PAR_THRESHOLD = 10 * 1024;\n+ private static OpCode[] EXACT_META_DATA_OPS = new OpCode[] {\n+ OpCode.EQZERO, OpCode.NEQZERO, OpCode.CBIND,\n+ OpCode.RBIND, OpCode.TRANS, OpCode.DIAG, OpCode.RESHAPE\n+ };\n+\npublic static enum OpCode {\nMM,\nMULT, PLUS, EQZERO, NEQZERO,\n@@ -77,4 +84,34 @@ public abstract class SparsityEstimator\n* @return sparsity\n*/\npublic abstract double estim(MatrixBlock m, OpCode op);\n+\n+ protected boolean isExactMetadataOp(OpCode op) {\n+ return ArrayUtils.contains(EXACT_META_DATA_OPS, op);\n+ }\n+\n+ protected MatrixCharacteristics estimExactMetaData(MatrixCharacteristics mc1, MatrixCharacteristics mc2, OpCode op) {\n+ switch( op ) {\n+ case EQZERO:\n+ return new MatrixCharacteristics(mc1.getRows(), mc1.getCols(),\n+ (long) mc1.getRows() * mc1.getCols() - mc1.getNonZeros());\n+ case DIAG:\n+ return (mc1.getCols() == 1) ?\n+ new MatrixCharacteristics(mc1.getRows(), mc1.getRows(), mc1.getNonZeros()) :\n+ new MatrixCharacteristics(mc1.getRows(), 1, Math.min(mc1.getRows(), mc1.getNonZeros()));\n+ // binary operations that preserve sparsity exactly\n+ case CBIND:\n+ return new MatrixCharacteristics(mc1.getRows(),\n+ mc1.getCols() + mc2.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n+ case RBIND:\n+ return new MatrixCharacteristics(mc1.getRows() + mc2.getRows(),\n+ mc1.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n+ // unary operation that preserve sparsity 
exactly\n+ case NEQZERO:\n+ case TRANS:\n+ case RESHAPE:\n+ return mc1;\n+ default:\n+ throw new HopsException(\"Opcode is not an exact meta data operation: \"+op.name());\n+ }\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended bitset estimator for rbind, various cleanups Closes #824.
49,760
14.08.2018 23:19:11
25,200
f296f8f51e990ad6c2c3db9f9e5b2fc8e8108611
Extended sampling-based sparsity estimator, misc fixes Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -50,7 +50,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\nif (!root.getLeft().isLeaf())\nestim(root.getLeft()); // obtain synopsis\nif (!root.getRight().isLeaf())\n- estim(root.getLeft()); // obtain synopsis\n+ estim(root.getRight()); // obtain synopsis\nBitsetMatrix m1Map = !root.getLeft().isLeaf() ? (BitsetMatrix) root.getLeft().getSynopsis() :\nnew BitsetMatrix1(root.getLeft().getData());\nBitsetMatrix m2Map = !root.getRight().isLeaf() ? (BitsetMatrix) root.getRight().getSynopsis() :\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -58,7 +58,7 @@ public class EstimatorDensityMap extends SparsityEstimator\nif( !root.getLeft().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\nif( !root.getRight().isLeaf() )\n- estim(root.getLeft()); //obtain synopsis\n+ estim(root.getRight()); //obtain synopsis\nDensityMap m1Map = !root.getLeft().isLeaf() ?\n(DensityMap)root.getLeft().getSynopsis() :\nnew DensityMap(root.getLeft().getData(), _b);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -58,7 +58,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nif( !root.getLeft().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\nif( !root.getRight().isLeaf() )\n- estim(root.getLeft()); //obtain synopsis\n+ estim(root.getRight()); //obtain synopsis\nMatrixHistogram h1 = !root.getLeft().isLeaf() ?\n(MatrixHistogram)root.getLeft().getSynopsis() :\nnew MatrixHistogram(root.getLeft().getData(), _useExcepts);\n@@ -105,21 +105,21 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncase MM:\nreturn estimInternMM(h1, h2);\ncase MULT: {\n- final long N1 = h1.getNonZeros();\n- final long N2 = h2.getNonZeros();\n+ final double N1 = h1.getNonZeros();\n+ final double N2 = h2.getNonZeros();\nfinal long scale = IntStream.range(0, h1.getCols())\n.mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\nreturn IntStream.range(0, h1.getRows())\n- .mapToLong(i -> (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2) //collisions\n+ .mapToDouble(i -> (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2) //collisions\n.sum() / msize;\n}\ncase PLUS: {\n- final long N1 = h1.getNonZeros();\n- final long N2 = h2.getNonZeros();\n+ final double N1 = h1.getNonZeros();\n+ final double N2 = h2.getNonZeros();\nfinal long scale = IntStream.range(0, h1.getCols())\n.mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\nreturn IntStream.range(0, h1.getRows())\n- .mapToLong(i -> (long)h1.rNnz[i] + h2.rNnz[i] //all minus collisions\n+ .mapToDouble(i -> (long)h1.rNnz[i] + h2.rNnz[i] //all minus collisions\n- (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2)\n.sum() / msize;\n}\n@@ -356,12 +356,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\nprivate static MatrixHistogram deriveMultHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n- final long N1 = h1.getNonZeros();\n- final long N2 = h2.getNonZeros();\n- final long scaler = IntStream.range(0, h1.getCols())\n- .mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n- final long scalec = IntStream.range(0, 
h1.getRows())\n- .mapToLong(j -> (long)h1.rNnz[j] * h2.rNnz[j]).sum();\n+ final double N1 = h1.getNonZeros();\n+ final double N2 = h2.getNonZeros();\n+ final double scaler = IntStream.range(0, h1.getCols())\n+ .mapToDouble(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ final double scalec = IntStream.range(0, h1.getRows())\n+ .mapToDouble(j -> (long)h1.rNnz[j] * h2.rNnz[j]).sum();\nint rMaxNnz = 0, cMaxNnz = 0;\nRandom rn = new Random();\nint[] rNnz = new int[h1.getRows()];\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "diff": "@@ -63,11 +63,16 @@ public class EstimatorSample extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- //get sampled indexes\n+ return estim(m1, m2, OpCode.MM);\n+ }\n+\n+ @Override\n+ public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ switch(op) {\n+ case MM: {\nint k = m1.getNumColumns();\nint[] ix = UtilFunctions.getSortedSampleIndexes(\nk, (int)Math.max(k*_frac, 1));\n- //compute output sparsity\nint[] cnnz = computeColumnNnz(m1, ix);\nlong nnzOut = 0;\nfor(int i=0; i<ix.length; i++)\n@@ -75,15 +80,72 @@ public class EstimatorSample extends SparsityEstimator\nreturn OptimizerUtils.getSparsity(\nm1.getNumRows(), m2.getNumColumns(), nnzOut);\n}\n-\n- @Override\n- public double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ case MULT: {\n+ int k = Math.max(m1.getNumColumns(), m1.getNumRows());\n+ int[] ix = UtilFunctions.getSortedSampleIndexes(\n+ k, (int)Math.max(k*_frac, 1));\n+ double spOut = 0;\n+ if( m1.getNumColumns() > m1.getNumRows() ) {\n+ int[] cnnz1 = computeColumnNnz(m1, ix);\n+ int[] cnnz2 = computeColumnNnz(m2, ix);\n+ for(int i=0; i<ix.length; i++)\n+ spOut += (double)cnnz1[i]/m1.getNumRows()\n+ * (double)cnnz2[i]/m1.getNumRows();\n+ }\n+ else {\n+ int[] rnnz1 = computeRowNnz(m1, ix);\n+ int[] rnnz2 = computeRowNnz(m2, ix);\n+ for(int i=0; i<ix.length; i++)\n+ spOut += (double)rnnz1[i]/m1.getNumColumns()\n+ * (double)rnnz2[i]/m1.getNumColumns();\n+ }\n+ return spOut/ix.length;\n+ }\n+ case PLUS: {\n+ int k = Math.max(m1.getNumColumns(), m1.getNumRows());\n+ int[] ix = UtilFunctions.getSortedSampleIndexes(\n+ k, (int)Math.max(k*_frac, 1));\n+ double spOut = 0;\n+ if( m1.getNumColumns() > m1.getNumRows() ) {\n+ int[] cnnz1 = computeColumnNnz(m1, ix);\n+ int[] cnnz2 = computeColumnNnz(m2, ix);\n+ for(int i=0; i<ix.length; i++) {\n+ spOut += (double)cnnz1[i]/m1.getNumRows()\n+ + (double)cnnz2[i]/m1.getNumRows()\n+ - (double)cnnz1[i]/m1.getNumRows()\n+ * (double)cnnz2[i]/m1.getNumRows();\n+ }\n+ }\n+ else {\n+ int[] rnnz1 = computeRowNnz(m1, ix);\n+ int[] rnnz2 = computeRowNnz(m2, ix);\n+ for(int i=0; i<ix.length; i++) {\n+ spOut += (double)rnnz1[i]/m1.getNumColumns()\n+ + (double)rnnz2[i]/m1.getNumColumns()\n+ - (double)rnnz1[i]/m1.getNumColumns()\n+ * (double)rnnz2[i]/m1.getNumColumns();\n+ }\n+ }\n+ return spOut/ix.length;\n+ }\n+ case RBIND:\n+ case CBIND:\n+ case EQZERO:\n+ case NEQZERO:\n+ case TRANS:\n+ case DIAG:\n+ case RESHAPE:\n+ MatrixCharacteristics mc1 = m1.getMatrixCharacteristics();\n+ MatrixCharacteristics mc2 = m2.getMatrixCharacteristics();\n+ return OptimizerUtils.getSparsity(estimExactMetaData(mc1, mc2, op));\n+ default:\nthrow new NotImplementedException();\n}\n+ }\n@Override\npublic double estim(MatrixBlock m, OpCode op) {\n- throw new NotImplementedException();\n+ return estim(m, null, op);\n}\nprivate int[] 
computeColumnNnz(MatrixBlock in, int[] ix) {\n@@ -113,4 +175,12 @@ public class EstimatorSample extends SparsityEstimator\nret[i] = nnz[ix[i]];\nreturn ret;\n}\n+\n+ private int[] computeRowNnz(MatrixBlock in, int[] ix) {\n+ //copy nnz into reduced vector\n+ int[] ret = new int[ix.length];\n+ for(int i=0; i<ix.length; i++)\n+ ret[i] = (int) in.recomputeNonZeros(ix[i], ix[i]);\n+ return ret;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "diff": "@@ -577,7 +577,7 @@ public class DnnGPUInstruction extends GPUInstruction {\n}\nprivate void processNesterovUpdateInstruction(ExecutionContext ec) {\n- GPUStatistics.incrementNoOfExecutedGPUInst();;\n+ GPUStatistics.incrementNoOfExecutedGPUInst();\nMatrixObject input = getMatrixInputForGPUInstruction(ec, _input1.getName());\nMatrixObject v = getMatrixInputForGPUInstruction(ec, _input2.getName());\nMatrixObject v_prev = getMatrixInputForGPUInstruction(ec, _input3.getName());\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWTest.java", "diff": "@@ -27,6 +27,7 @@ import org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\n+import org.apache.sysml.hops.estim.EstimatorSample;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\nimport org.apache.sysml.runtime.functionobjects.Multiply;\n@@ -42,16 +43,9 @@ public class OpElemWTest extends AutomatedTestBase\n{\nprivate final static int m = 1600;\nprivate final static int n = 700;\n- private final static double[] sparsity = new double[]{0.1, 0.04};\n+ private final static double[] sparsity = new double[]{0.2, 0.4};\nprivate final static OpCode mult = OpCode.MULT;\nprivate final static OpCode plus = OpCode.PLUS;\n-// private final static OpCode rbind = OpCode.RBIND;\n-// private final static OpCode cbind = OpCode.CBIND;\n-// private final static OpCode eqzero = OpCode.EQZERO;\n-// private final static OpCode diag = OpCode.DIAG;\n-// private final static OpCode neqzero = OpCode.NEQZERO;\n-// private final static OpCode trans = OpCode.TRANS;\n-// private final static OpCode reshape = OpCode.RESHAPE;\n@Override\npublic void setUp() {\n@@ -103,12 +97,12 @@ public class OpElemWTest extends AutomatedTestBase\n//Bitset\n@Test\n- public void testBitsetCasemult() {\n+ public void testBitsetMult() {\nrunSparsityEstimateTest(new EstimatorBitsetMM(), m, n, sparsity, mult);\n}\n@Test\n- public void testBitsetCaseplus() {\n+ public void testBitsetPlus() {\nrunSparsityEstimateTest(new EstimatorBitsetMM(), m, n, sparsity, plus);\n}\n/*\n@@ -121,19 +115,18 @@ public class OpElemWTest extends AutomatedTestBase\n@Test\npublic void testLGCaseplus() {\nrunSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, sparsity, plus);\n- }\n+ }*/\n//Sample\n@Test\n- public void testSampleCasemult() {\n- runSparsityEstimateTest(new EstimatorSample(), m, k, n, sparsity, mult);\n+ public void testSampleMult() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, n, sparsity, mult);\n}\n@Test\n- public void testSampleCaseplus() {\n- runSparsityEstimateTest(new 
EstimatorSample(), m, k, n, sparsity, plus);\n- }*/\n-\n+ public void testSamplePlus() {\n+ runSparsityEstimateTest(new EstimatorSample(), m, n, sparsity, plus);\n+ }\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int n, double[] sp, OpCode op) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, n, sp[0], 1, 1, \"uniform\", 3);\n@@ -146,20 +139,20 @@ public class OpElemWTest extends AutomatedTestBase\nbOp = new BinaryOperator(Multiply.getMultiplyFnObject());\nm1.binaryOperations(bOp, m2, m3);\nest = estim.estim(m1, m2, op);\n- System.out.println(m3.getSparsity());\nSystem.out.println(est);\n+ System.out.println(m3.getSparsity());\nbreak;\ncase PLUS:\nbOp = new BinaryOperator(Plus.getPlusFnObject());\nm1.binaryOperations(bOp, m2, m3);\nest = estim.estim(m1, m2, op);\n- System.out.println(m3.getSparsity());\nSystem.out.println(est);\n+ System.out.println(m3.getSparsity());\nbreak;\ndefault:\nthrow new NotImplementedException();\n}\n//compare estimated and real sparsity\n- TestUtils.compareScalars(est, m3.getSparsity(), (estim instanceof EstimatorBasicWorst) ? 5e-1 : 1e-3);\n+ TestUtils.compareScalars(est, m3.getSparsity(), (estim instanceof EstimatorBasicWorst) ? 5e-1 : 5e-3);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "diff": "@@ -146,7 +146,7 @@ public class SquaredProductTest extends AutomatedTestBase\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\n- MatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 3);\n+ MatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 7);\nMatrixBlock m3 = m1.aggregateBinaryOperations(m1, m2,\nnew MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "diff": "@@ -26,7 +26,9 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n+ OpBindChainTest.class,\nOpBindTest.class,\n+ OpElemWChainTest.class,\nOpElemWTest.class,\nOpSingleTest.class,\nOuterProductTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended sampling-based sparsity estimator, misc fixes Closes #828.
49,736
16.08.2018 09:56:42
25,200
64110f31dcd4eb8b6d56e8d462858a58c2016544
[MINOR] Fixed failing GPU tests and updated the documentation.
[ { "change_type": "MODIFY", "old_path": "docs/dml-language-reference.md", "new_path": "docs/dml-language-reference.md", "diff": "@@ -1525,6 +1525,8 @@ Hence, the images are internally represented as a matrix with dimension (N, C *\n| batch_norm2d | input | [batch_size X num_channels* height_image* width_image] | | [batch_size X num_channels* height_image* width_image] | scale, shift, exponentialMovingAverage_Mean, exponentialMovingAverage_Variance, mode, epsilon, momentum | Performs batch normalization operation (outputs: updated exponential moving average mean and variance, cache of the batch mean and variance) |\n| batch_norm2d_backward | input, dout | [batch_size X num_channels* height_image* width_image] | [batch_size X num_channels* height_image* width_image] | [batch_size X num_channels* height_image* width_image] | scale, epsilon, cache_mean (from forward), cache_inv_var (from forward) | Computed backpropagation error for batch normalization operation |\n+Note: the builtin functions `batch_norm2d` and `batch_norm2d_backward` are deprecated and will be removed in the next release. The `lstm` builtin function is in experimental phase and is only supported for the GPU backend.\n+\nExamples:\n| Function | Parameters | Visualization / Equivalent DML |\n" }, { "change_type": "MODIFY", "old_path": "docs/index.md", "new_path": "docs/index.md", "diff": "@@ -82,3 +82,4 @@ command-line interface.\n* [Troubleshooting Guide](troubleshooting-guide) - Troubleshoot various issues related to SystemML.\n* [Release Process](release-process) - Description of the SystemML release process.\n* [Using Native BLAS](native-backend) in SystemML.\n+* [Using GPU backend](gpu) in SystemML.\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/BinaryOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/BinaryOpTests.java", "diff": "@@ -53,12 +53,11 @@ public class BinaryOpTests extends GPUTests {\ndouble sparsity = 1.0; // Only dense matrices supported by \"solve\"\nfinal int[] sides = { 32, 33, 128, 256, 513, 2049 };\nfor (int i = 0; i < sides.length; i++) {\n- for (int j = i; j < sides.length; j++) {\n- int m = sides[j];\n+ // CP LibCommonsMath expects square matrices\n+ int m = sides[i];\nint n = sides[i];\nrunSolveTest(sparsity, m, n);\n}\n- }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixed failing GPU tests and updated the documentation.
49,738
16.08.2018 19:02:33
25,200
709a5c5b44b62abb506d27571dd61a9ef8251298
[MINOR] Fix missing license header in distributed paramserv tests
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/paramserv/ParamservSparkNNTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/paramserv/ParamservSparkNNTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\npackage org.apache.sysml.test.integration.functions.paramserv;\nimport org.apache.sysml.api.DMLException;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix missing license header in distributed paramserv tests
49,719
16.08.2018 22:58:13
25,200
a1a05e29f6ee78f3c33fea355f62c78ce21766ee
[maven-release-plugin] prepare release v1.2.0-rc1
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "* specific language governing permissions and limitations\n* under the License.\n-->\n-<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n- xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n+<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n<modelVersion>4.0.0</modelVersion>\n<parent>\n<groupId>org.apache</groupId>\n<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>1.2.0-SNAPSHOT</version>\n+ <version>1.2.0</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:[email protected]:apache/systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v1.2.0-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n<phase>package</phase>\n<configuration>\n<target name=\"copy and rename JAR\">\n- <copy\n- file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\"\n- tofile=\"${project.build.directory}/SystemML.jar\" />\n+ <copy file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\" tofile=\"${project.build.directory}/SystemML.jar\" />\n</target>\n</configuration>\n<goals>\n" } ]
Java
Apache License 2.0
apache/systemds
[maven-release-plugin] prepare release v1.2.0-rc1
49,736
29.08.2018 19:40:19
25,200
81419ae6a0abcc13e2e84307b7af38732c1892cd
[MINOR] Support the list datatype in external UDF. Also added RemoveDuplicates to show the usage.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/udf/ExternalFunctionInvocationInstruction.java", "new_path": "src/main/java/org/apache/sysml/udf/ExternalFunctionInvocationInstruction.java", "diff": "@@ -33,6 +33,7 @@ import org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.IntObject;\n+import org.apache.sysml.runtime.instructions.cp.ListObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysml.runtime.instructions.cp.StringObject;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -78,7 +79,6 @@ public class ExternalFunctionInvocationInstruction extends Instruction\nverifyAndAttachOutputs(ec, fun, outputs);\n}\n- @SuppressWarnings(\"incomplete-switch\")\nprivate ArrayList<FunctionParameter> getInputObjects(CPOperand[] inputs, LocalVariableMap vars) {\nArrayList<FunctionParameter> ret = new ArrayList<>();\nfor( CPOperand input : inputs ) {\n@@ -94,6 +94,12 @@ public class ExternalFunctionInvocationInstruction extends Instruction\ncase OBJECT:\nret.add(new BinaryObject(vars.get(input.getName())));\nbreak;\n+ case LIST:\n+ ret.add(new List((ListObject) vars.get(input.getName())));\n+ break;\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported data type: \"\n+ +input.getDataType().name());\n}\n}\nreturn ret;\n@@ -125,11 +131,14 @@ public class ExternalFunctionInvocationInstruction extends Instruction\nCPOperand output = outputs[i];\nswitch( fun.getFunctionOutput(i).getType() ) {\ncase Matrix:\n+ {\nMatrix m = (Matrix) fun.getFunctionOutput(i);\nMatrixObject newVar = createOutputMatrixObject( m );\nec.setVariable(output.getName(), newVar);\nbreak;\n+ }\ncase Scalar:\n+ {\nScalar s = (Scalar) fun.getFunctionOutput(i);\nScalarObject scalarObject = null;\nswitch( s.getScalarType() ) {\n@@ -151,6 +160,13 @@ public class ExternalFunctionInvocationInstruction extends Instruction\n}\nec.setVariable(output.getName(), scalarObject);\nbreak;\n+ }\n+ case List:\n+ {\n+ List l = (List) fun.getFunctionOutput(i);\n+ ec.setVariable(output.getName(), l.getListObject());\n+ break;\n+ }\ndefault:\nthrow new DMLRuntimeException(\"Unsupported data type: \"\n+fun.getFunctionOutput(i).getType().name());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/udf/FunctionParameter.java", "new_path": "src/main/java/org/apache/sysml/udf/FunctionParameter.java", "diff": "@@ -39,6 +39,7 @@ public abstract class FunctionParameter implements Serializable\nMatrix,\nScalar,\nObject,\n+ List\n}\nprivate FunctionParameterType _type;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/udf/List.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. 
You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.udf;\n+\n+import org.apache.sysml.runtime.instructions.cp.ListObject;\n+\n+public class List extends FunctionParameter {\n+ private static final long serialVersionUID = -3230908817131624857L;\n+ protected ListObject _lObj;\n+\n+ public List(ListObject obj) {\n+ super(FunctionParameterType.List);\n+ _lObj = obj;\n+ }\n+\n+ public ListObject getListObject() {\n+ return _lObj;\n+ }\n+\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/udf/lib/RemoveDuplicates.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.udf.lib;\n+\n+import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Random;\n+\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.instructions.cp.Data;\n+import org.apache.sysml.runtime.instructions.cp.ListObject;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.udf.FunctionParameter;\n+import org.apache.sysml.udf.List;\n+import org.apache.sysml.udf.Matrix;\n+import org.apache.sysml.udf.PackageFunction;\n+import org.apache.sysml.udf.Matrix.ValueType;\n+\n+/**\n+ * Use this class to remove duplicate matrices from list of matrices.\n+ * It also returns the indexes which maps the original input list to the output list.\n+ *\n+ * Usage:\n+ * <pre>\n+ * <code>\n+ * distinct = externalFunction(list[unknown] inL) return (list[unknown] outL, matrix[double] idx) implemented in (classname=\"org.apache.sysml.udf.lib.RemoveDuplicates\", exectype=\"mem\");\n+ * X = rand(rows=10, cols=10)\n+ * Y = X*sum(X);\n+ * Z = sum(X)*X;\n+ * W = X*sum(X);\n+ * inL = list(Y, Z, W)\n+ * [outL, idx] = distinct(inL);\n+ * print(\">>\\n\" + toString(idx));\n+ * </code>\n+ * </pre>\n+ *\n+ * The above code prints:\n+ * >>\n+ * 1.000\n+ * 2.000\n+ * 1.000\n+ */\n+public class RemoveDuplicates extends PackageFunction {\n+ private static final long serialVersionUID = -3905212831582648882L;\n+\n+ private List outputList;\n+ private Matrix indexes;\n+ private Random rand = new Random();\n+\n+ @Override\n+ public int getNumFunctionOutputs() {\n+ return 2;\n+ }\n+\n+ 
@Override\n+ public FunctionParameter getFunctionOutput(int pos) {\n+ if(pos == 0)\n+ return outputList;\n+ else if(pos == 1)\n+ return indexes;\n+ throw new RuntimeException(\"Invalid function output being requested\");\n+ }\n+\n+ private int indexOf(java.util.List<MatrixBlock> list, MatrixBlock mb) {\n+// Caused by: java.lang.RuntimeException: equals should never be called for matrix blocks.\n+// at org.apache.sysml.runtime.matrix.data.MatrixBlock.equals(MatrixBlock.java:5644)\n+// return list.indexOf(mb);\n+ for(int i = 0; i < list.size(); i++) {\n+ if(list.get(i) == mb) {\n+ return i;\n+ }\n+ }\n+ return -1;\n+ }\n+\n+ @Override\n+ public void execute() {\n+ java.util.List<Data> inputData = ((List)getFunctionInput(0)).getListObject().getData();\n+ java.util.List<Data> outputData = new ArrayList<>();\n+ java.util.List<MatrixBlock> outputMB = new ArrayList<>();\n+ indexes = new Matrix( \"tmp_\" + rand.nextLong(), inputData.size(), 1, ValueType.Double );\n+ MatrixBlock indexesMB = allocateDenseMatrixBlock(indexes);\n+ double [] indexesData = indexesMB.getDenseBlockValues();\n+\n+ for(int i = 0; i < inputData.size(); i++) {\n+ Data elem = inputData.get(i);\n+ if(elem instanceof MatrixObject) {\n+ MatrixBlock mb = ((MatrixObject)elem).acquireRead();\n+ int index = indexOf(outputMB, mb);\n+ if(index >= 0) {\n+ indexesData[i] = indexOf(outputMB, mb) + 1;\n+ }\n+ else {\n+ outputMB.add(mb);\n+ outputData.add(elem);\n+ indexesData[i] = outputMB.size();\n+ }\n+ ((MatrixObject)elem).release();\n+ }\n+ else {\n+ throw new RuntimeException(\"Only list of matrices is supported in RemoveDuplicates\");\n+ }\n+ }\n+ indexesMB.setNonZeros(indexesData.length);\n+ try {\n+ indexes.setMatrixDoubleArray(indexesMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+ } catch (IOException e) {\n+ throw new RuntimeException(\"Exception while executing RemoveDuplicates\", e);\n+ }\n+ outputList = new List(new ListObject(outputData));\n+ }\n+\n+ private static MatrixBlock allocateDenseMatrixBlock(Matrix mat) {\n+ int rows = (int) mat.getNumRows();\n+ int cols = (int) mat.getNumCols();\n+ MatrixBlock mb = new MatrixBlock(rows, cols, false);\n+ mb.allocateDenseBlock();\n+ return mb;\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Support the list datatype in external UDF - Also added RemoveDuplicates to show the usage.
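For illustration, a minimal sketch of a user-defined external function that consumes and returns the new list type. The class name IdentityList, its pass-through behavior, and the DML declaration in the comment are hypothetical assumptions (not part of the commit); only the List wrapper, ListObject, and PackageFunction accessors visible in the diff above are relied on.

package org.apache.sysml.udf.lib;

import org.apache.sysml.runtime.instructions.cp.ListObject;
import org.apache.sysml.udf.FunctionParameter;
import org.apache.sysml.udf.List;
import org.apache.sysml.udf.PackageFunction;

/**
 * Hypothetical external UDF that passes its input list through unchanged.
 * Assumed DML side:
 *   identity = externalFunction(list[unknown] inL) return (list[unknown] outL)
 *     implemented in (classname="org.apache.sysml.udf.lib.IdentityList", exectype="mem");
 */
public class IdentityList extends PackageFunction {
    private static final long serialVersionUID = 1L;

    private List output;

    @Override
    public int getNumFunctionOutputs() {
        return 1;
    }

    @Override
    public FunctionParameter getFunctionOutput(int pos) {
        if (pos == 0)
            return output;
        throw new RuntimeException("Invalid function output being requested");
    }

    @Override
    public void execute() {
        // Unwrap the ListObject handed in from DML and re-wrap it for the output.
        ListObject in = ((List) getFunctionInput(0)).getListObject();
        output = new List(in);
    }
}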
49,736
30.08.2018 15:59:37
25,200
ab251f6ee42fe44eabf51483184c95a5a3e472d9
[MINOR] Fixed javadoc errors
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -946,6 +946,7 @@ public class LibMatrixCUDA {\n/**\n* Do a simple reduction, the output of which is a single value\n* @param gCtx a valid {@link GPUContext}\n+ * @param instName instruction name\n* @param kernelFunction name of the kernel function to invoke\n* @param in {@link Pointer} to matrix in device memory\n* @param n size of array\n@@ -988,6 +989,7 @@ public class LibMatrixCUDA {\n* Do a reduction by row. Data is reduced per row and the\n* resulting vector is calculated.\n* @param gCtx a valid {@link GPUContext}\n+ * @param instName instruction name\n* @param kernelFunction name of the kernel function to invoke\n* @param in {@link Pointer} to input matrix in device memory (size - rows * columns)\n* @param out {@link Pointer} to output matrix in device memory (size - rows * 1)\n@@ -1015,6 +1017,7 @@ public class LibMatrixCUDA {\n* Do a reduction by column. Data is reduced per column and the\n* resulting vector is calculated.\n* @param gCtx a valid {@link GPUContext}\n+ * @param instName instruction name\n* @param kernelFunction name of the kernel function to invoke\n* @param in {@link Pointer} to input matrix in device memory (size - rows * columns)\n* @param out {@link Pointer} to output matrix in device memory (size - 1 * cols)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/udf/lib/RemoveDuplicates.java", "new_path": "src/main/java/org/apache/sysml/udf/lib/RemoveDuplicates.java", "diff": "@@ -49,12 +49,11 @@ import org.apache.sysml.udf.Matrix.ValueType;\n* W = X*sum(X);\n* inL = list(Y, Z, W)\n* [outL, idx] = distinct(inL);\n- * print(\">>\\n\" + toString(idx));\n+ * print(toString(idx));\n* </code>\n* </pre>\n*\n* The above code prints:\n- * >>\n* 1.000\n* 2.000\n* 1.000\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixed javadoc errors
49,736
10.09.2018 15:05:05
25,200
2fc26b3dced89a473055828b08550ed6e6a8d7be
[MINOR] Allow non-literal values in parameterized built-in functions
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUDenseInputPointerFetcher.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUDenseInputPointerFetcher.java", "diff": "@@ -20,7 +20,6 @@ package org.apache.sysml.runtime.instructions.gpu;\nimport java.util.HashMap;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixReshapeGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixReshapeGPUInstruction.java", "diff": "@@ -79,7 +79,8 @@ public class MatrixReshapeGPUInstruction extends GPUInstruction {\nGPUContext gCtx = ec.getGPUContext(0);\nMatrixObject mat = getMatrixInputForGPUInstruction(ec, _input.getName());\nif(rows*cols != mat.getNumRows()*mat.getNumColumns()) {\n- throw new DMLRuntimeException(\"Incorrect number of rows and cols in rshape instruction\");\n+ throw new DMLRuntimeException(\"Cannot reshape a matrix of dimensions: [\" + mat.getNumRows() + \", \" + mat.getNumColumns() + \"] to a matrix of\"\n+ + \" dimensions [\" + rows + \", \" + cols + \"]\");\n}\n// We currently support only dense rshape\nPointer inPtr = LibMatrixCUDA.getDensePointer(gCtx, mat, instName);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ParameterizedBuiltinSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ParameterizedBuiltinSPInstruction.java", "diff": "@@ -174,6 +174,54 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\n}\n}\n+ private double getDoubleParam(ExecutionContext ec, String key) {\n+ String val = params.get(key);\n+ try {\n+ if(val != null)\n+ return Double.parseDouble( val );\n+ else\n+ throw new RuntimeException(\"Expected parameter \" + key);\n+ } catch(NumberFormatException e) {\n+ return ec.getScalarInput(val, ValueType.DOUBLE, false).getDoubleValue();\n+ }\n+ }\n+\n+ private boolean getBooleanParam(ExecutionContext ec, String key) {\n+ String val = params.get(key);\n+ try {\n+ if(val != null)\n+ return Boolean.parseBoolean( val.toLowerCase() );\n+ else\n+ throw new RuntimeException(\"Expected parameter \" + key);\n+ } catch(NumberFormatException e) {\n+ return ec.getScalarInput(val, ValueType.BOOLEAN, false).getBooleanValue();\n+ }\n+ }\n+\n+ private long getLongParam(ExecutionContext ec, String key) {\n+ String val = params.get(key);\n+ try {\n+ if(val != null)\n+ return Long.parseLong( val );\n+ else\n+ throw new RuntimeException(\"Expected parameter \" + key);\n+ } catch(NumberFormatException e) {\n+ return ec.getScalarInput(val, ValueType.INT, false).getLongValue();\n+ }\n+ }\n+\n+ private long getLongParam(ExecutionContext ec, String key, long defaultValue) {\n+ String val = params.get(key);\n+ try {\n+ if(val != null)\n+ return Long.parseLong( val );\n+ else\n+ return defaultValue;\n+ } catch(NumberFormatException e) {\n+ return ec.getScalarInput(val, ValueType.INT, false).getLongValue();\n+ }\n+ }\n+\n@Override\n@SuppressWarnings(\"unchecked\")\n@@ -191,8 +239,8 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\nPartitionedBroadcast<MatrixBlock> groups = sec.getBroadcastForVariable(groupsVar);\nMatrixCharacteristics mc1 = 
sec.getMatrixCharacteristics( targetVar );\nMatrixCharacteristics mcOut = sec.getMatrixCharacteristics(output.getName());\n- CPOperand ngrpOp = new CPOperand(params.get(Statement.GAGG_NUM_GROUPS));\n- int ngroups = (int)sec.getScalarInput(ngrpOp.getName(), ngrpOp.getValueType(), ngrpOp.isLiteral()).getLongValue();\n+\n+ int ngroups = (int) getLongParam(ec, Statement.GAGG_NUM_GROUPS);\n//single-block aggregation\nif( ngroups <= mc1.getRowsPerBlock() && mc1.getCols() <= mc1.getColsPerBlock() ) {\n@@ -222,7 +270,7 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\n}\nelse if ( opcode.equalsIgnoreCase(\"groupedagg\") )\n{\n- boolean broadcastGroups = Boolean.parseBoolean(params.get(\"broadcast\"));\n+ boolean broadcastGroups = getBooleanParam(ec, \"broadcast\");\n//get input rdd handle\nString groupsVar = params.get(Statement.GAGG_GROUPS);\n@@ -253,8 +301,7 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\n}\nelse //input vector or matrix\n{\n- String ngroupsStr = params.get(Statement.GAGG_NUM_GROUPS);\n- long ngroups = (ngroupsStr != null) ? (long) Double.parseDouble(ngroupsStr) : -1;\n+ long ngroups = getLongParam(ec, Statement.GAGG_NUM_GROUPS, -1);\n//execute basic grouped aggregate (extract and preagg)\nif( broadcastGroups ) {\n@@ -312,8 +359,8 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\nString rddOffVar = params.get(\"offset\");\nboolean rows = sec.getScalarInput(params.get(\"margin\"), ValueType.STRING, true).getStringValue().equals(\"rows\");\n- boolean emptyReturn = Boolean.parseBoolean(params.get(\"empty.return\").toLowerCase());\n- long maxDim = sec.getScalarInput(params.get(\"maxdim\"), ValueType.DOUBLE, false).getLongValue();\n+ boolean emptyReturn = getBooleanParam(ec, \"empty.return\");\n+ long maxDim = getLongParam(ec, \"maxdim\");\nMatrixCharacteristics mcIn = sec.getMatrixCharacteristics(rddInVar);\nif( maxDim > 0 ) //default case\n@@ -369,8 +416,8 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\nMatrixCharacteristics mcIn = sec.getMatrixCharacteristics(params.get(\"target\"));\n//execute replace operation\n- double pattern = Double.parseDouble( params.get(\"pattern\") );\n- double replacement = Double.parseDouble( params.get(\"replacement\") );\n+ double pattern = getDoubleParam(ec, \"pattern\");\n+ double replacement = getDoubleParam(ec, \"replacement\");\nJavaPairRDD<MatrixIndexes,MatrixBlock> out =\nin1.mapValues(new RDDReplaceFunction(pattern, replacement));\n@@ -388,8 +435,8 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\nJavaPairRDD<MatrixIndexes,MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable(params.get(\"target\"));\nMatrixCharacteristics mcIn = sec.getMatrixCharacteristics(params.get(\"target\"));\nboolean lower = opcode.equalsIgnoreCase(\"lowertri\");\n- boolean diag = Boolean.parseBoolean(params.get(\"diag\"));\n- boolean values = Boolean.parseBoolean(params.get(\"values\"));\n+ boolean diag = getBooleanParam(ec, \"diag\");\n+ boolean values = getBooleanParam(ec, \"values\");\nJavaPairRDD<MatrixIndexes,MatrixBlock> out = in1.mapPartitionsToPair(\nnew RDDExtractTriangularFunction(lower, diag, values), true);\n@@ -408,11 +455,11 @@ public class ParameterizedBuiltinSPInstruction extends ComputationSPInstruction\n//get input rdd handle\nJavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable( rddInVar );\nMatrixCharacteristics mcIn = 
sec.getMatrixCharacteristics(rddInVar);\n- double maxVal = Double.parseDouble( params.get(\"max\") );\n+ double maxVal = getDoubleParam(ec, \"max\");\nlong lmaxVal = UtilFunctions.toLong(maxVal);\nboolean dirRows = params.get(\"dir\").equals(\"rows\");\n- boolean cast = Boolean.parseBoolean(params.get(\"cast\"));\n- boolean ignore = Boolean.parseBoolean(params.get(\"ignore\"));\n+ boolean cast = getBooleanParam(ec, \"cast\");\n+ boolean ignore = getBooleanParam(ec, \"ignore\");\nlong brlen = mcIn.getRowsPerBlock();\nlong bclen = mcIn.getColsPerBlock();\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Allow non-literal values in parameterized built-in functions
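The pattern behind the new getDoubleParam/getLongParam/getBooleanParam helpers is "parse the parameter as a literal first, otherwise treat it as the name of a scalar variable bound at runtime". A simplified, self-contained sketch of that resolution logic follows; the class name and the plain Map stand-ins for the SystemML ExecutionContext are illustrative assumptions, not the actual API.

import java.util.Map;

class ParamResolver {
    // Resolve a parameter that may be either a numeric literal (e.g. "42")
    // or the name of a scalar variable whose value is known only at runtime.
    static long resolveLongParam(Map<String, String> params, Map<String, Long> scalarVars, String key) {
        String val = params.get(key);
        if (val == null)
            throw new RuntimeException("Expected parameter " + key);
        try {
            return Long.parseLong(val);     // literal value
        } catch (NumberFormatException e) {
            return scalarVars.get(val);     // variable name -> runtime scalar value
        }
    }
}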
49,736
11.09.2018 13:26:23
25,200
77c98d693c3b2d407094de50accac615a638183f
[MINOR] Fixed import error in Keras2DML
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/estimators.py", "new_path": "src/main/python/systemml/mllearn/estimators.py", "diff": "@@ -1018,7 +1018,7 @@ class Keras2DML(Caffe2DML):\nregularization_type: regularization type (default: \"L2\")\n\"\"\"\nfrom .keras2caffe import convertKerasToCaffeNetwork, convertKerasToCaffeSolver\n- import tempfile\n+ import tempfile, keras\nif isinstance(keras_model, keras.models.Sequential):\n# Convert the sequential model to functional model\nif keras_model.model is None:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixed import error in Keras2DML
49,736
13.09.2018 11:17:33
25,200
e2dc8568855d353265ac4e0755b9ac3d2b30b1d8
Removed unnecessary long-to-int conversion in LSTM. Minor cleanup of the GPUObject class. Also, fixed incorrect forced GPU configuration flag.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/ConfigurationManager.java", "new_path": "src/main/java/org/apache/sysml/conf/ConfigurationManager.java", "diff": "@@ -258,7 +258,7 @@ public class ConfigurationManager\n* @return true if GPU is enabled in forced mode\n*/\npublic static boolean isForcedGPU() {\n- return _ldmlOptions.get().isGPU();\n+ return _ldmlOptions.get().isForceGPU();\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "diff": "@@ -595,18 +595,18 @@ public class DnnGPUInstruction extends GPUInstruction {\nprivate void processLstmBackwardInstruction(ExecutionContext ec) throws DMLRuntimeException {\nMatrixObject out0 = getMatrixInputForGPUInstruction(ec, _input4.getName());\n- int M = toInt(out0.getNumColumns()); // hiddenSize .. since out0: (N, M)\n+ long M = out0.getNumColumns(); // hiddenSize .. since out0: (N, M)\nPointer out0Pointer = LibMatrixCUDA.getDensePointer(gCtx, out0, instName);\nMatrixObject W = getMatrixInputForGPUInstruction(ec, _input2.getName());\nMatrixObject bias = getMatrixInputForGPUInstruction(ec, _input3.getName());\nlong numRowsW = W.getNumRows();\n- int D = toInt(numRowsW) - M; // since W:(D+M, 4M) ... numFeatures\n+ long D = numRowsW - M; // since W:(D+M, 4M) ... numFeatures\nPointer sysmlWPointer = LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, W, instName, D+M, 4*M);\nPointer sysmlBiasPointer = LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, bias, instName, 1, 4*M);\nPointer cudnnWPointer = gCtx.allocate(instName, (D+M+2)*(4*M)*LibMatrixCUDA.sizeOfDataType);\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_weight\",\n- ExecutionConfig.getConfigForSimpleVectorOperations((D+M+2)*(4*M)),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt((D+M+2)*(4*M))),\nsysmlWPointer, sysmlBiasPointer, cudnnWPointer, D, M);\nec.releaseMatrixInputForGPUInstruction(_input2.getName());\nec.releaseMatrixInputForGPUInstruction(_input3.getName());\n@@ -619,7 +619,7 @@ public class DnnGPUInstruction extends GPUInstruction {\nint T = toInt(numColsX/ D); // since X:(N, T*D) ... seqLength\nPointer cudnnInput = gCtx.allocate(instName, (N*T*D)*LibMatrixCUDA.sizeOfDataType);\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_input\",\n- ExecutionConfig.getConfigForSimpleVectorOperations(N*T*D),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt(N*T*D)),\nxPointer, cudnnInput, N, D, T*D, N*T*D);\nec.releaseMatrixInputForGPUInstruction(_input1.getName());\n@@ -656,18 +656,19 @@ public class DnnGPUInstruction extends GPUInstruction {\n// previous output out0 (also represented by hx) and cell state c0 (also represented by cx): (N, M) ==> (1, M, N)\n// out: (N, T*M) or (N, M) ==> (T, M, N)\nMatrixObject out0 = getMatrixInputForGPUInstruction(ec, _input4.getName());\n- int M = toInt(out0.getNumColumns()); // hiddenSize .. since out0: (N, M)\n+ long M = out0.getNumColumns(); // hiddenSize .. since out0: (N, M)\nPointer out0Pointer = LibMatrixCUDA.getDensePointer(gCtx, out0, instName);\nMatrixObject W = getMatrixInputForGPUInstruction(ec, _input2.getName());\nMatrixObject bias = getMatrixInputForGPUInstruction(ec, _input3.getName());\nlong numRowsW = W.getNumRows();\n- int D = toInt(numRowsW) - M; // since W:(D+M, 4M) ... numFeatures\n+ long D = numRowsW - M; // since W:(D+M, 4M) ... 
numFeatures\n+\nPointer sysmlWPointer = LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, W, instName, D+M, 4*M);\nPointer sysmlBiasPointer = LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, bias, instName, 1, 4*M);\nPointer cudnnWPointer = gCtx.allocate(instName, (D+M+2)*(4*M)*LibMatrixCUDA.sizeOfDataType);\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_weight\",\n- ExecutionConfig.getConfigForSimpleVectorOperations((D+M+2)*(4*M)),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt((D+M+2)*(4*M))),\nsysmlWPointer, sysmlBiasPointer, cudnnWPointer, D, M);\nec.releaseMatrixInputForGPUInstruction(_input2.getName());\nec.releaseMatrixInputForGPUInstruction(_input3.getName());\n@@ -682,13 +683,14 @@ public class DnnGPUInstruction extends GPUInstruction {\nint T = toInt(numColsX/ D); // since X:(N, T*D) ... seqLength\nPointer cudnnInput = gCtx.allocate(instName, (N*T*D)*LibMatrixCUDA.sizeOfDataType);\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_input\",\n- ExecutionConfig.getConfigForSimpleVectorOperations(N*T*D),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt(N*T*D)),\nxPointer, cudnnInput, N, D, T*D, N*T*D);\nec.releaseMatrixInputForGPUInstruction(_input1.getName());\nPointer c0Pointer = LibMatrixCUDA.getDensePointer(gCtx, getMatrixInputForGPUInstruction(ec, _input5.getName()), instName);\n- LibMatrixCuDNN.lstm(ec, gCtx, instName, cudnnInput, cudnnWPointer, out0Pointer, c0Pointer, return_sequences, _output.getName(), _output2.getName(), N, M, D, T);\n+ LibMatrixCuDNN.lstm(ec, gCtx, instName, cudnnInput, cudnnWPointer, out0Pointer, c0Pointer, return_sequences, _output.getName(), _output2.getName(),\n+ toInt(N), toInt(M), toInt(D), toInt(T));\ngCtx.cudaFreeHelper(instName, cudnnWPointer, gCtx.EAGER_CUDA_FREE);\ngCtx.cudaFreeHelper(instName, cudnnInput, gCtx.EAGER_CUDA_FREE);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java", "diff": "@@ -475,14 +475,6 @@ public class CSRPointer {\nreturn A;\n}\n- /**\n- * Calls cudaFree lazily on the allocated {@link Pointer} instances\n- *\n- */\n- public void deallocate() {\n- deallocate(getGPUContext().EAGER_CUDA_FREE);\n- }\n-\n/**\n* Calls cudaFree lazily or eagerly on the allocated {@link Pointer} instances\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ExecutionConfig.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ExecutionConfig.java", "diff": "@@ -68,8 +68,8 @@ public class ExecutionConfig {\n* @return execution configuration\n*/\npublic static ExecutionConfig getConfigForSimpleVectorOperations(int numCells) {\n- if(numCells == 0)\n- throw new DMLRuntimeException(\"Attempting to invoke a kernel with 0 threads\");\n+ if(numCells <= 0)\n+ throw new DMLRuntimeException(\"Attempting to invoke a kernel with \" + numCells + \" threads\");\nint deviceNumber = 0;\nint blockDimX = getMaxBlockDim(deviceNumber);\nint gridDimX = (int) Math.ceil((double) numCells / blockDimX);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -53,7 +53,7 @@ public class GPUMemoryManager {\n// Developer flag: Use this flag to check for GPU memory leak in SystemML.\n// This 
has an additional overhead of maintaining stack trace of all the allocated GPU pointers via PointerInfo class.\nprivate static final boolean DEBUG_MEMORY_LEAK = false;\n- private static final int [] DEBUG_MEMORY_LEAK_STACKTRACE_DEPTH = {5, 6, 7, 8, 9, 10}; // Avoids printing too much text while debuggin\n+ private static final int [] DEBUG_MEMORY_LEAK_STACKTRACE_DEPTH = {5, 6, 7, 8, 9, 10, 11}; // Avoids printing too much text while debugging\nprivate final boolean PRINT_GPU_MEMORY_INFO = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.PRINT_GPU_MEMORY_INFO);\n@@ -86,7 +86,15 @@ public class GPUMemoryManager {\nprivate Set<Pointer> getNonMatrixLockedPointers() {\nSet<Pointer> managedPointers = matrixMemoryManager.getPointers();\nmanagedPointers.addAll(lazyCudaFreeMemoryManager.getAllPointers());\n- return nonIn(allPointers.keySet(), managedPointers);\n+ Set<Pointer> superSet = allPointers.keySet();\n+ Set<Pointer> ret = nonIn(superSet, managedPointers);\n+ if(DEBUG_MEMORY_LEAK) {\n+ System.out.println(\n+ ret.stream().map(p -> p.toString()).collect(Collectors.joining(\",\")) + \" = notIn(>>>\" +\n+ superSet.stream().map(p -> p.toString()).collect(Collectors.joining(\",\")) + \">>>, <<<\" +\n+ managedPointers.stream().map(p -> p.toString()).collect(Collectors.joining(\",\")) + \">>>)\");\n+ }\n+ return ret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -90,11 +90,6 @@ public class GPUObject {\n*/\nAtomicLong timestamp = new AtomicLong();\n- /**\n- * Whether this block is in sparse format\n- */\n- protected boolean isSparse = false;\n-\n/**\n* Enclosing {@link MatrixObject} instance\n*/\n@@ -131,10 +126,29 @@ public class GPUObject {\n/**\n* Removes the dense pointer and potential soft reference\n+ *\n+ * @param opcode opcode of the instruction\n+ * @param eager whether to delete eagerly\n*/\n- public void clearDensePointer() {\n- jcudaDenseMatrixPtr = null;\n+ public void clearDensePointer(String opcode, boolean eager) {\n+ if (!isDensePointerNull()) {\n+ getGPUContext().cudaFreeHelper(opcode, getDensePointer(), eager);\n+ }\nshadowBuffer.clearShadowPointer();\n+ jcudaDenseMatrixPtr = null;\n+ }\n+\n+ /**\n+ * Removes the sparse pointer\n+ *\n+ * @param opcode opcode of the instruction\n+ * @param eager whether to delete eagerly\n+ */\n+ public void clearSparsePointer(String opcode, boolean eager) {\n+ if (getJcudaSparseMatrixPtr() != null) {\n+ getJcudaSparseMatrixPtr().deallocate(eager);\n+ }\n+ jcudaSparseMatrixPtr = null;\n}\n@@ -147,14 +161,14 @@ public class GPUObject {\nif (!this.isDensePointerNull()) {\nthrow new DMLRuntimeException(\"jcudaDenseMatrixPtr was already allocated for \" + this + \", this will cause a memory leak on the GPU\");\n}\n+ clearSparsePointer(null, true);\nthis.jcudaDenseMatrixPtr = densePtr;\n- this.isSparse = false;\nif(LOG.isDebugEnabled()) {\nLOG.debug(\"Setting dense pointer of size \" + getGPUContext().getMemoryManager().getSizeAllocatedGPUPointer(densePtr));\n}\n- if (getJcudaSparseMatrixPtr() != null) {\n- getJcudaSparseMatrixPtr().deallocate();\n- jcudaSparseMatrixPtr = null;\n+ if(!gpuContext.getMemoryManager().getGPUMatrixMemoryManager().gpuObjects.contains(this)) {\n+ // Double-check if the matrix manager still has the current GPU object in case of eviction.\n+ gpuContext.getMemoryManager().getGPUMatrixMemoryManager().addGPUObject(this);\n}\n}\n// 
----------------------------------------------------------------------\n@@ -170,7 +184,6 @@ public class GPUObject {\nthat.writeLock = false;\nthat.timestamp = new AtomicLong(me.timestamp.get());\n- that.isSparse = me.isSparse;\ntry {\nif (!me.isDensePointerNull()) {\n@@ -197,10 +210,6 @@ public class GPUObject {\nreturn getGPUContext().allocate(null, size);\n}\n- private void cudaFreeHelper(Pointer toFree) throws DMLRuntimeException {\n- getGPUContext().cudaFreeHelper(null, toFree, gpuContext.EAGER_CUDA_FREE);\n- }\n-\npublic GPUContext getGPUContext() {\nreturn gpuContext;\n}\n@@ -300,11 +309,11 @@ public class GPUObject {\nif (this.jcudaSparseMatrixPtr != null) {\nthrow new DMLRuntimeException(\"jcudaSparseMatrixPtr was already allocated for \" + this + \", this will cause a memory leak on the GPU\");\n}\n+ clearDensePointer(null, true);\nthis.jcudaSparseMatrixPtr = sparseMatrixPtr;\n- this.isSparse = true;\n- if (!isDensePointerNull() && !shadowBuffer.isBuffered()) {\n- cudaFreeHelper(getDensePointer());\n- clearDensePointer();\n+ if(!gpuContext.getMemoryManager().getGPUMatrixMemoryManager().gpuObjects.contains(this)) {\n+ // Double-check if the matrix manager still has the current GPU object in case of eviction.\n+ gpuContext.getMemoryManager().getGPUMatrixMemoryManager().addGPUObject(this);\n}\n}\n@@ -354,8 +363,7 @@ public class GPUObject {\n}\nPointer tmp = transpose(getGPUContext(), getDensePointer(), m, n, lda, ldc);\n- cudaFreeHelper(getDensePointer());\n- clearDensePointer();\n+ clearDensePointer(null, true);\nsetDensePointer(tmp);\n}\n@@ -376,8 +384,7 @@ public class GPUObject {\n}\nPointer tmp = transpose(getGPUContext(), getDensePointer(), m, n, lda, ldc);\n- cudaFreeHelper(getDensePointer());\n- clearDensePointer();\n+ clearDensePointer(null, true);\nsetDensePointer(tmp);\n}\n@@ -446,7 +453,7 @@ public class GPUObject {\n}\npublic boolean isSparse() {\n- return isSparse;\n+ return jcudaSparseMatrixPtr != null;\n}\nprivate static long getDatatypeSizeOf(long numElems) {\n@@ -602,7 +609,6 @@ public class GPUObject {\nLOG.trace(\"GPU : acquireDeviceModifySparse on \" + this + \", GPUContext=\" + getGPUContext());\n}\nboolean allocated = false;\n- isSparse = true;\nif (!isAllocated()) {\nif(LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : data is not allocated, allocating a sparse block, on \" + this);\n@@ -995,22 +1001,15 @@ public class GPUObject {\n* Clears the data associated with this {@link GPUObject} instance\n*\n* @param opcode opcode of the instruction\n- * @param eager whether to be done synchronously or asynchronously\n+ * @param eager whether to delete eagerly\n* @throws DMLRuntimeException if error occurs\n*/\npublic void clearData(String opcode, boolean eager) throws DMLRuntimeException {\nif(LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : clearData on \" + this + \", GPUContext=\" + getGPUContext());\n}\n- if (!isDensePointerNull()) {\n- getGPUContext().cudaFreeHelper(opcode, getDensePointer(), eager);\n- }\n- if (getJcudaSparseMatrixPtr() != null) {\n- getJcudaSparseMatrixPtr().deallocate(eager);\n- }\n- clearDensePointer();\n- shadowBuffer.clearShadowPointer();\n- jcudaSparseMatrixPtr = null;\n+ clearDensePointer(opcode, eager);\n+ clearSparsePointer(opcode, eager);\nresetReadWriteLock();\ngetGPUContext().getMemoryManager().removeGPUObject(this);\n}\n@@ -1039,7 +1038,6 @@ public class GPUObject {\nsb.append(\", dirty=\").append(dirty);\nsb.append(\", readLocks=\").append(readLocks.longValue());\nsb.append(\", writeLock=\").append(writeLock);\n- sb.append(\", sparse? 
\").append(isSparse);\nsb.append(\", dims=[\").append(mat.getNumRows()).append(\",\").append(mat.getNumColumns()).append(\"]\");\nif(!isDensePointerNull())\nsb.append(\", densePtr=\").append(getDensePointer());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -849,14 +849,14 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nstatic Pointer getDenseInputPointer(ExecutionContext ec, GPUContext gCtx, String instName, String inputName,\nlong numRows, long numCols) throws DMLRuntimeException {\nMatrixObject output = ec.getMatrixInputForGPUInstruction(inputName, instName);\n- return LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, output, instName, toInt(numRows), toInt(numCols));\n+ return LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, output, instName, numRows, numCols);\n}\nstatic Pointer getDenseOutputPointer(ExecutionContext ec, GPUContext gCtx, String instName, String outputName,\nlong numRows, long numCols) throws DMLRuntimeException {\nMatrixObject output = ec.getMatrixObject(outputName);\ngetDenseMatrixOutputForGPUInstruction(ec, instName, outputName, numRows, numCols); // Allocated the dense output matrix\n- return getDensePointerForCuDNN(gCtx, output, instName, toInt(numRows), toInt(numCols));\n+ return getDensePointerForCuDNN(gCtx, output, instName, numRows, numCols);\n}\n/**\n@@ -890,9 +890,14 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nString outputName, String cyName, // output\nString rnnMode, boolean return_sequences, int N, int M, int D, int T) throws DMLRuntimeException {\nboolean hasCarry = rnnMode.equalsIgnoreCase(\"lstm\");\n+ if(LOG.isDebugEnabled()) {\n+ long memRequired = (N*T*M + 2*N*M + N*T*M)*sizeOfDataType;\n+ LOG.debug(\"Memory required for invoking lstmForward is \" + memRequired + \" bytes + workspace + reserve space + memory for descriptors.\");\n+ }\n+\n// Get output pointers\nPointer cudnnYPointer = gCtx.allocate(instName, N*T*M*sizeOfDataType);\n- Pointer hyPointer = !return_sequences ? getDenseOutputPointer(ec, gCtx, instName, outputName, N, M) : gCtx.allocate(instName, N*M*sizeOfDataType);\n+ Pointer hyPointer = return_sequences ? gCtx.allocate(instName, N*M*sizeOfDataType) : getDenseOutputPointer(ec, gCtx, instName, outputName, N, M);\nPointer cyPointer = hasCarry ? getDenseOutputPointer(ec, gCtx, instName, cyName, N, M) : new Pointer();\n// Pointer wPointer = getDensePointerForCuDNN(gCtx, w, instName, D+M+2, 4*M);\n@@ -922,20 +927,27 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\npublic static void lstmBackward(ExecutionContext ec, GPUContext gCtx, String instName,\nPointer x, Pointer hx, Pointer cx, Pointer wPointer, String doutName, String dcyName, // input\nString dxName, String dwName, String dbName, String dhxName, String dcxName, // output\n- boolean return_sequences, int N, int M, int D, int T) throws DMLRuntimeException {\n+ boolean return_sequences, long N, long M, long D, long T) throws DMLRuntimeException {\n+\n+ if(LOG.isDebugEnabled()) {\n+ long memRequired = (N*T*M + (return_sequences ? T*M : M) + N*T*M + 2*N*T*D + (D+M+2)*(4*M))*sizeOfDataType;\n+ LOG.debug(\"Memory required for invoking lstmBackward is \" + memRequired + \" bytes + workspace + reserve space + memory for descriptors.\");\n+ }\n+\n// Transform the input dout and prepare them for cudnnRNNBackwardData\nPointer dy = gCtx.allocate(instName, N*T*M*sizeOfDataType);\n- int size = return_sequences ? 
N*T*M : N*M;\n+ long size = return_sequences ? N*T*M : N*M;\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_backward_gradients\",\n- ExecutionConfig.getConfigForSimpleVectorOperations(size),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt(size)),\ngetDenseInputPointer(ec, gCtx, instName, doutName, N, return_sequences ? T*M : M),\ndy, N, T, M, size, return_sequences ? 1 : 0);\nec.releaseMatrixInputForGPUInstruction(doutName);\n// Allocate intermediate pointers computed by forward\nPointer yPointer = gCtx.allocate(instName, N*T*M*sizeOfDataType);\n- try(LibMatrixCuDNNRnnAlgorithm algo = new LibMatrixCuDNNRnnAlgorithm(ec, gCtx, instName, \"lstm\", N, T, M, D, true, wPointer)) {\n- JCudnn.cudnnRNNForwardTraining(gCtx.getCudnnHandle(), algo.rnnDesc, T,\n+ try(LibMatrixCuDNNRnnAlgorithm algo = new LibMatrixCuDNNRnnAlgorithm(ec, gCtx, instName, \"lstm\", toInt(N), toInt(T),\n+ toInt(M), toInt(D), true, wPointer)) {\n+ JCudnn.cudnnRNNForwardTraining(gCtx.getCudnnHandle(), algo.rnnDesc, toInt(T),\nalgo.xDesc, x,\nalgo.hxDesc, hx,\nalgo.cxDesc, cx,\n@@ -947,7 +959,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nalgo.reserveSpace, algo.reserveSpaceSizeInBytes);\nPointer cudnnDx = gCtx.allocate(instName, N*T*D*LibMatrixCUDA.sizeOfDataType);\n- JCudnn.cudnnRNNBackwardData(gCtx.getCudnnHandle(), algo.rnnDesc, T,\n+ JCudnn.cudnnRNNBackwardData(gCtx.getCudnnHandle(), algo.rnnDesc, toInt(T),\nalgo.yDesc, yPointer,\n// ----------------------\n// Additional inputs:\n@@ -973,14 +985,14 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nPointer smlDx = getDenseOutputPointer(ec, gCtx, instName, dxName, N, T*D);\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_dinput\",\n- ExecutionConfig.getConfigForSimpleVectorOperations(N*T*D),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt(N*T*D)),\nsmlDx, cudnnDx, N, D, T*D, N*T*D);\nec.releaseMatrixOutputForGPUInstruction(dxName);\ngCtx.cudaFreeHelper(instName, cudnnDx, gCtx.EAGER_CUDA_FREE);\n// -------------------------------------------------------------------------------------------\nPointer cudnnDwPointer = gCtx.allocate(instName, (D+M+2)*(4*M)*LibMatrixCUDA.sizeOfDataType);\n- JCudnn.cudnnRNNBackwardWeights(gCtx.getCudnnHandle(), algo.rnnDesc, T,\n+ JCudnn.cudnnRNNBackwardWeights(gCtx.getCudnnHandle(), algo.rnnDesc, toInt(T),\nalgo.xDesc, x,\nalgo.hxDesc, hx,\nalgo.yDesc, yPointer,\n@@ -988,7 +1000,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nalgo.dwDesc, cudnnDwPointer,\nalgo.reserveSpace, algo.reserveSpaceSizeInBytes);\nLibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"prepare_lstm_dweight\",\n- ExecutionConfig.getConfigForSimpleVectorOperations((D+M+2)*(4*M)),\n+ ExecutionConfig.getConfigForSimpleVectorOperations(toInt((D+M+2)*(4*M))),\ngetDenseOutputPointer(ec, gCtx, instName, dwName, D+M, 4*M),\ngetDenseOutputPointer(ec, gCtx, instName, dbName, 1, 4*M), cudnnDwPointer, D, M);\ngCtx.cudaFreeHelper(instName, cudnnDwPointer, gCtx.EAGER_CUDA_FREE);\n@@ -1242,7 +1254,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n* @return jcuda pointer\n* @throws DMLRuntimeException if error occurs while sparse to dense conversion\n*/\n- public static Pointer getDensePointerForCuDNN(GPUContext gCtx, MatrixObject image, String instName, int numRows, int numCols) throws DMLRuntimeException {\n+ public static Pointer getDensePointerForCuDNN(GPUContext gCtx, MatrixObject image, String instName, long numRows, long numCols) throws DMLRuntimeException {\nlong numElems = 
image.getNumRows()*image.getNumColumns();\nif(image.getNumRows() != numRows || image.getNumColumns() != numCols) {\nthrow new DMLRuntimeException(\"Expected input of size:[\" + numRows + \", \" + numCols + \"], but found [\" + image.getNumRows() + \", \" + image.getNumColumns() + \"].\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNRnnAlgorithm.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNRnnAlgorithm.java", "diff": "@@ -32,6 +32,8 @@ import static jcuda.jcudnn.cudnnRNNInputMode.CUDNN_LINEAR_INPUT;\nimport static jcuda.jcudnn.cudnnDirectionMode.CUDNN_UNIDIRECTIONAL;\nimport static jcuda.jcudnn.cudnnRNNAlgo.CUDNN_RNN_ALGO_STANDARD;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\n@@ -44,6 +46,7 @@ import jcuda.jcudnn.cudnnRNNDescriptor;\nimport jcuda.jcudnn.cudnnTensorDescriptor;\npublic class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\n+ private static final Log LOG = LogFactory.getLog(LibMatrixCuDNNRnnAlgorithm.class.getName());\nGPUContext gCtx;\nString instName;\ncudnnDropoutDescriptor dropoutDesc;\n@@ -87,8 +90,11 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\nJCudnn.cudnnDropoutGetStatesSize(gCtx.getCudnnHandle(), _dropOutSizeInBytes);\ndropOutSizeInBytes = _dropOutSizeInBytes[0];\ndropOutStateSpace = new Pointer();\n- if (dropOutSizeInBytes != 0)\n+ if (dropOutSizeInBytes != 0) {\n+ if(LOG.isDebugEnabled())\n+ LOG.debug(\"Allocating \" + dropOutSizeInBytes + \" bytes for lstm dropout space.\");\ndropOutStateSpace = gCtx.allocate(instName, dropOutSizeInBytes);\n+ }\nJCudnn.cudnnSetDropoutDescriptor(dropoutDesc, gCtx.getCudnnHandle(), 0, dropOutStateSpace, dropOutSizeInBytes, 12345);\n// Initialize RNN descriptor\n@@ -109,55 +115,20 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\n// Setup workspace\nworkSpace = new Pointer(); reserveSpace = new Pointer();\nsizeInBytes = getWorkspaceSize(T);\n- if(sizeInBytes != 0)\n+ if(sizeInBytes != 0) {\n+ if(LOG.isDebugEnabled())\n+ LOG.debug(\"Allocating \" + sizeInBytes + \" bytes for lstm workspace.\");\nworkSpace = gCtx.allocate(instName, sizeInBytes);\n+ }\nreserveSpaceSizeInBytes = 0;\nif(isTraining) {\nreserveSpaceSizeInBytes = getReservespaceSize(T);\nif (reserveSpaceSizeInBytes != 0) {\n+ if(LOG.isDebugEnabled())\n+ LOG.debug(\"Allocating \" + reserveSpaceSizeInBytes + \" bytes for lstm reserve space.\");\nreserveSpace = gCtx.allocate(instName, reserveSpaceSizeInBytes);\n}\n}\n- /*\n- int numLinearLayers = getNumLinearLayers(rnnMode);\n- for(int i = 0; i < numLinearLayers; i++) {\n- cudnnFilterDescriptor linLayerMatDesc = new cudnnFilterDescriptor();\n- cudnnCreateFilterDescriptor(linLayerMatDesc);\n- Pointer linLayerMat = new Pointer();\n- JCudnn.cudnnGetRNNLinLayerMatrixParams(gCtx.getCudnnHandle(), rnnDesc, 0,\n- xDesc[0], wDesc, w, i, linLayerMatDesc, linLayerMat);\n- int[] dataType = new int[] {-1};\n- int[] format = new int[] {-1};\n- int[] nbDims = new int[] {-1};\n- int[] filterDimA = new int[3];\n- JCudnn.cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, dataType, format, nbDims, filterDimA);\n-\n- int filterDims = filterDimA[0] * filterDimA[1] * filterDimA[2];\n- double [] tmp = new double[filterDims];\n- 
LibMatrixCUDA.cudaSupportFunctions.deviceToHost(gCtx, linLayerMat, tmp, instName, false);\n- System.out.println();\n- for(int j = 0 ; j < tmp.length; j++) {\n- System.out.print(\" \" + tmp[j]);\n- }\n- System.out.println();\n- LibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"fill\",\n- org.apache.sysml.runtime.instructions.gpu.context.ExecutionConfig.getConfigForSimpleVectorOperations(filterDims),\n- linLayerMat, Math.pow(filterDims, -1), filterDims);\n- JCudnn.cudnnDestroyFilterDescriptor(linLayerMatDesc);\n-\n- cudnnFilterDescriptor linLayerBiasDesc = new cudnnFilterDescriptor();\n- cudnnCreateFilterDescriptor(linLayerBiasDesc);\n- Pointer linLayerBias = new Pointer();\n- JCudnn.cudnnGetRNNLinLayerBiasParams(gCtx.getCudnnHandle(), rnnDesc, 0,\n- xDesc[0], wDesc, w, i, linLayerBiasDesc, linLayerBias);\n- JCudnn.cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, dataType, format, nbDims, filterDimA);\n- filterDims = filterDimA[0] * filterDimA[1] * filterDimA[2];\n- LibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"fill\",\n- org.apache.sysml.runtime.instructions.gpu.context.ExecutionConfig.getConfigForSimpleVectorOperations(filterDims),\n- linLayerBias, Math.pow(filterDims, -1), filterDims);\n- JCudnn.cudnnDestroyFilterDescriptor(linLayerBiasDesc);\n- }\n- */\n}\n@SuppressWarnings(\"unused\")\n@@ -321,5 +292,6 @@ public class LibMatrixCuDNNRnnAlgorithm implements java.lang.AutoCloseable {\nthrow new RuntimeException(e);\n}\n}\n+ dropOutStateSpace = null;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -516,7 +516,8 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//this method is used as a short-hand for all operations that\n//guaranteed only deal with dense blocks of a single block.\nif( denseBlock != null && denseBlock.numBlocks() > 1 ) {\n- throw new RuntimeException(\"Large dense in-memory block (with numblocks=\"+denseBlock.numBlocks()+\") \"\n+ throw new RuntimeException(\"Large dense in-memory block (with numblocks=\"+denseBlock.numBlocks()+ \") with \"\n+ + \"dimensions [\" + getNumRows() + \", \" + getNumColumns() + \"] \"\n+ \"allocated but operation access to first block only, which might cause incorrect results.\");\n}\nreturn (denseBlock != null) ? denseBlock.valuesAt(0) : null;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Removed unnecessary long-to-int conversion in LSTM - Minor cleanup of the GPUObject class. - Also, fixed incorrect forced GPU configuration flag.
49,736
14.09.2018 12:17:11
25,200
4d8df33cc53583e71c4a488577270461e6f712e2
[MINOR] Add two helper utilities. First, a PersistentLRUCache to cache double[], float[] and MatrixBlock without requiring the user to worry about OOM. Second, a reblockAndWrite method in the MLContextUtil class to reblock the output of a DML script as rectangular blocked RDDs.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.api.mlcontext;\nimport java.io.File;\nimport java.io.FileNotFoundException;\n+import java.io.IOException;\nimport java.net.URL;\nimport java.util.ArrayList;\nimport java.util.Date;\n@@ -74,6 +75,7 @@ import org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.IntObject;\nimport org.apache.sysml.runtime.instructions.cp.StringObject;\nimport org.apache.sysml.runtime.instructions.cp.VariableCPInstruction;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.FrameBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n@@ -119,6 +121,35 @@ public final class MLContextUtil {\npublic static final Class[] ALL_SUPPORTED_DATA_TYPES = (Class[]) ArrayUtils.addAll(BASIC_DATA_TYPES,\nCOMPLEX_DATA_TYPES);\n+ /**\n+ * Utility method to write an output as rectangular blocked RDD\n+ *\n+ * @param spark spark session\n+ * @param dmlScript script that generates the outVariable\n+ * @param outVariable variable name\n+ * @param outFilePath output file path\n+ * @param rowsPerBlock number of rows per block\n+ * @param colsPerBlock number of columns per block\n+ * @throws IOException if error occurs\n+ */\n+ public static void reblockAndWrite(SparkSession spark, String dmlScript, String outVariable, String outFilePath, int rowsPerBlock, int colsPerBlock) throws IOException {\n+ MLContext ml = new MLContext(spark);\n+ Script helloScript = org.apache.sysml.api.mlcontext.ScriptFactory.dml(dmlScript).out(outVariable);\n+ MLResults res = ml.execute(helloScript);\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> rdd = res.getMatrix(outVariable).toBinaryBlocks();\n+ MatrixCharacteristics mc = res.getMatrix(outVariable).getMatrixMetadata().asMatrixCharacteristics();\n+ MatrixCharacteristics mcOut = new MatrixCharacteristics(mc);\n+ mcOut.setRowsPerBlock(rowsPerBlock);\n+ mcOut.setColsPerBlock(colsPerBlock);\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> out = org.apache.sysml.runtime.instructions.spark.utils.RDDAggregateUtils.mergeByKey(rdd.flatMapToPair(\n+ new org.apache.sysml.runtime.instructions.spark.functions.ExtractBlockForBinaryReblock(mc, mcOut)), false);\n+ out.saveAsHadoopFile(outFilePath, MatrixIndexes.class, MatrixBlock.class, org.apache.hadoop.mapred.SequenceFileOutputFormat.class);\n+ org.apache.sysml.runtime.util.MapReduceTool.writeMetaDataFile(outFilePath + \".mtd\",\n+ org.apache.sysml.parser.Expression.ValueType.DOUBLE, mcOut,\n+ org.apache.sysml.runtime.matrix.data.OutputInfo.BinaryBlockOutputInfo,\n+ new org.apache.sysml.runtime.io.FileFormatProperties());\n+ }\n+\n/**\n* Compare two version strings (ie, \"1.4.0\" and \"1.4.1\").\n*\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. 
You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.utils;\n+\n+import java.io.File;\n+import java.io.FileInputStream;\n+import java.io.FileNotFoundException;\n+import java.io.FileOutputStream;\n+import java.io.IOException;\n+import java.io.ObjectInputStream;\n+import java.io.ObjectOutputStream;\n+import java.lang.ref.SoftReference;\n+import java.nio.file.Files;\n+import java.util.LinkedHashMap;\n+import java.util.Map;\n+import java.util.Random;\n+import java.util.concurrent.atomic.AtomicLong;\n+\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\n+import org.apache.log4j.Level;\n+import org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.runtime.util.FastBufferedDataInputStream;\n+import org.apache.sysml.runtime.util.FastBufferedDataOutputStream;\n+\n+/**\n+ * Simple utility to store double[], float[] and MatrixBlock in-memory.\n+ * It is designed to guard against OOM by using soft reference as well as max capacity.\n+ * When memory is full or if capacity is exceeded, SimplePersistingCache stores the least recently used values into the local filesystem.\n+ * Assumption: GC occurs before an OutOfMemoryException, and GC requires prior finalize call.\n+ *\n+ * The user should use custom put and get methods:\n+ * - put(String key, double[] value);\n+ * - put(String key, float[] value);\n+ * - put(String key, MatrixBlock value);\n+ * - double [] getAsDoubleArray(String key);\n+ * - float [] getAsFloatArray(String key);\n+ * - MatrixBlock getAsMatrixBlock(String key);\n+ *\n+ * Additionally, the user can also use standard Map methods:\n+ * - void clear();\n+ * - boolean containsKey(String key)\n+ * - remove(String key);\n+ *\n+ * Instead of using generic types i.e. LinkedHashMap<String, ?>, we are allowing the cache to store values of different types.\n+ * ValueWrapper is a container in this case to store the actual values (i.e. double[]. float[] or MatrixBlock).\n+ *\n+ * The cache can be used in two modes:\n+ * - Read-only mode (only applicable for MatrixBlock keys):\n+ * = We delete the value when capacity is exceeded or when GC occurs.\n+ * = When get is invoked on the deleted key, the key is treated as the full path and MatrixBlock is read from that path.\n+ * = Note: in the current version, the metadata file is ignored and the file-format is assumed to be binary-block. 
We can extend this later.\n+ * - General case:\n+ * = We persist the values to the file system (into temporary directory) when capacity is exceeded or when GC occurs.\n+ * = When get is invoked on the deleted key, the key is treated as the file name (not the absolute path) and MatrixBlock is read from that path.\n+ *\n+ * This class does not assume minimum capacity and hence only soft references.\n+ *\n+ * To test this class, please use the below command:\n+ * java -cp systemml-*-standalone.jar:commons-lang3-3.8.jar org.apache.sysml.utils.PersistentLRUCache.\n+ */\n+public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n+ static final Log LOG = LogFactory.getLog(PersistentLRUCache.class.getName());\n+ private static final long serialVersionUID = -6838798881747433047L;\n+ private String _prefixFilePath;\n+ final AtomicLong _currentNumBytes = new AtomicLong();\n+ private final long _maxNumBytes;\n+ Random _rand = new Random();\n+ boolean isInReadOnlyMode;\n+\n+ public static void main(String [] args) throws IOException {\n+ org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);\n+ double numBytesInMB = 1e+7;\n+ int numDoubleInMB = (int) (numBytesInMB / 8);\n+ PersistentLRUCache cache = new PersistentLRUCache((long)(numBytesInMB*25));\n+ for(int i = 0; i < 30; ++i) {\n+ LOG.debug(\"Putting a double array of size 1MB.\");\n+ cache.put(\"file_\" + i, new double[numDoubleInMB]);\n+ }\n+ cache.clear();\n+ }\n+\n+ /**\n+ * When enabled, the cache will discard the values instead of writing it to the local file system.\n+ *\n+ * @return this\n+ */\n+ public PersistentLRUCache enableReadOnlyMode(boolean enable) {\n+ isInReadOnlyMode = enable;\n+ return this;\n+ }\n+\n+ /**\n+ * Creates a persisting cache\n+ * @param maxNumBytes maximum capacity in bytes\n+ * @throws IOException if unable to create a temporary directory on the local file system\n+ */\n+ public PersistentLRUCache(long maxNumBytes) throws IOException {\n+ _maxNumBytes = maxNumBytes;\n+ File tmp = Files.createTempDirectory(\"systemml_\" + Math.abs(_rand.nextLong())).toFile();\n+ tmp.deleteOnExit();\n+ _prefixFilePath = tmp.getAbsolutePath();\n+ }\n+ public ValueWrapper put(String key, double[] value) throws FileNotFoundException, IOException {\n+ return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this)), value.length*Double.BYTES);\n+ }\n+ public ValueWrapper put(String key, float[] value) throws FileNotFoundException, IOException {\n+ return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this)), value.length*Float.BYTES);\n+ }\n+ public ValueWrapper put(String key, MatrixBlock value) throws FileNotFoundException, IOException {\n+ return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this)), value.getInMemorySize());\n+ }\n+\n+ private ValueWrapper putImplm(String key, ValueWrapper value, long sizeInBytes) throws FileNotFoundException, IOException {\n+ ValueWrapper prev = null;\n+ if(containsKey(key))\n+ prev = remove(key);\n+ ensureCapacity(sizeInBytes);\n+ super.put(key, value);\n+ return prev;\n+ }\n+\n+ @Override\n+ public ValueWrapper remove(Object key) {\n+ ValueWrapper prev = super.remove(key);\n+ if(prev != null) {\n+ long size = prev.getSize();\n+ if(size > 0)\n+ _currentNumBytes.addAndGet(-size);\n+ prev.remove();\n+ }\n+ return prev;\n+ }\n+\n+ @Override\n+ public ValueWrapper put(String key, ValueWrapper value) {\n+ // super.put(key, value);\n+ throw new DMLRuntimeException(\"Incorrect usage: Value should be of type double[], float[], or MatrixBlock\");\n+ 
}\n+\n+ @Override\n+ public void putAll(Map<? extends String, ? extends ValueWrapper> m) {\n+ // super.putAll(m);\n+ throw new DMLRuntimeException(\"Incorrect usage: Value should be of type double[], float[], or MatrixBlock\");\n+ }\n+\n+ @Override\n+ public ValueWrapper get(Object key) {\n+ // return super.get(key);\n+ throw new DMLRuntimeException(\"Incorrect usage: Use getAsDoubleArray, getAsFloatArray or getAsMatrixBlock instead.\");\n+ }\n+\n+ void makeRecent(String key) {\n+ // super.get(key); // didn't work.\n+ ValueWrapper value = super.get(key);\n+ super.remove(key);\n+ super.put(key, value);\n+ }\n+\n+ @Override\n+ public void clear() {\n+ super.clear();\n+ _currentNumBytes.set(0);\n+ File tmp;\n+ try {\n+ tmp = Files.createTempDirectory(\"systemml_\" + Math.abs(_rand.nextLong())).toFile();\n+ tmp.deleteOnExit();\n+ _prefixFilePath = tmp.getAbsolutePath();\n+ } catch (IOException e) {\n+ throw new RuntimeException(\"Error occured while creating the temp directory.\", e);\n+ }\n+ }\n+\n+ Map.Entry<String, ValueWrapper> _eldest;\n+ @Override\n+ protected boolean removeEldestEntry(Map.Entry<String, ValueWrapper> eldest) {\n+ _eldest = eldest;\n+ return false; // Never ask LinkedHashMap to remove eldest entry, instead do that in ensureCapacity.\n+ }\n+\n+ float [] tmp = new float[0];\n+ String dummyKey = \"RAND_KEY_\" + Math.abs(_rand.nextLong()) + \"_\" + Math.abs(_rand.nextLong());\n+ void ensureCapacity(long newNumBytes) throws FileNotFoundException, IOException {\n+ if(newNumBytes > _maxNumBytes) {\n+ throw new DMLRuntimeException(\"Exceeds maximum capacity. Cannot put a value of size \" + newNumBytes +\n+ \" bytes as max capacity is \" + _maxNumBytes + \" bytes.\");\n+ }\n+ long newCapacity = _currentNumBytes.addAndGet(newNumBytes);\n+ if(newCapacity > _maxNumBytes) {\n+ synchronized(this) {\n+ if(LOG.isDebugEnabled())\n+ LOG.debug(\"The required capacity (\" + newCapacity + \") is greater than max capacity:\" + _maxNumBytes);\n+ ValueWrapper dummyValue = new ValueWrapper(new DataWrapper(dummyKey, tmp, this));\n+ int maxIter = size();\n+ while(_currentNumBytes.get() > _maxNumBytes && maxIter > 0) {\n+ super.put(dummyKey, dummyValue); // This will invoke removeEldestEntry, which will set _eldest\n+ remove(dummyKey);\n+ if(_eldest != null && _eldest.getKey() != dummyKey) {\n+ DataWrapper data = _eldest.getValue().get();\n+ if(data != null) {\n+ data.write(false); // Write the eldest entry to disk if not garbage collected.\n+ }\n+ makeRecent(_eldest.getKey()); // Make recent.\n+ }\n+ maxIter--;\n+ }\n+ }\n+ }\n+ }\n+\n+// public void put(String key, MatrixObject value) {\n+// _globalMap.put(key, new ValueWrapper(new DataWrapper(key, value, this)));\n+// }\n+\n+ String getFilePath(String key) {\n+ return _prefixFilePath + File.separator + key;\n+ }\n+\n+ public double [] getAsDoubleArray(String key) throws FileNotFoundException, IOException {\n+ ValueWrapper value = super.get(key);\n+ if(!value.isAvailable()) {\n+ // Fine-grained synchronization: only one read per key, but will allow parallel loading\n+ // of distinct keys.\n+ synchronized(value._lock) {\n+ if(!value.isAvailable()) {\n+ value.update(DataWrapper.loadDoubleArr(key, this));\n+ }\n+ }\n+ }\n+ DataWrapper ret = value.get();\n+ if(ret == null)\n+ throw new DMLRuntimeException(\"Potential race-condition with Java's garbage collector while loading the value in SimplePersistingCache.\");\n+ return ret._dArr;\n+ }\n+\n+ public float [] getAsFloatArray(String key) throws FileNotFoundException, IOException {\n+ ValueWrapper 
value = super.get(key);\n+ if(!value.isAvailable()) {\n+ // Fine-grained synchronization: only one read per key, but will allow parallel loading\n+ // of distinct keys.\n+ synchronized(value._lock) {\n+ if(!value.isAvailable()) {\n+ value.update(DataWrapper.loadFloatArr(key, this));\n+ }\n+ }\n+ }\n+ DataWrapper ret = value.get();\n+ if(ret == null)\n+ throw new DMLRuntimeException(\"Potential race-condition with Java's garbage collector while loading the value in SimplePersistingCache.\");\n+ return ret._fArr;\n+ }\n+\n+ public MatrixBlock getAsMatrixBlock(String key) throws FileNotFoundException, IOException {\n+ ValueWrapper value = super.get(key);\n+ if(!value.isAvailable()) {\n+ // Fine-grained synchronization: only one read per key, but will allow parallel loading\n+ // of distinct keys.\n+ synchronized(value._lock) {\n+ if(!value.isAvailable()) {\n+ value.update(DataWrapper.loadMatrixBlock(key, this, value._rlen, value._clen, value._nnz));\n+ }\n+ }\n+ }\n+ DataWrapper ret = value.get();\n+ if(ret == null)\n+ throw new DMLRuntimeException(\"Potential race-condition with Java's garbage collector while loading the value in SimplePersistingCache.\");\n+ return ret._mb;\n+ }\n+}\n+\n+// ----------------------------------------------------------------------------------------\n+// Internal helper class\n+class DataWrapper {\n+ double [] _dArr;\n+ float [] _fArr;\n+ MatrixBlock _mb;\n+ MatrixObject _mo;\n+ final PersistentLRUCache _cache;\n+ final String _key;\n+ DataWrapper(String key, double [] value, PersistentLRUCache cache) {\n+ _key = key;\n+ _dArr = value;\n+ _fArr = null;\n+ _mb = null;\n+ _mo = null;\n+ _cache = cache;\n+ }\n+ DataWrapper(String key, float [] value, PersistentLRUCache cache) {\n+ _key = key;\n+ _dArr = null;\n+ _fArr = value;\n+ _mb = null;\n+ _mo = null;\n+ _cache = cache;\n+ }\n+ DataWrapper(String key, MatrixBlock value, PersistentLRUCache cache) {\n+ _key = key;\n+ _dArr = null;\n+ _fArr = null;\n+ _mb = value;\n+ _mo = null;\n+ _cache = cache;\n+ }\n+ DataWrapper(String key, MatrixObject value, PersistentLRUCache cache) {\n+ _key = key;\n+ _dArr = null;\n+ _fArr = null;\n+ _mb = null;\n+ _mo = value;\n+ _cache = cache;\n+ }\n+ @Override\n+ protected void finalize() throws Throwable {\n+ super.finalize();\n+ write(true);\n+ }\n+\n+ public synchronized void write(boolean isBeingGarbageCollected) throws FileNotFoundException, IOException {\n+ if(_key.equals(_cache.dummyKey))\n+ return;\n+ _cache.makeRecent(_key); // Make it recent.\n+\n+ if(_dArr != null || _fArr != null || _mb != null || _mo != null) {\n+ _cache._currentNumBytes.addAndGet(-getSize());\n+ }\n+\n+ if(!_cache.isInReadOnlyMode) {\n+ String debugSuffix = null;\n+ if(PersistentLRUCache.LOG.isDebugEnabled()) {\n+ if(isBeingGarbageCollected)\n+ debugSuffix = \" (is being garbage collected).\";\n+ else\n+ debugSuffix = \" (capacity exceeded).\";\n+ }\n+\n+ if(_dArr != null) {\n+ try (ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(_cache.getFilePath(_key)))) {\n+ os.writeInt(_dArr.length);\n+ for(int i = 0; i < _dArr.length; i++) {\n+ os.writeDouble(_dArr[i]);\n+ }\n+ }\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Writing value (double[] of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n+ }\n+ else if(_fArr != null) {\n+ try (ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(_cache.getFilePath(_key)))) {\n+ os.writeInt(_fArr.length);\n+ for(int i = 0; i < _fArr.length; i++) {\n+ 
os.writeFloat(_fArr[i]);\n+ }\n+ }\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Writing value (float[] of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n+ }\n+ else if(_mb != null) {\n+ try(FastBufferedDataOutputStream os = new FastBufferedDataOutputStream(new ObjectOutputStream(new FileOutputStream(_cache.getFilePath(_key))))) {\n+ os.writeLong(_mb.getInMemorySize());\n+ _mb.write(os);\n+ }\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Writing value (MatrixBlock of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n+ }\n+ else if(_mo != null) {\n+ throw new DMLRuntimeException(\"Not implemented\");\n+ }\n+ else {\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Skipping writing of the key \" + _key + \" to disk as the value is already written\" + debugSuffix);\n+ }\n+ }\n+ _dArr = null; _fArr = null; _mb = null; _mo = null;\n+ }\n+\n+ static DataWrapper loadDoubleArr(String key, PersistentLRUCache cache) throws FileNotFoundException, IOException {\n+ if(cache.isInReadOnlyMode)\n+ throw new IOException(\"Read-only mode is only supported for MatrixBlock.\");\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Loading double array the key \" + key + \" from the disk.\");\n+ double [] ret;\n+ try (ObjectInputStream is = new ObjectInputStream(new FileInputStream(cache.getFilePath(key)))) {\n+ int size = is.readInt();\n+ cache.ensureCapacity(size*Double.BYTES);\n+ ret = new double[size];\n+ for(int i = 0; i < size; i++) {\n+ ret[i] = is.readDouble();\n+ }\n+ }\n+ return new DataWrapper(key, ret, cache);\n+ }\n+\n+ static DataWrapper loadFloatArr(String key, PersistentLRUCache cache) throws FileNotFoundException, IOException {\n+ if(cache.isInReadOnlyMode)\n+ throw new IOException(\"Read-only mode is only supported for MatrixBlock.\");\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Loading float array the key \" + key + \" from the disk.\");\n+ float [] ret;\n+ try (ObjectInputStream is = new ObjectInputStream(new FileInputStream(cache.getFilePath(key)))) {\n+ int size = is.readInt();\n+ cache.ensureCapacity(size*Float.BYTES);\n+ ret = new float[size];\n+ for(int i = 0; i < size; i++) {\n+ ret[i] = is.readFloat();\n+ }\n+ }\n+ return new DataWrapper(key, ret, cache);\n+ }\n+\n+ static DataWrapper loadMatrixBlock(String key,\n+ PersistentLRUCache cache, long rlen, long clen, long nnz) throws FileNotFoundException, IOException {\n+ if(PersistentLRUCache.LOG.isDebugEnabled())\n+ PersistentLRUCache.LOG.debug(\"Loading matrix block array the key \" + key + \" from the disk.\");\n+ MatrixBlock ret = null;\n+ if(cache.isInReadOnlyMode) {\n+ // Read from the filesystem in the read-only mode assuming binary-blocked format.\n+ // TODO: Read the meta-data file and remove the format requirement.\n+ ret = DataConverter.readMatrixFromHDFS(key,\n+ org.apache.sysml.runtime.matrix.data.InputInfo.BinaryBlockInputInfo, rlen, clen,\n+ ConfigurationManager.getBlocksize(), ConfigurationManager.getBlocksize(), nnz,\n+ new org.apache.sysml.runtime.io.FileFormatProperties());\n+ }\n+ else {\n+ try (FastBufferedDataInputStream is = new FastBufferedDataInputStream(new ObjectInputStream(new FileInputStream(cache.getFilePath(key))))) {\n+ long size = is.readLong();\n+ cache.ensureCapacity(size);\n+ ret = new MatrixBlock();\n+ ret.readFields(is);\n+ }\n+ }\n+ return new DataWrapper(key, 
ret, cache);\n+ }\n+\n+ void remove() {\n+ File file = new File(_cache.getFilePath(_key));\n+ if(file.exists()) {\n+ file.delete();\n+ }\n+ }\n+\n+ long getSize() {\n+ if(_dArr != null)\n+ return _dArr.length*Double.BYTES;\n+ else if(_fArr != null)\n+ return _fArr.length*Float.BYTES;\n+ else if(_fArr != null)\n+ return _mb.getInMemorySize();\n+ else\n+ throw new DMLRuntimeException(\"Not implemented\");\n+ }\n+\n+}\n+\n+// Internal helper class\n+class ValueWrapper {\n+ final Object _lock;\n+ private SoftReference<DataWrapper> _ref;\n+ long _rlen;\n+ long _clen;\n+ long _nnz;\n+\n+ ValueWrapper(DataWrapper _data) {\n+ _lock = new Object();\n+ _ref = new SoftReference<>(_data);\n+ if(_data._mb != null) {\n+ _rlen = _data._mb.getNumRows();\n+ _clen = _data._mb.getNumColumns();\n+ _nnz = _data._mb.getNonZeros();\n+ }\n+ }\n+ void update(DataWrapper _data) {\n+ _ref = new SoftReference<>(_data);\n+ if(_data._mb != null) {\n+ _rlen = _data._mb.getNumRows();\n+ _clen = _data._mb.getNumColumns();\n+ _nnz = _data._mb.getNonZeros();\n+ }\n+ }\n+ boolean isAvailable() {\n+ return _ref.get() != null;\n+ }\n+ DataWrapper get() {\n+ return _ref.get();\n+ }\n+ long getSize() {\n+ DataWrapper data = _ref.get();\n+ if(data != null)\n+ return data.getSize();\n+ else\n+ return 0;\n+ }\n+ void remove() {\n+ DataWrapper data = _ref.get();\n+ if(data != null) {\n+ data.remove();\n+ }\n+ }\n+}\n+\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add two helper utilities. - First, PersistentLRUCache to cache double[], float[] and MatrixBlock without requiring the user to worry about OOM. - Second, reblockAndWrite method in MLContextUtil class to reblock the output of a DML script as rectangular blocked RDDs.
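For context, a minimal usage sketch of the PersistentLRUCache API introduced in the diff above (the constructor taking a byte capacity, put(String, double[]), getAsDoubleArray(String), and clear() all appear in the commit); the capacity, key name, and array size below are illustrative values, not taken from the commit.

    import java.io.IOException;
    import org.apache.sysml.utils.PersistentLRUCache;

    public class PersistentLRUCacheSketch {
        public static void main(String[] args) throws IOException {
            // Roughly 25 MB capacity; entries pushed out of memory are spilled to a temp directory.
            PersistentLRUCache cache = new PersistentLRUCache(25L * 1024 * 1024);
            cache.put("weights", new double[1_000_000]);           // ~8 MB double[]
            double[] restored = cache.getAsDoubleArray("weights"); // re-read, possibly from disk
            System.out.println(restored.length);
            cache.clear(); // drops entries and resets the temporary directory
        }
    }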
49,736
17.09.2018 11:12:26
25,200
1d2f4b630ebf800be5009b182880b03682077ccd
Acquire read lock before copying from host to device
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -570,6 +570,7 @@ public class GPUObject {\nLOG.trace(\"GPU : acquireDeviceRead on \" + this);\n}\nboolean transferred = false;\n+ addReadLock();\nif (!isAllocated()) {\nif(LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : in acquireDeviceRead, data is not allocated, copying from host, on \" + this + \", GPUContext=\"\n@@ -578,7 +579,6 @@ public class GPUObject {\ncopyFromHostToDevice(opcode);\ntransferred = true;\n}\n- addReadLock();\nif (!isAllocated())\nthrow new DMLRuntimeException(\"Expected device data to be allocated\");\nreturn transferred;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Acquire read lock before copying from host to device
49,741
17.09.2018 14:31:31
25,200
104b20e0bca906a6b76f962145254f5a1fb02ba6
[MINOR] Fixes a minor bug in LRU cache Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "new_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "diff": "@@ -462,7 +462,7 @@ class DataWrapper {\nreturn _dArr.length*Double.BYTES;\nelse if(_fArr != null)\nreturn _fArr.length*Float.BYTES;\n- else if(_fArr != null)\n+ else if(_mb != null)\nreturn _mb.getInMemorySize();\nelse\nthrow new DMLRuntimeException(\"Not implemented\");\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixes a minor bug in LRU cache Closes #834.
49,736
17.09.2018 15:12:59
25,200
d2894feea6b274db46149c44fb697aa1c998fdca
[MINOR] Throw an error if the user attempts to put null keys Also, added checks to verify persisted keys for debugging purposes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "new_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "diff": "@@ -27,6 +27,7 @@ import java.io.ObjectInputStream;\nimport java.io.ObjectOutputStream;\nimport java.lang.ref.SoftReference;\nimport java.nio.file.Files;\n+import java.util.HashSet;\nimport java.util.LinkedHashMap;\nimport java.util.Map;\nimport java.util.Random;\n@@ -87,6 +88,7 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\nprivate final long _maxNumBytes;\nRandom _rand = new Random();\nboolean isInReadOnlyMode;\n+ HashSet<String> persistedKeys = new HashSet<>();\npublic static void main(String [] args) throws IOException {\norg.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);\n@@ -132,6 +134,8 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n}\nprivate ValueWrapper putImplm(String key, ValueWrapper value, long sizeInBytes) throws FileNotFoundException, IOException {\n+ if(key == null)\n+ throw new IOException(\"Null keys are not supported by PersistentLRUCache\");\nValueWrapper prev = null;\nif(containsKey(key))\nprev = remove(key);\n@@ -237,6 +241,10 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n}\npublic double [] getAsDoubleArray(String key) throws FileNotFoundException, IOException {\n+ if(key == null)\n+ throw new IOException(\"Null keys are not supported by PersistentLRUCache\");\n+ if(!containsKey(key))\n+ throw new DMLRuntimeException(\"The map doesnot contains the given key:\" + key);\nValueWrapper value = super.get(key);\nif(!value.isAvailable()) {\n// Fine-grained synchronization: only one read per key, but will allow parallel loading\n@@ -254,6 +262,10 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n}\npublic float [] getAsFloatArray(String key) throws FileNotFoundException, IOException {\n+ if(key == null)\n+ throw new DMLRuntimeException(\"Null keys are not supported by PersistentLRUCache\");\n+ if(!containsKey(key))\n+ throw new DMLRuntimeException(\"The map doesnot contains the given key:\" + key);\nValueWrapper value = super.get(key);\nif(!value.isAvailable()) {\n// Fine-grained synchronization: only one read per key, but will allow parallel loading\n@@ -271,6 +283,10 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n}\npublic MatrixBlock getAsMatrixBlock(String key) throws FileNotFoundException, IOException {\n+ if(key == null)\n+ throw new DMLRuntimeException(\"Null keys are not supported by PersistentLRUCache\");\n+ if(!containsKey(key))\n+ throw new DMLRuntimeException(\"The map doesnot contains the given key:\" + key);\nValueWrapper value = super.get(key);\nif(!value.isAvailable()) {\n// Fine-grained synchronization: only one read per key, but will allow parallel loading\n@@ -360,6 +376,7 @@ class DataWrapper {\nos.writeDouble(_dArr[i]);\n}\n}\n+ _cache.persistedKeys.add(_key);\nif(PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Writing value (double[] of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n}\n@@ -370,6 +387,7 @@ class DataWrapper {\nos.writeFloat(_fArr[i]);\n}\n}\n+ _cache.persistedKeys.add(_key);\nif(PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Writing value (float[] of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n}\n@@ -378,6 +396,7 @@ class DataWrapper 
{\nos.writeLong(_mb.getInMemorySize());\n_mb.write(os);\n}\n+ _cache.persistedKeys.add(_key);\nif(PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Writing value (MatrixBlock of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n}\n@@ -385,16 +404,24 @@ class DataWrapper {\nthrow new DMLRuntimeException(\"Not implemented\");\n}\nelse {\n- if(PersistentLRUCache.LOG.isDebugEnabled())\n+ if(_cache.persistedKeys.contains(_key) && PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Skipping writing of the key \" + _key + \" to disk as the value is already written\" + debugSuffix);\n+ else\n+ throw new DMLRuntimeException(\"None of the container objects (double[], float[], MatrixBlock, ...) is not null and the key has not yet been persisted\");\n}\n}\n_dArr = null; _fArr = null; _mb = null; _mo = null;\n}\n+ boolean isAvailable() {\n+ return _dArr != null || _fArr != null || _mb != null || _mo != null;\n+ }\n+\nstatic DataWrapper loadDoubleArr(String key, PersistentLRUCache cache) throws FileNotFoundException, IOException {\nif(cache.isInReadOnlyMode)\n- throw new IOException(\"Read-only mode is only supported for MatrixBlock.\");\n+ throw new DMLRuntimeException(\"Read-only mode is only supported for MatrixBlock.\");\n+ if(!cache.persistedKeys.contains(key))\n+ throw new DMLRuntimeException(\"Cannot load the key that has not been persisted: \" + key);\nif(PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Loading double array the key \" + key + \" from the disk.\");\ndouble [] ret;\n@@ -411,7 +438,9 @@ class DataWrapper {\nstatic DataWrapper loadFloatArr(String key, PersistentLRUCache cache) throws FileNotFoundException, IOException {\nif(cache.isInReadOnlyMode)\n- throw new IOException(\"Read-only mode is only supported for MatrixBlock.\");\n+ throw new DMLRuntimeException(\"Read-only mode is only supported for MatrixBlock.\");\n+ if(!cache.persistedKeys.contains(key))\n+ throw new DMLRuntimeException(\"Cannot load the key that has not been persisted: \" + key);\nif(PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Loading float array the key \" + key + \" from the disk.\");\nfloat [] ret;\n@@ -430,6 +459,8 @@ class DataWrapper {\nPersistentLRUCache cache, long rlen, long clen, long nnz) throws FileNotFoundException, IOException {\nif(PersistentLRUCache.LOG.isDebugEnabled())\nPersistentLRUCache.LOG.debug(\"Loading matrix block array the key \" + key + \" from the disk.\");\n+ if(!cache.persistedKeys.contains(key))\n+ throw new DMLRuntimeException(\"Cannot load the key that has not been persisted: \" + key);\nMatrixBlock ret = null;\nif(cache.isInReadOnlyMode) {\n// Read from the filesystem in the read-only mode assuming binary-blocked format.\n@@ -453,6 +484,7 @@ class DataWrapper {\nvoid remove() {\nFile file = new File(_cache.getFilePath(_key));\nif(file.exists()) {\n+ _cache.persistedKeys.remove(_key);\nfile.delete();\n}\n}\n@@ -496,7 +528,8 @@ class ValueWrapper {\n}\n}\nboolean isAvailable() {\n- return _ref.get() != null;\n+ DataWrapper data = _ref.get();\n+ return data != null && data.isAvailable();\n}\nDataWrapper get() {\nreturn _ref.get();\n@@ -515,4 +548,3 @@ class ValueWrapper {\n}\n}\n}\n-\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Throw an error if the user attempts to put null keys - Also, added checks to verify persisted keys for debugging purposes
49,738
18.09.2018 22:02:03
-7,200
0e323ec26c4e7d3a11b96c83d5d1047a956d44ae
[MINOR] Fix codegen register allocation w/ dynamic buffer mgmt This patch fixes minor warnings and a bug in determining the minimum number of vector intermediates in case of dynamic buffer management, which did not show up before because we're using static buffer management by default.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -531,7 +531,7 @@ public class TemplateUtils\npublic static void getAllParents(CNode node, Map<Long, Set<Long>> parents) {\nfor( CNode c : node.getInput() ) {\n- if( !parents.containsKey(c) )\n+ if( !parents.containsKey(c.getID()) )\nparents.put(c.getID(), new HashSet<>());\nparents.get(c.getID()).add(node.getID());\ngetAllParents(c, parents);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopDagPatternMatcher.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopDagPatternMatcher.java", "diff": "@@ -24,7 +24,6 @@ import java.util.HashSet;\nimport java.util.List;\nimport java.util.function.Function;\nimport java.util.function.Predicate;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.hops.AggUnaryOp;\nimport org.apache.sysml.hops.Hop;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "diff": "@@ -1849,6 +1849,7 @@ public class ParForStatementBlock extends ForStatementBlock\npublic String toString() {\nreturn _name;\n}\n+ @SuppressWarnings(\"unlikely-arg-type\")\npublic static boolean contains(Collection<ResultVar> list, String varName) {\n//helper function which is necessary because list.contains checks\n//varName.equals(rvar) which always returns false because it not a string\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -59,7 +59,6 @@ public class DenseBlockLDRB extends DenseBlock\nreset(rlen, clen, blen, v);\n}\n- @SuppressWarnings(\"resource\")\nprivate void reset(int rlen, int clen, int blen, double v) {\nlong llen = (long) rlen * clen;\nint numPart = (int)Math.ceil((double)rlen / blen);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix codegen register allocation w/ dynamic buffer mgmt This patch fixes minor warnings and a bug in determining the minimum number of vector intermediates in case of dynamic buffer management, which did not show up before because we're using static buffer management by default.
49,736
19.09.2018 09:19:30
25,200
3fbfbaecb9d1e31341df8084ff28035bede47766
Dynamically decide whether to perform float-to-double conversion in the single precision mode on the host or device Fixed an int-to-long conversion bug in the shadow buffer. Updated javadocs for GPULazyCudaFreeMemoryManager.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPULazyCudaFreeMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPULazyCudaFreeMemoryManager.java", "diff": "@@ -45,6 +45,7 @@ public class GPULazyCudaFreeMemoryManager {\n*/\nprivate HashMap<Long, Set<Pointer>> rmvarGPUPointers = new HashMap<Long, Set<Pointer>>();\n+\n/**\n* Get any pointer of the given size from rmvar-ed pointers (applicable if eager cudaFree is set to false)\n*\n@@ -85,10 +86,17 @@ public class GPULazyCudaFreeMemoryManager {\nGPUStatistics.maintainCPMiscTimes(opcode, instructionLevelTimer, System.nanoTime() - startTime);\n}\n+ /**\n+ *\n+ * @return set of all pointers managed by this memory manager.\n+ */\npublic Set<Pointer> getAllPointers() {\nreturn rmvarGPUPointers.values().stream().flatMap(ptrs -> ptrs.stream()).collect(Collectors.toSet());\n}\n+ /**\n+ * Frees up all the cached rmvar-ed pointers\n+ */\npublic void clearAll() {\nSet<Pointer> toFree = new HashSet<Pointer>();\nfor(Set<Pointer> ptrs : rmvarGPUPointers.values()) {\n@@ -100,9 +108,16 @@ public class GPULazyCudaFreeMemoryManager {\n}\n}\n+ /**\n+ * Helper method to get the rmvar pointer that is greater than equal to min size\n+ *\n+ * @param opcode instruction name\n+ * @param minSize size in bytes\n+ * @return the rmvar pointer that is greater than equal to min size\n+ * @throws DMLRuntimeException if error\n+ */\npublic Pointer getRmvarPointerMinSize(String opcode, long minSize) throws DMLRuntimeException {\n- Optional<Long> toClear = rmvarGPUPointers.entrySet().stream().filter(e -> e.getValue().size() > 0).map(e -> e.getKey())\n- .filter(size -> size >= minSize).min((s1, s2) -> s1 < s2 ? -1 : 1);\n+ Optional<Long> toClear = getRmvarSize(minSize);\nif(toClear.isPresent()) {\nboolean measureTime = opcode != null && ConfigurationManager.isFinegrainedStatistics();\nlong t0 = measureTime ? System.nanoTime() : 0;\n@@ -118,6 +133,38 @@ public class GPULazyCudaFreeMemoryManager {\nreturn null;\n}\n+ /**\n+ * Helper method to check if the lazy memory manager contains a pointer of the given size\n+ *\n+ * @param opcode instruction name\n+ * @param size size in bytes\n+ * @return true if the lazy memory manager contains a pointer of the given size\n+ */\n+ boolean contains(String opcode, long size) {\n+ return rmvarGPUPointers.containsKey(size);\n+ }\n+\n+ /**\n+ * Helper method to check if the lazy memory manager contains a pointer >= minSize\n+ *\n+ * @param opcode instruction name\n+ * @param minSize size in bytes\n+ * @return true if the lazy memory manager contains a pointer >= minSize\n+ */\n+ boolean containsRmvarPointerMinSize(String opcode, long minSize) {\n+ return getRmvarSize(minSize).isPresent();\n+ }\n+\n+ /**\n+ * Helper method to get the size of rmvar pointer that is greater than equal to min size\n+ *\n+ * @param minSize size in bytes\n+ * @return size of rmvar pointer that is >= minSize\n+ */\n+ private Optional<Long> getRmvarSize(long minSize) {\n+ return rmvarGPUPointers.entrySet().stream().filter(e -> e.getValue().size() > 0).map(e -> e.getKey())\n+ .filter(size -> size >= minSize).min((s1, s2) -> s1 < s2 ? 
-1 : 1);\n+ }\n/**\n* Remove any pointer in the given hashmap\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -241,6 +241,10 @@ public class GPUMemoryManager {\n}\n}\n+ public boolean canAllocateWithoutEviction(String opcode, long size) {\n+ return lazyCudaFreeMemoryManager.contains(opcode, size) || allocator.canAllocate(size) ||\n+ lazyCudaFreeMemoryManager.containsRmvarPointerMinSize(opcode, size) ;\n+ }\n/**\n* Allocate pointer of the given size in bytes.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "diff": "@@ -74,8 +74,8 @@ public class ShadowBuffer {\nlong start = ConfigurationManager.isStatistics() ? System.nanoTime() : 0;\nint numElems = GPUObject.toIntExact(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\nshadowPointer = new float[numElems];\n- EVICTION_SHADOW_BUFFER_CURR_BYTES += shadowPointer.length*Sizeof.FLOAT;\n- cudaMemcpy(Pointer.to(shadowPointer), gpuObj.jcudaDenseMatrixPtr, numElems*LibMatrixCUDA.sizeOfDataType, jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\n+ EVICTION_SHADOW_BUFFER_CURR_BYTES += getSizeOfFloat(shadowPointer.length);\n+ cudaMemcpy(Pointer.to(shadowPointer), gpuObj.jcudaDenseMatrixPtr, getSizeOfDataType(numElems), jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\ngpuObj.getGPUContext().cudaFreeHelper(instName, gpuObj.jcudaDenseMatrixPtr, true);\ngpuObj.jcudaDenseMatrixPtr = null;\nif (ConfigurationManager.isStatistics()) {\n@@ -87,6 +87,14 @@ public class ShadowBuffer {\n}\n}\n+ private long getSizeOfFloat(long numElems) {\n+ return numElems*Sizeof.FLOAT;\n+ }\n+\n+ private long getSizeOfDataType(long numElems) {\n+ return numElems*LibMatrixCUDA.sizeOfDataType;\n+ }\n+\n/**\n* Move the data from shadow buffer to Matrix object\n*/\n@@ -117,7 +125,7 @@ public class ShadowBuffer {\n*/\npublic void moveToDevice() {\nlong start = ConfigurationManager.isStatistics() ? System.nanoTime() : 0;\n- long numBytes = shadowPointer.length*LibMatrixCUDA.sizeOfDataType;\n+ long numBytes = getSizeOfDataType(shadowPointer.length);\ngpuObj.jcudaDenseMatrixPtr = gpuObj.getGPUContext().allocate(null, numBytes);\ncudaMemcpy(gpuObj.jcudaDenseMatrixPtr, Pointer.to(shadowPointer), numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\nclearShadowPointer();\n@@ -137,7 +145,7 @@ public class ShadowBuffer {\n*/\npublic boolean isEligibleForBuffering(boolean isEviction, boolean eagerDelete) {\nif(LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.FLOAT && isEviction && eagerDelete && !gpuObj.isDensePointerNull()) {\n- int numBytes = GPUObject.toIntExact(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns())*Sizeof.FLOAT;\n+ long numBytes = getSizeOfFloat(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\nboolean ret = EVICTION_SHADOW_BUFFER_CURR_BYTES + numBytes <= EVICTION_SHADOW_BUFFER_MAX_BYTES;\nif(!ret && !_warnedAboutShadowBuffer) {\nLOG.warn(\"Shadow buffer is full, so using CP bufferpool instead. 
Consider increasing sysml.gpu.eviction.shadow.bufferSize.\");\n@@ -155,7 +163,7 @@ public class ShadowBuffer {\n*/\npublic void clearShadowPointer() {\nif(shadowPointer != null) {\n- EVICTION_SHADOW_BUFFER_CURR_BYTES -= shadowPointer.length*Sizeof.FLOAT;\n+ EVICTION_SHADOW_BUFFER_CURR_BYTES -= getSizeOfFloat(shadowPointer.length);\n}\nshadowPointer = null;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/CudaSupportFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/CudaSupportFunctions.java", "diff": "@@ -55,7 +55,6 @@ import jcuda.Pointer;\n* 3. During SystemML initialization, the appropriate class implementing CudaKernels interface is set based on the configuration property sysml.dataType.\n*/\npublic interface CudaSupportFunctions {\n- public static boolean PERFORM_CONVERSION_ON_DEVICE = true;\npublic int cusparsecsrgemm(cusparseHandle handle, int transA, int transB, int m, int n, int k,\ncusparseMatDescr descrA, int nnzA, Pointer csrValA, Pointer csrRowPtrA, Pointer csrColIndA,\ncusparseMatDescr descrB, int nnzB, Pointer csrValB, Pointer csrRowPtrB, Pointer csrColIndB,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SinglePrecisionCudaSupportFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SinglePrecisionCudaSupportFunctions.java", "diff": "@@ -178,17 +178,19 @@ public class SinglePrecisionCudaSupportFunctions implements CudaSupportFunctions\n// However, the conversion requires an additional space to be allocated for the conversion, which can lead to infinite recursion\n// during eviction: `evict -> devictToHost -> float2double -> allocate -> ensureFreeSpace -> evict`.\n// To avoid this recursion, it is necessary to perform this conversion in host.\n- if(PERFORM_CONVERSION_ON_DEVICE && !isEviction) {\n- Pointer deviceDoubleData = gCtx.allocate(instName, ((long)dest.length)*Sizeof.DOUBLE);\n+ if(gCtx.getMemoryManager().canAllocateWithoutEviction(instName, sizeOfDouble(dest.length)) && !isEviction) {\n+ Pointer deviceDoubleData = gCtx.allocate(instName, sizeOfDouble(dest.length));\nLibMatrixCUDA.float2double(gCtx, src, deviceDoubleData, dest.length);\n- cudaMemcpy(Pointer.to(dest), deviceDoubleData, ((long)dest.length)*Sizeof.DOUBLE, cudaMemcpyDeviceToHost);\n+ cudaMemcpy(Pointer.to(dest), deviceDoubleData, sizeOfDouble(dest.length), cudaMemcpyDeviceToHost);\ngCtx.cudaFreeHelper(instName, deviceDoubleData, gCtx.EAGER_CUDA_FREE);\n}\nelse {\nLOG.debug(\"Potential OOM: Allocated additional space on host in deviceToHost\");\n- FloatBuffer floatData = ByteBuffer.allocateDirect(Sizeof.FLOAT*dest.length).order(ByteOrder.nativeOrder()).asFloatBuffer();\n- cudaMemcpy(Pointer.to(floatData), src, ((long)dest.length)*Sizeof.FLOAT, cudaMemcpyDeviceToHost);\n- LibMatrixNative.fromFloatBuffer(floatData, dest);\n+ float[] floatData = new float[dest.length];\n+ cudaMemcpy(Pointer.to(floatData), src, sizeOfFloat(dest.length), cudaMemcpyDeviceToHost);\n+ for(int i = 0; i < dest.length; i++) {\n+ dest[i] = floatData[i];\n+ }\n}\nif(ConfigurationManager.isStatistics()) {\nlong totalTime = System.nanoTime() - t0;\n@@ -204,16 +206,19 @@ public class SinglePrecisionCudaSupportFunctions implements CudaSupportFunctions\nLOG.debug(\"Potential OOM: Allocated additional space in hostToDevice\");\n// TODO: Perform conversion on GPU using double2float and float2double kernels\nlong t0 = ConfigurationManager.isStatistics() ? 
System.nanoTime() : 0;\n- if(PERFORM_CONVERSION_ON_DEVICE) {\n- Pointer deviceDoubleData = gCtx.allocate(instName, ((long)src.length)*Sizeof.DOUBLE);\n- cudaMemcpy(deviceDoubleData, Pointer.to(src), ((long)src.length)*Sizeof.DOUBLE, cudaMemcpyHostToDevice);\n+ if(gCtx.getMemoryManager().canAllocateWithoutEviction(instName, sizeOfDouble(src.length))) {\n+ Pointer deviceDoubleData = gCtx.allocate(instName, sizeOfDouble(src.length));\n+ cudaMemcpy(deviceDoubleData, Pointer.to(src), sizeOfDouble(src.length), cudaMemcpyHostToDevice);\nLibMatrixCUDA.double2float(gCtx, deviceDoubleData, dest, src.length);\ngCtx.cudaFreeHelper(instName, deviceDoubleData, gCtx.EAGER_CUDA_FREE);\n}\nelse {\n- FloatBuffer floatData = ByteBuffer.allocateDirect(Sizeof.FLOAT*src.length).order(ByteOrder.nativeOrder()).asFloatBuffer();\n- IntStream.range(0, src.length).parallel().forEach(i -> floatData.put(i, (float)src[i]));\n- cudaMemcpy(dest, Pointer.to(floatData), ((long)src.length)*Sizeof.FLOAT, cudaMemcpyHostToDevice);\n+ LOG.debug(\"Potential OOM: Allocated additional space on host in hostToDevice\");\n+ float[] floatData = new float[src.length];\n+ for(int i = 0; i < src.length; i++) {\n+ floatData[i] = (float) src[i];\n+ }\n+ cudaMemcpy(dest, Pointer.to(floatData), sizeOfFloat(src.length), cudaMemcpyHostToDevice);\n}\nif(ConfigurationManager.isStatistics()) {\n@@ -224,4 +229,12 @@ public class SinglePrecisionCudaSupportFunctions implements CudaSupportFunctions\nGPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_HOST_TO_DEVICE, totalTime);\n}\n}\n+\n+ private long sizeOfFloat(long numElems) {\n+ return Sizeof.FLOAT*numElems;\n+ }\n+\n+ private long sizeOfDouble(long numElems) {\n+ return Sizeof.DOUBLE*numElems;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "new_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "diff": "@@ -78,7 +78,7 @@ import org.apache.sysml.runtime.util.FastBufferedDataOutputStream;\n* This class does not assume minimum capacity and hence only soft references.\n*\n* To test this class, please use the below command:\n- * java -cp systemml-*-standalone.jar:commons-lang3-3.8.jar org.apache.sysml.utils.PersistentLRUCache.\n+ * java -cp systemml-1.3.0-SNAPSHOT-standalone.jar:commons-lang3-3.8.jar org.apache.sysml.utils.PersistentLRUCache\n*/\npublic class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\nstatic final Log LOG = LogFactory.getLog(PersistentLRUCache.class.getName());\n@@ -93,11 +93,14 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\npublic static void main(String [] args) throws IOException {\norg.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);\ndouble numBytesInMB = 1e+7;\n- int numDoubleInMB = (int) (numBytesInMB / 8);\n- PersistentLRUCache cache = new PersistentLRUCache((long)(numBytesInMB*25));\n- for(int i = 0; i < 30; ++i) {\n- LOG.debug(\"Putting a double array of size 1MB.\");\n- cache.put(\"file_\" + i, new double[numDoubleInMB]);\n+ int numDoubleIn50MB = (int) (50.0*numBytesInMB / 8.0);\n+ long maxMemory = Runtime.getRuntime().maxMemory();\n+ double multiplier = 2.0; // 0.3; // Use value > 1 to test GC and < 1 to test max capacity\n+ PersistentLRUCache cache = new PersistentLRUCache((long)(maxMemory*multiplier));\n+ long numIter = (long) ((3.0*maxMemory) / numBytesInMB);\n+ for(long i = 0; i < numIter; ++i) {\n+ LOG.debug(\"Putting a double array of size 50MB.\");\n+ cache.put(\"file_\" + i, new 
double[numDoubleIn50MB]);\n}\ncache.clear();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Dynamically decide whether to perform float-to-double conversion in the single precision mode on the host or device - Fixed an int-to-long conversion bug in the shadow buffer. - Updated javadocs for GPULazyCudaFreeMemoryManager.
49,736
20.09.2018 10:44:27
25,200
69f2d377c456f9baea1e248818d544b54ee00e6f
Write to disk when the cache is used in the write-mode This avoids the need to depend on finalize to perform writing.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "new_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "diff": "@@ -86,7 +86,7 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\nprivate String _prefixFilePath;\nfinal AtomicLong _currentNumBytes = new AtomicLong();\nprivate final long _maxNumBytes;\n- Random _rand = new Random();\n+ private static final Random _rand = new Random();\nboolean isInReadOnlyMode;\nHashSet<String> persistedKeys = new HashSet<>();\n@@ -101,6 +101,9 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\nfor(long i = 0; i < numIter; ++i) {\nLOG.debug(\"Putting a double array of size 50MB.\");\ncache.put(\"file_\" + i, new double[numDoubleIn50MB]);\n+ try {\n+ Thread.sleep(100);\n+ } catch (InterruptedException e) {}\n}\ncache.clear();\n}\n@@ -127,13 +130,13 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n_prefixFilePath = tmp.getAbsolutePath();\n}\npublic ValueWrapper put(String key, double[] value) throws FileNotFoundException, IOException {\n- return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this)), value.length*Double.BYTES);\n+ return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this), isInReadOnlyMode), value.length*Double.BYTES);\n}\npublic ValueWrapper put(String key, float[] value) throws FileNotFoundException, IOException {\n- return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this)), value.length*Float.BYTES);\n+ return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this), isInReadOnlyMode), value.length*Float.BYTES);\n}\npublic ValueWrapper put(String key, MatrixBlock value) throws FileNotFoundException, IOException {\n- return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this)), value.getInMemorySize());\n+ return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this), isInReadOnlyMode), value.getInMemorySize());\n}\nprivate ValueWrapper putImplm(String key, ValueWrapper value, long sizeInBytes) throws FileNotFoundException, IOException {\n@@ -206,7 +209,7 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\n}\nfloat [] tmp = new float[0];\n- String dummyKey = \"RAND_KEY_\" + Math.abs(_rand.nextLong()) + \"_\" + Math.abs(_rand.nextLong());\n+ static String dummyKey = \"RAND_KEY_\" + Math.abs(_rand.nextLong()) + \"_\" + Math.abs(_rand.nextLong());\nvoid ensureCapacity(long newNumBytes) throws FileNotFoundException, IOException {\nif(newNumBytes > _maxNumBytes) {\nthrow new DMLRuntimeException(\"Exceeds maximum capacity. 
Cannot put a value of size \" + newNumBytes +\n@@ -217,7 +220,7 @@ public class PersistentLRUCache extends LinkedHashMap<String, ValueWrapper> {\nsynchronized(this) {\nif(LOG.isDebugEnabled())\nLOG.debug(\"The required capacity (\" + newCapacity + \") is greater than max capacity:\" + _maxNumBytes);\n- ValueWrapper dummyValue = new ValueWrapper(new DataWrapper(dummyKey, tmp, this));\n+ ValueWrapper dummyValue = new ValueWrapper(new DataWrapper(dummyKey, tmp, this), isInReadOnlyMode);\nint maxIter = size();\nwhile(_currentNumBytes.get() > _maxNumBytes && maxIter > 0) {\nsuper.put(dummyKey, dummyValue); // This will invoke removeEldestEntry, which will set _eldest\n@@ -348,17 +351,13 @@ class DataWrapper {\n_mo = value;\n_cache = cache;\n}\n- @Override\n- protected void finalize() throws Throwable {\n- super.finalize();\n- write(true);\n- }\n- public synchronized void write(boolean isBeingGarbageCollected) throws FileNotFoundException, IOException {\n- if(_key.equals(_cache.dummyKey))\n+ public synchronized void write(boolean forceAggresiveWrites) throws FileNotFoundException, IOException {\n+ if(_key.equals(PersistentLRUCache.dummyKey))\nreturn;\n- _cache.makeRecent(_key); // Make it recent.\n+ // Prepare for writing\n+ _cache.makeRecent(_key); // Make it recent.\nif(_dArr != null || _fArr != null || _mb != null || _mo != null) {\n_cache._currentNumBytes.addAndGet(-getSize());\n}\n@@ -366,14 +365,16 @@ class DataWrapper {\nif(!_cache.isInReadOnlyMode) {\nString debugSuffix = null;\nif(PersistentLRUCache.LOG.isDebugEnabled()) {\n- if(isBeingGarbageCollected)\n- debugSuffix = \" (is being garbage collected).\";\n+ if(forceAggresiveWrites)\n+ debugSuffix = \" (aggressively written).\";\nelse\ndebugSuffix = \" (capacity exceeded).\";\n}\nif(_dArr != null) {\n- try (ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(_cache.getFilePath(_key)))) {\n+ File file = new File(_cache.getFilePath(_key));\n+ file.deleteOnExit();\n+ try (ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(file))) {\nos.writeInt(_dArr.length);\nfor(int i = 0; i < _dArr.length; i++) {\nos.writeDouble(_dArr[i]);\n@@ -384,7 +385,9 @@ class DataWrapper {\nPersistentLRUCache.LOG.debug(\"Writing value (double[] of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n}\nelse if(_fArr != null) {\n- try (ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(_cache.getFilePath(_key)))) {\n+ File file = new File(_cache.getFilePath(_key));\n+ file.deleteOnExit();\n+ try (ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(file))) {\nos.writeInt(_fArr.length);\nfor(int i = 0; i < _fArr.length; i++) {\nos.writeFloat(_fArr[i]);\n@@ -395,7 +398,9 @@ class DataWrapper {\nPersistentLRUCache.LOG.debug(\"Writing value (float[] of size \" + getSize() + \" bytes) for the key \" + _key + \" to disk\" + debugSuffix);\n}\nelse if(_mb != null) {\n- try(FastBufferedDataOutputStream os = new FastBufferedDataOutputStream(new ObjectOutputStream(new FileOutputStream(_cache.getFilePath(_key))))) {\n+ File file = new File(_cache.getFilePath(_key));\n+ file.deleteOnExit();\n+ try(FastBufferedDataOutputStream os = new FastBufferedDataOutputStream(new ObjectOutputStream(new FileOutputStream(file)))) {\nos.writeLong(_mb.getInMemorySize());\n_mb.write(os);\n}\n@@ -508,44 +513,67 @@ class DataWrapper {\n// Internal helper class\nclass ValueWrapper {\nfinal Object _lock;\n- private SoftReference<DataWrapper> _ref;\n+ final boolean _isInReadOnlyMode;\n+ private 
SoftReference<DataWrapper> _softRef;\nlong _rlen;\nlong _clen;\nlong _nnz;\n- ValueWrapper(DataWrapper _data) {\n+ // This is only used in write-mode until the writing to the disk is completed.\n+ // It also prevents the _softRef from being garbage collected while it is written.\n+ volatile DataWrapper _strongRef;\n+\n+ ValueWrapper(DataWrapper data, boolean isInReadOnlyMode) {\n_lock = new Object();\n- _ref = new SoftReference<>(_data);\n- if(_data._mb != null) {\n- _rlen = _data._mb.getNumRows();\n- _clen = _data._mb.getNumColumns();\n- _nnz = _data._mb.getNonZeros();\n+ _isInReadOnlyMode = isInReadOnlyMode;\n+ boolean isDummyValue = (data._key == PersistentLRUCache.dummyKey);\n+ if(!_isInReadOnlyMode && !isDummyValue) {\n+ // Aggressive write to disk when the cache is used in the write-mode.\n+ // This avoids the need to depend on finalize to perform writing.\n+ _strongRef = data;\n+ Thread t = new Thread() {\n+ public void run() {\n+ try {\n+ _strongRef.write(true);\n+ _strongRef = null; // Reset the strong reference after aggresive writing\n+ } catch (IOException e) {\n+ throw new DMLRuntimeException(\"Error occured while aggressively writing the value to disk.\", e);\n+ }\n+ }\n+ };\n+ t.start();\n+ }\n+ _softRef = new SoftReference<>(data);\n+ if(data._mb != null) {\n+ _rlen = data._mb.getNumRows();\n+ _clen = data._mb.getNumColumns();\n+ _nnz = data._mb.getNonZeros();\n}\n}\n- void update(DataWrapper _data) {\n- _ref = new SoftReference<>(_data);\n- if(_data._mb != null) {\n- _rlen = _data._mb.getNumRows();\n- _clen = _data._mb.getNumColumns();\n- _nnz = _data._mb.getNonZeros();\n+ void update(DataWrapper data) {\n+ _softRef = new SoftReference<>(data);\n+ if(data._mb != null) {\n+ _rlen = data._mb.getNumRows();\n+ _clen = data._mb.getNumColumns();\n+ _nnz = data._mb.getNonZeros();\n}\n}\nboolean isAvailable() {\n- DataWrapper data = _ref.get();\n+ DataWrapper data = _softRef.get();\nreturn data != null && data.isAvailable();\n}\nDataWrapper get() {\n- return _ref.get();\n+ return _softRef.get();\n}\nlong getSize() {\n- DataWrapper data = _ref.get();\n+ DataWrapper data = _softRef.get();\nif(data != null)\nreturn data.getSize();\nelse\nreturn 0;\n}\nvoid remove() {\n- DataWrapper data = _ref.get();\n+ DataWrapper data = _softRef.get();\nif(data != null) {\ndata.remove();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Write to disk when the cache is used in the write-mode - This avoids the need to depend on finalize to perform writing.
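As a side note, a simplified, hypothetical sketch of the pattern this commit introduces in ValueWrapper: hold a strong reference only until a background thread has persisted the value, then rely on the SoftReference alone so the garbage collector may reclaim the in-memory copy. The class and method names below are invented for illustration and do not appear in the commit.

    import java.lang.ref.SoftReference;

    class PersistedValue<T> {
        private final SoftReference<T> soft;
        private volatile T strong; // pins the value until the background write completes

        PersistedValue(T data, Runnable writeToDisk) {
            soft = new SoftReference<>(data);
            strong = data;
            Thread writer = new Thread(() -> {
                writeToDisk.run(); // persist while the strong reference keeps the data reachable
                strong = null;     // afterwards the value is only softly reachable
            });
            writer.start();
        }

        T get() {
            return soft.get(); // null once the GC has reclaimed the softly referenced value
        }
    }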
49,736
20.09.2018 14:56:51
25,200
f46279a17031d3f8827923f6eddd614c3eac77d3
Added memory stats for GPU allocation/eviction Also, reverted the shadow buffer to the original implementation as we are getting OOM for lstm scripts. This likely has to do with pessimistic GC.
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<!-- Advanced optimization: fraction of driver memory to use for caching (default: 0.15) -->\n<sysml.caching.bufferSize>0.15</sysml.caching.bufferSize>\n- <!-- Advanced optimization: maximum fraction of driver memory to use for GPU shadow buffer.\n- Shadow buffer is cleared eagerly on garbage collection to avoid OOM and is backed by org.apache.sysml.utils.PersistentLRUCache.\n- Setting this to zero disables shadow buffering. If you intend to train network larger than GPU memory size,\n- consider using large driver memory and setting this to a value greater than 0. -->\n- <sysml.gpu.eviction.shadow.bufferSize>0.5</sysml.gpu.eviction.shadow.bufferSize>\n+ <!-- Advanced optimization: fraction of driver memory to use for GPU shadow buffer. This optimization is ignored for double precision.\n+ By default, it is disabled (hence set to 0.0). If you intend to train network larger than GPU memory size, consider using single precision and setting this to 0.1. -->\n+ <sysml.gpu.eviction.shadow.bufferSize>0.0</sysml.gpu.eviction.shadow.bufferSize>\n<!-- Fraction of available GPU memory to use. This is similar to TensorFlow's per_process_gpu_memory_fraction configuration property. (default: 0.9) -->\n<sysml.gpu.memory.util.factor>0.9</sysml.gpu.memory.util.factor>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -191,7 +191,7 @@ public class GPUMemoryManager {\nGPUStatistics.cudaAllocCount.increment();\n}\nif(printDebugMessage != null && (PRINT_GPU_MEMORY_INFO || LOG.isTraceEnabled()) ) {\n- LOG.info(\"Success: \" + printDebugMessage + \":\" + byteCountToDisplaySize(size));\n+ LOG.info(\"Success: \" + printDebugMessage + \":\" + GPUStatistics.byteCountToDisplaySize(size));\n}\nreturn A;\n} catch(jcuda.CudaException e) {\n@@ -203,7 +203,7 @@ public class GPUMemoryManager {\nGPUStatistics.cudaAllocCount.increment();\n}\nif(printDebugMessage != null && (PRINT_GPU_MEMORY_INFO || LOG.isTraceEnabled()) ) {\n- LOG.info(\"Failed: \" + printDebugMessage + \":\" + byteCountToDisplaySize(size));\n+ LOG.info(\"Failed: \" + printDebugMessage + \":\" + GPUStatistics.byteCountToDisplaySize(size));\nLOG.info(\"GPU Memory info \" + printDebugMessage + \":\" + toString());\n}\nreturn null;\n@@ -224,28 +224,15 @@ public class GPUMemoryManager {\nreturn \"->\" + stackTrace[index].getClassName() + \".\" + stackTrace[index].getMethodName() + \"(\" + stackTrace[index].getFileName() + \":\" + stackTrace[index].getLineNumber() + \")\";\n}\n- /**\n- * Pretty printing utility to print bytes\n- *\n- * @param numBytes number of bytes\n- * @return a human-readable display value\n- */\n- private String byteCountToDisplaySize(long numBytes) {\n- // return org.apache.commons.io.FileUtils.byteCountToDisplaySize(bytes); // performs rounding\n- if (numBytes < 1024) {\n- return numBytes + \" bytes\";\n- }\n- else {\n- int exp = (int) (Math.log(numBytes) / 6.931471805599453);\n- return String.format(\"%.3f %sB\", ((double)numBytes) / Math.pow(1024, exp), \"KMGTP\".charAt(exp-1));\n- }\n- }\npublic boolean canAllocateWithoutEviction(String opcode, long size) {\nreturn lazyCudaFreeMemoryManager.contains(opcode, size) || allocator.canAllocate(size) ||\nlazyCudaFreeMemoryManager.containsRmvarPointerMinSize(opcode, 
size) ;\n}\n+ long peakSize = 0;\n+ long currentSize = 0;\n+\n/**\n* Allocate pointer of the given size in bytes.\n*\n@@ -255,12 +242,19 @@ public class GPUMemoryManager {\n*/\npublic Pointer malloc(String opcode, long size) {\nif(size <= 0) {\n- throw new DMLRuntimeException(\"Cannot allocate memory of size \" + byteCountToDisplaySize(size));\n+ throw new DMLRuntimeException(\"Cannot allocate memory of size \" + GPUStatistics.byteCountToDisplaySize(size));\n}\nif(DEBUG_MEMORY_LEAK) {\nLOG.info(\"GPU Memory info during malloc:\" + toString());\n}\n+ if(ConfigurationManager.isStatistics()) {\n+ GPUStatistics.cudaAllocAggSize.add(size);\n+ currentSize += size;\n+ peakSize = Math.max(currentSize, peakSize);\n+ GPUStatistics.cudaAllocPeakSize.set(peakSize);\n+ }\n+\n// Step 1: First try reusing exact match in rmvarGPUPointers to avoid holes in the GPU memory\nPointer A = lazyCudaFreeMemoryManager.getRmvarPointer(opcode, size);\n@@ -358,7 +352,7 @@ public class GPUMemoryManager {\n}\nif(A == null) {\n- throw new DMLRuntimeException(\"There is not enough memory on device for this matrix, requested = \" + byteCountToDisplaySize(size) + \". \\n \"\n+ throw new DMLRuntimeException(\"There is not enough memory on device for this matrix, requested = \" + GPUStatistics.byteCountToDisplaySize(size) + \". \\n \"\n+ toString());\n}\n@@ -377,6 +371,10 @@ public class GPUMemoryManager {\nboolean eagerDelete = true;\nif(gpuObj.isDirty()) {\n// Eviction\n+ if(ConfigurationManager.isStatistics()) {\n+ long size = gpuObj.getSizeOnDevice();\n+ GPUStatistics.cudaEvictAggSize.add(size);\n+ }\ngpuObj.copyFromDeviceToHost(opcode, true, eagerDelete);\n}\nelse {\n@@ -416,7 +414,7 @@ public class GPUMemoryManager {\nif(allPointers.containsKey(toFree)) {\nlong size = allPointers.get(toFree).getSizeInBytes();\nif(LOG.isTraceEnabled()) {\n- LOG.trace(\"Free-ing up the pointer of size \" + byteCountToDisplaySize(size));\n+ LOG.trace(\"Free-ing up the pointer of size \" + GPUStatistics.byteCountToDisplaySize(size));\n}\nallPointers.remove(toFree);\nlazyCudaFreeMemoryManager.removeIfPresent(size, toFree);\n@@ -441,6 +439,10 @@ public class GPUMemoryManager {\npublic void free(String opcode, Pointer toFree, boolean eager) throws DMLRuntimeException {\nif(LOG.isTraceEnabled())\nLOG.trace(\"Free-ing the pointer with eager=\" + eager);\n+ long size = allPointers.get(toFree).getSizeInBytes();\n+ if(ConfigurationManager.isStatistics()) {\n+ currentSize -= size;\n+ }\nif (eager) {\nlong t0 = ConfigurationManager.isStatistics() ? 
System.nanoTime() : 0;\nguardedCudaFree(toFree);\n@@ -451,7 +453,6 @@ public class GPUMemoryManager {\nLOG.info(\"GPU memory info before failure:\" + toString());\nthrow new RuntimeException(\"ERROR : Internal state corrupted, cache block size map is not aware of a block it trying to free up\");\n}\n- long size = allPointers.get(toFree).getSizeInBytes();\nlazyCudaFreeMemoryManager.add(size, toFree);\n}\n}\n@@ -604,24 +605,24 @@ public class GPUMemoryManager {\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"\",\n\"Num Objects\", \"Num Pointers\", \"Size\"));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Unlocked Dirty GPU objects\",\n- numUnlockedDirtyGPUObjects, numUnlockedDirtyPointers, byteCountToDisplaySize(sizeOfUnlockedDirtyGPUObjects)));\n+ numUnlockedDirtyGPUObjects, numUnlockedDirtyPointers, GPUStatistics.byteCountToDisplaySize(sizeOfUnlockedDirtyGPUObjects)));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Unlocked NonDirty GPU objects\",\n- numUnlockedNonDirtyGPUObjects, numUnlockedNonDirtyPointers, byteCountToDisplaySize(sizeOfUnlockedNonDirtyGPUObjects)));\n+ numUnlockedNonDirtyGPUObjects, numUnlockedNonDirtyPointers, GPUStatistics.byteCountToDisplaySize(sizeOfUnlockedNonDirtyGPUObjects)));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Locked GPU objects\",\n- numLockedGPUObjects, numLockedPointers, byteCountToDisplaySize(sizeOfLockedGPUObjects)));\n+ numLockedGPUObjects, numLockedPointers, GPUStatistics.byteCountToDisplaySize(sizeOfLockedGPUObjects)));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Cached rmvar-ed pointers\",\n- \"-\", lazyCudaFreeMemoryManager.getNumPointers(), byteCountToDisplaySize(lazyCudaFreeMemoryManager.getTotalMemoryAllocated())));\n+ \"-\", lazyCudaFreeMemoryManager.getNumPointers(), GPUStatistics.byteCountToDisplaySize(lazyCudaFreeMemoryManager.getTotalMemoryAllocated())));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Non-matrix/non-cached pointers\",\n- \"-\", potentiallyLeakyPointers.size(), byteCountToDisplaySize(totalSizePotentiallyLeakyPointers)));\n+ \"-\", potentiallyLeakyPointers.size(), GPUStatistics.byteCountToDisplaySize(totalSizePotentiallyLeakyPointers)));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"All pointers\",\n- \"-\", allPointers.size(), byteCountToDisplaySize(totalMemoryAllocated)));\n+ \"-\", allPointers.size(), GPUStatistics.byteCountToDisplaySize(totalMemoryAllocated)));\nlong free[] = { 0 };\nlong total[] = { 0 };\ncudaMemGetInfo(free, total);\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Free mem (from cudaMemGetInfo)\",\n- \"-\", \"-\", byteCountToDisplaySize(free[0])));\n+ \"-\", \"-\", GPUStatistics.byteCountToDisplaySize(free[0])));\nret.append(String.format(\"%-35s%-15s%-15s%-15s\\n\", \"Total mem (from cudaMemGetInfo)\",\n- \"-\", \"-\", byteCountToDisplaySize(total[0])));\n+ \"-\", \"-\", GPUStatistics.byteCountToDisplaySize(total[0])));\nret.append(\"====================================================\\n\");\nreturn ret.toString();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -111,11 +111,7 @@ public class GPUObject {\n*/\npublic Pointer getDensePointer() {\nif(jcudaDenseMatrixPtr == null && shadowBuffer.isBuffered() && getJcudaSparseMatrixPtr() == null) {\n- try {\nshadowBuffer.moveToDevice();\n- } catch (IOException e) {\n- throw new 
DMLRuntimeException(\"Error moving the data from shadow buffer to the device\", e);\n- }\n}\nreturn jcudaDenseMatrixPtr;\n}\n@@ -939,21 +935,13 @@ public class GPUObject {\nelse {\n// If already copied to shadow buffer as part of previous eviction and this is not an eviction (i.e. bufferpool call for subsequent CP/Spark instruction),\n// then copy from shadow buffer to MatrixObject.\n- try {\nshadowBuffer.moveToHost();\n- } catch (IOException e) {\n- throw new DMLRuntimeException(\"Error moving the data from shadow buffer to the host memory\", e);\n- }\nreturn;\n}\n}\nelse if(shadowBuffer.isEligibleForBuffering(isEviction, eagerDelete)) {\n// Perform shadow buffering if (1) single precision, (2) during eviction, (3) for dense matrices, and (4) if the given matrix can fit into the shadow buffer.\n- try {\nshadowBuffer.moveFromDevice(instName);\n- } catch (IOException e) {\n- throw new DMLRuntimeException(\"Error moving the data from the device to the shadow buffer\", e);\n- }\nreturn;\n}\nelse if (isDensePointerNull() && getJcudaSparseMatrixPtr() == null) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "diff": "@@ -20,65 +20,41 @@ package org.apache.sysml.runtime.instructions.gpu.context;\nimport static jcuda.runtime.JCuda.cudaMemcpy;\n-import java.io.FileNotFoundException;\n-import java.io.IOException;\n-import java.util.concurrent.atomic.AtomicLong;\n-\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n-import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.utils.GPUStatistics;\n-import org.apache.sysml.utils.PersistentLRUCache;\nimport jcuda.Pointer;\n+import jcuda.Sizeof;\n-/**\n- * Shadow buffer is a temporary staging area used during eviction.\n- * It is eagerly deleted and backed using the local filesystem in case of Garbage Collection\n- * or if the staging memory size exceeds the user-specified size.\n- * This is needed to respect SystemML's memory estimates, while still allowing\n- * for caching in case of GPU plans.\n- */\npublic class ShadowBuffer {\nprivate static final Log LOG = LogFactory.getLog(ShadowBuffer.class.getName());\n- private static PersistentLRUCache CACHE;\n- private static AtomicLong UNIQUE_ID = new AtomicLong();\n- private static long EVICTION_SHADOW_BUFFER_MAX_BYTES;\n- final GPUObject gpuObj;\n- boolean isBuffered = false;\n- String fileName;\n- public static boolean isEnabled() {\n- if(CACHE == null && EVICTION_SHADOW_BUFFER_MAX_BYTES >= 0) {\n- double shadowBufferSize = ConfigurationManager.getDMLConfig().getDoubleValue(DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\n- if(shadowBufferSize <= 0) {\n- EVICTION_SHADOW_BUFFER_MAX_BYTES = -1; // Minor optimization to avoid unnecessary invoking configuration manager.\n+ GPUObject gpuObj;\n+ float[] shadowPointer = null;\n+ private static boolean _warnedAboutShadowBuffer = false;\n+ private static long EVICTION_SHADOW_BUFFER_CURR_BYTES = 0;\n+ private static long EVICTION_SHADOW_BUFFER_MAX_BYTES;\n+ static {\n+ 
if(DMLScript.FLOATING_POINT_PRECISION.equals(\"double\")) {\n+ EVICTION_SHADOW_BUFFER_MAX_BYTES = 0;\n}\nelse {\n- if(shadowBufferSize > 1)\n+ double shadowBufferSize = ConfigurationManager.getDMLConfig().getDoubleValue(DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\n+ if(shadowBufferSize < 0 || shadowBufferSize > 1)\nthrow new RuntimeException(\"Incorrect value (\" + shadowBufferSize + \") for the configuration:\" + DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\nEVICTION_SHADOW_BUFFER_MAX_BYTES = (long) (((double)InfrastructureAnalyzer.getLocalMaxMemory())*shadowBufferSize);\n- try {\n- CACHE = new PersistentLRUCache(EVICTION_SHADOW_BUFFER_MAX_BYTES);\n- } catch(IOException e) {\n- LOG.warn(\"Unable to create a temporary directory for shadow buffering on the local filesystem; disabling shadow buffering:\" + e.getMessage());\n- EVICTION_SHADOW_BUFFER_MAX_BYTES = -1; // Minor optimization to avoid checking for file permission.\n- }\n}\n}\n- return CACHE != null;\n- }\npublic ShadowBuffer(GPUObject gpuObj) {\n- if(isEnabled())\n- fileName = \"shadow_\" + UNIQUE_ID.incrementAndGet();\nthis.gpuObj = gpuObj;\n-\n}\n/**\n@@ -87,39 +63,19 @@ public class ShadowBuffer {\n* @return true if the gpu object is shadow buffered\n*/\npublic boolean isBuffered() {\n- return isBuffered;\n- }\n-\n- private static long getSizeOfDataType(long numElems) {\n- return numElems * ((long) LibMatrixCUDA.sizeOfDataType);\n+ return shadowPointer != null;\n}\n/**\n* Move the data from GPU to shadow buffer\n* @param instName name of the instruction\n- * @throws IOException if error\n- * @throws FileNotFoundException if error\n*/\n- public void moveFromDevice(String instName) throws FileNotFoundException, IOException {\n+ public void moveFromDevice(String instName) {\nlong start = ConfigurationManager.isStatistics() ? 
System.nanoTime() : 0;\nint numElems = GPUObject.toIntExact(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\n-\n- if(isDoublePrecision()) {\n- double [] shadowPointer = new double[numElems];\n- cudaMemcpy(Pointer.to(shadowPointer), gpuObj.jcudaDenseMatrixPtr, getSizeOfDataType(numElems), jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\n- CACHE.put(fileName, shadowPointer);\n- isBuffered = true;\n- }\n- else if(isSinglePrecision()) {\n- float [] shadowPointer = new float[numElems];\n+ shadowPointer = new float[numElems];\n+ EVICTION_SHADOW_BUFFER_CURR_BYTES += getSizeOfFloat(shadowPointer.length);\ncudaMemcpy(Pointer.to(shadowPointer), gpuObj.jcudaDenseMatrixPtr, getSizeOfDataType(numElems), jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\n- CACHE.put(fileName, shadowPointer);\n- isBuffered = true;\n- }\n- else {\n- throw new DMLRuntimeException(\"Unsupported datatype\");\n- }\n-\ngpuObj.getGPUContext().cudaFreeHelper(instName, gpuObj.jcudaDenseMatrixPtr, true);\ngpuObj.jcudaDenseMatrixPtr = null;\nif (ConfigurationManager.isStatistics()) {\n@@ -131,37 +87,25 @@ public class ShadowBuffer {\n}\n}\n-\n- private static boolean isDoublePrecision() {\n- return LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.DOUBLE;\n+ private long getSizeOfFloat(long numElems) {\n+ return numElems*Sizeof.FLOAT;\n}\n- private static boolean isSinglePrecision() {\n- return LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.FLOAT;\n+ private long getSizeOfDataType(long numElems) {\n+ return numElems*LibMatrixCUDA.sizeOfDataType;\n}\n/**\n* Move the data from shadow buffer to Matrix object\n- * @throws IOException if error\n- * @throws FileNotFoundException if error\n*/\n- public void moveToHost() throws FileNotFoundException, IOException {\n+ public void moveToHost() {\nlong start = ConfigurationManager.isStatistics() ? System.nanoTime() : 0;\nMatrixBlock tmp = new MatrixBlock(GPUObject.toIntExact(gpuObj.mat.getNumRows()), GPUObject.toIntExact(gpuObj.mat.getNumColumns()), false);\ntmp.allocateDenseBlock();\ndouble [] tmpArr = tmp.getDenseBlockValues();\n- if(isDoublePrecision()) {\n- System.arraycopy(CACHE.getAsDoubleArray(fileName), 0, tmpArr, 0, tmpArr.length);\n- }\n- else if(isSinglePrecision()) {\n- float [] shadowPointer = CACHE.getAsFloatArray(fileName);\nfor(int i = 0; i < shadowPointer.length; i++) {\ntmpArr[i] = shadowPointer[i];\n}\n- }\n- else {\n- throw new DMLRuntimeException(\"Unsupported datatype\");\n- }\ngpuObj.mat.acquireModify(tmp);\ngpuObj.mat.release();\nclearShadowPointer();\n@@ -178,28 +122,12 @@ public class ShadowBuffer {\n/**\n* Move the data from shadow buffer to GPU\n- * @throws IOException if error\n- * @throws FileNotFoundException if error\n*/\n- public void moveToDevice() throws FileNotFoundException, IOException {\n+ public void moveToDevice() {\nlong start = ConfigurationManager.isStatistics() ? 
System.nanoTime() : 0;\n- int length; Pointer shadowDevicePointer;\n- if(isDoublePrecision()) {\n- double [] shadowPointer = CACHE.getAsDoubleArray(fileName);\n- length = shadowPointer.length;\n- shadowDevicePointer = Pointer.to(shadowPointer);\n- }\n- else if(isSinglePrecision()) {\n- float [] shadowPointer = CACHE.getAsFloatArray(fileName);\n- length = shadowPointer.length;\n- shadowDevicePointer = Pointer.to(shadowPointer);\n- }\n- else {\n- throw new DMLRuntimeException(\"Unsupported datatype\");\n- }\n- long numBytes = getSizeOfDataType(length);\n+ long numBytes = getSizeOfDataType(shadowPointer.length);\ngpuObj.jcudaDenseMatrixPtr = gpuObj.getGPUContext().allocate(null, numBytes);\n- cudaMemcpy(gpuObj.jcudaDenseMatrixPtr, shadowDevicePointer, numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\n+ cudaMemcpy(gpuObj.jcudaDenseMatrixPtr, Pointer.to(shadowPointer), numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\nclearShadowPointer();\nif (ConfigurationManager.isStatistics()) {\nlong totalTime = System.nanoTime() - start;\n@@ -216,14 +144,14 @@ public class ShadowBuffer {\n* @return true if the given GPU object is eligible to be shadow buffered\n*/\npublic boolean isEligibleForBuffering(boolean isEviction, boolean eagerDelete) {\n- if(isEnabled() && isEviction && eagerDelete && !gpuObj.isDensePointerNull()) {\n- long numBytes = getSizeOfDataType(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\n- if(EVICTION_SHADOW_BUFFER_MAX_BYTES <= numBytes) {\n- return false; // Don't attempt to cache very large GPU objects.\n- }\n- else {\n- return true; // Dense GPU objects is eligible for shadow buffering when called during eviction and is being eagerly deleted.\n+ if(LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.FLOAT && isEviction && eagerDelete && !gpuObj.isDensePointerNull()) {\n+ long numBytes = getSizeOfFloat(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\n+ boolean ret = EVICTION_SHADOW_BUFFER_CURR_BYTES + numBytes <= EVICTION_SHADOW_BUFFER_MAX_BYTES;\n+ if(!ret && !_warnedAboutShadowBuffer) {\n+ LOG.warn(\"Shadow buffer is full, so using CP bufferpool instead. 
Consider increasing sysml.gpu.eviction.shadow.bufferSize.\");\n+ _warnedAboutShadowBuffer = true;\n}\n+ return ret;\n}\nelse {\nreturn false;\n@@ -234,9 +162,9 @@ public class ShadowBuffer {\n* Removes the content from shadow buffer\n*/\npublic void clearShadowPointer() {\n- if(CACHE.containsKey(fileName)) {\n- CACHE.remove(fileName);\n- isBuffered = false;\n+ if(shadowPointer != null) {\n+ EVICTION_SHADOW_BUFFER_CURR_BYTES -= getSizeOfFloat(shadowPointer.length);\n}\n+ shadowPointer = null;\n}\n}\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "new_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "diff": "@@ -26,6 +26,7 @@ import java.util.HashMap;\nimport java.util.Iterator;\nimport java.util.List;\nimport java.util.Map;\n+import java.util.concurrent.atomic.AtomicLong;\nimport java.util.concurrent.atomic.LongAdder;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -79,6 +80,10 @@ public class GPUStatistics {\npublic static LongAdder cudaAllocFailedCount = new LongAdder();\npublic static LongAdder cudaAllocReuseCount = new LongAdder();\n+ public static LongAdder cudaAllocAggSize = new LongAdder();\n+ public static AtomicLong cudaAllocPeakSize = new AtomicLong();\n+ public static LongAdder cudaEvictAggSize = new LongAdder();\n+\n// Per instruction miscellaneous timers.\n// Used to record events in a CP Heavy Hitter instruction and\n// provide a breakdown of how time was spent in that instruction\n@@ -116,6 +121,9 @@ public class GPUStatistics {\ncudaDouble2FloatCount.reset();\ncudaForcedClearLazyFreedEvictTime.reset();\ncudaForcedClearUnpinnedEvictTime.reset();\n+ cudaAllocAggSize.reset();\n+ cudaAllocPeakSize.set(0);\n+ cudaEvictAggSize.reset();\ncudaAllocCount.reset();\ncudaDeAllocCount.reset();\ncudaToDevCount.reset();\n@@ -219,6 +227,23 @@ public class GPUStatistics {\nreturn sb.toString();\n}\n+ /**\n+ * Pretty printing utility to print bytes\n+ *\n+ * @param numBytes number of bytes\n+ * @return a human-readable display value\n+ */\n+ public static String byteCountToDisplaySize(long numBytes) {\n+ // return org.apache.commons.io.FileUtils.byteCountToDisplaySize(bytes); // performs rounding\n+ if (numBytes < 1024) {\n+ return numBytes + \" bytes\";\n+ }\n+ else {\n+ int exp = (int) (Math.log(numBytes) / 6.931471805599453);\n+ return String.format(\"%.3f %sB\", ((double)numBytes) / Math.pow(1024, exp), \"KMGTP\".charAt(exp-1));\n+ }\n+ }\n+\n/**\n* Used to print out cuda timers & counters\n* @return a formatted string of cuda timers & counters\n@@ -242,6 +267,10 @@ public class GPUStatistics {\n+ cudaAllocReuseCount.longValue() +\") / \"\n+ cudaDeAllocCount.longValue() + \" / \"\n+ cudaMemSet0Count.longValue() + \".\\n\");\n+ sb.append(\"GPU mem size (alloc (peak) / evict):\\t\"\n+ + byteCountToDisplaySize(cudaAllocAggSize.longValue()) + \"(\"\n+ + byteCountToDisplaySize(cudaAllocPeakSize.longValue()) + \") / \"\n+ + byteCountToDisplaySize(cudaEvictAggSize.longValue()) + \".\\n\");\nsb.append(\"GPU mem tx time (toDev(d2f/s2d) / fromDev(f2d/s2h) / evict(d2s/size)):\\t\"\n+ String.format(\"%.3f\", cudaToDevTime.longValue()*1e-9) + \"(\"\n+ String.format(\"%.3f\", cudaDouble2FloatTime.longValue()*1e-9)+ \"/\"\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "new_path": "src/main/java/org/apache/sysml/utils/PersistentLRUCache.java", "diff": "@@ -519,10 +519,6 @@ class ValueWrapper {\nlong _clen;\nlong _nnz;\n- // This is 
only used in write-mode until the writing to the disk is completed.\n- // It also prevents the _softRef from being garbage collected while it is written.\n- volatile DataWrapper _strongRef;\n-\nValueWrapper(DataWrapper data, boolean isInReadOnlyMode) {\n_lock = new Object();\n_isInReadOnlyMode = isInReadOnlyMode;\n@@ -530,12 +526,10 @@ class ValueWrapper {\nif(!_isInReadOnlyMode && !isDummyValue) {\n// Aggressive write to disk when the cache is used in the write-mode.\n// This avoids the need to depend on finalize to perform writing.\n- _strongRef = data;\nThread t = new Thread() {\npublic void run() {\ntry {\n- _strongRef.write(true);\n- _strongRef = null; // Reset the strong reference after aggresive writing\n+ data.write(true);\n} catch (IOException e) {\nthrow new DMLRuntimeException(\"Error occured while aggressively writing the value to disk.\", e);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Added memory stats for GPU allocation/eviction - Also, reverted the shadow buffer to the original implementation as we are getting OOM for lstm scripts. This likely has to do with pessimistic GC.
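Note on the commit above: the memory statistics rely on the new byteCountToDisplaySize helper, whose constant 6.931471805599453 is ln(1024), so exp becomes floor(log_1024(numBytes)). The following standalone sketch restates that formatting logic with a few sample inputs; the class name and the example values are illustrative, not part of the commit.

```java
public class ByteFormatSketch {
    // exp = floor(log_1024(numBytes)); scale down by 1024^exp and pick the unit prefix.
    static String byteCountToDisplaySize(long numBytes) {
        if (numBytes < 1024)
            return numBytes + " bytes";
        int exp = (int) (Math.log(numBytes) / 6.931471805599453); // ln(1024)
        return String.format("%.3f %sB", numBytes / Math.pow(1024, exp), "KMGTP".charAt(exp - 1));
    }

    public static void main(String[] args) {
        System.out.println(byteCountToDisplaySize(512L));     // 512 bytes
        System.out.println(byteCountToDisplaySize(1536L));    // 1.500 KB
        System.out.println(byteCountToDisplaySize(3L << 30)); // 3.000 GB
    }
}
```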
49,738
23.09.2018 21:48:58
-7,200
2c1fb20ecba73417e313172c34bfdde2b9a30b3e
[MINOR] Performance update-in-place (nnz maintenance, evictions). Includes a fix for GPU-related compiler warnings (missing imports).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "diff": "@@ -310,11 +310,11 @@ public class ProgramBlock implements ParseInfo\nnew MatrixBlock(mbVar, MatrixBlock.DEFAULT_INPLACE_SPARSEBLOCK, true) );\nmoNew.setFileName(mo.getFileName()+Lop.UPDATE_INPLACE_PREFIX+tid);\nmo.release();\n- moNew.release();\n- moNew.setUpdateType(UpdateType.INPLACE);\n//cleanup old variable (e.g., remove from buffer pool)\nif( ec.removeVariable(varname) != null )\nec.cleanupCacheableData(mo);\n+ moNew.release(); //after old removal to avoid unnecessary evictions\n+ moNew.setUpdateType(UpdateType.INPLACE);\nec.setVariable(varname, moNew);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -24,7 +24,6 @@ import static jcuda.runtime.JCuda.cudaMemset;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\n-import java.io.IOException;\nimport java.util.concurrent.atomic.AtomicLong;\nimport java.util.concurrent.atomic.LongAdder;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1564,7 +1564,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//no need to clear for awareDestNZ since overwritten\nallocateDenseBlock(false);\n- if( awareDestNZ )\n+ if( awareDestNZ && (nonZeros!=getLength() || src.nonZeros!=src.getLength()) )\nnonZeros = nonZeros - recomputeNonZeros(rl, ru, cl, cu) + src.nonZeros;\n//copy values\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SinglePrecisionCudaSupportFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SinglePrecisionCudaSupportFunctions.java", "diff": "@@ -22,11 +22,6 @@ import static jcuda.runtime.JCuda.cudaMemcpy;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\n-import java.nio.ByteBuffer;\n-import java.nio.ByteOrder;\n-import java.nio.FloatBuffer;\n-import java.util.stream.IntStream;\n-\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.conf.ConfigurationManager;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance update-in-place (nnz maintenance, evictions) Incl fix for GPU-related compiler warnings (missing imports).
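Note on the commit above: the guarded nnz maintenance in MatrixBlock skips the recount of the overwritten region when both blocks are completely dense, because the copy then cannot change the nonzero count. A small sketch of that arithmetic, with hypothetical names rather than the actual MatrixBlock API:

```java
// destNnz/destCells describe the destination block, srcNnz/srcCells the copied source block,
// and regionNnzBeforeCopy is the (expensive to recount) nnz of the overwritten region.
static long updatedNnzAfterCopy(long destNnz, long destCells,
        long srcNnz, long srcCells, long regionNnzBeforeCopy) {
    if (destNnz == destCells && srcNnz == srcCells)
        return destNnz; // both sides fully dense: the count cannot change
    return destNnz - regionNnzBeforeCopy + srcNnz; // general case
}
```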
49,738
27.09.2018 20:36:36
-7,200
069863f7f95f8da1f8aa8c366d8f32dbede28a8b
Caching in multi-level spark cumulative aggregates. This patch adds optional caching for multi-level spark cumulative aggregates, where we cache intermediate aggregates of the forward pass to avoid unnecessary lazy evaluation of previous levels on the backward pass.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "diff": "@@ -23,6 +23,7 @@ import java.util.ArrayList;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.lops.Aggregate;\n+import org.apache.sysml.lops.Checkpoint;\nimport org.apache.sysml.lops.Aggregate.OperationTypes;\nimport org.apache.sysml.lops.CombineUnary;\nimport org.apache.sysml.lops.CumulativeOffsetBinary;\n@@ -49,6 +50,9 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\npublic class UnaryOp extends MultiThreadedHop\n{\n+ private static final boolean ALLOW_CUMAGG_BROADCAST = true;\n+ private static final boolean ALLOW_CUMAGG_CACHING = false;\n+\nprivate OpOp1 _op = null;\nprivate UnaryOp() {\n@@ -439,6 +443,7 @@ public class UnaryOp extends MultiThreadedHop\nreturn TEMP;\n}\n+ @SuppressWarnings(\"unused\")\nprivate Lop constructLopsSparkCumulativeUnary()\n{\nHop input = getInput().get(0);\n@@ -458,6 +463,13 @@ public class UnaryOp extends MultiThreadedHop\nwhile( ((2*OptimizerUtils.estimateSize(TEMP.getOutputParameters().getNumRows(), clen) + OptimizerUtils.estimateSize(1, clen))\n> OptimizerUtils.getLocalMemBudget() && TEMP.getOutputParameters().getNumRows()>1) || force )\n{\n+ //caching within multi-level cascades\n+ if( ALLOW_CUMAGG_CACHING && level > 0 ) {\n+ Lop oldTEMP = TEMP;\n+ TEMP = new Checkpoint(oldTEMP, getDataType(), getValueType(), Checkpoint.getDefaultStorageLevelString());\n+ TEMP.getOutputParameters().setDimensions(oldTEMP.getOutputParameters());\n+ setLineNumbers(TEMP);\n+ }\nDATA.add(TEMP);\n//preaggregation per block (for spark, the CumulativePartialAggregate subsumes both\n@@ -486,7 +498,8 @@ public class UnaryOp extends MultiThreadedHop\n//(for spark, the CumulativeOffsetBinary subsumes both the split aggregate and\n//the subsequent offset binary apply of split aggregates against the original data)\ndouble initValue = getCumulativeInitValue();\n- boolean broadcast = OptimizerUtils.checkSparkBroadcastMemoryBudget(OptimizerUtils.estimateSize(\n+ boolean broadcast = ALLOW_CUMAGG_BROADCAST\n+ && OptimizerUtils.checkSparkBroadcastMemoryBudget(OptimizerUtils.estimateSize(\nTEMP.getOutputParameters().getNumRows(), TEMP.getOutputParameters().getNumCols()));\nCumulativeOffsetBinary binary = new CumulativeOffsetBinary(DATA.get(level), TEMP,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/CumulativeOffsetBinary.java", "new_path": "src/main/java/org/apache/sysml/lops/CumulativeOffsetBinary.java", "diff": "@@ -27,8 +27,6 @@ import org.apache.sysml.parser.Expression.*;\npublic class CumulativeOffsetBinary extends Lop\n{\n- private static final boolean ALLOW_BROADCAST = true;\n-\nprivate OperationTypes _op;\nprivate double _initValue = 0;\nprivate boolean _broadcast = false;\n@@ -50,7 +48,7 @@ public class CumulativeOffsetBinary extends Lop\n//in case of Spark, CumulativeOffset includes CumulativeSplit and hence needs the init value\n_initValue = init;\n- _broadcast = ALLOW_BROADCAST && broadcast;\n+ _broadcast = broadcast;\ninit(data, offsets, dt, vt, et);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/OutputParameters.java", "new_path": "src/main/java/org/apache/sysml/lops/OutputParameters.java", "diff": "@@ -87,6 +87,13 @@ public class OutputParameters\nsetDimensions(rows, cols, rows_per_block, cols_per_block, nnz);\n}\n+ public void setDimensions(OutputParameters input) {\n+ _num_rows = 
input._num_rows;\n+ _num_cols = input._num_cols;\n+ _num_rows_in_block = input._num_rows_in_block;\n+ _num_cols_in_block = input._num_cols_in_block;\n+ }\n+\npublic Format getFormat() {\nreturn matrix_format;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2494] Caching in multi-level spark cumulative aggregates This patch adds optional caching for multi-level spark cumulative aggregates, where we cache intermediate aggregates of the forward pass to avoid unnecessary lazy evaluation of previous levels on the backwards pass.
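For context on the cascade the commit above caches, the following single-node Java sketch (not the actual Spark code path) shows the structure: a forward pass computes per-block aggregates, an exclusive prefix scan over those aggregates yields per-block offsets, and a backward pass applies each offset to the local cumulative sum. Caching the forward-pass intermediates corresponds to keeping blockSums materialized instead of recomputing it lazily during the backward pass.

```java
static double[] blockCumsum(double[] data, int blockSize) {
    int numBlocks = (data.length + blockSize - 1) / blockSize;
    // forward pass: per-block partial aggregates
    double[] blockSums = new double[numBlocks];
    for (int b = 0; b < numBlocks; b++)
        for (int i = b * blockSize; i < Math.min((b + 1) * blockSize, data.length); i++)
            blockSums[b] += data[i];
    // exclusive prefix scan over the block aggregates yields per-block offsets
    double[] offsets = new double[numBlocks];
    for (int b = 1; b < numBlocks; b++)
        offsets[b] = offsets[b - 1] + blockSums[b - 1];
    // backward pass: local cumulative sum per block, shifted by its offset
    double[] out = new double[data.length];
    for (int b = 0; b < numBlocks; b++) {
        double run = offsets[b];
        for (int i = b * blockSize; i < Math.min((b + 1) * blockSize, data.length); i++)
            out[i] = (run += data[i]);
    }
    return out;
}
```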
49,738
27.09.2018 21:17:23
-7,200
0c4a3611c316cb13c7eaa94facd3446b34c1090e
Adjust spark cumulative aggregate partitions. This patch improves the robustness of spark cumulative aggregates by adjusting the number of partitions for intermediates of the forward pass, because this data size can significantly shrink but also grow.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CumulativeAggregateSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CumulativeAggregateSPInstruction.java", "diff": "@@ -32,6 +32,7 @@ import org.apache.sysml.runtime.functionobjects.PlusMultiply;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDAggregateUtils;\n+import org.apache.sysml.runtime.instructions.spark.utils.SparkUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n@@ -59,9 +60,11 @@ public class CumulativeAggregateSPInstruction extends AggregateUnarySPInstructio\npublic void processInstruction(ExecutionContext ec) {\nSparkExecutionContext sec = (SparkExecutionContext)ec;\nMatrixCharacteristics mc = sec.getMatrixCharacteristics(input1.getName());\n+ MatrixCharacteristics mcOut = new MatrixCharacteristics(mc);\nlong rlen = mc.getRows();\nint brlen = mc.getRowsPerBlock();\nint bclen = mc.getColsPerBlock();\n+ mcOut.setRows((long)(Math.ceil((double)rlen/brlen)));\n//get input\nJavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable( input1.getName() );\n@@ -70,11 +73,16 @@ public class CumulativeAggregateSPInstruction extends AggregateUnarySPInstructio\nAggregateUnaryOperator auop = (AggregateUnaryOperator) _optr;\nJavaPairRDD<MatrixIndexes,MatrixBlock> out =\nin.mapToPair(new RDDCumAggFunction(auop, rlen, brlen, bclen));\n- out = RDDAggregateUtils.mergeByKey(out, false);\n+ //merge partial aggregates, adjusting for correct number of partitions\n+ //as size can significant shrink (1K) but also grow (sparse-dense)\n+ int numParts = SparkUtils.getNumPreferredPartitions(mcOut);\n+ int minPar = (int)Math.min(SparkExecutionContext.getDefaultParallelism(true), mcOut.getNumBlocks());\n+ out = RDDAggregateUtils.mergeByKey(out, Math.max(numParts, minPar), false);\n//put output handle in symbol table\nsec.setRDDHandleForVariable(output.getName(), out);\nsec.addLineageRDD(output.getName(), input1.getName());\n+ sec.getMatrixCharacteristics(output.getName()).set(mcOut);\n}\nprivate static class RDDCumAggFunction implements PairFunction<Tuple2<MatrixIndexes, MatrixBlock>, MatrixIndexes, MatrixBlock>\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2495] Adjust spark cumulative aggregate partitions This patch improves the robustness of spark cumulative aggregates by adjusting the number of partitions for intermediates of the forward pass because this data size can significantly shrink also grow.
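A compact restatement of the adjustment in the commit above, using hypothetical names: take the preferred partition count for the (possibly much smaller or larger) output, but never fewer partitions than the smaller of the default parallelism and the number of output blocks.

```java
static int adjustedNumPartitions(int preferredParts, int defaultParallelism, long numOutputBlocks) {
    int minPar = (int) Math.min(defaultParallelism, numOutputBlocks);
    return Math.max(preferredParts, minPar);
}
```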
49,738
28.09.2018 16:50:21
-7,200
7d007e7b216b4b161fa385b460f90f2d1845b4db
Improved rewrite for update-in-place in for/while loops. This patch generalizes the existing update-in-place loop rewrite to allow update-in-place for cases where correct access to the updated matrix is forced by existing data dependencies.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMarkLoopVariablesUpdateInPlace.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMarkLoopVariablesUpdateInPlace.java", "diff": "@@ -27,6 +27,7 @@ import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.hops.DataOp;\nimport org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.Hop.DataOpTypes;\nimport org.apache.sysml.hops.Hop.OpOp1;\nimport org.apache.sysml.hops.LeftIndexingOp;\nimport org.apache.sysml.hops.UnaryOp;\n@@ -117,6 +118,7 @@ public class RewriteMarkLoopVariablesUpdateInPlace extends StatementBlockRewrite\n}\nelse {\nif( sb.getHops() != null )\n+ if( !isApplicableForUpdateInPlace(sb.getHops(), varname) )\nfor( Hop hop : sb.getHops() )\nret &= isApplicableForUpdateInPlace(hop, varname);\n}\n@@ -128,18 +130,14 @@ public class RewriteMarkLoopVariablesUpdateInPlace extends StatementBlockRewrite\nreturn ret;\n}\n- private static boolean isApplicableForUpdateInPlace( Hop hop, String varname )\n- {\n+ private static boolean isApplicableForUpdateInPlace(Hop hop, String varname) {\n+ //NOTE: single-root-level validity check\nif( !hop.getName().equals(varname) )\nreturn true;\n//valid if read/updated by leftindexing\n//CP exec type not evaluated here as no lops generated yet\n- boolean validLix = hop instanceof DataOp\n- && hop.isMatrix() && hop.getInput().get(0).isMatrix()\n- && hop.getInput().get(0) instanceof LeftIndexingOp\n- && hop.getInput().get(0).getInput().get(0) instanceof DataOp\n- && hop.getInput().get(0).getInput().get(0).getName().equals(varname);\n+ boolean validLix = probeLixRoot(hop, varname);\n//valid if only safe consumers of left indexing input\nif( validLix ) {\n@@ -153,6 +151,48 @@ public class RewriteMarkLoopVariablesUpdateInPlace extends StatementBlockRewrite\nreturn validLix;\n}\n+ private static boolean isApplicableForUpdateInPlace(ArrayList<Hop> hops, String varname) {\n+ //NOTE: additional DAG-level validity check\n+\n+ // check single LIX update which is direct root-child to varname assignment\n+ Hop bLix = null;\n+ for( Hop hop : hops ) {\n+ if( probeLixRoot(hop, varname) ) {\n+ if( bLix != null ) return false; //invalid\n+ bLix = hop.getInput().get(0);\n+ }\n+ }\n+\n+ // check all other roots independent of varname\n+ boolean valid = true;\n+ Hop.resetVisitStatus(hops);\n+ for( Hop hop : hops )\n+ if( hop.getInput().get(0) != bLix )\n+ valid &= rProbeOtherRoot(hop, varname);\n+ Hop.resetVisitStatus(hops);\n+\n+ return valid;\n+ }\n+\n+ private static boolean probeLixRoot(Hop root, String varname) {\n+ return root instanceof DataOp\n+ && root.isMatrix() && root.getInput().get(0).isMatrix()\n+ && root.getInput().get(0) instanceof LeftIndexingOp\n+ && root.getInput().get(0).getInput().get(0) instanceof DataOp\n+ && root.getInput().get(0).getInput().get(0).getName().equals(varname);\n+ }\n+\n+ private static boolean rProbeOtherRoot(Hop hop, String varname) {\n+ if( hop.isVisited() )\n+ return false;\n+ boolean valid = !(hop instanceof LeftIndexingOp)\n+ && !(HopRewriteUtils.isData(hop, DataOpTypes.TRANSIENTREAD) && hop.getName().equals(varname));\n+ for( Hop c : hop.getInput() )\n+ valid &= rProbeOtherRoot(c, varname);\n+ hop.setVisited();\n+ return valid;\n+ }\n+\n@Override\npublic List<StatementBlock> rewriteStatementBlocks(List<StatementBlock> sbs, ProgramRewriteStatus sate) {\nreturn sbs;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2490] Improved rewrite for update-in-place in for/while loops This patch generalizes the existing update-in-place loop rewrite to allow update-in-place for cases where correct access to the updated matrix is forced by existing data dependencies.
49,760
06.10.2018 16:52:39
-7,200
30cff5e22ed992f9a3ab2447f26e9053b5e513bc
Extended sparsity estimator layered graph for mm chains Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -54,7 +54,10 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\n@Override\npublic MatrixCharacteristics estim(MMNode root) {\n- throw new NotImplementedException();\n+ List<MatrixBlock> leafs = getMatrices(root, new ArrayList<>());\n+ long nnz = new LayeredGraph(leafs, _rounds).estimateNnz();\n+ return root.setMatrixCharacteristics(new MatrixCharacteristics(\n+ leafs.get(0).getNumRows(), leafs.get(leafs.size()-1).getNumColumns(), nnz));\n}\n@Override\n@@ -69,20 +72,30 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2) {\n- LayeredGraph graph = new LayeredGraph(m1, m2, _rounds);\n- return OptimizerUtils.getSparsity(m1.getNumRows(),\n- m2.getNumColumns(), graph.estimateNnz());\n+ LayeredGraph graph = new LayeredGraph(Arrays.asList(m1,m2), _rounds);\n+ return OptimizerUtils.getSparsity(\n+ m1.getNumRows(), m2.getNumColumns(), graph.estimateNnz());\n+ }\n+\n+ private List<MatrixBlock> getMatrices(MMNode node, List<MatrixBlock> leafs) {\n+ //NOTE: this extraction is only correct and efficient for chains, no DAGs\n+ if( node.isLeaf() )\n+ leafs.add(node.getData());\n+ else {\n+ getMatrices(node.getLeft(), leafs);\n+ getMatrices(node.getRight(), leafs);\n+ }\n+ return leafs;\n}\nprivate static class LayeredGraph {\nprivate final List<Node[]> _nodes; //nodes partitioned by graph level\nprivate final int _rounds; //length of propagated r-vectors\n- public LayeredGraph(MatrixBlock m1, MatrixBlock m2, int r) {\n+ public LayeredGraph(List<MatrixBlock> chain, int r) {\n_nodes = new ArrayList<>();\n_rounds = r;\n- buildNext(m1);\n- buildNext(m2);\n+ chain.forEach(i -> buildNext(i));\n}\npublic void buildNext(MatrixBlock mb) {\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SelfProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SelfProductTest.java", "diff": "@@ -26,6 +26,7 @@ import org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorLayeredGraph;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\nimport org.apache.sysml.hops.estim.EstimatorSample;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\n@@ -129,6 +130,16 @@ public class SelfProductTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorSample(0.2), m, sparsity2);\n}\n+ @Test\n+ public void testLayeredGraphCase1() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, sparsity1);\n+ }\n+\n+ @Test\n+ public void testLayeredGraphCase2() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, sparsity2);\n+ }\n+\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int n, double sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, n, sp, 1, 1, \"uniform\", 3);\nMatrixBlock m3 = m1.aggregateBinaryOperations(m1, m1,\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "diff": "@@ -23,6 +23,7 @@ 
import org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorLayeredGraph;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\nimport org.apache.sysml.hops.estim.MMNode;\nimport org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\n@@ -126,6 +127,16 @@ public class SquaredProductChainTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorMatrixHistogram(true), m, k, n, n2, case2);\n}\n+ @Test\n+ public void testLayeredGraphCase1() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, n2, case1);\n+ }\n+\n+ @Test\n+ public void testLayeredGraphCase2() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, n2, case2);\n+ }\n+\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, int n2, double[] sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 1);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 2);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "diff": "@@ -24,6 +24,7 @@ import org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\nimport org.apache.sysml.hops.estim.EstimatorDensityMap;\n+import org.apache.sysml.hops.estim.EstimatorLayeredGraph;\nimport org.apache.sysml.hops.estim.EstimatorMatrixHistogram;\nimport org.apache.sysml.hops.estim.EstimatorSample;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\n@@ -144,6 +145,16 @@ public class SquaredProductTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorSample(0.2), m, k, n, case2);\n}\n+ @Test\n+ public void testLayeredGraphCase1() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, case1);\n+ }\n+\n+ @Test\n+ public void testLayeredGraphCase2() {\n+ runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, n, case2);\n+ }\n+\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 7);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2291] Extended sparsity estimator layered graph for mm chains Closes #829.
49,738
06.10.2018 21:44:13
-7,200
ef15e582b6d6cae1aa8206279b4cc063d717e287
Extended matrix histogram sketch propagation (misc ops). This patch extends the matrix histogram sparsity estimator by sketch propagation for intermediates of remaining operations (comparison, transpose, diag, reshape). Furthermore, this also includes some minor performance improvements for sparsity estimation of element-wise addition and multiplication, as well as accuracy improvements for element-wise addition.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -105,22 +105,20 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncase MM:\nreturn estimInternMM(h1, h2);\ncase MULT: {\n- final double N1 = h1.getNonZeros();\n- final double N2 = h2.getNonZeros();\n- final long scale = IntStream.range(0, h1.getCols())\n- .mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ final double scale = IntStream.range(0, h1.getCols())\n+ .mapToDouble(j -> (double)h1.cNnz[j] * h2.cNnz[j]).sum()\n+ / h1.getNonZeros() / h2.getNonZeros();\nreturn IntStream.range(0, h1.getRows())\n- .mapToDouble(i -> (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2) //collisions\n+ .mapToDouble(i -> (double)h1.rNnz[i] * h2.rNnz[i] * scale) //collisions\n.sum() / msize;\n}\ncase PLUS: {\n- final double N1 = h1.getNonZeros();\n- final double N2 = h2.getNonZeros();\n- final long scale = IntStream.range(0, h1.getCols())\n- .mapToLong(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ final double scale = IntStream.range(0, h1.getCols())\n+ .mapToDouble(j -> (double)h1.cNnz[j] * h2.cNnz[j]).sum()\n+ / h1.getNonZeros() / h2.getNonZeros();\nreturn IntStream.range(0, h1.getRows())\n- .mapToDouble(i -> (long)h1.rNnz[i] + h2.rNnz[i] //all minus collisions\n- - (long)h1.rNnz[i] * h2.rNnz[i] * scale / N1 / N2)\n+ .mapToDouble(i -> (double)h1.rNnz[i] + h2.rNnz[i] //all minus collisions\n+ - (double)h1.rNnz[i] * h2.rNnz[i] * scale)\n.sum() / msize;\n}\ncase EQZERO:\n@@ -320,7 +318,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncase PLUS: return derivePlusHistogram(h1, h2);\ncase RBIND: return deriveRbindHistogram(h1, h2);\ncase CBIND: return deriveCbindHistogram(h1, h2);\n- //TODO add missing unary operations\n+ case NEQZERO: return h1;\n+ case EQZERO: return deriveEq0Histogram(h1);\n+ case DIAG: return deriveDiagHistogram(h1);\n+ case TRANS: return deriveTransHistogram(h1);\n+ case RESHAPE: return deriveReshapeHistogram(h1, h1.getRows(), h1.getCols());\n+ //FIXME: reshape requires additional meta data from MM node\ndefault:\nthrow new NotImplementedException();\n}\n@@ -356,39 +359,44 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\nprivate static MatrixHistogram deriveMultHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n- final double N1 = h1.getNonZeros();\n- final double N2 = h2.getNonZeros();\nfinal double scaler = IntStream.range(0, h1.getCols())\n- .mapToDouble(j -> (long)h1.cNnz[j] * h2.cNnz[j]).sum();\n+ .mapToDouble(j -> (double)h1.cNnz[j] * h2.cNnz[j])\n+ .sum() / h1.getNonZeros() / h2.getNonZeros();\nfinal double scalec = IntStream.range(0, h1.getRows())\n- .mapToDouble(j -> (long)h1.rNnz[j] * h2.rNnz[j]).sum();\n+ .mapToDouble(j -> (double)h1.rNnz[j] * h2.rNnz[j])\n+ .sum() / h1.getNonZeros() / h2.getNonZeros();\nint rMaxNnz = 0, cMaxNnz = 0;\nRandom rn = new Random();\nint[] rNnz = new int[h1.getRows()];\nfor(int i=0; i<h1.getRows(); i++) {\n- rNnz[i] = probRound(h1.rNnz[i] * h2.rNnz[i] * scaler / N1 / N2, rn);\n+ rNnz[i] = probRound((double)h1.rNnz[i] * h2.rNnz[i] * scaler, rn);\nrMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n}\nint[] cNnz = new int[h1.getCols()];\nfor(int i=0; i<h1.getCols(); i++) {\n- cNnz[i] = probRound(h1.cNnz[i] * h2.cNnz[i] * scalec / N1 / N2, rn);\n+ cNnz[i] = probRound((double)h1.cNnz[i] * h2.cNnz[i] * scalec, rn);\ncMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n}\nreturn new MatrixHistogram(rNnz, 
null, cNnz, null, rMaxNnz, cMaxNnz);\n}\nprivate static MatrixHistogram derivePlusHistogram(MatrixHistogram h1, MatrixHistogram h2) {\n- double msize = (double)h1.getRows()*h1.getCols();\n+ final double scaler = IntStream.range(0, h1.getCols())\n+ .mapToDouble(j -> (double)h1.cNnz[j] * h2.cNnz[j])\n+ .sum() / h1.getNonZeros() / h2.getNonZeros();\n+ final double scalec = IntStream.range(0, h1.getRows())\n+ .mapToDouble(j -> (double)h1.rNnz[j] * h2.rNnz[j])\n+ .sum() / h1.getNonZeros() / h2.getNonZeros();\nint rMaxNnz = 0, cMaxNnz = 0;\nRandom rn = new Random();\nint[] rNnz = new int[h1.getRows()];\nfor(int i=0; i<h1.getRows(); i++) {\n- rNnz[i] = probRound(h1.rNnz[i]/msize + h2.rNnz[i]/msize - h1.rNnz[i]/msize * h2.rNnz[i]/msize, rn);\n+ rNnz[i] = probRound(h1.rNnz[i] + h2.rNnz[i] - (double)h1.rNnz[i] * h2.rNnz[i] * scaler, rn);\nrMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n}\nint[] cNnz = new int[h1.getCols()];\nfor(int i=0; i<h1.getCols(); i++) {\n- cNnz[i] = probRound(h1.cNnz[i]/msize + h2.cNnz[i]/msize - h1.cNnz[i]/msize * h2.cNnz[i]/msize, rn);\n+ cNnz[i] = probRound(h1.cNnz[i] + h2.cNnz[i] - (double)h1.cNnz[i] * h2.cNnz[i] * scalec, rn);\ncMaxNnz = Math.max(cMaxNnz, cNnz[i]);\n}\nreturn new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n@@ -418,6 +426,90 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nreturn new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n}\n+ private static MatrixHistogram deriveEq0Histogram(MatrixHistogram h1) {\n+ final int m = h1.getRows(), n = h1.getCols();\n+ int[] rNnz = new int[m], cNnz = new int[n];\n+ int rMaxNnz = 0, cMaxNnz = 0;\n+ for(int i=0; i<m; i++) {\n+ rNnz[i] = n - h1.rNnz[i];\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n+ }\n+ for(int j=0; j<n; j++) {\n+ cNnz[j] = m - h1.cNnz[j];\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[j]);\n+ }\n+ return new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n+ }\n+\n+ private static MatrixHistogram deriveDiagHistogram(MatrixHistogram h1) {\n+ if( h1.getCols() == 1 ) { //vector-matrix\n+ //shallow copy as row count vector is preserved for rows/cols\n+ return new MatrixHistogram(h1.rNnz, null,\n+ h1.rNnz, null, h1.rMaxNnz, h1.rMaxNnz);\n+ }\n+ else { //matrix-vector\n+ final int m = h1.getRows(), n = h1.getCols();\n+ int[] rNnz = new int[m], cNnz = new int[1];\n+ int rMaxNnz = 0; Random rand = new Random();\n+ for(int i=0; i<m; i++) {\n+ rNnz[i] = probRound((double)h1.getNonZeros()/n, rand);\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n+ cNnz[0] += rNnz[i];\n+ }\n+ return new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cNnz[0]);\n+ }\n+ }\n+\n+ private static MatrixHistogram deriveTransHistogram(MatrixHistogram h1) {\n+ return new MatrixHistogram(h1.cNnz, h1.cNnz1e, h1.rNnz, h1.rNnz1e, h1.cMaxNnz, h1.rMaxNnz);\n+ }\n+\n+ private static MatrixHistogram deriveReshapeHistogram(MatrixHistogram h1, int rows, int cols) {\n+ if( h1.getRows() == rows )\n+ return h1;\n+ else if( h1.getCols() % cols != 0\n+ && h1.getRows() % rows != 0 )\n+ return null;\n+\n+ //limitation: only applies to scenarios where each input row\n+ //maps to N output rows, or N input rows map to 1 output row.\n+ //TODO generalize implementation for partial fractions\n+ final int m = h1.getRows(), n = h1.getCols();\n+ int[] rNnz = new int[rows], cNnz = new int[cols];\n+ int rMaxNnz = 0, cMaxNnz = 0;\n+ if( h1.getCols() % cols == 0 ) { //1->N rows\n+ //scale and replicate row counts\n+ int scale = h1.getCols()/cols;\n+ for(int i=0, pos=0; i<m; i++, pos+=scale) {\n+ for(int j=0; j<scale; j++)\n+ rNnz[pos+j] = 
h1.rNnz[i]/scale;\n+ rMaxNnz = Math.max(rMaxNnz, h1.rNnz[i]/scale);\n+ }\n+ //aggregate column counts\n+ for(int j=0; j<n; j+=scale)\n+ for(int j2=0; j2<scale; j2++)\n+ cNnz[j2] += h1.cNnz[j];\n+ for(int j2=0; j2<scale; j2++)\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[j2]);\n+ }\n+ else if ( h1.getRows() % rows == 0 ) { //N->1 rows\n+ int scale = h1.getRows()/rows;\n+ //scale and replicate column counts\n+ for(int i=0, pos=0; i<n; i++, pos+=scale) {\n+ for(int j=0; j<scale; j++)\n+ cNnz[pos+j] = h1.cNnz[i]/scale;\n+ cMaxNnz = Math.max(cMaxNnz, h1.cNnz[i]/scale);\n+ }\n+ //aggregate row counts\n+ for(int j=0; j<m; j+=scale)\n+ for(int j2=0; j2<scale; j2++)\n+ rNnz[j2] += h1.rNnz[j];\n+ for(int j2=0; j2<scale; j2++)\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[j2]);\n+ }\n+ return new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n+ }\n+\nprivate static int probRound(double inNnz, Random rand) {\ndouble temp = Math.floor(inNnz);\ndouble f = inNnz - temp; //non-int fraction [0,1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended matrix histogram sketch propagation (misc ops) This patch extends the matrix histogram sparsity estimator by sketch propagation for intermediates of remaining operations (comparison, transpose, diag, reshape). Furthermore, this also includes some minor performance improvements for sparsity estimation of element-wise addition and multiplication, as well as accuracy improvements for element-wise addition.
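The element-wise multiply estimate changed in the commit above admits a simple numeric check: given row nonzero counts r1, r2 and column nonzero counts c1, c2 of the two inputs, the estimated output nnz is sum_i r1[i]*r2[i]*scale with scale = sum_j c1[j]*c2[j] / (nnz1*nnz2). A self-contained sketch of that computation (illustrative names, not the estimator's API):

```java
static double estimMultSparsity(int[] r1, int[] c1, int[] r2, int[] c2) {
    long nnz1 = 0, nnz2 = 0;
    for (int v : r1) nnz1 += v;
    for (int v : r2) nnz2 += v;
    double scale = 0;
    for (int j = 0; j < c1.length; j++)
        scale += (double) c1[j] * c2[j];
    scale /= (double) nnz1 * nnz2;            // collision probability scaling
    double nnzOut = 0;
    for (int i = 0; i < r1.length; i++)
        nnzOut += (double) r1[i] * r2[i] * scale;
    return nnzOut / ((double) r1.length * c1.length); // sparsity = nnz / (rows * cols)
}
```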
49,736
09.10.2018 13:25:47
25,200
8a144f2b35343a7aa8fbb4bf7aedd31dd36a3852
[MINOR] Bugfix in Keras2DML API when loading weights from Keras
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/estimators.py", "new_path": "src/main/python/systemml/mllearn/estimators.py", "diff": "@@ -1017,7 +1017,7 @@ class Keras2DML(Caffe2DML):\nweight_decay: regularation strength (default: 5e-4)\nregularization_type: regularization type (default: \"L2\")\n\"\"\"\n- from .keras2caffe import convertKerasToCaffeNetwork, convertKerasToCaffeSolver\n+ from .keras2caffe import convertKerasToCaffeNetwork, convertKerasToCaffeSolver, convertKerasToSystemMLModel\nimport tempfile, keras\nif isinstance(keras_model, keras.models.Sequential):\n# Convert the sequential model to functional model\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Bugfix in Keras2DML API when loading weights from Keras
49,736
09.10.2018 13:36:45
25,200
fab31fd1f3b8c832641ba2cd8f2a678ecdfcf043
Fixed the error handling during GPU memory cleanup. If an error occurs during cleanup of temporary memory and freeing of the GPU context, SystemML does not display the correct error message. This commit fixes this issue.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -75,6 +75,7 @@ public class ScriptExecutorUtils {\nboolean exceptionThrown = false;\nStatistics.startRunTimer();\n+ Exception finalizeException = null;\ntry {\n// run execute (w/ exception handling to ensure proper shutdown)\nif (ConfigurationManager.isGPU() && ec != null) {\n@@ -92,6 +93,7 @@ public class ScriptExecutorUtils {\nthrow e;\n} finally { // ensure cleanup/shutdown\nif (ConfigurationManager.isGPU() && !ec.getGPUContexts().isEmpty()) {\n+ try {\n// -----------------------------------------------------------------\n// The below code pulls the output variables on the GPU to the host. This is required especially when:\n// The output variable was generated as part of a MLContext session with GPU enabled\n@@ -115,6 +117,10 @@ public class ScriptExecutorUtils {\ngCtx.clearTemporaryMemory();\n}\nGPUContextPool.freeAllGPUContexts();\n+ } catch (Exception e1) {\n+ exceptionThrown = true;\n+ finalizeException = e1; // do not throw exception while cleanup\n+ }\n}\nif( ConfigurationManager.isCodegenEnabled() )\nSpoofCompiler.cleanupCodeGenerator();\n@@ -126,6 +132,9 @@ public class ScriptExecutorUtils {\nstatisticsMaxHeavyHitters : ConfigurationManager.getDMLOptions().getStatisticsMaxHeavyHitters()));\nConfigurationManager.resetStatistics();\n}\n+ if(finalizeException != null) {\n+ throw new DMLRuntimeException(\"Error occured while GPU memory cleanup.\", finalizeException);\n+ }\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Fixed the error handling during GPU memory cleanup If an error occurs during cleanup of temporary memory and free-ing of GPU context, SystemML does not display the correct error message. This commit fixes this issue.
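The pattern used in the commit above, remembering an exception raised during GPU cleanup, finishing the remaining shutdown work, and only then surfacing it so that it does not mask the primary error, can be summarized in a small sketch with hypothetical names (not the SystemML API):

```java
static void runWithGuardedCleanup(Runnable body, Runnable cleanup) {
    Exception finalizeException = null;
    try {
        body.run();
    } finally {
        try {
            cleanup.run();         // e.g., copy outputs back and free GPU contexts
        } catch (Exception e) {
            finalizeException = e; // do not throw while still shutting down
        }
        // ... other shutdown work such as printing statistics would run here ...
    }
    if (finalizeException != null) // only reached when 'body' itself did not fail
        throw new RuntimeException("Error occurred during GPU memory cleanup.", finalizeException);
}
```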
49,736
09.10.2018 13:56:47
25,200
512fb9e119541ae9d7dae58c0812a89d569d1ca0
Extend coverage for GPU batchnorm test rewrite. If the inv_var rewrite has already been applied, the application of the GPU batchnorm test rewrite (and the CuDNN batchnorm kernel) is skipped. This commit fixes this performance regression. Also, this commit allows forcing of GPU rewrites in case of forced GPU mode.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopDagPatternMatcher.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopDagPatternMatcher.java", "diff": "@@ -294,6 +294,19 @@ public class HopDagPatternMatcher {\nreturn new HopDagPatternMatcher().addPredicate(\"sqrt\", h -> HopRewriteUtils.isUnary(h, OpOp1.SQRT))\n.addChildMatcher(child);\n}\n+ public static HopDagPatternMatcher inv_var(HopDagPatternMatcher var, HopDagPatternMatcher eps) {\n+ return new HopDagPatternMatcher().addPredicate(\"sqrt\", h -> {\n+ if(HopRewriteUtils.isDnn(h, OpOpDnn.INV_VAR)) {\n+ return true;\n+ }\n+ else {\n+ return HopRewriteUtils.isBinary(h, OpOp2.DIV) && HopRewriteUtils.isLiteralOfValue(h.getInput().get(0), 1.0) &&\n+ HopRewriteUtils.isUnary(h.getInput().get(1), OpOp1.SQRT) &&\n+ HopRewriteUtils.isBinary(h.getInput().get(1).getInput().get(0), OpOp2.PLUS);\n+ }\n+ })\n+ .addChildMatcher(var, eps);\n+ }\npublic static HopDagPatternMatcher div(HopDagPatternMatcher child1, HopDagPatternMatcher child2) {\nreturn new HopDagPatternMatcher().addPredicate(\"div\", h -> HopRewriteUtils.isBinary(h, OpOp2.DIV))\n.addChildMatcher(child1, child2);\n@@ -370,6 +383,8 @@ public class HopDagPatternMatcher {\n.addChildMatcher(child1, dummy);\n}\nprivate static boolean _fitsOnGPU(Hop h, double multiplier) {\n+ if(ConfigurationManager.isForcedGPU())\n+ return true;\ndouble memEst = multiplier*h.getMemEstimate();\nreturn ConfigurationManager.isGPU() && h.dimsKnown() && OptimizerUtils.isMemoryBasedOptLevel() &&\nmemEst < OptimizerUtils.getLocalMemBudget() && memEst < GPUContextPool.initialGPUMemBudget();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "diff": "@@ -178,7 +178,7 @@ public class RewriteGPUSpecificOps extends HopRewriteRuleWithPatternMatcher {\nHopDagPatternMatcher norm =\nbias_multiply(\nbias_add(leaf(\"X\", MATRIX), unaryMinus(leaf(\"mean\", MATRIX))), // bias_add(X, -mean)\n- div(1, sqrt(plus(leaf(\"var\", MATRIX), leaf(\"eps\", SCALAR))))); // 1/sqrt(var+eps)\n+ inv_var(leaf(\"var\", MATRIX), leaf(\"eps\", SCALAR))); // 1/sqrt(var+eps)\n// hi = bias_add(bias_multiply(norm, gamma), beta)\n_batchNormTest =\nbias_add(\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Extend coverage for GPU batchnorm test rewrite - If inv_var rewrite has already been applied, the application of GPU batchnorm test rewrite (and CuDNN batchnorm kernel) is skipped. This commit fixes this performance regression. - Also, this commit allows for forcing of GPU rewrites in case of forced GPU mode.
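As a scalar sanity check of what the extended matcher in the commit above recognizes: the explicit expression 1/sqrt(var+eps) and the already-fused INV_VAR operator denote the same normalization factor used by batch-norm inference. A hypothetical illustration of the computed quantity:

```java
static double invVar(double variance, double eps) {
    return 1.0 / Math.sqrt(variance + eps);
}

// batch_norm2d test mode for a single value:
// out = gamma * (x - mean) / sqrt(variance + eps) + beta
static double batchNormTest(double x, double mean, double variance, double eps,
        double gamma, double beta) {
    return (x - mean) * invVar(variance, eps) * gamma + beta;
}
```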
49,736
09.10.2018 14:58:09
25,200
3702df7c1890b8c87c42715260240c604a5c3c64
Improved the performance of batchnorm backward. Added a custom kernel for computing dgamma in the batch normalization layer. Also, fixed a minor bug in the GPUDenseInputPointerFetcher class.
[ { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.cu", "new_path": "src/main/cpp/kernels/SystemML.cu", "diff": "@@ -2385,3 +2385,24 @@ extern \"C\" __global__ void invVar_f(float *X, float *C, double eps, unsigned int\ninvVar(X, C, eps, size);\n}\n+template <typename T>\n+__device__ void backward_dgamma_tmp(T *ema_mean, T *dout, T *X, T*ema_var, T*ret, int N, int C,\n+ int HW, int CHW, unsigned int NCHW) {\n+ int tid = blockIdx.x * blockDim.x + threadIdx.x;\n+ int ix = tid / CHW;\n+ int iy = tid % CHW;\n+ if (ix < N && iy < CHW) {\n+ int c = iy / HW;\n+ ret[tid] = dout[tid] * ((X[tid] - ema_mean[c]) * ema_var[c]);\n+ }\n+}\n+\n+extern \"C\" __global__ void backward_dgamma_tmp_d(double *ema_mean, double *dout, double *X, double* ema_var, double* ret,\n+ int N, int C, int HW, int CHW, unsigned int NCHW) {\n+ backward_dgamma_tmp(ema_mean, dout, X, ema_var, ret, N, C, HW, CHW, NCHW);\n+}\n+\n+extern \"C\" __global__ void backward_dgamma_tmp_f(double *ema_mean, double *dout, double *X, double* ema_var, double* ret,\n+ int N, int C, int HW, int CHW, int NCHW) {\n+ backward_dgamma_tmp(ema_mean, dout, X, ema_var, ret, N, C, HW, CHW, NCHW);\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.ptx", "new_path": "src/main/cpp/kernels/SystemML.ptx", "diff": "@@ -15084,12 +15084,146 @@ BB123_2:\nret;\n}\n+ // .globl backward_dgamma_tmp_d\n+.visible .entry backward_dgamma_tmp_d(\n+ .param .u64 backward_dgamma_tmp_d_param_0,\n+ .param .u64 backward_dgamma_tmp_d_param_1,\n+ .param .u64 backward_dgamma_tmp_d_param_2,\n+ .param .u64 backward_dgamma_tmp_d_param_3,\n+ .param .u64 backward_dgamma_tmp_d_param_4,\n+ .param .u32 backward_dgamma_tmp_d_param_5,\n+ .param .u32 backward_dgamma_tmp_d_param_6,\n+ .param .u32 backward_dgamma_tmp_d_param_7,\n+ .param .u32 backward_dgamma_tmp_d_param_8,\n+ .param .u32 backward_dgamma_tmp_d_param_9\n+)\n+{\n+ .reg .pred %p<4>;\n+ .reg .b32 %r<11>;\n+ .reg .f64 %fd<8>;\n+ .reg .b64 %rd<18>;\n+\n+\n+ ld.param.u64 %rd1, [backward_dgamma_tmp_d_param_0];\n+ ld.param.u64 %rd2, [backward_dgamma_tmp_d_param_1];\n+ ld.param.u64 %rd3, [backward_dgamma_tmp_d_param_2];\n+ ld.param.u64 %rd4, [backward_dgamma_tmp_d_param_3];\n+ ld.param.u64 %rd5, [backward_dgamma_tmp_d_param_4];\n+ ld.param.u32 %r4, [backward_dgamma_tmp_d_param_5];\n+ ld.param.u32 %r2, [backward_dgamma_tmp_d_param_7];\n+ ld.param.u32 %r3, [backward_dgamma_tmp_d_param_8];\n+ mov.u32 %r5, %ctaid.x;\n+ mov.u32 %r6, %ntid.x;\n+ mov.u32 %r7, %tid.x;\n+ mad.lo.s32 %r1, %r6, %r5, %r7;\n+ div.s32 %r8, %r1, %r3;\n+ setp.lt.s32 %p1, %r8, %r4;\n+ setp.gt.s32 %p2, %r3, -1;\n+ and.pred %p3, %p1, %p2;\n+ @!%p3 bra BB124_2;\n+ bra.uni BB124_1;\n+\n+BB124_1:\n+ rem.s32 %r9, %r1, %r3;\n+ cvta.to.global.u64 %rd6, %rd2;\n+ mul.wide.s32 %rd7, %r1, 8;\n+ add.s64 %rd8, %rd6, %rd7;\n+ cvta.to.global.u64 %rd9, %rd3;\n+ add.s64 %rd10, %rd9, %rd7;\n+ div.s32 %r10, %r9, %r2;\n+ cvta.to.global.u64 %rd11, %rd1;\n+ mul.wide.s32 %rd12, %r10, 8;\n+ add.s64 %rd13, %rd11, %rd12;\n+ ld.global.f64 %fd1, [%rd13];\n+ ld.global.f64 %fd2, [%rd10];\n+ sub.f64 %fd3, %fd2, %fd1;\n+ cvta.to.global.u64 %rd14, %rd4;\n+ add.s64 %rd15, %rd14, %rd12;\n+ ld.global.f64 %fd4, [%rd15];\n+ mul.f64 %fd5, %fd3, %fd4;\n+ ld.global.f64 %fd6, [%rd8];\n+ mul.f64 %fd7, %fd6, %fd5;\n+ cvta.to.global.u64 %rd16, %rd5;\n+ add.s64 %rd17, %rd16, %rd7;\n+ st.global.f64 [%rd17], %fd7;\n+\n+BB124_2:\n+ ret;\n+}\n+\n+ // .globl backward_dgamma_tmp_f\n+.visible .entry backward_dgamma_tmp_f(\n+ .param .u64 backward_dgamma_tmp_f_param_0,\n+ 
.param .u64 backward_dgamma_tmp_f_param_1,\n+ .param .u64 backward_dgamma_tmp_f_param_2,\n+ .param .u64 backward_dgamma_tmp_f_param_3,\n+ .param .u64 backward_dgamma_tmp_f_param_4,\n+ .param .u32 backward_dgamma_tmp_f_param_5,\n+ .param .u32 backward_dgamma_tmp_f_param_6,\n+ .param .u32 backward_dgamma_tmp_f_param_7,\n+ .param .u32 backward_dgamma_tmp_f_param_8,\n+ .param .u32 backward_dgamma_tmp_f_param_9\n+)\n+{\n+ .reg .pred %p<4>;\n+ .reg .b32 %r<11>;\n+ .reg .f64 %fd<8>;\n+ .reg .b64 %rd<18>;\n+\n+\n+ ld.param.u64 %rd1, [backward_dgamma_tmp_f_param_0];\n+ ld.param.u64 %rd2, [backward_dgamma_tmp_f_param_1];\n+ ld.param.u64 %rd3, [backward_dgamma_tmp_f_param_2];\n+ ld.param.u64 %rd4, [backward_dgamma_tmp_f_param_3];\n+ ld.param.u64 %rd5, [backward_dgamma_tmp_f_param_4];\n+ ld.param.u32 %r4, [backward_dgamma_tmp_f_param_5];\n+ ld.param.u32 %r2, [backward_dgamma_tmp_f_param_7];\n+ ld.param.u32 %r3, [backward_dgamma_tmp_f_param_8];\n+ mov.u32 %r5, %ctaid.x;\n+ mov.u32 %r6, %ntid.x;\n+ mov.u32 %r7, %tid.x;\n+ mad.lo.s32 %r1, %r6, %r5, %r7;\n+ div.s32 %r8, %r1, %r3;\n+ setp.lt.s32 %p1, %r8, %r4;\n+ setp.gt.s32 %p2, %r3, -1;\n+ and.pred %p3, %p1, %p2;\n+ @!%p3 bra BB125_2;\n+ bra.uni BB125_1;\n+\n+BB125_1:\n+ rem.s32 %r9, %r1, %r3;\n+ cvta.to.global.u64 %rd6, %rd2;\n+ mul.wide.s32 %rd7, %r1, 8;\n+ add.s64 %rd8, %rd6, %rd7;\n+ cvta.to.global.u64 %rd9, %rd3;\n+ add.s64 %rd10, %rd9, %rd7;\n+ div.s32 %r10, %r9, %r2;\n+ cvta.to.global.u64 %rd11, %rd1;\n+ mul.wide.s32 %rd12, %r10, 8;\n+ add.s64 %rd13, %rd11, %rd12;\n+ ld.global.f64 %fd1, [%rd13];\n+ ld.global.f64 %fd2, [%rd10];\n+ sub.f64 %fd3, %fd2, %fd1;\n+ cvta.to.global.u64 %rd14, %rd4;\n+ add.s64 %rd15, %rd14, %rd12;\n+ ld.global.f64 %fd4, [%rd15];\n+ mul.f64 %fd5, %fd3, %fd4;\n+ ld.global.f64 %fd6, [%rd8];\n+ mul.f64 %fd7, %fd6, %fd5;\n+ cvta.to.global.u64 %rd16, %rd5;\n+ add.s64 %rd17, %rd16, %rd7;\n+ st.global.f64 [%rd17], %fd7;\n+\n+BB125_2:\n+ ret;\n+}\n+\n.func (.param .b64 func_retval0) __internal_trig_reduction_slowpathd(\n.param .b64 __internal_trig_reduction_slowpathd_param_0,\n.param .b64 __internal_trig_reduction_slowpathd_param_1\n)\n{\n- .local .align 8 .b8 __local_depot124[40];\n+ .local .align 8 .b8 __local_depot126[40];\n.reg .b64 %SP;\n.reg .b64 %SPL;\n.reg .pred %p<9>;\n@@ -15098,7 +15232,7 @@ BB123_2:\n.reg .b64 %rd<102>;\n- mov.u64 %rd101, __local_depot124;\n+ mov.u64 %rd101, __local_depot126;\ncvta.local.u64 %SP, %rd101;\nld.param.f64 %fd4, [__internal_trig_reduction_slowpathd_param_0];\nld.param.u64 %rd37, [__internal_trig_reduction_slowpathd_param_1];\n@@ -15112,7 +15246,7 @@ BB123_2:\nshr.u32 %r3, %r1, 20;\nbfe.u32 %r4, %r1, 20, 11;\nsetp.eq.s32 %p1, %r4, 2047;\n- @%p1 bra BB124_13;\n+ @%p1 bra BB126_13;\nadd.s32 %r15, %r4, -1024;\nshr.u32 %r16, %r15, 6;\n@@ -15125,7 +15259,7 @@ BB123_2:\nmov.u64 %rd94, 0;\nsetp.ge.s32 %p2, %r5, %r6;\nmov.u64 %rd93, %rd1;\n- @%p2 bra BB124_4;\n+ @%p2 bra BB126_4;\nmov.b64 %rd41, %fd4;\nshl.b64 %rd42, %rd41, 11;\n@@ -15142,7 +15276,7 @@ BB123_2:\nmov.u64 %rd91, %rd1;\nmov.u32 %r39, %r5;\n-BB124_3:\n+BB126_3:\n.pragma \"nounroll\";\nld.const.u64 %rd47, [%rd89];\n// inline asm\n@@ -15172,15 +15306,15 @@ BB124_3:\nadd.s64 %rd93, %rd93, 8;\nadd.s64 %rd89, %rd89, 8;\nsetp.lt.s32 %p3, %r39, %r6;\n- @%p3 bra BB124_3;\n+ @%p3 bra BB126_3;\n-BB124_4:\n+BB126_4:\nst.local.u64 [%rd93], %rd94;\nld.local.u64 %rd95, [%rd1+16];\nld.local.u64 %rd96, [%rd1+24];\nand.b32 %r9, %r3, 63;\nsetp.eq.s32 %p4, %r9, 0;\n- @%p4 bra BB124_6;\n+ @%p4 bra BB126_6;\nmov.u32 %r27, 64;\nsub.s32 %r28, %r27, %r9;\n@@ 
-15192,7 +15326,7 @@ BB124_4:\nshr.u64 %rd55, %rd54, %r28;\nor.b64 %rd95, %rd55, %rd53;\n-BB124_6:\n+BB126_6:\ncvta.to.local.u64 %rd56, %rd37;\nshr.u64 %rd57, %rd96, 62;\ncvt.u32.u64 %r29, %rd57;\n@@ -15209,7 +15343,7 @@ BB124_6:\nselp.b32 %r34, %r32, %r33, %p5;\nst.local.u32 [%rd56], %r34;\nsetp.eq.s32 %p6, %r31, 0;\n- @%p6 bra BB124_8;\n+ @%p6 bra BB126_8;\nmov.u64 %rd64, 0;\n// inline asm\n@@ -15229,10 +15363,10 @@ BB124_6:\n// inline asm\nxor.b32 %r40, %r40, -2147483648;\n-BB124_8:\n+BB126_8:\nclz.b64 %r41, %rd98;\nsetp.eq.s32 %p7, %r41, 0;\n- @%p7 bra BB124_10;\n+ @%p7 bra BB126_10;\nshl.b64 %rd67, %rd98, %r41;\nmov.u32 %r35, 64;\n@@ -15240,7 +15374,7 @@ BB124_8:\nshr.u64 %rd68, %rd97, %r36;\nor.b64 %rd98, %rd68, %rd67;\n-BB124_10:\n+BB126_10:\nmov.u64 %rd72, -3958705157555305931;\n// inline asm\n{\n@@ -15261,7 +15395,7 @@ BB124_10:\n}\n// inline asm\nsetp.lt.s64 %p8, %rd100, 1;\n- @%p8 bra BB124_12;\n+ @%p8 bra BB126_12;\n// inline asm\n{\n@@ -15280,7 +15414,7 @@ BB124_10:\n// inline asm\nadd.s32 %r41, %r41, 1;\n-BB124_12:\n+BB126_12:\ncvt.u64.u32 %rd79, %r40;\nshl.b64 %rd80, %rd79, 32;\nmov.u32 %r37, 1022;\n@@ -15295,7 +15429,7 @@ BB124_12:\nor.b64 %rd88, %rd87, %rd80;\nmov.b64 %fd4, %rd88;\n-BB124_13:\n+BB126_13:\nst.param.f64 [func_retval0+0], %fd4;\nret;\n}\n@@ -15323,7 +15457,7 @@ BB124_13:\n}\nshr.u32 %r51, %r50, 20;\nsetp.ne.s32 %p1, %r51, 0;\n- @%p1 bra BB125_2;\n+ @%p1 bra BB127_2;\nmul.f64 %fd14, %fd12, 0d4350000000000000;\n{\n@@ -15337,13 +15471,13 @@ BB124_13:\nshr.u32 %r16, %r50, 20;\nadd.s32 %r51, %r16, -54;\n-BB125_2:\n+BB127_2:\nadd.s32 %r52, %r51, -1023;\nand.b32 %r17, %r50, -2146435073;\nor.b32 %r18, %r17, 1072693248;\nmov.b64 %fd135, {%r49, %r18};\nsetp.lt.u32 %p2, %r18, 1073127583;\n- @%p2 bra BB125_4;\n+ @%p2 bra BB127_4;\n{\n.reg .b32 %temp;\n@@ -15357,7 +15491,7 @@ BB125_2:\nmov.b64 %fd135, {%r19, %r21};\nadd.s32 %r52, %r51, -1022;\n-BB125_4:\n+BB127_4:\nadd.f64 %fd15, %fd135, 0d3FF0000000000000;\nrcp.approx.ftz.f64 %fd16, %fd15;\nneg.f64 %fd17, %fd15;\n@@ -15520,13 +15654,13 @@ BB125_4:\nmov.b32 %f2, %r35;\nabs.f32 %f1, %f2;\nsetp.lt.f32 %p4, %f1, 0f4086232B;\n- @%p4 bra BB125_7;\n+ @%p4 bra BB127_7;\nsetp.lt.f64 %p5, %fd4, 0d0000000000000000;\nadd.f64 %fd129, %fd4, 0d7FF0000000000000;\nselp.f64 %fd136, 0d0000000000000000, %fd129, %p5;\nsetp.geu.f32 %p6, %f1, 0f40874800;\n- @%p6 bra BB125_7;\n+ @%p6 bra BB127_7;\nmov.f64 %fd134, 0d4338000000000000;\nmov.f64 %fd133, 0d3FF71547652B82FE;\n@@ -15548,26 +15682,26 @@ BB125_4:\nmov.b64 %fd131, {%r44, %r43};\nmul.f64 %fd136, %fd130, %fd131;\n-BB125_7:\n+BB127_7:\n{\n.reg .b32 %temp;\nmov.b64 {%temp, %r45}, %fd136;\n}\nand.b32 %r46, %r45, 2147483647;\nsetp.ne.s32 %p7, %r46, 2146435072;\n- @%p7 bra BB125_9;\n+ @%p7 bra BB127_9;\n{\n.reg .b32 %temp;\nmov.b64 {%r47, %temp}, %fd136;\n}\nsetp.eq.s32 %p8, %r47, 0;\n- @%p8 bra BB125_10;\n+ @%p8 bra BB127_10;\n-BB125_9:\n+BB127_9:\nfma.rn.f64 %fd136, %fd136, %fd5, %fd136;\n-BB125_10:\n+BB127_10:\nst.param.f64 [func_retval0+0], %fd136;\nret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "new_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "diff": "@@ -141,6 +141,7 @@ public class DnnOp extends MultiThreadedHop\ncase UPDATE_EMA:\ncase INV_VAR:\ncase BATCH_NORM2D_BACKWARD_DX:\n+ case BATCH_NORM2D_BACKWARD_DGAMMA:\n{\n// GPU-specific operators\nsetLops(constructDnnLops(ExecType.GPU, inputs));\n@@ -181,6 +182,7 @@ public class DnnOp extends MultiThreadedHop\ncase CHANNEL_SUMS:\ncase UPDATE_EMA:\nreturn 3;\n+ case 
BATCH_NORM2D_BACKWARD_DGAMMA:\ncase UPDATE_NESTEROV_X:\nreturn 4;\ndefault:\n@@ -538,7 +540,7 @@ public class DnnOp extends MultiThreadedHop\nif(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST ||\nop == OpOpDnn.UPDATE_NESTEROV_X || op == OpOpDnn.UPDATE_EMA || op == OpOpDnn.INV_VAR ||\n- op == OpOpDnn.BATCH_NORM2D_BACKWARD_DX) {\n+ op == OpOpDnn.BATCH_NORM2D_BACKWARD_DX || op == OpOpDnn.BATCH_NORM2D_BACKWARD_DGAMMA) {\n// Same dimension as the first input\nMatrixCharacteristics[] mc = memo.getAllInputStats(getInput());\nret[0] = mc[0].rowsKnown() ? mc[0].getRows() : -1;\n@@ -755,7 +757,7 @@ public class DnnOp extends MultiThreadedHop\n{\nif(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST ||\nop == OpOpDnn.UPDATE_NESTEROV_X || op == OpOpDnn.UPDATE_EMA || op == OpOpDnn.INV_VAR ||\n- op == OpOpDnn.BATCH_NORM2D_BACKWARD_DX) {\n+ op == OpOpDnn.BATCH_NORM2D_BACKWARD_DX || op == OpOpDnn.BATCH_NORM2D_BACKWARD_DGAMMA) {\n// Same dimension as the first input\nHop input1 = getInput().get(0);\nsetDim1(input1.getDim1());\n@@ -873,7 +875,7 @@ public class DnnOp extends MultiThreadedHop\nif(op == OpOpDnn.BIASADD || op == OpOpDnn.BIASMULT || op == OpOpDnn.BATCH_NORM2D_TEST || op == OpOpDnn.CHANNEL_SUMS ||\nop == OpOpDnn.UPDATE_NESTEROV_X || op == OpOpDnn.RESHAPE_COLMEANS ||\nop == OpOpDnn.UPDATE_EMA_VAR || op == OpOpDnn.UPDATE_EMA || op == OpOpDnn.INV_VAR ||\n- op == OpOpDnn.BATCH_NORM2D_BACKWARD_DX) {\n+ op == OpOpDnn.BATCH_NORM2D_BACKWARD_DX || op == OpOpDnn.BATCH_NORM2D_BACKWARD_DGAMMA) {\nthrow new RuntimeException(\"getDim method should not be invoked for \" + op.name());\n}\ntry {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -1101,7 +1101,7 @@ public abstract class Hop implements ParseInfo\nCONV2D, CONV2D_BACKWARD_FILTER, CONV2D_BACKWARD_DATA,\nBIASADD, BIASMULT, BATCH_NORM2D_TEST, CHANNEL_SUMS,\nUPDATE_NESTEROV_X, RESHAPE_COLMEANS, UPDATE_EMA_VAR, UPDATE_EMA, INV_VAR,\n- BATCH_NORM2D_BACKWARD_DX\n+ BATCH_NORM2D_BACKWARD_DX, BATCH_NORM2D_BACKWARD_DGAMMA\n}\npublic enum DataGenMethod {\n@@ -1182,6 +1182,7 @@ public abstract class Hop implements ParseInfo\nHopsConv2Lops.put(OpOpDnn.UPDATE_EMA, org.apache.sysml.lops.DnnTransform.OperationTypes.UPDATE_EMA);\nHopsConv2Lops.put(OpOpDnn.INV_VAR, org.apache.sysml.lops.DnnTransform.OperationTypes.INV_VAR);\nHopsConv2Lops.put(OpOpDnn.BATCH_NORM2D_BACKWARD_DX, org.apache.sysml.lops.DnnTransform.OperationTypes.BATCH_NORM2D_BACKWARD_DX);\n+ HopsConv2Lops.put(OpOpDnn.BATCH_NORM2D_BACKWARD_DGAMMA, org.apache.sysml.lops.DnnTransform.OperationTypes.BATCH_NORM2D_BACKWARD_DGAMMA);\n}\nprotected static final HashMap<Hop.Direction, org.apache.sysml.lops.PartialAggregate.DirectionTypes> HopsDirection2Lops;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteGPUSpecificOps.java", "diff": "@@ -170,6 +170,25 @@ public class RewriteGPUSpecificOps extends HopRewriteRuleWithPatternMatcher {\nreturn hi;\n};\n+ // Avoids unnecessary intermediates:\n+ // mean = cache_mean\n+ // centered = bias_add(X, -mean) # shape (N, C*Hin*Win)\n+ // norm = bias_multiply(centered, cache_inv_var) # shape (N, C*Hin*Win)\n+ // # Compute gradients during training\n+ // dgamma = util::channel_sums(dout*norm, C, Hin, Win)\n+ private static final HopDagPatternMatcher _batchNormDGamma;\n+ 
static {\n+ _batchNormDGamma = util_channel_sums(\n+ mult( leaf(\"dout\", MATRIX).fitsOnGPU(3),\n+ bias_multiply(bias_add(leaf(\"X\", MATRIX), unaryMinus(leaf(\"ema_mean\", MATRIX))),\n+ leaf(\"ema_var\", MATRIX))), leaf(\"C\", SCALAR), leaf(\"HW\", SCALAR));\n+ }\n+ private static final Function<Hop, Hop> _batchNormDGammaReplacer = hi -> {\n+ LOG.debug(\"Applied batchNormDGamma rewrite.\");\n+ Hop newHop = HopRewriteUtils.createDnnOp(_batchNormDGamma, OpOpDnn.BATCH_NORM2D_BACKWARD_DGAMMA,\n+ \"ema_mean\", \"dout\", \"X\", \"ema_var\");\n+ return HopRewriteUtils.rewireAllParentChildReferences(hi, newHop);\n+ };\n// Pattern 3:\nprivate static final HopDagPatternMatcher _batchNormTest;\n@@ -282,8 +301,9 @@ public class RewriteGPUSpecificOps extends HopRewriteRuleWithPatternMatcher {\nif(_rewriters == null) {\nArrayList<HopPatternRewriter> rewriters = new ArrayList<>();\nrewriters.add(new HopPatternRewriter(\"batchNormdX\", _batchNormdX, _batchNormdXReplacer));\n- rewriters.add(new HopPatternRewriter(\"batchNormUpdatedVar\", _batchNormUpdatedVar, _batchNormUpdatedVarReplacer));\nrewriters.add(new HopPatternRewriter(\"batchNormTest\", _batchNormTest, _batchNormTestReplacer));\n+ rewriters.add(new HopPatternRewriter(\"batchNormUpdatedVar\", _batchNormUpdatedVar, _batchNormUpdatedVarReplacer));\n+ // rewriters.add(new HopPatternRewriter(\"batchNormDGamma\", _batchNormDGamma, _batchNormDGammaReplacer));\nrewriters.add(new HopPatternRewriter(\"channelSums\", _channelSums, _channelSumsReplacer));\nrewriters.add(new HopPatternRewriter(\"updateNesterovX\", _updateNesterovX, _updateNesterovXReplacer));\nrewriters.add(new HopPatternRewriter(\"reshapeColMeans\", _reshapeColMeans, _reshapeColMeansReplacer));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/DnnTransform.java", "new_path": "src/main/java/org/apache/sysml/lops/DnnTransform.java", "diff": "@@ -33,7 +33,7 @@ public class DnnTransform extends Lop\nCONV2D, CONV2D_BACKWARD_FILTER, CONV2D_BACKWARD_DATA,\nBIAS_ADD, CONV2D_BIAS_ADD, BIAS_MULTIPLY, CHANNEL_SUMS, BATCH_NORM2D_TEST,\nUPDATE_NESTEROV_X, RESHAPE_COLMEANS, UPDATE_EMA_VAR, UPDATE_EMA, INV_VAR,\n- BATCH_NORM2D_BACKWARD_DX\n+ BATCH_NORM2D_BACKWARD_DX, BATCH_NORM2D_BACKWARD_DGAMMA\n}\nprivate OperationTypes operation;\n@@ -174,6 +174,9 @@ public class DnnTransform extends Lop\ncase UPDATE_NESTEROV_X:\nreturn \"update_nesterov_x\";\n+ case BATCH_NORM2D_BACKWARD_DGAMMA:\n+ return \"batch_norm2d_bwd_dgamma\";\n+\ncase BATCH_NORM2D_TEST:\nreturn \"batch_norm2d_test\";\n@@ -254,7 +257,7 @@ public class DnnTransform extends Lop\n@Override\npublic String getInstructions(String input1, String input2, String input3, String input4, String output) {\n- if(operation == OperationTypes.UPDATE_NESTEROV_X) {\n+ if(operation == OperationTypes.UPDATE_NESTEROV_X || operation == OperationTypes.BATCH_NORM2D_BACKWARD_DGAMMA) {\nStringBuilder sb = new StringBuilder();\nsb.append( getExecType() );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -66,6 +66,7 @@ public class GPUInstructionParser extends InstructionParser\nString2GPUInstructionType.put( \"reshape_colmeans\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"inv_var\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"batch_norm2d_bwd_dx\", GPUINSTRUCTION_TYPE.Dnn);\n+ String2GPUInstructionType.put( 
\"batch_norm2d_bwd_dgamma\", GPUINSTRUCTION_TYPE.Dnn);\n// Matrix Multiply Operators\nString2GPUInstructionType.put( \"ba+*\", GPUINSTRUCTION_TYPE.AggregateBinary);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "diff": "@@ -127,7 +127,7 @@ public class DnnGPUInstruction extends GPUInstruction {\npublic DnnGPUInstruction(CPOperand in1, CPOperand in2, CPOperand in3, CPOperand in4, CPOperand out, String opcode, String istr,\ndouble intermediateMemoryBudget) throws DMLRuntimeException {\nsuper(new ReorgOperator(SwapIndex.getSwapIndexFnObject()), opcode, istr);\n- if( !( opcode.equals(\"update_nesterov_x\")) ) {\n+ if( !( opcode.equals(\"update_nesterov_x\") || opcode.equals(\"batch_norm2d_bwd_dgamma\")) ) {\nthrow new DMLRuntimeException(\"Incorrect opcode: \" + opcode);\n}\n_input1 = in1;\n@@ -339,6 +339,15 @@ public class DnnGPUInstruction extends GPUInstruction {\nCPOperand out = new CPOperand(parts[5]);\nreturn new DnnGPUInstruction(in, in2, in3, in4, out, opcode, str, 0);\n}\n+ else if (opcode.equalsIgnoreCase(\"batch_norm2d_bwd_dgamma\")) {\n+ InstructionUtils.checkNumFields(parts, 5);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand in3 = new CPOperand(parts[3]);\n+ CPOperand in4 = new CPOperand(parts[4]);\n+ CPOperand out = new CPOperand(parts[5]);\n+ return new DnnGPUInstruction(in, in2, in3, in4, out, opcode, str, 0);\n+ }\nelse if (opcode.equalsIgnoreCase(\"lstm\")) {\nInstructionUtils.checkNumFields(parts, 8);\nCPOperand in1 = new CPOperand(parts[1]);\n@@ -586,6 +595,42 @@ public class DnnGPUInstruction extends GPUInstruction {\n}\n}\n+ // \"ema_mean\", \"dout\", \"X\", \"ema_var\"\n+ private void processBatchNorm2dBackwardDGammaInstruction(ExecutionContext ec) {\n+ try(GPUDenseInputPointerFetcher fetcher = new GPUDenseInputPointerFetcher(ec, gCtx, instName, _output)) {\n+ fetcher.add(\"ema_mean\", _input1).add(\"dout\", _input2).add(\"X\", _input3)\n+ .add(\"ema_var\", _input4);\n+ MatrixObject ema_mean = fetcher.getInputMatrixObject(\"ema_mean\");\n+ MatrixObject dout = fetcher.getInputMatrixObject(\"dout\");\n+ long C = ema_mean.getNumRows();\n+ long N = dout.getNumRows();\n+ long CHW = dout.getNumColumns();\n+ fetcher.validateDimensions(\"ema_mean\", C, 1);\n+ fetcher.validateDimensions(\"dout\", N, CHW);\n+ fetcher.validateDimensions(\"X\", N, CHW);\n+ fetcher.validateDimensions(\"ema_var\", C, 1);\n+ if(CHW % C != 0) {\n+ throw new DMLRuntimeException(\"Incorrect dimensions: C=\" + C + \", CHW=\" + CHW);\n+ }\n+ long HW = CHW / C;\n+ Pointer tmp = gCtx.allocate(instName, N*CHW*LibMatrixCUDA.sizeOfDataType);\n+ // jcuda.runtime.JCuda.cudaDeviceSynchronize();\n+ LibMatrixCUDA.getCudaKernels(gCtx).launchKernel(\"backward_dgamma_tmp\",\n+ ExecutionConfig.getConfigForSimpleVectorOperations(LibMatrixCUDA.toInt(N*CHW)),\n+ fetcher.getInputPointer(\"ema_mean\"),\n+ fetcher.getInputPointer(\"dout\"),\n+ fetcher.getInputPointer(\"X\"),\n+ fetcher.getInputPointer(\"ema_var\"),\n+ tmp,\n+ // N, C, HW, CHW, NCHW\n+ toInt(N), toInt(C), toInt(HW), toInt(CHW), N*CHW);\n+\n+ LibMatrixCUDA.channelSums(gCtx, instName,\n+ tmp, fetcher.getOutputPointer(C, 1), N, C, HW);\n+ gCtx.cudaFreeHelper(instName, tmp, gCtx.EAGER_CUDA_FREE);\n+ }\n+ }\n+\nprivate static int toInt(long num) throws DMLRuntimeException {\nif(num >= Integer.MAX_VALUE || num <= Integer.MIN_VALUE) {\nthrow 
new DMLRuntimeException(\"GPU : Exceeded supported size \" + num);\n@@ -734,6 +779,10 @@ public class DnnGPUInstruction extends GPUInstruction {\nprocessNesterovUpdateInstruction(ec);\nreturn;\n}\n+ else if (instOpcode.equalsIgnoreCase(\"batch_norm2d_bwd_dgamma\")) {\n+ processBatchNorm2dBackwardDGammaInstruction(ec);\n+ return;\n+ }\nelse if (instOpcode.equalsIgnoreCase(\"update_ema_var\")) {\nprocessUpdateEMAVarInstruction(ec);\nreturn;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUDenseInputPointerFetcher.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUDenseInputPointerFetcher.java", "diff": "@@ -94,10 +94,10 @@ public class GPUDenseInputPointerFetcher implements java.lang.AutoCloseable {\npublic void validateDimensions(String var, long numRows, long numCols) {\nMatrixObject mo = getInputMatrixObject(var);\nif(numRows > 0 && mo.getNumRows() != numRows) {\n- throw new DMLRuntimeException(\"Expected number of rows of subgrp_means to be \" + numRows + \", but found \" + mo.getNumRows());\n+ throw new DMLRuntimeException(\"Expected number of rows of \" + var + \" to be \" + numRows + \", but found \" + mo.getNumRows());\n}\nelse if(numCols > 0 && mo.getNumColumns() != numCols) {\n- throw new DMLRuntimeException(\"Expected number of columns of subgrp_means to be \" + numCols + \", but found \" + mo.getNumColumns());\n+ throw new DMLRuntimeException(\"Expected number of columns of \" + var + \" to be \" + numCols + \", but found \" + mo.getNumColumns());\n}\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -362,10 +362,25 @@ public class LibMatrixCUDA {\n}\nPointer imagePointer = getDensePointer(gCtx, input, instName);\nPointer outputPointer = getDensePointer(gCtx, outputBlock, instName);\n+ channelSums(gCtx, instName, imagePointer, outputPointer, N, C, HW);\n+ }\n+ /**\n+ * Perform channel_sums operations: out = rowSums(matrix(colSums(A), rows=C, cols=HW))\n+ *\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param imagePointer input image pointer\n+ * @param outputPointer output pointer\n+ * @param N number of rows\n+ * @param C number of channels\n+ * @param HW height*width\n+ */\n+ public static void channelSums(GPUContext gCtx, String instName, Pointer imagePointer, Pointer outputPointer, long N, long C, long HW) {\n+ int cols = toInt(C*HW);\n// We can replace this with CuDNN tensor reduce\nPointer tmp = gCtx.allocate(instName, cols*sizeOfDataType);\n- reduceCol(gCtx, instName, \"reduce_col_sum\", imagePointer, tmp, N, cols);\n+ reduceCol(gCtx, instName, \"reduce_col_sum\", imagePointer, tmp, toInt(N), cols);\nreduceRow(gCtx, instName, \"reduce_row_sum\", tmp, outputPointer, toInt(C), toInt(HW));\ngCtx.cudaFreeHelper(instName, tmp, gCtx.EAGER_CUDA_FREE);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Improved the performance of batchnorm backward - Added a custom kernel for computing dgamma in batch normalization layer. - Also, fixed a minor bug in GPUDenseInputPointerFetcher class.
49,736
09.10.2018 16:41:18
25,200
97fd7d1aa3ce7a152066d4d4b713fb0a9aee4092
Avoid unnecessary transfer to the GPU for size estimation Compute memory estimates (exact and worst-case) using metadata rather than requiring pointer transfer.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMatrixMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMatrixMemoryManager.java", "diff": "@@ -44,32 +44,6 @@ public class GPUMatrixMemoryManager {\ngpuObjects.add(gpuObj);\n}\n- /**\n- * Returns worst-case contiguous memory size\n- * @param gpuObj gpu object\n- * @return memory size in bytes\n- */\n- long getWorstCaseContiguousMemorySize(GPUObject gpuObj) {\n- long ret = 0;\n- if(!gpuObj.isDensePointerNull()) {\n- if(!gpuObj.shadowBuffer.isBuffered())\n- ret = gpuManager.allPointers.get(gpuObj.getDensePointer()).getSizeInBytes();\n- else\n- ret = 0; // evicted hence no contiguous memory on GPU\n- }\n- else if(gpuObj.getJcudaSparseMatrixPtr() != null) {\n- CSRPointer sparsePtr = gpuObj.getJcudaSparseMatrixPtr();\n- if(sparsePtr.nnz > 0) {\n- if(sparsePtr.rowPtr != null)\n- ret = Math.max(ret, gpuManager.allPointers.get(sparsePtr.rowPtr).getSizeInBytes());\n- if(sparsePtr.colInd != null)\n- ret = Math.max(ret, gpuManager.allPointers.get(sparsePtr.colInd).getSizeInBytes());\n- if(sparsePtr.val != null)\n- ret = Math.max(ret, gpuManager.allPointers.get(sparsePtr.val).getSizeInBytes());\n- }\n- }\n- return ret;\n- }\n/**\n* Get list of all Pointers in a GPUObject\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -292,7 +292,7 @@ public class GPUMemoryManager {\nif(A == null) {\nlong t0 = ConfigurationManager.isStatistics() ? System.nanoTime() : 0;\nOptional<GPUObject> sizeBasedUnlockedGPUObjects = matrixMemoryManager.gpuObjects.stream()\n- .filter(gpuObj -> !gpuObj.isLocked() && matrixMemoryManager.getWorstCaseContiguousMemorySize(gpuObj) >= size)\n+ .filter(gpuObj -> !gpuObj.isLocked() && gpuObj.getWorstCaseContiguousMemorySize() >= size)\n.min((o1, o2) -> worstCaseContiguousMemorySizeCompare(o1, o2));\nif(sizeBasedUnlockedGPUObjects.isPresent()) {\nevictOrClear(sizeBasedUnlockedGPUObjects.get(), opcode);\n@@ -363,7 +363,7 @@ public class GPUMemoryManager {\n}\nprivate int worstCaseContiguousMemorySizeCompare(GPUObject o1, GPUObject o2) {\n- long ret = matrixMemoryManager.getWorstCaseContiguousMemorySize(o1) - matrixMemoryManager.getWorstCaseContiguousMemorySize(o2);\n+ long ret = o1.getWorstCaseContiguousMemorySize() - o2.getWorstCaseContiguousMemorySize();\nreturn ret < 0 ? -1 : (ret == 0 ? 
0 : 1);\n}\n@@ -423,7 +423,7 @@ public class GPUMemoryManager {\njcuda.runtime.JCuda.cudaDeviceSynchronize(); // Force a device synchronize after free-ing the pointer for debugging\n}\nelse {\n- throw new RuntimeException(\"Attempting to free an unaccounted pointer:\" + toFree);\n+ throw new RuntimeException(\"ERROR : Internal state corrupted, attempting to free an unaccounted pointer:\" + toFree);\n}\n}\n@@ -439,6 +439,12 @@ public class GPUMemoryManager {\npublic void free(String opcode, Pointer toFree, boolean eager) throws DMLRuntimeException {\nif(LOG.isTraceEnabled())\nLOG.trace(\"Free-ing the pointer with eager=\" + eager);\n+ if(toFree == null)\n+ throw new DMLRuntimeException(\"Attempting to free a null pointer\");\n+ else if (!allPointers.containsKey(toFree)) {\n+ LOG.info(\"GPU memory info before failure:\" + toString());\n+ throw new RuntimeException(\"ERROR : Internal state corrupted, attempting to free an unaccounted pointer:\" + toFree);\n+ }\nlong size = allPointers.get(toFree).getSizeInBytes();\nif(ConfigurationManager.isStatistics()) {\ncurrentSize -= size;\n@@ -449,10 +455,6 @@ public class GPUMemoryManager {\naddMiscTime(opcode, GPUStatistics.cudaDeAllocTime, GPUStatistics.cudaDeAllocCount, GPUInstruction.MISC_TIMER_CUDA_FREE, t0);\n}\nelse {\n- if (!allPointers.containsKey(toFree)) {\n- LOG.info(\"GPU memory info before failure:\" + toString());\n- throw new RuntimeException(\"ERROR : Internal state corrupted, cache block size map is not aware of a block it trying to free up\");\n- }\nlazyCudaFreeMemoryManager.add(size, toFree);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -774,17 +774,34 @@ public class GPUObject {\n}\nprotected long getSizeOnDevice() {\n- long GPUSize = 0;\nlong rlen = mat.getNumRows();\nlong clen = mat.getNumColumns();\nlong nnz = mat.getNnz();\n- if (LibMatrixCUDA.isInSparseFormat(getGPUContext(), mat)) {\n- GPUSize = CSRPointer.estimateSize(nnz, rlen);\n- } else {\n- GPUSize = getDatatypeSizeOf(rlen * clen);\n+ if(jcudaDenseMatrixPtr != null)\n+ return getDatatypeSizeOf(rlen * clen); // allocated in dense format\n+ else if(jcudaSparseMatrixPtr != null || LibMatrixCUDA.isInSparseFormat(getGPUContext(), mat))\n+ return CSRPointer.estimateSize(nnz, rlen); // either allocated in sparse format or matrix object is in sparse format\n+ else\n+ return getDatatypeSizeOf(rlen * clen); // not allocated and matrix object is in dense format\n}\n- return GPUSize;\n+\n+ /**\n+ * Returns worst-case contiguous memory size\n+ *\n+ * @return memory size in bytes\n+ */\n+ long getWorstCaseContiguousMemorySize() {\n+ long rlen = mat.getNumRows();\n+ long clen = mat.getNumColumns();\n+ long nnz = mat.getNnz();\n+\n+ if(jcudaDenseMatrixPtr != null)\n+ return getDatatypeSizeOf(rlen * clen); // allocated in dense format\n+ else if(jcudaSparseMatrixPtr != null || LibMatrixCUDA.isInSparseFormat(getGPUContext(), mat))\n+ return Math.max(getDatatypeSizeOf(nnz), getIntSizeOf(Math.max(Math.max(rlen+1, clen), 4))); // either allocated in sparse format or matrix object is in sparse format\n+ else\n+ return getDatatypeSizeOf(rlen * clen); // not allocated and matrix object is in dense format\n}\nvoid copyFromHostToDevice(String opcode) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Avoid unnecessary transfer to the GPU for size estimation - Compute memory estimates (exact and worst-case) using metadata rather than requiring pointer transfer.
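To illustrate the metadata-only estimate described in this commit message, a minimal standalone sketch; the method name and the 8-byte/4-byte element sizes are assumptions for illustration, not the actual SystemML API:

    public class WorstCaseSizeSketch {
        // Worst-case contiguous device buffer size derived from matrix metadata alone
        // (no transfer of pointers or data required).
        public static long worstCaseContiguousBytes(long rlen, long clen, long nnz, boolean sparse) {
            final long DOUBLE = 8, INT = 4; // assumed element sizes in bytes
            if (!sparse)
                return DOUBLE * rlen * clen; // dense: one contiguous value array
            // sparse CSR: the largest single array among values, rowPtr, and colInd
            return Math.max(DOUBLE * nnz, INT * Math.max(Math.max(rlen + 1, clen), 4));
        }
    }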
49,736
11.10.2018 13:52:36
25,200
11c67055accecf8582de0ca6dc62d0e3952e2804
[MINOR] Provide an useful error message when copying a large dense block to device memory
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -889,6 +889,10 @@ public class GPUObject {\nGPUStatistics.maintainCPMiscTimes(opcode, GPUInstruction.MISC_TIMER_HOST_TO_DEVICE, System.nanoTime() - t1);\n}\n} else {\n+ if(((long)tmp.getNumRows())*((long)tmp.getNumColumns()) > Integer.MAX_VALUE) {\n+ throw new DMLRuntimeException(\"Cannot allocate a dense double array on the GPU for a matrix with \"\n+ + \"dimensions [\" + tmp.getNumRows() + \",\" + tmp.getNumColumns() + \"]\");\n+ }\ndouble[] data = tmp.getDenseBlockValues();\nif (data == null && tmp.getSparseBlock() != null)\n@@ -982,6 +986,7 @@ public class GPUObject {\nif (!isDensePointerNull()) {\ntmp = new MatrixBlock(toIntExact(mat.getNumRows()), toIntExact(mat.getNumColumns()), false);\ntmp.allocateDenseBlock();\n+ // No need to double-check if tmp.getDenseBlockValues() is valid here.\nLibMatrixCUDA.cudaSupportFunctions.deviceToHost(getGPUContext(),\ngetDensePointer(), tmp.getDenseBlockValues(), instName, isEviction);\nif(eagerDelete)\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Provide an useful error message when copying a large dense block to device memory
49,738
13.10.2018 21:53:59
-7,200
41de8dcdc621b7dc2c1557aca64095512cdd6cf6
Performance matrix histogram estimator for dense This patch improves the performance of matrix histogram construction via a special case for fully dense matrices that allow for the construction from meta data without a pass over the input.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -108,7 +108,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\n}\n}\n- private abstract static class BitsetMatrix {\n+ public abstract static class BitsetMatrix {\nprotected final int _rlen;\nprotected final int _clen;\nprotected long _nonZeros;\n@@ -207,7 +207,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\n* not allow for range ORs). However, this implies a maximum size of 16GB.\n*\n*/\n- private static class BitsetMatrix1 extends BitsetMatrix {\n+ public static class BitsetMatrix1 extends BitsetMatrix {\n//linearized and padded data array in row-major order, where each long\n//represents 64 boolean values, all rows are aligned at 64 for simple access\nprivate final int _rowLen;\n@@ -407,7 +407,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\n}\n@SuppressWarnings(\"unused\")\n- private static class BitsetMatrix2 extends BitsetMatrix {\n+ public static class BitsetMatrix2 extends BitsetMatrix {\nprivate BitSet[] _data;\npublic BitsetMatrix2(int rlen, int clen) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -180,7 +180,7 @@ public class EstimatorDensityMap extends SparsityEstimator\nm1Map.getNumColumnsOrig(), _b, true);\n}\n- private static class DensityMap {\n+ public static class DensityMap {\nprivate final MatrixBlock _map;\nprivate final int _rlen;\nprivate final int _clen;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -88,7 +88,7 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\nreturn leafs;\n}\n- private static class LayeredGraph {\n+ public static class LayeredGraph {\nprivate final List<Node[]> _nodes; //nodes partitioned by graph level\nprivate final int _rounds; //length of propagated r-vectors\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "package org.apache.sysml.hops.estim;\n+import java.util.Arrays;\nimport java.util.Random;\nimport java.util.stream.IntStream;\n@@ -195,7 +196,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nh1.getRows(), h2.getCols(), nnz);\n}\n- private static class MatrixHistogram {\n+ public static class MatrixHistogram {\n// count vectors (the histogram)\nprivate final int[] rNnz; //nnz per row\nprivate int[] rNnz1e = null; //nnz per row for cols w/ <= 1 non-zeros\n@@ -218,7 +219,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n&& in.getNumRows() == in.getNumColumns();\n// 2) compute basic synopsis details\n- if( !in.isEmpty() ) {\n+ if( in.getLength() == in.getNonZeros() ) {\n+ //fully dense: constant row/column counts\n+ Arrays.fill(rNnz, n);\n+ Arrays.fill(cNnz, m);\n+ }\n+ else if( !in.isEmpty() ) {\nif( in.isInSparseFormat() ) {\nSparseBlock a = in.getSparseBlock();\nfor( int i=0; i<m; i++ ) {\n@@ -250,7 +256,8 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nrNdiv2 = rSummary[3]; cNdiv2 = cSummary[3];\n// 4) compute exception details if 
necessary (optional)\n- if( useExcepts & !in.isEmpty() && (rMaxNnz > 1 || cMaxNnz > 1) ) {\n+ if( useExcepts && !in.isEmpty() && (rMaxNnz > 1 || cMaxNnz > 1)\n+ && in.getLength() != in.getNonZeros() ) { //not fully dense\nrNnz1e = new int[in.getNumRows()];\ncNnz1e = new int[in.getNumColumns()];\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2486] Performance matrix histogram estimator for dense This patch improves the performance of matrix histogram construction via a special case for fully dense matrices that allow for the construction from meta data without a pass over the input.
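A minimal sketch of the fully-dense special case described above, written as a standalone class with plain int[] count vectors rather than the estimator's internal fields:

    import java.util.Arrays;

    public class DenseHistogramSketch {
        // For a fully dense m x n input (length == nnz), the row/column counts of the
        // MNC histogram follow from metadata alone: every row holds n non-zeros and
        // every column holds m, so no pass over the data is required.
        public static int[][] denseHistogram(int m, int n) {
            int[] rNnz = new int[m];
            int[] cNnz = new int[n];
            Arrays.fill(rNnz, n);
            Arrays.fill(cNnz, m);
            return new int[][] { rNnz, cNnz };
        }
    }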
49,738
16.10.2018 21:48:38
-7,200
f1b9d1c08d750059af7c4dad6938d80d4852ee86
Fix MNC sparsity estimator integer overflows This patch fixes various cases of the MNC (matrix histogram) sparsity estimator that ran into integer overflows on moderately large data.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -152,19 +152,19 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//dot(h1.cNnz,h2rNnz) gives the exact number of non-zeros in the output\nif( h1.rMaxNnz <= 1 || h2.cMaxNnz <= 1 ) {\nfor( int j=0; j<h1.getCols(); j++ )\n- nnz += h1.cNnz[j] * h2.rNnz[j];\n+ nnz += (long)h1.cNnz[j] * h2.rNnz[j];\n}\n//special case, with hybrid exact and approximate output\nelse if(h1.cNnz1e!=null && h2.rNnz1e != null) {\n//note: normally h1.getRows()*h2.getCols() would define mnOut\n//but by leveraging the knowledge of rows/cols w/ <=1 nnz, we account\n//that exact and approximate fractions touch different areas\n- long mnOut = (h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1);\n+ long mnOut = (long)(h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1);\ndouble spOutRest = 0;\nfor( int j=0; j<h1.getCols(); j++ ) {\n//exact fractions, w/o double counting\n- nnz += h1.cNnz1e[j] * h2.rNnz[j];\n- nnz += (h1.cNnz[j]-h1.cNnz1e[j]) * h2.rNnz1e[j];\n+ nnz += (long)h1.cNnz1e[j] * h2.rNnz[j];\n+ nnz += (long)(h1.cNnz[j]-h1.cNnz1e[j]) * h2.rNnz1e[j];\n//approximate fraction, w/o double counting\ndouble lsp = (double)(h1.cNnz[j]-h1.cNnz1e[j])\n* (h2.rNnz[j]-h2.rNnz1e[j]) / mnOut;\n@@ -174,7 +174,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n//general case with approximate output\nelse {\n- long mnOut = h1.getRows()*h2.getCols();\n+ long mnOut = (long)h1.getRows()*h2.getCols();\ndouble spOut = 0;\nfor( int j=0; j<h1.getCols(); j++ ) {\ndouble lsp = (double) h1.cNnz[j] * h2.rNnz[j] / mnOut;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2468] Fix MNC sparsity estimator integer overflows This patch fixes various cases of the MNC (matrix histogram) sparsity estimator that ran into integer overflows on moderately large data.
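The essence of the fix above as a small standalone sketch (illustrative names; the key point is the widening cast before the multiply):

    public class NnzDotSketch {
        // Exact output nnz of a product via dot(h1.cNnz, h2.rNnz); without the (long)
        // cast the int*int product overflows for moderately large counts.
        public static long outputNnz(int[] cNnz1, int[] rNnz2) {
            long nnz = 0;
            for (int j = 0; j < cNnz1.length; j++)
                nnz += (long) cNnz1[j] * rNnz2[j];
            return nnz;
        }
    }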
49,738
17.10.2018 18:29:56
-7,200
ca24ec5647dedbf6eb50bbc630ccee673b1b3320
Extended sampling-based sparsity estimator This patch fixes the existing sampling-based estimator by optionally removing its bias via an approach similar to element-wise addition used in other estimators.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorSample.java", "diff": "@@ -37,22 +37,29 @@ import org.apache.sysml.runtime.util.UtilFunctions;\n* The basic idea is to draw random samples of aligned columns SA and rows SB,\n* and compute the output nnz as max(nnz(SA_i)*nnz(SB_i)). However, this estimator is\n* biased toward underestimation as the maximum is unlikely sampled and collisions are\n- * not accounted for.\n+ * not accounted for. Accordingly, we also support an extended estimator that relies\n+ * on similar ideas for element-wise addition as the other estimators.\n*/\npublic class EstimatorSample extends SparsityEstimator\n{\nprivate static final double SAMPLE_FRACTION = 0.1; //10%\nprivate final double _frac;\n+ private final boolean _extended;\npublic EstimatorSample() {\n- this(SAMPLE_FRACTION);\n+ this(SAMPLE_FRACTION, false);\n}\npublic EstimatorSample(double sampleFrac) {\n+ this(sampleFrac, false);\n+ }\n+\n+ public EstimatorSample(double sampleFrac, boolean extended) {\nif( sampleFrac < 0 || sampleFrac > 1.0 )\nthrow new DMLRuntimeException(\"Invalid sample fraction: \"+sampleFrac);\n_frac = sampleFrac;\n+ _extended = extended;\n}\n@Override\n@@ -73,13 +80,28 @@ public class EstimatorSample extends SparsityEstimator\nint k = m1.getNumColumns();\nint[] ix = UtilFunctions.getSortedSampleIndexes(\nk, (int)Math.max(k*_frac, 1));\n+ int p = ix.length;\nint[] cnnz = computeColumnNnz(m1, ix);\n+ if( _extended ) {\n+ double ml = (long)m1.getNumRows()*m2.getNumColumns();\n+ double sumS = 0, prodS = 1;\n+ for(int i=0; i<ix.length; i++) {\n+ long rnnz = m2.recomputeNonZeros(ix[i], ix[i]);\n+ double v = (double)cnnz[i] * rnnz /ml;\n+ sumS += v;\n+ prodS *= 1-v;\n+ }\n+ return 1-Math.pow(1-1d/p * sumS, k - p) * prodS;\n+ }\n+ else {\n+ //biased sampling-based estimator\nlong nnzOut = 0;\n- for(int i=0; i<ix.length; i++)\n+ for(int i=0; i<p; i++)\nnnzOut = Math.max(nnzOut, cnnz[i] * m2.recomputeNonZeros(ix[i], ix[i]));\nreturn OptimizerUtils.getSparsity(\nm1.getNumRows(), m2.getNumColumns(), nnzOut);\n}\n+ }\ncase MULT: {\nint k = Math.max(m1.getNumColumns(), m1.getNumRows());\nint[] ix = UtilFunctions.getSortedSampleIndexes(\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2329] Extended sampling-based sparsity estimator This patch fixes the existing sampling-based estimator by optionally removing its bias via an approach similar to element-wise addition used in other estimators.
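A standalone sketch of the extended (bias-corrected) estimate from the diff above; the parameter names are illustrative, where cnnz/rnnz are the nnz counts of the sampled columns/rows, p the sample size, k the common dimension, and ml the number of output cells:

    public class SampleEstimateSketch {
        // Extended sampling-based sparsity estimate for a matrix product: sampled
        // columns contribute exactly via prodS, while unsampled columns are
        // extrapolated with the average sampled contribution sumS/p.
        public static double estimate(int[] cnnz, long[] rnnz, int k, double ml) {
            int p = cnnz.length;
            double sumS = 0, prodS = 1;
            for (int i = 0; i < p; i++) {
                double v = (double) cnnz[i] * rnnz[i] / ml;
                sumS += v;
                prodS *= 1 - v;
            }
            return 1 - Math.pow(1 - sumS / p, k - p) * prodS;
        }
    }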
49,738
19.10.2018 16:35:38
-7,200
ef842da9c891851c8e0a0db3cad3bd88aacb6cd9
[MINOR] Fix INF robustness layered graph sparsity estimator (rounding)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -156,8 +156,8 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\n}\n//step 2: propagate vectors bottom-up and aggregate nnz\n- return (long) Arrays.stream(_nodes.get(_nodes.size()-1))\n- .mapToDouble(n -> calcNNZ(n.computeVector(_rounds), _rounds)).sum();\n+ return (long) Math.round(Arrays.stream(_nodes.get(_nodes.size()-1))\n+ .mapToDouble(n -> calcNNZ(n.computeVector(_rounds), _rounds)).sum());\n}\nprivate static double calcNNZ(double[] inpvec, int rounds) {\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix INF robustness layered graph sparsity estimator (rounding)
49,738
20.10.2018 20:25:29
-7,200
07650acf25a7ffb3d9663c622be0ae82778c0db0
Fix MNC sparsity estimator reshape operations This patch fixes various smaller correctness issues of MNC sketch propagation for reshape operations, adds a related test, and finally removes the invalid skipping of estimation tests.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -406,7 +406,6 @@ public class EstimatorBitsetMM extends SparsityEstimator\n}\n}\n- @SuppressWarnings(\"unused\")\npublic static class BitsetMatrix2 extends BitsetMatrix {\nprivate BitSet[] _data;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -24,7 +24,7 @@ import java.util.Random;\nimport java.util.stream.IntStream;\nimport org.apache.commons.lang.ArrayUtils;\n-import org.apache.directory.api.util.exception.NotImplementedException;\n+import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\n@@ -58,22 +58,22 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//recursive histogram computation of non-leaf nodes\nif( !root.getLeft().isLeaf() )\nestim(root.getLeft()); //obtain synopsis\n- if( !root.getRight().isLeaf() )\n+ if( root.getRight()!=null && !root.getRight().isLeaf() )\nestim(root.getRight()); //obtain synopsis\nMatrixHistogram h1 = !root.getLeft().isLeaf() ?\n(MatrixHistogram)root.getLeft().getSynopsis() :\nnew MatrixHistogram(root.getLeft().getData(), _useExcepts);\n- MatrixHistogram h2 = !root.getRight().isLeaf() ?\n+ MatrixHistogram h2 = root.getRight() != null ? !root.getRight().isLeaf() ?\n(MatrixHistogram)root.getRight().getSynopsis() :\n- new MatrixHistogram(root.getRight().getData(), _useExcepts);\n+ new MatrixHistogram(root.getRight().getData(), _useExcepts) : null;\n//estimate output sparsity based on input histograms\n- double ret = estimIntern(h1, h2, root.getOp());\n- MatrixHistogram outMap = MatrixHistogram.deriveOutputHistogram(h1, h2, ret, root.getOp());\n+ double ret = estimIntern(h1, h2, root.getOp(), root.getMisc());\n+ MatrixHistogram outMap = MatrixHistogram\n+ .deriveOutputHistogram(h1, h2, ret, root.getOp(), root.getMisc());\nroot.setSynopsis(outMap);\nreturn root.setMatrixCharacteristics(new MatrixCharacteristics(\noutMap.getRows(), outMap.getCols(), outMap.getNonZeros()));\n-\n}\n@Override\n@@ -89,7 +89,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nMatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\nMatrixHistogram h2 = (m1 == m2) ? 
//self product\nh1 : new MatrixHistogram(m2, _useExcepts);\n- return estimIntern(h1, h2, op);\n+ return estimIntern(h1, h2, op, null);\n}\n@Override\n@@ -97,10 +97,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nif( isExactMetadataOp(op) )\nreturn estimExactMetaData(m1.getMatrixCharacteristics(), null, op).getSparsity();\nMatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\n- return estimIntern(h1, null, op);\n+ return estimIntern(h1, null, op, null);\n}\n- private double estimIntern(MatrixHistogram h1, MatrixHistogram h2, OpCode op) {\n+ private double estimIntern(MatrixHistogram h1, MatrixHistogram h2, OpCode op, long[] misc) {\ndouble msize = (double)h1.getRows()*h1.getCols();\nswitch (op) {\ncase MM:\n@@ -312,13 +312,21 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nreturn cNnz.length;\n}\n+ public int[] getRowCounts() {\n+ return rNnz;\n+ }\n+\n+ public int[] getColCounts() {\n+ return cNnz;\n+ }\n+\npublic long getNonZeros() {\nreturn getRows() < getCols() ?\nIntStream.range(0, getRows()).mapToLong(i-> rNnz[i]).sum() :\nIntStream.range(0, getCols()).mapToLong(i-> cNnz[i]).sum();\n}\n- public static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut, OpCode op) {\n+ public static MatrixHistogram deriveOutputHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut, OpCode op, long[] misc) {\nswitch(op) {\ncase MM: return deriveMMHistogram(h1, h2, spOut);\ncase MULT: return deriveMultHistogram(h1, h2);\n@@ -329,8 +337,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncase EQZERO: return deriveEq0Histogram(h1);\ncase DIAG: return deriveDiagHistogram(h1);\ncase TRANS: return deriveTransHistogram(h1);\n- case RESHAPE: return deriveReshapeHistogram(h1, h1.getRows(), h1.getCols());\n- //FIXME: reshape requires additional meta data from MM node\n+ case RESHAPE: return deriveReshapeHistogram(h1, (int)misc[0], (int)misc[1]);\ndefault:\nthrow new NotImplementedException();\n}\n@@ -493,11 +500,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nrMaxNnz = Math.max(rMaxNnz, h1.rNnz[i]/scale);\n}\n//aggregate column counts\n- for(int j=0; j<n; j+=scale)\n- for(int j2=0; j2<scale; j2++)\n- cNnz[j2] += h1.cNnz[j];\n- for(int j2=0; j2<scale; j2++)\n- cMaxNnz = Math.max(cMaxNnz, cNnz[j2]);\n+ for(int j=0; j<n; j++)\n+ cNnz[j%cols] += h1.cNnz[j];\n+ for(int j=0; j<cols; j++)\n+ cMaxNnz = Math.max(cMaxNnz, cNnz[j]);\n}\nelse if ( h1.getRows() % rows == 0 ) { //N->1 rows\nint scale = h1.getRows()/rows;\n@@ -508,11 +514,11 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\ncMaxNnz = Math.max(cMaxNnz, h1.cNnz[i]/scale);\n}\n//aggregate row counts\n- for(int j=0; j<m; j+=scale)\n- for(int j2=0; j2<scale; j2++)\n- rNnz[j2] += h1.rNnz[j];\n- for(int j2=0; j2<scale; j2++)\n- rMaxNnz = Math.max(rMaxNnz, rNnz[j2]);\n+ for(int i=0, pos=0; i<m; i+=scale, pos++)\n+ for(int i2=0; i2<scale; i2++)\n+ rNnz[pos] += h1.rNnz[i+i2];\n+ for(int i=0; i<rows; i++)\n+ rMaxNnz = Math.max(rMaxNnz, rNnz[i]);\n}\nreturn new MatrixHistogram(rNnz, null, cNnz, null, rMaxNnz, cMaxNnz);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "diff": "package org.apache.sysml.hops.estim;\nimport org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\n+import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport 
org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -35,6 +36,7 @@ public class MMNode\nprivate final MatrixCharacteristics _mc;\nprivate Object _synops = null;\nprivate final OpCode _op;\n+ private final long[] _misc;\npublic MMNode(MatrixBlock in) {\n_m1 = null;\n@@ -42,14 +44,28 @@ public class MMNode\n_data = in;\n_mc = in.getMatrixCharacteristics();\n_op = null;\n+ _misc = null;\n}\n- public MMNode(MMNode left, MMNode right, OpCode op) {\n+ public MMNode(MMNode left, MMNode right, OpCode op, long[] misc) {\n_m1 = left;\n_m2 = right;\n_data = null;\n_mc = new MatrixCharacteristics(-1, -1, -1, -1);\n_op = op;\n+ _misc = misc;\n+ }\n+\n+ public MMNode(MMNode left, MMNode right, OpCode op) {\n+ this(left, right, op, null);\n+ }\n+\n+ public MMNode(MMNode left, OpCode op) {\n+ this(left, null, op);\n+ }\n+\n+ public MMNode(MMNode left, OpCode op, long[] misc) {\n+ this(left, null, op, misc);\n}\npublic int getRows() {\n@@ -60,6 +76,16 @@ public class MMNode\nreturn (int)_mc.getCols();\n}\n+ public long[] getMisc() {\n+ return _misc;\n+ }\n+\n+ public long getMisc(int pos) {\n+ if( _misc == null )\n+ throw new DMLRuntimeException(\"Extra meta data not available.\");\n+ return _misc[pos];\n+ }\n+\npublic MatrixCharacteristics getMatrixCharacteristics() {\nreturn _mc;\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/MNCReshapeTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.estim;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.hops.estim.EstimatorMatrixHistogram.MatrixHistogram;\n+import org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixReorg;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+\n+public class MNCReshapeTest extends AutomatedTestBase\n+{\n+ @Override\n+ public void setUp() {\n+ //do nothing\n+ }\n+\n+ @Test\n+ public void testMNCReshapeN1() {\n+ runMNCReshapeTest(1000, 100, 200, 500);\n+ }\n+\n+ @Test\n+ public void testMNCReshape1N() {\n+ runMNCReshapeTest(100, 1000, 500, 200);\n+ }\n+\n+ private void runMNCReshapeTest(int m, int n, int m2, int n2) {\n+ MatrixBlock in = createStructuredInput(m, n, m2, n2);\n+ MatrixBlock out = LibMatrixReorg.reshape(in, new MatrixBlock(m2, n2, false), m2, n2, true);\n+\n+ MatrixHistogram hIn = new MatrixHistogram(in, false);\n+ MatrixHistogram hOut = MatrixHistogram.deriveOutputHistogram(\n+ hIn, null, in.getSparsity(), OpCode.RESHAPE, new long[] {m2,n2});\n+\n+ MatrixHistogram hExpect = new MatrixHistogram(out, false);\n+\n+ //expected exact sparsity, even with sketch propagation\n+ if( m % m2 == 0 )\n+ Assert.assertArrayEquals(hExpect.getRowCounts(), hOut.getRowCounts());\n+ if( n % n2 == 0 )\n+ Assert.assertArrayEquals(hExpect.getColCounts(), hOut.getColCounts());\n+ }\n+\n+ private MatrixBlock createStructuredInput(int m, int n, int m2, int n2) {\n+ if( n % n2 == 0 ) { //1:N\n+ MatrixBlock tmp = createStructuredInput(n, m, n2, m2);\n+ return LibMatrixReorg.transpose(tmp, new MatrixBlock(m, n, false));\n+ }\n+ else if( m % m2 == 0 ) { //N:1\n+ MatrixBlock tmp = new MatrixBlock(m, n, false);\n+ int L = m/m2;\n+ for(int i=0; i<m; i+=L) {\n+ for( int k=0; k<L; k++ )\n+ for(int j=0; j<n/(k+1); j++ ) //j=i/100\n+ tmp.quickSetValue(i+k, j, 1);\n+ }\n+ return tmp;\n+ }\n+ else {\n+ throw new RuntimeException(\"Unsupported general case.\");\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpBindChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpBindChainTest.java", "diff": "@@ -124,9 +124,6 @@ public class OpBindChainTest extends AutomatedTestBase\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp, OpCode op) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1;\nMatrixBlock m2;\nMatrixBlock m3 = new MatrixBlock();\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpBindTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpBindTest.java", "diff": "@@ -134,9 +134,6 @@ public class OpBindTest extends AutomatedTestBase\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp, OpCode op) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1;\nMatrixBlock m2;\nMatrixBlock m3 = new MatrixBlock();\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWChainTest.java", "diff": "@@ -118,9 +118,6 @@ public class OpElemWChainTest 
extends AutomatedTestBase\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int n, double[] sp, OpCode op) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, n, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(m, n, sp[1], 1, 1, \"uniform\", 5);\nMatrixBlock m3 = MatrixBlock.randOperations(n, m, sp[1], 1, 1, \"uniform\", 7);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpElemWTest.java", "diff": "@@ -129,9 +129,6 @@ public class OpElemWTest extends AutomatedTestBase\n}\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int n, double[] sp, OpCode op) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, n, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(m, n, sp[1], 1, 1, \"uniform\", 7);\nMatrixBlock m3 = new MatrixBlock();\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpSingleTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpSingleTest.java", "diff": "@@ -232,9 +232,6 @@ public class OpSingleTest extends AutomatedTestBase\n// }\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, double sp, OpCode op) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp, 1, 1, \"uniform\", 3);\nMatrixBlock m2 = new MatrixBlock();\ndouble est = 0;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OuterProductTest.java", "diff": "@@ -140,9 +140,6 @@ public class OuterProductTest extends AutomatedTestBase\n}\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 3);\nMatrixBlock m3 = m1.aggregateBinaryOperations(m1, m2,\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SelfProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SelfProductTest.java", "diff": "@@ -141,9 +141,6 @@ public class SelfProductTest extends AutomatedTestBase\n}\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int n, double sp) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, n, sp, 1, 1, \"uniform\", 3);\nMatrixBlock m3 = m1.aggregateBinaryOperations(m1, m1,\nnew MatrixBlock(), InstructionUtils.getMatMultOperator(1));\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductChainTest.java", "diff": "@@ -138,9 +138,6 @@ public class SquaredProductChainTest extends AutomatedTestBase\n}\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, int n2, double[] sp) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 1);\nMatrixBlock m2 = 
MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 2);\nMatrixBlock m3 = MatrixBlock.randOperations(n, n2, sp[2], 1, 1, \"uniform\", 3);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/SquaredProductTest.java", "diff": "@@ -156,9 +156,6 @@ public class SquaredProductTest extends AutomatedTestBase\n}\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, int n, double[] sp) {\n- if(shouldSkipTest())\n- return;\n-\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp[0], 1, 1, \"uniform\", 3);\nMatrixBlock m2 = MatrixBlock.randOperations(k, n, sp[1], 1, 1, \"uniform\", 7);\nMatrixBlock m3 = m1.aggregateBinaryOperations(m1, m2,\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/estim/ZPackageSuite.java", "diff": "@@ -26,6 +26,7 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n+ MNCReshapeTest.class,\nOpBindChainTest.class,\nOpBindTest.class,\nOpElemWChainTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Fix MNC sparsity estimator reshape operations This patch fixes various smaller correctness issues of MNC sketch propagation for reshape operations, adds a related test, and finally removes the invalid skipping of estimation tests.
49,738
21.10.2018 02:01:45
-7,200
ab8cccdff8465cf29acd4887b1009989a9e7c97f
[MINOR] Fixes baseline sparsity estimators (layered graph, bitset) This patch fixes (1) the selection of bitset implementations according to input datasize, and (2) operation-specific API of the layered graph.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -71,9 +71,9 @@ public class EstimatorBitsetMM extends SparsityEstimator\nif( isExactMetadataOp(op) )\nreturn estimExactMetaData(m1.getMatrixCharacteristics(),\nm2.getMatrixCharacteristics(), op).getSparsity();\n- BitsetMatrix m1Map = new BitsetMatrix1(m1);\n+ BitsetMatrix m1Map = createBitset(m1);\nBitsetMatrix m2Map = (m1 == m2) ? //self product\n- m1Map : new BitsetMatrix1(m2);\n+ m1Map : createBitset(m2);\nBitsetMatrix outMap = estimInternal(m1Map, m2Map, op);\nreturn OptimizerUtils.getSparsity(outMap.getNumRows(),\noutMap.getNumColumns(), outMap.getNonZeros());\n@@ -83,7 +83,7 @@ public class EstimatorBitsetMM extends SparsityEstimator\npublic double estim(MatrixBlock m, OpCode op) {\nif( isExactMetadataOp(op) )\nreturn estimExactMetaData(m.getMatrixCharacteristics(), null, op).getSparsity();\n- BitsetMatrix m1Map = new BitsetMatrix1(m);\n+ BitsetMatrix m1Map = createBitset(m);\nBitsetMatrix outMap = estimInternal(m1Map, null, op);\nreturn OptimizerUtils.getSparsity(outMap.getNumRows(),\noutMap.getNumColumns(), outMap.getNonZeros());\n@@ -199,6 +199,18 @@ public class EstimatorBitsetMM extends SparsityEstimator\n//protected abstract BitsetMatrix reshape(int rows, int cols, boolean byrow);\n}\n+ public static BitsetMatrix createBitset(int m, int n) {\n+ return (long)m*n < Integer.MAX_VALUE ?\n+ new BitsetMatrix1(m, n) : //linearized long array\n+ new BitsetMatrix2(m, n); //bitset per row\n+ }\n+\n+ public static BitsetMatrix createBitset(MatrixBlock in) {\n+ return in.getLength() < Integer.MAX_VALUE ?\n+ new BitsetMatrix1(in) : //linearized long array\n+ new BitsetMatrix2(in); //bitset per row\n+ }\n+\n/**\n* This class represents a boolean matrix and provides key operations.\n* In the interest of a cache-conscious matrix multiplication and reduced\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorLayeredGraph.java", "diff": "@@ -62,6 +62,8 @@ public class EstimatorLayeredGraph extends SparsityEstimator {\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\n+ if( op == OpCode.MM )\n+ return estim(m1, m2);\nthrow new NotImplementedException();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixes baseline sparsity estimators (layered graph, bitset) This patch fixes (1) the selection of bitset implementations according to input datasize, and (2) operation-specific API of the layered graph.
49,738
21.10.2018 02:30:25
-7,200
cca6356f8de49dcb6aeb1f23cefd53930309fedb
[MINOR] Utility to obtain the exact output sparsity of sparse products
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimationUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimationUtils.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops.estim;\nimport java.util.Arrays;\n+import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\n@@ -106,4 +107,59 @@ public abstract class EstimationUtils\n}\nreturn retNnz;\n}\n+\n+ public static long getSparseProductOutputNnz(MatrixBlock m1, MatrixBlock m2) {\n+ if( !m1.isInSparseFormat() || !m2.isInSparseFormat() )\n+ throw new DMLRuntimeException(\"Invalid call to sparse output nnz estimation.\");\n+\n+ final int m = m1.getNumRows();\n+ final int n2 = m2.getNumColumns();\n+ long retNnz = 0;\n+\n+ SparseBlock a = m1.getSparseBlock();\n+ SparseBlock b = m2.getSparseBlock();\n+\n+ SparseRowVector tmpS = new SparseRowVector(1024);\n+ double[] tmpD = null;\n+\n+ for( int i=0; i<m; i++ ) {\n+ if( a.isEmpty(i) ) continue;\n+ int alen = a.size(i);\n+ int apos = a.pos(i);\n+ int[] aix = a.indexes(i);\n+ double[] avals = a.values(i);\n+\n+ //compute number of aggregated non-zeros for input row\n+ int nnz1 = (int) Math.min(UtilFunctions.computeNnz(b, aix, apos, alen), n2);\n+ boolean ldense = nnz1 > n2 / 128;\n+\n+ //perform vector-matrix multiply w/ dense or sparse output\n+ if( ldense ) { //init dense tmp row\n+ tmpD = (tmpD == null) ? new double[n2] : tmpD;\n+ Arrays.fill(tmpD, 0);\n+ }\n+ else {\n+ tmpS.setSize(0);\n+ }\n+ for( int k=apos; k<apos+alen; k++ ) {\n+ if( b.isEmpty(aix[k]) ) continue;\n+ int blen = b.size(aix[k]);\n+ int bpos = b.pos(aix[k]);\n+ int[] bix = b.indexes(aix[k]);\n+ double aval = avals[k];\n+ double[] bvals = b.values(aix[k]);\n+ if( ldense ) { //dense aggregation\n+ for( int j=bpos; j<bpos+blen; j++ )\n+ tmpD[bix[j]] += aval * bvals[j];\n+ }\n+ else { //sparse aggregation\n+ for( int j=bpos; j<bpos+blen; j++ )\n+ tmpS.add(bix[j], aval * bvals[j]);\n+ }\n+ }\n+ retNnz += !ldense ? tmpS.size() :\n+ UtilFunctions.computeNnz(tmpD, 0, n2);\n+ }\n+ return retNnz;\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Utility to obtain the exact output sparsity of sparse products
49,738
21.10.2018 18:43:32
-7,200
569806dcdf3c37bff59ad052884cf5f9af9bd598
Improved MNC estimator (avoid final sketch propagation)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -55,11 +55,15 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n@Override\npublic MatrixCharacteristics estim(MMNode root) {\n+ return estim(root, true);\n+ }\n+\n+ private MatrixCharacteristics estim(MMNode root, boolean topLevel) {\n//recursive histogram computation of non-leaf nodes\nif( !root.getLeft().isLeaf() )\n- estim(root.getLeft()); //obtain synopsis\n+ estim(root.getLeft(), false); //obtain synopsis\nif( root.getRight()!=null && !root.getRight().isLeaf() )\n- estim(root.getRight()); //obtain synopsis\n+ estim(root.getRight(), false); //obtain synopsis\nMatrixHistogram h1 = !root.getLeft().isLeaf() ?\n(MatrixHistogram)root.getLeft().getSynopsis() :\nnew MatrixHistogram(root.getLeft().getData(), _useExcepts);\n@@ -69,6 +73,12 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//estimate output sparsity based on input histograms\ndouble ret = estimIntern(h1, h2, root.getOp(), root.getMisc());\n+ if( topLevel ) { //fast-path final result\n+ return MatrixHistogram.deriveOutputCharacteristics(\n+ h1, h2, ret, root.getOp(), root.getMisc());\n+ }\n+\n+ //sketch propagation for intermediates other than final result\nMatrixHistogram outMap = MatrixHistogram\n.deriveOutputHistogram(h1, h2, ret, root.getOp(), root.getMisc());\nroot.setSynopsis(outMap);\n@@ -183,6 +193,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnnz = (long)(spOut * mnOut);\n}\n+ if( _useExcepts ) {\n//exploit upper bound on nnz based on non-empty rows/cols\nnnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ?\nMath.min((long)h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n@@ -190,6 +201,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//exploit lower bound on nnz based on half-full rows/cols\nnnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ?\nMath.max((long)h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n+ }\n//compute final sparsity\nreturn OptimizerUtils.getSparsity(\n@@ -343,6 +355,37 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n}\n+ public static MatrixCharacteristics deriveOutputCharacteristics(MatrixHistogram h1, MatrixHistogram h2, double spOut, OpCode op, long[] misc) {\n+ switch(op) {\n+ case MM:\n+ return new MatrixCharacteristics(h1.getRows(), h2.getCols(),\n+ OptimizerUtils.getNnz(h1.getRows(), h2.getCols(), spOut));\n+ case MULT:\n+ case PLUS:\n+ case NEQZERO:\n+ case EQZERO:\n+ return new MatrixCharacteristics(h1.getRows(), h1.getCols(),\n+ OptimizerUtils.getNnz(h1.getRows(), h1.getCols(), spOut));\n+ case RBIND:\n+ return new MatrixCharacteristics(h1.getRows()+h1.getRows(), h1.getCols(),\n+ OptimizerUtils.getNnz(h1.getRows()+h2.getRows(), h1.getCols(), spOut));\n+ case CBIND:\n+ return new MatrixCharacteristics(h1.getRows(), h1.getCols()+h2.getCols(),\n+ OptimizerUtils.getNnz(h1.getRows(), h1.getCols()+h2.getCols(), spOut));\n+ case DIAG:\n+ int ncol = h1.getCols()==1 ? 
h1.getRows() : 1;\n+ return new MatrixCharacteristics(h1.getRows(), ncol,\n+ OptimizerUtils.getNnz(h1.getRows(), ncol, spOut));\n+ case TRANS:\n+ return new MatrixCharacteristics(h1.getCols(), h1.getRows(), h1.getNonZeros());\n+ case RESHAPE:\n+ return new MatrixCharacteristics((int)misc[0], (int)misc[1],\n+ OptimizerUtils.getNnz((int)misc[0], (int)misc[1], spOut));\n+ default:\n+ throw new NotImplementedException();\n+ }\n+ }\n+\nprivate static MatrixHistogram deriveMMHistogram(MatrixHistogram h1, MatrixHistogram h2, double spOut) {\n//exact propagation if lhs or rhs full diag\nif( h1.fullDiag ) return h2;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2468] Improved MNC estimator (avoid final sketch propagation)
49,738
21.10.2018 19:23:08
-7,200
0a957e4c9a6aca0ef1cbf41e7dcdbdbc90ba4a04
Extended density map estimator (additional operations)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -23,6 +23,7 @@ import org.apache.commons.lang.NotImplementedException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.DenseBlock;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixReorg;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -81,10 +82,10 @@ public class EstimatorDensityMap extends SparsityEstimator\n@Override\npublic double estim(MatrixBlock m1, MatrixBlock m2, OpCode op) {\nif( isExactMetadataOp(op) )\n- return estimExactMetaData(m1.getMatrixCharacteristics(),\n- m2.getMatrixCharacteristics(), op).getSparsity();\n+ return estimExactMetaData(m1.getMatrixCharacteristics(), m2 != null ?\n+ m2.getMatrixCharacteristics() : null, op).getSparsity();\nDensityMap m1Map = new DensityMap(m1, _b);\n- DensityMap m2Map = (m1 == m2) ? //self product\n+ DensityMap m2Map = (m1 == m2 || m2 == null) ? //self product\nm1Map : new DensityMap(m2, _b);\nDensityMap outMap = estimIntern(m1Map, m2Map, op);\nreturn OptimizerUtils.getSparsity( //aggregate output histogram\n@@ -108,15 +109,16 @@ public class EstimatorDensityMap extends SparsityEstimator\ncase MM: return estimInternMM(m1Map, m2Map);\ncase MULT: return estimInternMult(m1Map, m2Map);\ncase PLUS: return estimInternPlus(m1Map, m2Map);\n+ case NEQZERO: return m1Map;\n+ case EQZERO: return estimInternEqZero(m1Map);\ncase RBIND:\ncase CBIND:\n//TODO simple append not possible due to partial blocks at end of m1Map\n- case TRANS:\n- case DIAG:\n- case RESHAPE:\n- //TODO add missing estimators\n+ case TRANS: return estimInternTrans(m1Map);\n+ case DIAG: return estimInternDiag(m1Map);\n+ case RESHAPE: return estimInternReshape(m1Map);\ndefault:\nthrow new NotImplementedException();\n}\n@@ -180,6 +182,40 @@ public class EstimatorDensityMap extends SparsityEstimator\nm1Map.getNumColumnsOrig(), _b, true);\n}\n+ private DensityMap estimInternTrans(DensityMap m1Map) {\n+ MatrixBlock out = LibMatrixReorg.transpose(m1Map.getMap(),\n+ new MatrixBlock(m1Map.getNumColumns(), m1Map.getNumRows(), false));\n+ return new DensityMap(out, m1Map.getNumColumnsOrig(),\n+ m1Map.getNumRowsOrig(), _b, m1Map._scaled);\n+ }\n+\n+ private DensityMap estimInternDiag(DensityMap m1Map) {\n+ if( m1Map.getNumColumnsOrig() > 1 )\n+ throw new NotImplementedException();\n+ m1Map.toNnz();\n+ MatrixBlock out = LibMatrixReorg.diag(m1Map.getMap(),\n+ new MatrixBlock(m1Map.getNumRows(), m1Map.getNumRows(), false));\n+ return new DensityMap(out, m1Map.getNumRowsOrig(),\n+ m1Map.getNumRowsOrig(), _b, m1Map._scaled);\n+ }\n+\n+ private DensityMap estimInternReshape(DensityMap m1Map) {\n+ MatrixBlock out = new MatrixBlock(1,1,(double)m1Map.getNonZeros());\n+ int b = Math.max(m1Map.getNumRowsOrig(), m1Map.getNumColumnsOrig());\n+ return new DensityMap(out, m1Map.getNumRowsOrig(),\n+ m1Map.getNumColumnsOrig(), b, false);\n+ }\n+\n+ private DensityMap estimInternEqZero(DensityMap m1Map) {\n+ MatrixBlock out = new MatrixBlock(m1Map.getNumRows(), m1Map.getNumColumns(), false);\n+ m1Map.toSparsity();\n+ for(int i=0; i<m1Map.getNumRows(); i++)\n+ for(int j=0; j<m1Map.getNumColumns(); j++)\n+ out.quickSetValue(i, j, 1-m1Map.get(i, j));\n+ return new DensityMap(out, 
m1Map.getNumRowsOrig(),\n+ m1Map.getNumColumnsOrig(), _b, m1Map._scaled);\n+ }\n+\npublic static class DensityMap {\nprivate final MatrixBlock _map;\nprivate final int _rlen;\n@@ -207,6 +243,10 @@ public class EstimatorDensityMap extends SparsityEstimator\nthrow new RuntimeException(\"Invalid block size: \"+_b);\n}\n+ public MatrixBlock getMap() {\n+ return _map;\n+ }\n+\npublic int getNumRows() {\nreturn _map.getNumRows();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpSingleTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/estim/OpSingleTest.java", "diff": "@@ -24,6 +24,7 @@ import org.apache.directory.api.util.exception.NotImplementedException;\nimport org.apache.sysml.hops.estim.EstimatorBasicAvg;\nimport org.apache.sysml.hops.estim.EstimatorBasicWorst;\nimport org.apache.sysml.hops.estim.EstimatorBitsetMM;\n+import org.apache.sysml.hops.estim.EstimatorDensityMap;\nimport org.apache.sysml.hops.estim.SparsityEstimator;\nimport org.apache.sysml.hops.estim.SparsityEstimator.OpCode;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -49,17 +50,6 @@ public class OpSingleTest extends AutomatedTestBase\n//do nothing\n}\n- //Average Case\n-// @Test\n-// public void testAvgEqzero() {\n-// runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, eqzero);\n-// }\n-\n-// @Test\n-// public void testAvgDiag() {\n-// runSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, diag);\n-// }\n-\n@Test\npublic void testAvgNeqzero() {\nrunSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, neqzero);\n@@ -75,94 +65,35 @@ public class OpSingleTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorBasicAvg(), m, k, sparsity, reshape);\n}\n- //Worst Case\n-// @Test\n-// public void testWorstEqzero() {\n-// runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, eqzero);\n-// }\n-\n-// @Test\n-// public void testWCasediag() {\n-// runSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, diag);\n-// }\n-\n@Test\n- public void testWorstNeqzero() {\n+ public void testWCNeqzero() {\nrunSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, neqzero);\n}\n@Test\n- public void testWoestTrans() {\n+ public void testWCTrans() {\nrunSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, trans);\n}\n@Test\n- public void testWorstReshape() {\n+ public void testWCReshape() {\nrunSparsityEstimateTest(new EstimatorBasicWorst(), m, k, sparsity, reshape);\n}\n-// //DensityMap\n-// @Test\n-// public void testDMCaseeqzero() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, eqzero);\n-// }\n-//\n-// @Test\n-// public void testDMCasediag() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, diag);\n-// }\n-//\n-// @Test\n-// public void testDMCaseneqzero() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, neqzero);\n-// }\n-//\n-// @Test\n-// public void testDMCasetrans() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, trans);\n-// }\n-//\n-// @Test\n-// public void testDMCasereshape() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, reshape);\n-// }\n-//\n-// //MNC\n-// @Test\n-// public void testMNCCaseeqzero() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, eqzero);\n-// }\n-//\n-// @Test\n-// public void testMNCCasediag() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, 
diag);\n-// }\n-//\n-// @Test\n-// public void testMNCCaseneqzero() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, neqzero);\n-// }\n-//\n-// @Test\n-// public void testMNCCasetrans() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, trans);\n-// }\n-//\n-// @Test\n-// public void testMNCCasereshape() {\n-// runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, reshape);\n-// }\n-//\n- //Bitset\n-// @Test\n-// public void testBitsetCaseeqzero() {\n-// runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, eqzero);\n-// }\n+ @Test\n+ public void testDMapNeqzero() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, neqzero);\n+ }\n+\n+ @Test\n+ public void testDMapTrans() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, trans);\n+ }\n-// @Test\n-// public void testBitsetCasediag() {\n-// runSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, diag);\n-// }\n+ @Test\n+ public void testDMapReshape() {\n+ runSparsityEstimateTest(new EstimatorDensityMap(), m, k, sparsity, reshape);\n+ }\n@Test\npublic void testBitsetNeqzero() {\n@@ -179,58 +110,6 @@ public class OpSingleTest extends AutomatedTestBase\nrunSparsityEstimateTest(new EstimatorBitsetMM(), m, k, sparsity, reshape);\n}\n-// //Layered Graph\n-// @Test\n-// public void testLGCaseeqzero() {\n-// runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, eqzero);\n-// }\n-//\n-// @Test\n-// public void testLGCasediag() {\n-// runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, diag);\n-// }\n-//\n-// @Test\n-// public void testLGCaseneqzero() {\n-// runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, neqzero);\n-// }\n-//\n-// @Test\n-// public void testLGCasetans() {\n-// runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, trans);\n-// }\n-//\n-// @Test\n-// public void testLGCasereshape() {\n-// runSparsityEstimateTest(new EstimatorLayeredGraph(), m, k, sparsity, reshape);\n-// }\n-//\n-// //Sample\n-// @Test\n-// public void testSampleCaseeqzero() {\n-// runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, eqzero);\n-// }\n-//\n-// @Test\n-// public void testSampleCasediag() {\n-// runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, diag);\n-// }\n-//\n-// @Test\n-// public void testSampleCaseneqzero() {\n-// runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, neqzero);\n-// }\n-//\n-// @Test\n-// public void testSampleCasetrans() {\n-// runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, trans);\n-// }\n-//\n-// @Test\n-// public void testSampleCasereshape() {\n-// runSparsityEstimateTest(new EstimatorSample(), m, k, sparsity, reshape);\n-// }\n-\nprivate void runSparsityEstimateTest(SparsityEstimator estim, int m, int k, double sp, OpCode op) {\nMatrixBlock m1 = MatrixBlock.randOperations(m, k, sp, 1, 1, \"uniform\", 3);\nMatrixBlock m2 = new MatrixBlock();\n@@ -255,6 +134,6 @@ public class OpSingleTest extends AutomatedTestBase\nthrow new NotImplementedException();\n}\n//compare estimated and real sparsity\n- TestUtils.compareScalars(est, m2.getSparsity(), (estim instanceof EstimatorBasicWorst) ? 5e-1 : 1e-2);\n+ TestUtils.compareScalars(est, m2.getSparsity(), 0);\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Extended density map estimator (additional operations)
49,736
22.10.2018 09:34:26
25,200
73e1e40d766fda53210b5176597b182024cac344
[MINOR] Bugfix in Large Dense Block: the current master throws java.lang.ArrayIndexOutOfBoundsException when counting the number of non-zeros
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -164,7 +164,7 @@ public class DenseBlockLDRB extends DenseBlock\nnnz += UtilFunctions.computeNnz(data[bi], lpos, len);\nelse\nfor(int i=lpos; i<lpos+len; i+=clen)\n- nnz += UtilFunctions.computeNnz(data[i], i+cl, cu-cl);\n+ nnz += UtilFunctions.computeNnz(data[bi], i+cl, cu-cl);\n}\nreturn nnz;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Bugfix in Large Dense Block - Current master throws java.lang.ArrayIndexOutOfBoundsException when counting number of non-zeroes
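A hedged illustration of the one-character fix in the record above (sketch only, taken from the diff): in the large row-block layout, the value array is selected by the block index bi, while i merely advances the offset within that block, so the nnz lookup must read data[bi] rather than data[i].

    // per-block nnz count over the column range [cl, cu) of a large dense row block
    for (int i = lpos; i < lpos + len; i += clen)
        nnz += UtilFunctions.computeNnz(data[bi], i + cl, cu - cl); // data[bi], not data[i]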
49,738
22.10.2018 23:00:03
-7,200
17821d10543c4373b4728068d1b79bdf9346a38f
Fix and cleanup baseline estimators for chains of ops
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicAvg.java", "diff": "@@ -34,8 +34,9 @@ public class EstimatorBasicAvg extends SparsityEstimator\npublic MatrixCharacteristics estim(MMNode root) {\nMatrixCharacteristics mc1 = !root.getLeft().isLeaf() ?\nestim(root.getLeft()) : root.getLeft().getMatrixCharacteristics();\n- MatrixCharacteristics mc2 = !root.getRight().isLeaf() ?\n- estim(root.getRight()) : root.getRight().getMatrixCharacteristics();\n+ MatrixCharacteristics mc2 = root.getRight()==null ? null :\n+ !root.getRight().isLeaf() ? estim(root.getRight()) :\n+ root.getRight().getMatrixCharacteristics();\nreturn root.setMatrixCharacteristics(\nestimIntern(mc1, mc2, root.getOp()));\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBasicWorst.java", "diff": "@@ -36,10 +36,15 @@ public class EstimatorBasicWorst extends SparsityEstimator\n{\n@Override\npublic MatrixCharacteristics estim(MMNode root) {\n+ if (!root.getLeft().isLeaf())\n+ estim(root.getLeft()); // obtain synopsis\n+ if (root.getRight()!=null && !root.getRight().isLeaf())\n+ estim(root.getRight()); // obtain synopsis\nMatrixCharacteristics mc1 = !root.getLeft().isLeaf() ?\nestim(root.getLeft()) : root.getLeft().getMatrixCharacteristics();\n- MatrixCharacteristics mc2 = !root.getRight().isLeaf() ?\n- estim(root.getRight()) : root.getRight().getMatrixCharacteristics();\n+ MatrixCharacteristics mc2 = root.getRight()==null ? null :\n+ !root.getRight().isLeaf() ? estim(root.getRight()) :\n+ root.getRight().getMatrixCharacteristics();\nreturn root.setMatrixCharacteristics(\nestimIntern(mc1, mc2, root.getOp()));\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorBitsetMM.java", "diff": "@@ -46,14 +46,11 @@ public class EstimatorBitsetMM extends SparsityEstimator\n{\n@Override\npublic MatrixCharacteristics estim(MMNode root) {\n- // recursive density map computation of non-leaf nodes\n- if (!root.getLeft().isLeaf())\n- estim(root.getLeft()); // obtain synopsis\n- if (!root.getRight().isLeaf())\n- estim(root.getRight()); // obtain synopsis\n+ estimateInputs(root);\nBitsetMatrix m1Map = !root.getLeft().isLeaf() ? (BitsetMatrix) root.getLeft().getSynopsis() :\nnew BitsetMatrix1(root.getLeft().getData());\n- BitsetMatrix m2Map = !root.getRight().isLeaf() ? (BitsetMatrix) root.getRight().getSynopsis() :\n+ BitsetMatrix m2Map = root.getRight() == null ? null :\n+ !root.getRight().isLeaf() ? 
(BitsetMatrix) root.getRight().getSynopsis() :\nnew BitsetMatrix1(root.getRight().getData());\nBitsetMatrix outMap = estimInternal(m1Map, m2Map, root.getOp());\nroot.setSynopsis(outMap); // memorize boolean matrix\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorDensityMap.java", "diff": "@@ -55,15 +55,12 @@ public class EstimatorDensityMap extends SparsityEstimator\n@Override\npublic MatrixCharacteristics estim(MMNode root) {\n- //recursive density map computation of non-leaf nodes\n- if( !root.getLeft().isLeaf() )\n- estim(root.getLeft()); //obtain synopsis\n- if( !root.getRight().isLeaf() )\n- estim(root.getRight()); //obtain synopsis\n+ estimateInputs(root);\nDensityMap m1Map = !root.getLeft().isLeaf() ?\n(DensityMap)root.getLeft().getSynopsis() :\nnew DensityMap(root.getLeft().getData(), _b);\n- DensityMap m2Map = !root.getRight().isLeaf() ?\n+ DensityMap m2Map = root.getRight()==null ? null:\n+ !root.getRight().isLeaf() ?\n(DensityMap)root.getRight().getSynopsis() :\nnew DensityMap(root.getRight().getData(), _b);\n@@ -71,7 +68,7 @@ public class EstimatorDensityMap extends SparsityEstimator\nDensityMap outMap = estimIntern(m1Map, m2Map, root.getOp());\nroot.setSynopsis(outMap); //memoize density map\nreturn root.setMatrixCharacteristics(new MatrixCharacteristics(\n- root.getLeft().getRows(), root.getRight().getCols(), outMap.getNonZeros()));\n+ outMap.getNumRowsOrig(), outMap.getNumColumnsOrig(), outMap.getNonZeros()));\n}\n@Override\n@@ -230,7 +227,7 @@ public class EstimatorDensityMap extends SparsityEstimator\n_map = init(in);\n_scaled = false;\nif( !isPow2(_b) )\n- throw new RuntimeException(\"Invalid block size: \"+_b);\n+ System.out.println(\"WARN: Invalid block size: \"+_b);\n}\npublic DensityMap(MatrixBlock map, int rlenOrig, int clenOrig, int b, boolean scaled) {\n@@ -240,7 +237,7 @@ public class EstimatorDensityMap extends SparsityEstimator\n_map = map;\n_scaled = scaled;\nif( !isPow2(_b) )\n- throw new RuntimeException(\"Invalid block size: \"+_b);\n+ System.out.println(\"WARN: Invalid block size: \"+_b);\n}\npublic MatrixBlock getMap() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -59,7 +59,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\nprivate MatrixCharacteristics estim(MMNode root, boolean topLevel) {\n- //recursive histogram computation of non-leaf nodes\n+ //NOTE: not estimateInputs due to handling of topLevel\nif( !root.getLeft().isLeaf() )\nestim(root.getLeft(), false); //obtain synopsis\nif( root.getRight()!=null && !root.getRight().isLeaf() )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/MMNode.java", "diff": "@@ -68,6 +68,14 @@ public class MMNode\nthis(left, null, op, misc);\n}\n+ public void reset() {\n+ if( _m1 != null )\n+ _m1.reset();\n+ if( _m2 != null )\n+ _m2.reset();\n+ _synops = null;\n+ }\n+\npublic int getRows() {\nreturn (int)_mc.getRows();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "diff": "@@ -114,4 +114,11 @@ public abstract class SparsityEstimator\nthrow new 
HopsException(\"Opcode is not an exact meta data operation: \"+op.name());\n}\n}\n+\n+ protected void estimateInputs(MMNode root) {\n+ if (!root.getLeft().isLeaf())\n+ estim(root.getLeft()); // obtain synopsis\n+ if (root.getRight()!=null && !root.getRight().isLeaf())\n+ estim(root.getRight()); // obtain synopsis\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2479] Fix and cleanup baseline estimators for chains of ops
49,738
23.10.2018 12:35:23
-7,200
d9d6f56157806ce2e93718587ebae583629d693f
[MINOR] Fix exact size propagation avg-case sparsity estimator
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/SparsityEstimator.java", "diff": "@@ -105,9 +105,10 @@ public abstract class SparsityEstimator\ncase RBIND:\nreturn new MatrixCharacteristics(mc1.getRows() + mc2.getRows(),\nmc1.getCols(), mc1.getNonZeros() + mc2.getNonZeros());\n+ case TRANS:\n+ return new MatrixCharacteristics(mc1.getCols(), mc1.getRows(), mc1.getNonZeros());\n// unary operation that preserve sparsity exactly\ncase NEQZERO:\n- case TRANS:\ncase RESHAPE:\nreturn mc1;\ndefault:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix exact size propagation avg-case sparsity estimator
49,738
26.10.2018 23:16:12
-7,200
0eff9f28d3618220985041d7034dafdcc1701240
[MINOR] Cleanup MNC sparsity estimator (redundant upper bound). With the modified output size of the generic fallback estimator, the explicit upper bound became obsolete because it is already ensured via the modified areas. This patch cleans up the entire configuration handling of extended MNC sketches.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "new_path": "src/main/java/org/apache/sysml/hops/estim/EstimatorMatrixHistogram.java", "diff": "@@ -41,16 +41,16 @@ import org.apache.sysml.runtime.matrix.data.SparseBlock;\npublic class EstimatorMatrixHistogram extends SparsityEstimator\n{\n//internal configurations\n- private static final boolean DEFAULT_USE_EXCEPTS = true;\n+ private static final boolean DEFAULT_USE_EXTENDED = true;\n- private final boolean _useExcepts;\n+ private final boolean _useExtended;\npublic EstimatorMatrixHistogram() {\n- this(DEFAULT_USE_EXCEPTS);\n+ this(DEFAULT_USE_EXTENDED);\n}\n- public EstimatorMatrixHistogram(boolean useExcepts) {\n- _useExcepts = useExcepts;\n+ public EstimatorMatrixHistogram(boolean useExtended) {\n+ _useExtended = useExtended;\n}\n@Override\n@@ -66,10 +66,10 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nestim(root.getRight(), false); //obtain synopsis\nMatrixHistogram h1 = !root.getLeft().isLeaf() ?\n(MatrixHistogram)root.getLeft().getSynopsis() :\n- new MatrixHistogram(root.getLeft().getData(), _useExcepts);\n+ new MatrixHistogram(root.getLeft().getData(), _useExtended);\nMatrixHistogram h2 = root.getRight() != null ? !root.getRight().isLeaf() ?\n(MatrixHistogram)root.getRight().getSynopsis() :\n- new MatrixHistogram(root.getRight().getData(), _useExcepts) : null;\n+ new MatrixHistogram(root.getRight().getData(), _useExtended) : null;\n//estimate output sparsity based on input histograms\ndouble ret = estimIntern(h1, h2, root.getOp(), root.getMisc());\n@@ -96,9 +96,9 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nif( isExactMetadataOp(op) )\nreturn estimExactMetaData(m1.getMatrixCharacteristics(),\nm2.getMatrixCharacteristics(), op).getSparsity();\n- MatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\n+ MatrixHistogram h1 = new MatrixHistogram(m1, _useExtended);\nMatrixHistogram h2 = (m1 == m2) ? 
//self product\n- h1 : new MatrixHistogram(m2, _useExcepts);\n+ h1 : new MatrixHistogram(m2, _useExtended);\nreturn estimIntern(h1, h2, op, null);\n}\n@@ -106,7 +106,7 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\npublic double estim(MatrixBlock m1, OpCode op) {\nif( isExactMetadataOp(op) )\nreturn estimExactMetaData(m1.getMatrixCharacteristics(), null, op).getSparsity();\n- MatrixHistogram h1 = new MatrixHistogram(m1, _useExcepts);\n+ MatrixHistogram h1 = new MatrixHistogram(m1, _useExtended);\nreturn estimIntern(h1, null, op, null);\n}\n@@ -169,7 +169,9 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n//note: normally h1.getRows()*h2.getCols() would define mnOut\n//but by leveraging the knowledge of rows/cols w/ <=1 nnz, we account\n//that exact and approximate fractions touch different areas\n- long mnOut = (long)(h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1);\n+ long mnOut = _useExtended ?\n+ (long)(h1.rNonEmpty-h1.rN1) * (h2.cNonEmpty-h2.cN1) :\n+ (long)(h1.getRows()-h1.rN1) * (h2.getCols()-h2.cN1);\ndouble spOutRest = 0;\nfor( int j=0; j<h1.getCols(); j++ ) {\n//exact fractions, w/o double counting\n@@ -184,7 +186,9 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\n}\n//general case with approximate output\nelse {\n- long mnOut = (long)h1.rNonEmpty*h2.cNonEmpty;\n+ long mnOut = _useExtended ?\n+ (long)h1.rNonEmpty * h2.cNonEmpty :\n+ (long)h1.getRows() * h2.getCols();\ndouble spOut = 0;\nfor( int j=0; j<h1.getCols(); j++ ) {\ndouble lsp = (double) h1.cNnz[j] * h2.rNnz[j] / mnOut;\n@@ -193,12 +197,9 @@ public class EstimatorMatrixHistogram extends SparsityEstimator\nnnz = (long)(spOut * mnOut);\n}\n- if( _useExcepts ) {\n- //exploit upper bound on nnz based on non-empty rows/cols\n- nnz = (h1.rNonEmpty >= 0 && h2.cNonEmpty >= 0) ?\n- Math.min((long)h1.rNonEmpty * h2.cNonEmpty, nnz) : nnz;\n-\n+ if( _useExtended ) {\n//exploit lower bound on nnz based on half-full rows/cols\n+ //note: upper bound applied via modified output sizes\nnnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0) ?\nMath.max((long)h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup MNC sparsity estimator (redundant upper bound) With the modified output size of the generic fallback estimator, the upper bound became obsolete because the upper bound is already ensured via the modified areas. This patch cleans up the entire configuration handling of extended MNC sketches.
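A condensed view of the bound handling after this cleanup (a sketch using the variable names from the diff above): extended sketches now only clamp from below, since the upper bound is already implied by the modified output area mnOut.

    if (_useExtended) {
        // lower bound on nnz from rows/columns that are at least half full;
        // no explicit upper bound needed, the modified mnOut already enforces it
        nnz = (h1.rNdiv2 >= 0 && h2.cNdiv2 >= 0)
            ? Math.max((long) h1.rNdiv2 * h2.cNdiv2, nnz) : nnz;
    }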
49,738
28.10.2018 20:08:38
-3,600
1c8e2974423b6bf972465dd8108fd0ef6404bbc7
Basic initialization of tensor blocks, including tests
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/TensorBlock.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/TensorBlock.java", "diff": "@@ -18,27 +18,289 @@ package org.tugraz.sysds.runtime.data;\nimport java.io.Serializable;\n+import org.tugraz.sysds.common.Types.ValueType;\n+import org.tugraz.sysds.runtime.DMLRuntimeException;\n+\npublic class TensorBlock implements Serializable\n{\nprivate static final long serialVersionUID = -4205257127878517048L;\n- protected int[] dims = new int[2];\n- protected boolean sparse = true;\n- protected long nonZeros = 0;\n+ public static final double SPARSITY_TURN_POINT = 0.4;\n+ public static final ValueType DEFAULT_VTYPE = ValueType.DOUBLE;\n+ public static final int[] DEFAULT_DIMS = new int[]{0, 0};\n+ public static final SparseBlock.Type DEFAULT_SPARSEBLOCK = SparseBlock.Type.MCSR;\n+\n+ //constant value type of tensor block\n+ protected final ValueType _vt;\n+\n+ //min 2 dimensions to preserve proper matrix semantics\n+ protected int[] _dims; //[2,inf)\n+ protected boolean _sparse = true;\n+ protected long _nnz = 0;\n//matrix data (sparse or dense)\n- protected DenseBlock denseBlock = null;\n- protected SparseBlock sparseBlock = null;\n+ protected DenseBlock _denseBlock = null;\n+ protected SparseBlock _sparseBlock = null;\npublic TensorBlock() {\n+ this(DEFAULT_VTYPE, DEFAULT_DIMS.clone(), true, -1);\n+ }\n+\n+ public TensorBlock(ValueType vt, int[] dims) {\n+ this(vt, dims, true, -1);\n+ }\n+\n+ public TensorBlock(ValueType vt, int[] dims, boolean sp) {\n+ this(vt, dims, sp, -1);\n+ }\n+\n+ public TensorBlock(ValueType vt, int[] dims, boolean sp, long estnnz) {\n+ _vt = vt;\n+ reset(dims, sp, estnnz, 0);\n+ }\n+\n+ public TensorBlock(TensorBlock that) {\n+ _vt = that.getValueType();\n+ copy(that);\n+ }\n+\n+ public TensorBlock(double val) {\n+ _vt = DEFAULT_VTYPE;\n+ reset(new int[] {1, 1}, false, 1, val);\n+ }\n+\n+ public TensorBlock(int[] dims, ValueType vt, double val) {\n+ _vt = DEFAULT_VTYPE;\n+ _dims = dims;\n+ reset(dims, false, (val==0) ? 0 : getLength(), val);\n+ }\n+\n+ ////////\n+ // Initialization methods\n+ // (reset, init, allocate, etc)\n+\n+ public void reset() {\n+ reset(_dims, _sparse, -1, 0);\n+ }\n+\n+ public void reset(int[] dims) {\n+ reset(dims, _sparse, -1, 0);\n+ }\n+\n+ public void reset(int[] dims, long estnnz) {\n+ reset(dims, evalSparseFormatInMemory(dims, estnnz), estnnz, 0);\n+ }\n+\n+ public void reset(int[] dims, boolean sp) {\n+ reset(dims, sp, -1, 0);\n+ }\n+\n+ public void reset(int[] dims, boolean sp, long estnnz) {\n+ reset(dims, sp, estnnz, 0);\n+ }\n+\n+ /**\n+ * Internal canonical reset of dense and sparse tensor blocks.\n+ *\n+ * @param dims number and size of dimensions\n+ * @param sp sparse representation\n+ * @param estnnz estimated number of non-zeros\n+ * @param val initialization value\n+ */\n+ private void reset(int[] dims, boolean sp, long estnnz, double val) {\n+ //check for valid dimensions\n+ if( dims.length < 2 )\n+ throw new DMLRuntimeException(\"Invalid number of tensor dimensions: \"+dims.length);\n+ for( int i=0; i<dims.length; i++ )\n+ if( dims[i] < 0 )\n+ throw new DMLRuntimeException(\"Invalid \"+i+\"th dimensions: \"+dims[i]);\n+\n+ //reset basic meta data\n+ _dims = dims;\n+ _sparse = sp;\n+ _nnz = (val == 0) ? 
0 : getLength();\n+\n+ //reset sparse/dense blocks\n+ if( _sparse )\n+ resetSparse();\n+ else\n+ resetDense(val);\n+ }\n+\n+ private void resetSparse() {\n+ if(_sparseBlock == null)\n+ return;\n+ //TODO simplify estimated non-zeros\n+ _sparseBlock.reset(-1, getDim(2));\n+ }\n+\n+ private void resetDense(double val) {\n+ //handle to dense block allocation and\n+ //reset dense block to given value\n+ if( _denseBlock != null )\n+ _denseBlock.reset(getDim(0), getDim(1), val);\n+ else if( val != 0 ) {\n+ allocateDenseBlock(false);\n+ _denseBlock.set(val);\n+ }\n+ }\n+\n+ public boolean isAllocated() {\n+ return _sparse ? (_sparseBlock!=null) : (_denseBlock!=null);\n+ }\n+\n+ public TensorBlock allocateDenseBlock() {\n+ allocateDenseBlock(true);\n+ return this;\n+ }\n+\n+ public TensorBlock allocateBlock() {\n+ if( _sparse )\n+ allocateSparseBlock();\n+ else\n+ allocateDenseBlock();\n+ return this;\n+ }\n+\n+ public boolean allocateDenseBlock(boolean clearNNZ) {\n+ //allocate block if non-existing or too small (guaranteed to be 0-initialized),\n+ long limit = getLength();\n+ boolean reset = (_denseBlock == null || _denseBlock.capacity() < limit);\n+ if( _denseBlock == null )\n+ _denseBlock = DenseBlockFactory.createDenseBlock(getDim(0), getDim(1));\n+ else if( _denseBlock.capacity() < limit )\n+ _denseBlock.reset(getDim(0), getDim(1));\n+\n+ //clear nnz if necessary\n+ if( clearNNZ )\n+ _nnz = 0;\n+ _sparse = false;\n+ return reset;\n}\n+ public boolean allocateSparseBlock() {\n+ return allocateSparseBlock(true);\n+ }\n+\n+ public boolean allocateSparseBlock(boolean clearNNZ) {\n+ //allocate block if non-existing or too small (guaranteed to be 0-initialized)\n+ //but do not replace existing block even if not in default type\n+ boolean reset = _sparseBlock == null || _sparseBlock.numRows()<getDim(0);\n+ if( reset ) {\n+ _sparseBlock = SparseBlockFactory\n+ .createSparseBlock(DEFAULT_SPARSEBLOCK, getDim(0));\n+ }\n+ //clear nnz if necessary\n+ if( clearNNZ )\n+ _nnz = 0;\n+\n+ return reset;\n+ }\n+\n+ ////////\n+ // Basic meta data\n+\n+ public ValueType getValueType() {\n+ return _vt;\n+ }\n+\n+ public int getNumDims() {\n+ return _dims.length;\n+ }\n+\n+ public int getNumRows() {\n+ return getDim(0);\n+ }\n+\n+ public int getNumCols() {\n+ return getDim(1);\n+ }\n+\n+ public int getDim(int i) {\n+ return _dims[i];\n+ }\n+\n+ public long getNonZeros() {\n+ return _nnz;\n+ }\n+\n+ public boolean isVector() {\n+ return getNumDims() <= 2\n+ && (getDim(0) == 1 || getDim(1) == 1);\n+ }\n+\n+ public boolean isMatrix() {\n+ return getNumDims() == 2\n+ && (getDim(0) > 1 && getDim(1) > 1);\n+ }\n+\n+ public long getLength() {\n+ long ret = 1;\n+ for(int i=0; i<getNumDims(); i++)\n+ ret *= getDim(i);\n+ return ret;\n+ }\n+\n+ public boolean isSparse() {\n+ return _sparse;\n+ }\n+\n+ public boolean isEmpty() {\n+ return isEmpty(false);\n+ }\n+\n+ public boolean isEmpty(boolean safe) {\n+ boolean ret = false;\n+ if( _sparse && _sparseBlock==null )\n+ ret = true;\n+ else if( !_sparse && _denseBlock==null )\n+ ret = true;\n+ if( _nnz==0 ) {\n+ //prevent under-estimation\n+ if(safe)\n+ //TODO recomputeNonZeros();\n+ ret = (_nnz==0);\n+ }\n+ return ret;\n+ }\n+\n+ public DenseBlock getDenseBlock() {\n+ return _denseBlock;\n+ }\n+\n+ public SparseBlock getSparseBlock() {\n+ return _sparseBlock;\n+ }\n+\n+ ////////\n+ // Basic modification\n+\npublic double get(int[] ix) {\n- return -1;\n+ return -1; //TODO get\n}\npublic void set(int[] ix, double v) {\n+ //TODO set\n+ }\n+\n+ private void copy(TensorBlock that) 
{\n+ _dims = that._dims.clone();\n+ _sparse = that._sparse;\n+ allocateBlock();\n+ _nnz = that._nnz;\n+\n+ // TODO Auto-generated method stub copy\n}\n+\n+ ////////\n+ // Size estimation and format decisions\n+\n+\n+ private boolean evalSparseFormatInMemory(int[] dims, long estnnz) {\n+ // TODO Auto-generated method stub\n+ return false;\n+ }\n+\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/tugraz/sysds/test/tensor/TensorConstructionTest.java", "diff": "+/*\n+ * Copyright 2018 Graz University of Technology\n+ *\n+ * Licensed under the Apache License, Version 2.0 (the \"License\");\n+ * you may not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing, software\n+ * distributed under the License is distributed on an \"AS IS\" BASIS,\n+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ * See the License for the specific language governing permissions and\n+ * limitations under the License.\n+ */\n+\n+package org.tugraz.sysds.test.tensor;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.tugraz.sysds.common.Types.ValueType;\n+import org.tugraz.sysds.runtime.data.TensorBlock;\n+\n+\n+public class TensorConstructionTest\n+{\n+ @Test\n+ public void testMetaDefaultTensor() throws Exception {\n+ TensorBlock tb = new TensorBlock();\n+ Assert.assertEquals(ValueType.DOUBLE, tb.getValueType());\n+ Assert.assertEquals(2, tb.getNumDims());\n+ Assert.assertEquals(0, tb.getNumRows());\n+ Assert.assertEquals(0, tb.getNumCols());\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(true, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaValueTensor() throws Exception {\n+ TensorBlock tb = new TensorBlock(7.3);\n+ Assert.assertEquals(ValueType.DOUBLE, tb.getValueType());\n+ Assert.assertEquals(2, tb.getNumDims());\n+ Assert.assertEquals(1, tb.getNumRows());\n+ Assert.assertEquals(1, tb.getNumCols());\n+ Assert.assertEquals(1, tb.getNonZeros());\n+ Assert.assertEquals(false, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaTypedTensor() throws Exception {\n+ TensorBlock tb = new TensorBlock(ValueType.INT, new int[]{11,12,13});\n+ Assert.assertEquals(ValueType.INT, tb.getValueType());\n+ Assert.assertEquals(3, tb.getNumDims());\n+ Assert.assertEquals(11, tb.getNumRows());\n+ Assert.assertEquals(12, tb.getNumCols());\n+ Assert.assertEquals(13, tb.getDim(2));\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(true, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaTypedTensor2() throws Exception {\n+ TensorBlock tb = new TensorBlock(ValueType.INT, new int[]{11,12,13}, false);\n+ Assert.assertEquals(ValueType.INT, tb.getValueType());\n+ Assert.assertEquals(3, tb.getNumDims());\n+ Assert.assertEquals(11, tb.getNumRows());\n+ Assert.assertEquals(12, tb.getNumCols());\n+ Assert.assertEquals(13, tb.getDim(2));\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(false, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaTypedTensor3() throws Exception {\n+ TensorBlock tb = new TensorBlock(ValueType.BOOLEAN, new int[]{11,12}, true);\n+ Assert.assertEquals(ValueType.BOOLEAN, tb.getValueType());\n+ Assert.assertEquals(2, 
tb.getNumDims());\n+ Assert.assertEquals(11, tb.getNumRows());\n+ Assert.assertEquals(12, tb.getNumCols());\n+ Assert.assertEquals(12, tb.getDim(1));\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(true, tb.isSparse());\n+ Assert.assertEquals(true, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaCopyDefaultTensor() throws Exception {\n+ TensorBlock tb = new TensorBlock(new TensorBlock());\n+ Assert.assertEquals(ValueType.DOUBLE, tb.getValueType());\n+ Assert.assertEquals(2, tb.getNumDims());\n+ Assert.assertEquals(0, tb.getNumRows());\n+ Assert.assertEquals(0, tb.getNumCols());\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(true, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaCopyValueTensor() throws Exception {\n+ TensorBlock tb = new TensorBlock(new TensorBlock(7.3));\n+ Assert.assertEquals(ValueType.DOUBLE, tb.getValueType());\n+ Assert.assertEquals(2, tb.getNumDims());\n+ Assert.assertEquals(1, tb.getNumRows());\n+ Assert.assertEquals(1, tb.getNumCols());\n+ Assert.assertEquals(1, tb.getNonZeros());\n+ Assert.assertEquals(false, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaCopyTypedTensor() throws Exception {\n+ TensorBlock tb = new TensorBlock(new TensorBlock(ValueType.INT, new int[]{11,12,13}));\n+ Assert.assertEquals(ValueType.INT, tb.getValueType());\n+ Assert.assertEquals(3, tb.getNumDims());\n+ Assert.assertEquals(11, tb.getNumRows());\n+ Assert.assertEquals(12, tb.getNumCols());\n+ Assert.assertEquals(13, tb.getDim(2));\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(true, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaCopyTypedTensor2() throws Exception {\n+ TensorBlock tb = new TensorBlock(new TensorBlock(ValueType.INT, new int[]{11,12,13}, false));\n+ Assert.assertEquals(ValueType.INT, tb.getValueType());\n+ Assert.assertEquals(3, tb.getNumDims());\n+ Assert.assertEquals(11, tb.getNumRows());\n+ Assert.assertEquals(12, tb.getNumCols());\n+ Assert.assertEquals(13, tb.getDim(2));\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(false, tb.isSparse());\n+ Assert.assertEquals(false, tb.isMatrix());\n+ }\n+\n+ @Test\n+ public void testMetaCopyTypedTensor3() throws Exception {\n+ TensorBlock tb = new TensorBlock(new TensorBlock(ValueType.BOOLEAN, new int[]{11,12}, true));\n+ Assert.assertEquals(ValueType.BOOLEAN, tb.getValueType());\n+ Assert.assertEquals(2, tb.getNumDims());\n+ Assert.assertEquals(11, tb.getNumRows());\n+ Assert.assertEquals(12, tb.getNumCols());\n+ Assert.assertEquals(12, tb.getDim(1));\n+ Assert.assertEquals(0, tb.getNonZeros());\n+ Assert.assertEquals(true, tb.isSparse());\n+ Assert.assertEquals(true, tb.isMatrix());\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
Basic initialization of tensor blocks, incl tests
49,738
30.10.2018 22:12:46
-3,600
34b832271a3904ef347a7080ac8d8867e967806b
DenseBlock extension for tensors and multiple data types, part I
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "diff": "package org.tugraz.sysds.runtime.data;\nimport java.io.Serializable;\n+import java.util.Arrays;\nimport org.tugraz.sysds.runtime.instructions.cp.KahanObject;\n+import org.tugraz.sysds.runtime.util.UtilFunctions;\n/**\n* This DenseBlock is an abstraction for different dense, row-major\n@@ -42,6 +44,17 @@ public abstract class DenseBlock implements Serializable\nLDRB, //large dense row block\n}\n+ protected int _rlen;\n+ protected int _odims;\n+\n+ protected DenseBlock(int[] dims) {\n+ long odims = UtilFunctions.prod(dims, 1);\n+ if( odims > Integer.MAX_VALUE )\n+ throw new RuntimeException(\"Invalid dims: \"+Arrays.toString(dims));\n+ _rlen = dims[0];\n+ _odims = (int) odims;\n+ }\n+\n/**\n* Resets the dense block by deleting non-zero values. After this\n* call all countNonZeros() calls are guaranteed to return 0.\n@@ -54,19 +67,17 @@ public abstract class DenseBlock implements Serializable\n* the new dimensions exceed the current capacity, the underlying\n* storage is extended accordingly.\n*\n- * @param rlen number of rows\n- * @param clen number of columns\n+ * @param dims length and size of dimensions.\n*/\n- public abstract void reset(int rlen, int clen);\n+ public abstract void reset(int[] dims);\n/**\n* Resets the dense block by setting the given value.\n*\n- * @param rlen number of rows\n- * @param clen number of columns\n+ * @param dims lenth and size of dimensions\n* @param v value\n*/\n- public abstract void reset(int rlen, int clen, double v);\n+ public abstract void reset(int[] dims, double v);\n/**\n@@ -74,7 +85,9 @@ public abstract class DenseBlock implements Serializable\n*\n* @return number of rows\n*/\n- public abstract int numRows();\n+ public final int numRows() {\n+ return _rlen;\n+ }\n/**\n* Get the number of allocated blocks.\n@@ -98,6 +111,12 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract int blockSize(int bix);\n+ /**\n+ * Indicates of the dnse block is numeric.\n+ * @return true if numeric (FP, INT, BOOLEAN)\n+ */\n+ public abstract boolean isNumeric();\n+\n/**\n* Indicates if the dense block has a single\n* underlying block, i.e., if numBlocks==1.\n@@ -116,14 +135,15 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract boolean isContiguous(int rl, int ru);\n-\n/**\n* Get the length of the dense block as the product\n- * of row and column dimensions.\n+ * of all dimensions.\n*\n* @return length\n*/\n- public abstract long size();\n+ public final long size() {\n+ return _rlen * _odims;\n+ }\n/**\n* Get the length of the given block.\n@@ -339,6 +359,26 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract double get(int r, int c);\n+ /**\n+ *\n+ * @param ix\n+ * @return\n+ */\n+ public abstract double get(int[] ix);\n+\n+\n@Override\n- public abstract String toString();\n+ public String toString() {\n+ StringBuilder sb = new StringBuilder();\n+ for(int i=0; i<_rlen; i++) {\n+ double[] data = values(i);\n+ int ix = pos(i);\n+ for(int j=0; j<_odims; j++) {\n+ sb.append(data[ix+j]);\n+ sb.append(\"\\t\");\n+ }\n+ sb.append(\"\\n\");\n+ }\n+ return sb.toString();\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB.java", "diff": "@@ -26,52 +26,10 @@ import 
java.util.Arrays;\nimport org.tugraz.sysds.runtime.util.UtilFunctions;\n-public class DenseBlockDRB extends DenseBlock\n+public abstract class DenseBlockDRB extends DenseBlock\n{\n- private static final long serialVersionUID = 8546723684649816489L;\n-\n- private double[] data;\n- private int rlen;\n- private int clen;\n-\n- public DenseBlockDRB(int rlen, int clen) {\n- reset(rlen, clen, 0);\n- }\n-\n- public DenseBlockDRB(double[] data, int rlen, int clen) {\n- this.data = data;\n- this.rlen = rlen;\n- this.clen = clen;\n- }\n-\n- @Override\n- public void reset() {\n- reset(rlen, clen, 0);\n- }\n-\n- @Override\n- public void reset(int rlen, int clen) {\n- reset(rlen, clen, 0);\n- }\n-\n- @Override\n- public void reset(int rlen, int clen, double v) {\n- int len = rlen * clen;\n- if( len > capacity() ) {\n- data = new double[len];\n- if( v != 0 )\n- Arrays.fill(data, v);\n- }\n- else {\n- Arrays.fill(data, 0, len, v);\n- }\n- this.rlen = rlen;\n- this.clen = clen;\n- }\n-\n- @Override\n- public int numRows() {\n- return rlen;\n+ protected DenseBlockDRB(int[] dims) {\n+ super(dims);\n}\n@Override\n@@ -81,12 +39,12 @@ public class DenseBlockDRB extends DenseBlock\n@Override\npublic int blockSize() {\n- return rlen;\n+ return _dims[0];\n}\n@Override\npublic int blockSize(int bix) {\n- return rlen;\n+ return _dims[0];\n}\n@Override\n@@ -99,146 +57,8 @@ public class DenseBlockDRB extends DenseBlock\nreturn true;\n}\n- @Override\n- public long size() {\n- return rlen * clen;\n- }\n-\n@Override\npublic int size(int bix) {\n- return rlen * clen;\n- }\n-\n- @Override\n- public long capacity() {\n- return (data!=null) ? data.length : -1;\n- }\n-\n- @Override\n- public long countNonZeros() {\n- return UtilFunctions.computeNnz(data, 0, rlen*clen);\n- }\n-\n- @Override\n- public int countNonZeros(int r) {\n- return UtilFunctions.computeNnz(data, r*clen, clen);\n- }\n-\n- @Override\n- public long countNonZeros(int rl, int ru, int cl, int cu) {\n- long nnz = 0;\n- if( cl == 0 && cu == clen ) { //specific case: all cols\n- nnz += UtilFunctions.computeNnz(data, rl*clen, (ru-rl)*clen);\n- }\n- else {\n- for( int i=rl, ix=rl*clen; i<ru; i++, ix+=clen )\n- nnz += UtilFunctions.computeNnz(data, ix+cl, cu-cl);\n- }\n- return nnz;\n- }\n-\n- @Override\n- public double[][] values() {\n- return new double[][]{data};\n- }\n-\n- @Override\n- public double[] values(int r) {\n- return data;\n- }\n-\n- @Override\n- public double[] valuesAt(int bix) {\n- return data;\n- }\n-\n- @Override\n- public int index(int r) {\n- return 0;\n- }\n-\n- @Override\n- public int pos(int r) {\n- return r * clen;\n- }\n-\n- @Override\n- public int pos(int r, int c) {\n- return r * clen + c;\n- }\n-\n- @Override\n- public void incr(int r, int c) {\n- data[pos(r, c)] ++;\n- }\n-\n- @Override\n- public void incr(int r, int c, double delta) {\n- data[pos(r, c)] += delta;\n- }\n-\n- @Override\n- public DenseBlock set(double v) {\n- Arrays.fill(data, 0, rlen*clen, v);\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int rl, int ru, int cl, int cu, double v) {\n- if( cl==0 && cu == clen )\n- Arrays.fill(data, rl*clen, ru*clen, v);\n- else\n- for(int i=rl, ix=rl*clen; i<ru; i++, ix+=clen)\n- Arrays.fill(data, ix+cl, ix+cu, v);\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int r, int c, double v) {\n- data[pos(r, c)] = v;\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(DenseBlock db) {\n- System.arraycopy(db.valuesAt(0), 0, data, 0, rlen*clen);\n- return this;\n- }\n-\n- @Override\n- public DenseBlock 
set(int rl, int ru, int cl, int cu, DenseBlock db) {\n- double[] a = db.valuesAt(0);\n- if( cl == 0 && cu == clen)\n- System.arraycopy(a, 0, data, rl*clen+cl, (int)db.size());\n- else {\n- int len = cu - cl;\n- for(int i=rl, ix1=0, ix2=rl*clen+cl; i<ru; i++, ix1+=len, ix2+=clen)\n- System.arraycopy(a, ix1, data, ix2, len);\n- }\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int r, double[] v) {\n- System.arraycopy(v, 0, data, pos(r), clen);\n- return this;\n- }\n-\n- @Override\n- public double get(int r, int c) {\n- return data[pos(r, c)];\n- }\n-\n- @Override\n- public String toString() {\n- StringBuilder sb = new StringBuilder();\n- for(int i=0, ix=0; i<rlen; i++, ix+=clen) {\n- for(int j=0; j<clen; j++) {\n- sb.append(data[ix+j]);\n- sb.append(\"\\t\");\n- }\n- sb.append(\"\\n\");\n- }\n- return sb.toString();\n+ return (int)size();\n}\n}\n" }, { "change_type": "RENAME", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB_FP64.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "diff": "@@ -26,23 +26,11 @@ import java.util.Arrays;\nimport org.tugraz.sysds.runtime.util.UtilFunctions;\n-public class DenseBlockDRB_FP64 extends DenseBlock\n+public class DenseBlockFP64 extends DenseBlockDRB\n{\nprivate static final long serialVersionUID = 8546723684649816489L;\nprivate double[] data;\n- private int rlen;\n- private int clen;\n-\n- public DenseBlockDRB_FP64(int rlen, int clen) {\n- reset(rlen, clen, 0);\n- }\n-\n- public DenseBlockDRB_FP64(double[] data, int rlen, int clen) {\n- this.data = data;\n- this.rlen = rlen;\n- this.clen = clen;\n- }\n@Override\npublic void reset() {\n@@ -69,46 +57,6 @@ public class DenseBlockDRB_FP64 extends DenseBlock\nthis.clen = clen;\n}\n- @Override\n- public int numRows() {\n- return rlen;\n- }\n-\n- @Override\n- public int numBlocks() {\n- return 1;\n- }\n-\n- @Override\n- public int blockSize() {\n- return rlen;\n- }\n-\n- @Override\n- public int blockSize(int bix) {\n- return rlen;\n- }\n-\n- @Override\n- public boolean isContiguous() {\n- return true;\n- }\n-\n- @Override\n- public boolean isContiguous(int rl, int ru) {\n- return true;\n- }\n-\n- @Override\n- public long size() {\n- return rlen * clen;\n- }\n-\n- @Override\n- public int size(int bix) {\n- return rlen * clen;\n- }\n-\n@Override\npublic long capacity() {\nreturn (data!=null) ? 
data.length : -1;\n@@ -228,17 +176,4 @@ public class DenseBlockDRB_FP64 extends DenseBlock\npublic double get(int r, int c) {\nreturn data[pos(r, c)];\n}\n-\n- @Override\n- public String toString() {\n- StringBuilder sb = new StringBuilder();\n- for(int i=0, ix=0; i<rlen; i++, ix+=clen) {\n- for(int j=0; j<clen; j++) {\n- sb.append(data[ix+j]);\n- sb.append(\"\\t\");\n- }\n- sb.append(\"\\n\");\n- }\n- return sb.toString();\n- }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockLDRB.java", "diff": "@@ -269,20 +269,7 @@ public class DenseBlockLDRB extends DenseBlock\nreturn data[index(r)][pos(r, c)];\n}\n- @Override\n- public String toString() {\n- StringBuilder sb = new StringBuilder();\n- for(int i=0; i<rlen; i++) {\n- double[] data = values(i);\n- int ix = pos(i);\n- for(int j=0; j<clen; j++) {\n- sb.append(data[ix+j]);\n- sb.append(\"\\t\");\n- }\n- sb.append(\"\\n\");\n- }\n- return sb.toString();\n- }\n+\nprivate static int blocksize(int rlen, int clen) {\nreturn Math.min(rlen, Integer.MAX_VALUE / clen);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/SparseBlockCOO.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/SparseBlockCOO.java", "diff": "@@ -39,6 +39,9 @@ import org.tugraz.sysds.runtime.util.UtilFunctions;\n* is no constant-time random access to individual rows. Similar to CSR, the nnz\n* is limited to Integer.MAX_VALUE.\n*\n+ * In contrast to COO matrix formats with three arrays, we use 1+#dims arrays\n+ * to represent the values and indexes of all dimensions.\n+ *\n*/\npublic class SparseBlockCOO extends SparseBlock\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/TensorBlock.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/TensorBlock.java", "diff": "@@ -20,6 +20,7 @@ import java.io.Serializable;\nimport org.tugraz.sysds.common.Types.ValueType;\nimport org.tugraz.sysds.runtime.DMLRuntimeException;\n+import org.tugraz.sysds.runtime.util.UtilFunctions;\npublic class TensorBlock implements Serializable\n{\n@@ -236,10 +237,7 @@ public class TensorBlock implements Serializable\n}\npublic long getLength() {\n- long ret = 1;\n- for(int i=0; i<getNumDims(); i++)\n- ret *= getDim(i);\n- return ret;\n+ return UtilFunctions.prod(_dims);\n}\npublic boolean isSparse() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/util/UtilFunctions.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/util/UtilFunctions.java", "diff": "@@ -668,4 +668,18 @@ public class UtilFunctions\nIterable<T> iterable = () -> iter;\nreturn StreamSupport.stream(iterable.spliterator(), false);\n}\n+\n+ public static long prod(int[] arr) {\n+ long ret = 1;\n+ for(int i=0; i<arr.length; i++)\n+ ret *= arr[i];\n+ return ret;\n+ }\n+\n+ public static long prod(int[] arr, int off) {\n+ long ret = 1;\n+ for(int i=off; i<arr.length; i++)\n+ ret *= arr[i];\n+ return ret;\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
DenseBlock extension for tensors and multiple data types, part I
49,736
01.11.2018 05:05:10
25,200
be2b3e220401c0244bb5df33ddfa8125996066b6
Extend shadow buffer for double precision. This commit also prepares SystemML for very low precision.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ShadowBuffer.java", "diff": "@@ -22,9 +22,9 @@ import static jcuda.runtime.JCuda.cudaMemcpy;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n+import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -36,22 +36,18 @@ import jcuda.Sizeof;\npublic class ShadowBuffer {\nprivate static final Log LOG = LogFactory.getLog(ShadowBuffer.class.getName());\n- GPUObject gpuObj;\n- float[] shadowPointer = null;\n+ private GPUObject gpuObj;\n+ // shadowPointer can be double[], float[] or short[].\n+ private Object shadowPointer = null;\nprivate static boolean _warnedAboutShadowBuffer = false;\nprivate static long EVICTION_SHADOW_BUFFER_CURR_BYTES = 0;\nprivate static long EVICTION_SHADOW_BUFFER_MAX_BYTES;\nstatic {\n- if(DMLScript.FLOATING_POINT_PRECISION.equals(\"double\")) {\n- EVICTION_SHADOW_BUFFER_MAX_BYTES = 0;\n- }\n- else {\ndouble shadowBufferSize = ConfigurationManager.getDMLConfig().getDoubleValue(DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\nif(shadowBufferSize < 0 || shadowBufferSize > 1)\nthrow new RuntimeException(\"Incorrect value (\" + shadowBufferSize + \") for the configuration:\" + DMLConfig.EVICTION_SHADOW_BUFFERSIZE);\nEVICTION_SHADOW_BUFFER_MAX_BYTES = (long) (((double)InfrastructureAnalyzer.getLocalMaxMemory())*shadowBufferSize);\n}\n- }\npublic ShadowBuffer(GPUObject gpuObj) {\nthis.gpuObj = gpuObj;\n@@ -73,9 +69,21 @@ public class ShadowBuffer {\npublic void moveFromDevice(String instName) {\nlong start = ConfigurationManager.isStatistics() ? 
System.nanoTime() : 0;\nint numElems = GPUObject.toIntExact(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\n+ if(LibMatrixCUDA.sizeOfDataType == Sizeof.DOUBLE) {\n+ shadowPointer = new double[numElems];\n+ }\n+ else if(LibMatrixCUDA.sizeOfDataType == Sizeof.FLOAT) {\nshadowPointer = new float[numElems];\n- EVICTION_SHADOW_BUFFER_CURR_BYTES += getSizeOfFloat(shadowPointer.length);\n- cudaMemcpy(Pointer.to(shadowPointer), gpuObj.jcudaDenseMatrixPtr, getSizeOfDataType(numElems), jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\n+ }\n+ else if(LibMatrixCUDA.sizeOfDataType == Sizeof.SHORT) {\n+ shadowPointer = new short[numElems];\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Unsupported datatype\");\n+ }\n+ long numBytes = getNumBytesOfShadowBuffer();\n+ EVICTION_SHADOW_BUFFER_CURR_BYTES += numBytes;\n+ cudaMemcpy(getHostShadowPointer(), gpuObj.jcudaDenseMatrixPtr, numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost);\ngpuObj.getGPUContext().cudaFreeHelper(instName, gpuObj.jcudaDenseMatrixPtr, true);\ngpuObj.jcudaDenseMatrixPtr = null;\nif (ConfigurationManager.isStatistics()) {\n@@ -87,14 +95,37 @@ public class ShadowBuffer {\n}\n}\n- private long getSizeOfFloat(long numElems) {\n- return numElems*Sizeof.FLOAT;\n+ private long getNumBytesOfShadowBuffer() {\n+ long numElems = 0;\n+ switch(LibMatrixCUDA.sizeOfDataType) {\n+ case Sizeof.DOUBLE:\n+ numElems = ((double[])shadowPointer).length;\n+ break;\n+ case Sizeof.FLOAT:\n+ numElems = ((float[])shadowPointer).length;\n+ break;\n+ case Sizeof.SHORT:\n+ numElems = ((short[])shadowPointer).length;\n+ break;\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported datatype of size:\" + LibMatrixCUDA.sizeOfDataType);\n}\n-\n- private long getSizeOfDataType(long numElems) {\nreturn numElems*LibMatrixCUDA.sizeOfDataType;\n}\n+ private Pointer getHostShadowPointer() {\n+ switch(LibMatrixCUDA.sizeOfDataType) {\n+ case Sizeof.DOUBLE:\n+ return Pointer.to((double[])shadowPointer);\n+ case Sizeof.FLOAT:\n+ return Pointer.to((float[])shadowPointer);\n+ case Sizeof.SHORT:\n+ return Pointer.to((short[])shadowPointer);\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported datatype of size:\" + LibMatrixCUDA.sizeOfDataType);\n+ }\n+ }\n+\n/**\n* Move the data from shadow buffer to Matrix object\n*/\n@@ -103,9 +134,24 @@ public class ShadowBuffer {\nMatrixBlock tmp = new MatrixBlock(GPUObject.toIntExact(gpuObj.mat.getNumRows()), GPUObject.toIntExact(gpuObj.mat.getNumColumns()), false);\ntmp.allocateDenseBlock();\ndouble [] tmpArr = tmp.getDenseBlockValues();\n- for(int i = 0; i < shadowPointer.length; i++) {\n- tmpArr[i] = shadowPointer[i];\n+ if(LibMatrixCUDA.sizeOfDataType == Sizeof.DOUBLE) {\n+ double[] sArr = ((double[])shadowPointer);\n+ System.arraycopy(sArr, 0, tmpArr, 0, sArr.length);\n+ }\n+ else if(LibMatrixCUDA.sizeOfDataType == Sizeof.FLOAT) {\n+ float[] sArr = ((float[])shadowPointer);\n+ for(int i = 0; i < sArr.length; i++) {\n+ tmpArr[i] = sArr[i];\n+ }\n}\n+ else if(LibMatrixCUDA.sizeOfDataType == Sizeof.SHORT) {\n+ // short[] sArr = ((short[])shadowPointer);\n+ throw new DMLRuntimeException(\"Unsupported operation: moveToHost for half precision\");\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Unsupported datatype of size:\" + LibMatrixCUDA.sizeOfDataType);\n+ }\n+\ngpuObj.mat.acquireModify(tmp);\ngpuObj.mat.release();\nclearShadowPointer();\n@@ -125,9 +171,9 @@ public class ShadowBuffer {\n*/\npublic void moveToDevice() {\nlong start = ConfigurationManager.isStatistics() ? 
System.nanoTime() : 0;\n- long numBytes = getSizeOfDataType(shadowPointer.length);\n+ long numBytes = getNumBytesOfShadowBuffer();\ngpuObj.jcudaDenseMatrixPtr = gpuObj.getGPUContext().allocate(null, numBytes);\n- cudaMemcpy(gpuObj.jcudaDenseMatrixPtr, Pointer.to(shadowPointer), numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\n+ cudaMemcpy(gpuObj.jcudaDenseMatrixPtr, getHostShadowPointer(), numBytes, jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice);\nclearShadowPointer();\nif (ConfigurationManager.isStatistics()) {\nlong totalTime = System.nanoTime() - start;\n@@ -144,8 +190,8 @@ public class ShadowBuffer {\n* @return true if the given GPU object is eligible to be shadow buffered\n*/\npublic boolean isEligibleForBuffering(boolean isEviction, boolean eagerDelete) {\n- if(LibMatrixCUDA.sizeOfDataType == jcuda.Sizeof.FLOAT && isEviction && eagerDelete && !gpuObj.isDensePointerNull()) {\n- long numBytes = getSizeOfFloat(gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns());\n+ if(isEviction && eagerDelete && !gpuObj.isDensePointerNull()) {\n+ long numBytes = gpuObj.mat.getNumRows()*gpuObj.mat.getNumColumns()*LibMatrixCUDA.sizeOfDataType;\nboolean ret = EVICTION_SHADOW_BUFFER_CURR_BYTES + numBytes <= EVICTION_SHADOW_BUFFER_MAX_BYTES;\nif(!ret && !_warnedAboutShadowBuffer) {\nLOG.warn(\"Shadow buffer is full, so using CP bufferpool instead. Consider increasing sysml.gpu.eviction.shadow.bufferSize.\");\n@@ -163,7 +209,7 @@ public class ShadowBuffer {\n*/\npublic void clearShadowPointer() {\nif(shadowPointer != null) {\n- EVICTION_SHADOW_BUFFER_CURR_BYTES -= getSizeOfFloat(shadowPointer.length);\n+ EVICTION_SHADOW_BUFFER_CURR_BYTES -= getNumBytesOfShadowBuffer();\n}\nshadowPointer = null;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Extend shadow buffer for double precision - This commit also prepares SystemML for very low precision.
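
To illustrate the sizing rule this commit relies on, here is a minimal sketch (a loaded SystemML configuration is assumed; the wrapper class name and printout are illustrative) that derives the shadow-buffer capacity the same way the ShadowBuffer static initializer in the diff above does:

    import org.apache.sysml.conf.ConfigurationManager;
    import org.apache.sysml.conf.DMLConfig;
    import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;

    public class ShadowBufferBudget {
        public static void main(String[] args) {
            // Same sizing rule as the static initializer in the diff above:
            // a fraction in [0,1] of the local max memory is reserved for the shadow buffer.
            double fraction = ConfigurationManager.getDMLConfig()
                .getDoubleValue(DMLConfig.EVICTION_SHADOW_BUFFERSIZE);
            if (fraction < 0 || fraction > 1)
                throw new RuntimeException("Invalid sysml.gpu.eviction.shadow.bufferSize: " + fraction);
            long maxBytes = (long) (((double) InfrastructureAnalyzer.getLocalMaxMemory()) * fraction);
            System.out.println("Shadow buffer capacity (bytes): " + maxBytes);
        }
    }
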
49,741
03.11.2018 05:32:54
-19,080
bf4ba16b9aaa9afee20a3f1c03b0ff49c5346a9d
Fixes formatting issues and warnings. Fixes bug causing explain to sometimes not be printed. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -414,7 +414,7 @@ public class DMLScript\nExecutionContext ec = null;\ntry {\nec = ScriptExecutorUtils.executeRuntimeProgram(\n- rtprog, dmlconf, ConfigurationManager.isStatistics() ?\n+ rtprog, ConfigurationManager.isStatistics() ?\nConfigurationManager.getDMLOptions().getStatisticsMaxHeavyHitters() : 0,\nnew LocalVariableMap(), null, SystemMLAPI.DMLScript, gCtxs);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -116,7 +116,7 @@ public class ScriptExecutorUtils {\nboolean init) {\nDMLScript.SCRIPT_TYPE = scriptType;\n- Program rtprog = null;\n+ Program rtprog;\nif (ConfigurationManager.isGPU() && !IS_JCUDA_AVAILABLE)\nthrow new RuntimeException(\"Incorrect usage: Cannot use the GPU backend without JCuda libraries. Hint: Include systemml-*-extra.jar (compiled using mvn package -P distribution) into the classpath.\");\n@@ -161,7 +161,7 @@ public class ScriptExecutorUtils {\n//init working directories (before usage by following compilation steps)\nif(api != SystemMLAPI.JMLC)\n- if ((api == SystemMLAPI.MLContext && init) || api != SystemMLAPI.MLContext)\n+ if (api != SystemMLAPI.MLContext || init)\nDMLScript.initHadoopExecution( dmlconf );\n@@ -222,8 +222,9 @@ public class ScriptExecutorUtils {\nExplainCounts counts = Explain.countDistributedOperations(rtprog);\nStatistics.resetNoOfCompiledJobs( counts.numJobs );\n//explain plan of program (hops or runtime)\n- if( DMLScript.EXPLAIN != ExplainType.NONE )\n- System.out.println(Explain.display(prog, rtprog, DMLScript.EXPLAIN, counts));\n+ if( ConfigurationManager.getDMLOptions().explainType != ExplainType.NONE )\n+ System.out.println(\n+ Explain.display(prog, rtprog, ConfigurationManager.getDMLOptions().explainType, counts));\nStatistics.stopCompileTimer();\n}\n@@ -232,9 +233,6 @@ public class ScriptExecutorUtils {\n// don't chain ParseException (for cleaner error output)\nthrow pe;\n}\n- catch(IOException ex) {\n- throw new DMLException(ex);\n- }\ncatch(Exception ex) {\nthrow new DMLException(ex);\n}\n@@ -248,8 +246,6 @@ public class ScriptExecutorUtils {\n*\n* @param rtprog\n* runtime program\n- * @param dmlconf\n- * dml configuration\n* @param statisticsMaxHeavyHitters\n* maximum number of statistics to print\n* @param symbolTable\n@@ -262,7 +258,7 @@ public class ScriptExecutorUtils {\n* list of GPU contexts\n* @return execution context\n*/\n- public static ExecutionContext executeRuntimeProgram(Program rtprog, DMLConfig dmlconf, int statisticsMaxHeavyHitters,\n+ public static ExecutionContext executeRuntimeProgram(Program rtprog, int statisticsMaxHeavyHitters,\nLocalVariableMap symbolTable, HashSet<String> outputVariables,\nSystemMLAPI api, List<GPUContext> gCtxs) {\nboolean exceptionThrown = false;\n@@ -299,7 +295,7 @@ public class ScriptExecutorUtils {\nif(outputVariables != null) {\nfor(String outVar : outputVariables) {\nData data = ec.getVariable(outVar);\n- if(data != null && data instanceof MatrixObject) {\n+ if(data instanceof MatrixObject) {\nfor(GPUContext gCtx : ec.getGPUContexts()) {\nGPUObject gpuObj = ((MatrixObject)data).getGPUObject(gCtx);\nif(gpuObj != null && gpuObj.isDirty()) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "new_path": 
"src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "diff": "@@ -32,7 +32,6 @@ import org.apache.sysml.api.ConfigurableAPI;\nimport org.apache.sysml.api.DMLException;\nimport org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.ScriptExecutorUtils.SystemMLAPI;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n@@ -47,8 +46,6 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n-import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n-import org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.Data;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\n@@ -100,6 +97,9 @@ public class PreparedScript implements ConfigurableAPI\n_inVarReuse = new HashMap<>(that._inVarReuse);\n_dmlconf = that._dmlconf;\n_cconf = that._cconf;\n+ _isStatisticsEnabled = that._isStatisticsEnabled;\n+ _gatherMemStats = that._gatherMemStats;\n+ _gpuCtx = that._gpuCtx;\n}\n/**\n@@ -463,7 +463,7 @@ public class PreparedScript implements ConfigurableAPI\n//create and populate execution context\nScriptExecutorUtils.executeRuntimeProgram(\n- _prog, _dmlconf, ConfigurationManager.isStatistics() ?\n+ _prog, ConfigurationManager.isStatistics() ?\nConfigurationManager.getDMLOptions().getStatisticsMaxHeavyHitters() : 0,\n_vars, _outVarnames, SystemMLAPI.JMLC, _gpuCtx);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "package org.apache.sysml.api.mlcontext;\n-import java.io.IOException;\nimport java.util.Collections;\nimport java.util.HashSet;\nimport java.util.List;\n@@ -31,21 +30,15 @@ import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.ScriptExecutorUtils.SystemMLAPI;\n-import org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.api.mlcontext.MLContext.ExecutionType;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.conf.DMLOptions;\n-import org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.hops.OptimizerUtils;\n-import org.apache.sysml.hops.rewrite.ProgramRewriter;\n-import org.apache.sysml.hops.rewrite.RewriteRemovePersistentReadWrite;\n-import org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.DMLTranslator;\n-import org.apache.sysml.parser.LanguageException;\nimport org.apache.sysml.parser.ParseException;\nimport org.apache.sysml.parser.ParserFactory;\nimport org.apache.sysml.parser.ParserWrapper;\n@@ -56,7 +49,6 @@ import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport 
org.apache.sysml.utils.Explain;\n-import org.apache.sysml.utils.Explain.ExplainCounts;\nimport org.apache.sysml.utils.Explain.ExplainType;\nimport org.apache.sysml.utils.Statistics;\n@@ -102,7 +94,6 @@ public class ScriptExecutor {\nprotected DMLConfig config;\nprotected DMLProgram dmlProgram;\n- protected DMLTranslator dmlTranslator;\nprotected Program runtimeProgram;\nprotected ExecutionContext executionContext;\nprotected Script script;\n@@ -138,80 +129,6 @@ public class ScriptExecutor {\nConfigurationManager.setGlobalConfig(config);\n}\n- /**\n- * Construct DAGs of high-level operators (HOPs) for each block of\n- * statements.\n- */\n- protected void constructHops() {\n- try {\n- dmlTranslator.constructHops(dmlProgram);\n- } catch (LanguageException | ParseException e) {\n- throw new MLContextException(\"Exception occurred while constructing HOPS (high-level operators)\", e);\n- }\n- }\n-\n- /**\n- * Apply static rewrites, perform intra-/inter-procedural analysis to\n- * propagate size information into functions, apply dynamic rewrites, and\n- * compute memory estimates for all HOPs.\n- */\n- protected void rewriteHops() {\n- try {\n- dmlTranslator.rewriteHopsDAG(dmlProgram);\n- } catch (LanguageException | HopsException | ParseException | DMLRuntimeException e) {\n- throw new MLContextException(\"Exception occurred while rewriting HOPS (high-level operators)\", e);\n- }\n- }\n-\n- /**\n- * Output a description of the program to standard output.\n- */\n- protected void showExplanation() {\n- if (!explain)\n- return;\n-\n- try {\n- ExplainType explainType = (explainLevel != null) ? explainLevel.getExplainType() : ExplainType.RUNTIME;\n- System.out.println(Explain.display(dmlProgram, runtimeProgram, explainType, null));\n- } catch (Exception e) {\n- throw new MLContextException(\"Exception occurred while explaining dml program\", e);\n- }\n- }\n-\n- /**\n- * Construct DAGs of low-level operators (LOPs) based on the DAGs of\n- * high-level operators (HOPs).\n- */\n- protected void constructLops() {\n- try {\n- dmlTranslator.constructLops(dmlProgram);\n- } catch (ParseException | LanguageException | HopsException | LopsException e) {\n- throw new MLContextException(\"Exception occurred while constructing LOPS (low-level operators)\", e);\n- }\n- }\n-\n- /**\n- * Create runtime program. For each namespace, translate function statement\n- * blocks into function program blocks and add these to the runtime program.\n- * For each top-level block, add the program block to the runtime program.\n- */\n- protected void generateRuntimeProgram() {\n- try {\n- runtimeProgram = dmlTranslator.getRuntimeProgram(dmlProgram, config);\n- } catch (LanguageException | DMLRuntimeException | LopsException | IOException | HopsException e) {\n- throw new MLContextException(\"Exception occurred while generating runtime program\", e);\n- }\n- }\n-\n- /**\n- * Count the number of compiled MR Jobs/Spark Instructions in the runtime\n- * program and set this value in the statistics.\n- */\n- protected void countCompiledMRJobsAndSparkInstructions() {\n- ExplainCounts counts = Explain.countDistributedOperations(runtimeProgram);\n- Statistics.resetNoOfCompiledJobs(counts.numJobs);\n- }\n-\n/**\n* Set the global flags (for example: statistics, gpu, etc).\n*/\n@@ -256,25 +173,6 @@ public class ScriptExecutor {\n* Compile a DML or PYDML script. 
This will help analysis of DML programs\n* that have dynamic recompilation flag set to false without actually executing it.\n*\n- * This is broken down into the following\n- * primary methods:\n- *\n- * <ol>\n- * <li>{@link #setup(Script)}</li>\n- * <li>{@link #parseScript()}</li>\n- * <li>{@link #liveVariableAnalysis()}</li>\n- * <li>{@link #validateScript()}</li>\n- * <li>{@link #constructHops()}</li>\n- * <li>{@link #rewriteHops()}</li>\n- * <li>{@link #rewritePersistentReadsAndWrites()}</li>\n- * <li>{@link #constructLops()}</li>\n- * <li>{@link #generateRuntimeProgram()}</li>\n- * <li>{@link #showExplanation()}</li>\n- * <li>{@link #countCompiledMRJobsAndSparkInstructions()}</li>\n- * <li>{@link #initializeCachingAndScratchSpace()}</li>\n- * <li>{@link #cleanupRuntimeProgram()}</li>\n- * </ol>\n- *\n* @param script\n* the DML or PYDML script to compile\n* @param performHOPRewrites\n@@ -321,8 +219,8 @@ public class ScriptExecutor {\n.convertInputParametersForParser(script.getInputParameters(), script.getScriptType());\nExplain.ExplainType explainType = Explain.ExplainType.NONE;\n- if(explain && explainLevel != null) {\n- explainType = explainLevel.getExplainType();\n+ if(explain) {\n+ explainType = (explainLevel == null) ? Explain.ExplainType.RUNTIME : explainLevel.getExplainType();\n}\nRUNTIME_PLATFORM rtplatform = DMLOptions.defaultOptions.execMode;\nif(executionType != null) {\n@@ -337,9 +235,9 @@ public class ScriptExecutor {\ncompile(script);\ntry {\n- executionContext = ScriptExecutorUtils.executeRuntimeProgram(getRuntimeProgram(), getConfig(),\n+ executionContext = ScriptExecutorUtils.executeRuntimeProgram(getRuntimeProgram(),\nstatistics ? statisticsMaxHeavyHitters : 0, script.getSymbolTable(),\n- new HashSet<String>(getScript().getOutputVariables()), SystemMLAPI.MLContext, gCtxs);\n+ new HashSet<>(getScript().getOutputVariables()), SystemMLAPI.MLContext, gCtxs);\n} catch (DMLRuntimeException e) {\nthrow new MLContextException(\"Exception occurred while executing runtime program\", e);\n} finally {\n@@ -410,41 +308,6 @@ public class ScriptExecutor {\n}\n}\n- /**\n- * If {@code maintainSymbolTable} is true, delete all 'remove variable'\n- * instructions so as to maintain the values in the symbol table, which are\n- * useful when working interactively in an environment such as the Spark\n- * Shell. Otherwise, only delete 'remove variable' instructions for\n- * registered outputs.\n- */\n- protected void cleanupRuntimeProgram() {\n- if (maintainSymbolTable) {\n- MLContextUtil.deleteRemoveVariableInstructions(runtimeProgram);\n- } else {\n- JMLCUtils.cleanupRuntimeProgram(runtimeProgram, (script.getOutputVariables() == null) ? new String[0]\n- : script.getOutputVariables().toArray(new String[0]));\n- }\n- }\n-\n- /**\n- * Check security, create scratch space, cleanup working directories,\n- * initialize caching, and reset statistics.\n- */\n- protected void initializeCachingAndScratchSpace() {\n- if (!init)\n- return;\n-\n- try {\n- DMLScript.initHadoopExecution(config);\n- } catch (ParseException e) {\n- throw new MLContextException(\"Exception occurred initializing caching and scratch space\", e);\n- } catch (DMLRuntimeException e) {\n- throw new MLContextException(\"Exception occurred initializing caching and scratch space\", e);\n- } catch (IOException e) {\n- throw new MLContextException(\"Exception occurred initializing caching and scratch space\", e);\n- }\n- }\n-\n/**\n* Parse the script into an ANTLR parse tree, and convert this parse tree\n* into a SystemML program. 
Parsing includes lexical/syntactic analysis.\n@@ -460,29 +323,6 @@ public class ScriptExecutor {\n}\n}\n- /**\n- * Replace persistent reads and writes with transient reads and writes in\n- * the symbol table.\n- */\n- protected void rewritePersistentReadsAndWrites() {\n- LocalVariableMap symbolTable = script.getSymbolTable();\n- if (symbolTable != null) {\n- String[] inputs = (script.getInputVariables() == null) ? new String[0]\n- : script.getInputVariables().toArray(new String[0]);\n- String[] outputs = (script.getOutputVariables() == null) ? new String[0]\n- : script.getOutputVariables().toArray(new String[0]);\n- RewriteRemovePersistentReadWrite rewrite = new RewriteRemovePersistentReadWrite(inputs, outputs,\n- script.getSymbolTable());\n- ProgramRewriter programRewriter = new ProgramRewriter(rewrite);\n- try {\n- programRewriter.rewriteProgramHopDAGs(dmlProgram);\n- } catch (LanguageException | HopsException e) {\n- throw new MLContextException(\"Exception occurred while rewriting persistent reads and writes\", e);\n- }\n- }\n-\n- }\n-\n/**\n* Set the SystemML configuration properties.\n*\n@@ -494,50 +334,6 @@ public class ScriptExecutor {\nConfigurationManager.setGlobalConfig(config);\n}\n- /**\n- * Liveness analysis is performed on the program, obtaining sets of live-in\n- * and live-out variables by forward and backward passes over the program.\n- */\n- protected void liveVariableAnalysis() {\n- try {\n- dmlTranslator = new DMLTranslator(dmlProgram);\n- dmlTranslator.liveVariableAnalysis(dmlProgram);\n- } catch (DMLRuntimeException e) {\n- throw new MLContextException(\"Exception occurred during live variable analysis\", e);\n- } catch (LanguageException e) {\n- throw new MLContextException(\"Exception occurred during live variable analysis\", e);\n- }\n- }\n-\n- /**\n- * Semantically validate the program's expressions, statements, and\n- * statement blocks in a single recursive pass over the program. 
Constant\n- * and size propagation occurs during this step.\n- */\n- protected void validateScript() {\n- try {\n- dmlTranslator.validateParseTree(dmlProgram);\n- } catch (LanguageException | ParseException e) {\n- throw new MLContextException(\"Exception occurred while validating script\", e);\n- }\n- }\n-\n- /**\n- * Check that the Script object has a type (DML or PYDML) and a string\n- * representing the content of the Script.\n- */\n- protected void checkScriptHasTypeAndString() {\n- if (script == null) {\n- throw new MLContextException(\"Script is null\");\n- } else if (script.getScriptType() == null) {\n- throw new MLContextException(\"ScriptType (DML or PYDML) needs to be specified\");\n- } else if (script.getScriptString() == null) {\n- throw new MLContextException(\"Script string is null\");\n- } else if (StringUtils.isBlank(script.getScriptString())) {\n- throw new MLContextException(\"Script string is blank\");\n- }\n- }\n-\n/**\n* Obtain the program\n*\n@@ -547,15 +343,6 @@ public class ScriptExecutor {\nreturn dmlProgram;\n}\n- /**\n- * Obtain the translator\n- *\n- * @return the translator\n- */\n- public DMLTranslator getDmlTranslator() {\n- return dmlTranslator;\n- }\n-\n/**\n* Obtain the runtime program\n*\n@@ -591,9 +378,7 @@ public class ScriptExecutor {\n* {@code true} if explanation should be output, {@code false}\n* otherwise\n*/\n- public void setExplain(boolean explain) {\n- this.explain = explain;\n- }\n+ public void setExplain(boolean explain) { this.explain = explain; }\n/**\n* Whether or not statistics about the DML/PYDML program should be output to\n@@ -664,8 +449,7 @@ public class ScriptExecutor {\nif (explainLevel == null) {\nDMLScript.EXPLAIN = ExplainType.NONE;\n} else {\n- ExplainType explainType = explainLevel.getExplainType();\n- DMLScript.EXPLAIN = explainType;\n+ DMLScript.EXPLAIN = explainLevel.getExplainType();\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/ConfigurationManager.java", "new_path": "src/main/java/org/apache/sysml/conf/ConfigurationManager.java", "diff": "@@ -37,10 +37,10 @@ import org.apache.sysml.utils.lite.LiteCheck;\npublic class ConfigurationManager\n{\n/** Global cached job conf for read-only operations */\n- private static JobConf _rJob = null;\n+ private static JobConf _rJob;\n/** Global DML configuration (read or defaults) */\n- private static DMLConfig _dmlconf = null;\n+ private static DMLConfig _dmlconf;\n/** Local DML configuration for thread-local config updates */\nprivate static ThreadLocalDMLConfig _ldmlconf = new ThreadLocalDMLConfig();\n@@ -52,7 +52,7 @@ public class ConfigurationManager\nprivate static ThreadLocalDMLOptions _ldmlOptions = new ThreadLocalDMLOptions();\n/** Global compiler configuration (defaults) */\n- private static CompilerConfig _cconf = null;\n+ private static CompilerConfig _cconf;\n/** Local compiler configuration for thead-local config updates */\nprivate static ThreadLocalCompilerConfig _lcconf = new ThreadLocalCompilerConfig();\n@@ -208,7 +208,7 @@ public class ConfigurationManager\n*/\npublic static boolean getCompilerConfigFlag(ConfigType key) {\nCompilerConfig cconf = getCompilerConfig();\n- return (cconf!=null) ? 
cconf.getBool(key) : false;\n+ return (cconf!=null) && cconf.getBool(key);\n}\n/////////////////////////////////////\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -26,7 +26,6 @@ import java.util.Map.Entry;\nimport java.util.Set;\nimport java.util.StringTokenizer;\n-import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.util.ProgramConverter;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/JMLCTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/JMLCTests.java", "diff": "@@ -61,7 +61,7 @@ public class JMLCTests extends GPUTests {\n// I.e. Z = X %*% W1 %*% W2 %*% W3 ...\n// numMatrices determines the number of matrices in the sequences. The size of the matrices can be set\n// in executeDMLScript\n- static ScriptContainer generateDMLScript(int numMatrices) {\n+ private static ScriptContainer generateDMLScript(int numMatrices) {\nScriptContainer SC = new ScriptContainer();\nString[] inputVarNames = new String[numMatrices + 1];\ninputVarNames[0] = \"x\";\n@@ -71,13 +71,15 @@ public class JMLCTests extends GPUTests {\n{\nString name = \"W\" + ix;\ninputVarNames[ix+1] = name;\n- dml.append(name + \" = read(\\\"/tmp/\" + name + \".mtx\\\", rows=-1, cols=-1)\\n\");\n+ String readCmd = name + \" = read(\\\"/tmp/\" + name + \".mtx\\\", rows=-1, cols=-1)\\n\";\n+ dml.append(readCmd);\n}\ndml.append(\"Z = x %*% W0\\n\");\nfor (int ix=1; ix<numMatrices; ix++)\n{\n- dml.append(\"Z = Z %*% W\" + ix + \"\\n\");\n+ String multiplyCmd = \"Z = Z %*% W\" + ix + \"\\n\";\n+ dml.append(multiplyCmd);\n}\ndml.append(\"while (-1 > 1)\\n print(as.scalar(Z[1,1]))\\n\");\n@@ -93,31 +95,27 @@ public class JMLCTests extends GPUTests {\n// Set this parameter larger to use more memory. The parameter numMatrices must be set to the same value as\n// in generateDMLScript. 
The parameter pinWeights controls whether weight matrices should be\n// pinned in memory between script invocations.\n- static void executeDMLScript(PreparedScript script, int n, int rows, int numMatrices, boolean pinWeights) {\n+ private static void executeDMLScript(PreparedScript script, int n, int rows, int numMatrices, boolean pinWeights) {\nfor (int ix=0; ix<numMatrices; ix++)\n- script.setMatrix(\"W\" + ix, randomMatrix(rows, rows, 0.0,1.0, 1.0), pinWeights);\n+ script.setMatrix(\"W\" + ix, randomMatrix(rows, rows), pinWeights);\nfor (int ix=0; ix<n; ix++)\n{\n- script.setMatrix(\"x\", randomMatrix(rows, rows, 0.0, 1.0, 1.0), false);\n+ script.setMatrix(\"x\", randomMatrix(rows, rows), false);\nscript.executeScript();\nif (!pinWeights)\nfor (int iy=0; iy<numMatrices; iy++)\nscript.setMatrix(\n- \"W\" + iy, randomMatrix(rows, rows, 0.0,1.0, 1.0), false);\n+ \"W\" + iy, randomMatrix(rows, rows), false);\n}\n}\n- static double[][] randomMatrix(\n- int rows, int cols, double min, double max, double sparsity) {\n+ private static double[][] randomMatrix(int rows, int cols) {\ndouble[][] matrix = new double[rows][cols];\nRandom random = new Random(System.currentTimeMillis());\nfor (int i = 0; i < rows; i++) {\nfor (int j = 0; j < cols; j++) {\n- if (random.nextDouble() > sparsity) {\n- continue;\n- }\n- matrix[i][j] = (random.nextDouble() * (max - min) + min);\n+ matrix[i][j] = random.nextDouble();\n}\n}\nreturn matrix;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java", "diff": "@@ -413,10 +413,7 @@ public class MLContextTest extends MLContextTestBase {\nsetExpectedStdOut(testString);\nScript script = new Script(\"print('\" + testString + \"');\", org.apache.sysml.api.mlcontext.ScriptType.DML);\n- ScriptExecutor scriptExecutor = new ScriptExecutor() {\n- @Override\n- protected void showExplanation() {}\n- };\n+ ScriptExecutor scriptExecutor = new ScriptExecutor();\nml.execute(script, scriptExecutor);\n}\n@@ -427,10 +424,7 @@ public class MLContextTest extends MLContextTestBase {\nsetExpectedStdOut(testString);\nScript script = new Script(\"print('\" + testString + \"')\", org.apache.sysml.api.mlcontext.ScriptType.PYDML);\n- ScriptExecutor scriptExecutor = new ScriptExecutor() {\n- @Override\n- protected void showExplanation() {}\n- };\n+ ScriptExecutor scriptExecutor = new ScriptExecutor();\nml.execute(script, scriptExecutor);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1325] Fixes formatting issues and warnings. Fixes bug causing explain to sometimes not be printed. Closes #838.
49,736
03.11.2018 05:40:00
-19,080
912b4701875d4de0db8327479398c32607f4687d
Improve the performance of LSTM forward on GPU This commit improves the performance of LSTM forward by reducing unnecessary ping-pongs between CPU-GPU due to left indexing. There are no performance gains for CPU execution. Closes
[ { "change_type": "MODIFY", "old_path": "scripts/nn/layers/lstm.dml", "new_path": "scripts/nn/layers/lstm.dml", "diff": "@@ -89,13 +89,13 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b, int T,\nfor (t in 1:T) { # each timestep\nX_t = X[,(t-1)*D+1:t*D] # shape (N, D)\ninput = cbind(X_t, out_prev) # shape (N, D+M)\n- ifog = input %*% W + b # input, forget, output, and g gates; shape (N, 4M)\n- ifog[,1:3*M] = sigmoid::forward(ifog[,1:3*M]) # i,f,o gates squashed with sigmoid\n- ifog[,3*M+1:4*M] = tanh::forward(ifog[,3*M+1:4*M]) # g gate squashed with tanh\n+ ifog_raw = input %*% W + b # input, forget, output, and g gates; shape (N, 4M)\n+ ifo = sigmoid::forward(ifog_raw[,1:3*M]) # i,f,o gates squashed with sigmoid\n+ g = tanh::forward(ifog_raw[,3*M+1:4*M]) # g gate squashed with tanh\n# c_t = f*prev_c + i*g\n- c = ifog[,M+1:2*M]*c_prev + ifog[,1:M]*ifog[,3*M+1:4*M] # shape (N, M)\n+ c = ifo[,M+1:2*M]*c_prev + ifo[,1:M]*g # shape (N, M)\n# out_t = o*tanh(c)\n- out_t = ifog[,2*M+1:3*M] * tanh::forward(c) # shape (N, M)\n+ out_t = ifo[,2*M+1:3*M] * tanh::forward(c) # shape (N, M)\n# store\nif (return_sequences) {\n@@ -108,7 +108,7 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b, int T,\nc_prev = c\ncache_out[t,] = matrix(out_t, rows=1, cols=N*M) # reshape\ncache_c[t,] = matrix(c, rows=1, cols=N*M) # reshape\n- cache_ifog[t,] = matrix(ifog, rows=1, cols=N*4*M) # reshape\n+ cache_ifog[t,] = matrix(cbind(ifo, g), rows=1, cols=N*4*M) # reshape\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Improve the performance of LSTM forward on GPU - This commit improves the performance of LSTM forward by reducing unnecessary ping pongs between CPU-GPU due to left indexing. - There is no performance gains for CPU execution. Closes #756.
49,736
04.11.2018 14:04:02
-19,080
8606754eaf6af43dbeab5cf5aa5a3d7621bef889
Setting floating point precision in JMLC In current master, the configuration sysml.floating.point.precision is ignored. This commit fixes that issue.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java", "new_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java", "diff": "@@ -44,7 +44,6 @@ import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.conf.DMLOptions;\nimport org.apache.sysml.hops.codegen.SpoofCompiler;\nimport org.apache.sysml.parser.DataExpression;\n-import org.apache.sysml.parser.LanguageException;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\n@@ -61,7 +60,6 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.transform.TfUtils;\nimport org.apache.sysml.runtime.transform.meta.TfMetaUtils;\nimport org.apache.sysml.runtime.util.DataConverter;\n-import org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.utils.Explain;\nimport org.apache.wink.json4j.JSONObject;\n@@ -945,5 +943,6 @@ public class Connection implements Closeable\n//set thread-local configurations for compilation and read\nConfigurationManager.setLocalConfig(_dmlconf);\nConfigurationManager.setLocalConfig(_cconf);\n+ DMLScript.setGlobalFlags(_dmlconf);\n}\n}\n\\ No newline at end of file\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1325] Setting floating point precision in JMLC - In current master, the configuration sysml.floating.point.precision is ignored. This commit fixes that issue
49,736
04.11.2018 14:07:38
-19,080
beb1a1d19a5a2710b55bd41d36a5d8085fb0afda
Support recomputation of activations to reduce the memory footprint Added a configuration property sysml.gpu.recompute.activations to enable recomputation of ReLU. This configuration is disabled by default, but can be enabled for large networks. Closes
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<!-- Allocator to use to allocate GPU device memory. Supported values are cuda, unified_memory (default: cuda) -->\n<sysml.gpu.memory.allocator>cuda</sysml.gpu.memory.allocator>\n+\n+ <!-- Should perform recomputation of activations such as ReLU to reduce memory consumption. Set this to true\n+ when performing inference or for training very large networks (default: false) -->\n+ <sysml.gpu.recompute.activations>false</sysml.gpu.recompute.activations>\n</root>\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -96,6 +96,7 @@ public class DMLConfig\npublic static final String FLOATING_POINT_PRECISION = \"sysml.floating.point.precision\"; // String to specify the datatype to use internally: supported values are double, single\npublic static final String PRINT_GPU_MEMORY_INFO = \"sysml.gpu.print.memoryInfo\";\npublic static final String EVICTION_SHADOW_BUFFERSIZE = \"sysml.gpu.eviction.shadow.bufferSize\";\n+ public static final String GPU_RECOMPUTE_ACTIVATIONS = \"sysml.gpu.recompute.activations\";\n// supported prefixes for custom map/reduce configurations\npublic static final String PREFIX_MAPRED = \"mapred\";\n@@ -147,6 +148,7 @@ public class DMLConfig\n_defaultVals.put(SYNCHRONIZE_GPU, \"false\" );\n_defaultVals.put(CACHING_BUFFER_SIZE, \"0.15\" );\n_defaultVals.put(EAGER_CUDA_FREE, \"false\" );\n+ _defaultVals.put(GPU_RECOMPUTE_ACTIVATIONS, \"false\" );\n_defaultVals.put(FLOATING_POINT_PRECISION, \"double\" );\n}\n@@ -430,7 +432,7 @@ public class DMLConfig\nCODEGEN, CODEGEN_COMPILER, CODEGEN_OPTIMIZER, CODEGEN_PLANCACHE, CODEGEN_LITERALS,\nEXTRA_FINEGRAINED_STATS, STATS_MAX_WRAP_LEN, PRINT_GPU_MEMORY_INFO, CACHING_BUFFER_SIZE,\nAVAILABLE_GPUS, SYNCHRONIZE_GPU, EAGER_CUDA_FREE, FLOATING_POINT_PRECISION, GPU_EVICTION_POLICY, EVICTION_SHADOW_BUFFERSIZE,\n- GPU_MEMORY_ALLOCATOR, GPU_MEMORY_UTILIZATION_FACTOR\n+ GPU_MEMORY_ALLOCATOR, GPU_MEMORY_UTILIZATION_FACTOR, GPU_RECOMPUTE_ACTIVATIONS\n};\nStringBuilder sb = new StringBuilder();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "new_path": "src/main/java/org/apache/sysml/hops/DnnOp.java", "diff": "package org.apache.sysml.hops;\nimport org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.lops.DnnTransform;\nimport org.apache.sysml.lops.DnnTransform.OperationTypes;\n@@ -47,6 +48,8 @@ public class DnnOp extends MultiThreadedHop\nprivate static final boolean THROW_ERROR_IF_INFERRED_SHAPE_MISMATCH = true;\n// -------------------------------------------------------------------------\n+ private static final boolean GPU_RECOMPUTE_ACTIVATIONS = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.GPU_RECOMPUTE_ACTIVATIONS);\n+\n// Specifies the type of this hop\nprivate Hop.OpOpDnn op;\n@@ -273,11 +276,16 @@ public class DnnOp extends MultiThreadedHop\n// by reducing unnecessary sparse-to-dense-to-sparse conversion.\n// For other backends, this operators is not necessary as it reduces an additional relu operator.\nHop parentReLU = isInputReLU(inputs.get(0));\n- if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == OpOpDnn.MAX_POOL && parentReLU != null) {\n+\n+ 
if(OptimizerUtils.ALLOW_OPERATOR_FUSION &&\n+ (et == ExecType.CP || (et == ExecType.GPU && GPU_RECOMPUTE_ACTIVATIONS))\n+ && op == OpOpDnn.MAX_POOL && parentReLU != null) {\nlhsInputLop = parentReLU.constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\n- else if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == OpOpDnn.MAX_POOL_BACKWARD && parentReLU != null) {\n+ else if(OptimizerUtils.ALLOW_OPERATOR_FUSION &&\n+ (et == ExecType.CP || (et == ExecType.GPU && GPU_RECOMPUTE_ACTIVATIONS))\n+ && op == OpOpDnn.MAX_POOL_BACKWARD && parentReLU != null) {\nlhsInputLop = parentReLU.constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING_BACKWARD;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -52,6 +52,8 @@ public class GPUInstructionParser extends InstructionParser\nString2GPUInstructionType.put( \"conv2d_backward_data\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"maxpooling\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"maxpooling_backward\", GPUINSTRUCTION_TYPE.Dnn);\n+ String2GPUInstructionType.put( \"relu_maxpooling\", GPUINSTRUCTION_TYPE.Dnn);\n+ String2GPUInstructionType.put( \"relu_maxpooling_backward\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"avgpooling\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"avgpooling_backward\", GPUINSTRUCTION_TYPE.Dnn);\nString2GPUInstructionType.put( \"bias_add\", GPUINSTRUCTION_TYPE.Dnn);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/DnnGPUInstruction.java", "diff": "@@ -238,7 +238,8 @@ public class DnnGPUInstruction extends GPUInstruction {\nreturn new DnnGPUInstruction(in1, in2, out, opcode, str, stride,\npadding, input_shape, filter_shape, Double.parseDouble(parts[16]));\n}\n- else if( opcode.equalsIgnoreCase(\"maxpooling_backward\") || opcode.equalsIgnoreCase(\"avgpooling_backward\") ) {\n+ else if( opcode.equalsIgnoreCase(\"maxpooling_backward\") || opcode.equalsIgnoreCase(\"relu_maxpooling_backward\")\n+ || opcode.equalsIgnoreCase(\"avgpooling_backward\") ) {\nboolean withMaxPoolOut = false;\nif(parts.length == 18) {\nwithMaxPoolOut = true;\n@@ -298,7 +299,8 @@ public class DnnGPUInstruction extends GPUInstruction {\nreturn new DnnGPUInstruction(in1, in2, in3, out, opcode, str, stride,\npadding, input_shape, filter_shape, Double.parseDouble(parts[17]));\n}\n- else if (opcode.equalsIgnoreCase(\"maxpooling\") || opcode.equalsIgnoreCase(\"avgpooling\")) {\n+ else if (opcode.equalsIgnoreCase(\"maxpooling\") || opcode.equalsIgnoreCase(\"relu_maxpooling\")\n+ || opcode.equalsIgnoreCase(\"avgpooling\")) {\nInstructionUtils.checkNumFields(parts, 15);\nCPOperand in1 = new CPOperand(parts[1]);\nCPOperand out = new CPOperand(parts[14]);\n@@ -1005,8 +1007,19 @@ public class DnnGPUInstruction extends GPUInstruction {\nLibMatrixCuDNN.conv2dBackwardData(ec.getGPUContext(0), getExtendedOpcode(), filter, dout, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, _intermediateMemoryBudget);\n}\n- else if (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"avgpooling\")) {\n+ else if (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")\n+ || 
instOpcode.equalsIgnoreCase(\"avgpooling\")) {\nMatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\n+ Pointer x = null;\n+ if(instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\n+ Pointer tmpX = LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, image, instName);\n+ long CHW = ((long)C)*((long)H)*((long)W);\n+ x = gCtx.allocate(instName, ((long)N)*CHW*LibMatrixCUDA.sizeOfDataType);\n+ LibMatrixCuDNN.getCudaKernels(gCtx).launchKernel(\"relu\",\n+ ExecutionConfig.getConfigForSimpleMatrixOperations(toInt(N), toInt(CHW)),\n+ tmpX, x, N, toInt(CHW));\n+ ec.releaseMatrixInputForGPUInstruction(_input1.getName());\n+ }\nif(image.getNumRows() != N || image.getNumColumns() != C*H*W)\nthrow new DMLRuntimeException(\"Incorrect dimensions for image in maxpooling: \" +\n@@ -1014,11 +1027,30 @@ public class DnnGPUInstruction extends GPUInstruction {\nMatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, _output.getName(), N, C * P * Q);\nPoolingType poolType = instOpcode.equalsIgnoreCase(\"maxpooling\") ? PoolingType.MAX : PoolingType.AVG;\n+ if(instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\n+ LibMatrixCuDNN.pooling(ec.getGPUContext(0), getExtendedOpcode(), x, out, N, C, H, W,\n+ K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, poolType, _intermediateMemoryBudget);\n+ gCtx.cudaFreeHelper(instName, x, gCtx.EAGER_CUDA_FREE);\n+ }\n+ else {\nLibMatrixCuDNN.pooling(ec.getGPUContext(0), getExtendedOpcode(), image, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, poolType, _intermediateMemoryBudget);\n}\n- else if (instOpcode.equalsIgnoreCase(\"maxpooling_backward\") || instOpcode.equalsIgnoreCase(\"avgpooling_backward\")) {\n+ }\n+ else if (instOpcode.equalsIgnoreCase(\"maxpooling_backward\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\")\n+ || instOpcode.equalsIgnoreCase(\"avgpooling_backward\")) {\nMatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\n+ Pointer x = null;\n+ if(instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\")) {\n+ Pointer tmpX = LibMatrixCuDNN.getDensePointerForCuDNN(gCtx, image, instName);\n+ long CHW = ((long)C)*((long)H)*((long)W);\n+ x = gCtx.allocate(instName, ((long)N)*CHW*LibMatrixCUDA.sizeOfDataType);\n+ LibMatrixCuDNN.getCudaKernels(gCtx).launchKernel(\"relu\",\n+ ExecutionConfig.getConfigForSimpleMatrixOperations(toInt(N), toInt(CHW)),\n+ tmpX, x, N, toInt(CHW));\n+ ec.releaseMatrixInputForGPUInstruction(_input1.getName());\n+ }\n+\nMatrixObject dout = getMatrixInputForGPUInstruction(ec, _input2.getName());\nMatrixObject maxPoolOutput = _input3 != null ? getMatrixInputForGPUInstruction(ec, _input3.getName()) : null;\nif(dout.getNumRows() != N || dout.getNumColumns() != C*P*Q)\n@@ -1029,18 +1061,26 @@ public class DnnGPUInstruction extends GPUInstruction {\nMatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, _output.getName(), N, C * H * W);\nPoolingType poolType = instOpcode.equalsIgnoreCase(\"maxpooling_backward\") ? 
PoolingType.MAX : PoolingType.AVG;\n+ if(instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\")) {\n+ LibMatrixCuDNN.poolingBackward(ec.getGPUContext(0), getExtendedOpcode(), x, dout, maxPoolOutput, out, N, C, H, W,\n+ K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, poolType, _intermediateMemoryBudget);\n+ gCtx.cudaFreeHelper(instName, x, gCtx.EAGER_CUDA_FREE);\n+ }\n+ else {\nLibMatrixCuDNN.poolingBackward(ec.getGPUContext(0), getExtendedOpcode(), image, dout, maxPoolOutput, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, poolType, _intermediateMemoryBudget);\n}\n+ }\nelse {\nthrow new DMLRuntimeException(\"Unsupported GPU context for \" + instOpcode);\n}\n// release inputs/outputs\n+ if(!instOpcode.equalsIgnoreCase(\"relu_maxpooling\") && !instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\"))\nec.releaseMatrixInputForGPUInstruction(_input1.getName());\n- boolean isPool = instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"avgpooling\");\n- boolean isPoolBackward = instOpcode.equalsIgnoreCase(\"maxpooling_backward\") || instOpcode.equalsIgnoreCase(\"avgpooling_backward\");\n+ boolean isPool = instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"avgpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\");\n+ boolean isPoolBackward = instOpcode.equalsIgnoreCase(\"maxpooling_backward\") || instOpcode.equalsIgnoreCase(\"avgpooling_backward\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\");\nif ( !isPool )\nec.releaseMatrixInputForGPUInstruction(_input2.getName());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -644,6 +644,44 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n}\n}\n+ /**\n+ * performs maxpooling on GPU by exploiting cudnnPoolingForward(...)\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param x image as pointer\n+ * @param outputBlock output matrix\n+ * @param N batch size\n+ * @param C number of channels\n+ * @param H height of image\n+ * @param W width of image\n+ * @param K number of filters\n+ * @param R height of filter\n+ * @param S width of filter\n+ * @param pad_h vertical padding\n+ * @param pad_w horizontal padding\n+ * @param stride_h horizontal stride\n+ * @param stride_w vertical stride\n+ * @param P (H - R + 1 + 2*pad_h)/stride_h\n+ * @param Q (W - S + 1 + 2*pad_w)/stride_w\n+ * @param poolingType type of pooling\n+ * @param intermediateMemoryBudget intermediate memory budget\n+ */\n+ public static void pooling(GPUContext gCtx, String instName, Pointer x,\n+ MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n+ int S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\n+ int Q, PoolingType poolingType, double intermediateMemoryBudget) {\n+ long CHW = C*H*W; long CPQ = C*P*Q;\n+ long NCHW = N*CHW; long NCPQ = N*CPQ;\n+\n+ if(NCHW < maxNumElementsOfCuDNNTensor && NCPQ < maxNumElementsOfCuDNNTensor) {\n+ Pointer y = getDensePointerForCuDNN(gCtx, outputBlock, instName);\n+ cudnnPoolingHelper(gCtx, instName, x, y, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, poolingType);\n+ }\n+ else {\n+ throwCuDNNDimensionError(N, CHW, N, CPQ);\n+ }\n+ }\n+\nprivate static void cudnnPoolingHelper(GPUContext gCtx, String instName, Pointer x,\nPointer y, int N, int C, int H, int W, int K, int 
R,\nint S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\n@@ -738,6 +776,53 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n}\n}\n+ /**\n+ * Performs maxpoolingBackward on GPU by exploiting cudnnPoolingBackward(...)\n+ * This method computes the backpropogation errors for previous layer of maxpooling operation\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param x image as dense pointer\n+ * @param dout delta matrix, output of previous layer\n+ * @param maxpoolOutput (optional and can be null) output of maxpool forward function\n+ * @param outputBlock output matrix\n+ * @param N batch size\n+ * @param C number of channels\n+ * @param H height of image\n+ * @param W width of image\n+ * @param K number of filters\n+ * @param R height of filter\n+ * @param S width of filter\n+ * @param pad_h vertical padding\n+ * @param pad_w horizontal padding\n+ * @param stride_h horizontal stride\n+ * @param stride_w vertical stride\n+ * @param P (H - R + 1 + 2*pad_h)/stride_h\n+ * @param Q (W - S + 1 + 2*pad_w)/stride_w\n+ * @param poolingType type of pooling\n+ * @param intermediateMemoryBudget intermediate memory budget\n+ */\n+ public static void poolingBackward(GPUContext gCtx, String instName, Pointer x, MatrixObject dout,\n+ MatrixObject maxpoolOutput, MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n+ int S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\n+ int Q, PoolingType poolingType, double intermediateMemoryBudget) {\n+ long CHW = C*H*W; long CPQ = C*P*Q;\n+ long NCHW = N*CHW; long NCPQ = N*CPQ;\n+\n+ final boolean isMaxPoolOutputProvided = maxpoolOutput != null;\n+\n+ if(NCHW < maxNumElementsOfCuDNNTensor && NCPQ < maxNumElementsOfCuDNNTensor) {\n+ // Filter and output are accounted as dense in the memory estimation for conv2dBackwardData\n+ Pointer dx = getDensePointerForCuDNN(gCtx, outputBlock, instName);\n+ Pointer dy = getDensePointerForCuDNN(gCtx, dout, instName);\n+ Pointer y = isMaxPoolOutputProvided ? getDensePointerForCuDNN(gCtx, maxpoolOutput, instName) : null;\n+ cudnnPoolingBackwardHelper(gCtx, instName, x, dy, y, dx, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, poolingType);\n+\n+ }\n+ else {\n+ throwCuDNNDimensionError(N, CHW, N, CPQ);\n+ }\n+ }\n+\nprivate static void cudnnPoolingBackwardHelper(GPUContext gCtx, String instName,\nPointer x, Pointer dy, Pointer y, Pointer dx,\nint N, int C, int H, int W, int K, int R,\n@@ -1457,7 +1542,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n* @param instName name of the instruction\n* @return jcuda pointer\n*/\n- protected static Pointer getDensePointerForCuDNN(GPUContext gCtx, MatrixObject image, String instName) {\n+ public static Pointer getDensePointerForCuDNN(GPUContext gCtx, MatrixObject image, String instName) {\nlong numElems = image.getNumRows()*image.getNumColumns();\nif(numElems > maxNumElementsOfCuDNNTensor) {\nthrow new DMLRuntimeException(\"CuDNN restriction: the size of input tensor cannot have greater than 2 giga-elements, but has \" + numElems + \" (i.e. [\" + image.getNumRows() + \" X \" + image.getNumColumns() + \"]). Hint: try reducing the mini-batch size.\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Support recomputation of activations to reduce the memory footprint - Added a configuration property sysml.gpu.recompute.activations to enable recomputation of ReLU. - This configuration is disabled by default, but can be enabled for large networks. Closes #841.
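
A minimal sketch of how the new property can be inspected programmatically, using only the DMLConfig constant and accessor shown in the diff above; the wrapper class is illustrative and a loaded SystemML configuration is assumed:

    import org.apache.sysml.conf.ConfigurationManager;
    import org.apache.sysml.conf.DMLConfig;

    public class RecomputeActivationsFlag {
        public static void main(String[] args) {
            // Reads the flag the same way DnnOp does in the diff above; it defaults to false
            // and only affects the fused relu_maxpooling operators on the GPU backend.
            boolean recompute = ConfigurationManager.getDMLConfig()
                .getBooleanValue(DMLConfig.GPU_RECOMPUTE_ACTIVATIONS);
            System.out.println("sysml.gpu.recompute.activations = " + recompute);
        }
    }
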
49,736
12.11.2018 18:38:20
-19,080
c022f1a5a4479c7cfd380190d20d81b7747c7b92
Fix a performance bug when using GPU backend with JMLC This commit simplifies the clearTemporaryMemory logic in GPU memory manager and performs aggressive cleanup to reduce the memory pressure. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -287,6 +287,7 @@ public class ScriptExecutorUtils {\n} finally { // ensure cleanup/shutdown\nif (ConfigurationManager.isGPU() && !ec.getGPUContexts().isEmpty()) {\ntry {\n+ HashSet<MatrixObject> outputMatrixObjects = new HashSet<>();\n// -----------------------------------------------------------------\n// The below code pulls the output variables on the GPU to the host. This is required especially when:\n// The output variable was generated as part of a MLContext session with GPU enabled\n@@ -302,12 +303,13 @@ public class ScriptExecutorUtils {\ngpuObj.acquireHostRead(null);\n}\n}\n+ outputMatrixObjects.add(((MatrixObject)data));\n}\n}\n}\n// -----------------------------------------------------------------\nfor(GPUContext gCtx : ec.getGPUContexts()) {\n- gCtx.clearTemporaryMemory();\n+ gCtx.clearTemporaryMemory(outputMatrixObjects);\n}\n} catch (Exception e1) {\nexceptionThrown = true;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -31,6 +31,8 @@ import static jcuda.runtime.JCuda.cudaGetDeviceCount;\nimport static jcuda.runtime.JCuda.cudaSetDevice;\nimport static jcuda.runtime.JCuda.cudaSetDeviceFlags;\n+import java.util.HashSet;\n+\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript.EvictionPolicy;\n@@ -424,8 +426,8 @@ public class GPUContext {\nmemoryManager.clearMemory();\n}\n- public void clearTemporaryMemory() {\n- memoryManager.clearTemporaryMemory();\n+ public void clearTemporaryMemory(HashSet<MatrixObject> outputMatrixObjects) {\n+ memoryManager.clearTemporaryMemory(outputMatrixObjects);\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMatrixMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMatrixMemoryManager.java", "diff": "@@ -20,7 +20,6 @@ package org.apache.sysml.runtime.instructions.gpu.context;\nimport java.util.Collections;\nimport java.util.HashSet;\n-import java.util.List;\nimport java.util.Set;\nimport java.util.stream.Collectors;\n@@ -46,33 +45,6 @@ public class GPUMatrixMemoryManager {\ngpuObjects.add(gpuObj);\n}\n- /**\n- * Get list of all Pointers in a GPUObject\n- * @param gObj gpu object\n- * @return set of pointers\n- */\n- Set<Pointer> getPointers(GPUObject gObj) {\n- Set<Pointer> ret = new HashSet<>();\n- if(!gObj.isDensePointerNull() && gObj.getSparseMatrixCudaPointer() != null) {\n- LOG.warn(\"Matrix allocated in both dense and sparse format\");\n- }\n- if(!gObj.isDensePointerNull()) {\n- // && gObj.evictedDenseArr == null - Ignore evicted array\n- ret.add(gObj.getDensePointer());\n- }\n- if(gObj.getSparseMatrixCudaPointer() != null) {\n- CSRPointer sparsePtr = gObj.getSparseMatrixCudaPointer();\n- if(sparsePtr != null) {\n- if(sparsePtr.rowPtr != null)\n- ret.add(sparsePtr.rowPtr);\n- else if(sparsePtr.colInd != null)\n- ret.add(sparsePtr.colInd);\n- else if(sparsePtr.val != null)\n- ret.add(sparsePtr.val);\n- }\n- }\n- return ret;\n- }\n/**\n* list of allocated {@link GPUObject} instances allocated on {@link GPUContext#deviceNum} GPU\n@@ -91,18 +63,22 @@ public class 
GPUMatrixMemoryManager {\nSet<GPUObject> getGpuObjects(Set<Pointer> pointers) {\nSet<GPUObject> gObjs = new HashSet<>();\nfor (GPUObject g : gpuObjects) {\n- if (!Collections.disjoint(getPointers(g), pointers))\n+ if (!Collections.disjoint(g.getPointers(), pointers))\ngObjs.add(g);\n}\nreturn gObjs;\n}\n+ Set<GPUObject> getGpuObjects() {\n+ return gpuObjects;\n+ }\n+\n/**\n* Return all pointers in the first section\n* @return all pointers in this section\n*/\nSet<Pointer> getPointers() {\n- return gpuObjects.stream().flatMap(gObj -> getPointers(gObj).stream()).collect(Collectors.toSet());\n+ return gpuObjects.stream().flatMap(gObj -> gObj.getPointers().stream()).collect(Collectors.toSet());\n}\n/**\n@@ -116,7 +92,7 @@ public class GPUMatrixMemoryManager {\nreturn gpuObjects.stream().filter(\ngObj -> (gObj.isLocked() == locked && gObj.isDirty() == dirty) ||\n(gObj.mat.isCleanupEnabled() == isCleanupEnabled)).flatMap(\n- gObj -> getPointers(gObj).stream()).collect(Collectors.toSet());\n+ gObj -> gObj.getPointers().stream()).collect(Collectors.toSet());\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUMemoryManager.java", "diff": "@@ -39,6 +39,7 @@ import org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\nimport org.apache.sysml.utils.GPUStatistics;\n@@ -518,20 +519,27 @@ public class GPUMemoryManager {\n/**\n* Clears up the memory used by non-dirty pointers.\n*/\n- public void clearTemporaryMemory() {\n- // To record the cuda block sizes needed by allocatedGPUObjects, others are cleared up.\n- Set<Pointer> unlockedDirtyPointers = matrixMemoryManager.getPointers(false, true, false);\n- Set<Pointer> temporaryPointers = nonIn(allPointers.keySet(), unlockedDirtyPointers);\n- for (Pointer tmpPtr : temporaryPointers) {\n- guardedCudaFree(tmpPtr);\n+ public void clearTemporaryMemory(HashSet<MatrixObject> outputMatrixObjects) {\n+ Set<Pointer> donotClearPointers = new HashSet<>();\n+ // First clean up all GPU objects except:\n+ // 1. Output matrix objects\n+ // 2. GPU objects that are currently being used (i.e. locked)\n+ // 3. 
Matrix object are\n+ Set<GPUObject> allGPUObjects = new HashSet<>(matrixMemoryManager.getGpuObjects());\n+ for (GPUObject gpuObj : allGPUObjects) {\n+ boolean isOutput = outputMatrixObjects.contains(gpuObj.mat);\n+ if(!isOutput && !gpuObj.isLocked() && gpuObj.mat.isCleanupEnabled()) {\n+ gpuObj.clearData(null, gpuObj.getGPUContext().EAGER_CUDA_FREE);\n+ }\n+ else {\n+ donotClearPointers.addAll(gpuObj.getPointers());\n+ }\n}\n- // Also set the pointer(s) to null in the corresponding GPU objects to avoid double freeing pointers\n- Set<GPUObject> gObjs = matrixMemoryManager.getGpuObjects(temporaryPointers);\n- for (GPUObject g : gObjs) {\n- g.jcudaDenseMatrixPtr = null;\n- g.jcudaSparseMatrixPtr = null;\n- removeGPUObject(g);\n+ // Next, cleanup workspace and other temporary pointers\n+ Set<Pointer> temporaryPointers = nonIn(allPointers.keySet(), donotClearPointers);\n+ for (Pointer tmpPtr : temporaryPointers) {\n+ guardedCudaFree(tmpPtr);\n}\n}\n@@ -579,18 +587,18 @@ public class GPUMemoryManager {\nif(gpuObj.isLocked()) {\nnumLockedGPUObjects++;\nsizeOfLockedGPUObjects += gpuObj.getSizeOnDevice();\n- numLockedPointers += matrixMemoryManager.getPointers(gpuObj).size();\n+ numLockedPointers += gpuObj.getPointers().size();\n}\nelse {\nif(gpuObj.isDirty()) {\nnumUnlockedDirtyGPUObjects++;\nsizeOfUnlockedDirtyGPUObjects += gpuObj.getSizeOnDevice();\n- numUnlockedDirtyPointers += matrixMemoryManager.getPointers(gpuObj).size();\n+ numUnlockedDirtyPointers += gpuObj.getPointers().size();\n}\nelse {\nnumUnlockedNonDirtyGPUObjects++;\nsizeOfUnlockedNonDirtyGPUObjects += gpuObj.getSizeOnDevice();\n- numUnlockedNonDirtyPointers += matrixMemoryManager.getPointers(gpuObj).size();\n+ numUnlockedNonDirtyPointers += gpuObj.getPointers().size();\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -24,6 +24,8 @@ import static jcuda.runtime.JCuda.cudaMemset;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\n+import java.util.HashSet;\n+import java.util.Set;\nimport java.util.concurrent.atomic.AtomicLong;\nimport java.util.concurrent.atomic.LongAdder;\n@@ -151,6 +153,28 @@ public class GPUObject {\njcudaSparseMatrixPtr = null;\n}\n+ Set<Pointer> getPointers() {\n+ Set<Pointer> ret = new HashSet<>();\n+ if(!isDensePointerNull() && getSparseMatrixCudaPointer() != null) {\n+ LOG.warn(\"Matrix allocated in both dense and sparse format\");\n+ }\n+ if(!isDensePointerNull()) {\n+ // && evictedDenseArr == null - Ignore evicted array\n+ ret.add(getDensePointer());\n+ }\n+ if(getSparseMatrixCudaPointer() != null) {\n+ CSRPointer sparsePtr = getSparseMatrixCudaPointer();\n+ if(sparsePtr != null) {\n+ if(sparsePtr.rowPtr != null)\n+ ret.add(sparsePtr.rowPtr);\n+ else if(sparsePtr.colInd != null)\n+ ret.add(sparsePtr.colInd);\n+ else if(sparsePtr.val != null)\n+ ret.add(sparsePtr.val);\n+ }\n+ }\n+ return ret;\n+ }\n/**\n* Convenience method to directly set the dense matrix pointer on GPU\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1325] Fix a performance bug when using GPU backend with JMLC This commit simplifies the clearTemporaryMemory logic in GPU memory manager and performs aggressive cleanup to reduce the memory pressure. Closes #842.
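A condensed sketch of the cleanup policy this commit introduces, with names taken from the GPUMemoryManager diff above: GPU objects survive the cleanup if they back an output matrix, are currently locked, or have cleanup disabled; everything else is cleared, and whatever pointers remain (workspace and other temporaries) are freed directly. This relies on the surrounding class fields (matrixMemoryManager, allPointers, nonIn, guardedCudaFree), so it is a reading aid rather than standalone code.

Set<Pointer> keep = new HashSet<>();
// iterate over a copy, since clearData() removes entries from the managed set
for (GPUObject g : new HashSet<>(matrixMemoryManager.getGpuObjects())) {
    boolean isOutput = outputMatrixObjects.contains(g.mat);
    if (!isOutput && !g.isLocked() && g.mat.isCleanupEnabled())
        g.clearData(null, g.getGPUContext().EAGER_CUDA_FREE); // safe to evict now
    else
        keep.addAll(g.getPointers()); // outputs, locked, or cleanup-disabled objects survive
}
// free the remaining workspace/temporary pointers
for (Pointer tmp : nonIn(allPointers.keySet(), keep))
    guardedCudaFree(tmp);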
49,741
12.11.2018 18:44:31
-19,080
54f5ea975f5e5a209d7b016c52c10e61bf02af7e
[MINOR] Fixes bug causing stats output to be cleared in JMLC Closes
[ { "change_type": "MODIFY", "old_path": "docs/jmlc.md", "new_path": "docs/jmlc.md", "diff": "@@ -53,7 +53,7 @@ dependent on the nature of the business use case being addressed.\nJMLC can be configured to gather runtime statistics, as in the MLContext API, by calling Connection's `setStatistics()`\nmethod with a value of `true`. JMLC can also be configured to gather statistics on the memory used by matrices and\n-frames in the DML script. To enable collection of memory statistics, call Connection's `gatherMemStats()` method\n+frames in the DML script. To enable collection of memory statistics, call PreparedScript's `gatherMemStats()` method\nwith a value of `true`. When finegrained statistics are enabled in `SystemML.conf`, JMLC will also report the variables\nin the DML script which used the most memory. An example showing how to enable statistics in JMLC is presented in the\nsection below.\n@@ -123,10 +123,6 @@ the resulting `\"predicted_y\"` matrix. We repeat this process. When done, we clos\n// obtain connection to SystemML\nConnection conn = new Connection();\n- // turn on gathering of runtime statistics and memory use\n- conn.setStatistics(true);\n- conn.gatherMemStats(true);\n-\n// read in and precompile DML script, registering inputs and outputs\nString dml = conn.readScript(\"scoring-example.dml\");\nPreparedScript script = conn.prepareScript(dml, new String[] { \"W\", \"X\" }, new String[] { \"predicted_y\" }, false);\n@@ -135,6 +131,10 @@ the resulting `\"predicted_y\"` matrix. We repeat this process. When done, we clos\nString plan = script.explain();\nSystem.out.println(plan);\n+ // turn on gathering of runtime statistics and memory use\n+ script.setStatistics(true);\n+ script.gatherMemStats(true);\n+\ndouble[][] mtx = matrix(4, 3, new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 });\ndouble[][] result = null;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "new_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "diff": "@@ -454,6 +454,10 @@ public class PreparedScript implements ConfigurableAPI\n//add reused variables\n_vars.putAll(_inVarReuse);\n+ // clear thread local configurations (left over from previous run)\n+ ConfigurationManager.clearLocalConfigs();\n+ ConfigurationManager.resetStatistics();\n+\n//set thread-local configurations\nConfigurationManager.setLocalConfig(_dmlconf);\nConfigurationManager.setLocalConfig(_cconf);\n@@ -475,10 +479,6 @@ public class PreparedScript implements ConfigurableAPI\nrvars.addResult(ovar, tmpVar);\n}\n- // clear prior thread local configurations (for subsequent run)\n- ConfigurationManager.clearLocalConfigs();\n- ConfigurationManager.resetStatistics();\n-\nreturn rvars;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixes bug causing stats output to be cleared in JMLC Closes #843.
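Following the docs/jmlc.md change in this commit, runtime and memory statistics are enabled on the PreparedScript rather than on the Connection, and thread-local configurations are now reset at the start of each run instead of at the end. A minimal sketch of the updated call sequence; the dml string and the input data are assumed to be prepared as in the documented scoring example:

Connection conn = new Connection();
PreparedScript script = conn.prepareScript(dml,
    new String[] { "W", "X" }, new String[] { "predicted_y" }, false);

// statistics and memory stats now live on the prepared script, not on the connection
script.setStatistics(true);
script.gatherMemStats(true);

// executeScript() clears leftover thread-local configs before running,
// so the statistics of the current run are no longer wiped prematurely
ResultVariables results = script.executeScript();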
49,738
16.11.2018 17:48:03
-3,600
d58ca31699f0f9225e5c40048fe143bdd2613d7a
DenseBlock extension for tensors and multiple data types, part II
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/codegen/LibSpoofPrimitives.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/codegen/LibSpoofPrimitives.java", "diff": "@@ -22,7 +22,7 @@ package org.tugraz.sysds.runtime.codegen;\nimport java.util.Arrays;\nimport org.apache.commons.math3.util.FastMath;\n-import org.tugraz.sysds.runtime.data.DenseBlockDRB;\n+import org.tugraz.sysds.runtime.data.DenseBlockFP64;\nimport org.tugraz.sysds.runtime.functionobjects.BitwAnd;\nimport org.tugraz.sysds.runtime.functionobjects.IntegerDivide;\nimport org.tugraz.sysds.runtime.functionobjects.Modulus;\n@@ -2118,8 +2118,8 @@ public class LibSpoofPrimitives\ndouble[] c = allocVector(K*P*Q, true);\nint CRS = C*R*S, PQ = P*Q;\nLibMatrixMult.matrixMultDenseDenseMM(\n- new DenseBlockDRB(a, K, CRS), new DenseBlockDRB(b, CRS, PQ),\n- new DenseBlockDRB(c, K, PQ), PQ, CRS, 0, K, 0, PQ);\n+ new DenseBlockFP64(new int[]{K, CRS}, a), new DenseBlockFP64(new int[]{CRS, PQ}, b),\n+ new DenseBlockFP64(new int[]{K, PQ}, c), PQ, CRS, 0, K, 0, PQ);\nreturn c;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "diff": "@@ -44,8 +44,8 @@ public abstract class DenseBlock implements Serializable\nLDRB, //large dense row block\n}\n- protected int _rlen;\n- protected int _odims;\n+ protected int _rlen; //number of rows\n+ protected int _odims; //length of other dimensions\nprotected DenseBlock(int[] dims) {\nlong odims = UtilFunctions.prod(dims, 1);\n@@ -72,13 +72,29 @@ public abstract class DenseBlock implements Serializable\npublic abstract void reset(int[] dims);\n/**\n- * Resets the dense block by setting the given value.\n+ * Resets the dense block by deleting non-zeros.\n*\n* @param dims lenth and size of dimensions\n* @param v value\n*/\npublic abstract void reset(int[] dims, double v);\n+ /**\n+ * Resets the dense block by deleting non-zeros.\n+ *\n+ * @param rlen number of rows\n+ * @param odims other dimensions\n+ */\n+ public abstract void reset(int rlen, int odims);\n+\n+ /**\n+ * Resets the dense block by setting the given value.\n+ *\n+ * @param rlen number of rows\n+ * @param odims other dimensions\n+ * @param v value\n+ */\n+ public abstract void reset(int rlen, int odims, double v);\n/**\n* Get the number of rows.\n@@ -190,14 +206,6 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract long countNonZeros(int rl, int ru, int cl, int cu);\n-\n- /**\n- * Get the allocated blocks.\n- *\n- * @return blocks\n- */\n- public abstract double[][] values();\n-\n/**\n* Get the allocated block for the given row. 
This call\n* is equivalent to valuesAt(indexes(r)).\n@@ -360,9 +368,10 @@ public abstract class DenseBlock implements Serializable\npublic abstract double get(int r, int c);\n/**\n+ * Get the value of a given cell\n*\n- * @param ix\n- * @return\n+ * @param ix cell indexes\n+ * @return value\n*/\npublic abstract double get(int[] ix);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB.java", "diff": "package org.tugraz.sysds.runtime.data;\n-import java.util.Arrays;\n-\n-import org.tugraz.sysds.runtime.util.UtilFunctions;\n-\npublic abstract class DenseBlockDRB extends DenseBlock\n{\n+ private static final long serialVersionUID = 3581157975703708947L;\n+\nprotected DenseBlockDRB(int[] dims) {\nsuper(dims);\n}\n@@ -39,12 +37,12 @@ public abstract class DenseBlockDRB extends DenseBlock\n@Override\npublic int blockSize() {\n- return _dims[0];\n+ return _rlen;\n}\n@Override\npublic int blockSize(int bix) {\n- return _dims[0];\n+ return _rlen;\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "diff": "@@ -30,74 +30,94 @@ public class DenseBlockFP64 extends DenseBlockDRB\n{\nprivate static final long serialVersionUID = 8546723684649816489L;\n- private double[] data;\n+ private double[] _data;\n+\n+ public DenseBlockFP64(int[] dims) {\n+ super(dims);\n+ reset(_rlen, _odims, 0);\n+ }\n+\n+ public DenseBlockFP64(int[] dims, double[] data) {\n+ super(dims);\n+ _data = data;\n+ }\n+\n+ @Override\n+ public boolean isNumeric() {\n+ return true;\n+ }\n@Override\npublic void reset() {\n- reset(rlen, clen, 0);\n+ reset(_rlen, _odims, 0);\n+ }\n+\n+ @Override\n+ public void reset(int[] dims) {\n+ reset(dims[0], (int)UtilFunctions.prod(dims, 1), 0);\n}\n@Override\n- public void reset(int rlen, int clen) {\n- reset(rlen, clen, 0);\n+ public void reset(int[] dims, double v) {\n+ reset(dims[0], (int)UtilFunctions.prod(dims, 1), v);\n}\n@Override\n- public void reset(int rlen, int clen, double v) {\n- int len = rlen * clen;\n+ public void reset(int rlen, int odims) {\n+ reset(rlen, odims, 0);\n+ }\n+\n+ @Override\n+ public void reset(int rlen, int odims, double v) {\n+ int len = rlen * odims;\nif( len > capacity() ) {\n- data = new double[len];\n+ _data = new double[len];\nif( v != 0 )\n- Arrays.fill(data, v);\n+ Arrays.fill(_data, v);\n}\nelse {\n- Arrays.fill(data, 0, len, v);\n+ Arrays.fill(_data, 0, len, v);\n}\n- this.rlen = rlen;\n- this.clen = clen;\n+ _rlen = rlen;\n+ _odims = odims;\n}\n@Override\npublic long capacity() {\n- return (data!=null) ? data.length : -1;\n+ return (_data!=null) ? 
_data.length : -1;\n}\n@Override\npublic long countNonZeros() {\n- return UtilFunctions.computeNnz(data, 0, rlen*clen);\n+ return UtilFunctions.computeNnz(_data, 0, _rlen*_odims);\n}\n@Override\npublic int countNonZeros(int r) {\n- return UtilFunctions.computeNnz(data, r*clen, clen);\n+ return UtilFunctions.computeNnz(_data, r*_odims, _odims);\n}\n@Override\n- public long countNonZeros(int rl, int ru, int cl, int cu) {\n+ public long countNonZeros(int rl, int ru, int ol, int ou) {\nlong nnz = 0;\n- if( cl == 0 && cu == clen ) { //specific case: all cols\n- nnz += UtilFunctions.computeNnz(data, rl*clen, (ru-rl)*clen);\n+ if( ol == 0 && ou == _odims ) { //specific case: all cols\n+ nnz += UtilFunctions.computeNnz(_data, rl*_odims, (ru-rl)*_odims);\n}\nelse {\n- for( int i=rl, ix=rl*clen; i<ru; i++, ix+=clen )\n- nnz += UtilFunctions.computeNnz(data, ix+cl, cu-cl);\n+ for( int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims )\n+ nnz += UtilFunctions.computeNnz(_data, ix+ol, ou-ol);\n}\nreturn nnz;\n}\n- @Override\n- public double[][] values() {\n- return new double[][]{data};\n- }\n-\n@Override\npublic double[] values(int r) {\n- return data;\n+ return _data;\n}\n@Override\npublic double[] valuesAt(int bix) {\n- return data;\n+ return _data;\n}\n@Override\n@@ -107,73 +127,79 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic int pos(int r) {\n- return r * clen;\n+ return r * _odims;\n}\n@Override\npublic int pos(int r, int c) {\n- return r * clen + c;\n+ return r * _odims + c;\n}\n@Override\npublic void incr(int r, int c) {\n- data[pos(r, c)] ++;\n+ _data[pos(r, c)] ++;\n}\n@Override\npublic void incr(int r, int c, double delta) {\n- data[pos(r, c)] += delta;\n+ _data[pos(r, c)] += delta;\n}\n@Override\npublic DenseBlock set(double v) {\n- Arrays.fill(data, 0, rlen*clen, v);\n+ Arrays.fill(_data, 0, _rlen*_odims, v);\nreturn this;\n}\n@Override\n- public DenseBlock set(int rl, int ru, int cl, int cu, double v) {\n- if( cl==0 && cu == clen )\n- Arrays.fill(data, rl*clen, ru*clen, v);\n+ public DenseBlock set(int rl, int ru, int ol, int ou, double v) {\n+ if( ol==0 && ou == _odims )\n+ Arrays.fill(_data, rl*_odims, ru*_odims, v);\nelse\n- for(int i=rl, ix=rl*clen; i<ru; i++, ix+=clen)\n- Arrays.fill(data, ix+cl, ix+cu, v);\n+ for(int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims)\n+ Arrays.fill(_data, ix+ol, ix+ou, v);\nreturn this;\n}\n@Override\npublic DenseBlock set(int r, int c, double v) {\n- data[pos(r, c)] = v;\n+ _data[pos(r, c)] = v;\nreturn this;\n}\n@Override\npublic DenseBlock set(DenseBlock db) {\n- System.arraycopy(db.valuesAt(0), 0, data, 0, rlen*clen);\n+ System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims);\nreturn this;\n}\n@Override\n- public DenseBlock set(int rl, int ru, int cl, int cu, DenseBlock db) {\n+ public DenseBlock set(int rl, int ru, int ol, int ou, DenseBlock db) {\ndouble[] a = db.valuesAt(0);\n- if( cl == 0 && cu == clen)\n- System.arraycopy(a, 0, data, rl*clen+cl, (int)db.size());\n+ if( ol == 0 && ou == _odims)\n+ System.arraycopy(a, 0, _data, rl*_odims+ol, (int)db.size());\nelse {\n- int len = cu - cl;\n- for(int i=rl, ix1=0, ix2=rl*clen+cl; i<ru; i++, ix1+=len, ix2+=clen)\n- System.arraycopy(a, ix1, data, ix2, len);\n+ int len = ou - ol;\n+ for(int i=rl, ix1=0, ix2=rl*_odims+ol; i<ru; i++, ix1+=len, ix2+=_odims)\n+ System.arraycopy(a, ix1, _data, ix2, len);\n}\nreturn this;\n}\n@Override\npublic DenseBlock set(int r, double[] v) {\n- System.arraycopy(v, 0, data, pos(r), clen);\n+ System.arraycopy(v, 0, _data, pos(r), _odims);\nreturn 
this;\n}\n@Override\npublic double get(int r, int c) {\n- return data[pos(r, c)];\n+ return _data[pos(r, c)];\n+ }\n+\n+ @Override\n+ public double get(int[] ix) {\n+ // TODO Auto-generated method stub\n+ return 0;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFactory.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFactory.java", "diff": "package org.tugraz.sysds.runtime.data;\n+import org.apache.commons.lang.NotImplementedException;\n+import org.tugraz.sysds.runtime.util.UtilFunctions;\n+\npublic abstract class DenseBlockFactory\n{\n- public static DenseBlock createDenseBlock(int rlen, int clen) {\n- DenseBlock.Type type = ((long)rlen*clen < Integer.MAX_VALUE) ?\n+ public static DenseBlock createDenseBlock(int[] dims) {\n+ DenseBlock.Type type =\n+ (UtilFunctions.prod(dims) < Integer.MAX_VALUE) ?\nDenseBlock.Type.DRB : DenseBlock.Type.LDRB;\n- return createDenseBlock(type, rlen, clen);\n+ return createDenseBlock(type, dims);\n+ }\n+\n+ public static DenseBlock createDenseBlock(int rlen, int clen) {\n+ return createDenseBlock(new int[]{rlen, clen});\n+ }\n+\n+ public static DenseBlock createDenseBlock(double[] data, int[] dims) {\n+ return new DenseBlockFP64(dims, data);\n}\npublic static DenseBlock createDenseBlock(double[] data, int rlen, int clen) {\n- return new DenseBlockDRB(data, rlen, clen);\n+ return createDenseBlock(data, new int[]{rlen, clen});\n}\n- public static DenseBlock createDenseBlock(DenseBlock.Type type, int rlen, int clen) {\n+ public static DenseBlock createDenseBlock(DenseBlock.Type type, int[] dims) {\nswitch( type ) {\n- case DRB: return new DenseBlockDRB(rlen, clen);\n- case LDRB: return new DenseBlockLDRB(rlen, clen);\n+ case DRB: return new DenseBlockFP64(dims);\n+ case LDRB: throw new NotImplementedException();\ndefault:\nthrow new RuntimeException(\"Unexpected dense block type: \"+type.name());\n}\n@@ -49,6 +61,6 @@ public abstract class DenseBlockFactory\npublic static DenseBlock.Type getDenseBlockType(DenseBlock dblock) {\nreturn (dblock instanceof DenseBlockDRB) ? DenseBlock.Type.DRB :\n- (dblock instanceof DenseBlockLDRB) ? DenseBlock.Type.LDRB : null;\n+ (dblock instanceof DenseBlockDRB) ? DenseBlock.Type.LDRB : null; //TODO\n}\n}\n" }, { "change_type": "DELETE", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockLDRB.java", "new_path": null, "diff": "-/*\n- * Modifications Copyright 2018 Graz University of Technology\n- *\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. 
See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-\n-package org.tugraz.sysds.runtime.data;\n-\n-import java.util.Arrays;\n-import java.util.stream.IntStream;\n-\n-import org.tugraz.sysds.runtime.util.UtilFunctions;\n-\n-public class DenseBlockLDRB extends DenseBlock\n-{\n- private static final long serialVersionUID = -7285459683402612969L;\n-\n- private static final boolean PARALLEL_ALLOC = true;\n-\n- private double[][] data;\n- private int rlen;\n- private int clen;\n- private int blen;\n-\n- public DenseBlockLDRB(int rlen, int clen) {\n- this(rlen, clen, blocksize(rlen, clen));\n- }\n-\n- public DenseBlockLDRB(int rlen, int clen, int blen) {\n- reset(rlen, clen, blen, 0);\n- }\n-\n- @Override\n- public void reset() {\n- reset(rlen, clen, blen, 0);\n- }\n-\n- @Override\n- public void reset(int rlen, int clen) {\n- reset(rlen, clen, blen, 0);\n- }\n-\n- @Override\n- public void reset(int rlen, int clen, double v) {\n- reset(rlen, clen, blen, v);\n- }\n-\n- @SuppressWarnings(\"resource\")\n- private void reset(int rlen, int clen, int blen, double v) {\n- long llen = (long) rlen * clen;\n- int numPart = (int)Math.ceil((double)rlen / blen);\n- if( this.blen == blen && llen < capacity() ) {\n- for(int i=0; i<numPart; i++) {\n- int lrlen = (int)(Math.min((i+1)*blen,rlen)-i*blen);\n- Arrays.fill(data[i], 0, lrlen*clen, v);\n- }\n- }\n- else {\n- data = new double[numPart][];\n- IntStream range = PARALLEL_ALLOC ?\n- IntStream.range(0, numPart).parallel() :\n- IntStream.range(0, numPart);\n- range.forEach(i ->\n- data[i] = allocArray(i, rlen, clen, blen, v));\n- }\n- this.rlen = rlen;\n- this.clen = clen;\n- this.blen = blen;\n- }\n-\n- private static double[] allocArray(int i, int rlen, int clen, int blen, double v) {\n- int lrlen = (int)(Math.min((i+1)*blen,rlen)-i*blen);\n- double[] ret = new double[lrlen*clen];\n- if( v != 0 )\n- Arrays.fill(ret, v);\n- return ret;\n- }\n-\n- @Override\n- public int numRows() {\n- return rlen;\n- }\n-\n- @Override\n- public int numBlocks() {\n- return data.length;\n- }\n-\n- @Override\n- public int blockSize() {\n- return blen;\n- }\n-\n- @Override\n- public int blockSize(int bix) {\n- return Math.min(blen, rlen-bix*blen);\n- }\n-\n- @Override\n- public boolean isContiguous() {\n- return rlen <= blen;\n- }\n-\n- @Override\n- public boolean isContiguous(int rl, int ru) {\n- return isContiguous() || index(rl)==index(ru);\n- }\n-\n- @Override\n- public long size() {\n- return (long)rlen * clen;\n- }\n-\n- @Override\n- public int size(int bix) {\n- return blockSize(bix) * clen;\n- }\n-\n- @Override\n- public long capacity() {\n- long len = 0;\n- for(int i=0; i<numBlocks(); i++)\n- len += data[i].length;\n- return len;\n- }\n-\n- @Override\n- public long countNonZeros() {\n- long nnz = 0;\n- for(int i=0; i<numBlocks(); i++ )\n- nnz += UtilFunctions.computeNnz(valuesAt(i), 0, size(i));\n- return nnz;\n- }\n-\n- @Override\n- public int countNonZeros(int r) {\n- return UtilFunctions.computeNnz(values(r), pos(r), clen);\n- }\n-\n- @Override\n- public long countNonZeros(int rl, int ru, int cl, int cu) {\n- long nnz = 0;\n- boolean rowBlock = (cl == 0 && cu == clen);\n- final int bil = index(rl);\n- final int biu = index(ru-1);\n- for(int bi=bil; bi<=biu; bi++) {\n- int lpos = (bi==bil) ? pos(rl) : 0;\n- int len = ((bi==biu) ? 
pos(ru-1)+clen :\n- blockSize(bi)*clen) - lpos;\n- if( rowBlock )\n- nnz += UtilFunctions.computeNnz(data[bi], lpos, len);\n- else\n- for(int i=lpos; i<lpos+len; i+=clen)\n- nnz += UtilFunctions.computeNnz(data[i], i+cl, cu-cl);\n- }\n- return nnz;\n- }\n-\n- @Override\n- public double[][] values() {\n- return data;\n- }\n-\n- @Override\n- public double[] values(int r) {\n- return data[r / blen];\n- }\n-\n- @Override\n- public double[] valuesAt(int bix) {\n- return data[bix];\n- }\n-\n- @Override\n- public int index(int r) {\n- return r / blen;\n- }\n-\n- @Override\n- public int pos(int r) {\n- return (r % blen) * clen;\n- }\n-\n- @Override\n- public int pos(int r, int c) {\n- return (r % blen) * clen + c;\n- }\n-\n- @Override\n- public void incr(int r, int c) {\n- data[index(r)][pos(r, c)] ++;\n- }\n-\n- @Override\n- public void incr(int r, int c, double delta) {\n- data[index(r)][pos(r, c)] += delta;\n- }\n-\n- @Override\n- public DenseBlock set(double v) {\n- for(int i=0; i<numBlocks(); i++)\n- Arrays.fill(data[i], v);\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int rl, int ru, int cl, int cu, double v) {\n- boolean rowBlock = (cl == 0 && cu == clen);\n- final int bil = index(rl);\n- final int biu = index(ru-1);\n- for(int bi=bil; bi<=biu; bi++) {\n- int lpos = (bi==bil) ? pos(rl) : 0;\n- int len = (bi==biu) ? pos(ru-1)-lpos+clen : blockSize(bi)*clen;\n- if( rowBlock )\n- Arrays.fill(data[bi], lpos, lpos+len, v);\n- else\n- for(int i=lpos; i<lpos+len; i+=clen)\n- Arrays.fill(data[bi], i+cl, i+cu, v);\n- }\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int r, int c, double v) {\n- data[index(r)][pos(r, c)] = v;\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int r, double[] v) {\n- System.arraycopy(v, 0, data[index(r)], pos(r), clen);\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(DenseBlock db) {\n- for(int bi=0; bi<numBlocks(); bi++)\n- System.arraycopy(db.valuesAt(bi), 0, data[bi], 0, size(bi));\n- return this;\n- }\n-\n- @Override\n- public DenseBlock set(int rl, int ru, int cl, int cu, DenseBlock db) {\n- for(int i=rl; i<ru; i++) {\n- System.arraycopy(db.values(i-rl),\n- db.pos(i-rl), values(i), pos(i, cl), cu-cl);\n- }\n- return this;\n- }\n-\n- @Override\n- public double get(int r, int c) {\n- return data[index(r)][pos(r, c)];\n- }\n-\n-\n-\n- private static int blocksize(int rlen, int clen) {\n- return Math.min(rlen, Integer.MAX_VALUE / clen);\n- }\n-}\n" } ]
Java
Apache License 2.0
apache/systemds
DenseBlock extension for tensors and multiple data types, part II
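The factory and constructor changes in this commit move dense-block allocation to an int[] dims signature. A small usage sketch mirroring the LibSpoofPrimitives call site in the diff above; K, CRS, PQ and the backing array are illustrative placeholders:

int K = 4, CRS = 27, PQ = 49;
double[] bvals = new double[CRS * PQ];

// allocate a fresh FP64 block via the factory (still defaults to FP64 at this point)
DenseBlock a = DenseBlockFactory.createDenseBlock(new int[]{K, CRS});

// wrap an existing double[] without copying, as done for the conv2d matrix multiply
DenseBlock b = new DenseBlockFP64(new int[]{CRS, PQ}, bvals);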
49,738
16.11.2018 18:32:02
-3,600
a0c26cd2b539a7038c8cbdd02befa8f05c3bfb46
New FP32 (single-precision) dense block
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/tugraz/sysds/common/Warnings.java", "diff": "+package org.tugraz.sysds.common;\n+\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\n+import org.tugraz.sysds.conf.DMLConfig;\n+\n+public class Warnings\n+{\n+ private static final Log LOG = LogFactory.getLog(DMLConfig.class.getName());\n+\n+ public static void warnFullFP64Conversion(long len) {\n+ LOG.warn(\"Performance warning: conversion to FP64 array of size \"+len+\".\");\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "diff": "@@ -25,6 +25,7 @@ package org.tugraz.sysds.runtime.data;\nimport java.io.Serializable;\nimport java.util.Arrays;\n+import org.tugraz.sysds.runtime.DMLRuntimeException;\nimport org.tugraz.sysds.runtime.instructions.cp.KahanObject;\nimport org.tugraz.sysds.runtime.util.UtilFunctions;\n@@ -46,11 +47,12 @@ public abstract class DenseBlock implements Serializable\nprotected int _rlen; //number of rows\nprotected int _odims; //length of other dimensions\n+ private double[] _reuse;\nprotected DenseBlock(int[] dims) {\nlong odims = UtilFunctions.prod(dims, 1);\nif( odims > Integer.MAX_VALUE )\n- throw new RuntimeException(\"Invalid dims: \"+Arrays.toString(dims));\n+ throw new DMLRuntimeException(\"Invalid dims: \"+Arrays.toString(dims));\n_rlen = dims[0];\n_odims = (int) odims;\n}\n@@ -59,7 +61,9 @@ public abstract class DenseBlock implements Serializable\n* Resets the dense block by deleting non-zero values. After this\n* call all countNonZeros() calls are guaranteed to return 0.\n*/\n- public abstract void reset();\n+ public final void reset() {\n+ reset(_rlen, _odims, 0);\n+ }\n/**\n* Resets the dense block by deleting non-zero values. 
After this\n@@ -69,7 +73,9 @@ public abstract class DenseBlock implements Serializable\n*\n* @param dims length and size of dimensions.\n*/\n- public abstract void reset(int[] dims);\n+ public final void reset(int[] dims) {\n+ reset(dims[0], (int)UtilFunctions.prod(dims, 1), 0);\n+ }\n/**\n* Resets the dense block by deleting non-zeros.\n@@ -77,7 +83,9 @@ public abstract class DenseBlock implements Serializable\n* @param dims lenth and size of dimensions\n* @param v value\n*/\n- public abstract void reset(int[] dims, double v);\n+ public final void reset(int[] dims, double v) {\n+ reset(dims[0], (int)UtilFunctions.prod(dims, 1), v);\n+ }\n/**\n* Resets the dense block by deleting non-zeros.\n@@ -85,7 +93,9 @@ public abstract class DenseBlock implements Serializable\n* @param rlen number of rows\n* @param odims other dimensions\n*/\n- public abstract void reset(int rlen, int odims);\n+ public final void reset(int rlen, int odims) {\n+ reset(rlen, odims, 0);\n+ }\n/**\n* Resets the dense block by setting the given value.\n@@ -128,7 +138,7 @@ public abstract class DenseBlock implements Serializable\npublic abstract int blockSize(int bix);\n/**\n- * Indicates of the dnse block is numeric.\n+ * Indicates if the dense block is numeric.\n* @return true if numeric (FP, INT, BOOLEAN)\n*/\npublic abstract boolean isNumeric();\n@@ -158,7 +168,7 @@ public abstract class DenseBlock implements Serializable\n* @return length\n*/\npublic final long size() {\n- return _rlen * _odims;\n+ return (long)_rlen * _odims;\n}\n/**\n@@ -390,4 +400,12 @@ public abstract class DenseBlock implements Serializable\n}\nreturn sb.toString();\n}\n+\n+ protected double[] getReuseRow(boolean reset) {\n+ if( _reuse != null && reset )\n+ Arrays.fill(_reuse, 0);\n+ if( _reuse == null )\n+ _reuse = new double[_odims];\n+ return _reuse;\n+ }\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP32.java", "diff": "+/*\n+ * Modifications Copyright 2018 Graz University of Technology\n+ *\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.tugraz.sysds.runtime.data;\n+\n+import java.util.Arrays;\n+\n+import org.tugraz.sysds.common.Warnings;\n+import org.tugraz.sysds.runtime.util.DataConverter;\n+import org.tugraz.sysds.runtime.util.UtilFunctions;\n+\n+public class DenseBlockFP32 extends DenseBlockDRB\n+{\n+ private static final long serialVersionUID = 8546723684649816489L;\n+\n+ private float[] _data;\n+\n+ public DenseBlockFP32(int[] dims) {\n+ super(dims);\n+ reset(_rlen, _odims, 0);\n+ }\n+\n+ public DenseBlockFP32(int[] dims, float[] data) {\n+ super(dims);\n+ _data = data;\n+ }\n+\n+ @Override\n+ public boolean isNumeric() {\n+ return true;\n+ }\n+\n+ @Override\n+ public void reset(int rlen, int odims, double v) {\n+ float fv = (float) v;\n+ int len = rlen * odims;\n+ if( len > capacity() ) {\n+ _data = new float[len];\n+ if( v != 0 )\n+ Arrays.fill(_data, fv);\n+ }\n+ else {\n+ Arrays.fill(_data, 0, len, fv);\n+ }\n+ _rlen = rlen;\n+ _odims = odims;\n+ }\n+\n+ @Override\n+ public long capacity() {\n+ return (_data!=null) ? _data.length : -1;\n+ }\n+\n+ @Override\n+ public long countNonZeros() {\n+ return UtilFunctions.computeNnz(_data, 0, _rlen*_odims);\n+ }\n+\n+ @Override\n+ public int countNonZeros(int r) {\n+ return UtilFunctions.computeNnz(_data, r*_odims, _odims);\n+ }\n+\n+ @Override\n+ public long countNonZeros(int rl, int ru, int ol, int ou) {\n+ long nnz = 0;\n+ if( ol == 0 && ou == _odims ) { //specific case: all cols\n+ nnz += UtilFunctions.computeNnz(_data, rl*_odims, (ru-rl)*_odims);\n+ }\n+ else {\n+ for( int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims )\n+ nnz += UtilFunctions.computeNnz(_data, ix+ol, ou-ol);\n+ }\n+ return nnz;\n+ }\n+\n+ @Override\n+ public double[] values(int r) {\n+ double[] ret = getReuseRow(false);\n+ int ix = pos(r);\n+ for(int j=0; j<_odims; j++)\n+ ret[j] = _data[ix+j];\n+ return ret;\n+ }\n+\n+ @Override\n+ public double[] valuesAt(int bix) {\n+ Warnings.warnFullFP64Conversion(_data.length);\n+ return DataConverter.toDouble(_data);\n+ }\n+\n+ @Override\n+ public int index(int r) {\n+ return 0;\n+ }\n+\n+ @Override\n+ public int pos(int r) {\n+ return r * _odims;\n+ }\n+\n+ @Override\n+ public int pos(int r, int c) {\n+ return r * _odims + c;\n+ }\n+\n+ @Override\n+ public void incr(int r, int c) {\n+ _data[pos(r, c)] ++;\n+ }\n+\n+ @Override\n+ public void incr(int r, int c, double delta) {\n+ _data[pos(r, c)] += delta;\n+ }\n+\n+ @Override\n+ public DenseBlock set(double v) {\n+ Arrays.fill(_data, 0, _rlen*_odims, (float)v);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int rl, int ru, int ol, int ou, double v) {\n+ float fv = (float) v;\n+ if( ol==0 && ou == _odims )\n+ Arrays.fill(_data, rl*_odims, ru*_odims, fv);\n+ else\n+ for(int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims)\n+ Arrays.fill(_data, ix+ol, ix+ou, fv);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int r, int c, double v) {\n+ _data[pos(r, c)] = (float)v;\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(DenseBlock db) {\n+ System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int rl, int ru, int ol, int ou, DenseBlock db) {\n+ double[] a = db.valuesAt(0);\n+ if( ol == 0 && ou == _odims)\n+ System.arraycopy(a, 0, _data, rl*_odims+ol, (int)db.size());\n+ else {\n+ int len = ou - ol;\n+ for(int i=rl, ix1=0, ix2=rl*_odims+ol; i<ru; i++, ix1+=len, ix2+=_odims)\n+ 
System.arraycopy(a, ix1, _data, ix2, len);\n+ }\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int r, double[] v) {\n+ System.arraycopy(v, 0, _data, pos(r), _odims);\n+ return this;\n+ }\n+\n+ @Override\n+ public double get(int r, int c) {\n+ return _data[pos(r, c)];\n+ }\n+\n+ @Override\n+ public double get(int[] ix) {\n+ // TODO Auto-generated method stub\n+ return 0;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "diff": "@@ -47,26 +47,6 @@ public class DenseBlockFP64 extends DenseBlockDRB\nreturn true;\n}\n- @Override\n- public void reset() {\n- reset(_rlen, _odims, 0);\n- }\n-\n- @Override\n- public void reset(int[] dims) {\n- reset(dims[0], (int)UtilFunctions.prod(dims, 1), 0);\n- }\n-\n- @Override\n- public void reset(int[] dims, double v) {\n- reset(dims[0], (int)UtilFunctions.prod(dims, 1), v);\n- }\n-\n- @Override\n- public void reset(int rlen, int odims) {\n- reset(rlen, odims, 0);\n- }\n-\n@Override\npublic void reset(int rlen, int odims, double v) {\nint len = rlen * odims;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/matrix/data/LibMatrixAgg.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/matrix/data/LibMatrixAgg.java", "diff": "@@ -1786,7 +1786,7 @@ public class LibMatrixAgg\ndouble sum = (agg != null) ? agg[0] : 0;\n//scan once and compute prefix sums\ndouble[] avals = a.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n+ double[] cvals = c.values(0);\nfor( int i=rl, ix=rl*2; i<ru; i++, ix+=2 ) {\nsum = cvals[i] = avals[ix] + avals[ix+1] * sum;\n}\n@@ -1914,7 +1914,7 @@ public class LibMatrixAgg\n//init output (base for incremental agg)\nc.set(init);\n//execute builtin aggregate\n- double[] lc = c.valuesAt(0); //guaranteed single row\n+ double[] lc = c.values(0); //guaranteed single row\nfor( int i=rl; i<ru; i++ )\nbuiltinAgg( a.values(i), lc, a.pos(i), n, builtin );\n}\n@@ -2138,7 +2138,7 @@ public class LibMatrixAgg\n* @param ru row upper index\n*/\nprivate static void d_uarm( DenseBlock a, DenseBlock c, int n, int rl, int ru ) {\n- double[] lc = c.valuesAt(0);\n+ double[] lc = c.values(0);\nfor( int i=rl; i<ru; i++ )\nlc[i] = product(a.values(i), a.pos(i), n);\n}\n@@ -2153,7 +2153,7 @@ public class LibMatrixAgg\n* @param ru row upper index\n*/\nprivate static void d_uacm( DenseBlock a, DenseBlock c, int n, int rl, int ru ) {\n- double[] lc = c.set(1).valuesAt(0); //guaranteed single row\n+ double[] lc = c.set(1).values(0); //guaranteed single row\nfor( int i=rl; i<ru; i++ )\nLibMatrixMult.vectMultiplyWrite(a.values(i), lc, lc, a.pos(i), 0, 0, n);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/util/DataConverter.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/util/DataConverter.java", "diff": "@@ -964,4 +964,18 @@ public class DataConverter\nreturn sb.toString();\n}\n+\n+ public static double[] toDouble(float[] data) {\n+ double[] ret = new double[data.length];\n+ for(int i=0; i<data.length; i++)\n+ ret[i] = data[i];\n+ return ret;\n+ }\n+\n+ public static double[] toDouble(long[] data) {\n+ double[] ret = new double[data.length];\n+ for(int i=0; i<data.length; i++)\n+ ret[i] = data[i];\n+ return ret;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/util/UtilFunctions.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/util/UtilFunctions.java", "diff": "@@ 
-593,6 +593,20 @@ public class UtilFunctions\nreturn lnnz;\n}\n+ public static int computeNnz(float[] a, int ai, int len) {\n+ int lnnz = 0;\n+ for( int i=ai; i<ai+len; i++ )\n+ lnnz += (a[i] != 0) ? 1 : 0;\n+ return lnnz;\n+ }\n+\n+ public static int computeNnz(long[] a, int ai, int len) {\n+ int lnnz = 0;\n+ for( int i=ai; i<ai+len; i++ )\n+ lnnz += (a[i] != 0) ? 1 : 0;\n+ return lnnz;\n+ }\n+\npublic static long computeNnz(SparseBlock a, int[] aix, int ai, int alen) {\nlong lnnz = 0;\nfor( int k=ai; k<ai+alen; k++ )\n" } ]
Java
Apache License 2.0
apache/systemds
New FP32 (single-precision) dense block
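One design point of the FP32 block added here: values(r) copies a single row into a reused double[] buffer, while valuesAt(bix) materializes the whole block as double[] and logs warnFullFP64Conversion. A brief sketch with illustrative sizes:

DenseBlock fp32 = new DenseBlockFP32(new int[]{1000, 1000});
fp32.set(0, 0, 1.0);                // value is cast to float internally
double[] row = fp32.values(0);      // per-row copy into a shared double[] buffer
double[] all = fp32.valuesAt(0);    // full FP64 conversion, triggers the performance warning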
49,738
16.11.2018 20:49:49
-3,600
3c0a35b6e22b2d97c495e7c1dd5906786bf73bdb
Fix multi-dimensional indexing in dense blocks, incl tests
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlock.java", "diff": "@@ -45,8 +45,10 @@ public abstract class DenseBlock implements Serializable\nLDRB, //large dense row block\n}\n+ //NOTE: for a MxNxPxQ tensor the dimensions are given as\n+ //rlen=M, odims=[NxPxQ, PxQ, Q]\nprotected int _rlen; //number of rows\n- protected int _odims; //length of other dimensions\n+ protected int[] _odims; //cumprod other dims\nprivate double[] _reuse;\nprotected DenseBlock(int[] dims) {\n@@ -54,7 +56,8 @@ public abstract class DenseBlock implements Serializable\nif( odims > Integer.MAX_VALUE )\nthrow new DMLRuntimeException(\"Invalid dims: \"+Arrays.toString(dims));\n_rlen = dims[0];\n- _odims = (int) odims;\n+ //materialize dim offsets (reverse cumprod)\n+ _odims = createDimOffsets(dims);\n}\n/**\n@@ -74,7 +77,7 @@ public abstract class DenseBlock implements Serializable\n* @param dims length and size of dimensions.\n*/\npublic final void reset(int[] dims) {\n- reset(dims[0], (int)UtilFunctions.prod(dims, 1), 0);\n+ reset(dims[0], createDimOffsets(dims), 0);\n}\n/**\n@@ -84,19 +87,41 @@ public abstract class DenseBlock implements Serializable\n* @param v value\n*/\npublic final void reset(int[] dims, double v) {\n- reset(dims[0], (int)UtilFunctions.prod(dims, 1), v);\n+ reset(dims[0], createDimOffsets(dims), v);\n}\n/**\n* Resets the dense block by deleting non-zeros.\n*\n* @param rlen number of rows\n- * @param odims other dimensions\n+ * @param clen number of columns\n+ */\n+ public final void reset(int rlen, int clen) {\n+ reset(rlen, new int[]{clen}, 0);\n+ }\n+\n+ /**\n+ * Resets the dense block by deleting non-zeros.\n+ *\n+ * @param rlen number of rows\n+ * @param odims offsets of other dimensions\n*/\n- public final void reset(int rlen, int odims) {\n+ public final void reset(int rlen, int[] odims) {\nreset(rlen, odims, 0);\n}\n+ /**\n+ * Resets the dense block by setting the given value.\n+ *\n+ * @param rlen number of rows\n+ * @param clen number of columns\n+ * @param v value\n+ */\n+ public final void reset(int rlen, int clen, double v) {\n+ reset(rlen, new int[]{clen}, v);\n+ }\n+\n+\n/**\n* Resets the dense block by setting the given value.\n*\n@@ -104,7 +129,7 @@ public abstract class DenseBlock implements Serializable\n* @param odims other dimensions\n* @param v value\n*/\n- public abstract void reset(int rlen, int odims, double v);\n+ public abstract void reset(int rlen, int[] odims, double v);\n/**\n* Get the number of rows.\n@@ -168,7 +193,7 @@ public abstract class DenseBlock implements Serializable\n* @return length\n*/\npublic final long size() {\n- return (long)_rlen * _odims;\n+ return (long)_rlen * _odims[0];\n}\n/**\n@@ -261,6 +286,16 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract int pos(int r, int c);\n+ /**\n+ * Get the position for a given cell\n+ * within the associated block.\n+ *\n+ * @param ix cell indexes\n+ * @return block position\n+ */\n+ public abstract int pos(int[] ix);\n+\n+\n/**\n* Increments the given value for a given row and column.\n*\n@@ -392,7 +427,7 @@ public abstract class DenseBlock implements Serializable\nfor(int i=0; i<_rlen; i++) {\ndouble[] data = values(i);\nint ix = pos(i);\n- for(int j=0; j<_odims; j++) {\n+ for(int j=0; j<_odims[0]; j++) {\nsb.append(data[ix+j]);\nsb.append(\"\\t\");\n}\n@@ -405,7 +440,17 @@ public abstract class DenseBlock implements Serializable\nif( _reuse != null && reset 
)\nArrays.fill(_reuse, 0);\nif( _reuse == null )\n- _reuse = new double[_odims];\n+ _reuse = new double[_odims[0]];\nreturn _reuse;\n}\n+\n+ private static int[] createDimOffsets(int[] dims) {\n+ int[] ret = new int[dims.length-1];\n+ int prod = 1;\n+ for(int i=dims.length-1; i>=1; i--) {\n+ prod *= dims[i];\n+ ret[i-1] = prod;\n+ }\n+ return ret;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockDRB.java", "diff": "@@ -59,4 +59,22 @@ public abstract class DenseBlockDRB extends DenseBlock\npublic int size(int bix) {\nreturn (int)size();\n}\n+\n+ @Override\n+ public int pos(int r) {\n+ return r * _odims[0];\n+ }\n+\n+ @Override\n+ public int pos(int r, int c) {\n+ return r * _odims[0] + c;\n+ }\n+\n+ @Override\n+ public int pos(int[] ix) {\n+ int pos = ix[ix.length-1];\n+ for(int i=0; i<ix.length-1; i++)\n+ pos += ix[i] * _odims[i];\n+ return pos;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP32.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP32.java", "diff": "@@ -50,9 +50,9 @@ public class DenseBlockFP32 extends DenseBlockDRB\n}\n@Override\n- public void reset(int rlen, int odims, double v) {\n+ public void reset(int rlen, int[] odims, double v) {\nfloat fv = (float) v;\n- int len = rlen * odims;\n+ int len = rlen * odims[0];\nif( len > capacity() ) {\n_data = new float[len];\nif( v != 0 )\n@@ -72,22 +72,22 @@ public class DenseBlockFP32 extends DenseBlockDRB\n@Override\npublic long countNonZeros() {\n- return UtilFunctions.computeNnz(_data, 0, _rlen*_odims);\n+ return UtilFunctions.computeNnz(_data, 0, _rlen*_odims[0]);\n}\n@Override\npublic int countNonZeros(int r) {\n- return UtilFunctions.computeNnz(_data, r*_odims, _odims);\n+ return UtilFunctions.computeNnz(_data, r*_odims[0], _odims[0]);\n}\n@Override\npublic long countNonZeros(int rl, int ru, int ol, int ou) {\nlong nnz = 0;\n- if( ol == 0 && ou == _odims ) { //specific case: all cols\n- nnz += UtilFunctions.computeNnz(_data, rl*_odims, (ru-rl)*_odims);\n+ if( ol == 0 && ou == _odims[0] ) { //specific case: all cols\n+ nnz += UtilFunctions.computeNnz(_data, rl*_odims[0], (ru-rl)*_odims[0]);\n}\nelse {\n- for( int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims )\n+ for( int i=rl, ix=rl*_odims[0]; i<ru; i++, ix+=_odims[0] )\nnnz += UtilFunctions.computeNnz(_data, ix+ol, ou-ol);\n}\nreturn nnz;\n@@ -97,7 +97,8 @@ public class DenseBlockFP32 extends DenseBlockDRB\npublic double[] values(int r) {\ndouble[] ret = getReuseRow(false);\nint ix = pos(r);\n- for(int j=0; j<_odims; j++)\n+ int ncol = _odims[0];\n+ for(int j=0; j<ncol; j++)\nret[j] = _data[ix+j];\nreturn ret;\n}\n@@ -113,16 +114,6 @@ public class DenseBlockFP32 extends DenseBlockDRB\nreturn 0;\n}\n- @Override\n- public int pos(int r) {\n- return r * _odims;\n- }\n-\n- @Override\n- public int pos(int r, int c) {\n- return r * _odims + c;\n- }\n-\n@Override\npublic void incr(int r, int c) {\n_data[pos(r, c)] ++;\n@@ -135,17 +126,17 @@ public class DenseBlockFP32 extends DenseBlockDRB\n@Override\npublic DenseBlock set(double v) {\n- Arrays.fill(_data, 0, _rlen*_odims, (float)v);\n+ Arrays.fill(_data, 0, _rlen*_odims[0], (float)v);\nreturn this;\n}\n@Override\npublic DenseBlock set(int rl, int ru, int ol, int ou, double v) {\nfloat fv = (float) v;\n- if( ol==0 && ou == _odims )\n- Arrays.fill(_data, rl*_odims, ru*_odims, fv);\n+ if( ol==0 && ou == _odims[0] )\n+ 
Arrays.fill(_data, rl*_odims[0], ru*_odims[0], fv);\nelse\n- for(int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims)\n+ for(int i=rl, ix=rl*_odims[0]; i<ru; i++, ix+=_odims[0])\nArrays.fill(_data, ix+ol, ix+ou, fv);\nreturn this;\n}\n@@ -158,18 +149,18 @@ public class DenseBlockFP32 extends DenseBlockDRB\n@Override\npublic DenseBlock set(DenseBlock db) {\n- System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims);\n+ System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims[0]);\nreturn this;\n}\n@Override\npublic DenseBlock set(int rl, int ru, int ol, int ou, DenseBlock db) {\ndouble[] a = db.valuesAt(0);\n- if( ol == 0 && ou == _odims)\n- System.arraycopy(a, 0, _data, rl*_odims+ol, (int)db.size());\n+ if( ol == 0 && ou == _odims[0])\n+ System.arraycopy(a, 0, _data, rl*_odims[0]+ol, (int)db.size());\nelse {\nint len = ou - ol;\n- for(int i=rl, ix1=0, ix2=rl*_odims+ol; i<ru; i++, ix1+=len, ix2+=_odims)\n+ for(int i=rl, ix1=0, ix2=rl*_odims[0]+ol; i<ru; i++, ix1+=len, ix2+=_odims[0])\nSystem.arraycopy(a, ix1, _data, ix2, len);\n}\nreturn this;\n@@ -177,7 +168,7 @@ public class DenseBlockFP32 extends DenseBlockDRB\n@Override\npublic DenseBlock set(int r, double[] v) {\n- System.arraycopy(v, 0, _data, pos(r), _odims);\n+ System.arraycopy(v, 0, _data, pos(r), _odims[0]);\nreturn this;\n}\n@@ -188,7 +179,6 @@ public class DenseBlockFP32 extends DenseBlockDRB\n@Override\npublic double get(int[] ix) {\n- // TODO Auto-generated method stub\n- return 0;\n+ return _data[pos(ix)];\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP64.java", "diff": "@@ -48,8 +48,8 @@ public class DenseBlockFP64 extends DenseBlockDRB\n}\n@Override\n- public void reset(int rlen, int odims, double v) {\n- int len = rlen * odims;\n+ public void reset(int rlen, int[] odims, double v) {\n+ int len = rlen * odims[0];\nif( len > capacity() ) {\n_data = new double[len];\nif( v != 0 )\n@@ -69,22 +69,22 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic long countNonZeros() {\n- return UtilFunctions.computeNnz(_data, 0, _rlen*_odims);\n+ return UtilFunctions.computeNnz(_data, 0, _rlen*_odims[0]);\n}\n@Override\npublic int countNonZeros(int r) {\n- return UtilFunctions.computeNnz(_data, r*_odims, _odims);\n+ return UtilFunctions.computeNnz(_data, r*_odims[0], _odims[0]);\n}\n@Override\npublic long countNonZeros(int rl, int ru, int ol, int ou) {\nlong nnz = 0;\n- if( ol == 0 && ou == _odims ) { //specific case: all cols\n- nnz += UtilFunctions.computeNnz(_data, rl*_odims, (ru-rl)*_odims);\n+ if( ol == 0 && ou == _odims[0] ) { //specific case: all cols\n+ nnz += UtilFunctions.computeNnz(_data, rl*_odims[0], (ru-rl)*_odims[0]);\n}\nelse {\n- for( int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims )\n+ for( int i=rl, ix=rl*_odims[0]; i<ru; i++, ix+=_odims[0] )\nnnz += UtilFunctions.computeNnz(_data, ix+ol, ou-ol);\n}\nreturn nnz;\n@@ -107,12 +107,12 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic int pos(int r) {\n- return r * _odims;\n+ return r * _odims[0];\n}\n@Override\npublic int pos(int r, int c) {\n- return r * _odims + c;\n+ return r * _odims[0] + c;\n}\n@Override\n@@ -127,16 +127,16 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic DenseBlock set(double v) {\n- Arrays.fill(_data, 0, _rlen*_odims, v);\n+ Arrays.fill(_data, 0, _rlen*_odims[0], v);\nreturn this;\n}\n@Override\npublic DenseBlock set(int rl, int ru, int ol, int ou, 
double v) {\n- if( ol==0 && ou == _odims )\n- Arrays.fill(_data, rl*_odims, ru*_odims, v);\n+ if( ol==0 && ou == _odims[0] )\n+ Arrays.fill(_data, rl*_odims[0], ru*_odims[0], v);\nelse\n- for(int i=rl, ix=rl*_odims; i<ru; i++, ix+=_odims)\n+ for(int i=rl, ix=rl*_odims[0]; i<ru; i++, ix+=_odims[0])\nArrays.fill(_data, ix+ol, ix+ou, v);\nreturn this;\n}\n@@ -149,18 +149,18 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic DenseBlock set(DenseBlock db) {\n- System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims);\n+ System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims[0]);\nreturn this;\n}\n@Override\npublic DenseBlock set(int rl, int ru, int ol, int ou, DenseBlock db) {\ndouble[] a = db.valuesAt(0);\n- if( ol == 0 && ou == _odims)\n- System.arraycopy(a, 0, _data, rl*_odims+ol, (int)db.size());\n+ if( ol == 0 && ou == _odims[0])\n+ System.arraycopy(a, 0, _data, rl*_odims[0]+ol, (int)db.size());\nelse {\nint len = ou - ol;\n- for(int i=rl, ix1=0, ix2=rl*_odims+ol; i<ru; i++, ix1+=len, ix2+=_odims)\n+ for(int i=rl, ix1=0, ix2=rl*_odims[0]+ol; i<ru; i++, ix1+=len, ix2+=_odims[0])\nSystem.arraycopy(a, ix1, _data, ix2, len);\n}\nreturn this;\n@@ -168,7 +168,7 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic DenseBlock set(int r, double[] v) {\n- System.arraycopy(v, 0, _data, pos(r), _odims);\n+ System.arraycopy(v, 0, _data, pos(r), _odims[0]);\nreturn this;\n}\n@@ -179,7 +179,6 @@ public class DenseBlockFP64 extends DenseBlockDRB\n@Override\npublic double get(int[] ix) {\n- // TODO Auto-generated method stub\n- return 0;\n+ return _data[pos(ix)];\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFactory.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFactory.java", "diff": "package org.tugraz.sysds.runtime.data;\nimport org.apache.commons.lang.NotImplementedException;\n+import org.tugraz.sysds.common.Types.ValueType;\n+import org.tugraz.sysds.runtime.DMLRuntimeException;\nimport org.tugraz.sysds.runtime.util.UtilFunctions;\npublic abstract class DenseBlockFactory\n{\n+ public static DenseBlock createDenseBlock(int rlen, int clen) {\n+ return createDenseBlock(new int[]{rlen, clen});\n+ }\n+\npublic static DenseBlock createDenseBlock(int[] dims) {\n- DenseBlock.Type type =\n- (UtilFunctions.prod(dims) < Integer.MAX_VALUE) ?\n- DenseBlock.Type.DRB : DenseBlock.Type.LDRB;\n- return createDenseBlock(type, dims);\n+ return createDenseBlock(ValueType.FP64, dims);\n}\n- public static DenseBlock createDenseBlock(int rlen, int clen) {\n- return createDenseBlock(new int[]{rlen, clen});\n+ public static DenseBlock createDenseBlock(ValueType vt, int[] dims) {\n+ DenseBlock.Type type = (UtilFunctions.prod(dims) < Integer.MAX_VALUE) ?\n+ DenseBlock.Type.DRB : DenseBlock.Type.LDRB;\n+ return createDenseBlock(vt, type, dims);\n}\npublic static DenseBlock createDenseBlock(double[] data, int[] dims) {\n@@ -46,12 +51,27 @@ public abstract class DenseBlockFactory\nreturn createDenseBlock(data, new int[]{rlen, clen});\n}\n- public static DenseBlock createDenseBlock(DenseBlock.Type type, int[] dims) {\n+ public static DenseBlock createDenseBlock(float[] data, int[] dims) {\n+ return new DenseBlockFP32(dims, data);\n+ }\n+\n+ public static DenseBlock createDenseBlock(float[] data, int rlen, int clen) {\n+ return createDenseBlock(data, new int[]{rlen, clen});\n+ }\n+\n+ public static DenseBlock createDenseBlock(ValueType vt, DenseBlock.Type type, int[] dims) {\nswitch( type ) {\n- case 
DRB: return new DenseBlockFP64(dims);\n+ case DRB:\n+ switch(vt) {\n+ case FP32: return new DenseBlockFP32(dims);\n+ case FP64: return new DenseBlockFP64(dims);\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported dense block value type: \"+vt.name());\n+ }\ncase LDRB: throw new NotImplementedException();\n+ //TODO single call to LDRB with value type\ndefault:\n- throw new RuntimeException(\"Unexpected dense block type: \"+type.name());\n+ throw new DMLRuntimeException(\"Unexpected dense block type: \"+type.name());\n}\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/tugraz/sysds/test/tensor/DenseBlockConstIndexingTest.java", "diff": "+/*\n+ * Copyright 2018 Graz University of Technology\n+ *\n+ * Licensed under the Apache License, Version 2.0 (the \"License\");\n+ * you may not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing, software\n+ * distributed under the License is distributed on an \"AS IS\" BASIS,\n+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ * See the License for the specific language governing permissions and\n+ * limitations under the License.\n+ */\n+\n+package org.tugraz.sysds.test.tensor;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.tugraz.sysds.common.Types.ValueType;\n+import org.tugraz.sysds.runtime.data.DenseBlock;\n+import org.tugraz.sysds.runtime.data.DenseBlockFactory;\n+\n+\n+public class DenseBlockConstIndexingTest\n+{\n+ @Test\n+ public void testIndexDenseBlock2FP32Const() throws Exception {\n+ DenseBlock db = getDenseBlock2(ValueType.FP32);\n+ db.set(7.3);\n+ for(int i=0; i<db.numRows(); i++)\n+ for(int j=0; j<5; j++)\n+ Assert.assertEquals(7.3, db.get(i, j), 1e-5);\n+ }\n+\n+ @Test\n+ public void testIndexDenseBlock2FP64Const() throws Exception {\n+ DenseBlock db = getDenseBlock2(ValueType.FP64);\n+ db.set(7.3);\n+ for(int i=0; i<db.numRows(); i++)\n+ for(int j=0; j<5; j++)\n+ Assert.assertEquals(7.3, db.get(i, j), 0);\n+ }\n+\n+ @Test\n+ public void testIndexDenseBlock3FP32Const() throws Exception {\n+ DenseBlock db = getDenseBlock3(ValueType.FP32);\n+ db.set(7.3);\n+ for(int i=0; i<db.numRows(); i++)\n+ for(int j=0; j<5; j++)\n+ for(int k=0; k<7; k++)\n+ Assert.assertEquals(7.3, db.get(new int[]{i,j,k}), 1e-5);\n+ }\n+\n+ @Test\n+ public void testIndexDenseBlock3FP64Const() throws Exception {\n+ DenseBlock db = getDenseBlock3(ValueType.FP64);\n+ db.set(7.3);\n+ for(int i=0; i<db.numRows(); i++)\n+ for(int j=0; j<5; j++)\n+ for(int k=0; k<7; k++)\n+ Assert.assertEquals(7.3, db.get(new int[]{i,j,k}), 0);\n+ }\n+\n+ private DenseBlock getDenseBlock2(ValueType vt) {\n+ return DenseBlockFactory.createDenseBlock(vt, new int[] {3,5});\n+ }\n+\n+ private DenseBlock getDenseBlock3(ValueType vt) {\n+ return DenseBlockFactory.createDenseBlock(vt, new int[] {3,5,7});\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/tugraz/sysds/test/tensor/DenseBlockConstructionTest.java", "diff": "+/*\n+ * Copyright 2018 Graz University of Technology\n+ *\n+ * Licensed under the Apache License, Version 2.0 (the \"License\");\n+ * you may not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing, software\n+ * distributed 
under the License is distributed on an \"AS IS\" BASIS,\n+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ * See the License for the specific language governing permissions and\n+ * limitations under the License.\n+ */\n+\n+package org.tugraz.sysds.test.tensor;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.tugraz.sysds.common.Types.ValueType;\n+import org.tugraz.sysds.runtime.data.DenseBlock;\n+import org.tugraz.sysds.runtime.data.DenseBlockFactory;\n+\n+\n+public class DenseBlockConstructionTest\n+{\n+ @Test\n+ public void testMetaDenseBlock2FP32() throws Exception {\n+ DenseBlock db = getDenseBlock2(ValueType.FP32);\n+ Assert.assertEquals(3, db.numRows());\n+ Assert.assertEquals(true, db.isNumeric());\n+ Assert.assertEquals(true, db.isContiguous());\n+ Assert.assertEquals(1, db.numBlocks());\n+ Assert.assertEquals(3, db.blockSize());\n+ Assert.assertEquals(3*5, db.size());\n+ Assert.assertEquals(3*5, db.capacity());\n+ Assert.assertEquals(0, db.countNonZeros());\n+ Assert.assertEquals(DenseBlock.Type.DRB,\n+ DenseBlockFactory.getDenseBlockType(db));\n+ }\n+\n+ @Test\n+ public void testMetaDenseBlock2FP64() throws Exception {\n+ DenseBlock db = getDenseBlock2(ValueType.FP64);\n+ Assert.assertEquals(3, db.numRows());\n+ Assert.assertEquals(true, db.isNumeric());\n+ Assert.assertEquals(true, db.isContiguous());\n+ Assert.assertEquals(1, db.numBlocks());\n+ Assert.assertEquals(3, db.blockSize());\n+ Assert.assertEquals(3*5, db.size());\n+ Assert.assertEquals(3*5, db.capacity());\n+ Assert.assertEquals(0, db.countNonZeros());\n+ Assert.assertEquals(DenseBlock.Type.DRB,\n+ DenseBlockFactory.getDenseBlockType(db));\n+ }\n+\n+ @Test\n+ public void testMetaDenseBlock3FP32() throws Exception {\n+ DenseBlock db = getDenseBlock3(ValueType.FP32);\n+ Assert.assertEquals(3, db.numRows());\n+ Assert.assertEquals(true, db.isNumeric());\n+ Assert.assertEquals(true, db.isContiguous());\n+ Assert.assertEquals(1, db.numBlocks());\n+ Assert.assertEquals(3, db.blockSize());\n+ Assert.assertEquals(3*5*7, db.size());\n+ Assert.assertEquals(3*5*7, db.capacity());\n+ Assert.assertEquals(0, db.countNonZeros());\n+ Assert.assertEquals(DenseBlock.Type.DRB,\n+ DenseBlockFactory.getDenseBlockType(db));\n+ }\n+\n+ @Test\n+ public void testMetaDenseBlock3FP64() throws Exception {\n+ DenseBlock db = getDenseBlock3(ValueType.FP64);\n+ Assert.assertEquals(3, db.numRows());\n+ Assert.assertEquals(true, db.isNumeric());\n+ Assert.assertEquals(true, db.isContiguous());\n+ Assert.assertEquals(1, db.numBlocks());\n+ Assert.assertEquals(3, db.blockSize());\n+ Assert.assertEquals(3*5*7, db.size());\n+ Assert.assertEquals(3*5*7, db.capacity());\n+ Assert.assertEquals(0, db.countNonZeros());\n+ Assert.assertEquals(DenseBlock.Type.DRB,\n+ DenseBlockFactory.getDenseBlockType(db));\n+ }\n+\n+ private DenseBlock getDenseBlock2(ValueType vt) {\n+ return DenseBlockFactory.createDenseBlock(vt, new int[] {3,5});\n+ }\n+\n+ private DenseBlock getDenseBlock3(ValueType vt) {\n+ return DenseBlockFactory.createDenseBlock(vt, new int[] {3,5,7});\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
Fix multi-dimensional indexing in dense blocks, incl tests
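The dimension offsets introduced here are reverse cumulative products: for an M x N x P x Q block, _odims = [N*P*Q, P*Q, Q], and pos(ix) linearizes a cell index against them. A small worked example based on the 3x5x7 blocks used in DenseBlockConstIndexingTest above:

// dims = {3, 5, 7}  =>  _odims = {5*7, 7} = {35, 7}
// pos(new int[]{2, 4, 6}) = 2*35 + 4*7 + 6 = 104
DenseBlock db = DenseBlockFactory.createDenseBlock(ValueType.FP64, new int[]{3, 5, 7});
db.set(7.3);                             // constant fill, as in the test
double v = db.get(new int[]{2, 4, 6});   // reads the linearized position 104, returns 7.3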
49,738
16.11.2018 21:29:13
-3,600
5381d1dee215088baf9a949e0615b63cded11db4
New boolean dense block (using Java's BitSet), incl tests
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/common/Warnings.java", "new_path": "src/main/java/org/tugraz/sysds/common/Warnings.java", "diff": "@@ -11,4 +11,8 @@ public class Warnings\npublic static void warnFullFP64Conversion(long len) {\nLOG.warn(\"Performance warning: conversion to FP64 array of size \"+len+\".\");\n}\n+\n+ public static void warnInvaldBooleanIncrement(double delta) {\n+ LOG.warn(\"Correctness warning: invalid boolean increment by \"+delta+\".\");\n+ }\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockBool.java", "diff": "+/*\n+ * Modifications Copyright 2018 Graz University of Technology\n+ *\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.tugraz.sysds.runtime.data;\n+\n+import java.util.BitSet;\n+\n+import org.tugraz.sysds.common.Warnings;\n+import org.tugraz.sysds.runtime.util.DataConverter;\n+import org.tugraz.sysds.runtime.util.UtilFunctions;\n+\n+public class DenseBlockBool extends DenseBlockDRB\n+{\n+ private static final long serialVersionUID = -2228057308997136969L;\n+\n+ private BitSet _data;\n+\n+ public DenseBlockBool(int[] dims) {\n+ super(dims);\n+ reset(_rlen, _odims, 0);\n+ }\n+\n+ public DenseBlockBool(int[] dims, boolean[] data) {\n+ super(dims);\n+ _data = new BitSet(data.length);\n+ for(int i=0; i<data.length; i++)\n+ if( data[i] )\n+ _data.set(i);\n+ }\n+\n+ @Override\n+ public boolean isNumeric() {\n+ return true;\n+ }\n+\n+ @Override\n+ public void reset(int rlen, int[] odims, double v) {\n+ boolean bv = v != 0;\n+ int len = rlen * odims[0];\n+ if( len > capacity() ) {\n+ _data = new BitSet(len);\n+ if( bv )\n+ _data.set(0, len);\n+ }\n+ else {\n+ _data.set(0, len, bv);\n+ }\n+ _rlen = rlen;\n+ _odims = odims;\n+ }\n+\n+ @Override\n+ public long capacity() {\n+ return (_data!=null) ? _data.size() : -1;\n+ }\n+\n+ @Override\n+ public long countNonZeros() {\n+ return _data.cardinality();\n+ }\n+\n+ @Override\n+ public int countNonZeros(int r) {\n+ return UtilFunctions.computeNnz(_data, r*_odims[0], _odims[0]);\n+ }\n+\n+ @Override\n+ public long countNonZeros(int rl, int ru, int ol, int ou) {\n+ long nnz = 0;\n+ if( ol == 0 && ou == _odims[0] ) { //specific case: all cols\n+ nnz += UtilFunctions.computeNnz(_data, rl*_odims[0], (ru-rl)*_odims[0]);\n+ }\n+ else {\n+ for( int i=rl, ix=rl*_odims[0]; i<ru; i++, ix+=_odims[0] )\n+ nnz += UtilFunctions.computeNnz(_data, ix+ol, ou-ol);\n+ }\n+ return nnz;\n+ }\n+\n+ @Override\n+ public double[] values(int r) {\n+ double[] ret = getReuseRow(false);\n+ int ix = pos(r);\n+ int ncol = _odims[0];\n+ for(int j=0; j<ncol; j++)\n+ ret[j] = _data.get(ix+j) ? 
1 : 0;\n+ return ret;\n+ }\n+\n+ @Override\n+ public double[] valuesAt(int bix) {\n+ int len = _rlen*_odims[0];\n+ Warnings.warnFullFP64Conversion(len);\n+ return DataConverter.toDouble(_data, len);\n+ }\n+\n+ @Override\n+ public int index(int r) {\n+ return 0;\n+ }\n+\n+ @Override\n+ public void incr(int r, int c) {\n+ Warnings.warnInvaldBooleanIncrement(1);\n+ _data.set(pos(r, c));\n+ }\n+\n+ @Override\n+ public void incr(int r, int c, double delta) {\n+ Warnings.warnInvaldBooleanIncrement(delta);\n+ _data.set(pos(r, c));\n+ }\n+\n+ @Override\n+ public DenseBlock set(double v) {\n+ _data.set(0, _rlen*_odims[0], v != 0);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int rl, int ru, int ol, int ou, double v) {\n+ boolean bv = v != 0;\n+ if( ol==0 && ou == _odims[0] )\n+ _data.set(rl*_odims[0], ru*_odims[0], bv);\n+ else\n+ for(int i=rl, ix=rl*_odims[0]; i<ru; i++, ix+=_odims[0])\n+ _data.set(ix+ol, ix+ou, bv);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int r, int c, double v) {\n+ _data.set(pos(r, c), v != 0);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(DenseBlock db) {\n+ System.arraycopy(db.valuesAt(0), 0, _data, 0, _rlen*_odims[0]);\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int rl, int ru, int ol, int ou, DenseBlock db) {\n+ double[] a = db.valuesAt(0);\n+ if( ol == 0 && ou == _odims[0])\n+ System.arraycopy(a, 0, _data, rl*_odims[0]+ol, (int)db.size());\n+ else {\n+ int len = ou - ol;\n+ for(int i=rl, ix1=0, ix2=rl*_odims[0]+ol; i<ru; i++, ix1+=len, ix2+=_odims[0])\n+ System.arraycopy(a, ix1, _data, ix2, len);\n+ }\n+ return this;\n+ }\n+\n+ @Override\n+ public DenseBlock set(int r, double[] v) {\n+ System.arraycopy(v, 0, _data, pos(r), _odims[0]);\n+ return this;\n+ }\n+\n+ @Override\n+ public double get(int r, int c) {\n+ return _data.get(pos(r, c)) ? 1 : 0;\n+ }\n+\n+ @Override\n+ public double get(int[] ix) {\n+ return _data.get(pos(ix)) ? 
1 : 0;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP32.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFP32.java", "diff": "@@ -30,7 +30,7 @@ import org.tugraz.sysds.runtime.util.UtilFunctions;\npublic class DenseBlockFP32 extends DenseBlockDRB\n{\n- private static final long serialVersionUID = 8546723684649816489L;\n+ private static final long serialVersionUID = 1950471811056914020L;\nprivate float[] _data;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFactory.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/data/DenseBlockFactory.java", "diff": "@@ -65,6 +65,7 @@ public abstract class DenseBlockFactory\nswitch(vt) {\ncase FP32: return new DenseBlockFP32(dims);\ncase FP64: return new DenseBlockFP64(dims);\n+ case BOOLEAN: return new DenseBlockBool(dims);\ndefault:\nthrow new DMLRuntimeException(\"Unsupported dense block value type: \"+vt.name());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/util/DataConverter.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/util/DataConverter.java", "diff": "@@ -23,6 +23,7 @@ import java.io.IOException;\nimport java.text.DecimalFormat;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import java.util.BitSet;\nimport java.util.HashMap;\nimport java.util.Iterator;\nimport java.util.List;\n@@ -978,4 +979,11 @@ public class DataConverter\nret[i] = data[i];\nreturn ret;\n}\n+\n+ public static double[] toDouble(BitSet data, int len) {\n+ double[] ret = new double[len];\n+ for(int i=0; i<len; i++)\n+ ret[i] = data.get(i) ? 1 : 0;\n+ return ret;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/tugraz/sysds/runtime/util/UtilFunctions.java", "new_path": "src/main/java/org/tugraz/sysds/runtime/util/UtilFunctions.java", "diff": "@@ -21,6 +21,7 @@ package org.tugraz.sysds.runtime.util;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import java.util.BitSet;\nimport java.util.HashSet;\nimport java.util.Iterator;\nimport java.util.List;\n@@ -607,6 +608,13 @@ public class UtilFunctions\nreturn lnnz;\n}\n+ public static int computeNnz(BitSet a, int ai, int len) {\n+ int lnnz = 0;\n+ for( int i=ai; i<ai+len; i++ )\n+ lnnz += a.get(i) ? 
1 : 0;\n+ return lnnz;\n+ }\n+\npublic static long computeNnz(SparseBlock a, int[] aix, int ai, int alen) {\nlong lnnz = 0;\nfor( int k=ai; k<ai+alen; k++ )\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/tugraz/sysds/test/tensor/DenseBlockConstIndexingTest.java", "new_path": "src/test/java/org/tugraz/sysds/test/tensor/DenseBlockConstIndexingTest.java", "diff": "@@ -43,6 +43,15 @@ public class DenseBlockConstIndexingTest\nAssert.assertEquals(7.3, db.get(i, j), 0);\n}\n+ @Test\n+ public void testIndexDenseBlock2BoolConst() throws Exception {\n+ DenseBlock db = getDenseBlock2(ValueType.BOOLEAN);\n+ db.set(7.3);\n+ for(int i=0; i<db.numRows(); i++)\n+ for(int j=0; j<5; j++)\n+ Assert.assertEquals(1, db.get(i, j), 0);\n+ }\n+\n@Test\npublic void testIndexDenseBlock3FP32Const() throws Exception {\nDenseBlock db = getDenseBlock3(ValueType.FP32);\n@@ -63,6 +72,16 @@ public class DenseBlockConstIndexingTest\nAssert.assertEquals(7.3, db.get(new int[]{i,j,k}), 0);\n}\n+ @Test\n+ public void testIndexDenseBlock3BoolConst() throws Exception {\n+ DenseBlock db = getDenseBlock3(ValueType.BOOLEAN);\n+ db.set(7.3);\n+ for(int i=0; i<db.numRows(); i++)\n+ for(int j=0; j<5; j++)\n+ for(int k=0; k<7; k++)\n+ Assert.assertEquals(1, db.get(new int[]{i,j,k}), 0);\n+ }\n+\nprivate DenseBlock getDenseBlock2(ValueType vt) {\nreturn DenseBlockFactory.createDenseBlock(vt, new int[] {3,5});\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/tugraz/sysds/test/tensor/DenseBlockConstructionTest.java", "new_path": "src/test/java/org/tugraz/sysds/test/tensor/DenseBlockConstructionTest.java", "diff": "@@ -55,6 +55,21 @@ public class DenseBlockConstructionTest\nDenseBlockFactory.getDenseBlockType(db));\n}\n+ @Test\n+ public void testMetaDenseBlock2Bool() throws Exception {\n+ DenseBlock db = getDenseBlock2(ValueType.BOOLEAN);\n+ Assert.assertEquals(3, db.numRows());\n+ Assert.assertEquals(true, db.isNumeric());\n+ Assert.assertEquals(true, db.isContiguous());\n+ Assert.assertEquals(1, db.numBlocks());\n+ Assert.assertEquals(3, db.blockSize());\n+ Assert.assertEquals(3*5, db.size());\n+ Assert.assertTrue(3*5 <= db.capacity());\n+ Assert.assertEquals(0, db.countNonZeros());\n+ Assert.assertEquals(DenseBlock.Type.DRB,\n+ DenseBlockFactory.getDenseBlockType(db));\n+ }\n+\n@Test\npublic void testMetaDenseBlock3FP32() throws Exception {\nDenseBlock db = getDenseBlock3(ValueType.FP32);\n@@ -85,6 +100,21 @@ public class DenseBlockConstructionTest\nDenseBlockFactory.getDenseBlockType(db));\n}\n+ @Test\n+ public void testMetaDenseBlock3Bool() throws Exception {\n+ DenseBlock db = getDenseBlock3(ValueType.BOOLEAN);\n+ Assert.assertEquals(3, db.numRows());\n+ Assert.assertEquals(true, db.isNumeric());\n+ Assert.assertEquals(true, db.isContiguous());\n+ Assert.assertEquals(1, db.numBlocks());\n+ Assert.assertEquals(3, db.blockSize());\n+ Assert.assertEquals(3*5*7, db.size());\n+ Assert.assertTrue(3*5*7 <= db.capacity());\n+ Assert.assertEquals(0, db.countNonZeros());\n+ Assert.assertEquals(DenseBlock.Type.DRB,\n+ DenseBlockFactory.getDenseBlockType(db));\n+ }\n+\nprivate DenseBlock getDenseBlock2(ValueType vt) {\nreturn DenseBlockFactory.createDenseBlock(vt, new int[] {3,5});\n}\n" } ]
Java
Apache License 2.0
apache/systemds
New boolean dense block (using Java's BitSet), incl. tests
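The DenseBlockBool diff above stores every cell as a single bit in a java.util.BitSet: any non-zero value written to the block is mapped to true, reads come back as 0/1 doubles, and the non-zero count reduces to BitSet.cardinality(). The following minimal sketch illustrates only that underlying idea under those assumptions; the class and method names are invented for the example and do not mirror the actual DenseBlock API in the commit.

```java
import java.util.BitSet;

// Minimal sketch of a BitSet-backed boolean matrix block (illustrative only,
// not the SystemDS DenseBlockBool class).
public class BitSetBlockSketch {
    private final int rows, cols;
    private final BitSet data;

    public BitSetBlockSketch(int rows, int cols) {
        this.rows = rows;
        this.cols = cols;
        // one bit per cell, all initialized to false (i.e., 0)
        this.data = new BitSet(rows * cols);
    }

    // Any non-zero value is stored as true, zero as false.
    public void set(int r, int c, double v) {
        data.set(r * cols + c, v != 0);
    }

    // Read back as 0/1 doubles, matching the numeric view of a boolean block.
    public double get(int r, int c) {
        return data.get(r * cols + c) ? 1 : 0;
    }

    // Non-zero counting falls out of BitSet.cardinality() without a manual scan.
    public long countNonZeros() {
        return data.cardinality();
    }

    public static void main(String[] args) {
        BitSetBlockSketch b = new BitSetBlockSketch(3, 5);
        b.set(0, 0, 7.3);   // non-zero maps to 1
        b.set(2, 4, -1.0);  // sign is irrelevant, only zero vs. non-zero matters
        System.out.println(b.get(0, 0));        // 1.0
        System.out.println(b.get(1, 1));        // 0.0
        System.out.println(b.countNonZeros());  // 2
    }
}
```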
49,738
23.11.2018 17:44:43
-3,600
2ca465b18c33948dfb4ecda8fce63e9fdc5ed4eb
[MINOR] Minor fixes of the readme file
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -21,11 +21,11 @@ limitations under the License.\n# SystemDS\n-SystemDS is an open source system for the end-to-end data science lifecycle from data integration, cleaning, and feature engineering, over efficient, local and distributed ML model training, to deployment and serving. To this end, we aim to provide a stack of declarative languages with R-like syntax for (1) the different tasks of the data-science lifecycle, and (2) users with different expertise. These high-level scripts are compiled into hybrid execution plans of local, in-memory CPU and GPU operations, as well as distributed operations on Apache Spark. In contrast to existing systems - that either provide homogeneous tensors or 2D Datasets - and in order to serve the entire data science lifecycle, the underlying data model are DataTensors, i.e., tensors (multi-dimensional arrays) whose first dimension may have a heterogeneous and nested schema.\n+SystemDS is a versatile system for the end-to-end data science lifecycle from data integration, cleaning, and feature engineering, over efficient, local and distributed ML model training, to deployment and serving. To this end, we aim to provide a stack of declarative languages with R-like syntax for (1) the different tasks of the data-science lifecycle, and (2) users with different expertise. These high-level scripts are compiled into hybrid execution plans of local, in-memory CPU and GPU operations, as well as distributed operations on Apache Spark. In contrast to existing systems - that either provide homogeneous tensors or 2D Datasets - and in order to serve the entire data science lifecycle, the underlying data model are DataTensors, i.e., tensors (multi-dimensional arrays) whose first dimension may have a heterogeneous and nested schema.\n-**Status:** SystemDS is still in pre-alpha status. The original code base was forked from [**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2019. We will continue to support linear algebra programs over matrices, while we replace the underlying data model and compiler, as well as substantially extend the supported functionalities.\n+**Status:** SystemDS is still in pre-alpha status. The original code base was forked from [**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2019. We will continue to support linear algebra programs over matrices, while replacing the underlying data model and compiler, as well as substantially extending the supported functionalities. Until the first release, you can build your own snapshot via Apache Maven:\n+```\n+mvn -DskipTests clean package\n+```\n**Documentation:** [SystemDS Documentation](http://apache.github.io/systemml/dml-language-reference)<br/>\n-\n-**Build:** [![Build Status](https://travis-ci.org/apache/systemml.svg?branch=master)](https://travis-ci.org/apache/systemml)<br/>\n-A snapshot of SystemDS can be build via a basic `mvn package` in the SystemDS root directory.\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Minor fixes of the readme file