author (int64, 658 to 755k) | date (string, length 19) | timezone (int64, -46,800 to 43.2k) | hash (string, length 40) | message (string, length 5 to 490) | mods (list) | language (string, 20 classes) | license (string, 3 classes) | repo (string, length 5 to 68) | original_message (string, length 12 to 491) |
---|---|---|---|---|---|---|---|---|---|
49,738 | 01.04.2020 19:18:42 | -7,200 | 809c02580570e136e2d150400abf184cbff01a74 | [MINOR] Fix typo in internal builtin function names (sigmoid) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -154,7 +154,7 @@ public enum Builtins {\nSAMPLE(\"sample\", false),\nSD(\"sd\", false),\nSEQ(\"seq\", false),\n- SIGMOD(\"sigmoid\", true), // 1 / (1 + exp(-X))\n+ SIGMOID(\"sigmoid\", true), // 1 / (1 + exp(-X))\nSIGN(\"sign\", false),\nSIN(\"sin\", false),\nSINH(\"sinh\", false),\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix typo in internal builtin function names (sigmoid) |
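For context, the renamed enum entry implements the logistic function noted in the inline comment of the diff. A minimal, hypothetical Java sketch of that element-wise operation (a standalone illustration with made-up names, not SystemDS internals):

```java
import java.util.Arrays;

public class SigmoidExample {
    // Element-wise sigmoid: 1 / (1 + exp(-X)), matching the Builtins comment.
    static double[] sigmoid(double[] x) {
        double[] out = new double[x.length];
        for (int i = 0; i < x.length; i++)
            out[i] = 1.0 / (1.0 + Math.exp(-x[i]));
        return out;
    }

    public static void main(String[] args) {
        // sigmoid(0) = 0.5; values are symmetric around it
        System.out.println(Arrays.toString(sigmoid(new double[]{-1.0, 0.0, 1.0})));
    }
}
```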
49,738 | 09.04.2020 19:25:37 | -7,200 | 395b5d08b75ac0cd71421ac83f7792ff02e2086a | [MINOR] Fix unnecessarily detailed test output in tests/functions/misc | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/ConditionalValidateTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/ConditionalValidateTest.java",
"diff": "@@ -132,8 +132,7 @@ public class ConditionalValidateTest extends AutomatedTestBase\nHDFSTool.deleteFileIfExistOnHDFS(input+\".mtd\");\nHDFSTool.deleteFileIfExistOnHDFS(input+\"b.mtd\");\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nthrow new RuntimeException(ex);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/ExistsVariableTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/ExistsVariableTest.java",
"diff": "@@ -68,7 +68,7 @@ public class ExistsVariableTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nString param = pos ? \"1\" : \"0\";\nfullDMLScriptName = HOME + testName + \".dml\";\n- programArgs = new String[]{\"-explain\", \"-stats\", \"-args\", param, output(\"R\") };\n+ programArgs = new String[]{\"-stats\", \"-args\", param, output(\"R\") };\n//run script and compare output\nrunTest(true, false, null, -1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/FunctionInExpressionTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/FunctionInExpressionTest.java",
"diff": "@@ -87,7 +87,7 @@ public class FunctionInExpressionTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testName + \".dml\";\n- programArgs = new String[]{\"-explain\", \"-stats\", \"-args\", output(\"R\") };\n+ programArgs = new String[]{\"-stats\", \"-args\", output(\"R\") };\nfullRScriptName = HOME + testName + \".R\";\nrCmd = getRCmd(expectedDir());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/FunctionInliningTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/FunctionInliningTest.java",
"diff": "@@ -94,7 +94,7 @@ public class FunctionInliningTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[] {\"-explain\",\"-args\",String.valueOf(rows),\n+ programArgs = new String[] {\"-args\",String.valueOf(rows),\nString.valueOf(cols), String.valueOf(val), output(\"Rout\") };\nOptimizerUtils.ALLOW_INTER_PROCEDURAL_ANALYSIS = IPA;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/FunctionNotFoundTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/FunctionNotFoundTest.java",
"diff": "@@ -57,7 +57,7 @@ public class FunctionNotFoundTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testName + \".dml\";\n- programArgs = new String[]{\"-explain\", \"-stats\"};\n+ programArgs = new String[]{};\n//run script and compare output\nrunTest(true, error, DMLException.class, -1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/IPAConstantFoldingScalarVariablePropagationTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/IPAConstantFoldingScalarVariablePropagationTest.java",
"diff": "@@ -101,7 +101,7 @@ public class IPAConstantFoldingScalarVariablePropagationTest extends AutomatedTe\nloadTestConfiguration(config);\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{\"-stats\", \"-explain\", \"recompile_hops\"};\n+ programArgs = new String[]{\"-stats\"};\nOptimizerUtils.IPA_NUM_REPETITIONS = IPA_SECOND_CHANCE ? 2 : 1;\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\nrtplatform = ExecMode.HYBRID;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/IPANnzPropagationTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/IPANnzPropagationTest.java",
"diff": "@@ -65,7 +65,7 @@ public class IPANnzPropagationTest extends AutomatedTestBase\nloadTestConfiguration(config);\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{\"-stats\", \"-explain\", \"recompile_hops\"};\n+ programArgs = new String[]{\"-stats\"};\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\nrtplatform = ExecMode.HYBRID;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/ListAndStructTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/ListAndStructTest.java",
"diff": "@@ -171,7 +171,7 @@ public class ListAndStructTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{ \"-stats\",\"-explain\",\"-args\", output(\"R\") };\n+ programArgs = new String[]{ \"-args\", output(\"R\") };\nfullRScriptName = HOME + testname + \".R\";\nrCmd = getRCmd(expectedDir());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/PrintMatrixTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/PrintMatrixTest.java",
"diff": "@@ -43,10 +43,6 @@ public class PrintMatrixTest extends AutomatedTestBase\nrunTest( TEST_NAME1, false );\n}\n- /**\n- *\n- * @param testName\n- */\nprivate void runTest( String testName, boolean exceptionExpected )\n{\nTestConfiguration config = getTestConfiguration(TEST_NAME1);\n@@ -54,7 +50,7 @@ public class PrintMatrixTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\n- programArgs = new String[]{\"-explain\"};\n+ programArgs = new String[]{\"\"};\n//run tests\nrunTest(true, exceptionExpected, DMLException.class, -1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/RemoveUnnecessaryCTableTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/RemoveUnnecessaryCTableTest.java",
"diff": "@@ -140,7 +140,7 @@ public class RemoveUnnecessaryCTableTest extends AutomatedTestBase\nOptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = true;\nArrayList<String> programArgsBuilder = new ArrayList<>(\n- Arrays.asList(\"-explain\", \"-stats\", \"-args\" ));\n+ Arrays.asList(\"-stats\", \"-args\" ));\n// Get Matrix Input\nif (A != null){\nprogramArgsBuilder.add(input(\"A\"));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/RewriteListTsmmCVTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/RewriteListTsmmCVTest.java",
"diff": "@@ -90,7 +90,7 @@ public class RewriteListTsmmCVTest extends AutomatedTestBase\n//lineage tracing with and without reuse\nReuseCacheType reuse = lineage ? ReuseCacheType.REUSE_FULL : ReuseCacheType.NONE;\n- programArgs = new String[]{\"-explain\",\"recompile_runtime\", \"-lineage\", reuse.name().toLowerCase(),\n+ programArgs = new String[]{\"-lineage\", reuse.name().toLowerCase(),\n\"-stats\",\"-args\", String.valueOf(rows), String.valueOf(cols), output(\"S\") };\nfullRScriptName = HOME + testname + \".R\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/RewriteSlicedMatrixMultTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/RewriteSlicedMatrixMultTest.java",
"diff": "@@ -91,7 +91,7 @@ public class RewriteSlicedMatrixMultTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{ \"-stats\",\"-args\",\n+ programArgs = new String[]{ \"-args\",\ninput(\"A\"), input(\"B\"), output(\"R\") };\nfullRScriptName = HOME + testname + \".R\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/SizePropagationTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/SizePropagationTest.java",
"diff": "@@ -114,7 +114,7 @@ public class SizePropagationTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{ \"-explain\", \"hops\", \"-stats\",\"-args\", String.valueOf(N), output(\"R\") };\n+ programArgs = new String[]{ \"hops\", \"-stats\",\"-args\", String.valueOf(N), output(\"R\") };\nOptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\nrtplatform = ExecMode.HYBRID;\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/misc/ZeroRowsColsMatrixTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/misc/ZeroRowsColsMatrixTest.java",
"diff": "@@ -180,7 +180,7 @@ public class ZeroRowsColsMatrixTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + TEST_NAME + \".dml\";\n- programArgs = new String[]{\"-explain\",\"recompile_runtime\",\"-args\", String.valueOf(dim),\n+ programArgs = new String[]{\"-args\", String.valueOf(dim),\nString.valueOf(emptyRet).toUpperCase(), output(\"R\")};\nfullRScriptName = HOME + TEST_NAME +\".R\";\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix unnecessarily detailed test output in tests/functions/misc |
49,741 | 09.04.2020 19:29:55 | -7,200 | 39c56541ca83ea36093384220d19a31b5578537e | [MINOR] Extended JMLC API (handling of pinned variables)
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/jmlc/PreparedScript.java",
"new_path": "src/main/java/org/apache/sysds/api/jmlc/PreparedScript.java",
"diff": "@@ -72,7 +72,7 @@ public class PreparedScript implements ConfigurableAPI\n//input/output specification\nprivate final HashSet<String> _inVarnames;\nprivate final HashSet<String> _outVarnames;\n- private final HashMap<String,Data> _inVarReuse;\n+ private final LocalVariableMap _inVarReuse;\n//internal state (reused)\nprivate final Program _prog;\n@@ -91,7 +91,7 @@ public class PreparedScript implements ConfigurableAPI\n_vars.setRegisteredOutputs(that._outVarnames);\n_inVarnames = that._inVarnames;\n_outVarnames = that._outVarnames;\n- _inVarReuse = new HashMap<>(that._inVarReuse);\n+ _inVarReuse = new LocalVariableMap(that._inVarReuse);\n_dmlconf = that._dmlconf;\n_cconf = that._cconf;\n}\n@@ -115,7 +115,7 @@ public class PreparedScript implements ConfigurableAPI\nCollections.addAll(_inVarnames, inputs);\n_outVarnames = new HashSet<>();\nCollections.addAll(_outVarnames, outputs);\n- _inVarReuse = new HashMap<>();\n+ _inVarReuse = new LocalVariableMap();\n//attach registered outputs (for dynamic recompile)\n_vars.setRegisteredOutputs(_outVarnames);\n@@ -416,6 +416,15 @@ public class PreparedScript implements ConfigurableAPI\n_vars.removeAll();\n}\n+ /**\n+ * Remove all references to pinned variables from this script.\n+ * Note: this *does not* remove the underlying data. It merely\n+ * removes a reference to it from this prepared script. This is\n+ * useful if you want to maintain an independent cache of weights\n+ * and allow the JVM to garbage collect under memory pressure.\n+ */\n+ public void clearPinnedData() { _inVarReuse.removeAll(); }\n+\n/**\n* Executes the prepared script over the bound inputs, creating the\n* result variables according to bound and registered outputs.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/LocalVariableMap.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/LocalVariableMap.java",
"diff": "@@ -94,6 +94,10 @@ public class LocalVariableMap implements Cloneable\nlocalMap.putAll(vals);\n}\n+ public void putAll(LocalVariableMap vars) {\n+ putAll(vars.localMap);\n+ }\n+\npublic Data remove( String name ) {\nreturn localMap.remove( name );\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Extended JMLC API (handling of pinned variables)
Closes #835. |
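A hedged usage sketch of the pinned-variable workflow this commit extends, assuming the JMLC `Connection`/`PreparedScript` API referenced in the diff (`prepareScript`, `setMatrix`, `executeScript`); the DML string, shapes, and values are illustrative only:

```java
import org.apache.sysds.api.jmlc.Connection;
import org.apache.sysds.api.jmlc.PreparedScript;

public class JMLCPinnedDataExample {
    public static void main(String[] args) throws Exception {
        String dml = "R = X %*% W;";
        try (Connection conn = new Connection()) {
            PreparedScript ps = conn.prepareScript(dml,
                new String[]{"X", "W"}, new String[]{"R"});
            // pin the weights so they are reused across repeated executions
            ps.setMatrix("W", new double[][]{{1.0}, {2.0}}, true);
            // bind a per-request input (not pinned)
            ps.setMatrix("X", new double[][]{{3.0, 4.0}}, false);
            double[][] r = ps.executeScript().getMatrix("R");
            System.out.println(r[0][0]); // 3*1 + 4*2 = 11.0
            // drop this script's references to pinned data; the underlying
            // objects stay alive only if the application still holds them
            ps.clearPinnedData();
        }
    }
}
```

Replacing the `HashMap` with a `LocalVariableMap` keeps pinned inputs in the same variable-map abstraction the runtime uses elsewhere, which is what makes the bulk `removeAll()` behind `clearPinnedData()` straightforward.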
49,714 | 09.04.2020 19:55:39 | -7,200 | a3c0cce761c855b034302e1f0871d68d8eccd089 | Fix named arguments in MNIST LeNet example script
This fix backports the fix from into the merged SystemDS code line.
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_lenet.dml",
"new_path": "scripts/nn/examples/mnist_lenet.dml",
"diff": "@@ -118,13 +118,13 @@ train = function(matrix[double] X, matrix[double] Y,\nstride, stride, pad, pad)\noutr1 = relu::forward(outc1)\n[outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ strideh=2, stridew=2, padh=0, padw=0)\n## layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,\nstride, stride, pad, pad)\noutr2 = relu::forward(outc2)\n[outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ strideh=2, stridew=2, padh=0, padw=0)\n## layer 3: affine3 -> relu3 -> dropout\nouta3 = affine::forward(outp2, W3, b3)\noutr3 = relu::forward(outa3)\n@@ -166,13 +166,13 @@ train = function(matrix[double] X, matrix[double] Y,\n[doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)\n## layer 2: conv2 -> relu2 -> pool2\ndoutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ strideh=2, stridew=2, padh=0, padw=0)\ndoutc2 = relu::backward(doutr2, outc2)\n[doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,\nHoutp1, Woutp1, Hf, Wf, stride, stride, pad, pad)\n## layer 1: conv1 -> relu1 -> pool1\ndoutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ strideh=2, stridew=2, padh=0, padw=0)\ndoutc1 = relu::backward(doutr1, outc1)\n[dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, W1, b1, C, Hin, Win,\nHf, Wf, stride, stride, pad, pad)\n@@ -264,13 +264,13 @@ predict = function(matrix[double] X, int C, int Hin, int Win,\npad, pad)\noutr1 = relu::forward(outc1)\n[outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ strideh=2, stridew=2, padh=0, padw=0)\n## layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,\nstride, stride, pad, pad)\noutr2 = relu::forward(outc2)\n[outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2, pad=0, pad=0)\n+ strideh=2, stridew=2, padh=0, padw=0)\n## layer 3: affine3 -> relu3\nouta3 = affine::forward(outp2, W3, b3)\noutr3 = relu::forward(outa3)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-2533] Fix named arguments in MNIST LeNet example script
This fix backports the fix from #866 into the merged SystemDS code line.
Closes #867. |
49,706 | 10.04.2020 17:16:53 | -7,200 | 4e0edec2d19fb28b59b830ac5dee479c8596041f | [MINOR] Remove Travis Testing
The Travis testing is removed since our testing is now executed
using GitHub Actions.
The Travis testing only covered the component tests.
Closes | [
{
"change_type": "DELETE",
"old_path": ".travis.yml",
"new_path": null,
"diff": "-#\n-# Licensed to the Apache Software Foundation (ASF) under one or more\n-# contributor license agreements. See the NOTICE file distributed with\n-# this work for additional information regarding copyright ownership.\n-# The ASF licenses this file to You under the Apache License, Version 2.0\n-# (the \"License\"); you may not use this file except in compliance with\n-# the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-#\n-\n-dist: xenial\n-\n-language: java\n-\n-jdk:\n- - openjdk8\n-\n-addons:\n-# apt:\n-# sources:\n-# - r-packages-trusty\n-# packages:\n-# - r-base-dev\n-\n-cache:\n- apt: true\n- directories:\n-# caching .m2 causes an error loading hadoop-yarn-common-2.6.0.jar. Not sure why.\n-# - ${HOME}/.m2\n-# - ${HOME}/R\n-# - /usr/local/lib/R/site-library\n-\n-install:\n-# - sudo Rscript ./src/test/scripts/installDependencies.R\n-\n-before_script:\n-# this is not needed anymore since adding authentication object in code for running hadoop/spark local\n-# - chmod -R 755 *\n-\n-script:\n- # - mvn clean verify jacoco:report coveralls:report\n- - mvn test-compile\n- - mvn surefire:test -Dtest=org.apache.sysds.test.component.**\n-\n-after_success:\n-# - mvn test jacoco:report coveralls:report\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Remove Travis Testing
The Travis testing is removed since our testing is now executed
using GitHub Actions.
The Travis testing only covered the component tests.
Closes #884. |
49,738 | 10.04.2020 18:39:39 | -7,200 | 3fd87695591bdba30964db995066472d148b252e | [MINOR] Fix opcodes for lineage-based reuse (corrupted by rework) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -30,7 +30,7 @@ import java.util.ArrayList;\npublic class LineageCacheConfig {\nprivate static final String[] REUSE_OPCODES = new String[] {\n- \"tmm\", \"ba+*\", \"*\", \"/\", \"+\", \"nrow\", \"ncol\",\n+ \"tsmm\", \"ba+*\", \"*\", \"/\", \"+\", \"nrow\", \"ncol\",\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\"\n};\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix opcodes for lineage-based reuse (corrupted by rework) |
49,738 | 11.04.2020 23:12:37 | -7,200 | 84ef71326c6781bad4ed9b39a210ee2cd4a6d4bd | Fix libsvm reader/writer integration and correctness
This patch fixes a correctness issue in the libsvm local writers, which
incorrectly shifted the output indexes twice for sparse inputs.
Furthermore, the libsvm local readers were not fully integrated in all
code path yet.
The distributed libsvm readers/writers still remain to be integrated. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/io/MatrixReaderFactory.java",
"new_path": "src/main/java/org/apache/sysds/runtime/io/MatrixReaderFactory.java",
"diff": "@@ -28,32 +28,30 @@ import org.apache.sysds.runtime.matrix.data.MatrixBlock;\npublic class MatrixReaderFactory\n{\n-\npublic static MatrixReader createMatrixReader(InputInfo iinfo)\n{\nMatrixReader reader = null;\n+ boolean par = ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS);\n+ boolean mcsr = MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR;\n- if( iinfo == InputInfo.TextCellInputInfo || iinfo == InputInfo.MatrixMarketInputInfo )\n- {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderTextCellParallel( iinfo );\n- else\n- reader = new ReaderTextCell( iinfo );\n+ if( iinfo == InputInfo.TextCellInputInfo || iinfo == InputInfo.MatrixMarketInputInfo ) {\n+ reader = (par & mcsr) ?\n+ new ReaderTextCellParallel(iinfo) : new ReaderTextCell(iinfo);\n}\n- else if( iinfo == InputInfo.CSVInputInfo )\n- {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderTextCSVParallel(new FileFormatPropertiesCSV());\n- else\n- reader = new ReaderTextCSV(new FileFormatPropertiesCSV());\n+ else if( iinfo == InputInfo.CSVInputInfo ) {\n+ reader = (par & mcsr) ?\n+ new ReaderTextCSVParallel(new FileFormatPropertiesCSV()) :\n+ new ReaderTextCSV(new FileFormatPropertiesCSV());\n+ }\n+ else if( iinfo == InputInfo.LIBSVMInputInfo) {\n+ reader = (par & mcsr) ?\n+ new ReaderTextLIBSVMParallel() : new ReaderTextLIBSVM();\n}\nelse if( iinfo == InputInfo.BinaryCellInputInfo )\nreader = new ReaderBinaryCell();\nelse if( iinfo == InputInfo.BinaryBlockInputInfo ) {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_BINARYFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderBinaryBlockParallel( false );\n- else\n- reader = new ReaderBinaryBlock( false );\n+ reader = (par & mcsr) ?\n+ new ReaderBinaryBlockParallel(false) : new ReaderBinaryBlock(false);\n}\nelse {\nthrow new DMLRuntimeException(\"Failed to create matrix reader for unknown input info: \"\n@@ -71,32 +69,29 @@ public class MatrixReaderFactory\nMatrixReader reader = null;\nInputInfo iinfo = props.inputInfo;\n+ boolean par = ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS);\n+ boolean mcsr = MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR;\nif( iinfo == InputInfo.TextCellInputInfo || iinfo == InputInfo.MatrixMarketInputInfo ) {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderTextCellParallel( iinfo );\n- else\n- reader = new ReaderTextCell( iinfo );\n+ reader = (par & mcsr) ?\n+ new ReaderTextCellParallel(iinfo) : new ReaderTextCell(iinfo);\n}\nelse if( iinfo == InputInfo.CSVInputInfo ) {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderTextCSVParallel( props.formatProperties!=null ? (FileFormatPropertiesCSV)props.formatProperties : new FileFormatPropertiesCSV());\n- else\n- reader = new ReaderTextCSV( props.formatProperties!=null ? 
(FileFormatPropertiesCSV)props.formatProperties : new FileFormatPropertiesCSV());\n+ reader = (par & mcsr) ?\n+ new ReaderTextCSVParallel( props.formatProperties!=null ?\n+ (FileFormatPropertiesCSV)props.formatProperties : new FileFormatPropertiesCSV()) :\n+ new ReaderTextCSV( props.formatProperties!=null ?\n+ (FileFormatPropertiesCSV)props.formatProperties : new FileFormatPropertiesCSV());\n}\nelse if( iinfo == InputInfo.LIBSVMInputInfo) {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderTextLIBSVMParallel();\n- else\n- reader = new ReaderTextLIBSVM();\n+ reader = (par & mcsr) ?\n+ new ReaderTextLIBSVMParallel() : new ReaderTextLIBSVM();\n}\nelse if( iinfo == InputInfo.BinaryCellInputInfo )\nreader = new ReaderBinaryCell();\nelse if( iinfo == InputInfo.BinaryBlockInputInfo ) {\n- if( ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_BINARYFORMATS) && MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR )\n- reader = new ReaderBinaryBlockParallel( props.localFS );\n- else\n- reader = new ReaderBinaryBlock( props.localFS );\n+ reader = (par & mcsr) ?\n+ new ReaderBinaryBlockParallel(props.localFS) : new ReaderBinaryBlock(props.localFS);\n}\nelse {\nthrow new DMLRuntimeException(\"Failed to create matrix reader for unknown input info: \"\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/io/ReaderTextLIBSVMParallel.java",
"new_path": "src/main/java/org/apache/sysds/runtime/io/ReaderTextLIBSVMParallel.java",
"diff": "@@ -297,7 +297,7 @@ public class ReaderTextLIBSVMParallel extends MatrixReader\nlong rlen, long clen, int splitCount)\n{\n_split = split;\n- _splitoffsets = offsets; // new SplitOffsetInfos(offsets);\n+ _splitoffsets = offsets;\n_informat = informat;\n_job = job;\n_dest = dest;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/io/WriterTextLIBSVM.java",
"new_path": "src/main/java/org/apache/sysds/runtime/io/WriterTextLIBSVM.java",
"diff": "@@ -38,7 +38,6 @@ public class WriterTextLIBSVM extends MatrixWriter\n}\n- @SuppressWarnings(\"resource\")\n@Override\npublic final void writeMatrixToHDFS(MatrixBlock src, String fname, long rlen, long clen, int blen, long nnz, boolean diag)\nthrows IOException, DMLRuntimeException\n@@ -99,7 +98,6 @@ public class WriterTextLIBSVM extends MatrixWriter\ndouble label = (sblock!=null) ?\nsblock.get(i, clen-1) : 0;\nsb.append(label);\n- sb.append(IOUtilFunctions.LIBSVM_DELIM);\nif( sblock!=null && i<sblock.numRows() && !sblock.isEmpty(i) ) {\nint pos = sblock.pos(i);\n@@ -108,9 +106,10 @@ public class WriterTextLIBSVM extends MatrixWriter\ndouble[] avals = sblock.values(i);\n// append sparse row\nfor( int k=pos; k<pos+alen; k++ ) {\n- if( aix[k] != clen-1 )\n- appendIndexValLibsvm(sb, aix[k]+1, avals[k]);\n+ if( aix[k]!=clen-1 ) {\nsb.append(IOUtilFunctions.LIBSVM_DELIM);\n+ appendIndexValLibsvm(sb, aix[k], avals[k]);\n+ }\n}\n}\n// write the string row\n@@ -125,14 +124,13 @@ public class WriterTextLIBSVM extends MatrixWriter\n// append the class label as the 1st column\ndouble label = src.getValueDenseUnsafe(i, clen-1);\nsb.append(label);\n- sb.append(IOUtilFunctions.LIBSVM_DELIM);\n// append dense row\nfor( int j=0; j<clen-1; j++ ) {\ndouble val = src.getValueDenseUnsafe(i, j);\nif( val != 0 ) {\n- appendIndexValLibsvm(sb, j, val);\nsb.append(IOUtilFunctions.LIBSVM_DELIM);\n+ appendIndexValLibsvm(sb, j, val);\n}\n}\n// write the string row\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/data/misc/NoRenameTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/data/misc/NoRenameTest.java",
"diff": "@@ -87,17 +87,17 @@ public class NoRenameTest extends AutomatedTestBase\npublic void testTextmmSparseSinglenode() {\nrunRenameTest(\"mm\", true, ExecMode.SINGLE_NODE);\n}\n-//\n-// @Test\n-// public void testTextlibsvmDenseSinglenode() {\n-// runRenameTest(\"libsvm\", false, ExecMode.SINGLE_NODE);\n-// }\n-//\n-// @Test\n-// public void testTextlibsvmSparseSinglenode() {\n-// runRenameTest(\"libsvm\", true, ExecMode.SINGLE_NODE);\n-// }\n-//\n+\n+ @Test\n+ public void testTextlibsvmDenseSinglenode() {\n+ runRenameTest(\"libsvm\", false, ExecMode.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void testTextlibsvmSparseSinglenode() {\n+ runRenameTest(\"libsvm\", true, ExecMode.SINGLE_NODE);\n+ }\n+\n@Test\npublic void testBinaryDenseSinglenode() {\nrunRenameTest(\"binary\", false, ExecMode.SINGLE_NODE);\n@@ -137,16 +137,16 @@ public class NoRenameTest extends AutomatedTestBase\npublic void testTextmmSparseHybrid() {\nrunRenameTest(\"mm\", true, ExecMode.HYBRID);\n}\n-//\n-// @Test\n-// public void testTextlibsvmDenseHybrid() {\n-// runRenameTest(\"libsvm\", false, ExecMode.HYBRID);\n-// }\n-//\n-// @Test\n-// public void testTextlibsvmSparseHybrid() {\n-// runRenameTest(\"libsvm\", true, ExecMode.HYBRID);\n-// }\n+\n+ @Test\n+ public void testTextlibsvmDenseHybrid() {\n+ runRenameTest(\"libsvm\", false, ExecMode.HYBRID);\n+ }\n+\n+ @Test\n+ public void testTextlibsvmSparseHybrid() {\n+ runRenameTest(\"libsvm\", true, ExecMode.HYBRID);\n+ }\n@Test\npublic void testBinaryDenseHybrid() {\n@@ -187,7 +187,7 @@ public class NoRenameTest extends AutomatedTestBase\npublic void testTextmmSparseSpark() {\nrunRenameTest(\"mm\", true, ExecMode.SPARK);\n}\n-//\n+\n// @Test\n// public void testTextlibsvmDenseSpark() {\n// runRenameTest(\"libsvm\", false, ExecMode.SPARK);\n@@ -197,7 +197,7 @@ public class NoRenameTest extends AutomatedTestBase\n// public void testTextlibsvmSparseSpark() {\n// runRenameTest(\"libsvm\", true, ExecMode.SPARK);\n// }\n-\n+//\n@Test\npublic void testBinaryDenseSpark() {\nrunRenameTest(\"binary\", false, ExecMode.SPARK);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/data/NoRenameTest1.dml",
"new_path": "src/test/scripts/functions/data/NoRenameTest1.dml",
"diff": "#-------------------------------------------------------------\nA = read($1, format=$2, rows=$3, cols=$4);\n-print(nrow(A))\n+print(nrow(A));\nwrite(A, $5, format=$2);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-52] Fix libsvm reader/writer integration and correctness
This patch fixes a correctness issue in the libsvm local writers, which
incorrectly shifted the output indexes twice for sparse inputs.
Furthermore, the libsvm local readers were not fully integrated in all
code path yet.
The distributed libsvm readers/writers still remain to be integrated. |
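To make the index-shift fix concrete, here is a hypothetical standalone paraphrase of the patched sparse-row path in `WriterTextLIBSVM`. In the real code the helper `appendIndexValLibsvm` presumably applies the 1-based libsvm shift internally, which is why passing `aix[k]+1` shifted indexes twice; the sketch inlines that shift exactly once and emits a single delimiter per entry instead of an extra one after the label:

```java
public class LibsvmRowExample {
    static String serializeSparseRow(double label, int[] aix, double[] avals, int clen) {
        StringBuilder sb = new StringBuilder();
        sb.append(label); // label first, with no unconditional trailing delimiter
        for (int k = 0; k < aix.length; k++) {
            if (aix[k] != clen - 1) {   // last column stores the label
                sb.append(' ');          // one delimiter directly before each pair
                sb.append(aix[k] + 1).append(':').append(avals[k]); // 1-based shift, once
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // non-zeros in columns 0 and 2; column 3 (clen-1) holds the label
        System.out.println(serializeSparseRow(1.0,
            new int[]{0, 2}, new double[]{0.5, 2.0}, 4)); // -> 1.0 1:0.5 3:2.0
    }
}
```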
49,708 | 12.04.2020 20:43:47 | -7,200 | 0dae42705f91b00abc03be09d810b3a9286338c5 | Initial design ONNX graph importer
Since ONNX does support conditional operators (loop, if), I've tailored
the design towards a command-line tool that generates a DML script as
discussed.
AMLS project SS2020.
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/Tasks.txt",
"new_path": "docs/Tasks.txt",
"diff": "@@ -205,6 +205,7 @@ SYSTEMDS-250 Extended Slice Finding\nSYSTEMDS-260 Misc Tools\n* 261 Stable marriage algorithm OK\n* 262 Data augmentation tool for data cleaning OK\n+ * 263 ONNX graph importer/exporter\nSYSTEMDS-270 Compressed Matrix Blocks\n* 271 Reintroduce compressed matrix blocks from SystemML OK\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/onnx-systemds-design.md",
"diff": "+# onnx-systemds\n+\n+A tool for importing/exporting [ONNX](https://github.com/onnx/onnx/blob/master/docs/IR.md) graphs into/from SystemDS DML scripts.\n+\n+\n+## Goals\n+\n+* Support for importing [operators of the ONNX base definition](https://github.com/onnx/onnx/blob/master/docs/Operators.md)\n+\n+* Support for importing [operators defined by ONNX-ML](https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md)\n+\n+* Support for exporting DML script to ONNX graphs\n+\n+## Limitations\n+\n+* Not able to support all data types / operators as they are not currently supported by SystemDS\n+\n+\n+\n+## Suggested Implementation\n+\n+Since the ONNX specification includes the conditional operators [loop](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop) and [if](https://github.com/onnx/onnx/blob/master/docs/Operators.md#If), a direct conversion from ONNX to the internal HOP might not be ideal.\n+\n+Hence my suggested implementation is a dedicated tool invoked from command line which generates DML scripts. This also enables optimizations performed by the compiler at both graph and program level.\n+\n+### Example Call\n+\n+```bash\n+onnx-systemds model.onx --out model_script.dml\n+```\n+\n+\n+### Tooling\n+\n+* Due to the availability of a [Python API](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md) for ONNX, I would suggest implementing the tool in Python\n+* Another advantage of Python is good support for template engines e.g. [Jinja](https://jinja.palletsprojects.com/en/2.11.x/)\n+* An implementation could use templates for various operators which are then combined into a script\n+\n+### Implementation Details\n+\n+ONNX is a [serialized graph](https://github.com/onnx/onnx/blob/master/docs/IR.md#graphs) structured as a sorted list of nodes that form a DAG (directed acyclic graph).\n+\n+1. Loading in the serialized structure\n+2. [Checking](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md#checking-an-onnx-model) model and [converting](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md#converting-version-of-an-onnx-model-within-default-domain-aionnx) models to a common version\n+3. Building a simple internal graph structure (for arbitrary operators)\n+4. Generating the DML script while traversing this graph (provided information in doc_strings and other description variables are added as comments to improve human-readability of the generated script)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-263] Initial design ONNX graph importer
Since ONNX does support conditional operators (loop, if), I've tailored
the design towards a command-line tool that generates a DML script as
discussed.
AMLS project SS2020.
Closes #885. |
49,738 | 13.04.2020 18:39:47 | -7,200 | 5f1cdf367b0616359461f1fd198898d59f0598a4 | Extended eval lazy function compilation (nested builtins)
This patch extends the lazy function compilation of dml-bodied builtin
functions called through eval. We now support nested dml-bodied function
calls (e.g., eval -> lm -> lmDS/lmCG) which is crucial for generic
primitives of hyper-parameter optimization and the enumeration of
cleaning pipelines. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/DMLProgram.java",
"new_path": "src/main/java/org/apache/sysds/parser/DMLProgram.java",
"diff": "@@ -131,6 +131,10 @@ public class DMLProgram\nreturn ret;\n}\n+ public boolean containsFunctionStatementBlock(String name) {\n+ return _functionBlocks.containsKey(name);\n+ }\n+\npublic void addFunctionStatementBlock(String fname, FunctionStatementBlock fsb) {\n_functionBlocks.put(fname, fsb);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/DMLTranslator.java",
"new_path": "src/main/java/org/apache/sysds/parser/DMLTranslator.java",
"diff": "@@ -412,7 +412,7 @@ public class DMLTranslator\nthrows LanguageException, DMLRuntimeException, LopsException, HopsException\n{\n// constructor resets the set of registered functions\n- Program rtprog = new Program();\n+ Program rtprog = new Program(prog);\n// for all namespaces, translate function statement blocks into function program blocks\nfor (String namespace : prog.getNamespaces().keySet()){\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/dml/DmlSyntacticValidator.java",
"new_path": "src/main/java/org/apache/sysds/parser/dml/DmlSyntacticValidator.java",
"diff": "@@ -610,18 +610,20 @@ public class DmlSyntacticValidator implements DmlListener {\n}\n}\n- public static FunctionStatementBlock loadAndParseBuiltinFunction(String name, String namespace, DataType dt) {\n+ public static Map<String,FunctionStatementBlock> loadAndParseBuiltinFunction(String name, String namespace) {\nif( !Builtins.contains(name, true, false) ) {\nthrow new DMLRuntimeException(\"Function \"\n+ DMLProgram.constructFunctionKey(namespace, name)+\" is not a builtin function.\");\n}\n//load and add builtin DML-bodied functions (via tmp validator instance)\n+ //including nested builtin function calls unless already loaded\nDmlSyntacticValidator tmp = new DmlSyntacticValidator(\nnew CustomErrorListener(), new HashMap<>(), namespace, new HashSet<>());\nString filePath = Builtins.getFilePath(name);\nDMLProgram prog = tmp.parseAndAddImportedFunctions(namespace, filePath, null);\n- String name2 = Builtins.getInternalFName(name, dt);\n- return prog.getNamedFunctionStatementBlocks().get(name2);\n+\n+ //construct output map of all functions\n+ return prog.getNamedFunctionStatementBlocks();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/Program.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/Program.java",
"diff": "@@ -33,7 +33,8 @@ public class Program\n{\npublic static final String KEY_DELIM = \"::\";\n- public ArrayList<ProgramBlock> _programBlocks;\n+ private DMLProgram _prog;\n+ private ArrayList<ProgramBlock> _programBlocks;\nprivate HashMap<String, HashMap<String,FunctionProgramBlock>> _namespaceFunctions;\n@@ -43,6 +44,19 @@ public class Program\n_programBlocks = new ArrayList<>();\n}\n+ public Program(DMLProgram prog) {\n+ this();\n+ setDMLProg(prog);\n+ }\n+\n+ public void setDMLProg(DMLProgram prog) {\n+ _prog = prog;\n+ }\n+\n+ public DMLProgram getDMLProg() {\n+ return _prog;\n+ }\n+\npublic synchronized void addFunctionProgramBlock(String namespace, String fname, FunctionProgramBlock fpb) {\nif( fpb == null )\nthrow new DMLRuntimeException(\"Invalid null function program block.\");\n@@ -124,7 +138,7 @@ public class Program\npublic Program clone(boolean deep) {\nif( deep )\nthrow new NotImplementedException();\n- Program ret = new Program();\n+ Program ret = new Program(_prog);\n//shallow copy of all program blocks\nret._programBlocks.addAll(_programBlocks);\n//shallow copy of all functions, except external\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/paramserv/ParamservUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/paramserv/ParamservUtils.java",
"diff": "@@ -252,7 +252,7 @@ public class ParamservUtils {\n}\nprivate static Program copyProgramFunctions(Program prog) {\n- Program newProg = new Program();\n+ Program newProg = new Program(prog.getDMLProg());\nprog.getFunctionProgramBlocks()\n.forEach((func, pb) -> putFunction(newProg, copyFunction(func, pb)));\nreturn newProg;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/EvalNaryCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/EvalNaryCPInstruction.java",
"diff": "@@ -21,7 +21,10 @@ package org.apache.sysds.runtime.instructions.cp;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import java.util.Map;\n+import java.util.Map.Entry;\n+import org.apache.sysds.common.Builtins;\nimport org.apache.sysds.common.Types.DataType;\nimport org.apache.sysds.conf.ConfigurationManager;\nimport org.apache.sysds.hops.rewrite.ProgramRewriter;\n@@ -69,11 +72,11 @@ public class EvalNaryCPInstruction extends BuiltinNaryCPInstruction {\n//2. copy the created output matrix\nMatrixObject outputMO = new MatrixObject(ec.getMatrixObject(output.getName()));\n- //3. lazy loading of dml-bodied builtin functions\n+ //3. lazy loading of dml-bodied builtin functions (incl. rename\n+ // of function name to dml-bodied builtin scheme (data-type-specific)\nif( !ec.getProgram().containsFunctionProgramBlock(null, funcName) ) {\n- FunctionProgramBlock fpb = compileFunctionProgramBlock(\n- funcName, boundInputs[0].getDataType(), ec.getProgram());\n- ec.getProgram().addFunctionProgramBlock(null, funcName, fpb);\n+ compileFunctionProgramBlock(funcName, boundInputs[0].getDataType(), ec.getProgram());\n+ funcName = Builtins.getInternalFName(funcName, boundInputs[0].getDataType());\n}\n//4. call the function\n@@ -101,32 +104,51 @@ public class EvalNaryCPInstruction extends BuiltinNaryCPInstruction {\nec.setVariable(output.getName(), outputMO);\n}\n- private static FunctionProgramBlock compileFunctionProgramBlock(String name, DataType dt, Program prog) {\n+ private static void compileFunctionProgramBlock(String name, DataType dt, Program prog) {\n//load builtin file and parse function statement block\n- FunctionStatementBlock fsb = DmlSyntacticValidator\n- .loadAndParseBuiltinFunction(name, DMLProgram.DEFAULT_NAMESPACE, dt);\n+ Map<String,FunctionStatementBlock> fsbs = DmlSyntacticValidator\n+ .loadAndParseBuiltinFunction(name, DMLProgram.DEFAULT_NAMESPACE);\n+ if( fsbs.isEmpty() )\n+ throw new DMLRuntimeException(\"Failed to compile function '\"+name+\"'.\");\n- // validate function (could be avoided for performance because known builtin functions)\n- DMLProgram dmlp = fsb.getDMLProg();\n+ // prepare common data structures, including a consolidated dml program\n+ // to facilitate function validation which tries to inline lazily loaded\n+ // and existing functions.\n+ DMLProgram dmlp = (prog.getDMLProg() != null) ? 
prog.getDMLProg() :\n+ fsbs.get(Builtins.getInternalFName(name, dt)).getDMLProg();\n+ for( Entry<String,FunctionStatementBlock> fsb : fsbs.entrySet() ) {\n+ if( !dmlp.containsFunctionStatementBlock(fsb.getKey()) )\n+ dmlp.addFunctionStatementBlock(fsb.getKey(), fsb.getValue());\n+ fsb.getValue().setDMLProg(dmlp);\n+ }\nDMLTranslator dmlt = new DMLTranslator(dmlp);\n+ ProgramRewriter rewriter = new ProgramRewriter(true, false);\n+ ProgramRewriter rewriter2 = new ProgramRewriter(false, true);\n+\n+ // validate functions, in two passes for cross references\n+ for( FunctionStatementBlock fsb : fsbs.values() ) {\ndmlt.liveVariableAnalysisFunction(dmlp, fsb);\ndmlt.validateFunction(dmlp, fsb);\n+ }\n// compile hop dags, rewrite hop dags and compile lop dags\n+ for( FunctionStatementBlock fsb : fsbs.values() ) {\ndmlt.constructHops(fsb);\n- ProgramRewriter rewriter = new ProgramRewriter(true, false);\nrewriter.rewriteHopDAGsFunction(fsb, false); //rewrite and merge\nDMLTranslator.resetHopsDAGVisitStatus(fsb);\nrewriter.rewriteHopDAGsFunction(fsb, true); //rewrite and split\nDMLTranslator.resetHopsDAGVisitStatus(fsb);\n- ProgramRewriter rewriter2 = new ProgramRewriter(false, true);\nrewriter2.rewriteHopDAGsFunction(fsb, true);\nDMLTranslator.resetHopsDAGVisitStatus(fsb);\nDMLTranslator.refreshMemEstimates(fsb);\ndmlt.constructLops(fsb);\n+ }\n// compile runtime program\n- return (FunctionProgramBlock) dmlt.createRuntimeProgramBlock(\n- prog, fsb, ConfigurationManager.getDMLConfig());\n+ for( Entry<String,FunctionStatementBlock> fsb : fsbs.entrySet() ) {\n+ FunctionProgramBlock fpb = (FunctionProgramBlock) dmlt\n+ .createRuntimeProgramBlock(prog, fsb.getValue(), ConfigurationManager.getDMLConfig());\n+ prog.addFunctionProgramBlock(null, fsb.getKey(), fpb);\n+ }\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"diff": "@@ -118,6 +118,16 @@ public class MLContextTest extends MLContextTestBase {\nml.setExplain(false);\n}\n+ @Test\n+ public void testExecuteEvalNestedBuiltinTest() {\n+ System.out.println(\"MLContextTest - eval builtin test\");\n+ setExpectedStdOut(\"TRUE\");\n+ ml.setExplain(true);\n+ Script script = dmlFromFile(baseDirectory + File.separator + \"eval4-nested_builtin-test.dml\");\n+ ml.execute(script);\n+ ml.setExplain(false);\n+ }\n+\n@Test\npublic void testCreateDMLScriptBasedOnStringAndExecute() {\nSystem.out.println(\"MLContextTest - create DML script based on string and execute\");\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/mlcontext/eval4-nested_builtin-test.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=100, cols=10, seed=37)\n+y = rand(rows=100, cols=1, seed=38)\n+\n+F = cbind(as.frame(\"lm\"),as.frame(\"mlogreg\"));\n+ix = ifelse(sum(X)>1, 1, 2);\n+R1 = eval(as.scalar(F[1,ix]), X, y, 0, 1e-7, 1e-7, 0, FALSE); #calls lm->lmDS\n+R2 = lmCG(X=X, y=y, verbose=FALSE);\n+\n+print(sum(abs(R1-R2)<1e-6)==ncol(X));\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-291] Extended eval lazy function compilation (nested builtins)
This patch extends the lazy function compilation of dml-bodied builtin
functions called through eval. We now support nested dml-bodied function
calls (e.g., eval -> lm -> lmDS/lmCG) which is crucial for generic
primitives of hyper-parameter optimization and the enumeration of
cleaning pipelines. |
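A condensed Java sketch of how this path can be exercised via MLContext, mirroring the added test script (it assumes a local Spark session and the `ScriptFactory.dml` API used throughout the MLContext test suite; the inline DML follows the `lm(X, y, icpt, reg, tol, maxi, verbose)` argument order from the test):

```java
import org.apache.spark.sql.SparkSession;
import org.apache.sysds.api.mlcontext.MLContext;
import org.apache.sysds.api.mlcontext.Script;
import static org.apache.sysds.api.mlcontext.ScriptFactory.dml;

public class EvalNestedBuiltinExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
            .master("local").appName("eval-example").getOrCreate();
        MLContext ml = new MLContext(spark);
        // eval("lm", ...) triggers lazy compilation of lm, which in turn
        // calls the dml-bodied builtins lmDS/lmCG
        Script script = dml(
            "X = rand(rows=100, cols=10, seed=37);\n"
          + "y = rand(rows=100, cols=1, seed=38);\n"
          + "R = eval(\"lm\", X, y, 0, 1e-7, 1e-7, 0, FALSE);\n"
          + "print(sum(R));");
        ml.execute(script);
        ml.close();
        spark.stop();
    }
}
```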
49,706 | 13.04.2020 18:44:24 | -7,200 | 4bbba4051e63e67a3a2366ee3f414f01cc7d0b93 | Travis remove badge
Missed that the badge was still in the README.
This is now removed; furthermore, the task associated with Travis has
been modified to reflect that it is removed, and why.
Closes | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -27,9 +27,7 @@ limitations under the License.\n## Status\n-[](https://travis-ci.org/apache/systemml)\n[](https://opensource.org/licenses/Apache-2.0)\n-\n\n\n\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/Tasks.txt",
"new_path": "docs/Tasks.txt",
"diff": "@@ -9,7 +9,7 @@ SYSTEMDS-10 Compiler Rework / Misc\n* 12 Remove unnecessary HOP/LOP indirections OK\n* 13 Refactoring test cases into component/integration OK\n* 14 Complete removal of external functions from all scripts\n- * 15 Travis integration w/ subset of tests OK\n+ * 15 Travis integration w/ subset of tests OK (removed for Github Actions)\n* 16 Remove instruction patching\n* 17 Refactoring of program block hierarchy OK\n* 18 Improve API for new dml-bodied builtin functions OK\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-15] Travis remove badge
Missed that the badge was still in the README.
This is now removed; furthermore, the task associated with Travis has
been modified to reflect that it is removed, and why.
Closes #886. |
49,738 | 21.04.2020 21:43:08 | -7,200 | 0426099b80fb451239e7d9a39bdacf752c79c80e | [MINOR] Removal of remaining pydml test files and tests | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"diff": "@@ -1063,22 +1063,6 @@ public class MLContextTest extends MLContextTestBase {\nml.execute(script);\n}\n- @Test(expected = MLContextException.class)\n- public void testJavaRDDBadMetadataPYDML() {\n- System.out.println(\"MLContextTest - JavaRDD<String> bad metadata PYML\");\n-\n- List<String> list = new ArrayList<>();\n- list.add(\"1,2,3\");\n- list.add(\"4,5,6\");\n- list.add(\"7,8,9\");\n- JavaRDD<String> javaRDD = sc.parallelize(list);\n-\n- MatrixMetadata mm = new MatrixMetadata(1, 1, 9);\n-\n- Script script = dml(\"print('sum: ' + sum(M))\").in(\"M\", javaRDD, mm);\n- ml.execute(script);\n- }\n-\n@Test\npublic void testRDDGoodMetadataDML() {\nSystem.out.println(\"MLContextTest - RDD<String> good metadata DML\");\n@@ -1273,28 +1257,6 @@ public class MLContextTest extends MLContextTestBase {\nml.execute(script);\n}\n- @Test\n- public void testDataFrameSumPYDMLVectorWithIDColumnNoFormatSpecified() {\n- System.out.println(\"MLContextTest - DataFrame sum PYDML, vector with ID column, no format specified\");\n-\n- List<Tuple2<Double, Vector>> list = new ArrayList<>();\n- list.add(new Tuple2<>(1.0, Vectors.dense(1.0, 2.0, 3.0)));\n- list.add(new Tuple2<>(2.0, Vectors.dense(4.0, 5.0, 6.0)));\n- list.add(new Tuple2<>(3.0, Vectors.dense(7.0, 8.0, 9.0)));\n- JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);\n-\n- JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());\n- List<StructField> fields = new ArrayList<>();\n- fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));\n- fields.add(DataTypes.createStructField(\"C1\", new VectorUDT(), true));\n- StructType schema = DataTypes.createStructType(fields);\n- Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);\n-\n- Script script = dml(\"print('sum: ' + sum(M))\").in(\"M\", dataFrame);\n- setExpectedStdOut(\"sum: 45.0\");\n- ml.execute(script);\n- }\n-\n@Test\npublic void testDataFrameSumDMLVectorWithNoIDColumnNoFormatSpecified() {\nSystem.out.println(\"MLContextTest - DataFrame sum DML, vector with no ID column, no format specified\");\n@@ -1316,27 +1278,6 @@ public class MLContextTest extends MLContextTestBase {\nml.execute(script);\n}\n- @Test\n- public void testDataFrameSumPYDMLVectorWithNoIDColumnNoFormatSpecified() {\n- System.out.println(\"MLContextTest - DataFrame sum PYDML, vector with no ID column, no format specified\");\n-\n- List<Vector> list = new ArrayList<>();\n- list.add(Vectors.dense(1.0, 2.0, 3.0));\n- list.add(Vectors.dense(4.0, 5.0, 6.0));\n- list.add(Vectors.dense(7.0, 8.0, 9.0));\n- JavaRDD<Vector> javaRddVector = sc.parallelize(list);\n-\n- JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());\n- List<StructField> fields = new ArrayList<>();\n- fields.add(DataTypes.createStructField(\"C1\", new VectorUDT(), true));\n- StructType schema = DataTypes.createStructType(fields);\n- Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);\n-\n- Script script = dml(\"print('sum: ' + sum(M))\").in(\"M\", dataFrame);\n- setExpectedStdOut(\"sum: 45.0\");\n- ml.execute(script);\n- }\n-\n@Test\npublic void testDisplayBooleanDML() {\nSystem.out.println(\"MLContextTest - display boolean DML\");\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/scripts/functions/misc/PackageFunCall1.pydml",
"new_path": null,
"diff": "-#-------------------------------------------------------------\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-#\n-#-------------------------------------------------------------\n-\n-\n-source(\"PackageFunLib.pydml\") as Other\n-dummy = Other.hello()\n-\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/scripts/functions/misc/PackageFunCall2.pydml",
"new_path": null,
"diff": "-#-------------------------------------------------------------\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-#\n-#-------------------------------------------------------------\n-\n-\n-setwd(\".\")\n-source(\"PackageFunLib.pydml\") as Other\n-dummy = Other.hello()\n-\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/scripts/functions/misc/PackageFunLib.pydml",
"new_path": null,
"diff": "-#-------------------------------------------------------------\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-#\n-#-------------------------------------------------------------\n-\n-\n-def hello():\n- print(\"Hi!\")\n-\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Removal of remaining pydml test files and tests |
49,746 | 22.04.2020 20:21:33 | -19,080 | dbe00f140a63329e26f269890828679cd81f2a97 | [MINOR][BUILD] Package python dist. with mvn `-P` flag
Maven build
- Maven packaging with new `-P distribution` option
- New pypi release instructions and pre_setup.py
Testing
- Run all tests with single command in workflow
Doc
- Updates setup.py to read `README.md` to remove
redundancy
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/python.yml",
"new_path": ".github/workflows/python.yml",
"diff": "@@ -57,7 +57,7 @@ jobs:\n${{ runner.os }}-maven-\n- name: Maven clean & package\n- run: mvn clean package\n+ run: mvn clean package -P distribution\n- name: Setup Python\nuses: actions/setup-python@v1\n@@ -86,16 +86,5 @@ jobs:\ncd src/main/python\nls tests/\necho \"Beginning tests\"\n- python tests/test_matrix_binary_op.py\n- echo \"Exit Status: \" $?\n- sleep 3\n- python tests/test_matrix_aggregations.py\n- echo \"Exit Status: \" $?\n- sleep 3\n- python tests/test_l2svm.py\n- echo \"Exit Status: \" $?\n- python tests/test_lineagetrace.py\n- sleep 3\n- echo \"Exit Status: \" $?\n- python tests/test_l2svm_lineage.py\n+ python -m unittest tests/*.py\necho \"Exit Status: \" $?\n"
},
{
"change_type": "DELETE",
"old_path": "src/main/python/BUILD_INSTRUCTIONS.md",
"new_path": null,
"diff": "-<!--\n-{% comment %}\n-Licensed to the Apache Software Foundation (ASF) under one or more\n-contributor license agreements. See the NOTICE file distributed with\n-this work for additional information regarding copyright ownership.\n-The ASF licenses this file to you under the Apache License, Version 2.0\n-(the \"License\"); you may not use this file except in compliance with\n-the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software\n-distributed under the License is distributed on an \"AS IS\" BASIS,\n-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-See the License for the specific language governing permissions and\n-limitations under the License.\n-{% end comment %}\n--->\n-\n-# Build instructions\n-\n-## Basic steps\n-\n-The following steps have to be done for both cases\n-\n-- Build SystemDS with maven first `mvn package -DskipTests`, with the working directory being `SYSTEMDS_ROOT` (Root directory of SystemDS)\n-- `cd` to this folder (basically `SYSTEMDS_ROOT/src/main/python`\n-\n-### Building package\n-\n-If we want to build the package for uploading to the repository via `python3 -m twine upload --repository-url [URL] dist/*` (will be automated in the future)\n-\n-- Run `create_python_dist.py`\n-\n-```bash\n-python3 create_python_dist.py\n-```\n-\n-- now in the `./dist` directory there will exist the source distribution `systemds-VERSION.tar.gz` and the wheel distribution `systemds-VERSION-py3-none-any.whl`, with `VERSION` being the current version number\n-- Finished. We can now upload it with `python3 -m twine upload --repository-url [URL] dist/*`\n-\n-### Building for development\n-\n-If we want to build the package just locally for development, the following steps will suffice\n-\n-- Run `pre_setup.py` (this will copy `lib` and `systemds-VERSION-SNAPSHOT.jar`)\n-\n-```bash\n-python3 create_python_dist.py\n-```\n-\n-- Finished. Test by running a test case of your choice:\n-\n-```bash\n-python3 tests/test_matrix_binary_op.py\n-```\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/PUBLISH_INSTRUCTIONS.md",
"diff": "+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% end comment %}\n+-->\n+\n+# Publishing Instructions\n+\n+## Building SystemDS jar (with dependency jars)\n+\n+The following steps have to be done for both the cases\n+\n+- Build SystemDS with maven first `mvn package -P distribution`, with the working\n+ directory being `SYSTEMDS_ROOT` (Root directory of SystemDS)\n+- `cd` to this folder (basically `SYSTEMDS_ROOT/src/main/python`)\n+\n+## Building python package\n+\n+- Run `create_python_dist.py`\n+\n+```bash\n+python3 create_python_dist.py\n+```\n+\n+- now in the `./dist` directory there will exist the source distribution `systemds-VERSION.tar.gz`\n+ and the wheel distribution `systemds-VERSION-py3-none-any.whl`, with `VERSION` being the current version number\n+\n+## Publishing package\n+\n+If we want to build the package for uploading to the repository via `python3 -m twine upload dist/*`\n+ (will be automated in the future)\n+\n+- Install twine with `pip install --upgrade twine`\n+\n+- Follow the instructions from the [Guide](https://packaging.python.org/tutorials/packaging-projects/)\n+ 1. Create an API-Token in the account (leave the page open or copy the token, it will only be shown once)\n+ 2. Execute the command `python3 -m twine upload dist/*`\n+ - Optional: `pip install keyrings.alt`(use with caution!) if you get `UserWarning: No recommended backend was available.`\n+ 3. Username is `__token__`\n+ 4. Password is the created API-Token **with** `pypi-` prefix\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/README.md",
"new_path": "src/main/python/README.md",
"diff": "-# Python Readme\n+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% end comment %}\n+-->\n+# SystemDS\n+\n+\nThis package provides a Pythonic interface for working with SystemDS.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/docs/source/install.rst",
"new_path": "src/main/python/docs/source/install.rst",
"diff": "@@ -68,7 +68,7 @@ Then to build the system you do the following\n- Clone the Git Repository: https://github.com/apache/systemml.git\n- Open an terminal at the root of the repository.\n-- Package the Java code using the ``mvn package`` command\n+- Package the Java code using the ``mvn package -P distribution`` command\n- ``cd src/main/python`` to point at the root of the SystemDS Python library.\n- Copy `jars` with ``python pre_setup.py``\n- Install with ``pip install .``\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/pre_setup.py",
"new_path": "src/main/python/pre_setup.py",
"diff": "import os\nimport shutil\nimport fnmatch\n+from zipfile import ZipFile\n+this_path = os.path.dirname(os.path.realpath(__file__))\npython_dir = 'systemds'\njava_dir = 'systemds-java'\n-java_dir_full_path = os.path.join(python_dir, java_dir)\n+java_dir_full_path = os.path.join(this_path, python_dir, java_dir)\nif os.path.exists(java_dir_full_path):\nshutil.rmtree(java_dir_full_path, True)\n-os.mkdir(java_dir_full_path)\n-root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\n-JAR_FILE_NAMES = ['systemds-*-SNAPSHOT.jar', 'systemds-*.jar', 'systemds-*-SNAPSHOT-extra', 'systemds-*-extra.jar']\n-EXCLUDE_JAR_FILE_NAMES = ['systemds-*javadoc.jar', 'systemds-*sources.jar', 'systemds-*standalone.jar',\n- 'systemds-*lite.jar']\n+root_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_path)))\n+\n+# temporary directory for unzipping of bin zip\n+TMP_DIR = os.path.join(this_path, 'tmp')\n+if os.path.exists(TMP_DIR):\n+ shutil.rmtree(TMP_DIR, True)\n+os.mkdir(TMP_DIR)\n+\n+SYSTEMDS_BIN = 'systemds-*-SNAPSHOT-bin.zip'\nfor file in os.listdir(os.path.join(root_dir, 'target')):\n- if any((fnmatch.fnmatch(file, valid_name) for valid_name in JAR_FILE_NAMES)) and not any(\n- (fnmatch.fnmatch(file, exclude_name) for exclude_name in EXCLUDE_JAR_FILE_NAMES)):\n- shutil.copyfile(os.path.join(root_dir, 'target', file), os.path.join(java_dir_full_path, file))\n- if file == 'lib':\n- shutil.copytree(os.path.join(root_dir, 'target', file), os.path.join(java_dir_full_path, file))\n+ if fnmatch.fnmatch(file, SYSTEMDS_BIN):\n+ new_path = os.path.join(TMP_DIR, file)\n+ shutil.copyfile(os.path.join(root_dir, 'target', file), new_path)\n+ extract_dir = os.path.join(TMP_DIR)\n+ with ZipFile(new_path, 'r') as zip:\n+ for f in zip.namelist():\n+ split_path = os.path.split(os.path.dirname(f))\n+ if split_path[1] == 'lib':\n+ zip.extract(f, TMP_DIR)\n+ unzipped_dir_name = file.rsplit('.', 1)[0]\n+ shutil.copytree(os.path.join(TMP_DIR, unzipped_dir_name), java_dir_full_path)\n+ if os.path.exists(TMP_DIR):\n+ shutil.rmtree(TMP_DIR, True)\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\nshutil.copyfile(os.path.join(root_dir, 'LICENSE'), 'LICENSE')\nshutil.copyfile(os.path.join(root_dir, 'NOTICE'), 'NOTICE')\n+\n+# delete old build and dist path\n+build_path = os.path.join(this_path, 'build')\n+if os.path.exists(build_path):\n+ shutil.rmtree(build_path, True)\n+dist_path = os.path.join(this_path, 'dist')\n+if os.path.exists(dist_path):\n+ shutil.rmtree(dist_path, True)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/setup.py",
"new_path": "src/main/python/setup.py",
"diff": "#\n#-------------------------------------------------------------\n-from __future__ import print_function\nimport sys\nfrom setuptools import find_packages, setup\nimport time\n@@ -37,7 +36,7 @@ ARTIFACT_VERSION = __project_version__\nARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split(\"-\")[0]\nREQUIRED_PACKAGES = [\n- 'numpy >= 1.8.2', # we might want to recheck this version\n+ 'numpy >= 1.8.2',\n'py4j >= 0.10.0'\n]\n@@ -49,19 +48,7 @@ setup(\nname=ARTIFACT_NAME,\nversion=ARTIFACT_VERSION_SHORT,\ndescription='SystemDS is a distributed and declarative machine learning platform.',\n- long_description='''SystemDS is a versatile system for the end-to-end data science lifecycle from data integration,\n- cleaning, and feature engineering, over efficient, local and distributed ML model training,\n- to deployment and serving.\n- To facilitate this, bindings from different languages and different system abstractions provide help for:\n- (1) the different tasks of the data-science lifecycle, and\n- (2) users with different expertise.\n-\n-\n- These high-level scripts are compiled into hybrid execution plans of local, in-memory CPU and GPU operations,\n- as well as distributed operations on Apache Spark. In contrast to existing systems\n- - that either provide homogeneous tensors or 2D Datasets - and in order to serve the entire\n- data science lifecycle, the underlying data model are DataTensors, i.e.,\n- tensors (multi-dimensional arrays) whose first dimension may have a heterogeneous and nested schema.''',\n+ long_description=open('README.md').read(),\nurl='https://github.com/apache/systemml',\nauthor='SystemDS',\nauthor_email='[email protected]',\n@@ -79,6 +66,7 @@ setup(\n'Programming Language :: Python :: 3',\n'Programming Language :: Python :: 3.6',\n'Programming Language :: Python :: 3.7',\n+ 'Programming Language :: Python :: 3.8',\n'Programming Language :: Python :: 3 :: Only',\n'Topic :: Scientific/Engineering :: Mathematics',\n'Topic :: Software Development :: Libraries :: Python Modules',\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][BUILD] Package python dist. with mvn `-P` flag
Maven build
- Maven packaging with new `-P distribution` option
- New pypi release instructions and pre_setup.py
Testing
- Run all tests with a single command in the workflow
Doc
- Updates setup.py to read `README.md`, removing redundancy
Closes #873. |
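For illustration, a minimal Java sketch of the selective-extraction step that the reworked pre_setup.py performs in Python (copying only the lib/ entries out of the packaged bin zip); the zip path, output directory, and class name here are assumptions for the sketch, not values taken from the patch:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Enumeration;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

public class ExtractLib {
    public static void main(String[] args) throws IOException {
        Path zip = Paths.get("target/systemds-bin.zip"); // assumed file name
        Path outDir = Paths.get("tmp");                  // assumed output dir
        try (ZipFile zf = new ZipFile(zip.toFile())) {
            Enumeration<? extends ZipEntry> entries = zf.entries();
            while (entries.hasMoreElements()) {
                ZipEntry e = entries.nextElement();
                Path p = Paths.get(e.getName());
                // keep only regular files whose parent directory is 'lib'
                if (e.isDirectory() || p.getParent() == null
                    || !"lib".equals(p.getParent().getFileName().toString()))
                    continue;
                Path target = outDir.resolve(p);
                Files.createDirectories(target.getParent());
                try (InputStream in = zf.getInputStream(e)) {
                    Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING);
                }
            }
        }
    }
}
```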
49,689 | 23.04.2020 22:12:25 | -7,200 | 12f69c7c111cbe5e0ccc35d8bac58674b06480af | [SYSTEMDS-333,337] Improved lineage cache eviction
This patch improves lineage cache eviction by taking into account actual
execution time of instructions/functions. The ordering policy is still
LRU. Future commits will bring a better approach to estimating spilling time
and new eviction policies.
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/Tasks.txt",
"new_path": "docs/Tasks.txt",
"diff": "@@ -244,7 +244,11 @@ SYSTEMDS-320 Merge SystemDS into Apache SystemML OK\nSYSTEMDS-330 Lineage Tracing, Reuse and Integration\n* 331 Cache and reuse scalar outputs (instruction and multi-level) OK\n* 332 Parfor integration with multi-level reuse OK\n- * 333 Use exact execution time for cost based eviction\n+ * 333 Improve cache eviction with actual compute time OK\n+ * 334 Cache scalars only with atleast one matrix inputs\n+ * 335 Weighted eviction policy (function of size & computetime)\n+ * 336 Better use of cache status to handle multithreading\n+ * 337 Adjust disk I/O speed by recording actual time taken OK\nSYSTEMDS-340 Compiler Assisted Lineage Caching and Reuse\n* 341 Finalize unmarking of loop dependent operations\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/BasicProgramBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/BasicProgramBlock.java",
"diff": "@@ -108,14 +108,17 @@ public class BasicProgramBlock extends ProgramBlock\n//statement-block-level, lineage-based reuse\nLineageItem[] liInputs = null;\n+ long t0 = 0;\nif (_sb != null && LineageCacheConfig.isMultiLevelReuse()) {\nliInputs = LineageItemUtils.getLineageItemInputstoSB(_sb.getInputstoSB(), ec);\nList<String> outNames = _sb.getOutputNamesofSB();\n- if( LineageCache.reuse(outNames, _sb.getOutputsofSB(), outNames.size(), liInputs, _sb.getName(), ec) ) {\n+ if(liInputs != null && LineageCache.reuse(outNames, _sb.getOutputsofSB(),\n+ outNames.size(), liInputs, _sb.getName(), ec) ) {\nif( DMLScript.STATISTICS )\nLineageCacheStatistics.incrementSBHits();\nreturn;\n}\n+ t0 = System.nanoTime();\n}\n//actual instruction execution\n@@ -123,6 +126,7 @@ public class BasicProgramBlock extends ProgramBlock\n//statement-block-level, lineage-based caching\nif (_sb != null && liInputs != null)\n- LineageCache.putValue(_sb.getOutputsofSB(), liInputs, _sb.getName(), ec);\n+ LineageCache.putValue(_sb.getOutputsofSB(), liInputs, _sb.getName(),\n+ ec, System.nanoTime()-t0);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ProgramBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ProgramBlock.java",
"diff": "@@ -43,6 +43,7 @@ import org.apache.sysds.runtime.instructions.cp.IntObject;\nimport org.apache.sysds.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysds.runtime.instructions.cp.StringObject;\nimport org.apache.sysds.runtime.lineage.LineageCache;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysds.utils.Statistics;\n@@ -217,10 +218,11 @@ public abstract class ProgramBlock implements ParseInfo\n// try to reuse instruction result from lineage cache\nif( !LineageCache.reuse(tmp, ec) ) {\n// process actual instruction\n+ long et0 = !ReuseCacheType.isNone() ? System.nanoTime() : 0;\ntmp.processInstruction(ec);\n// cache result\n- LineageCache.putValue(tmp, ec);\n+ LineageCache.putValue(tmp, ec, System.nanoTime()-et0);\n// post-process instruction (debug)\ntmp.postprocessInstruction( ec );\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/FunctionCallCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/FunctionCallCPInstruction.java",
"diff": "@@ -40,6 +40,7 @@ import org.apache.sysds.runtime.io.IOUtilFunctions;\nimport org.apache.sysds.runtime.lineage.Lineage;\nimport org.apache.sysds.runtime.lineage.LineageCache;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.lineage.LineageCacheStatistics;\nimport org.apache.sysds.runtime.lineage.LineageItem;\nimport org.apache.sysds.runtime.lineage.LineageItemUtils;\n@@ -172,6 +173,7 @@ public class FunctionCallCPInstruction extends CPInstruction {\nfn_ec.setVariables(functionVariables);\nfn_ec.setLineage(lineage);\n// execute the function block\n+ long t0 = !ReuseCacheType.isNone() ? System.nanoTime() : 0;\ntry {\nfpb._functionName = this._functionName;\nfpb._namespace = this._namespace;\n@@ -184,6 +186,7 @@ public class FunctionCallCPInstruction extends CPInstruction {\nString fname = DMLProgram.constructFunctionKey(_namespace, _functionName);\nthrow new DMLRuntimeException(\"error executing function \" + fname, e);\n}\n+ long t1 = !ReuseCacheType.isNone() ? System.nanoTime() : 0;\n// cleanup all returned variables w/o binding\nHashSet<String> expectRetVars = new HashSet<>();\n@@ -226,8 +229,8 @@ public class FunctionCallCPInstruction extends CPInstruction {\n//update lineage cache with the functions outputs\nif( DMLScript.LINEAGE && LineageCacheConfig.isMultiLevelReuse() ) {\n- LineageCache.putValue(fpb.getOutputParams(),\n- liInputs, getCacheFunctionName(_functionName, fpb), ec);\n+ LineageCache.putValue(fpb.getOutputParams(), liInputs,\n+ getCacheFunctionName(_functionName, fpb), ec, t1-t0);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -23,7 +23,6 @@ import org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.common.Types.DataType;\nimport org.apache.sysds.common.Types.ValueType;\nimport org.apache.sysds.hops.OptimizerUtils;\n-import org.apache.sysds.hops.cost.CostEstimatorStaticRuntime;\nimport org.apache.sysds.lops.MMTSJ.MMTSJType;\nimport org.apache.sysds.parser.DataIdentifier;\nimport org.apache.sysds.parser.Statement;\n@@ -39,6 +38,7 @@ import org.apache.sysds.runtime.instructions.cp.Data;\nimport org.apache.sysds.runtime.instructions.cp.MMTSJCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ParameterizedBuiltinCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.LineageCacheStatus;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.matrix.data.InputInfo;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\n@@ -60,7 +60,8 @@ public class LineageCache\nprivate static final HashSet<LineageItem> _removelist = new HashSet<>();\nprivate static final double CACHE_FRAC = 0.05; // 5% of JVM heap size\nprivate static final long CACHE_LIMIT; //limit in bytes\n- private static String outdir = null;\n+ private static final boolean DEBUG = false;\n+ private static String _outdir = null;\nprivate static long _cachesize = 0;\nprivate static Entry _head = null;\nprivate static Entry _end = null;\n@@ -79,9 +80,7 @@ public class LineageCache\n// a complex workflow of operations that accesses the cache as well.\n- ///////////////////////////////////////\n- // Public Cache API (keep it narrow) //\n- ///////////////////////////////////////\n+ //--------------- PUBLIC CACHE API (keep it narrow) ----------------//\npublic static boolean reuse(Instruction inst, ExecutionContext ec) {\nif (ReuseCacheType.isNone())\n@@ -130,7 +129,8 @@ public class LineageCache\nreturn reuse;\n}\n- public static boolean reuse(List<String> outNames, List<DataIdentifier> outParams, int numOutputs, LineageItem[] liInputs, String name, ExecutionContext ec)\n+ public static boolean reuse(List<String> outNames, List<DataIdentifier> outParams,\n+ int numOutputs, LineageItem[] liInputs, String name, ExecutionContext ec)\n{\nif( !LineageCacheConfig.isMultiLevelReuse())\nreturn false;\n@@ -216,31 +216,29 @@ public class LineageCache\n//NOTE: safe to pin the object in memory as coming from CPInstruction\n//TODO why do we need both of these public put methods\n- public static void putMatrix(Instruction inst, ExecutionContext ec) {\n+ public static void putMatrix(Instruction inst, ExecutionContext ec, long computetime) {\nif (LineageCacheConfig.isReusable(inst, ec) ) {\nLineageItem item = ((LineageTraceable) inst).getLineageItems(ec)[0];\n//This method is called only to put matrix value\nMatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction) inst).output);\nsynchronized( _cache ) {\n- putIntern(item, DataType.MATRIX, mo.acquireReadAndRelease(),\n- null, getRecomputeEstimate(inst, ec));\n+ putIntern(item, DataType.MATRIX, mo.acquireReadAndRelease(), null, computetime);\n}\n}\n}\n- public static void putValue(Instruction inst, ExecutionContext ec) {\n+ public static void putValue(Instruction inst, ExecutionContext ec, long computetime) {\nif (ReuseCacheType.isNone())\nreturn;\nif (LineageCacheConfig.isReusable(inst, ec) ) {\n//if (!isMarkedForCaching(inst, ec)) return;\nLineageItem item = ((LineageTraceable) inst).getLineageItems(ec)[0];\nData data = 
ec.getVariable(((ComputationCPInstruction) inst).output);\n- double cest = getRecomputeEstimate(inst, ec);\nsynchronized( _cache ) {\nif (data instanceof MatrixObject)\n- _cache.get(item).setValue(((MatrixObject)data).acquireReadAndRelease(), cest);\n+ _cache.get(item).setValue(((MatrixObject)data).acquireReadAndRelease(), computetime);\nelse\n- _cache.get(item).setValue((ScalarObject)data, cest);\n+ _cache.get(item).setValue((ScalarObject)data, computetime);\nlong size = _cache.get(item).getSize();\nif (!isBelowThreshold(size))\n@@ -250,7 +248,8 @@ public class LineageCache\n}\n}\n- public static void putValue(List<DataIdentifier> outputs, LineageItem[] liInputs, String name, ExecutionContext ec)\n+ public static void putValue(List<DataIdentifier> outputs, LineageItem[] liInputs,\n+ String name, ExecutionContext ec, long computetime)\n{\nif (!LineageCacheConfig.isMultiLevelReuse())\nreturn;\n@@ -278,7 +277,7 @@ public class LineageCache\nsynchronized (_cache) {\n//move or remove placeholders\nif(AllOutputsCacheable)\n- FuncLIMap.forEach((Li, boundLI) -> mvIntern(Li, boundLI));\n+ FuncLIMap.forEach((Li, boundLI) -> mvIntern(Li, boundLI, computetime));\nelse\nFuncLIMap.forEach((Li, boundLI) -> removeEntry(Li));\n}\n@@ -295,22 +294,21 @@ public class LineageCache\n// reset cache size, otherwise the cache clear leads to unusable\n// space which means evictions could run into endless loops\n_cachesize = 0;\n+ _outdir = null;\nif (DMLScript.STATISTICS)\n_removelist.clear();\n}\n}\n- /////////////////////////////////////////\n- // Internal Cache Logic Implementation //\n- /////////////////////////////////////////\n+ //----------------- INTERNAL CACHE LOGIC IMPLEMENTATION --------------//\n- private static void putIntern(LineageItem key, DataType dt, MatrixBlock Mval, ScalarObject Sval, double compcost) {\n+ private static void putIntern(LineageItem key, DataType dt, MatrixBlock Mval, ScalarObject Sval, long computetime) {\nif (_cache.containsKey(key))\n//can come here if reuse_partial option is enabled\nreturn;\n// Create a new entry.\n- Entry newItem = new Entry(key, dt, Mval, Sval, compcost);\n+ Entry newItem = new Entry(key, dt, Mval, Sval, computetime);\n// Make space by removing or spilling LRU entries.\nif( Mval != null || Sval != null ) {\n@@ -346,17 +344,18 @@ public class LineageCache\n}\n- private static void mvIntern(LineageItem item, LineageItem probeItem) {\n+ private static void mvIntern(LineageItem item, LineageItem probeItem, long computetime) {\nif (ReuseCacheType.isNone())\nreturn;\n+ // Move the value from the cache entry with key probeItem to\n+ // the placeholder entry with key item.\nif (LineageCache.probe(probeItem)) {\nEntry oe = getIntern(probeItem);\nEntry e = _cache.get(item);\n- //TODO: compute estimate for function\nif (oe.isMatrixValue())\n- e.setValue(oe.getMBValue(), 0);\n+ e.setValue(oe.getMBValue(), computetime);\nelse\n- e.setValue(oe.getSOValue(), 0);\n+ e.setValue(oe.getSOValue(), computetime);\ne._origItem = probeItem;\nlong size = oe.getSize();\n@@ -390,28 +389,67 @@ public class LineageCache\nprivate static void makeSpace(long spaceNeeded) {\n// cost based eviction\n- while ((spaceNeeded +_cachesize) > CACHE_LIMIT)\n+ Entry e = _end;\n+ while (e != _head)\n{\n- if (_cache.get(_end._key).isNullVal()) {\n- //Must be a null function/SB placeholder entry. This\n- //function is currently being executed. 
Skip and continue.\n- setEnd2Head(_end);\n+ if ((spaceNeeded + _cachesize) <= CACHE_LIMIT)\n+ // Enough space recovered.\n+ break;\n+\n+ if (!LineageCacheConfig.isSetSpill()) {\n+ // If eviction is disabled, just delete the entries.\n+ removeEntry(e);\n+ e = e._prev;\ncontinue;\n}\n- if (_cache.get(_end._key).isMatrixValue()) { //spill matrix blocks only\n- if (_cache.get(_end._key)._compEst > getDiskSpillEstimate()\n- && LineageCacheConfig.isSetSpill())\n- spillToLocalFS(); // If re-computation is more expensive, spill data to disk.\n+ if (!e.getCacheStatus().canEvict()) {\n+ // Don't delete if the entry's cache status doesn't allow.\n+ e = e._prev;\n+ continue;\n}\n- if (_cache.get(_end._key)._compEst == 0) {\n- //Must be a function/SB/scalar entry. Move to next.\n- //FIXME: Remove this logic after implementing new eviction logic.\n- setEnd2Head(_end);\n+ double exectime = ((double) e._computeTime) / 1000000; // in milliseconds\n+\n+ if (!e.isMatrixValue()) {\n+ // Skip scalar entries with higher computation time, as\n+ // those could be function/statementblock outputs.\n+ if (exectime < LineageCacheConfig.MIN_SPILL_TIME_ESTIMATE)\n+ removeEntry(e);\n+ e = e._prev;\ncontinue;\n}\n- removeLastEntry();\n+\n+ // Estimate time to write to FS + read from FS.\n+ double spilltime = getDiskSpillEstimate(e) * 1000; // in milliseconds\n+\n+ if (DEBUG) {\n+ if (exectime > LineageCacheConfig.MIN_SPILL_TIME_ESTIMATE) {\n+ System.out.print(\"LI \" + e._key.getOpcode());\n+ System.out.print(\" exec time \" + ((double) e._computeTime) / 1000000);\n+ System.out.print(\" estimate time \" + getDiskSpillEstimate(e) * 1000);\n+ System.out.print(\" dim \" + e.getMBValue().getNumRows() + \" \" + e.getMBValue().getNumColumns());\n+ System.out.println(\" size \" + getDiskSizeEstimate(e));\n+ }\n+ }\n+\n+ if (LineageCacheConfig.isSetSpill()) {\n+ if (spilltime < LineageCacheConfig.MIN_SPILL_TIME_ESTIMATE) {\n+ // Can't trust the estimate if less than 100ms.\n+ // Spill if it takes longer to recompute.\n+ if (exectime >= LineageCacheConfig.MIN_SPILL_TIME_ESTIMATE)\n+ spillToLocalFS(e);\n+ }\n+ else {\n+ // Spill if it takes longer to recompute than spilling.\n+ if (exectime > spilltime)\n+ spillToLocalFS(e);\n+ }\n+ }\n+\n+ // Remove the entry from cache.\n+ removeEntry(e);\n+ e = e._prev;\n}\n}\n@@ -424,21 +462,74 @@ public class LineageCache\n//---------------- COSTING RELATED METHODS -----------------\n- private static double getDiskSpillEstimate() {\n+ private static double getDiskSpillEstimate(Entry e) {\n+ if (!e.isMatrixValue() || e.isNullVal())\n+ return 0;\n// This includes sum of writing to and reading from disk\nlong t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n- MatrixBlock mb = _cache.get(_end._key).getMBValue();\n+ double size = getDiskSizeEstimate(e);\n+ double loadtime = isSparse(e) ? size/LineageCacheConfig.FSREAD_SPARSE : size/LineageCacheConfig.FSREAD_DENSE;\n+ double writetime = isSparse(e) ? 
size/LineageCacheConfig.FSWRITE_SPARSE : size/LineageCacheConfig.FSWRITE_DENSE;\n+\n+ //double loadtime = CostEstimatorStaticRuntime.getFSReadTime(r, c, s);\n+ //double writetime = CostEstimatorStaticRuntime.getFSWriteTime(r, c, s);\n+ if (DMLScript.STATISTICS)\n+ LineageCacheStatistics.incrementCostingTime(System.nanoTime() - t0);\n+ return loadtime + writetime;\n+ }\n+\n+ private static double getDiskSizeEstimate(Entry e) {\n+ if (!e.isMatrixValue() || e.isNullVal())\n+ return 0;\n+ MatrixBlock mb = e.getMBValue();\nlong r = mb.getNumRows();\nlong c = mb.getNumColumns();\nlong nnz = mb.getNonZeros();\ndouble s = OptimizerUtils.getSparsity(r, c, nnz);\n- double loadtime = CostEstimatorStaticRuntime.getFSReadTime(r, c, s);\n- double writetime = CostEstimatorStaticRuntime.getFSWriteTime(r, c, s);\n+ double disksize = ((double)MatrixBlock.estimateSizeOnDisk(r, c, (long)(s*r*c))) / (1024*1024);\n+ return disksize;\n+ }\n+\n+ private static void adjustReadWriteSpeed(Entry e, double IOtime, boolean read) {\n+ double size = getDiskSizeEstimate(e);\n+ if (!e.isMatrixValue() || size < LineageCacheConfig.MIN_SPILL_DATA)\n+ // Scalar or too small\n+ return;\n+\n+ long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n+ double newIOSpeed = size / IOtime; // MB per second\n+ // Adjust the read/write speed taking into account the last read/write.\n+ // These constants will eventually converge to the real speed.\n+ if (read) {\n+ if (isSparse(e))\n+ LineageCacheConfig.FSREAD_SPARSE = (LineageCacheConfig.FSREAD_SPARSE + newIOSpeed) / 2;\n+ else\n+ LineageCacheConfig.FSREAD_DENSE= (LineageCacheConfig.FSREAD_DENSE+ newIOSpeed) / 2;\n+ }\n+ else {\n+ if (isSparse(e))\n+ LineageCacheConfig.FSWRITE_SPARSE = (LineageCacheConfig.FSWRITE_SPARSE + newIOSpeed) / 2;\n+ else\n+ LineageCacheConfig.FSWRITE_DENSE= (LineageCacheConfig.FSWRITE_DENSE+ newIOSpeed) / 2;\n+ }\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementCostingTime(System.nanoTime() - t0);\n- return loadtime+writetime;\n}\n+ private static boolean isSparse(Entry e) {\n+ if (!e.isMatrixValue() || e.isNullVal())\n+ return false;\n+ MatrixBlock mb = e.getMBValue();\n+ long r = mb.getNumRows();\n+ long c = mb.getNumColumns();\n+ long nnz = mb.getNonZeros();\n+ double s = OptimizerUtils.getSparsity(r, c, nnz);\n+ boolean sparse = MatrixBlock.evalSparseFormatOnDisk(r, c, (long)(s*r*c));\n+ return sparse;\n+ }\n+\n+ @Deprecated\n+ @SuppressWarnings(\"unused\")\nprivate static double getRecomputeEstimate(Instruction inst, ExecutionContext ec) {\nif (!((ComputationCPInstruction)inst).output.isMatrix()\n|| (((ComputationCPInstruction)inst).input1 != null && !((ComputationCPInstruction)inst).input1.isMatrix()))\n@@ -586,29 +677,37 @@ public class LineageCache\n// ---------------- I/O METHODS TO LOCAL FS -----------------\n- private static void spillToLocalFS() {\n- long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n- if (outdir == null) {\n- outdir = LocalFileUtils.getUniqueWorkingDir(LocalFileUtils.CATEGORY_LINEAGE);\n- LocalFileUtils.createLocalFileIfNotExist(outdir);\n+ private static void spillToLocalFS(Entry entry) {\n+ if (!entry.isMatrixValue())\n+ throw new DMLRuntimeException (\"Spilling scalar objects to disk is not allowd. Key: \"+entry._key);\n+ if (entry.isNullVal())\n+ throw new DMLRuntimeException (\"Cannot spill null value to disk. 
Key: \"+entry._key);\n+\n+ long t0 = System.nanoTime();\n+ if (_outdir == null) {\n+ _outdir = LocalFileUtils.getUniqueWorkingDir(LocalFileUtils.CATEGORY_LINEAGE);\n+ LocalFileUtils.createLocalFileIfNotExist(_outdir);\n}\n- String outfile = outdir+\"/\"+_cache.get(_end._key)._key.getId();\n+ String outfile = _outdir+\"/\"+entry._key.getId();\ntry {\n- LocalFileUtils.writeMatrixBlockToLocal(outfile, _cache.get(_end._key).getMBValue());\n+ LocalFileUtils.writeMatrixBlockToLocal(outfile, entry.getMBValue());\n} catch (IOException e) {\nthrow new DMLRuntimeException (\"Write to \" + outfile + \" failed.\", e);\n}\n- if (DMLScript.STATISTICS) {\nlong t1 = System.nanoTime();\n+ // Adjust disk writing speed\n+ adjustReadWriteSpeed(entry, ((double)(t1-t0))/1000000000, false);\n+\n+ if (DMLScript.STATISTICS) {\nLineageCacheStatistics.incrementFSWriteTime(t1-t0);\nLineageCacheStatistics.incrementFSWrites();\n}\n- _spillList.put(_end._key, new SpilledItem(outfile, _end._compEst));\n+ _spillList.put(entry._key, new SpilledItem(outfile, entry._computeTime));\n}\nprivate static Entry readFromLocalFS(LineageItem key) {\n- long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n+ long t0 = System.nanoTime();\nMatrixBlock mb = null;\n// Read from local FS\ntry {\n@@ -618,27 +717,20 @@ public class LineageCache\n}\n// Restore to cache\nLocalFileUtils.deleteFileIfExists(_spillList.get(key)._outfile, true);\n- putIntern(key, DataType.MATRIX, mb, null, _spillList.get(key)._compEst);\n+ long t1 = System.nanoTime();\n+ putIntern(key, DataType.MATRIX, mb, null, _spillList.get(key)._computeTime);\n+ // Adjust disk reading speed\n+ adjustReadWriteSpeed(_cache.get(key), ((double)(t1-t0))/1000000000, true);\n+ //TODO: set cache status as RELOADED for this entry\n_spillList.remove(key);\nif (DMLScript.STATISTICS) {\n- long t1 = System.nanoTime();\nLineageCacheStatistics.incrementFSReadTime(t1-t0);\nLineageCacheStatistics.incrementFSHits();\n}\nreturn _cache.get(key);\n}\n- ////////////////////////////////////////////\n- // Cache Maintenance and Lookup Functions //\n- ////////////////////////////////////////////\n-\n- private static void removeLastEntry() {\n- if (DMLScript.STATISTICS)\n- _removelist.add(_end._key);\n- Entry e = _cache.remove(_end._key);\n- _cachesize -= e.getSize();\n- delete(_end);\n- }\n+ //--------------- CACHE MAINTENANCE & LOOKUP FUNCTIONS ---------//\nprivate static void removeEntry(LineageItem key) {\n// Remove the entry for key\n@@ -648,9 +740,18 @@ public class LineageCache\n_cache.remove(key);\n}\n- private static void setEnd2Head(Entry entry) {\n- delete(entry);\n- setHead(entry);\n+ private static void removeEntry(Entry e) {\n+ if (_cache.remove(e._key) == null)\n+ // Entry not found in cache\n+ return;\n+\n+ if (DMLScript.STATISTICS)\n+ _removelist.add(e._key);\n+\n+ _cachesize -= e.getSize();\n+ delete(e);\n+ if (DMLScript.STATISTICS)\n+ LineageCacheStatistics.incrementMemDeletes();\n}\nprivate static void delete(Entry entry) {\n@@ -674,26 +775,26 @@ public class LineageCache\n_end = _head;\n}\n- ////////////////////////////////////\n- // Internal Cache Data Structures //\n- ////////////////////////////////////\n+ //---------------- INTERNAL CACHE DATA STRUCTURES ----------------//\nprivate static class Entry {\nprivate final LineageItem _key;\nprivate final DataType _dt;\nprivate MatrixBlock _MBval;\nprivate ScalarObject _SOval;\n- double _compEst;\n+ private long _computeTime;\n+ private LineageCacheStatus _status;\nprivate Entry _prev;\nprivate Entry _next;\nprivate LineageItem 
_origItem;\n- public Entry(LineageItem key, DataType dt, MatrixBlock Mval, ScalarObject Sval, double computecost) {\n+ public Entry(LineageItem key, DataType dt, MatrixBlock Mval, ScalarObject Sval, long computetime) {\n_key = key;\n_dt = dt;\n_MBval = Mval;\n_SOval = Sval;\n- _compEst = computecost;\n+ _computeTime = computetime;\n+ _status = isNullVal() ? LineageCacheStatus.EMPTY : LineageCacheStatus.CACHED;\n_origItem = null;\n}\n@@ -725,6 +826,10 @@ public class LineageCache\n}\n}\n+ public synchronized LineageCacheStatus getCacheStatus() {\n+ return _status;\n+ }\n+\npublic synchronized long getSize() {\nreturn ((_MBval != null ? _MBval.getInMemorySize() : 0) + (_SOval != null ? _SOval.getSize() : 0));\n}\n@@ -737,16 +842,18 @@ public class LineageCache\nreturn _dt.isMatrix();\n}\n- public synchronized void setValue(MatrixBlock val, double compEst) {\n+ public synchronized void setValue(MatrixBlock val, long computetime) {\n_MBval = val;\n- _compEst = compEst;\n+ _computeTime = computetime;\n+ _status = isNullVal() ? LineageCacheStatus.EMPTY : LineageCacheStatus.CACHED;\n//resume all threads waiting for val\nnotifyAll();\n}\n- public synchronized void setValue(ScalarObject val, double compEst) {\n+ public synchronized void setValue(ScalarObject val, long computetime) {\n_SOval = val;\n- _compEst = compEst;\n+ _computeTime = computetime;\n+ _status = isNullVal() ? LineageCacheStatus.EMPTY : LineageCacheStatus.CACHED;\n//resume all threads waiting for val\nnotifyAll();\n}\n@@ -754,11 +861,11 @@ public class LineageCache\nprivate static class SpilledItem {\nString _outfile;\n- double _compEst;\n+ long _computeTime;\n- public SpilledItem(String outfile, double computecost) {\n+ public SpilledItem(String outfile, long computetime) {\n_outfile = outfile;\n- _compEst = computecost;\n+ _computeTime = computetime;\n}\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -67,8 +67,29 @@ public class LineageCacheConfig {\nALL\n}\n+ public enum LineageCacheStatus {\n+ EMPTY, //Placeholder with no data. Cannot be evicted.\n+ CACHED, //General cached data. Can be evicted.\n+ EVICTED, //Data is in disk. Empty value. Cannot be evicted.\n+ RELOADED, //Reloaded from disk. Can be evicted.\n+ PINNED; //Pinned to memory. Cannot be evicted.\n+ public boolean canEvict() {\n+ return this == CACHED || this == RELOADED;\n+ }\n+ }\n+\npublic ArrayList<String> _MMult = new ArrayList<>();\npublic static boolean _allowSpill = true;\n+ // Minimum reliable spilling estimate in milliseconds.\n+ public static final double MIN_SPILL_TIME_ESTIMATE = 100;\n+ // Minimum reliable data size for spilling estimate in MB.\n+ public static final double MIN_SPILL_DATA = 20;\n+\n+ // Default I/O in MB per second for binary blocks\n+ public static double FSREAD_DENSE = 200;\n+ public static double FSREAD_SPARSE = 100;\n+ public static double FSWRITE_DENSE = 150;\n+ public static double FSWRITE_SPARSE = 75;\nprivate static ReuseCacheType _cacheType = null;\nprivate static CachedItemHead _itemH = null;\n@@ -76,7 +97,7 @@ public class LineageCacheConfig {\nprivate static boolean _compilerAssistedRW = true;\nstatic {\n//setup static configuration parameters\n- setSpill(false); //disable spilling of cache entries to disk\n+ setSpill(true); //enable/disable disk spilling.\n}\npublic static boolean isReusable (Instruction inst, ExecutionContext ec) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheStatistics.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheStatistics.java",
"diff": "@@ -34,6 +34,7 @@ public class LineageCacheStatistics {\nprivate static final LongAdder _numHitsFunc = new LongAdder();\nprivate static final LongAdder _numWritesMem = new LongAdder();\nprivate static final LongAdder _numWritesFS = new LongAdder();\n+ private static final LongAdder _numMemDel = new LongAdder();\nprivate static final LongAdder _numRewrites = new LongAdder();\nprivate static final LongAdder _ctimeFSRead = new LongAdder(); //in nano sec\nprivate static final LongAdder _ctimeFSWrite = new LongAdder(); //in nano sec\n@@ -50,6 +51,7 @@ public class LineageCacheStatistics {\n_numHitsFunc.reset();\n_numWritesMem.reset();\n_numWritesFS.reset();\n+ _numMemDel.reset();\n_numRewrites.reset();\n_ctimeFSRead.reset();\n_ctimeFSWrite.reset();\n@@ -103,6 +105,12 @@ public class LineageCacheStatistics {\n_numWritesFS.increment();\n}\n+ public static void incrementMemDeletes() {\n+ // Number of deletions from cache (including spilling).\n+ _numMemDel.increment();\n+ }\n+\n+\npublic static void incrementFSReadTime(long delta) {\n// Total time spent on reading from FS.\n_ctimeFSRead.add(delta);\n@@ -161,6 +169,8 @@ public class LineageCacheStatistics {\nsb.append(_numWritesMem.longValue());\nsb.append(\"/\");\nsb.append(_numWritesFS.longValue());\n+ sb.append(\"/\");\n+ sb.append(_numMemDel.longValue());\nreturn sb.toString();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -95,11 +95,15 @@ public class LineageRewriteReuse\nreturn false;\n//execute instructions & write the o/p to symbol table\n+ long t0 = System.nanoTime();\nexecuteInst(newInst, lrwec);\n+ long t1 = System.nanoTime();\nec.setVariable(((ComputationCPInstruction)curr).output.getName(), lrwec.getVariable(LR_VAR));\n//put the result into the cache\n- LineageCache.putMatrix(curr, ec);\n+ LineageCache.putMatrix(curr, ec, t1-t0);\n+ if (DMLScript.STATISTICS)\n+ LineageCacheStatistics.incrementPRwExecTime(t1-t0);\nDMLScript.EXPLAIN = et; //TODO can't change this here\n//cleanup execution context\n@@ -755,7 +759,6 @@ public class LineageRewriteReuse\nDMLScript.EXPLAIN = ExplainType.NONE;\ntry {\n- long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n//execute instructions\nBasicProgramBlock pb = getProgramBlock();\npb.setInstructions(newInst);\n@@ -763,8 +766,6 @@ public class LineageRewriteReuse\nLineageCacheConfig.shutdownReuse();\npb.execute(lrwec);\nLineageCacheConfig.restartReuse(oldReuseOption);\n- if (DMLScript.STATISTICS)\n- LineageCacheStatistics.incrementPRwExecTime(System.nanoTime()-t0);\n}\ncatch (Exception e) {\nthrow new DMLRuntimeException(\"Error executing lineage rewrites\" , e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/utils/Statistics.java",
"new_path": "src/main/java/org/apache/sysds/utils/Statistics.java",
"diff": "@@ -946,7 +946,7 @@ public class Statistics\nif (DMLScript.LINEAGE && !ReuseCacheType.isNone()) {\nsb.append(\"LinCache hits (Mem/FS/Del): \\t\" + LineageCacheStatistics.displayHits() + \".\\n\");\nsb.append(\"LinCache MultiLevel (Ins/SB/Fn):\" + LineageCacheStatistics.displayMultiLevelHits() + \".\\n\");\n- sb.append(\"LinCache writes (Mem/FS): \\t\" + LineageCacheStatistics.displayWtrites() + \".\\n\");\n+ sb.append(\"LinCache writes (Mem/FS/Del): \\t\" + LineageCacheStatistics.displayWtrites() + \".\\n\");\nsb.append(\"LinCache FStimes (Rd/Wr): \\t\" + LineageCacheStatistics.displayTime() + \" sec.\\n\");\nsb.append(\"LinCache costing time: \\t\" + LineageCacheStatistics.displayCostingTime() + \" sec.\\n\");\nsb.append(\"LinCache Rewrites: \\t\\t\" + LineageCacheStatistics.displayRewrites() + \".\\n\");\n"
},
{
"change_type": "ADD",
"old_path": "src/test/scripts/functions/lineage/.FunctionFullReuse5.dml.swp",
"new_path": "src/test/scripts/functions/lineage/.FunctionFullReuse5.dml.swp",
"diff": "Binary files /dev/null and b/src/test/scripts/functions/lineage/.FunctionFullReuse5.dml.swp differ\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-333,337] Improved lineage cache eviction
This patch improves lineage cache eviction by taking into account actual
execution time of instructions/functions. The ordering policy is still
LRU. Future commits will bring a better approach to estimating spilling time
and new eviction policies.
Closes #891. |
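To make the eviction logic above concrete: an entry is spilled only if recomputing it would cost more than writing it out and reading it back. A minimal Java sketch of that trade-off; the bandwidth defaults and the (old + new) / 2 speed adjustment mirror the constants added to LineageCacheConfig, while the class and method names are hypothetical:

```java
public class SpillDecision {
    // MB/s defaults mirroring LineageCacheConfig; converge toward observed speed
    static double FSWRITE_DENSE = 150, FSREAD_DENSE = 200;
    static final double MIN_SPILL_TIME_ESTIMATE = 100; // ms; below this, don't trust

    /** Spill only if recomputation is costlier than write + read-back. */
    static boolean shouldSpill(double sizeMB, long computeTimeNanos) {
        double execMs = computeTimeNanos / 1e6;
        double spillMs = (sizeMB / FSWRITE_DENSE + sizeMB / FSREAD_DENSE) * 1000;
        if (spillMs < MIN_SPILL_TIME_ESTIMATE) // estimate too small to be reliable
            return execMs >= MIN_SPILL_TIME_ESTIMATE;
        return execMs > spillMs;
    }

    /** Average the bandwidth constant with the last observed write speed. */
    static void adjustWriteSpeed(double sizeMB, double secondsTaken) {
        FSWRITE_DENSE = (FSWRITE_DENSE + sizeMB / secondsTaken) / 2;
    }

    public static void main(String[] args) {
        // 100MB entry, 2s to recompute vs ~1.2s estimated I/O -> worth spilling
        System.out.println(shouldSpill(100, 2_000_000_000L)); // true
        adjustWriteSpeed(500, 4.0); // observed 125 MB/s
        System.out.println(FSWRITE_DENSE); // 137.5, moving toward the real speed
    }
}
```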
49,738 | 25.04.2020 19:40:58 | -7,200 | f450ead5506d1615b5979bee85b39891e0f0fc00 | [MINOR] Script-level improvements mice builtin function
* Loop vectorization of scalar assignment
* Removed unnecessary branch for table padding
* Minor modifications of rmEmpty usage to increase common subexpression
elimination | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/mice.dml",
"new_path": "scripts/builtin/mice.dml",
"diff": "@@ -56,8 +56,8 @@ return(Frame[String] dataset, Frame[String] singleSet)\ncol = ncol(F)\nResult = matrix(0, rows=1, cols = col)\nMask_Result = matrix(0, rows=1, cols=col)\n- cat = t(cMask) * seq(1, ncol(cMask))\n- cat = removeEmpty(target = cat, margin = \"rows\")\n+ scat = seq(1, ncol(cMask))\n+ cat = removeEmpty(target=scat, margin=\"rows\", select=t(cMask))\ns=\"\"\nfor(i in 1: nrow(cat), check =0)\ns = s+as.integer(as.scalar(cat[i, 1]))+\",\";\n@@ -70,7 +70,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\nXO = replace(target=X, pattern=NaN, replacement=0);\n# remove categorical features and impute continous features with mean\n- eX_n = removeEmpty(target=X, margin=\"cols\", select=(1-cMask))\n+ eX_n = removeEmpty(target=X, margin=\"cols\", select=(cMask==0))\ncol_n = ncol(eX_n);\n# storing the mask/address of missing values\nMask_n = is.na(eX_n);\n@@ -80,7 +80,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\n# filling the missing data with their means\nX2_n = eX_n+(Mask_n*colMeans(eX_n))\n# matrices for computing actul data\n- p_n = table( (seq(1, ncol(eX_n))) , (removeEmpty(target = t(cMask==0)*seq(1, ncol(cMask)), margin =\"rows\")) , 1 )\n+ p_n = table(seq(1, ncol(eX_n)), removeEmpty(target=scat, margin=\"rows\", select=t(cMask==0)))\nif(ncol(p_n) < ncol(cMask))\np_n = cbind(p_n, matrix(0, nrow(p_n), ncol(cMask)-ncol(p_n)))\nq = XO * cMask\n@@ -91,8 +91,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\neX_c2 = removeEmpty(target = eX_c, margin = \"rows\", select = (rowSums(eX_c != 0)==col_c))\ncolMod = matrix(0, 1, ncol(eX_c))\n# compute columnwise mode\n- parfor(i in 1: col_c)\n- {\n+ parfor(i in 1: col_c) {\nf = eX_c2[, i] # adding one in data for dealing with zero category\ncat_counts = table(f, 1, n, 1); # counts for each category\nmode = as.scalar(rowIndexMax(t(cat_counts)));\n@@ -100,13 +99,10 @@ return(Frame[String] dataset, Frame[String] singleSet)\n}\n# find the mask of missing values\n- tmpMask_c = (eX_c == 0);\n- tmpMask_c = (tmpMask_c * colMod) # fill missing values with mode\n+ tmpMask_c = (eX_c==0) * colMod # fill missing values with mode\n# Generate a matrix of actual length\n- p_c = table((seq(1, ncol(tmpMask_c))) , (removeEmpty(target = t(cMask)*seq(1, ncol(cMask)), margin =\"rows\")), 1)\n- if(ncol(p_c) < ncol(cMask))\n- p_c = cbind(p_c, matrix(0, nrow(p_c), ncol(cMask)-ncol(p_c)))\n+ p_c = table(seq(1, ncol(tmpMask_c)), removeEmpty(target=scat, margin =\"rows\", select=t(cMask)), ncol(tmpMask_c), ncol(cMask))\nMask_c = tmpMask_c %*% p_c\ninverseMask_c = Mask_c == 0\n@@ -131,14 +127,13 @@ return(Frame[String] dataset, Frame[String] singleSet)\ndXMask = matrix(0, 1, ncol(dX))\nindex = 1\nfor(k in 1:col) {\n- if(as.scalar(dcDistincts[1,k]) != 0) {\n- for(l in 1:as.scalar(dcDistincts[1,k])){\n- dXMask[1,index] = 1\n- index = index +1\n- }\n+ nDistk = as.scalar(dcDistincts[1,k]);\n+ if(nDistk != 0) {\n+ dXMask[1,index:(index+nDistk-1)] = matrix(1,1,nDistk)\n+ index += nDistk;\n}\nelse\n- index = index +1\n+ index += 1\n}\n#multiple imputations\n@@ -149,7 +144,6 @@ return(Frame[String] dataset, Frame[String] singleSet)\nin_n = 1; in_c = 1; i=1; j=1; # varibales for index selection\nwhile(i <= ncol(dX))\n{\n-\nif(as.scalar(dXMask[1,i]) == 0)\n{\n# construct column selector\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Script-level improvements mice builtin function
* Loop vectorization of scalar assignment
* Removed unnecessary branch for table padding
* Minor modifications of rmEmpty usage to increase common subexpression
elimination |
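The loop vectorization in the first bullet replaces a per-element inner loop over dXMask with a single block assignment. A minimal Java analogue of that rewrite (the original is DML; names here are illustrative):

```java
import java.util.Arrays;

public class MaskVectorization {
    // before: scalar inner loop setting one cell per iteration
    static void fillScalar(double[] mask, int index, int nDist) {
        for (int l = 0; l < nDist; l++)
            mask[index + l] = 1;
    }
    // after: one vectorized fill over the same index range
    static void fillVectorized(double[] mask, int index, int nDist) {
        Arrays.fill(mask, index, index + nDist, 1.0);
    }
    public static void main(String[] args) {
        double[] a = new double[8], b = new double[8];
        fillScalar(a, 2, 3);
        fillVectorized(b, 2, 3);
        System.out.println(Arrays.equals(a, b)); // true: identical result
    }
}
```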
49,698 | 27.04.2020 00:01:26 | -19,080 | 23328d8c0c621a9fd21f94929c5d56ab13dd2c25 | [MINOR] Fix Javadoc issue at `getLineageTrace`
* The parameter `var` is the output variable name for which the
lineage trace is sought.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/jmlc/PreparedScript.java",
"new_path": "src/main/java/org/apache/sysds/api/jmlc/PreparedScript.java",
"diff": "@@ -477,7 +477,10 @@ public class PreparedScript implements ConfigurableAPI\n/**\n* Capture lineage of the DML/PyDML program and view result as a string.\n*\n+ * @param var the output variable name on which lineage trace is sought\n+ *\n* @return string results of lineage trace\n+ *\n*/\npublic String getLineageTrace(String var) {\nreturn _outVarLineage.get(var);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix Javadoc issue at `getLineageTrace`
* The parameter `var` is the output variable name for which the
lineage trace is sought.
Closes #899. |
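A hypothetical usage sketch of the documented method, assuming the JMLC Connection/PreparedScript API as shown in the patch and that lineage tracing has been enabled for the script (enabling it is outside this patch; without it the trace may be null):

```java
import org.apache.sysds.api.jmlc.Connection;
import org.apache.sysds.api.jmlc.PreparedScript;

public class LineageTraceExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = new Connection()) {
            String dml = "R = matrix(1, rows=2, cols=2) + 7;";
            PreparedScript ps = conn.prepareScript(
                dml, new String[]{}, new String[]{"R"});
            ps.executeScript();
            // 'R' is the output variable name on which the trace is sought;
            // returns null unless lineage tracing was enabled beforehand
            System.out.println(ps.getLineageTrace("R"));
        }
    }
}
```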
49,698 | 27.04.2020 00:02:26 | -19,080 | d36ec4c5b7ef12e3129945f653d69f016788ad1f | [MINOR][DOC] Update 'SystemDS' to 'Apache SystemDS'
* This is as per the discussion at
* Also, wrap long lines and update the mvn build command.
Closes | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -17,9 +17,16 @@ limitations under the License.\n{% end comment %}\n-->\n-# SystemDS\n+# Apache SystemDS\n-**Overview:** SystemDS is a versatile system for the end-to-end data science lifecycle from data integration, cleaning, and feature engineering, over efficient, local and distributed ML model training, to deployment and serving. To this end, we aim to provide a stack of declarative languages with R-like syntax for (1) the different tasks of the data-science lifecycle, and (2) users with different expertise. These high-level scripts are compiled into hybrid execution plans of local, in-memory CPU and GPU operations, as well as distributed operations on Apache Spark. In contrast to existing systems - that either provide homogeneous tensors or 2D Datasets - and in order to serve the entire data science lifecycle, the underlying data model are DataTensors, i.e., tensors (multi-dimensional arrays) whose first dimension may have a heterogeneous and nested schema.\n+**Overview:** SystemDS is a versatile system for the end-to-end data science lifecycle from data integration, cleaning,\n+and feature engineering, over efficient, local and distributed ML model training, to deployment and serving. To this\n+end, we aim to provide a stack of declarative languages with R-like syntax for (1) the different tasks of the data-science\n+lifecycle, and (2) users with different expertise. These high-level scripts are compiled into hybrid execution plans of\n+local, in-memory CPU and GPU operations, as well as distributed operations on Apache Spark. In contrast to existing\n+systems - that either provide homogeneous tensors or 2D Datasets - and in order to serve the entire data science lifecycle,\n+the underlying data model are DataTensors, i.e., tensors (multi-dimensional arrays) whose first dimension may have a\n+heterogeneous and nested schema.\n**Quick Start** [Install, Quick Start and Hello World](/bin/README.md)\n@@ -27,9 +34,12 @@ limitations under the License.\n**Python Documentation** [Python SystemDS Documentation](https://damslab.github.io/docs/sysdspython/index.html)\n-**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from [**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2018. We will continue to support linear algebra programs over matrices, while replacing the underlying data model and compiler, as well as substantially extending the supported functionalities. Until the first release, you can build your own snapshot via Apache Maven: `mvn -DskipTests clean package`.\n+**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from\n+[**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2018. We will continue to support linear algebra\n+programs over matrices, while replacing the underlying data model and compiler, as well as substantially extending the\n+supported functionalities. Until the first release, you can build your own snapshot via Apache Maven:\n+ `mvn clean package -P distribution`.\n-[](https://opensource.org/licenses/Apache-2.0)\n\n\n\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Update 'SystemDS' to 'Apache SystemDS'
* This is as per the discussion at
https://www.mail-archive.com/[email protected]/msg01051.html
* Also, wrap long lines and update the mvn build command.
Closes #898. |
49,698 | 28.04.2020 00:15:58 | -7,200 | bdf78e462506ef8ef7fc9e6b23a6520e4155eca0 | AutoEncoder test for codegenalg suite
This patch adds a test case for AutoEncoder with codegen
enabled, comparing results against a corresponding R script.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/codegenalg/partone/AlgorithmAutoEncoder.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/codegenalg/partone/AlgorithmAutoEncoder.java",
"diff": "package org.apache.sysds.test.functions.codegenalg.partone;\nimport java.io.File;\n+import java.util.HashMap;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\nimport org.junit.Assert;\nimport org.junit.Test;\nimport org.apache.sysds.api.DMLScript;\n@@ -37,11 +39,12 @@ public class AlgorithmAutoEncoder extends AutomatedTestBase\nprivate final static String TEST_DIR = \"functions/codegenalg/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + AlgorithmAutoEncoder.class.getSimpleName() + \"/\";\n- private final static int rows = 2468;\n+ private final static int rows = 1068;\nprivate final static int cols = 784;\nprivate final static double sparsity1 = 0.7; //dense\nprivate final static double sparsity2 = 0.1; //sparse\n+ private final static double eps = 1e-5;\nprivate final static int H1 = 500;\nprivate final static int H2 = 2;\n@@ -179,22 +182,66 @@ public class AlgorithmAutoEncoder extends AutomatedTestBase\nTestConfiguration config = getTestConfiguration(TEST_NAME);\nloadTestConfiguration(config);\n- fullDMLScriptName = \"scripts/staging/autoencoder-2layer.dml\";\n+ fullDMLScriptName = SCRIPT_DIR + TEST_DIR + \"/Algorithm_AutoEncoder.dml\";\n+ //\"scripts/staging/autoencoder-2layer.dml\";\nprogramArgs = new String[]{ \"-stats\", \"-nvargs\", \"X=\"+input(\"X\"),\n\"H1=\"+H1, \"H2=\"+H2, \"EPOCH=\"+epochs, \"BATCH=\"+batchsize,\n+ \"W1_rand=\"+input(\"W1_rand\"),\"W2_rand=\"+input(\"W2_rand\"),\n+ \"W3_rand=\"+input(\"W3_rand\"), \"W4_rand=\"+input(\"W4_rand\"),\n+ \"order_rand=\"+input(\"order_rand\"),\n\"W1_out=\"+output(\"W1\"), \"b1_out=\"+output(\"b1\"),\n\"W2_out=\"+output(\"W2\"), \"b2_out=\"+output(\"b2\"),\n\"W3_out=\"+output(\"W3\"), \"b3_out=\"+output(\"b3\"),\n\"W4_out=\"+output(\"W4\"), \"b4_out=\"+output(\"b4\")};\n+\n+ rCmd = getRCmd(inputDir(), String.valueOf(H1), String.valueOf(H2),\n+ String.valueOf(epochs), String.valueOf(batchsize), expectedDir());\nOptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n//generate actual datasets\ndouble[][] X = getRandomMatrix(rows, cols, 0, 1, sparse?sparsity2:sparsity1, 714);\nwriteInputMatrixWithMTD(\"X\", X, true);\n+ //generate rand matrices for W1, W2, W3, W4 here itself for passing onto both DML and R scripts\n+ double[][] W1_rand = getRandomMatrix(H1, cols, 0, 1, sparse?sparsity2:sparsity1, 800);\n+ writeInputMatrixWithMTD(\"W1_rand\", W1_rand, true);\n+ double[][] W2_rand = getRandomMatrix(H2, H1, 0, 1, sparse?sparsity2:sparsity1, 900);\n+ writeInputMatrixWithMTD(\"W2_rand\", W2_rand, true);\n+ double[][] W3_rand = getRandomMatrix(H1, H2, 0, 1, sparse?sparsity2:sparsity1, 589);\n+ writeInputMatrixWithMTD(\"W3_rand\", W3_rand, true);\n+ double[][] W4_rand = getRandomMatrix(cols, H1, 0, 1, sparse?sparsity2:sparsity1, 150);\n+ writeInputMatrixWithMTD(\"W4_rand\", W4_rand, true);\n+ double[][] order_rand = getRandomMatrix(rows, 1, 0, 1, sparse?sparsity2:sparsity1, 143);\n+ writeInputMatrixWithMTD(\"order_rand\", order_rand, true); //for the permut operation on input X\n+\n//run script\nrunTest(true, false, null, -1);\n- //TODO R script\n+ runRScript(true);\n+\n+ HashMap<MatrixValue.CellIndex, Double> dmlW1 = readDMLMatrixFromHDFS(\"W1\");\n+ HashMap<MatrixValue.CellIndex, Double> dmlW2 = readDMLMatrixFromHDFS(\"W2\");\n+ HashMap<MatrixValue.CellIndex, Double> dmlW3 = readDMLMatrixFromHDFS(\"W3\");\n+ HashMap<MatrixValue.CellIndex, Double> dmlW4 = readDMLMatrixFromHDFS(\"W4\");\n+ HashMap<MatrixValue.CellIndex, Double> dmlb1 = readDMLMatrixFromHDFS(\"b1\");\n+ HashMap<MatrixValue.CellIndex, Double> 
dmlb2 = readDMLMatrixFromHDFS(\"b2\");\n+ HashMap<MatrixValue.CellIndex, Double> dmlb3 = readDMLMatrixFromHDFS(\"b3\");\n+ HashMap<MatrixValue.CellIndex, Double> dmlb4 = readDMLMatrixFromHDFS(\"b4\");\n+ HashMap<MatrixValue.CellIndex, Double> rW1 = readRMatrixFromFS(\"W1\");\n+ HashMap<MatrixValue.CellIndex, Double> rW2 = readRMatrixFromFS(\"W2\");\n+ HashMap<MatrixValue.CellIndex, Double> rW3 = readRMatrixFromFS(\"W3\");\n+ HashMap<MatrixValue.CellIndex, Double> rW4 = readRMatrixFromFS(\"W4\");\n+ HashMap<MatrixValue.CellIndex, Double> rb1 = readRMatrixFromFS(\"b1\");\n+ HashMap<MatrixValue.CellIndex, Double> rb2 = readRMatrixFromFS(\"b2\");\n+ HashMap<MatrixValue.CellIndex, Double> rb3 = readRMatrixFromFS(\"b3\");\n+ HashMap<MatrixValue.CellIndex, Double> rb4 = readRMatrixFromFS(\"b4\");\n+ TestUtils.compareMatrices(dmlW1, rW1, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlW2, rW2, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlW3, rW3, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlW4, rW4, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlb1, rb1, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlb2, rb2, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlb3, rb3, eps, \"Stat-DML\", \"Stat-R\");\n+ TestUtils.compareMatrices(dmlb4, rb4, eps, \"Stat-DML\", \"Stat-R\");\nAssert.assertTrue(heavyHittersContainsSubString(\"spoof\")\n|| heavyHittersContainsSubString(\"sp_spoof\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegenalg/Algorithm_AutoEncoder.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+\n+#1. tanh function\n+func = function(X){\n+ Y = tanh(X)\n+ return(Y)\n+}\n+\n+func1 = function(X) {\n+ Y = (exp(2*X) - 1)/(exp(2*X) + 1)\n+ Y_prime = 1 - Y^2\n+ return(Y_prime)\n+}\n+\n+#2. feedForward\n+\n+obj <- function(E){\n+ val = 0.5 * sum(E^2)\n+ return(val)\n+}\n+\n+\n+X = readMM(paste(args[1], \"X.mtx\", sep=\"\"));\n+W1_rand = readMM(paste(args[1], \"W1_rand.mtx\", sep=\"\"));\n+W2_rand = readMM(paste(args[1], \"W2_rand.mtx\", sep=\"\"));\n+W3_rand = readMM(paste(args[1], \"W3_rand.mtx\", sep=\"\"));\n+W4_rand = readMM(paste(args[1], \"W4_rand.mtx\", sep=\"\"));\n+order_rand = readMM(paste(args[1], \"order_rand.mtx\", sep=\"\"));\n+\n+num_hidden1 = as.integer(args[2]) #$H1\n+num_hidden2 = as.integer(args[3]) #$H2\n+max_epochs = as.integer(args[4]) #$EPOCH\n+batch_size = as.integer(args[5]) #$BATCH\n+\n+mu = 0.9 # momentum\n+step = 1e-5\n+decay = 0.95\n+hfile = \" \"\n+fmt = \"text\"\n+full_obj = FALSE\n+\n+n = nrow(X)\n+m = ncol(X)\n+\n+#randomly reordering rows\n+#permut = table(seq(from=1,to=n,by=1), order(runif(n, min=0, max=1)))\n+permut = table(seq(from=1,to=n,by=1), order(order_rand))\n+permut = as.data.frame.matrix(permut)\n+permut = data.matrix(permut)\n+X = (permut %*% X)\n+#z-transform, whitening operator is better\n+means = t(as.matrix(colSums(X)))/n\n+csx2 = t(as.matrix(colSums(X^2)))/n\n+stds = sqrt(csx2 - (means*means)*n/(n-1)) + 1e-17\n+X = (X - matrix(1, nrow(X),1) %*% means)/(matrix(1,nrow(X),1) %*% stds)\n+\n+W1 = sqrt(6)/sqrt(m + num_hidden1) * W1_rand\n+b1 = matrix(0, num_hidden1, 1)\n+\n+W2 = sqrt(6)/sqrt(num_hidden1 + num_hidden2) * W2_rand\n+b2 = matrix(0, num_hidden2, 2)\n+\n+W3 = sqrt(6)/sqrt(num_hidden2 + num_hidden1) * W3_rand\n+b3 = matrix(0, num_hidden1, 1)\n+\n+W4 = sqrt(6)/sqrt(num_hidden2 + m) * W4_rand\n+b4 = matrix(0, m, 1)\n+\n+upd_W1 = matrix(0, nrow(W1), ncol(W1))\n+upd_b1 = matrix(0, nrow(b1), ncol(b1))\n+upd_W2 = matrix(0, nrow(W2), ncol(W2))\n+upd_b2 = matrix(0, nrow(b2), ncol(b2))\n+upd_W3 = matrix(0, nrow(W3), ncol(W3))\n+upd_b3 = matrix(0, nrow(b3), ncol(b3))\n+upd_W4 = matrix(0, nrow(W4), ncol(W4))\n+upd_b4 = matrix(0, nrow(b4), ncol(b4))\n+\n+if( full_obj ){\n+ # nothing to do here\n+}\n+\n+iter = 0\n+num_iters_per_epoch = ceiling(n / batch_size)\n+max_iterations = max_epochs * num_iters_per_epoch\n+# debug\n+# print(\"num_iters_per_epoch=\" + num_iters_per_epoch + \" max_iterations=\" + max_iterations)\n+beg = 1\n+while( iter < max_iterations ) {\n+ end = beg + batch_size - 1\n+ if(end > n) end = n\n+ X_batch = X[beg:end,]\n+\n+ # 
Notation:\n+ # 1 2 3 4 5 6 7 8 9\n+ # [H1, H1_prime, H2, H2_prime, H3, H3_prime, Yhat, Yhat_prime, E]\n+ # tmp_ff = feedForward(X_batch, W1, b1, W2, b2, W3, b3, W4, b4, X_batch)\n+ # H1 = tmp_ff[1]; H1_prime = tmp_ff[2]; H2 = tmp_ff[3]; H2_prime = tmp_ff[4];\n+ # H3 = tmp_ff[5]; H3_prime = tmp_ff[6]; Yhat = tmp_ff[7]; Yhat_prime = tmp_ff[8];\n+ # E = tmp_ff[9]\n+ # inputs: X, W1, b1, W2, b2, W3, b3, W4, b4, X_batch\n+ H1_in = t(W1 %*% t(X_batch) + b1 %*% matrix(1,ncol(b1),nrow(X_batch)))\n+ H1 = func(H1_in)\n+ H1_prime = func1(H1_in)\n+\n+ H2_in = t(W2 %*% t(H1) + b2%*% matrix(1,ncol(b2),nrow(H1)))\n+ H2 = func(H2_in)\n+ H2_prime = func1(H2_in)\n+\n+ H3_in = t(W3 %*% t(H2) + b3%*% matrix(1,ncol(b3),nrow(H2)))\n+ H3 = func(H3_in)\n+ H3_prime = func1(H3_in)\n+\n+ Yhat_in = t(W4 %*% t(H3) + b4%*% matrix(1,ncol(b4),nrow(H3)))\n+ Yhat = func(Yhat_in)\n+ Yhat_prime = func1(Yhat_in)\n+\n+ E = Yhat - X_batch\n+\n+ # Notation:\n+ # 1 2 3 4 5 6 7 8\n+ # [W1_grad, b1_grad, W2_grad, b2_grad, W3_grad, b3_grad,W4_grad, b4_grad]\n+ # tmp_grad = grad(X_batch, H1, H1_prime, H2, H2_prime, H3, H3_prime, Yhat_prime, E, W1, W2, W3, W4)\n+ # W1_grad = tmp_grad[1]; b1_grad = tmp_grad[2]; W2_grad = tmp_grad[3]; b2_grad = tmp_grad[4];\n+ # W3_grad = tmp_grad[5]; b3_grad = tmp_grad[6]; W4_grad = tmp_grad[7]; b4_grad = tmp_grad[8];\n+ # grad function\n+\n+ #backprop\n+ delta4 = E * Yhat_prime\n+ delta3 = H3_prime * (delta4 %*% W4)\n+ delta2 = H2_prime * (delta3 %*% W3)\n+ delta1 = H1_prime * (delta2 %*% W2)\n+\n+ #compute gradients\n+ b4_grad = (colSums(delta4))\n+ b3_grad = (colSums(delta3))\n+ b2_grad = (colSums(delta2))\n+ b1_grad = (colSums(delta1))\n+\n+ W4_grad = t(delta4) %*% H3\n+ W3_grad = t(delta3) %*% H2\n+ W2_grad = t(delta2) %*% H1\n+ W1_grad = t(delta1) %*% X_batch\n+\n+ ob = obj(E)\n+ epochs = iter / num_iters_per_epoch\n+ # debug\n+ # print(table(epochs, ob), zero.print = \"0\")\n+\n+ #update\n+ local_step = step / nrow(X_batch)\n+ upd_W1 = mu * upd_W1 - local_step * W1_grad\n+ upd_b1 = mu * upd_b1 - local_step * b1\n+ upd_W2 = mu * upd_W2 - local_step * W2_grad\n+ upd_b2 = mu * upd_b2 - local_step * b2\n+ upd_W3 = mu * upd_W3 - local_step * W3_grad\n+ upd_b3 = mu * upd_b3 - local_step * b3\n+ upd_W4 = mu * upd_W4 - local_step * W4_grad\n+ upd_b4 = mu * upd_b4 - local_step * b4\n+ W1 = W1 + upd_W1\n+ b1 = b1 + upd_b1\n+ W2 = W2 + upd_W2\n+ b2 = b2 + upd_b2\n+ W3 = W3 + upd_W3\n+ b3 = b3 + upd_b3\n+ W4 = W4 + upd_W4\n+ b4 = b4 + upd_b4\n+\n+ iter = iter + 1\n+ if(end == n) beg = 1\n+ else beg = end + 1\n+\n+ if(iter %% num_iters_per_epoch == 0)\n+ step = step * decay\n+\n+ if(full_obj & iter %% num_iters_per_epoch == 0 ) {\n+ # Notation:\n+ # tmp_ff = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ # full_H1 = tmp_ff[1]; full_H1_prime = tmp_ff[2]; full_H2 = tmp_ff[3]; full_H2_prime = tmp_ff[4];\n+ # full_H3 = tmp_ff[5]; full_H3_prime = tmp_ff[6]; full_Yhat = tmp_ff[7]; full_Yhat_prime = tmp_ff[8];\n+ # full_E = tmp_ff[9];\n+ # inputs: X, W1, b1, W2, b2, W3, b3, W4, b4, X\n+ H1_in = t(W1 %*% t(X) + b1 %*% matrix(1,ncol(b1),nrow(X)))\n+\n+ full_H1 = func(H1_in)\n+ full_H1_prime = func1(H1_in)\n+\n+ H2_in = t(W2 %*% t(H1) + b2%*% matrix(1,ncol(b2),nrow(H1)))\n+ full_H2 = func(H2_in)\n+ full_H2_prime = func1(H2_in)\n+\n+ H3_in = t(W3 %*% t(H2) + b3%*% matrix(1,ncol(b3),nrow(H2)))\n+ full_H3 = func(H3_in)\n+ full_H3_prime = func1(H3_in)\n+\n+ Yhat_in = t(W4 %*% t(H3) + b4%*% matrix(1,ncol(b4),nrow(H3)))\n+ full_Yhat = func(Yhat_in)\n+ full_Yhat_prime = func1(Yhat_in)\n+ full_E = full_Yhat - X\n+\n+ 
full_o = obj(full_E)\n+ epochs = iter %/% num_iters_per_epoch\n+ # debug\n+ # print(table(epochs, full_o, deparse.level=2), zero.print=\".\")\n+ # print(\"EPOCHS=\" + epochs + \" iter=\" + iter + \" OBJ (FULL DATA)=\" + full_o)\n+ }\n+}\n+\n+#debug\n+#print.table(W1, digits=3)\n+writeMM(as(W1,\"CsparseMatrix\"), paste(args[6], \"W1\", sep=\"\"));\n+writeMM(as(b1,\"CsparseMatrix\"), paste(args[6], \"b1\", sep=\"\"));\n+writeMM(as(W2,\"CsparseMatrix\"), paste(args[6], \"W2\", sep=\"\"));\n+writeMM(as(b2,\"CsparseMatrix\"), paste(args[6], \"b2\", sep=\"\"));\n+writeMM(as(W3,\"CsparseMatrix\"), paste(args[6], \"W3\", sep=\"\"));\n+writeMM(as(b3,\"CsparseMatrix\"), paste(args[6], \"b3\", sep=\"\"));\n+writeMM(as(W4,\"CsparseMatrix\"), paste(args[6], \"W4\", sep=\"\"));\n+writeMM(as(b4,\"CsparseMatrix\"), paste(args[6], \"b4\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegenalg/Algorithm_AutoEncoder.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Trains a 2-layer autoencoder with minibatch SGD, momentum and step-size decay.\n+# If invoked with H1 > H2 then it becomes a 'bowtie' structured autoencoder\n+# Weights are initialized using Glorot & Bengio (2010) AISTATS initialization.\n+# The script standardizes the input before training (can be turned off).\n+# Also, it randomly reshuffles rows before training.\n+# Currently, tanh is set to be the activation function.\n+# By re-implementing 'func' DML-bodied function, one can change the activation.\n+\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Filename where the input is stored\n+# H1 Int --- Number of neurons in the 1st hidden layer\n+# H2 Int --- Number of neurons in the 2nd hidden layer\n+# EPOCH Int --- Number of epochs to train for\n+# fmt String 'text' Output format (\"text\", \"csv\", \"binary\" etc.)\n+# OBJ Boolean FALSE If TRUE, Computes objective function value (squared-loss)\n+# at the end of each epoch. 
Note that, computing the full\n+# objective can take a lot of time.\n+# BATCH Int 256 Mini-batch size (training parameter)\n+# STEP Double 1e-5 Initial step size (training parameter)\n+# DECAY Double 0.95 Decays step size after each epoch (training parameter)\n+# MOMENTUM Double 0.9 Momentum parameter (training parameter)\n+# ---------------------------------------------------------------------------------------------\n+#\n+# OUTPUT PARAMETERS (all filenames):\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# W1_out String --- File to store weights between input layer and 1st hidden layer\n+# b1_out String --- File to store bias between input layer and 1st hidden layer\n+# W2_out String --- File to store weights between 1st hidden layer and 2nd hidden layer\n+# b2_out String --- File to store bias between 1st hidden layer and 2nd hidden layer\n+# W3_out String --- File to store weights between 2nd hidden layer and 3rd hidden layer\n+# b3_out String --- File to store bias between 2nd hidden layer and 3rd hidden layer\n+# W4_out String --- File to store weights between 3rd hidden layer and output layer\n+# b4_out String --- File to store bias between 3rd hidden layer and output layer\n+# HIDDEN String \" \" File to store the hidden (2nd) layer representation if needed\n+# ---------------------------------------------------------------------------------------------\n+#\n+# INVOCATION:\n+# -f autoencoder_2layer.dml --nvargs X=<input file> H1=500 H2=2 EPOCH=1 fmt=\"csv\"\n+# W1_out=<weights from input to 1st hidden layer> b1_out=<bias from input to 1st hidden layer>\n+# W2_out=<weights from 1st hidden layer to 2nd hidden layer> b2_out=<bias from 1st hidden layer to 2nd hidden layer>\n+# W3_out=<weights from 2nd hidden layer to 3rd hidden layer> b3_out=<bias from 2nd hidden layer to 3rd hidden layer>\n+# W4_out=<weights from 3rd hidden layer to output> b4_out=<bias from 3rd hidden layer to output>\n+#\n+\n+# NOTE: This is a copy of the `scripts/staging/autoencoder-2layer.dml` with rand()\n+# replaced with random matrices generated in the corresponding test class.\n+\n+#implements tanh\n+#to use another activation fn, implement a DML-bodied function with\n+#function name 'func' and comment out this one\n+func = function(Matrix[Double] X) return(Matrix[Double] Y, Matrix[Double] Y_prime){\n+ Y = tanh(X)\n+ Y_prime = 1 - Y^2\n+}\n+\n+feedForward = function(Matrix[Double] X, Matrix[Double] W1, Matrix[Double] b1,\n+ Matrix[Double] W2, Matrix[Double] b2, Matrix[Double] W3, Matrix[Double] b3,\n+ Matrix[Double] W4, Matrix[Double] b4, Matrix[Double] Y)\n+ return(Matrix[Double] H1, Matrix[Double] H1_prime, Matrix[Double] H2,\n+ Matrix[Double] H2_prime, Matrix[Double] H3, Matrix[Double] H3_prime,\n+ Matrix[Double] Yhat, Matrix[Double] Yhat_prime, Matrix[Double] E)\n+{\n+ H1_in = t(W1 %*% t(X) + b1)\n+ [H1, H1_prime] = func(H1_in)\n+\n+ H2_in = t(W2 %*% t(H1) + b2)\n+ [H2, H2_prime] = func(H2_in)\n+\n+ H3_in = t(W3 %*% t(H2) + b3)\n+ [H3, H3_prime] = func(H3_in)\n+\n+ Yhat_in = t(W4 %*% t(H3) + b4)\n+ [Yhat, Yhat_prime] = func(Yhat_in)\n+ E = Yhat - Y\n+}\n+\n+grad = function(Matrix[Double] X, Matrix[Double] H1, Matrix[Double] H1_prime,\n+ Matrix[Double] H2, Matrix[Double] H2_prime, Matrix[Double] H3,\n+ Matrix[Double] H3_prime, Matrix[Double] Yhat_prime, Matrix[Double] E,\n+ Matrix[Double] W1, Matrix[Double] W2, Matrix[Double] W3, 
Matrix[Double] W4)\n+ return(Matrix[Double] W1_grad, Matrix[Double] b1_grad, Matrix[Double] W2_grad,\n+ Matrix[Double] b2_grad, Matrix[Double] W3_grad, Matrix[Double] b3_grad,\n+ Matrix[Double] W4_grad, Matrix[Double] b4_grad)\n+{\n+ #backprop\n+ delta4 = E * Yhat_prime\n+ delta3 = H3_prime * (delta4 %*% W4)\n+ delta2 = H2_prime * (delta3 %*% W3)\n+ delta1 = H1_prime * (delta2 %*% W2)\n+\n+ #compute gradients\n+ b4_grad = t(colSums(delta4))\n+ b3_grad = t(colSums(delta3))\n+ b2_grad = t(colSums(delta2))\n+ b1_grad = t(colSums(delta1))\n+\n+ W4_grad = t(delta4) %*% H3\n+ W3_grad = t(delta3) %*% H2\n+ W2_grad = t(delta2) %*% H1\n+ W1_grad = t(delta1) %*% X\n+}\n+\n+obj = function(Matrix[Double] E) return(Double val){\n+ val = 0.5 * sum(E^2)\n+}\n+\n+batch_size = ifdef($BATCH, 256)\n+mu = ifdef($MOMENTUM, 0.9)\n+step = ifdef($STEP, 1e-5)\n+decay = ifdef($DECAY, 0.95)\n+hfile = ifdef($HIDDEN, \" \")\n+fmt = ifdef($fmt, \"text\")\n+full_obj = ifdef($OBJ, FALSE)\n+\n+X = read($X)\n+num_hidden1 = $H1\n+num_hidden2 = $H2\n+max_epochs = $EPOCH\n+W1_rand = read($W1_rand)\n+W2_rand = read($W2_rand)\n+W3_rand = read($W3_rand)\n+W4_rand = read($W4_rand)\n+order_rand = read($order_rand)\n+\n+n = nrow(X)\n+m = ncol(X)\n+\n+#randomly reordering rows\n+#permut = table(seq(1,n,1), order(target=Rand(rows=n, cols=1, min=0, max=1, pdf=\"uniform\"), by=1, index.return=TRUE), n, n)\n+permut = table(seq(1,n,1), order(target=order_rand, by=1, index.return=TRUE), n, n)\n+X = permut %*% X\n+\n+#z-transform, whitening operator is better\n+means = colSums(X)/n\n+stds = sqrt((colSums(X^2)/n - means*means)*n/(n-1)) + 1e-17\n+X = (X - means)/stds\n+\n+W1 = sqrt(6)/sqrt(m + num_hidden1) * W1_rand #Rand(rows=num_hidden1, cols=m, min=-1, max=1, pdf=\"uniform\")\n+b1 = matrix(0, rows=num_hidden1, cols=1)\n+W2 = sqrt(6)/sqrt(num_hidden1 + num_hidden2) * W2_rand #Rand(rows=num_hidden2, cols=num_hidden1, min=-1, max=1, pdf=\"uniform\")\n+b2 = matrix(0, rows=num_hidden2, cols=1)\n+W3 = sqrt(6)/sqrt(num_hidden2 + num_hidden1) * W3_rand #Rand(rows=num_hidden1, cols=num_hidden2, min=-1, max=1, pdf=\"uniform\")\n+b3 = matrix(0, rows=num_hidden1, cols=1)\n+W4 = sqrt(6)/sqrt(num_hidden2 + m) * W4_rand #Rand(rows=m, cols=num_hidden1, min=-1, max=1, pdf=\"uniform\")\n+b4 = matrix(0, rows=m, cols=1)\n+\n+upd_W1 = matrix(0, rows=nrow(W1), cols=ncol(W1))\n+upd_b1 = matrix(0, rows=nrow(b1), cols=ncol(b1))\n+upd_W2 = matrix(0, rows=nrow(W2), cols=ncol(W2))\n+upd_b2 = matrix(0, rows=nrow(b2), cols=ncol(b2))\n+upd_W3 = matrix(0, rows=nrow(W3), cols=ncol(W3))\n+upd_b3 = matrix(0, rows=nrow(b3), cols=ncol(b3))\n+upd_W4 = matrix(0, rows=nrow(W4), cols=ncol(W4))\n+upd_b4 = matrix(0, rows=nrow(b4), cols=ncol(b4))\n+\n+if( full_obj ){\n+ [full_H1, full_H1_prime, full_H2, full_H2_prime, full_H3, full_H3_prime,\n+ full_Yhat, full_Yhat_prime, full_E] = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ full_o = obj(full_E)\n+ print(\"EPOCHS=\" + 0 + \" OBJ (FULL DATA): \" + full_o)\n+}\n+\n+iter = 0\n+num_iters_per_epoch = ceil(n / batch_size)\n+max_iterations = max_epochs * num_iters_per_epoch\n+#print(\"num_iters_per_epoch=\" + num_iters_per_epoch + \" max_iterations=\" + max_iterations)\n+beg = 1\n+while( iter < max_iterations ) {\n+ end = beg + batch_size - 1\n+ if(end > n) end = n\n+ X_batch = X[beg:end,]\n+\n+ [H1, H1_prime, H2, H2_prime, H3, H3_prime, Yhat, Yhat_prime, E] = feedForward(X_batch, W1, b1, W2, b2, W3, b3, W4, b4, X_batch)\n+ [W1_grad, b1_grad, W2_grad, b2_grad, W3_grad, b3_grad, W4_grad, b4_grad] = grad(X_batch, H1, H1_prime, H2, 
H2_prime, H3, H3_prime, Yhat_prime, E, W1, W2, W3, W4)\n+\n+ o = obj(E)\n+ print(\"epochs=%5.4f BATCH beg=%d end=%d obj=%f\", (iter / num_iters_per_epoch), beg, end, o)\n+\n+ #update\n+ local_step = step / nrow(X_batch)\n+ upd_W1 = mu * upd_W1 - local_step * W1_grad\n+ upd_b1 = mu * upd_b1 - local_step * b1\n+ upd_W2 = mu * upd_W2 - local_step * W2_grad\n+ upd_b2 = mu * upd_b2 - local_step * b2\n+ upd_W3 = mu * upd_W3 - local_step * W3_grad\n+ upd_b3 = mu * upd_b3 - local_step * b3\n+ upd_W4 = mu * upd_W4 - local_step * W4_grad\n+ upd_b4 = mu * upd_b4 - local_step * b4\n+ W1 = W1 + upd_W1\n+ b1 = b1 + upd_b1\n+ W2 = W2 + upd_W2\n+ b2 = b2 + upd_b2\n+ W3 = W3 + upd_W3\n+ b3 = b3 + upd_b3\n+ W4 = W4 + upd_W4\n+ b4 = b4 + upd_b4\n+\n+ iter = iter + 1\n+ if(end == n) beg = 1\n+ else beg = end + 1\n+\n+ if( iter %% num_iters_per_epoch == 0 ) step = step * decay\n+\n+ if( full_obj & iter %% num_iters_per_epoch == 0 ){\n+ [full_H1, full_H1_prime, full_H2, full_H2_prime, full_H3, full_H3_prime, full_Yhat, full_Yhat_prime, full_E] = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ full_o = obj(full_E)\n+ epochs = iter %/% num_iters_per_epoch\n+ print(\"EPOCHS=\" + epochs + \" iter=\" + iter + \" OBJ (FULL DATA)=\" + full_o)\n+ }\n+}\n+\n+write(W1, $W1_out, format=fmt)\n+write(b1, $b1_out, format=fmt)\n+write(W2, $W2_out, format=fmt)\n+write(b2, $b2_out, format=fmt)\n+write(W3, $W3_out, format=fmt)\n+write(b3, $b3_out, format=fmt)\n+write(W4, $W4_out, format=fmt)\n+write(b4, $b4_out, format=fmt)\n+\n+if( hfile != \" \" ){\n+ [full_H1, full_H1_prime, full_H2, full_H2_prime, full_H3, full_H3_prime, full_Yhat, full_Yhat_prime, full_E] = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ reordered_H = t(permut) %*% full_H2\n+ write(reordered_H, hfile, format=fmt)\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-2121] AutoEncoder test for codegenalg suite
This patch adds a test case for AutoEncoder with codegen
enabled against a corresponding R script.
Closes #890. |
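The R and DML scripts in this record implement the same two-layer autoencoder so the codegen test can compare their outputs. A minimal NumPy sketch of the shared core, one tanh feed-forward layer plus the momentum update, follows; the dimensions and the placeholder gradient are illustrative assumptions, not the test's code path, and the names mirror the scripts:

```python
import numpy as np

def func(X):
    # tanh activation and its derivative, as in the scripts' 'func'/'func1'
    Y = np.tanh(X)
    return Y, 1.0 - Y ** 2

def layer(X, W, b):
    # one feed-forward layer: H = t(tanh(W %*% t(X) + b))
    H_in = (W @ X.T + b).T
    return func(H_in)

rng = np.random.default_rng(42)
X = rng.uniform(-1, 1, (8, 5))     # mini-batch: 8 rows, 5 features
W1 = np.sqrt(6) / np.sqrt(5 + 3) * rng.uniform(-1, 1, (3, 5))  # Glorot-style init
b1 = np.zeros((3, 1))

H1, H1_prime = layer(X, W1, b1)    # activations plus derivative for backprop

# momentum update with a per-batch step size, as in the scripts' update block
mu, step = 0.9, 1e-5
upd_W1 = np.zeros_like(W1)
W1_grad = rng.uniform(-1, 1, W1.shape)  # placeholder for the backprop gradient
upd_W1 = mu * upd_W1 - (step / X.shape[0]) * W1_grad
W1 = W1 + upd_W1
```

After each epoch the scripts additionally multiply `step` by `decay` (0.95), which is the step-size decay named in the DML header.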
49,706 | 29.04.2020 23:24:10 | -7,200 | 937749621bf4d79238f52b0baae4d3308a9318fd | [MINOR] Fix readme badges with links to master branch
Fix badges to only reflect status on push to master branch
Make badges link to the tests conducted.
Closes | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -40,10 +40,10 @@ programs over matrices, while replacing the underlying data model and compiler,\nsupported functionalities. Until the first release, you can build your own snapshot via Apache Maven:\n`mvn clean package -P distribution`.\n-\n-\n-\n-\n-\n-\n-\n+[](https://github.com/apache/systemml/actions?query=workflow%3A%22Build%22+branch%3Amaster+event%3Apush)\n+[](https://github.com/apache/systemml/actions?query=workflow%3ADocumentation+branch%3Amaster+event%3Apush)\n+[](https://github.com/apache/systemml/actions?query=workflow%3A%22Component+Test%22+branch%3Amaster+event%3Apush)\n+[](https://github.com/apache/systemml/actions?query=workflow%3A%22Application+Test%22+branch%3Amaster+event%3Apush)\n+[](https://github.com/apache/systemml/actions?query=workflow%3A%22Function+Test%22+branch%3Amaster+event%3Apush)\n+[](https://github.com/apache/systemml/actions?query=workflow%3A%22Python+Test%22+branch%3Amaster+event%3Apush)\n+[](https://github.com/apache/systemml/actions?query=workflow%3A%22Federated+Python+Test%22+branch%3Amaster+event%3Apush)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix readme badges with links to master branch
- Fix badges to only reflect status on push to master branch
- Make badges link to the tests conducted.
Closes #903. |
49,739 | 29.04.2020 23:48:33 | -7,200 | a7f17b3d17176ea8339cb5b5bcdd3c5854763761 | Fix Python lm/rand tests (tolerance, workflow)
Just a few changes to the lm test case (increasing tolerance) so that the
test doesn't fail randomly. Remove the multiple definitions of `run` in the
Python workflow file.
AMLS project SS 2020, part 2.
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/python.yml",
"new_path": ".github/workflows/python.yml",
"diff": "@@ -74,9 +74,7 @@ jobs:\n${{ runner.os }}-pip-${{ matrix.python-version }}-\n- name: Install pip Dependencies\n- run: pip install numpy py4j wheel\n- run: pip install scipy\n- run: pip install sklearn\n+ run: pip install numpy py4j wheel scipy sklearn\n- name: Build Python Package\nrun: |\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/test_lm.py",
"new_path": "src/main/python/tests/test_lm.py",
"diff": "@@ -38,7 +38,7 @@ sds = SystemDSContext()\nregressor = LinearRegression(fit_intercept=False)\nshape = (random.randrange(1, 30), random.randrange(1, 30))\n-eps = 1e-05\n+eps = 1e-03\nclass TestLm(unittest.TestCase):\ndef setUp(self):\n@@ -60,8 +60,8 @@ class TestLm(unittest.TestCase):\nmodel.coef_ = model.coef_.reshape(sds_model_weights.shape)\nself.assertTrue(np.allclose(sds_model_weights, model.coef_, eps))\nexcept Exception as e:\n- self.assertTrue(False, \"This should not raise an exception!\")\nprint(e)\n+ self.assertTrue(False, \"This should not raise an exception!\")\ndef test_lm_invalid_shape(self):\nX = np.random.rand(shape[0], 0)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/test_matrix_rand.py",
"new_path": "src/main/python/tests/test_matrix_rand.py",
"diff": "@@ -27,14 +27,16 @@ import unittest\nimport numpy as np\nimport scipy.stats as st\nimport random\n+import math\npath = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../\")\nsys.path.insert(0, path)\nfrom systemds.context import SystemDSContext\n-shape = (random.randrange(1, 50), random.randrange(1, 50))\n+shape = (random.randrange(1, 25), random.randrange(1, 25))\n+dist_shape = (10, 15)\nmin_max = (0, 1)\n-sparsity = 0.2\n+sparsity = random.uniform(0.0, 1.0)\nseed = 123\ndistributions = [\"norm\", \"uniform\"]\n@@ -58,37 +60,31 @@ class TestRand(unittest.TestCase):\nself.assertTrue((m.min() >= min_max[0]) and (m.max() <= min_max[1]))\ndef test_rand_sparsity(self):\n- m = sds.rand(rows=shape[0], cols=shape[1], sparsity=sparsity, seed=seed).compute()\n- count, bins = np.histogram(m.flatten(\"F\"))\n- non_zero_value_percent = sum(count[1:]) * 100 / sum(count)\n- e = 0.05\n+ m = sds.rand(rows=shape[0], cols=shape[1], sparsity=sparsity, seed=0).compute()\n+ non_zero_value_percent = np.count_nonzero(m) * 100 /np.prod(m.shape)\n- self.assertTrue(\n- sum(count) == (shape[0] * shape[1])\n- and (non_zero_value_percent >= (sparsity - e) * 100)\n- and (non_zero_value_percent <= (sparsity + e) * 100)\n- )\n+ self.assertTrue(math.isclose(non_zero_value_percent, sparsity*100, rel_tol=5))\ndef test_rand_uniform_distribution(self):\nm = sds.rand(\n- rows=shape[0],\n- cols=shape[1],\n+ rows=dist_shape[0],\n+ cols=dist_shape[1],\npdf=\"uniform\",\nmin=min_max[0],\nmax=min_max[1],\n- seed=seed).compute()\n+ seed=0).compute()\ndist = find_best_fit_distribution(m.flatten(\"F\"), distributions)\nself.assertTrue(dist == \"uniform\")\ndef test_rand_normal_distribution(self):\nm = sds.rand(\n- rows=shape[0],\n- cols=shape[1],\n+ rows=dist_shape[0],\n+ cols=dist_shape[1],\npdf=\"normal\",\nmin=min_max[0],\nmax=min_max[1],\n- seed=seed).compute()\n+ seed=0).compute()\ndist = find_best_fit_distribution(m.flatten(\"F\"), distributions)\nself.assertTrue(dist == \"norm\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-316] Fix Python lm/rand tests (tolerance, workflow)
Just a few changes to the lm test case (increasing tolerance) so that the
tc doesn't fail randomly. Remove multiple definitions of run in python
workflow file.
AMLS project SS 2020, part 2.
Closes #902. |
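The reworked sparsity test above replaces a histogram heuristic with a direct non-zero count. The same check, reduced to plain NumPy with `rel_tol=5` copied from the diff (a deliberately loose tolerance for small random matrices); the matrix literal stands in for `sds.rand(...).compute()`:

```python
import math
import numpy as np

m = np.array([[0.0, 0.3, 0.0], [0.7, 0.0, 0.9]])  # stand-in for the SystemDS result
sparsity = 0.5                                    # requested fraction of non-zeros

non_zero_percent = np.count_nonzero(m) * 100 / np.prod(m.shape)
assert math.isclose(non_zero_percent, sparsity * 100, rel_tol=5)
```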
49,689 | 01.05.2020 15:20:37 | -7,200 | cf62a867846f6551ccbc9276124a7b679822fe77 | Add test for multiLogReg with reuse | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"diff": "@@ -39,7 +39,7 @@ public class LineageReuseAlg extends AutomatedTestBase {\nprotected static final String TEST_DIR = \"functions/lineage/\";\nprotected static final String TEST_NAME = \"LineageReuseAlg\";\n- protected static final int TEST_VARIANTS = 2;\n+ protected static final int TEST_VARIANTS = 3;\nprotected String TEST_CLASS_DIR = TEST_DIR + LineageReuseAlg.class.getSimpleName() + \"/\";\n@Override\n@@ -59,6 +59,11 @@ public class LineageReuseAlg extends AutomatedTestBase {\ntestLineageTrace(TEST_NAME+\"2\", ReuseCacheType.REUSE_HYBRID.name().toLowerCase());\n}\n+ @Test\n+ public void testMultiLogReg() {\n+ testLineageTrace(TEST_NAME+\"3\", ReuseCacheType.REUSE_HYBRID.name().toLowerCase());\n+ }\n+\npublic void testLineageTrace(String testname, String reuseType) {\nboolean old_simplification = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\nboolean old_sum_product = OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/LineageReuseAlg3.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+findBetas = function(Matrix[double] X, Matrix[double] y)\n+ return (Matrix[double] all_betas)\n+{\n+ R = matrix(0, rows=10*(ncol(X)+1), cols=5);\n+ for (lamda in 20:25) {\n+ #betas = multiLogReg(X=X, Y=y, maxii=0, verbose=FALSE);\n+ betas = multiLogReg(X=X, Y=y, icpt=2, tol=0.000001,\n+ reg=lamda, maxi=100, maxii=0, verbose=FALSE);\n+ R[1:ncol(X)+1,] = betas;\n+ }\n+ all_betas = R;\n+}\n+\n+findIcpt = function(Matrix[double] X, Matrix[double] y)\n+ return (Matrix[double] all_betas)\n+{\n+ R = matrix(0, rows=12*(ncol(X)+2), cols=5);\n+ for (lamda in 20:22) {\n+ for (icpt in 1:2) {\n+ #Function level reuse of 3 out of 6 calls.\n+ betas = multiLogReg(X=X, Y=y, icpt=icpt, tol=0.000001,\n+ reg=lamda, maxi=100, maxii=0, verbose=FALSE);\n+ #betas = multiLogReg(X=X, Y=y, icpt=icpt, maxii=0, verbose=FALSE);\n+ R[1:ncol(X)+1,] = betas;\n+ }\n+ }\n+ all_betas = R;\n+}\n+\n+\n+X = rand(rows=1000, cols=100, sparsity=1.0, seed=42);\n+y = rand(rows=1000, cols=1, min=0, max=6, sparsity=1.0, seed=42);\n+y = floor(y);\n+\n+all_betas1 = findBetas(X, y);\n+all_betas2 = findIcpt(X, y);\n+while(FALSE){}\n+R = rbind(all_betas1, all_betas2);\n+\n+write(R, $1, format=\"text\");\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-338] Add test for multiLogReg with reuse |
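The loops in LineageReuseAlg3.dml call `multiLogReg` repeatedly with overlapping `(icpt, reg)` settings so that the lineage cache can skip recomputation; the DML comment notes that 3 of the 6 calls in `findIcpt` are reused. Conceptually this resembles memoizing a function on its inputs; a toy Python sketch of that idea (the real mechanism keys on lineage traces, not Python argument hashes):

```python
from functools import lru_cache

calls = 0

@lru_cache(maxsize=None)
def multi_log_reg(icpt: int, reg: float) -> float:
    # stand-in for the expensive training call
    global calls
    calls += 1
    return icpt * 100.0 + reg

for reg in (20.0, 21.0, 22.0):
    for icpt in (1, 2):
        multi_log_reg(icpt, reg)   # six distinct calls, all computed

for reg in (20.0, 21.0, 22.0):
    for icpt in (1, 2):
        multi_log_reg(icpt, reg)   # same six calls, all served from the cache

print(calls)  # 6, not 12
```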
49,720 | 02.05.2020 23:32:47 | -7,200 | 1a7f5ca662597a524a052e97b5a4e6fe340938ec | New built-in function imputeByFD (MVI by robust FDs)
Missing value imputation via robust functional dependencies
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/Tasks.txt",
"new_path": "docs/Tasks.txt",
"diff": "@@ -292,7 +292,8 @@ SYSTEMDS-380 Memory Footprint\n* 371 Matrix Block Memory footprint update\nSYSTEMDS-390 New Builtin Functions IV\n- * 391 New GLM builtin-in function (from algorithms) OK\n+ * 391 New GLM builtin function (from algorithms) OK\n+ * 392 Builtin function for missing value imputation via FDs OK\nOthers:\n* Break append instruction to cbind and rbind\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/imputeByFD.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Implements builtin for imputing missing values from observed values (if exist)\n+# using robust functional dependencies\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# F String -- Data frame\n+# source Integer -- source attribute to use for imputation and error correction\n+# target Integer -- attribute to be fixed\n+# threshold Double -- threshold value in interval [0, 1] for robust FDs\n+# ---------------------------------------------------------------------------------------------\n+\n+\n+#Output(s)\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# imputed_F String --- Frame with possible imputations\n+\n+\n+s_imputeByFD = function(Frame[String] F, Integer sourceAttribute, Integer targetAttribute, Double threshold)\n+ return(Frame[String] imputed_F)\n+{\n+\n+ # sanity checks\n+ if( threshold < 0 | threshold > 1 )\n+ stop(\"Stopping due to invalid input, threshold required in interval [0, 1] found \"+threshold)\n+\n+ if(sourceAttribute < 0 | sourceAttribute > ncol(F) | targetAttribute < 0 | targetAttribute > ncol(F))\n+ stop(\"Stopping due to invalid source and target\")\n+\n+\n+ # detect schema for transformation\n+ schema = detectSchema(F)\n+ s=\"\"\n+ for(i in 1: ncol(F)) {\n+ if(as.scalar(schema[1,i]) == \"STRING\" | as.scalar(schema[1,i]) == \"BOOLEAN\" )\n+ s = s+as.integer(i)+\",\";\n+ }\n+\n+ # recode data frame\n+ jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n+ [X, M] = transformencode(target=F, spec=jspecR);\n+\n+ # impute missing values and fix errors\n+ X[,targetAttribute] = imputeAndCorrect(X[,sourceAttribute], X[,targetAttribute], threshold)\n+\n+ # getting the actual data back\n+ dF = transformdecode(target=X, spec=jspecR, meta=M);\n+ imputed_F = dF;\n+}\n+\n+imputeAndCorrect = function(Matrix[Double] X, Matrix[Double] Y, Double threshold)\n+ return(Matrix[Double] imputed_Y) {\n+\n+ XY = cbind(X, Y)\n+\n+ # replace the NaN values with zero\n+ XY = replace(target = XY, pattern=NaN, replacement=0)\n+ missing_mask = (XY == 0)\n+\n+ # map the missing values to an arbitrary number (i.e., Max values + 1)\n+ XY = missing_mask * (colMaxs(XY)+1) + XY\n+\n+ # create mapping between source and target\n+ ctab = table(XY[,1], XY[,2], 1)\n+\n+ # remove the table 
column representing missing values\n+ if(sum(missing_mask[,2]) > 0)\n+ ctab = ctab[,1:ncol(ctab)-1]\n+\n+ ctab = ctab/(rowSums(ctab)) > threshold\n+\n+ # Get the most frequent mapped value of Y\n+ ans = (ctab == rowMaxs(ctab)) * t(seq(1, ncol(ctab))) # rowIndexMax(ctab)?\n+ tabMax = rowSums(ans) != (ncol(ans) * ((ncol(ans))+1)/2) # vector for controlling max(0)\n+ filled = rowMaxs(ans) * tabMax\n+ imputed_Y = table(seq(1,nrow(X)), XY[,1]) %*% filled;\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -96,6 +96,7 @@ public enum Builtins {\nIMG_MIRROR(\"img_mirror\", true),\nIMG_BRIGHTNESS(\"img_brightness\", true),\nIMG_CROP(\"img_crop\", true),\n+ IMPUTE_FD(\"imputeByFD\", true),\nINTERQUANTILE(\"interQuantile\", false),\nINTERSECT(\"intersect\", true),\nINVERSE(\"inv\", \"inverse\", false),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinImputeFDTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.lops.LopProperties;\n+import org.apache.sysds.runtime.io.FrameWriter;\n+import org.apache.sysds.runtime.io.FrameWriterFactory;\n+import org.apache.sysds.runtime.matrix.data.FrameBlock;\n+import org.apache.sysds.runtime.matrix.data.InputInfo;\n+import org.apache.sysds.runtime.matrix.data.OutputInfo;\n+import org.apache.sysds.runtime.util.UtilFunctions;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+\n+import java.io.IOException;\n+\n+public class BuiltinImputeFDTest extends AutomatedTestBase {\n+\n+ private final static String TEST_NAME = \"imputeFD\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinImputeFDTest.class.getSimpleName() + \"/\";\n+ private final static int rows = 11;\n+ private final static int cols = 4;\n+ private final static double epsilon = 0.0000000001;\n+\n+ private final static Types.ValueType[] schema = {Types.ValueType.BOOLEAN, Types.ValueType.STRING, Types.ValueType.STRING, Types.ValueType.FP64};\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"C\"}));\n+ }\n+\n+ @Test\n+ public void test1() throws IOException {\n+ runImpute_RFDTests(2,3, 0.6, 1, LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void test2() throws IOException {\n+ runImpute_RFDTests(2,3, 0.45, 2, LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void test3() throws IOException {\n+ runImpute_RFDTests(2,3, 0.6, 1, LopProperties.ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void test4() throws IOException {\n+ runImpute_RFDTests(2,3, 0.4, 2, LopProperties.ExecType.SPARK);\n+ }\n+ private void runImpute_RFDTests(int source, int target, double threshold, int test, LopProperties.ExecType instType)\n+ throws IOException\n+ {\n+ Types.ExecMode platformOld = setExecMode(instType);\n+ try {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-args\", input(\"A\"), String.valueOf(source),String.valueOf(target), String.valueOf(threshold), output(\"B\")}; //\n+ //initialize the frame data.\n+ FrameBlock frame1 = new FrameBlock(schema);\n+ FrameWriter writer = FrameWriterFactory.createFrameWriter(OutputInfo.CSVOutputInfo);\n+ double[][] A = 
getRandomMatrix(rows, cols, 0, 1, 0.7, -1);\n+ initFrameDataString(frame1, A, test);\n+ writer.writeFrameToHDFS(frame1.slice(0, rows - 1, 0, schema.length - 1, new FrameBlock()),\n+ input(\"A\"), rows, schema.length);\n+\n+ runTest(true, false, null, -1);\n+ FrameBlock frameRead = readDMLFrameFromHDFS(\"B\", InputInfo.BinaryBlockInputInfo);\n+ FrameBlock realFrame = tureOutput(A);\n+ verifyFrameData(frameRead, realFrame, schema);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+\n+ private static void initFrameDataString(FrameBlock frame1, double[][] data, int test) {\n+ boolean[] b = new boolean[rows];\n+ long[] l = new long[rows];\n+ String[] s1 = null, s2 = null;\n+ for (int i = 0; i < rows; i++) {\n+ data[i][1] = (b[i] = (Boolean) UtilFunctions.doubleToObject(Types.ValueType.BOOLEAN, data[i][1], false)) ? 1 : 0;\n+ l[i] = (Long) UtilFunctions.doubleToObject(Types.ValueType.INT64, data[i][2], false);\n+ }\n+ switch (test)\n+ {\n+ case 1:\n+ s1 = new String[] {\"TU-Graz\", \"TU-Graz\", \"TU-Graz\", \"IIT\", \"IIT\", \"IIT\", \"IIT\", \"SIBA\", \"SIBA\", \"SIBA\", \"TU-Wien\"};\n+ s2 = new String[] {\"Austria\", \"Austria\", \"Austria\", \"India\", \"IIT\", \"India\", \"India\", \"Pakistan\", \"Pakistan\", \"Austria\", \"Austria\"};\n+ break;\n+ case 2:\n+ s1 = new String[] {\"TU-Graz\", \"TU-Graz\", \"TU-Graz\", \"IIT\", \"IIT\", \"IIT\", \"IIT\", \"SIBA\", \"SIBA\", \"SIBA\", \"TU-Wien\"};\n+ s2 = new String[] {\"Austria\", \"Austria\", \"Austria\", \"India\", \"IIT\", \"In\",\"India\", \"Pakistan\", \"Pakistan\", null,\"Austria\"};\n+ break;\n+ }\n+\n+ frame1.appendColumn(b);\n+ frame1.appendColumn(s1);\n+ frame1.appendColumn(s2);\n+ frame1.appendColumn(l);\n+ }\n+\n+ private static FrameBlock tureOutput(double[][] data) {\n+ FrameBlock frame1 = new FrameBlock(schema);\n+ boolean[] b = new boolean[rows];\n+ String[] s1 = {\"TU-Graz\", \"TU-Graz\", \"TU-Graz\", \"IIT\", \"IIT\", \"IIT\",\"IIT\", \"SIBA\", \"SIBA\", \"SIBA\", \"TU-Wien\"};\n+ String[] s2 = {\"Austria\", \"Austria\", \"Austria\", \"India\", \"India\", \"India\",\"India\", \"Pakistan\", \"Pakistan\", \"Pakistan\", \"Austria\"};\n+ long[] l = new long[rows];\n+ for (int i = 0; i < rows; i++) {\n+ data[i][1] = (b[i] = (Boolean) UtilFunctions.doubleToObject(Types.ValueType.BOOLEAN, data[i][1], false)) ? 1 : 0;\n+ l[i] = (Long) UtilFunctions.doubleToObject(Types.ValueType.INT64, data[i][2], false);\n+ }\n+ frame1.appendColumn(b);\n+ frame1.appendColumn(s1);\n+ frame1.appendColumn(s2);\n+ frame1.appendColumn(l);\n+ return frame1;\n+ }\n+\n+ private static void verifyFrameData(FrameBlock frame1, FrameBlock frame2, Types.ValueType[] schema) {\n+ for (int i = 0; i < frame1.getNumRows(); i++)\n+ for (int j = 0; j < frame1.getNumColumns(); j++) {\n+ Object val1 = UtilFunctions.stringToObject(schema[j], UtilFunctions.objectToString(frame1.get(i, j)));\n+ Object val2 = UtilFunctions.stringToObject(schema[j], UtilFunctions.objectToString(frame2.get(i, j)));\n+ if (TestUtils.compareToR(schema[j], val1, val2, epsilon) != 0)\n+ Assert.fail(\"The DML data for cell (\" + i + \",\" + j + \") is \" + val1 + \", not same as the expected value \" + val2);\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Copyright 2020 Graz University of Technology\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1, data_type=\"frame\", format=\"csv\", header=FALSE);\n+Y = imputeByFD(X, $2, $3, $4);\n+write(Y, $5, format=\"binary\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-392] New built-in function imputeByFD (MVI by robust FDs)
Missing value imputation via robust functional dependencies
Closes #887. |
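The heart of `imputeAndCorrect` above is a thresholded contingency table: for each recoded source value, the target value that co-occurs in more than `threshold` of that source's observed rows becomes the fill value. A rough NumPy translation under simplifying assumptions (1-based integer codes with 0 marking a missing target; only missing cells are filled here, whereas the builtin also corrects conflicting values against the dominant mapping):

```python
import numpy as np

def impute_by_fd(src, tgt, threshold):
    # contingency table over observed (source, target) pairs; 0 means missing
    ctab = np.zeros((src.max(), tgt.max()))
    for s, t in zip(src, tgt):
        if t > 0:
            ctab[s - 1, t - 1] += 1
    # row-normalize; keep a mapping only if it clears the robustness threshold
    frac = ctab / np.maximum(ctab.sum(axis=1, keepdims=True), 1)
    fill = np.where(frac.max(axis=1) > threshold, frac.argmax(axis=1) + 1, 0)
    return np.where(tgt == 0, fill[src - 1], tgt)

src = np.array([1, 1, 1, 2, 2, 3])
tgt = np.array([4, 4, 0, 5, 5, 0])    # two missing target entries
print(impute_by_fd(src, tgt, 0.6))    # [4 4 4 5 5 0]; no reliable FD for src=3
```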
49,720 | 03.05.2020 14:50:43 | -7,200 | 8fbcd758674a07fa0a0f41be2ecea110b53691cc | [MINOR] Various improvements of data cleaning built-in primitives
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/mice.dml",
"new_path": "scripts/builtin/mice.dml",
"diff": "# NAME TYPE DEFAULT MEANING\n# ---------------------------------------------------------------------------------------------\n# F String --- Data Frame\n+# cMask Double --- A 0/1 row vector for identifying numeric (0) adn categorical features (1)\n# iter Integer 3 Number of iteration for multiple imputations\n# complete Integer 3 A complete dataset generated though a specific iteration\n# ---------------------------------------------------------------------------------------------\n# Assumption missing value are represented with empty string i.e \",,\" in csv file\n# variables with suffix n are storing continous/numeric data and variables with suffix c are storing categorical data\n-s_mice= function(Frame[String] F, Matrix[Double] cMask, Integer iter = 3, Integer complete = 3)\n+s_mice= function(Frame[String] F, Matrix[Double] cMask, Integer iter = 3, Integer complete = 3, Boolean verbose = FALSE)\nreturn(Frame[String] dataset, Frame[String] singleSet)\n{\nif(ncol(F) == 1)\nstop(\"invalid aregument: can not apply mice on single column\")\n- # adding a temporary categorical feature (in-case all attributes are continous)\n+ if(complete > iter)\n+ complete = iter\n+\n+\n+ # adding a temporary feature (in-case all attributes are of same type)\nF = cbind(F, as.frame(matrix(1,nrow(F), 1)))\ncMask = cbind(cMask, matrix(1,1,1))\n@@ -58,6 +63,10 @@ return(Frame[String] dataset, Frame[String] singleSet)\nMask_Result = matrix(0, rows=1, cols=col)\nscat = seq(1, ncol(cMask))\ncat = removeEmpty(target=scat, margin=\"rows\", select=t(cMask))\n+\n+ if(nrow(cat) == ncol(F))\n+ cMask[1,ncol(cMask)] = 0\n+\ns=\"\"\nfor(i in 1: nrow(cat), check =0)\ns = s+as.integer(as.scalar(cat[i, 1]))+\",\";\n@@ -199,7 +208,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\nMask_Filled_c[,in_c] = table(R, 1, pred, n, 1);\ni = as.integer(j)\n}\n-\n+ if(in_c < col)\nin_c = in_c + 1\ni = i+1;\n}\n@@ -229,11 +238,11 @@ return(Frame[String] dataset, Frame[String] singleSet)\ndataset = XO + Agg_Matrix\nsingleSet = Result[index:row, ]\n- # # decoding nominal columns\n+ # decoding nominal columns\ndataset = transformdecode(target=dataset, spec=jspecR, meta=M);\nsingleSet = transformdecode(target=singleSet, spec=jspecR, meta=M);\n- # # removing extra categorical column\n+ # removing extra categorical column\ndataset = dataset[,1:col-1]\nsingleSet = singleSet[,1:col-1]\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/multiLogReg.dml",
"new_path": "scripts/builtin/multiLogReg.dml",
"diff": "@@ -259,7 +259,8 @@ m_multiLogReg = function(Matrix[Double] X, Matrix[Double] Y, Integer icpt = 2, D\niter = iter + 1;\nconverge = ((norm_Grad < (tol * norm_Grad_initial)) | (iter > maxi) |\n((is_trust_boundary_reached == 0) & (abs (actred) < (abs (obj) + abs (obj_new)) * 0.00000000000001)));\n- if (converge) { print (\"Termination / Convergence condition satisfied.\"); }\n+ if (verbose & converge)\n+ print (\"Termination / Convergence condition satisfied.\");\n}\nif (icpt == 2) {\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/outlierByIQR.dml",
"new_path": "scripts/builtin/outlierByIQR.dml",
"diff": "# k Double 1.5 a constant used to discern outliers k*IQR\n# isIterative Boolean TRUE iterative repair or single repair\n# repairMethod Integer 1 values: 0 = delete rows having outliers,\n-# 1 = replace outliers as missing values\n+# 1 = replace outliers with zeros\n+# 2 = replace outliers as missing values\n# max_iterations Integer 0 values: 0 = arbitrary number of iteraition until all outliers are removed,\n# n = any constant defined by user\n# ---------------------------------------------------------------------------------------------\n@@ -56,11 +57,11 @@ m_outlierByIQR = function(Matrix[Double] X, Double k =1.5, Integer repairMethod\nlowerBound = (Q1 - (k * IQR));\noutlierFilter = X < lowerBound | X > upperBound\n- if(sum(outlierFilter) > 1 & sum(X) != 0 & sumPrevious != sumNext ) {\n+ if(sum(outlierFilter) > 1 & sumNext != 0 & sumPrevious != sumNext ) {\n#TODO: see outlierBySd why are sumPrevious and sumNext necessary\n- sumPrevious = sum(X)\n- X = fix_outliers(X, outlierFilter, repairMethod)\n- sumNext = sum(X)\n+ sumPrevious = sum(X * !is.nan(X))\n+ X = fix_outliers_iqr(X, outlierFilter, repairMethod)\n+ sumNext = sum(X * !is.nan(X))\n}\nelse\nmax_iterations = -1\n@@ -79,7 +80,7 @@ m_outlierByIQR = function(Matrix[Double] X, Double k =1.5, Integer repairMethod\n}\n}\n-fix_outliers = function(Matrix[Double] X, Matrix[Double] outlierFilter, Integer repairMethod = 1)\n+fix_outliers_iqr = function(Matrix[Double] X, Matrix[Double] outlierFilter, Integer repairMethod = 1)\nreturn(Matrix[Double] fixed_X)\n{\nrows = nrow(X)\n@@ -90,8 +91,13 @@ fix_outliers = function(Matrix[Double] X, Matrix[Double] outlierFilter, Integer\n}\nelse if(repairMethod == 1)\nX = (outlierFilter == 0) * X\n+ else if(repairMethod == 2)\n+ {\n+ outlierFilter = replace(target = (outlierFilter == 0), pattern = 0, replacement = NaN)\n+ X = outlierFilter * X\n+ }\nelse\n- stop(\"outlierByIQR: invalid argument - repair required 0-1 found: \"+repairMethod)\n+ stop(\"outlierByIQR: invalid argument - repair required 0-2 found: \"+repairMethod)\nfixed_X = X\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/outlierBySd.dml",
"new_path": "scripts/builtin/outlierBySd.dml",
"diff": "# ---------------------------------------------------------------------------------------------\n# X Double --- Matrix X\n# k Double 3 threshold values 1, 2, 3 for 68%, 95%, 99.7% respectively (3-sigma rule)\n-# repairMethod Integer 1 values: 0 = delete rows having outliers, 1 = replace outliers as missing values\n-# (this script replaces outliers with zeros)\n+# repairMethod Integer 1 values: 0 = delete rows having outliers, 1 = replace outliers as zeros\n+# 2 = replace outliers as missing values\n# max_iterations Integer 0 values: 0 = arbitrary number of iteration until all outliers are removed,\n# n = any constant defined by user\n# ---------------------------------------------------------------------------------------------\n@@ -61,11 +61,11 @@ m_outlierBySd = function(Matrix[Double] X, Double k = 3, Integer repairMethod =\noutlierFilter = (X < lowerBound) | (X > upperBound)\n- if(sum(outlierFilter) > 1 & sum(X) != 0 & sumPrevious != sumNext) {\n+ if(sum(outlierFilter) > 1 & sumNext != 0 & sumPrevious != sumNext) {\n#TODO why is the check with sumPrevious and sumNext necessary\n- sumPrevious = sum(X)\n- X = fix_outliers(X, outlierFilter, repairMethod)\n- sumNext = sum(X)\n+ sumPrevious = sum(X * !is.nan(X))\n+ X = fix_outliers_sd(X, outlierFilter, repairMethod)\n+ sumNext = sum(X * !is.nan(X))\n}\nelse\nmax_iterations = - 1;\n@@ -85,7 +85,7 @@ m_outlierBySd = function(Matrix[Double] X, Double k = 3, Integer repairMethod =\n}\n}\n-fix_outliers = function(Matrix[Double] X, Matrix[Double] outlierFilter, Integer repairMethod = 2)\n+fix_outliers_sd = function(Matrix[Double] X, Matrix[Double] outlierFilter, Integer repairMethod = 2)\nreturn(Matrix[Double] fixed_X)\n{\nrows = nrow(X)\n@@ -96,6 +96,10 @@ fix_outliers = function(Matrix[Double] X, Matrix[Double] outlierFilter, Integer\n}\nelse if(repairMethod == 1)\nX = (outlierFilter == 0) * X\n+ else if (repairMethod == 2) {\n+ outlierFilter = replace(target = (outlierFilter == 0), pattern = 0, replacement = NaN)\n+ X = outlierFilter * X\n+ }\nelse\nstop(\"outlierBySd: invalid argument - repair required 0-1 found: \"+repairMethod)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/winsorize.dml",
"new_path": "scripts/builtin/winsorize.dml",
"diff": "#-------------------------------------------------------------\nm_winsorize = function(Matrix[Double] X) return (Matrix[Double] Y) {\n+ Y = matrix(0, nrow(X), ncol(X))\n+ parfor(i in 1:ncol(X))\n+ Y[,i] = fixOutliersWinsorize(X[,i])\n+}\n+\n+fixOutliersWinsorize = function(Matrix[Double] X) return (Matrix[Double] Y)\n+{\n# compute quantiles for lower and upper probs\nq = quantile(X, matrix(\"0.05 0.95\", rows=2, cols=1));\nql = as.scalar(q[1,1]);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMiceTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMiceTest.java",
"diff": "@@ -45,20 +45,30 @@ public class BuiltinMiceTest extends AutomatedTestBase {\naddTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[]{\"B\"}));\n}\n@Test\n- public void testMiceCP() {\n- runMiceNominalTest( LopProperties.ExecType.CP);\n+ public void testMiceMixCP() {\n+ double[][] mask = {{ 0.0, 0.0, 1.0, 1.0, 0.0}};\n+ runMiceNominalTest(mask, 1, LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testMiceNumberCP() {\n+ double[][] mask = {{ 0.0, 0.0, 0.0, 0.0, 0.0}};\n+ runMiceNominalTest(mask, 2, LopProperties.ExecType.CP);\n}\n+ @Test\n+ public void testMiceCategoricalCP() {\n+ double[][] mask = {{ 1.0, 1.0, 1.0, 1.0, 1.0}};\n+ runMiceNominalTest(mask, 3, LopProperties.ExecType.CP);\n+ }\n// @Test\n// public void testMiceSpark() {\n// runMiceNominalTest( LopProperties.ExecType.SPARK);\n// }\n-\n- private void runMiceNominalTest( LopProperties.ExecType instType) {\n+ private void runMiceNominalTest(double[][] mask, int testType, LopProperties.ExecType instType) {\nTypes.ExecMode platformOld = setExecMode(instType);\ntry {\n- double[][] mask = {{ 0.0, 0.0, 1.0, 1.0, 0.0}};\nloadTestConfiguration(getTestConfiguration(TEST_NAME));\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + TEST_NAME + \".dml\";\n@@ -71,14 +81,41 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nrunTest(true, false, null, -1);\nrunRScript(true);\n+\n+ switch (testType)\n+ {\n+ case 1:\n+ testCategoricalOutput();\n+ testNumericOutput();\n+ break;\n+ case 2:\n+ testNumericOutput();\n+ break;\n+ case 3:\n+ testCategoricalOutput();\n+ break;\n+ }\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+\n+ private void testNumericOutput()\n+ {\n//compare matrices\nHashMap<MatrixValue.CellIndex, Double> dmlfileN = readDMLMatrixFromHDFS(\"N\");\nHashMap<MatrixValue.CellIndex, Double> rfileN = readRMatrixFromFS(\"N\");\n- HashMap<MatrixValue.CellIndex, Double> dmlfileC = readDMLMatrixFromHDFS(\"C\");\n- HashMap<MatrixValue.CellIndex, Double> rfileC = readRMatrixFromFS(\"C\");\n// compare numerical imputations\nTestUtils.compareMatrices(dmlfileN, rfileN, eps, \"Stat-DML\", \"Stat-R\");\n+\n+ }\n+ private void testCategoricalOutput()\n+ {\n+ HashMap<MatrixValue.CellIndex, Double> dmlfileC = readDMLMatrixFromHDFS(\"C\");\n+ HashMap<MatrixValue.CellIndex, Double> rfileC = readRMatrixFromFS(\"C\");\n+\n// compare categorical imputations\nint countTrue = 0;\nfor (MatrixValue.CellIndex index : dmlfileC.keySet()) {\n@@ -93,8 +130,4 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nelse\nAssert.fail();\n}\n- finally {\n- rtplatform = platformOld;\n- }\n- }\n}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinOutlierByIQRTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinOutlierByIQRTest.java",
"diff": "@@ -83,6 +83,15 @@ public class BuiltinOutlierByIQRTest extends AutomatedTestBase {\nrunOutlierTest(false, 1.5, 1, 0,LopProperties.ExecType.SPARK);\n}\n+ @Test\n+ public void testOutlierRepair2IterativeCP() {\n+ runOutlierTest(false, 1.5, 2, 0,LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testOutlierRepair2IterativeSP() {\n+ runOutlierTest(false, 1.5, 2, 0,LopProperties.ExecType.SPARK);\n+ }\nprivate void runOutlierTest(boolean sparse, double k, int repair, int max_iterations, LopProperties.ExecType instType)\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinOutlierBySDTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinOutlierBySDTest.java",
"diff": "@@ -57,6 +57,11 @@ public class BuiltinOutlierBySDTest extends AutomatedTestBase {\nrunOutlierTest(false, 2, 2, 10, LopProperties.ExecType.CP);\n}\n+ @Test\n+ public void testOutlierRepair2SP() {\n+ runOutlierTest(false, 2, 2, 0, LopProperties.ExecType.CP);\n+ }\n+\n@Test\npublic void testOutlierRepair0SP() {\nrunOutlierTest(false, 2, 0, 10, LopProperties.ExecType.SPARK);\n@@ -64,7 +69,7 @@ public class BuiltinOutlierBySDTest extends AutomatedTestBase {\n@Test\npublic void testOutlierRepair1SP() {\n- runOutlierTest(false, 2,1, 0, LopProperties.ExecType.SPARK);\n+ runOutlierTest(false, 2, 1, 10, LopProperties.ExecType.SPARK);\n}\n@Test\n@@ -79,7 +84,7 @@ public class BuiltinOutlierBySDTest extends AutomatedTestBase {\n@Test\npublic void testOutlierIterativeSP() {\n- runOutlierTest(false, 2,1, 0, LopProperties.ExecType.SPARK);\n+ runOutlierTest(false, 2, 1, 10, LopProperties.ExecType.SPARK);\n}\nprivate void runOutlierTest(boolean sparse, double k, int repair, int max_iterations, LopProperties.ExecType instType)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinWinsorizeTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinWinsorizeTest.java",
"diff": "@@ -35,7 +35,7 @@ public class BuiltinWinsorizeTest extends AutomatedTestBase\nprivate final static String TEST_DIR = \"functions/builtin/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + BuiltinWinsorizeTest.class.getSimpleName() + \"/\";\n- private final static double eps = 1e-4;\n+ private final static double eps = 1e-3;\nprivate final static int rows = 1765;\nprivate final static double spDense = 0.99;\n@@ -69,7 +69,7 @@ public class BuiltinWinsorizeTest extends AutomatedTestBase\nrCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + inputDir() + \" \" + expectedDir();\n//generate actual dataset\n- double[][] A = getRandomMatrix(rows, 1, -1, 1, spDense, 7);\n+ double[][] A = getRandomMatrix(rows, 10, -1, 1, spDense, 7);\nwriteInputMatrixWithMTD(\"A\", A, true);\nrunTest(true, false, null, -1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/mice.R",
"new_path": "src/test/scripts/functions/builtin/mice.R",
"diff": "@@ -26,14 +26,41 @@ library(dplyr)\nd <- read.csv(args[1], header=FALSE )\nmass <- as.matrix(readMM(paste(args[2], \"M.mtx\", sep=\"\")));\n+\n+if(sum(mass) == ncol(d))\n+{\n+d = d[,3:4]\n+mass = mass[1,3:4]\n+meth=\"\"\n+ for(i in 1: 2) {\n+ d[[names(d)[i]]] = as.factor(d[[names(d)[i]]]);\n+ meth = c(meth, \"polyreg\")\n+ }\n+\n+ meth=meth[-1]\n+\n+ #impute\n+ imputeD <- mice(d,where = is.na(d), method = meth, m=3)\n+ R = data.frame(complete(imputeD,3))\n+ c = select_if(R, is.factor)\n+\n+ # convert factor into numeric before casting to matrix\n+ c = sapply(c, function(x) as.numeric(as.character(x)))\n+ writeMM(as(as.matrix(c), \"CsparseMatrix\"), paste(args[3], \"C\", sep=\"\"));\n+} else if (sum(mass) == 0)\n+{\n+ print(\"Generating R witout cat\")\n+ imputeD <- mice(d,where = is.na(d), method = \"norm.predict\", m=3)\n+ R = data.frame(complete(imputeD,3))\n+ n = select_if(R, is.numeric)\n+ writeMM(as(as.matrix(n), \"CsparseMatrix\"), paste(args[3], \"N\", sep=\"\"));\n+} else {\nmeth=\"\"\nfor(i in 1: ncol(mass)) {\nif(as.integer(mass[1,i]) == 1) {\nd[[names(d)[i]]] = as.factor(d[[names(d)[i]]]);\nmeth = c(meth, \"polyreg\")\n- }\n- else\n- meth = c(meth, \"norm.predict\")\n+ } else meth = c(meth, \"norm.predict\")\n}\nmeth=meth[-1]\n@@ -57,12 +84,10 @@ pred[names(d)[4], names(d)[3]] = 1\n#impute\nimputeD <- mice(d,where = is.na(d), method = meth, m=3, pred = pred)\nR = data.frame(complete(imputeD,3))\n-\n-n =select_if(R, is.numeric)\nc = select_if(R, is.factor)\n-\n# convert factor into numeric before casting to matrix\nc = sapply(c, function(x) as.numeric(as.character(x)))\n-\n-writeMM(as(as.matrix(n), \"CsparseMatrix\"), paste(args[3], \"N\", sep=\"\"));\n+ n = select_if(R, is.numeric)\nwriteMM(as(as.matrix(c), \"CsparseMatrix\"), paste(args[3], \"C\", sep=\"\"));\n+ writeMM(as(as.matrix(n), \"CsparseMatrix\"), paste(args[3], \"N\", sep=\"\"));\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/mice.dml",
"new_path": "src/test/scripts/functions/builtin/mice.dml",
"diff": "X = read($X, data_type=\"frame\", format=\"csv\");\nM = read($Mask)\n-[dataset, singleSet]= mice(F=X, cMask=M, iter=$iteration, complete=$com)\n+[dataset, singleSet]= mice(F=X, cMask=M, iter=$iteration, complete=$com, verbose = FALSE)\n+\n+if(sum(M) == ncol(X))\n+{\n+ c = as.matrix(singleSet[,3:4]) # comparing only selected columns with R results because dataset is continuos and\n+ write(c, $dataC) # for categorical imputation R polyreg only support upto 50 distinct items (50 categories/feature)\n+}\n+else if (sum(M) == 0)\n+{\nn = as.matrix(dataset) * (1-M)\nn = removeEmpty(target=n, margin = \"cols\")\n+ write(n, $dataN)\n+}\n+else\n+{\nc = as.matrix(dataset) * (M)\nc = removeEmpty(target=c, margin = \"cols\")\n+ n = as.matrix(dataset) * (1-M)\n+ n = removeEmpty(target=n, margin = \"cols\")\nwrite(n, $dataN)\nwrite(c, $dataC)\n-\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/outlier_by_IQR.dml",
"new_path": "src/test/scripts/functions/builtin/outlier_by_IQR.dml",
"diff": "#-------------------------------------------------------------\nX = read($1);\n-Y = outlierByIQR(X, $2, $3, $4, TRUE);\n+Y = outlierByIQR(X, $2, $3, $4, FALSE);\nwrite(Y, $5)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/outlier_by_sd.dml",
"new_path": "src/test/scripts/functions/builtin/outlier_by_sd.dml",
"diff": "#-------------------------------------------------------------\nX = read($1);\n-Y = outlierBySd(X, $2, $3, $4, TRUE);\n+Y = outlierBySd(X, $2, $3, $4, FALSE);\nwrite(Y, $5)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/winsorize.R",
"new_path": "src/test/scripts/functions/builtin/winsorize.R",
"diff": "@@ -25,6 +25,8 @@ library(\"Matrix\")\nlibrary(\"DescTools\")\nX = as.matrix(readMM(paste(args[1], \"A.mtx\", sep=\"\")))\n-Y = Winsorize(X);\n+Y = matrix(0, nrow(X), ncol(X))\n+for(i in 1:ncol(X))\n+ Y[,i] = Winsorize(X[,i]);\nwriteMM(as(Y, \"CsparseMatrix\"), paste(args[2], \"B\", sep=\"\"));\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/caching/BufferpoolLeak.dml",
"new_path": "src/test/scripts/functions/caching/BufferpoolLeak.dml",
"diff": "X = rand(rows=$1, cols=$2, min=1, max=10);\nfor(i in 1:500) {\n#print(\"executed iteration \"+i)\n- [m1,m2] = mice(as.frame(X), matrix(0,1,ncol(X)),3,3)\n+ [m1,m2] = mice(as.frame(X), matrix(0,1,ncol(X)),3,3, FALSE)\n}\nif( ncol(X) > $2 )\nprint(toString(m1));\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Various improvements of data cleaning built-in primitives
Closes #901. |
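For orientation, a minimal DML sketch (not part of the commit) of how the touched builtins are invoked after this change, with verbosity disabled as in the updated test scripts; the file paths and the three middle arguments of outlierByIQR below are illustrative placeholders only, not documented defaults:

  F = read("data/F.csv", data_type="frame", format="csv");
  M = read("data/M.csv");
  # mice now takes an explicit verbose flag (see the mice.dml test above)
  [dataset, singleSet] = mice(F=F, cMask=M, iter=3, complete=3, verbose=FALSE);

  X = rand(rows=100, cols=10);
  # the outlier tests now pass FALSE as the last (verbose) argument
  Y = outlierByIQR(X, 1.5, 1, 10, FALSE);  # numeric args are placeholders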
49,738 | 09.05.2020 21:49:01 | -7,200 | 5e726cfe3122336becde8c84800180dd211d935a | Fix file format handling, docs, and github test config
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/documentation.yml",
"new_path": ".github/workflows/documentation.yml",
"diff": "name: Documentation\n-on:\n- push:\n- branches:\n- - master\n+on: [push, pull_request]\njobs:\ndocumentation1:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/DataExpression.java",
"new_path": "src/main/java/org/apache/sysds/parser/DataExpression.java",
"diff": "@@ -1245,7 +1245,7 @@ public class DataExpression extends DataIdentifier\n//validate read filename\nif (getVarParam(FORMAT_TYPE) == null || FileFormat.isTextFormat(getVarParam(FORMAT_TYPE).toString()))\ngetOutput().setBlocksize(-1);\n- else if (getVarParam(FORMAT_TYPE).toString().equalsIgnoreCase(\"binary\"))\n+ else if (getVarParam(FORMAT_TYPE).toString().equalsIgnoreCase(FileFormat.BINARY.toString()))\ngetOutput().setBlocksize(ConfigurationManager.getBlocksize());\nelse\nraiseValidateError(\"Invalid format \" + getVarParam(FORMAT_TYPE)\n@@ -2059,11 +2059,9 @@ public class DataExpression extends DataIdentifier\n// if the read method parameter is a constant, then verify value matches MTD metadata file\nif (getVarParam(key.toString()) != null && (getVarParam(key.toString()) instanceof ConstIdentifier)\n&& !getVarParam(key.toString()).toString().equalsIgnoreCase(val.toString())) {\n- raiseValidateError(\n- \"Parameter '\" + key.toString()\n+ raiseValidateError(\"Parameter '\" + key.toString()\n+ \"' has conflicting values in metadata and read statement. MTD file value: '\"\n- + val.toString() + \"'. Read statement value: '\" + getVarParam(key.toString()) + \"'.\",\n- conditional);\n+ + val.toString() + \"'. Read statement value: '\" + getVarParam(key.toString()) + \"'.\", conditional);\n} else {\n// if the read method does not specify parameter value, then add MTD metadata file value to parameter list\nif (getVarParam(key.toString()) == null){\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -955,7 +955,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nthrow new DMLRuntimeException(\"Unexpected error while writing mtd file (\" + filePathAndName + \") -- metadata is null.\");\n// Write the matrix to HDFS in requested format\n- FileFormat fmt = iimd.getFileFormat();\n+ FileFormat fmt = (outputFormat != null) ? FileFormat.safeValueOf(outputFormat) : iimd.getFileFormat();\nif ( fmt != FileFormat.MM ) {\n// Get the dimension information from the metadata stored within MatrixObject\nDataCharacteristics dc = iimd.getDataCharacteristics();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/context/SparkExecutionContext.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/context/SparkExecutionContext.java",
"diff": "@@ -517,7 +517,7 @@ public class SparkExecutionContext extends ExecutionContext\n* in order to support the old transform implementation.\n*\n* @param fo frame object\n- * @param inputInfo input info\n+ * @param fmt file format type\n* @return JavaPairRDD handle for a frame object\n*/\n@SuppressWarnings({ \"unchecked\", \"resource\" })\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"diff": "@@ -879,8 +879,9 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nprivate void processWriteInstruction(ExecutionContext ec) {\n//get filename (literal or variable expression)\nString fname = ec.getScalarInput(getInput2().getName(), ValueType.STRING, getInput2().isLiteral()).getStringValue();\n- if (!getInput3().getName().equalsIgnoreCase(\"libsvm\"))\n- {\n+ String fmtStr = getInput3().getName();\n+ FileFormat fmt = FileFormat.safeValueOf(fmtStr);\n+ if( fmt != FileFormat.LIBSVM ) {\nString desc = ec.getScalarInput(getInput4().getName(), ValueType.STRING, getInput4().isLiteral()).getStringValue();\n_formatProperties.setDescription(desc);\n}\n@@ -889,28 +890,25 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nwriteScalarToHDFS(ec, fname);\n}\nelse if( getInput1().getDataType() == DataType.MATRIX ) {\n- String outFmt = getInput3().getName();\n- if (outFmt.equalsIgnoreCase(\"matrixmarket\"))\n+ if( fmt == FileFormat.MM )\nwriteMMFile(ec, fname);\n- else if (outFmt.equalsIgnoreCase(\"csv\") )\n+ else if( fmt == FileFormat.CSV )\nwriteCSVFile(ec, fname);\nelse {\n// Default behavior\nMatrixObject mo = ec.getMatrixObject(getInput1().getName());\nmo.setPrivacyConstraints(getPrivacyConstraint());\n- mo.exportData(fname, outFmt, _formatProperties);\n+ mo.exportData(fname, fmtStr, _formatProperties);\n}\n}\nelse if( getInput1().getDataType() == DataType.FRAME ) {\n- String outFmt = getInput3().getName();\nFrameObject mo = ec.getFrameObject(getInput1().getName());\n- mo.exportData(fname, outFmt, _formatProperties);\n+ mo.exportData(fname, fmtStr, _formatProperties);\n}\nelse if( getInput1().getDataType() == DataType.TENSOR ) {\n// TODO write tensor\n- String outFmt = getInput3().getName();\nTensorObject to = ec.getTensorObject(getInput1().getName());\n- to.exportData(fname, outFmt, _formatProperties);\n+ to.exportData(fname, fmtStr, _formatProperties);\n}\n}\n@@ -973,7 +971,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\n*/\nprivate void writeMMFile(ExecutionContext ec, String fname) {\nMatrixObject mo = ec.getMatrixObject(getInput1().getName());\n- String outFmt = \"matrixmarket\";\n+ String outFmt = FileFormat.MM.toString();\nif(mo.isDirty()) {\n// there exist data computed in CP that is not backed up on HDFS\n// i.e., it is either in-memory or in evicted space\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/ReblockSPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/ReblockSPInstruction.java",
"diff": "@@ -42,6 +42,7 @@ import org.apache.sysds.runtime.io.FileFormatPropertiesMM;\nimport org.apache.sysds.runtime.io.IOUtilFunctions;\nimport org.apache.sysds.runtime.matrix.data.FrameBlock;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysds.runtime.matrix.data.MatrixCell;\nimport org.apache.sysds.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\nimport org.apache.sysds.runtime.meta.DataCharacteristics;\n@@ -154,8 +155,16 @@ public class ReblockSPInstruction extends UnarySPInstruction {\ncsvInstruction.processInstruction(sec);\nreturn;\n}\n- else if(fmt == FileFormat.BINARY)\n- {\n+ else if(fmt == FileFormat.BINARY && mc.getBlocksize() <= 0) {\n+ //BINARY BLOCK <- BINARY CELL (e.g., after grouped aggregate)\n+ JavaPairRDD<MatrixIndexes, MatrixCell> binaryCells = (JavaPairRDD<MatrixIndexes, MatrixCell>) sec.getRDDHandleForMatrixObject(mo, FileFormat.BINARY);\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> out = RDDConverterUtils.binaryCellToBinaryBlock(sec.getSparkContext(), binaryCells, mcOut, outputEmptyBlocks);\n+\n+ //put output RDD handle into symbol table\n+ sec.setRDDHandleForVariable(output.getName(), out);\n+ sec.addLineageRDD(output.getName(), input1.getName());\n+ }\n+ else if(fmt == FileFormat.BINARY) {\n//BINARY BLOCK <- BINARY BLOCK (different sizes)\nJavaPairRDD<MatrixIndexes, MatrixBlock> in1 = sec.getBinaryMatrixBlockRDDHandleForVariable(input1.getName());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/functions/ExtractBlockForBinaryReblock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/functions/ExtractBlockForBinaryReblock.java",
"diff": "@@ -46,7 +46,7 @@ public class ExtractBlockForBinaryReblock implements PairFlatMapFunction<Tuple2<\n//sanity check block sizes\nif(in_blen <= 0 || out_blen <= 0)\n- throw new DMLRuntimeException(\"Block sizes not unknown:\" + in_blen + \",\" + out_blen);\n+ throw new DMLRuntimeException(\"Block sizes unknown:\" + in_blen + \", \" + out_blen);\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/io/FrameReaderFactory.java",
"new_path": "src/main/java/org/apache/sysds/runtime/io/FrameReaderFactory.java",
"diff": "@@ -57,6 +57,8 @@ public class FrameReaderFactory\nreader = new FrameReaderBinaryBlockParallel();\nelse\nreader = new FrameReaderBinaryBlock();\n+ break;\n+\ndefault:\nthrow new DMLRuntimeException(\n\"Failed to create frame reader for unknown format: \" + fmt.toString());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/io/csv/FormatChangeTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/io/csv/FormatChangeTest.java",
"diff": "@@ -37,7 +37,6 @@ import org.apache.sysds.test.TestUtils;\[email protected]\npublic class FormatChangeTest extends AutomatedTestBase\n{\n-\nprivate final static String TEST_NAME = \"csv_test\";\nprivate final static String TEST_DIR = \"functions/io/csv/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FormatChangeTest.class.getSimpleName() + \"/\";\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-55] Fix file format handling, docs, and github test config
Closes #907. |
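To illustrate the format handling consolidated above, a hedged DML sketch of reads and writes with explicit format strings, which the runtime now resolves uniformly through FileFormat (paths are placeholders; a binary read assumes matching metadata exists):

  X = read("data/X", format="binary");    # block size taken from the configuration
  write(X, "data/X.csv", format="csv");
  write(X, "data/X.txt", format="text");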
49,738 | 12.05.2020 22:26:14 | -7,200 | 96a719bc384f0c60dc1994be49d72d91d2031dea | Fix lineage merge on parfor w/ conditional control flow
This patch makes a minor robustness fix to the parfor lineage merge for
the case that certain workers did not make any updates of result
variables due to conditional control flow in the parfor body. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ParForProgramBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ParForProgramBlock.java",
"diff": "@@ -1351,6 +1351,7 @@ public class ParForProgramBlock extends ForProgramBlock\nLineageItem current = lineages[0].get(var._name);\nfor( int i=1; i<lineages.length; i++ ) {\nLineageItem next = lineages[i].get(var._name);\n+ if( next != null ) //robustness for cond. control flow\ncurrent = LineageItemUtils.replace(next, retIn, current);\n}\nec.getLineage().set(var._name, current);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-238] Fix lineage merge on parfor w/ conditional control flow
This patch makes a minor robustness fix to the parfor lineage merge for
the case that certain workers did not make any updates of result
variables due to conditional control flow in the parfor body. |
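To make the fixed corner case concrete, a small DML sketch (not part of the patch) of a parfor body in which some workers never update the result variable, so their worker-local lineage map has no entry for it to merge:

  R = matrix(0, rows=10, cols=1);
  parfor(i in 1:10) {
    if( i %% 2 == 0 )   # odd iterations leave R untouched,
      R[i,1] = i * i;   # so lineages[k].get("R") can be null for some workers
  }
  print(sum(R));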
49,706 | 15.05.2020 22:49:56 | -7,200 | 99a7271a12a8d30e9f604cb46db5c6c6ee20d241 | [MINOR] Fix missing licenses and build rat check
ignore __pycache__ folders for rat
add missing license headers in compression tests
add missing license headers in scripts (__init__.py files)
add rat check to build workflow
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build.yml",
"new_path": ".github/workflows/build.yml",
"diff": "@@ -48,4 +48,4 @@ jobs:\n${{ runner.os }}-maven-\n- name: Build\n- run: mvn package\n+ run: mvn package -P rat\n"
},
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<exclude>**/*.mtx</exclude>\n<exclude>**/*.mtd</exclude>\n<exclude>**/*.out</exclude>\n+ <exclude>**/__pycache__/**</exclude>\n<exclude>**/part-*</exclude>\n<exclude>**/*.keep</exclude>\n<exclude>**/target/**</exclude>\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/__init__.py",
"new_path": "scripts/staging/slicing/__init__.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/base/__init__.py",
"new_path": "scripts/staging/slicing/base/__init__.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/tests/__init__.py",
"new_path": "scripts/staging/slicing/tests/__init__.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/tests/classification/__init__.py",
"new_path": "scripts/staging/slicing/tests/classification/__init__.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/tests/regression/__init__.py",
"new_path": "scripts/staging/slicing/tests/regression/__init__.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateDDCTest.java",
"new_path": "src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateDDCTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\npackage org.apache.sysds.test.component.compress.colgroup;\nimport java.util.ArrayList;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateTest.java",
"new_path": "src/test/java/org/apache/sysds/test/component/compress/colgroup/JolEstimateTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\npackage org.apache.sysds.test.component.compress.colgroup;\nimport static org.junit.Assert.assertTrue;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix missing licenses and build rat check
ignore __pycache__ folders for rat
add missing license headers in compression tests
add missing license headers in scripts (__init__.py files)
add rat check to build workflow
Closes #912. |
49,706 | 15.05.2020 22:53:58 | -7,200 | 9bc52328f5535492d50cca811a67bd81829220ce | [MINOR] Cache Python pip and apt dependencies
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/python.yml",
"new_path": ".github/workflows/python.yml",
"diff": "@@ -53,8 +53,18 @@ jobs:\nwith:\npath: ~/.m2/repository\nkey: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}\n- restore-keys: |\n- ${{ runner.os }}-maven-\n+\n+ - name: Cache Pip Dependencies\n+ uses: actions/cache@v1\n+ with:\n+ path: ~/.cache/pip\n+ key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('src/main/python/setup.py') }}\n+\n+ - name: Cache Deb Dependencies\n+ uses: actions/cache@v1\n+ with:\n+ path: /var/cache/apt/archives\n+ key: ${{ runner.os }}-${{ hashFiles('.github/workflows/python.yml') }}\n- name: Maven clean & package\nrun: mvn clean package -P distribution\n@@ -65,15 +75,7 @@ jobs:\npython-version: ${{ matrix.python-version }}\narchitecture: 'x64'\n- - name: Cache Pip Dependencies\n- uses: actions/cache@v1\n- with:\n- path: ~/.cache/pip\n- key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('src/main/python/setup.py') }}\n- restore-keys: |\n- ${{ runner.os }}-pip-${{ matrix.python-version }}-\n-\n- - name: Install protobuf\n+ - name: Install Protobuf\nrun: sudo apt-get install protobuf-compiler libprotoc-dev\n- name: Install pip Dependencies\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Cache Python pip and apt dependencies
Closes #913. |
49,689 | 21.05.2020 00:51:03 | -7,200 | 0dc1d16694a9fd7b56583b53566eab724d74a4db | Fix partial rewrites with new LineageItem constructors. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -650,7 +650,7 @@ public class LineageRewriteReuse\n//for (LineageItem input : source.getInputs()) {\n// create tsmm lineage on top of the input of last append\nLineageItem input1 = source.getInputs()[0];\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {input1});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {input1});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n// look for the appended column in cache\n@@ -682,7 +682,7 @@ public class LineageRewriteReuse\nreturn false;\n// create tsmm lineage on top of the input of last append\nLineageItem input1 = source.getInputs()[0];\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {input1});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {input1});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n}\n@@ -703,7 +703,7 @@ public class LineageRewriteReuse\nif (source.getOpcode().equalsIgnoreCase(\"rbind\")) {\n// create tsmm lineage on top of the input of last append\nLineageItem input1 = source.getInputs()[0];\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {input1});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {input1});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n// look for the appended column in cache\n@@ -730,8 +730,8 @@ public class LineageRewriteReuse\nLineageItem input = source.getInputs()[0];\nif (input.getOpcode().equalsIgnoreCase(\"cbind\")) {\nLineageItem L2appin1 = input.getInputs()[0];\n- LineageItem tmp = new LineageItem(\"comb\", \"cbind\", new LineageItem[] {L2appin1, source.getInputs()[1]});\n- LineageItem toProbe = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {tmp});\n+ LineageItem tmp = new LineageItem(\"cbind\", new LineageItem[] {L2appin1, source.getInputs()[1]});\n+ LineageItem toProbe = new LineageItem(curr.getOpcode(), new LineageItem[] {tmp});\nif (LineageCache.probe(toProbe))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(toProbe));\n// look for the appended column in cache\n@@ -757,7 +757,7 @@ public class LineageRewriteReuse\nif (left.getOpcode().equalsIgnoreCase(\"rbind\")){\nLineageItem leftSource = left.getInputs()[0]; //left inpur of rbind = X\n// create ba+* lineage on top of the input of last append\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {leftSource, right});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {leftSource, right});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n// look for the appended column in cache\n@@ -782,7 +782,7 @@ public class LineageRewriteReuse\nif (right.getOpcode().equalsIgnoreCase(\"cbind\")) {\nLineageItem rightSource = right.getInputs()[0]; //left inpur of rbind = X\n// create ba+* lineage on top of the input of last append\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {left, rightSource});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {left, rightSource});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n// look for the appended column in cache\n@@ -813,7 +813,7 @@ public class LineageRewriteReuse\nif (!((DataGenCPInstruction)ins).isOnesCol())\nreturn false;\n// create ba+* lineage on top of the input of last 
append\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {left, rightSource1});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {left, rightSource1});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n}\n@@ -835,7 +835,7 @@ public class LineageRewriteReuse\nLineageItem leftSource = left.getInputs()[0]; //left inpur of rbind = X\nLineageItem rightSource = right.getInputs()[0]; //right inpur of rbind = Y\n// create * lineage on top of the input of last append\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {leftSource, rightSource});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {leftSource, rightSource});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n// look for the appended rows in cache\n@@ -862,7 +862,7 @@ public class LineageRewriteReuse\nLineageItem leftSource = left.getInputs()[0]; //left inpur of cbind = X\nLineageItem rightSource = right.getInputs()[0]; //right inpur of cbind = Y\n// create * lineage on top of the input of last append\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(), new LineageItem[] {leftSource, rightSource});\n+ LineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {leftSource, rightSource});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n// look for the appended columns in cache\n@@ -892,7 +892,7 @@ public class LineageRewriteReuse\nif (target.getOpcode().equalsIgnoreCase(\"cbind\")) {\n// create groupedagg lineage on top of the input of last append\nLineageItem input1 = target.getInputs()[0];\n- LineageItem tmp = new LineageItem(\"toProbe\", curr.getOpcode(),\n+ LineageItem tmp = new LineageItem(curr.getOpcode(),\nnew LineageItem[] {input1, groups, weights, fn, ngroups});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/RewriteTest3.dml",
"new_path": "src/test/scripts/functions/lineage/RewriteTest3.dml",
"diff": "@@ -23,15 +23,15 @@ X = read($1);\nsum = 0;\ntmp = X[,1];\n-tmp1 = matrix(0, rows=nrow(X), cols=1);\n+tmp1 = matrix(0, rows=nrow(X), cols=0);\nR = matrix(0, 1, ncol(X));\n+ones_n = matrix(1, rows=nrow(X), cols=1);\n-for (i in 2:ncol(X)) {\n- Res1 = t(tmp1) %*% tmp1;\n+for (i in 1:ncol(X)) {\ntmp = cbind(tmp, X[,i]);\n- while(FALSE) {};\n- ones_n = matrix(1, rows=nrow(X), cols=1);\ntmp1 = cbind(tmp, ones_n);\n+ Res1 = t(tmp1) %*% tmp1;\n+ while(FALSE) {};\nR[1,i] = sum(Res1);\nsum = sum + sum(Res1);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-74] Fix partial rewrites with new LineageItem constructors. |
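The probes above target an iteratively grown matrix whose tsmm (t(X) %*% X) is recomputed per iteration; a minimal DML sketch of that access pattern, adapted from the RewriteTest3 script in this commit (sizes are arbitrary):

  X = rand(rows=100, cols=10);
  tmp = matrix(0, rows=nrow(X), cols=0);
  ones_n = matrix(1, rows=nrow(X), cols=1);
  for(i in 1:ncol(X)) {
    tmp = cbind(tmp, X[,i]);    # append one column per iteration
    tmp1 = cbind(tmp, ones_n);
    Res1 = t(tmp1) %*% tmp1;    # tsmm over an appended matrix: partial-reuse candidate
    print(sum(Res1));
  }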
49,706 | 23.05.2020 22:40:54 | -7,200 | b7424431f3f6131e3245c03ba8d40aa81d2cb245 | [MINOR] Update Dockerfile (fixes, new R dependency)
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/action/Dockerfile",
"new_path": ".github/action/Dockerfile",
"diff": "#\n#-------------------------------------------------------------\n-FROM sebaba/testingsysds:0.2\n+FROM sebaba/testingsysds:2.0\n"
},
{
"change_type": "MODIFY",
"old_path": ".github/workflows/componentTests.yml",
"new_path": ".github/workflows/componentTests.yml",
"diff": "@@ -52,4 +52,19 @@ jobs:\nrun: mvn clean compile test-compile\n- name: Component Tests\n- run: mvn surefire:test -DskipTests=false -Dtest=org.apache.sysds.test.component.*.**\n+ run: |\n+ log=\"/tmp/sysdstest.log\"\n+ echo \"Starting Tests\"\n+ mvn surefire:test -DskipTests=false -Dtest=org.apache.sysds.test.component.*.** 2>&1 > $log\n+ grep_args=\"SUCCESS\"\n+ grepvals=\"$( tail -n 100 $log | grep $grep_args)\"\n+ if [[ $grepvals == *\"SUCCESS\"* ]]; then\n+ echo \"--------------------- last 100 lines from test ------------------------\"\n+ tail -n 100 $log\n+ echo \"------------------ last 100 lines from test end -----------------------\"\n+ exit 0\n+ else\n+ echo \"\\n $(cat $log)\"\n+ exit 1\n+ fi\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/build.sh",
"new_path": "docker/build.sh",
"diff": "# Build the docker containers\n# The first build is for running systemds through docker.\n-docker image build -f docker/sysds.Dockerfile -t sebaba/sysds:0.2 .\n+docker image build -f docker/sysds.Dockerfile -t sebaba/sysds:2.0 .\n# The second build is for testing systemds. This image installs the R dependencies needed to run the tests.\n-docker image build -f docker/testsysds.Dockerfile -t sebaba/testingsysds:0.2 .\n+docker image build -f docker/testsysds.Dockerfile -t sebaba/testingsysds:2.0 .\n# The third build is python docker for systemds.\n-docker image build -f docker/pythonsysds.Dockerfile -t sebaba/pythonsysds:0.2 .\n+docker image build -f docker/pythonsysds.Dockerfile -t sebaba/pythonsysds:2.0 .\n# You might want to prune the docker system afterwards using\n# docker system prune\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/entrypoint.sh",
"new_path": "docker/entrypoint.sh",
"diff": "cd /github/workspace\n-build=\"$(mvn -T 2 clean compile test-compile surefire:test | grep 'BUILD')\"\n+build=\"$(mvn -T 2 clean compile test-compile | grep 'BUILD')\"\nif [[ $build == *\"SUCCESS\"* ]]; then\necho \"Successfull build\"\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/sysds.Dockerfile",
"new_path": "docker/sysds.Dockerfile",
"diff": "@@ -39,11 +39,11 @@ RUN wget http://archive.apache.org/dist/maven/maven-3/$MAVEN_VERSION/binaries/ap\n# Install Extras\nRUN apk add --no-cache git bash\n-RUN git clone https://github.com/apache/systemml.git\n+RUN git clone https://github.com/apache/systemml.git systemds\nWORKDIR /usr/src/systemds/\n-RUN mvn package\n+RUN mvn clean package -P distribution\n# Remove Maven since it is not needed for running the system\nRUN rm -r /usr/lib/mvn\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/installDependencies.R",
"new_path": "src/test/scripts/installDependencies.R",
"diff": "@@ -55,6 +55,7 @@ custom_install(\"caret\");\ncustom_install(\"sigmoid\");\ncustom_install(\"DescTools\");\ncustom_install(\"mice\");\n+custom_install(\"mclust\");\nprint(\"Installation Done\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update Dockerfile (fixes, new R dependency)
Closes #918. |
49,759 | 23.05.2020 22:57:17 | -7,200 | 86fd7b3d4aae5dbca8090e2638e0abc4da696655 | New builtin function toOneHot (one hot encoding)
Adds a builtin function toOneHot, which transforms a vector containing
integers into a one-hot-encoded matrix (note that transform works over frames
and reassigns the integer codes).
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/Tasks.txt",
"new_path": "docs/Tasks.txt",
"diff": "@@ -302,6 +302,7 @@ SYSTEMDS-390 New Builtin Functions IV\n* 391 New GLM builtin function (from algorithms) OK\n* 392 Builtin function for missing value imputation via FDs OK\n* 393 Builtin to find Connected Components of a graph OK\n+ * 394 Builtin for one-hot encoding of matrix (not frame), see table OK\nOthers:\n* Break append instruction to cbind and rbind\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/toOneHot.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# One-hot encodes a vector\n+\n+# INPUT PARAMETERS:\n+# --------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# --------------------------------------------------------------------------------------------\n+# X matrix --- vector with N integer entries between 1 and numClasses\n+# numclasses int --- number of columns, must be >= largest value in X\n+\n+# Output:\n+# --------------------------------------------------------------------------------------------\n+# NAME TYPE MEANING\n+# -------------------------------------------------------------------------------------------\n+# Y matrix one-hot-encoded matrix with shape (N, numClasses)\n+# -------------------------------------------------------------------------------------------\n+\n+m_toOneHot = function(matrix[double] X, integer numClasses)\n+ return (matrix[double] Y) {\n+ if(numClasses < max(X))\n+ stop(\"numClasses must be >= largest value in X to prevent cropping\");\n+ Y = table(seq(1, nrow(X)), X, nrow(X), numClasses);\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -174,6 +174,7 @@ public enum Builtins {\nTAN(\"tan\", false),\nTANH(\"tanh\", false),\nTRACE(\"trace\", false),\n+ TO_ONE_HOT(\"toOneHot\", true),\nTYPEOF(\"typeOf\", false),\nVAR(\"var\", false),\nXOR(\"xor\", false),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinToOneHotTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.lops.LopProperties;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+import static org.junit.Assert.fail;\n+\n+public class BuiltinToOneHotTest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"toOneHot\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinToOneHotTest.class.getSimpleName() + \"/\";\n+\n+ private final static double eps = 0;\n+ private final static int rows = 10;\n+ private final static int cols = 1;\n+ private final static int numClasses = 10;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME,new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[]{\"B\"}));\n+ }\n+\n+ @Test\n+ public void runSimpleTest() {\n+ runToOneHotTest(false, false, LopProperties.ExecType.CP, false);\n+ }\n+\n+ @Test\n+ public void runFailingSimpleTest() {\n+ runToOneHotTest(false, false, ExecType.CP, true);\n+ }\n+\n+ private void runToOneHotTest(boolean scalar, boolean sparse, ExecType instType, boolean shouldFail) {\n+ Types.ExecMode platformOld = setExecMode(instType);\n+\n+ try\n+ {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+\n+ //generate actual dataset\n+ double[][] A = TestUtils.round(getRandomMatrix(rows, cols, 1, numClasses, 1, 7));\n+ int max = -1;\n+ for(int i = 0; i < rows; i++)\n+ max = Math.max(max, (int) A[i][0]);\n+ writeInputMatrixWithMTD(\"A\", A, false);\n+\n+ // script fails if numClasses provided is smaller than maximum value in A\n+ int numClassesPassed = shouldFail ? 
max - 1 : max;\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-args\", input(\"A\"),\n+ String.format(\"%d\", numClassesPassed), output(\"B\") };\n+\n+ runTest(true, false, null, -1);\n+\n+ if(!shouldFail) {\n+ HashMap<MatrixValue.CellIndex, Double> expected = computeExpectedResult(A);\n+ HashMap<MatrixValue.CellIndex, Double> result = readDMLMatrixFromHDFS(\"B\");\n+ TestUtils.compareMatrices(result, expected, eps, \"Stat-DML\", \"Stat-Java\");\n+ }\n+ else {\n+ try {\n+ readDMLMatrixFromHDFS(\"B\");\n+ fail(\"File should not have been written\");\n+ } catch(AssertionError e) {\n+ // exception expected\n+ }\n+ }\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+\n+ private static HashMap<MatrixValue.CellIndex, Double> computeExpectedResult(double[][] a) {\n+ HashMap<MatrixValue.CellIndex, Double> expected = new HashMap<>();\n+ for(int i = 0; i < a.length; i++) {\n+ for(int j = 0; j < a[i].length; j++) {\n+ // indices start with 1 here\n+ expected.put(new MatrixValue.CellIndex(i + 1, (int) a[i][j]), 1.0);\n+ }\n+ }\n+ return expected;\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/toOneHot.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1);\n+num_classes = $2;\n+Y = toOneHot(X, num_classes);\n+write(Y, $3);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-394] New builtin function toOneHot (one hot encoding)
Adds a builtin function toOneHot, which transforms a vector containing
integers into a one-hot-encoded matrix (note that transform works over frames
and reassigns the integer codes).
Closes #916. |
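A short DML usage sketch of the new builtin, mirroring the test script in this commit (input values are illustrative):

  X = matrix("1 3 2 3", rows=4, cols=1);
  Y = toOneHot(X, 3);    # 4 x 3 matrix with exactly one 1 per row
  print(toString(Y));
  # passing numClasses < max(X) stops with an error, as exercised by the failing test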
49,730 | 23.05.2020 23:24:30 | -7,200 | e78962b8db5b1c90fdb37ae6d9c6284f744cdbfc | Fixes distributed slice finding implementation
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/Tasks.txt",
"new_path": "docs/Tasks.txt",
"diff": "@@ -209,6 +209,7 @@ SYSTEMDS-250 Extended Slice Finding\n* 251 Alternative slice enumeration approach OK\n* 252 Initial data slicing implementation Python OK\n* 253 Distributed slicing algorithms (task/data parallel) OK\n+ * 254 Consolidation and fixes distributed slice finding OK\nSYSTEMDS-260 Misc Tools\n* 261 Stable marriage algorithm OK\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/base/Bucket.py",
"new_path": "scripts/staging/slicing/base/Bucket.py",
"diff": "@@ -45,6 +45,8 @@ class Bucket:\nself.parents = []\nself.sum_error = 0\nself.size = 0\n+ self.s_upper = 0\n+ self.s_lower = 0\nself.score = 0\nself.error = 0\nself.max_tuple_error = 0\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/base/SparkNode.py",
"new_path": "scripts/staging/slicing/base/SparkNode.py",
"diff": "@@ -65,25 +65,16 @@ class SparkNode:\nprint(mask)\nif loss_type == 0:\nself.calc_l2(mask)\n- if loss_type == 1:\n+ elif loss_type == 1:\nself.calc_class(mask)\ndef calc_class(self, mask):\nself.e_max = 1\n- size = 0\n- mistakes = 0\n- for row in self.preds:\n- flag = True\n- for attr in mask:\n- if attr not in row[0].indices:\n- flag = False\n- if flag:\n- size = size + 1\n- if row[1] == 0:\n- mistakes += 1\n- self.size = size\n- if size != 0:\n- self.loss = mistakes / size\n+ filtered = self.filter_by_mask(mask)\n+ self.size = len(filtered)\n+ mistakes = len(list(filter(lambda row: row[1] == 0, filtered)))\n+ if self.size != 0:\n+ self.loss = mistakes / self.size\nelse:\nself.loss = 0\nself.e_upper = self.loss\n@@ -92,25 +83,22 @@ class SparkNode:\nmax_tuple_error = 0\nsum_error = 0\nsize = 0\n- for row in self.preds:\n- flag = True\n- for attr in mask:\n- if attr not in row[0].indices:\n- flag = False\n- if flag:\n- size = size + 1\n+ filtered = self.filter_by_mask(mask)\n+ self.size = len(filtered)\n+ for row in filtered:\nif row[1] > max_tuple_error:\nmax_tuple_error = row[1]\n- sum_error = sum_error + row[1]\n+ sum_error += row[1]\nself.e_max = max_tuple_error\nself.e_upper = max_tuple_error\nself.e_max_upper = max_tuple_error\n- if size != 0:\n- self.loss = sum_error/size\n+ if self.size != 0:\n+ self.loss = sum_error/self.size\nelse:\nself.loss = 0\n- self.size = size\n- self.s_upper = size\n+\n+ def filter_by_mask(self, mask):\n+ return list(filter(lambda row: all(attr in row[0].indices for attr in mask), self.preds))\ndef calc_s_upper(self, cur_lvl):\ncur_min = self.parents[0].size\n@@ -168,30 +156,6 @@ class SparkNode:\ndef check_bounds(self, top_k, x_size, alpha):\nreturn self.s_upper >= x_size / alpha and self.c_upper >= top_k.min_score\n- def update_bounds(self, s_upper, s_lower, e_upper, e_max_upper, w):\n- try:\n- minimized = min(s_upper, self.s_upper)\n- self.s_upper = minimized\n- minimized = min(s_lower, self.s_lower)\n- self.s_lower = minimized\n- minimized = min(e_upper, self.e_upper)\n- self.e_upper = minimized\n- minimized = min(e_max_upper, self.e_max_upper)\n- self.e_max_upper = minimized\n- c_upper = self.calc_c_upper(w)\n- minimized = min(c_upper, self.c_upper)\n- self.c_upper = minimized\n- except AttributeError:\n- # initial bounds calculation\n- self.s_upper = s_upper\n- self.s_lower = s_lower\n- self.e_upper = e_upper\n- self.e_max_upper = e_max_upper\n- c_upper = self.calc_c_upper(w)\n- self.c_upper = c_upper\n- minimized = min(c_upper, self.c_upper)\n- self.c_upper = minimized\n-\ndef print_debug(self, topk, level):\nprint(\"new node has been created: \" + self.make_name() + \"\\n\")\nif level >= 1:\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/base/node.py",
"new_path": "scripts/staging/slicing/base/node.py",
"diff": "#\n#-------------------------------------------------------------\n+\nclass Node:\nerror: float\nname: \"\"\n@@ -41,9 +42,14 @@ class Node:\nself.parents = []\nself.attributes = []\nself.size = 0\n+ self.s_upper = 0\n+ self.s_lower = 0\nself.score = 0\nself.complete_x = complete_x\nself.loss = 0\n+ self.e_upper = 0\n+ self.e_max_upper = 0\n+ self.c_upper = 0\nself.x_size = x_size\nself.preds = preds\nself.s_lower = 1\n@@ -69,38 +75,27 @@ class Node:\ndef calc_class(self, mask):\nself.e_max = 1\n- size = 0\nmistakes = 0\n- for row in self.complete_x:\n- flag = True\n- for attr in mask:\n- if row[1][attr] == 0:\n- flag = False\n- if flag:\n- size = size + 1\n+ filtered = self.filter_by_mask(mask)\n+ for row in filtered:\nif self.y_test[row[0]][1] != self.preds[row[0]][1]:\nmistakes = mistakes + 1\n- self.size = size\n- if size != 0:\n- self.loss = mistakes / size\n+ self.size = len(filtered)\n+ if self.size != 0:\n+ self.loss = mistakes / self.size\nelse:\nself.loss = 0\nself.e_upper = self.loss\ndef calc_l2(self, mask):\n+ filtered = self.filter_by_mask(mask)\nmax_tuple_error = 0\nsum_error = 0\n- size = 0\n- for row in self.complete_x:\n- flag = True\n- for attr in mask:\n- if row[1][attr] == 0:\n- flag = False\n- if flag:\n- size = size + 1\n+ size = len(filtered)\n+ for row in filtered:\nif float(self.preds[row[0]][1]) > max_tuple_error:\nmax_tuple_error = float(self.preds[row[0]][1])\n- sum_error = sum_error + float(self.preds[row[0]][1])\n+ sum_error += float(self.preds[row[0]][1])\nself.e_max = max_tuple_error\nself.e_upper = max_tuple_error\nif size != 0:\n@@ -109,6 +104,9 @@ class Node:\nself.loss = 0\nself.size = size\n+ def filter_by_mask(self, mask):\n+ return list(filter(lambda row: all(row[1][attr] == 1 for attr in mask), self.complete_x))\n+\ndef calc_s_upper(self, cur_lvl):\ncur_min = self.parents[0].size\nfor parent in self.parents:\n@@ -166,28 +164,36 @@ class Node:\nreturn self.s_upper >= x_size / alpha and self.c_upper >= top_k.min_score\ndef update_bounds(self, s_upper, s_lower, e_upper, e_max_upper, w):\n- try:\n+ if self.s_upper:\nminimized = min(s_upper, self.s_upper)\nself.s_upper = minimized\n+ else:\n+ self.s_upper = s_upper\n+\n+ if self.s_lower:\nminimized = min(s_lower, self.s_lower)\nself.s_lower = minimized\n+ else:\n+ self.s_lower = s_lower\n+\n+ if self.e_upper:\nminimized = min(e_upper, self.e_upper)\nself.e_upper = minimized\n+ else:\n+ self.e_upper = e_upper\n+\n+ if self.e_max_upper:\nminimized = min(e_max_upper, self.e_max_upper)\nself.e_max_upper = minimized\n- c_upper = self.calc_c_upper(w)\n- minimized = min(c_upper, self.c_upper)\n- self.c_upper = minimized\n- except AttributeError:\n- # initial bounds calculation\n- self.s_upper = s_upper\n- self.s_lower = s_lower\n- self.e_upper = e_upper\n+ else:\nself.e_max_upper = e_max_upper\n+\nc_upper = self.calc_c_upper(w)\n- self.c_upper = c_upper\n+ if self.c_upper:\nminimized = min(c_upper, self.c_upper)\nself.c_upper = minimized\n+ else:\n+ self.c_upper = c_upper\ndef print_debug(self, topk, level):\nprint(\"new node has been created: \" + self.make_name() + \"\\n\")\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/base/slicer.py",
"new_path": "scripts/staging/slicing/base/slicer.py",
"diff": "@@ -38,12 +38,10 @@ def opt_fun(fi, si, f, x_size, w):\n# valid combination example: node ABC + node BCD (on 4th level) // three attributes nodes have two common attributes\n# invalid combination example: node ABC + CDE (on 4th level) // result node - ABCDE (absurd for 4th level)\ndef slice_name_nonsense(node_i, node_j, cur_lvl):\n- commons = 0\n- for attr1 in node_i.attributes:\n- for attr2 in node_j.attributes:\n- if attr1[0].split(\"_\")[0] == attr2[0].split(\"_\")[0]:\n- commons = commons + 1\n- return commons != cur_lvl - 1\n+ attr1 = list(map(lambda x: x[0].split(\"_\")[0], node_i.attributes))\n+ attr2 = list(map(lambda x: x[0].split(\"_\")[0], node_j.attributes))\n+ commons = len(list(set(attr1) & set(attr2)))\n+ return commons == cur_lvl - 1\ndef union(lst1, lst2):\n@@ -81,7 +79,7 @@ def join_enum(node_i, prev_lvl, complete_x, loss, x_size, y_test, errors, debug,\nall_nodes, top_k, cur_lvl_nodes):\nfor node_j in range(len(prev_lvl)):\nflag = slice_name_nonsense(prev_lvl[node_i], prev_lvl[node_j], cur_lvl)\n- if not flag and prev_lvl[node_j].key[0] > prev_lvl[node_i].key[0]:\n+ if flag and prev_lvl[node_j].key[0] > prev_lvl[node_i].key[0]:\nnew_node = Node(complete_x, loss, x_size, y_test, errors)\nparents_set = set(new_node.parents)\nparents_set.add(prev_lvl[node_i])\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/base/union_slicer.py",
"new_path": "scripts/staging/slicing/base/union_slicer.py",
"diff": "@@ -25,14 +25,11 @@ from slicing.base.slicer import opt_fun, union\ndef check_attributes(left_node, right_node):\n- flag = False\n- for attr1 in left_node.attributes:\n- for attr2 in right_node.attributes:\n- if attr1[0].split(\"_\")[0] == attr2[0].split(\"_\")[0]:\n- # there are common attributes which is not the case we need\n- flag = True\n- break\n- return flag\n+ attr1 = list(map(lambda x: x[0].split(\"_\")[0], left_node.attributes))\n+ attr2 = list(map(lambda x: x[0].split(\"_\")[0], right_node.attributes))\n+ if set(attr1).intersection(set(attr2)):\n+ return False\n+ return True\ndef make_first_level(all_features, complete_x, loss, x_size, y_test, errors, loss_type, w, alpha, top_k):\n@@ -65,10 +62,6 @@ def make_first_level(all_features, complete_x, loss, x_size, y_test, errors, los\nreturn first_level, all_nodes\n-def union_enum():\n- return None\n-\n-\ndef process(all_features, complete_x, loss, x_size, y_test, errors, debug, alpha, k, w, loss_type, b_update):\ntop_k = Topk(k)\n# First level slices are enumerated in a \"classic way\" (getting data and not analyzing bounds\n@@ -94,7 +87,7 @@ def process(all_features, complete_x, loss, x_size, y_test, errors, debug, alpha\nfor node_i in range(len(levels[left][0])):\nfor node_j in range(len(levels[right][0])):\nflag = check_attributes(levels[left][0][node_i], levels[right][0][node_j])\n- if not flag:\n+ if flag:\nnew_node = Node(complete_x, loss, x_size, y_test, errors)\nparents_set = set(new_node.parents)\nparents_set.add(levels[left][0][node_i])\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/spark_modules/join_data_parallel.py",
"new_path": "scripts/staging/slicing/spark_modules/join_data_parallel.py",
"diff": "@@ -74,10 +74,9 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nx_size = len(pred_pandas)\nb_topk = SparkContext.broadcast(sc, top_k)\nb_cur_lvl = SparkContext.broadcast(sc, cur_lvl)\n- b_cur_lvl_nodes = SparkContext.broadcast(sc, cur_lvl_nodes)\nbuckets = {}\n- for node in b_cur_lvl_nodes.value:\n- bucket = Bucket(node, b_cur_lvl.value, w, x_size, loss)\n+ for node in cur_lvl_nodes:\n+ bucket = Bucket(node, cur_lvl, w, x_size, loss)\nbuckets[bucket.name] = bucket\nb_buckets = SparkContext.broadcast(sc, buckets)\nrows = predictions.rdd.map(lambda row: (row[0], row[1].indices, row[2]))\\\n@@ -91,16 +90,17 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\ncur_lvl_nodes.map(lambda bucket: bucket.print_debug(b_topk.value)).collect()\ncur_lvl = 1\nprev_level = cur_lvl_nodes.collect()\n- top_k = b_topk.value.buckets_top_k(prev_level, x_size, alpha)\n+ top_k = top_k.buckets_top_k(prev_level, x_size, alpha)\nwhile len(prev_level) > 0:\nb_cur_lvl_nodes = SparkContext.broadcast(sc, prev_level)\nb_topk = SparkContext.broadcast(sc, top_k)\nb_cur_lvl = SparkContext.broadcast(sc, cur_lvl)\n- b_topk.value.print_topk()\n- buckets = join_enum(b_cur_lvl_nodes.value, b_cur_lvl.value, x_size, alpha, b_topk.value, w, loss)\n+ top_k.print_topk()\n+ buckets = join_enum(prev_level, cur_lvl, x_size, alpha, top_k, w, loss)\nb_buckets = SparkContext.broadcast(sc, buckets)\n- to_slice = dict(filter(lambda bucket: bucket[1].check_bounds(x_size, alpha, b_topk.value), b_buckets.value.items()))\n- mapped = rows.map(lambda row: rows_mapper(row, to_slice, loss_type))\n+ to_slice = dict(filter(lambda bucket: bucket[1].check_bounds(x_size, alpha, top_k), buckets.items()))\n+ b_to_slice = SparkContext.broadcast(sc, to_slice)\n+ mapped = rows.map(lambda row: rows_mapper(row, b_to_slice.value, loss_type))\nflattened = mapped.flatMap(lambda line: (line.items()))\nto_process = flattened.combineByKey(combiner, merge_values, merge_combiners)\nif debug:\n@@ -108,13 +108,13 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nprev_level = to_process\\\n.map(lambda bucket: spark_utils.calc_bucket_metrics(bucket[1], loss, w, x_size, b_cur_lvl.value))\\\n.collect()\n- cur_lvl = b_cur_lvl.value + 1\n- top_k = b_topk.value.buckets_top_k(prev_level, x_size, alpha)\n- print(\"Level \" + str(b_cur_lvl.value) + \" had \" + str(\n+ cur_lvl += 1\n+ top_k = top_k.buckets_top_k(prev_level, x_size, alpha)\n+ print(\"Level \" + str(cur_lvl) + \" had \" + str(\nlen(b_cur_lvl_nodes.value * (len(b_cur_lvl_nodes.value) - 1)))+\" candidates but after pruning only \" +\nstr(len(prev_level)) + \" go to the next level\")\nprint(\"Program stopped at level \" + str(cur_lvl))\nprint()\nprint(\"Selected slices are: \")\n- b_topk.value.print_topk()\n+ top_k.print_topk()\nreturn None\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/spark_modules/spark_slicer.py",
"new_path": "scripts/staging/slicing/spark_modules/spark_slicer.py",
"diff": "@@ -73,14 +73,15 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nb_topk.value, w, loss_type)) \\\n.map(lambda node: (node.key, node)).collect()\nfirst_level.update(init_slices)\n- update_top_k(first_level, b_topk.value, alpha, predictions)\n+ update_top_k(first_level, top_k, alpha, predictions)\nprev_level = SparkContext.broadcast(sc, first_level)\nlevels.append(prev_level)\ncur_lvl = cur_lvl + 1\n- b_topk.value.print_topk()\n+ top_k.print_topk()\n# checking the first partition of level. if not empty then processing otherwise no elements were added to this level\nwhile len(levels[cur_lvl - 1].value) > 0:\nnodes_list = {}\n+ b_topk = SparkContext.broadcast(sc, top_k)\npartitions = sc.parallelize(levels[cur_lvl - 1].value.values())\nmapped = partitions.mapPartitions(lambda nodes: spark_utils.nodes_enum(nodes, levels[cur_lvl - 1].value.values(),\npredictions, loss, b_topk.value, alpha, k, w,\n@@ -89,7 +90,7 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nnodes_list.update(flattened.map(lambda node: (node.key, node)).distinct().collect())\nprev_level = SparkContext.broadcast(sc, nodes_list)\nlevels.append(prev_level)\n- update_top_k(nodes_list, b_topk.value, alpha, predictions)\n+ update_top_k(nodes_list, top_k, alpha, predictions)\ncur_lvl = cur_lvl + 1\nb_topk.value.print_topk()\nprint(\"Level \" + str(cur_lvl) + \" had \" + str(len(levels[cur_lvl - 1].value) * (len(levels[cur_lvl - 1].value) - 1)) +\n@@ -97,4 +98,4 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nprint(\"Program stopped at level \" + str(cur_lvl))\nprint()\nprint(\"Selected slices are: \")\n- b_topk.value.print_topk()\n+ top_k.print_topk()\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/spark_modules/spark_union_slicer.py",
"new_path": "scripts/staging/slicing/spark_modules/spark_union_slicer.py",
"diff": "@@ -40,13 +40,14 @@ def process(all_features, predictions, loss, sc, debug, alpha, k, w, loss_type,\n.map(lambda node: (node.key, node)) \\\n.collect()\nfirst_level.update(init_slices)\n- update_top_k(first_level, b_topk.value, alpha, predictions)\n+ update_top_k(first_level, top_k, alpha, predictions)\nprev_level = SparkContext.broadcast(sc, first_level)\nlevels.append(prev_level)\ncur_lvl = 1\n- b_topk.value.print_topk()\n+ top_k.print_topk()\nwhile len(levels[cur_lvl - 1].value) > 0:\ncur_lvl_res = {}\n+ b_topk = SparkContext.broadcast(sc, top_k)\nfor left in range(int(cur_lvl / 2) + 1):\nright = cur_lvl - left - 1\npartitions = sc.parallelize(levels[left].value.values())\n@@ -59,7 +60,7 @@ def process(all_features, predictions, loss, sc, debug, alpha, k, w, loss_type,\ncur_lvl_res.update(partial)\nprev_level = SparkContext.broadcast(sc, cur_lvl_res)\nlevels.append(prev_level)\n- update_top_k(cur_lvl_res, b_topk.value, alpha, predictions)\n+ update_top_k(cur_lvl_res, top_k, alpha, predictions)\ncur_lvl = cur_lvl + 1\ntop_k.print_topk()\nprint(\"Level \" + str(cur_lvl) + \" had \" + str(len(levels[cur_lvl - 1].value) * (len(levels[cur_lvl - 1].value) - 1)) +\n@@ -67,4 +68,4 @@ def process(all_features, predictions, loss, sc, debug, alpha, k, w, loss_type,\nprint(\"Program stopped at level \" + str(cur_lvl))\nprint()\nprint(\"Selected slices are: \")\n- b_topk.value.print_topk()\n+ top_k.print_topk()\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/spark_modules/spark_utils.py",
"new_path": "scripts/staging/slicing/spark_modules/spark_utils.py",
"diff": "@@ -49,6 +49,12 @@ def approved_join_slice(node_i, node_j, cur_lvl):\nreturn commons == cur_lvl - 1\n+def approved_union_slice(node_i, node_j):\n+ if set(node_i.attributes).intersection(set(node_j.attributes)):\n+ return False\n+ return True\n+\n+\ndef make_first_level(features, predictions, loss, top_k, w, loss_type):\nfirst_level = []\n# First level slices are enumerated in a \"classic way\" (getting data and not analyzing bounds\n@@ -66,15 +72,6 @@ def make_first_level(features, predictions, loss, top_k, w, loss_type):\nreturn first_level\n-def approved_union_slice(node_i, node_j):\n- for attr1 in node_i.attributes:\n- for attr2 in node_j.attributes:\n- if attr1 == attr2:\n- # there are common attributes which is not the case we need\n- return False\n- return True\n-\n-\ndef process_node(node_i, level, loss, predictions, cur_lvl, top_k, alpha, loss_type, w, debug, enumerator):\ncur_enum_nodes = []\nfor node_j in level:\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/slicing/spark_modules/union_data_parallel.py",
"new_path": "scripts/staging/slicing/spark_modules/union_data_parallel.py",
"diff": "@@ -67,7 +67,7 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nb_cur_lvl = SparkContext.broadcast(sc, cur_lvl)\nbuckets = {}\nfor node in cur_lvl_nodes:\n- bucket = Bucket(node, b_cur_lvl.value, w, x_size, loss)\n+ bucket = Bucket(node, cur_lvl, w, x_size, loss)\nbuckets[bucket.name] = bucket\nb_buckets = SparkContext.broadcast(sc, buckets)\nrows = predictions.rdd.map(lambda row: (row[0], row[1].indices, row[2])) \\\n@@ -83,37 +83,39 @@ def parallel_process(all_features, predictions, loss, sc, debug, alpha, k, w, lo\nprev_level = cur_lvl_nodes.collect()\nb_cur_lvl_nodes = SparkContext.broadcast(sc, prev_level)\nlevels.append(b_cur_lvl_nodes)\n- top_k = b_topk.value.buckets_top_k(prev_level, x_size, alpha)\n- while len(levels[cur_lvl - 1].value) > 0:\n+ top_k = top_k.buckets_top_k(prev_level, x_size, alpha)\n+ while len(prev_level) > 0:\nb_topk = SparkContext.broadcast(sc, top_k)\nb_cur_lvl = SparkContext.broadcast(sc, cur_lvl)\n- b_topk.value.print_topk()\n+ top_k.print_topk()\nbuckets = []\nfor left in range(int(cur_lvl / 2) + 1):\nright = cur_lvl - left - 1\n- nodes = union_enum(levels[left].value, levels[right].value, x_size, alpha, b_topk.value, w, loss, b_cur_lvl.value)\n+ nodes = union_enum(levels[left].value, levels[right].value, x_size, alpha, top_k, w, loss, cur_lvl)\nbuckets.append(nodes)\nb_buckets = sc.parallelize(buckets)\nall_buckets = b_buckets.flatMap(lambda line: (line.items()))\ncombined = dict(all_buckets.combineByKey(combiner, merge_values, merge_combiners).collect())\nb_buckets = SparkContext.broadcast(sc, combined)\n- to_slice = dict(filter(lambda bucket: bucket[1].check_bounds(x_size, alpha, b_topk.value), b_buckets.value.items()))\n- mapped = rows.map(lambda row: rows_mapper(row, to_slice, loss_type))\n+ to_slice = dict(filter(lambda bucket: bucket[1].check_bounds(x_size, alpha, top_k), combined.items()))\n+ b_to_slice = SparkContext.broadcast(sc, to_slice)\n+ mapped = rows.map(lambda row: rows_mapper(row, b_to_slice.value, loss_type))\nflattened = mapped.flatMap(lambda line: (line.items()))\npartial = flattened.combineByKey(combiner, join_data_parallel.merge_values, join_data_parallel.merge_combiners)\nprev_level = partial\\\n.map(lambda bucket: spark_utils.calc_bucket_metrics(bucket[1], loss, w, x_size, b_cur_lvl.value)).collect()\n+ top_k = top_k.buckets_top_k(prev_level, x_size, alpha)\n+ b_topk = SparkContext.broadcast(sc, top_k)\nif debug:\npartial.values().map(lambda bucket: bucket.print_debug(b_topk.value)).collect()\n- top_k = b_topk.value.buckets_top_k(prev_level, x_size, alpha)\nprint(\"Level \" + str(cur_lvl) + \" had \" + str(\nlen(levels[cur_lvl - 1].value) * (len(levels[cur_lvl - 1].value) - 1)) +\n\" candidates but after pruning only \" + str(len(prev_level)) + \" go to the next level\")\nprint(\"Program stopped at level \" + str(cur_lvl))\nb_cur_lvl_nodes = SparkContext.broadcast(sc, prev_level)\nlevels.append(b_cur_lvl_nodes)\n- cur_lvl = b_cur_lvl.value + 1\n+ cur_lvl += 1\nprint()\nprint(\"Selected slices are: \")\n- b_topk.value.print_topk()\n+ top_k.print_topk()\nreturn None\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-254] Fixes distributed slice finding implementation
Closes #908. |
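The core of this fix is keeping loop state (`cur_lvl`, `top_k`) in plain driver-side variables and broadcasting only the data that worker closures actually read (`to_slice`). A minimal PySpark sketch of that pattern, with hypothetical data and names rather than the slicing code itself:

```python
from pyspark import SparkContext

sc = SparkContext.getOrCreate()
to_slice = {"a": 2.0, "b": 3.0}          # hypothetical driver-side dict
b_to_slice = sc.broadcast(to_slice)      # shipped once per executor

rows = sc.parallelize([("a", 1.0), ("b", 2.0), ("c", 4.0)])
# worker closures read only the broadcast handle, never driver locals
scaled = rows.map(lambda kv: (kv[0], kv[1] * b_to_slice.value.get(kv[0], 1.0)))
print(scaled.collect())                  # [('a', 2.0), ('b', 6.0), ('c', 4.0)]
```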
49,738 | 24.05.2020 17:19:39 | -7,200 | c61e7ac71a83df3d525b6131ca76cf8252c6802f | [MINOR] Integration of steplm builtin (avoid excessive test output) | [
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/StepLinearRegDS.dml",
"new_path": "scripts/algorithms/StepLinearRegDS.dml",
"diff": "@@ -79,331 +79,15 @@ fileX = $X;\nfileY = $Y;\nfileB = $B;\nfileS = $S;\n-\nwrite_beta = ifdef($write_beta, TRUE);\n-\n-# currently only the forward selection strategy in supported: start from one feature and iteratively add\n-# features until AIC improves\n-dir = \"forward\";\n-\nfmt = ifdef ($fmt, \"text\");\n-intercept_status = ifdef ($icpt, 1);\n+intercept = ifdef ($icpt, 1);\nthr = ifdef ($thr, 0.001);\n-print (\"BEGIN STEPWISE LINEAR REGRESSION SCRIPT\");\n-print (\"Reading X and Y...\");\nX_orig = read (fileX);\ny = read (fileY);\n-n = nrow (X_orig);\n-m_orig = ncol (X_orig);\n-\n-# BEGIN STEPWISE LINEAR REGRESSION\n-\n-if (dir == \"forward\") {\n- continue = TRUE;\n- columns_fixed = matrix (0, rows = 1, cols = m_orig);\n- columns_fixed_ordered = matrix (0, rows = 1, cols = 1);\n-\n- # X_global stores the best model found at each step\n- X_global = matrix (0, rows = n, cols = 1);\n-\n- if (intercept_status == 1 | intercept_status == 2) {\n- beta = mean (y);\n- AIC_best = 2 + n * log(sum((beta - y)^2) / n);\n- } else {\n- beta = 0.0;\n- AIC_best = n * log(sum(y^2) / n);\n- }\n-\n- AICs = matrix (AIC_best, rows = 1, cols = m_orig);\n- print (\"Best AIC without any features: \" + AIC_best);\n-\n- boa_ncol = ncol(X_orig)\n- if (intercept_status != 0) {\n- boa_ncol = boa_ncol + 1\n- }\n-\n- beta_out_all = matrix(0, rows = boa_ncol, cols = m_orig * 1);\n-\n- y_ncol = 1;\n-\n- # First pass to examine single features\n- parfor (i in 1:m_orig, check = 0) {\n- columns_fixed_ordered_1 = matrix(i, rows=1, cols=1);\n-\n- [AIC_1, beta_out_i] = linear_regression (X_orig[, i], y, m_orig, columns_fixed_ordered_1,\n- write_beta, 0);\n-\n- AICs[1, i] = AIC_1;\n-\n- beta_out_all[1:nrow(beta_out_i), (i - 1) * y_ncol + 1 : i * y_ncol] = beta_out_i[, 1:1];\n-\n- }\n-\n- # Determine the best AIC\n- column_best = 0;\n- for (k in 1:m_orig) {\n- AIC_cur = as.scalar (AICs[1, k]);\n- if ( (AIC_cur < AIC_best) & ((AIC_best - AIC_cur) > abs (thr * AIC_best)) ) {\n- column_best = k;\n- AIC_best = as.scalar(AICs[1, k]);\n- }\n- }\n-\n- # beta best so far\n- beta_best = beta_out_all[, (column_best-1) * y_ncol + 1: column_best * y_ncol];\n-\n- if (column_best == 0) {\n- print (\"AIC of an empty model is \" + AIC_best + \" and adding no feature achieves more than \" +\n- (thr * 100) + \"% decrease in AIC!\");\n- Selected = matrix (0, rows = 1, cols = 1);\n- if (intercept_status == 0) {\n- B = matrix (beta, rows = m_orig, cols = 1);\n- } else {\n- B_tmp = matrix (0, rows = m_orig + 1, cols = 1);\n- B_tmp[m_orig + 1, ] = beta;\n- B = B_tmp;\n- }\n-\n- beta_out = B;\n-\n- write(Selected, fileS, format=fmt);\n- write(beta_out, fileB, format=fmt);\n-\n- stop (\"\");\n- }\n- print (\"Best AIC \" + AIC_best + \" achieved with feature: \" + column_best);\n- columns_fixed[1, column_best] = 1;\n- columns_fixed_ordered[1, 1] = column_best;\n- X_global = X_orig[, column_best];\n-\n- while (continue) {\n- # Subsequent passes over the features\n- beta_out_all_2 = matrix(0, rows = boa_ncol, cols = m_orig * 1);\n-\n- parfor (i in 1:m_orig, check = 0) {\n- if (as.scalar(columns_fixed[1, i]) == 0) {\n-\n- # Construct the feature matrix\n- X = cbind (X_global, X_orig[, i]);\n-\n- tmp = matrix(0, rows=1, cols=1);\n- tmp[1, 1] = i;\n- columns_fixed_ordered_2 = append(columns_fixed_ordered, tmp )\n- [AIC_2, beta_out_i] = linear_regression (X, y, m_orig, columns_fixed_ordered_2, write_beta, 0);\n- beta_out_all_2[1:nrow(beta_out_i), (i - 1) * y_ncol + 1 : i * y_ncol] = beta_out_i[,1:1];\n-\n- AICs[1, i] = AIC_2;\n- }\n- }\n-\n- # 
Determine the best AIC\n- for (k in 1:m_orig) {\n- AIC_cur = as.scalar (AICs[1, k]);\n- if ( (AIC_cur < AIC_best) & ((AIC_best - AIC_cur) > abs (thr * AIC_best)) &\n- (as.scalar(columns_fixed[1, k]) == 0) ) {\n- column_best = k;\n- AIC_best = as.scalar(AICs[1, k]);\n- }\n- }\n-\n- # have the best beta store in the matrix\n- beta_best = beta_out_all_2[, (column_best - 1) * y_ncol + 1 : column_best * y_ncol];\n-\n- # Append best found features (i.e., columns) to X_global\n- if (as.scalar(columns_fixed[1, column_best]) == 0) { # new best feature found\n- print (\"Best AIC \" + AIC_best + \" achieved with feature: \" + column_best);\n- columns_fixed[1, column_best] = 1;\n- columns_fixed_ordered = cbind (columns_fixed_ordered, as.matrix(column_best));\n-\n- if (ncol(columns_fixed_ordered) == m_orig) { # all features examined\n- X_global = cbind (X_global, X_orig[, column_best]);\n- continue = FALSE;\n- } else {\n- X_global = cbind (X_global, X_orig[, column_best]);\n- }\n- } else {\n- continue = FALSE;\n- }\n-\n- }\n-\n- # run linear regression with selected set of features\n- print (\"Running linear regression with selected features...\");\n- [AIC, beta_out] = linear_regression (X_global, y, m_orig, columns_fixed_ordered, write_beta, 1);\n-\n- Selected = columns_fixed_ordered;\n- if (intercept_status != 0) {\n- Selected = cbind(Selected, matrix(boa_ncol, rows=1, cols=1))\n- }\n-\n- beta_out = reorder_matrix(boa_ncol, beta_out, Selected);\n+[beta_out, Selected] = steplm(X=X_orig, y=y, icpt=intercept, verbose=FALSE);\nwrite(Selected, fileS, format=fmt);\nwrite(beta_out, fileB, format=fmt);\n-\n-} else {\n- stop (\"Currently only forward selection strategy is supported!\");\n-}\n-\n-# Computes linear regression using a direct solver for (X^T X) beta = X^T y.\n-# It also outputs the AIC of the computed model.\n-\n-linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\n- Matrix[Double] Selected, Boolean write_beta, Boolean writeStats)\n- return (Double AIC, Matrix[Double] beta) {\n-\n- intercept_status = ifdef ($icpt, 0);\n- fmt = ifdef ($fmt, \"text\");\n- n = nrow (X);\n- m = ncol (X);\n-\n- # Introduce the intercept, shift and rescale the columns of X if needed\n- if (intercept_status == 1 | intercept_status == 2) { # add the intercept column\n- ones_n = matrix (1, rows = n, cols = 1);\n- X = cbind (X, ones_n);\n- m = m - 1;\n- }\n-\n- m_ext = ncol(X);\n-\n- if (intercept_status == 2) { # scale-&-shift X columns to mean 0, variance 1\n- # Important assumption: X [, m_ext] = ones_n\n- avg_X_cols = t(colSums(X)) / n;\n- var_X_cols = (t(colSums (X ^ 2)) - n * (avg_X_cols ^ 2)) / (n - 1);\n- is_unsafe = (var_X_cols <= 0);\n- scale_X = 1.0 / sqrt (var_X_cols * (1 - is_unsafe) + is_unsafe);\n- scale_X [m_ext, 1] = 1;\n- shift_X = - avg_X_cols * scale_X;\n- shift_X [m_ext, 1] = 0;\n- } else {\n- scale_X = matrix (1, rows = m_ext, cols = 1);\n- shift_X = matrix (0, rows = m_ext, cols = 1);\n- }\n-\n- # BEGIN THE DIRECT SOLVE ALGORITHM (EXTERNAL CALL)\n-\n- A = t(X) %*% X;\n- b = t(X) %*% y;\n- if (intercept_status == 2) {\n- A = t(diag (scale_X) %*% A + shift_X %*% A [m_ext, ]);\n- A = diag (scale_X) %*% A + shift_X %*% A [m_ext, ];\n- b = diag (scale_X) %*% b + shift_X %*% b [m_ext, ];\n- }\n-\n- beta_unscaled = solve (A, b);\n-\n- # END THE DIRECT SOLVE ALGORITHM\n-\n- if (intercept_status == 2) {\n- beta = scale_X * beta_unscaled;\n- beta [m_ext, ] = beta [m_ext, ] + t(shift_X) %*% beta_unscaled;\n- } else {\n- beta = beta_unscaled;\n- }\n-\n- # COMPUTE AIC\n- y_residual = 
y - X %*% beta;\n- ss_res = sum (y_residual ^ 2);\n- eq_deg_of_freedom = m_ext;\n- AIC = (2 * eq_deg_of_freedom) + n * log (ss_res / n);\n-\n- if(write_beta == 1) {\n- fileO = ifdef ($O, \" \");\n- fileS = $S;\n-\n- print (\"Computing the statistics...\");\n- avg_tot = sum (y) / n;\n- ss_tot = sum (y ^ 2);\n- ss_avg_tot = ss_tot - n * avg_tot ^ 2;\n- var_tot = ss_avg_tot / (n - 1);\n- # y_residual = y - X %*% beta;\n- avg_res = sum (y_residual) / n;\n- # ss_res = sum (y_residual ^ 2);\n- ss_avg_res = ss_res - n * avg_res ^ 2;\n-\n- R2 = 1 - ss_res / ss_avg_tot;\n- if (n > m_ext) {\n- dispersion = ss_res / (n - m_ext);\n- adjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));\n- } else {\n- dispersion = NaN;\n- adjusted_R2 = NaN;\n- }\n-\n- R2_nobias = 1 - ss_avg_res / ss_avg_tot;\n- deg_freedom = n - m - 1;\n- if (deg_freedom > 0) {\n- var_res = ss_avg_res / deg_freedom;\n- adjusted_R2_nobias = 1 - var_res / (ss_avg_tot / (n - 1));\n- } else {\n- var_res = NaN;\n- adjusted_R2_nobias = NaN;\n- print (\"Warning: zero or negative number of degrees of freedom.\");\n- }\n-\n- R2_vs_0 = 1 - ss_res / ss_tot;\n- if (n > m) {\n- adjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);\n- } else {\n- adjusted_R2_vs_0 = NaN;\n- }\n-\n- str = \"AVG_TOT_Y,\" + avg_tot; # Average of the response value Y\n- str = append (str, \"STDEV_TOT_Y,\" + sqrt (var_tot)); # Standard Deviation of the response value Y\n- str = append (str, \"AVG_RES_Y,\" + avg_res); # Average of the residual Y - pred(Y|X), i.e. residual bias\n- str = append (str, \"STDEV_RES_Y,\" + sqrt (var_res)); # Standard Deviation of the residual Y - pred(Y|X)\n- str = append (str, \"DISPERSION,\" + dispersion); # GLM-style dispersion, i.e. residual sum of squares / # d.f.\n- str = append (str, \"R2,\" + R2); # R^2 of residual with bias included vs. total average\n- str = append (str, \"ADJUSTED_R2,\" + adjusted_R2); # Adjusted R^2 of residual with bias included vs. total average\n- str = append (str, \"R2_NOBIAS,\" + R2_nobias); # R^2 of residual with bias subtracted vs. total average\n- str = append (str, \"ADJUSTED_R2_NOBIAS,\" + adjusted_R2_nobias); # Adjusted R^2 of residual with bias subtracted vs. total average\n- if (intercept_status == 0) {\n- str = append (str, \"R2_VS_0,\" + R2_vs_0); # R^2 of residual with bias included vs. zero constant\n- str = append (str, \"ADJUSTED_R2_VS_0,\" + adjusted_R2_vs_0); # Adjusted R^2 of residual with bias included vs. zero constant\n- }\n-\n- if (fileO != \" \" & writeStats != 0) {\n- write(str, fileO);\n- } else {\n- print (str);\n- print (\"\");\n- }\n-\n- # TODO IMP NOTE: with the fix in PR-22, we have not accounted for\n- # intercept=2 and # the code before # was not matching so we have removed it\n- # for now. Pl see the git revision history and diff to see the changes.\n- # in future we will have this feature. For now it is disabled\n- }\n- }\n-\n-\n-reorder_matrix = function(\n- double ncolX, # number of column in X, inlcuding the intercept column\n- matrix[double] B, # beta\n- matrix[double] S # Selected\n-) return (matrix[double] Y) {\n- # This function assumes that B and S have same number of elements.\n- # if the intercept is included in the model, all inputs should be adjusted\n- # appropriately before calling this function.\n-\n- S = t(S);\n- num_empty_B = ncolX - nrow(B);\n- if (num_empty_B < 0) {\n- stop(\"Error: unable to re-order the matrix. 
Reason: B more than matrix X\");\n- }\n-\n- if (num_empty_B > 0) {\n- pad_zeros = matrix(0, rows = num_empty_B, cols=1);\n- B = rbind(B, pad_zeros);\n- S = rbind(S, pad_zeros);\n- }\n-\n- # since the table won't accept zeros as index we hack it.\n- S0 = replace(target = S, pattern = 0, replacement = ncolX+1);\n- seqS = seq(1, nrow(S0));\n- P = table(seqS, S0, ncolX, ncolX);\n-\n- Y = t(P) %*% B;\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/steplm.dml",
"new_path": "scripts/builtin/steplm.dml",
"diff": "@@ -71,6 +71,7 @@ m_steplm = function(Matrix[Double] X, Matrix[Double] y, Integer icpt = 0,\n# start from one feature and iteratively add features until AIC improves\nthr = 0.001;\n+ if(verbose)\nprint(\"BEGIN STEPWISE LINEAR REGRESSION SCRIPT\");\nX_orig = X;\nn = nrow(X_orig);\n@@ -90,6 +91,7 @@ m_steplm = function(Matrix[Double] X, Matrix[Double] y, Integer icpt = 0,\nbeta = 0.0;\nAIC_best_orig = n * log(sum(y ^ 2) / n);\n}\n+ if(verbose)\nprint(\"Best AIC without any features: \" + AIC_best_orig);\nboa_ncol = ncol(X_orig) + as.integer(icpt!=0);\nbeta_out_all = matrix(0, boa_ncol, m_orig);\n@@ -108,6 +110,7 @@ m_steplm = function(Matrix[Double] X, Matrix[Double] y, Integer icpt = 0,\n# beta best so far\nbeta_best = beta_out_all[, column_best];\nif (column_best == 0) {\n+ if(verbose)\nprint(\"AIC of an empty model is \" + AIC_best + \" and adding no feature achieves more than \" + (thr * 100) + \"% decrease in AIC!\");\nB = matrix(0, m_orig, 1);\nif (icpt != 0)\n@@ -115,7 +118,7 @@ m_steplm = function(Matrix[Double] X, Matrix[Double] y, Integer icpt = 0,\nS = matrix(0, 1, 1);\n}\nelse {\n-\n+ if(verbose)\nprint(\"Best AIC \" + AIC_best + \" achieved with feature: \" + column_best);\ncolumns_fixed[1, column_best] = 1;\n@@ -152,6 +155,7 @@ m_steplm = function(Matrix[Double] X, Matrix[Double] y, Integer icpt = 0,\n# Append best found features (i.e., columns) to X_global\nif (as.scalar(columns_fixed[1, column_best]) == 0) {\n# new best feature found\n+ if(verbose)\nprint(\"Best AIC \" + AIC_best + \" achieved with feature: \" + column_best);\ncolumns_fixed[1, column_best] = 1;\ncolumns_fixed_ordered = cbind(columns_fixed_ordered, as.matrix(column_best));\n@@ -167,6 +171,7 @@ m_steplm = function(Matrix[Double] X, Matrix[Double] y, Integer icpt = 0,\n}\n}\n# run linear regression with selected set of features\n+ if( verbose )\nprint(\"Running linear regression with selected features...\");\n[AIC, beta_out] = linear_regression(X_global, y, icpt, reg, tol, maxi, verbose);\nS = columns_fixed_ordered;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Integration of steplm builtin (avoid excessive test output) |
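The removed script body selected features by the criterion `2*df + n*log(RSS/n)`, which the `steplm` builtin now encapsulates. A small NumPy sketch of that AIC computation, as a generic reading of the removed code rather than the builtin itself:

```python
import numpy as np

def aic(y, y_hat, dof):
    # AIC as in the removed script: 2 * degrees of freedom + n * log(RSS / n)
    n = len(y)
    rss = float(np.sum((y - y_hat) ** 2))
    return 2 * dof + n * np.log(rss / n)
```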
49,738 | 25.05.2020 21:11:52 | -7,200 | b66a3c006ce6a3a888653e2d1accec479cc756fd | Fix convergence condition of connected components builtin | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/components.dml",
"new_path": "scripts/builtin/components.dml",
"diff": "@@ -40,7 +40,7 @@ m_components = function(Matrix[Double] G, Integer maxi = 0, Boolean verbose = TR\niter = 1;\n# iterative computation of connected components\n- while( diff > 0 & (maxi==0 | maxi<=iter) ) {\n+ while( diff > 0 & (maxi==0 | iter<=maxi) ) {\nu = max(rowMaxs(G * t(c)), c);\ndiff = sum(u != c)\nc = u; # update assignment\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinComponentsTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinComponentsTest.java",
"diff": "@@ -45,20 +45,25 @@ public class BuiltinComponentsTest extends AutomatedTestBase {\n@Test\npublic void testConnectedComponents11CP() {\n- runConnectedComponentsTest(11, LopProperties.ExecType.CP);\n+ runConnectedComponentsTest(11, 0, LopProperties.ExecType.CP);\n}\n@Test\npublic void testConnectedComponents201CP() {\n- runConnectedComponentsTest(201, LopProperties.ExecType.CP);\n+ runConnectedComponentsTest(201, 0, LopProperties.ExecType.CP);\n}\n@Test\npublic void testConnectedComponents2001CP() {\n- runConnectedComponentsTest(2001, LopProperties.ExecType.CP);\n+ runConnectedComponentsTest(2001, 0, LopProperties.ExecType.CP);\n}\n- private void runConnectedComponentsTest(int numVertices, ExecType instType)\n+ @Test\n+ public void testConnectedComponents11Maxi100CP() {\n+ runConnectedComponentsTest(11, 100, LopProperties.ExecType.CP);\n+ }\n+\n+ private void runConnectedComponentsTest(int numVertices, int maxi, ExecType instType)\n{\nTypes.ExecMode platformOld = setExecMode(instType);\n@@ -68,7 +73,8 @@ public class BuiltinComponentsTest extends AutomatedTestBase {\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + TEST_NAME + \".dml\";\n- programArgs = new String[]{ \"-args\", input(\"X\"), output(\"R\")};\n+ programArgs = new String[]{ \"-args\",\n+ input(\"X\"), String.valueOf(maxi), output(\"R\")};\n//generate actual dataset (3 components)\ndouble[][] X = new double[numVertices-3][2];\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/ConnectedComponents.dml",
"new_path": "src/test/scripts/functions/builtin/ConnectedComponents.dml",
"diff": "@@ -23,6 +23,6 @@ X = read($1)\nn = max(X);\nG = table(X[,1], X[, 2], n, n)\nG = G + t(G); #symmetry\n-C = components(G=G, verbose=FALSE)\n+C = components(G=G, maxi=$2, verbose=FALSE)\n-write(C, $2)\n+write(C, $3)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-393] Fix convergence condition of connected components builtin |
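For context, the builtin iteratively propagates the maximum vertex id among neighbors; the fix changes the loop bound from `maxi<=iter` to `iter<=maxi` so `maxi` really caps the iteration count. A NumPy sketch of the same fixpoint loop over a dense adjacency matrix (a sketch, not the DML builtin):

```python
import numpy as np

def components(G, maxi=0):
    c = np.arange(1, G.shape[0] + 1, dtype=float)   # initial ids 1..n
    diff, it = np.inf, 1
    while diff > 0 and (maxi == 0 or it <= maxi):   # corrected bound
        u = np.maximum((G * c).max(axis=1), c)      # max id among neighbors
        diff = np.sum(u != c)
        c = u                                       # update assignment
        it += 1
    return c
```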
49,698 | 27.05.2020 06:51:21 | -19,080 | 36f1e51b270fee8fa2a8afc55713ca6202982116 | [MINOR] Move refactor `dev` docs to `dev` folder
* This change provides structure to separate internal dev docs from
user-facing docs
Closes | [
{
"change_type": "MODIFY",
"old_path": "CONTRIBUTING.md",
"new_path": "CONTRIBUTING.md",
"diff": "@@ -29,7 +29,7 @@ ___\nBefore contributing a pull request, we highly suggest applying a code formatter to the written code.\n-We have provided at profile for java located in [Codestyle File ./docs/CodeStyle.eclipse.xml](./docs/CodeStyle_eclipse.xml). This can be loaded in most editors e.g.:\n+We have provided at profile for java located in [Codestyle File ./docs/CodeStyle.eclipse.xml](dev/docs/CodeStyle_eclipse.xml). This can be loaded in most editors e.g.:\n- [Eclipse](https://stackoverflow.com/questions/10432538/eclipse-import-conf-xml-files#10433986)\n- [IntelliJ](https://imagej.net/Eclipse_code_style_profiles_and_IntelliJ)\n"
},
{
"change_type": "MODIFY",
"old_path": "bin/README.md",
"new_path": "bin/README.md",
"diff": "@@ -93,4 +93,4 @@ the option `sysds.native.blas` in `SystemDS-config.xml`.\n## Further reading\n-More documentation is available in the [docs directory of our github repository](/docs/README.md)\n+More documentation is available in the [docs directory of our github repository](/dev/docs/README.md)\n"
},
{
"change_type": "RENAME",
"old_path": "docs/CodeStyle_eclipse.xml",
"new_path": "dev/docs/CodeStyle_eclipse.xml",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "docs/README.md",
"new_path": "dev/docs/README.md",
"diff": "@@ -22,11 +22,11 @@ limitations under the License.\nVarious forms of documentation for SystemDS are available.\nIn this directory you'll find\n-* a [DML language reference](./dml-language-reference.md)\n-* a description of [builtin functions (WIP)](./builtins-reference.md)\n-* [coding style settings](./CodeStyle_eclipse.xml) for Eclipse (compatible with various other IDEs)\n+* a [DML language reference](dml-language-reference.md)\n+* a description of [builtin functions (WIP)](builtins-reference.md)\n+* [coding style settings](CodeStyle_eclipse.xml) for Eclipse (compatible with various other IDEs)\n* More information inside our [CONTRIBUTING.md](/CONTRIBUTING.md)\n-* an enumerated list of open and completed [tasks](./Tasks.txt)\n+* an enumerated list of open and completed [tasks](Tasks.txt)\n## Pointers to more documentation\n"
},
{
"change_type": "RENAME",
"old_path": "docs/Tasks.txt",
"new_path": "dev/docs/Tasks.txt",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "docs/dml-language-reference.md",
"new_path": "dev/docs/dml-language-reference.md",
"diff": ""
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Move refactor `dev` docs to `dev` folder
* This change provide structure separate internal dev doc from
user facing docs
Closes #920. |
49,698 | 30.05.2020 00:05:57 | -19,080 | 0e76598f47c94607bcf36b57f3bd4f1fd1cfc4d5 | [MINOR][DOC] Both parameters should be named for rand()
* correct syntax: rand(rows = .., cols = ..)
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -149,7 +149,7 @@ The *icpt-argument* can be set to 3 modes:\n### Example\n```r\nX = rand (rows = 50, cols = 10)\n-y = X %*% rand(rows=ncol(X), 1)\n+y = X %*% rand(rows = ncol(X), cols = 1)\nlm(X = X, y = y)\n```\n@@ -179,7 +179,7 @@ lmDS(X, y, icpt = 0, reg = 1e-7, verbose = TRUE)\n### Example\n```r\nX = rand (rows = 50, cols = 10)\n-y = X %*% rand(rows=ncol(X), 1)\n+y = X %*% rand(rows = ncol(X), cols = 1)\nlmDS(X = X, y = y)\n```\n@@ -211,7 +211,7 @@ lmCG(X, y, icpt = 0, reg = 1e-7, tol = 1e-7, maxi = 0, verbose = TRUE)\n### Example\n```r\nX = rand (rows = 50, cols = 10)\n-y = X %*% rand(rows=ncol(X), 1)\n+y = X %*% rand(rows = ncol(X), cols = 1)\nlmCG(X = X, y = y, maxi = 10)\n```\n@@ -239,7 +239,7 @@ lmpredict(X, w)\n### Example\n```r\nX = rand (rows = 50, cols = 10)\n-y = X %*% rand(rows=ncol(X), 1)\n+y = X %*% rand(rows = ncol(X), cols = 1)\nw = lm(X = X, y = y)\nyp = lmpredict(X, w)\n```\n@@ -287,7 +287,7 @@ If the best AIC is achieved without any features the matrix of *selected* featur\n### Example\n```r\nX = rand (rows = 50, cols = 10)\n-y = X %*% rand(rows=ncol(X), 1)\n+y = X %*% rand(rows = ncol(X), cols = 1)\n[C, S] = steplm(X = X, y = y, icpt = 1);\n```\n@@ -318,7 +318,7 @@ slicefinder(X,W, y, k, paq, S);\n### Usage\n```r\nX = rand (rows = 50, cols = 10)\n-y = X %*% rand(rows=ncol(X), 1)\n+y = X %*% rand(rows = ncol(X), cols = 1)\nw = lm(X = X, y = y)\nress = slicefinder(X = X,W = w, Y = y, k = 5, paq = 1, S = 2);\n```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Both the parameters should be named for rand()
* correct syntax: rand(rows = .., cols = ..)
Closes #926. |
49,738 | 30.05.2020 00:39:07 | -7,200 | 405462e84ad1192e447bb09c03fe20d112bf6afb | [MINOR] Fix invalid consistency checks of spark append_aligned rbind | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/BinarySPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/BinarySPInstruction.java",
"diff": "@@ -459,7 +459,7 @@ public abstract class BinarySPInstruction extends ComputationSPInstruction {\n}\nif( checkAligned ) {\n- if( mc1.getCols() % mc1.getBlocksize() != 0 )\n+ if( (cbind ? mc1.getCols() : mc1.getRows()) % mc1.getBlocksize() != 0 )\nthrow new DMLRuntimeException(\"Input matrices are not aligned to blocksize boundaries. Wrong append selected\");\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix invalid consistency checks of spark append_aligned rbind |
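The faulty check always tested column alignment; the fix tests rows for rbind and columns for cbind. The same logic as a small Python sketch (function and parameter names are hypothetical):

```python
def check_aligned(rows, cols, blocksize, cbind):
    # cbind appends columns, so the column count must be a multiple of the
    # block size; rbind appends rows, so the row count must be (the fix).
    dim = cols if cbind else rows
    if dim % blocksize != 0:
        raise ValueError("inputs not aligned to blocksize boundaries")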
49,683 | 01.06.2020 08:17:14 | -19,080 | 8d020fb9f807750b07d08398caa2d433305819b6 | [DOC] Add documentation for builtin sigmoid func.
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -27,9 +27,11 @@ limitations under the License.\n* [`lmDS`-Function](#lmds-function)\n* [`lmCG`-Function](#lmcg-function)\n* [`lmpredict`-Function](#lmpredict-function)\n+ * [`sigmoid`-Function](#sigmoid-function)\n* [`steplm`-Function](#steplm-function)\n* [`slicefinder`-Function](#slicefinder-function)\n+\n# Introduction\nThe DML (Declarative Machine Learning) language has built-in functions which enable access to both low- and high-level functions\n@@ -244,6 +246,33 @@ w = lm(X = X, y = y)\nyp = lmpredict(X, w)\n```\n+## `sigmoid`-Function\n+\n+The Sigmoid function is a type of activation function, and also defined as a squashing function which limit the output\n+to a range between 0 and 1, which will make these functions useful in the prediction of probabilities.\n+\n+### Usage\n+```r\n+sigmoid(X)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :---- | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Matrix of feature vectors. |\n+\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | 1-column matrix of weights. |\n+\n+### Example\n+```r\n+X = rand (rows = 20, cols = 10)\n+Y = sigmoid(X)\n+```\n+\n## `steplm`-Function\nThe `steplm`-function (stepwise linear regression) implements a classical forward feature selection method.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Add documentation for builtin sigmoid func.
Closes #929. |
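The documented function is the standard logistic squashing `1 / (1 + exp(-x))`, applied element-wise. An equivalent NumPy sketch:

```python
import numpy as np

def sigmoid(X):
    # element-wise logistic function; outputs lie strictly in (0, 1)
    return 1.0 / (1.0 + np.exp(-X))
```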
49,689 | 01.06.2020 23:15:54 | -7,200 | 7bf3517b11ad4fc303b97e952a2acbc6002909c5 | [SYSTEMDS-411] Fix missing cache size update. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"diff": "@@ -137,6 +137,8 @@ public class LineageCacheEviction\nh.setCacheStatus(LineageCacheStatus.SPILLED);\nh = h._nextEntry;\n}\n+ // Reduce cachesize once for all the entries.\n+ updateSize(e.getSize(), false);\n// Keep them in cache.\nreturn;\n}\n@@ -371,6 +373,8 @@ public class LineageCacheEviction\nh.setValue(mb);\nh = h._nextEntry;\n}\n+ // Increase cachesize once for all the entries.\n+ updateSize(e.getSize(), true);\n}\n// Adjust disk reading speed\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-411]Fix missing cache size update. |
49,683 | 02.06.2020 10:15:24 | -19,080 | 9cbfa02196ff035f37f5a65bca0d7af085157c7e | [DOC] Documentation for builtin scale function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -27,6 +27,7 @@ limitations under the License.\n* [`lmDS`-Function](#lmds-function)\n* [`lmCG`-Function](#lmcg-function)\n* [`lmpredict`-Function](#lmpredict-function)\n+ * [`scale`-Function](#scale-function)\n* [`sigmoid`-Function](#sigmoid-function)\n* [`steplm`-Function](#steplm-function)\n* [`slicefinder`-Function](#slicefinder-function)\n@@ -110,6 +111,7 @@ Note that reshape construction is not yet supported for **SPARK** execution.\n**DML-bodied built-in functions** are written as DML-Scripts and executed as such when called.\n+\n## `lm`-Function\nThe `lm`-function solves linear regression using either the **direct solve method** or the **conjugate gradient algorithm**\n@@ -246,6 +248,35 @@ w = lm(X = X, y = y)\nyp = lmpredict(X, w)\n```\n+## `scale`-Function\n+\n+The scale function is a generic function whose default method centers or scales the column of a numeric matrix.\n+\n+### Usage\n+```r\n+scale(X, center=TRUE, scale=TRUE)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Matrix of feature vectors. |\n+| center | Boolean | required | either a logical value or numerical value. |\n+| scale | Boolean | required | either a logical value or numerical value. |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | 1-column matrix of weights. |\n+\n+### Example\n+```r\n+X = rand(rows = 20, cols = 10)\n+center=TRUE;\n+scale=TRUE;\n+Y= scale(X,center,scale)\n+```\n+\n## `sigmoid`-Function\nThe Sigmoid function is a type of activation function, and also defined as a squashing function which limit the output\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin scale function
Closes #934. |
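Column-wise centering and scaling as documented above can be sketched in NumPy as follows; using the sample standard deviation (ddof=1, mirroring R's `scale`) is an assumption, not confirmed by the doc text:

```python
import numpy as np

def scale(X, center=True, scale_=True):
    Y = X - X.mean(axis=0) if center else X.copy()
    if scale_:
        s = Y.std(axis=0, ddof=1)         # sample std, as in R's scale()
        Y = Y / np.where(s == 0, 1.0, s)  # guard constant columns
    return Y
```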
49,726 | 02.06.2020 12:17:09 | -19,080 | 719ebe0d215035c054c435a5b7d790e643ec0fe1 | [DOC] Documentation for builtin normalize function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -31,6 +31,8 @@ limitations under the License.\n* [`sigmoid`-Function](#sigmoid-function)\n* [`steplm`-Function](#steplm-function)\n* [`slicefinder`-Function](#slicefinder-function)\n+ * [`normalize`-Function](#normalize-function)\n+\n# Introduction\n@@ -382,3 +384,34 @@ y = X %*% rand(rows = ncol(X), cols = 1)\nw = lm(X = X, y = y)\nress = slicefinder(X = X,W = w, Y = y, k = 5, paq = 1, S = 2);\n```\n+## `normalize`-Function\n+\n+The `normalize`-function normalises the values of a matrix by changing the dataset to use a common scale.\n+This is done while preserving differences in the ranges of values.\n+The output is a matrix of values in range [0,1].\n+\n+### Usage\n+```r\n+normalize(X);\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Matrix of feature vectors. |\n+\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | 1-column matrix of normalized values. |\n+\n+\n+\n+### Example\n+```r\n+X = rand(rows = 50, cols = 10)\n+y = X %*% rand(rows=ncol(X), cols=1)\n+y = normalize(X = X)\n+\n+```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin normalize function
Closes #935. |
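Given the stated [0,1] output range that preserves differences in value ranges, the usual reading is per-column min-max rescaling; a NumPy sketch under that assumption:

```python
import numpy as np

def normalize(X):
    lo, hi = X.min(axis=0), X.max(axis=0)
    rng = np.where(hi > lo, hi - lo, 1.0)  # avoid division by zero
    return (X - lo) / rng                  # each column mapped into [0, 1]
```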
49,706 | 03.06.2020 18:57:30 | -7,200 | d8c9495ae6d8e0507d113718b23dfd8fa5035e6d | [MINOR] Fix BuiltinFunctionExpression
Closes
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysds/parser/BuiltinFunctionExpression.java",
"diff": "@@ -912,6 +912,14 @@ public class BuiltinFunctionExpression extends DataIdentifier\ncase NROW:\ncase NCOL:\ncase LENGTH:\n+ checkNumParameters(1);\n+ checkDataTypeParam(getFirstExpr(),\n+ DataType.FRAME, DataType.LIST, DataType.MATRIX);\n+ output.setDataType(DataType.SCALAR);\n+ output.setDimensions(0, 0);\n+ output.setBlocksize(0);\n+ output.setValueType(ValueType.INT64);\n+ break;\ncase COUNT_DISTINCT:\ncase COUNT_DISTINCT_APPROX:\ncheckNumParameters(1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix BuiltinFunctionExpression
Closes #937.
Closes #944. |
49,733 | 04.06.2020 09:40:49 | -19,080 | 1c2ec03b48734e3f79d2ea9a35aa3d2891f15b27 | [DOC][1/2] Documentation for builtin KMeans function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -23,6 +23,7 @@ limitations under the License.\n* [Built-In Construction Functions](#built-in-construction-functions)\n* [`tensor`-Function](#tensor-function)\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n+ * [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n* [`lmDS`-Function](#lmds-function)\n* [`lmCG`-Function](#lmcg-function)\n@@ -114,6 +115,32 @@ Note that reshape construction is not yet supported for **SPARK** execution.\n**DML-bodied built-in functions** are written as DML-Scripts and executed as such when called.\n+## `KMeans`-Function\n+\n+The kmeans() implements the KMeans Clustering algorithm.\n+\n+### Usage\n+```r\n+kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = FALSE, avg_sample_size_per_centroid = 50)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :--------- | :-------------- | :--------- | :---------- |\n+| x | Matrix[Double] | required | The input Matrix to do KMeans on. |\n+| k | Int | `10` | Number of centroids |\n+| runs | Int | `10` | Number of runs (with different initial centroids) |\n+| max_iter | Int | `100` |Max no. of iterations allowed |\n+| eps | Double | `0.000001` | Tolerance (epsilon) for WCSS change ratio |\n+| is_verbose | Boolean | FALSE | do not print per-iteration stats |\n+\n+### Returns\n+| Type | Description |\n+| :----- | :---------- |\n+| String | The mapping of records to centroids |\n+| String | The output matrix with the centroids |\n+\n+\n## `lm`-Function\nThe `lm`-function solves linear regression using either the **direct solve method** or the **conjugate gradient algorithm**\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC][1/2] Documentation for builtin KMeans function
Closes #933. |
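As a reference for the documented parameters (`max_iter`, `eps` as a WCSS change-ratio tolerance), one Lloyd-style k-means loop in NumPy — a generic sketch with a single random initialization, not the builtin's multi-run logic:

```python
import numpy as np

def kmeans(X, k=10, max_iter=100, eps=1e-6):
    rng = np.random.default_rng(0)
    C = X[rng.choice(len(X), k, replace=False)]       # initial centroids
    prev = np.inf
    for _ in range(max_iter):
        d = ((X[:, None, :] - C[None, :, :]) ** 2).sum(axis=2)
        a = d.argmin(axis=1)                          # record -> centroid
        wcss = d[np.arange(len(X)), a].sum()
        C = np.array([X[a == j].mean(axis=0) if np.any(a == j) else C[j]
                      for j in range(k)])             # keep empty clusters
        if prev - wcss < eps * wcss:                  # WCSS change ratio
            break
        prev = wcss
    return a, C
```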
49,683 | 04.06.2020 11:06:47 | -19,080 | 859ad3a72906c67b7300c9980251da4cde9ed8f8 | [DOC] Documentation for builtin toOneHot function
Example description:
Number of classes (numClasses): 5
Input Matrix (X):
2.000 3.000 2.000
2.000 3.000 2.000
3.000 4.000 2.000
Result from toOneHot(X, numClasses):
0.000 1.000 0.000 0.000 0.000
0.000 1.000 0.000 0.000 0.000
0.000 0.000 1.000 0.000 0.000
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -33,7 +33,7 @@ limitations under the License.\n* [`steplm`-Function](#steplm-function)\n* [`slicefinder`-Function](#slicefinder-function)\n* [`normalize`-Function](#normalize-function)\n-\n+ * [`toOneHot`-Function](#toOneHOt-function)\n# Introduction\n@@ -442,3 +442,30 @@ y = X %*% rand(rows=ncol(X), cols=1)\ny = normalize(X = X)\n```\n+\n+## `toOneHot`-Function\n+\n+The `toOneHot`-function encodes unordered categorical vector to multiple binarized vectors.\n+\n+### Usage\n+```r\n+toOneHot(X, numClasses)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :--------- | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | vector with N integer entries between 1 and numClasses. |\n+| numClasses | int | required | number of columns, must be greater than or equal to largest value in X. |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | one-hot-encoded matrix with shape (N, numClasses). |\n+\n+### Example\n+```r\n+numClasses = 5\n+X = round(rand(rows = 10, cols = 10, min = 1, max = numClasses))\n+y = toOneHot(X,numClasses)\n+```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin toOneHot function
Example description:
Number of classes (numClasses): 5
Input Matrix (X):
2.000 3.000 2.000
2.000 3.000 2.000
3.000 4.000 2.000
Result from toOneHot(X, numClasses):
0.000 1.000 0.000 0.000 0.000
0.000 1.000 0.000 0.000 0.000
0.000 0.000 1.000 0.000 0.000
Closes #939. |
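The example above encodes 1-based class ids into indicator rows; an equivalent NumPy sketch reproducing those rows:

```python
import numpy as np

def to_one_hot(x, num_classes):
    x = np.asarray(x, dtype=int).ravel()   # 1-based ids in 1..num_classes
    out = np.zeros((x.size, num_classes))
    out[np.arange(x.size), x - 1] = 1.0
    return out

print(to_one_hot([2, 2, 3], 5))            # matches the result rows shown above
```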
49,715 | 05.06.2020 17:36:46 | -19,080 | 9939f8c729df9479adde14fd9457723daf295d4e | [DOC] Documentation for builtin msvm function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -34,6 +34,7 @@ limitations under the License.\n* [`slicefinder`-Function](#slicefinder-function)\n* [`normalize`-Function](#normalize-function)\n* [`gnmf`-Function](#gnmf-function)\n+ * [`msvm`-Function](#msvm-function)\n* [`toOneHot`-Function](#toOneHOt-function)\n@@ -503,3 +504,40 @@ numClasses = 5\nX = round(rand(rows = 10, cols = 10, min = 1, max = numClasses))\ny = toOneHot(X,numClasses)\n```\n+\n+## `msvm`-Function\n+\n+The `msvm`-function implements builtin multiclass SVM with squared slack variables\n+It learns one-against-the-rest binary-class classifiers by making a function call to l2SVM\n+\n+### Usage\n+```r\n+msvm(X, Y, intercept, epsilon, lamda, maxIterations, verbose)\n+```\n+\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X | Double | --- | Matrix X of feature vectors.|\n+| Y | Double | --- | Matrix Y of class labels. |\n+| intercept | Boolean | False | No Intercept ( If set to TRUE then a constant bias column is added to X)|\n+| num_classes | Integer | 10 | Number of classes.|\n+| epsilon | Double | 0.001 | Procedure terminates early if the reduction in objective function value is less than epsilon (tolerance) times the initial objective function value.|\n+| lamda | Double | 1.0 | Regularization parameter (lambda) for L2 regularization|\n+| maxIterations | Integer | 100 | Maximum number of conjugate gradient iterations|\n+| verbose | Boolean | False | Set to true to print while training.|\n+\n+\n+### Returns\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| model | Double | --- | Model matrix. |\n+\n+\n+### Example\n+```r\n+X = rand(rows = 50, cols = 10)\n+y = round(X %*% rand(rows=ncol(X), cols=1))\n+model = msvm(X = X, Y = y, intercept = FALSE, epsilon = 0.005, lambda = 1.0, maxIterations = 100, verbose = FALSE)\n+```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin msvm function
Closes #936. |
49,738 | 05.06.2020 17:45:36 | -7,200 | 9d5999dc91df37beaddbdd48fe3c7487188f52a7 | [MINOR] Fix reading dml scripts from dist fs / object store | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"diff": "@@ -112,8 +112,6 @@ public class DMLScript\npublic static String _uuid = IDHandler.createDistributedUniqueID();\nprivate static final Log LOG = LogFactory.getLog(DMLScript.class.getName());\n- private static FileSystem fs = null;\n-\n///////////////////////////////\n// public external interface\n////////\n@@ -283,7 +281,7 @@ public class DMLScript\n|| IOUtilFunctions.isObjectStoreFileScheme(new Path(fileName)) )\n{\nPath scriptPath = new Path(fileName);\n- fs = IOUtilFunctions.getFileSystem(scriptPath);\n+ FileSystem fs = IOUtilFunctions.getFileSystem(scriptPath);\nin = new BufferedReader(new InputStreamReader(fs.open(scriptPath)));\n}\n// from local file system\n@@ -303,8 +301,6 @@ public class DMLScript\nthrow ex;\n}\nfinally {\n- if(fs != null)\n- fs.close();\nIOUtilFunctions.closeSilently(in);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix reading dml scripts from dist fs / object store |
49,744 | 08.06.2020 12:27:12 | -19,080 | 8750194ed952efadd06608b988bd7ee2b0268a8e | [DOC] Documentation for builtin Outlier function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -35,6 +35,7 @@ limitations under the License.\n* [`normalize`-Function](#normalize-function)\n* [`gnmf`-Function](#gnmf-function)\n* [`msvm`-Function](#msvm-function)\n+ * [`outlier`-Function](#outlier-function)\n* [`toOneHot`-Function](#toOneHOt-function)\n@@ -413,6 +414,7 @@ y = X %*% rand(rows = ncol(X), cols = 1)\nw = lm(X = X, y = y)\nress = slicefinder(X = X,W = w, Y = y, k = 5, paq = 1, S = 2);\n```\n+\n## `normalize`-Function\nThe `normalize`-function normalises the values of a matrix by changing the dataset to use a common scale.\n@@ -478,6 +480,33 @@ H = rand(rows = 2, cols = ncol(X), min = -0.05, max = 0.05);\ngnmf(X = X, rnk = 2, eps = 10^-8, maxi = 10)\n```\n+## `outlier`-Function\n+\n+This `outlier`-function takes a matrix data set as input from where it determines which point(s)\n+have the largest difference from mean.\n+\n+### Usage\n+```r\n+outlier(X, opposite)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------- | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Matrix of Recoded dataset for outlier evaluation |\n+| opposite | Boolean | required | (1)TRUE for evaluating outlier from upper quartile range, (0)FALSE for evaluating outlier from lower quartile range |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | matrix indicating outlier values |\n+\n+### Example\n+```r\n+X = rand (rows = 50, cols = 10)\n+outlier(X=X, opposite=1)\n+```\n+\n## `toOneHot`-Function\nThe `toOneHot`-function encodes unordered categorical vector to multiple binarized vectors.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin Outlier function
Closes #940. |
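One plausible reading of "largest difference from mean" (modeled on R's `outliers::outlier`; an assumption, not taken from the builtin): per column, return the value farthest above the mean, or farthest below it when `opposite` is set:

```python
import numpy as np

def outlier(X, opposite=False):
    m = X.mean(axis=0)
    d = (m - X) if opposite else (X - m)   # flip search direction
    idx = d.argmax(axis=0)                 # most extreme row per column
    return X[idx, np.arange(X.shape[1])]
```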
49,683 | 08.06.2020 13:19:40 | -19,080 | 13a2363d7cae753d79cc8ec6c3bf9823d646189c | [DOC] Documentation for builtin naivebayes function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -35,6 +35,7 @@ limitations under the License.\n* [`normalize`-Function](#normalize-function)\n* [`gnmf`-Function](#gnmf-function)\n* [`msvm`-Function](#msvm-function)\n+ * [`naivebayes`-Function](#naivebayes-function)\n* [`outlier`-Function](#outlier-function)\n* [`toOneHot`-Function](#toOneHOt-function)\n@@ -480,6 +481,36 @@ H = rand(rows = 2, cols = ncol(X), min = -0.05, max = 0.05);\ngnmf(X = X, rnk = 2, eps = 10^-8, maxi = 10)\n```\n+## `naivebayes`-Function\n+\n+The `naivebayes`-function computes the class conditional probabilities and class priors.\n+\n+### Usage\n+```r\n+naivebayes(D, C, laplace, verbose)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| D | Matrix[Double] | required | One dimensional column matrix with N rows. |\n+| C | Matrix[Double] | required | One dimensional column matrix with N rows. |\n+| Laplace | Double | `1` | Any Double value. |\n+| Verbose | Boolean | `TRUE` | Boolean value. |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | Class priors, One dimensional column matrix with N rows. |\n+| Matrix[Double] | Class conditional probabilites, One dimensional column matrix with N rows. |\n+\n+### Example\n+```r\n+D=rand(rows=10,cols=1,min=10)\n+C=rand(rows=10,cols=1,min=10)\n+[prior, classConditionals] = naivebayes(D, C, laplace = 1, verbose = TRUE)\n+```\n+\n## `outlier`-Function\nThis `outlier`-function takes a matrix data set as input from where it determines which point(s)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin naivebayes function
Closes #948. |
49,726 | 08.06.2020 13:36:33 | -19,080 | 898b8c56194f8c98b19187449869945ceffe451b | [DOC] Documentation for builtin gridSearch function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -23,6 +23,7 @@ limitations under the License.\n* [Built-In Construction Functions](#built-in-construction-functions)\n* [`tensor`-Function](#tensor-function)\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n+ * [`gridSearch`-Function](#gridSearch-function)\n* [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n* [`lmDS`-Function](#lmds-function)\n@@ -119,6 +120,42 @@ Note that reshape construction is not yet supported for **SPARK** execution.\n**DML-bodied built-in functions** are written as DML-Scripts and executed as such when called.\n+## `gridSearch`-Function\n+\n+The `gridSearch`-function is used to find the optimal hyper-parameters of a model which results in the most _accurate_\n+predictions. This function takes `train` and `eval` functions by name.\n+\n+### Usage\n+```r\n+gridSearch(X, y, train, predict, params, paramValues, verbose)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Input Matrix of vectors. |\n+| y | Matrix[Double] | required | Input Matrix of vectors. |\n+| train | String | required | Specified training function. |\n+| predict | String | required | Evaluation based function. |\n+| params | List[String] | required | List of parameters |\n+| paramValues | List[Unknown] | required | Range of values for the parameters |\n+| verbose | Boolean | `TRUE` | If `TRUE` print messages are activated |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | Parameter combination |\n+| Frame[Unknown] | Best results model |\n+\n+### Example\n+```r\n+X = rand (rows = 50, cols = 10)\n+y = X %*% rand(rows = ncol(X), cols = 1)\n+params = list(\"reg\", \"tol\", \"maxi\")\n+paramRanges = list(10^seq(0,-4), 10^seq(-5,-9), 10^seq(1,3))\n+[B, opt]= gridSearch(X=X, y=y, train=\"lm\", predict=\"lmPredict\", params=params, paramValues=paramRanges, verbose = TRUE)\n+```\n+\n## `KMeans`-Function\nThe kmeans() implements the KMeans Clustering algorithm.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin gridSearch function
Closes #950. |
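Mechanically, a grid search enumerates the cross-product of parameter values and keeps the best-scoring model; a generic Python sketch where `train` and `score` are caller-supplied callables (hypothetical, and assuming lower scores are better):

```python
from itertools import product

def grid_search(X, y, train, score, params, param_values):
    best, best_args = float("inf"), None
    for combo in product(*param_values):
        args = dict(zip(params, combo))    # e.g. {"reg": ..., "tol": ...}
        model = train(X, y, **args)
        s = score(X, y, model)
        if s < best:                       # lower score assumed better
            best, best_args = s, args
    return best_args, best
```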
49,744 | 08.06.2020 14:17:46 | -19,080 | 8aed1e98f616b3e65ffea4f2d1b54fce6c2ab3e5 | [DOC] Documentation for builtin confusionMatrix function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -23,6 +23,7 @@ limitations under the License.\n* [Built-In Construction Functions](#built-in-construction-functions)\n* [`tensor`-Function](#tensor-function)\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n+ * [`confusionMatrix`-Function](#confusionmatrix-function)\n* [`gridSearch`-Function](#gridSearch-function)\n* [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n@@ -119,6 +120,40 @@ Note that reshape construction is not yet supported for **SPARK** execution.\n**DML-bodied built-in functions** are written as DML-Scripts and executed as such when called.\n+## `confusionMatrix`-Function\n+\n+A `confusionMatrix`-accepts a vector for prediction and a one-hot-encoded matrix, then it computes the max value\n+of each vector and compare them, after which it calculates and returns the sum of classifications and the average of\n+each true class.\n+\n+### Usage\n+```r\n+confusionMatrix(P, Y)\n+```\n+\n+### Arguments\n+\n+| Name | Type | Default | Description |\n+| :--- | :------------- | :------ | :---------- |\n+| P | Matrix[Double] | --- | vector of prediction |\n+| Y | Matrix[Double] | --- | vector of Golden standard One Hot Encoded |\n+\n+### Returns\n+\n+| Name | Type | Description |\n+| :----------- | :------------- | :---------- |\n+| ConfusionSum | Matrix[Double] | The Confusion Matrix Sums of classifications |\n+| ConfusionAvg | Matrix[Double] | The Confusion Matrix averages of each true class |\n+\n+### Example\n+\n+```r\n+numClasses = 1\n+z = rand(rows = 5, cols = 1, min = 1, max = 9)\n+X = round(rand(rows = 5, cols = 1, min = 1, max = numClasses))\n+y = toOneHot(X, numClasses)\n+[ConfusionSum, ConfusionAvg] = confusionMatrix(P=z, Y=y)\n+```\n## `gridSearch`-Function\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin confusionMatrix function
Closes #952. |
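The counting itself can be sketched as follows, comparing 1-based predictions against one-hot targets; the prediction-by-truth axis orientation is an assumption, not taken from the builtin:

```python
import numpy as np

def confusion_matrix(p, y_onehot):
    k = y_onehot.shape[1]
    t = y_onehot.argmax(axis=1)            # true class per row (0-based)
    cm = np.zeros((k, k))
    for pi, ti in zip(np.asarray(p, dtype=int).ravel() - 1, t):
        cm[pi, ti] += 1                    # prediction x truth counts
    col = np.maximum(cm.sum(axis=0, keepdims=True), 1)
    return cm, cm / col                    # sums and per-true-class averages
```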
49,683 | 08.06.2020 14:37:10 | -19,080 | f4780d332de1aba8b05c78f9e6dd50ddc61e28b6 | [DOC] Documentation for builtin mice function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -30,6 +30,7 @@ limitations under the License.\n* [`lmDS`-Function](#lmds-function)\n* [`lmCG`-Function](#lmcg-function)\n* [`lmpredict`-Function](#lmpredict-function)\n+ * [`mice`-Function](#mice-function)\n* [`scale`-Function](#scale-function)\n* [`sigmoid`-Function](#sigmoid-function)\n* [`steplm`-Function](#steplm-function)\n@@ -353,6 +354,37 @@ w = lm(X = X, y = y)\nyp = lmpredict(X, w)\n```\n+## `mice`-Function\n+\n+The `mice`-function implements Multiple Imputation using Chained Equations (MICE) for nominal data.\n+\n+### Usage\n+```r\n+mice(F, cMask, iter, complete, verbose)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------- | :------------- | -------- | :---------- |\n+| F | Frame[String] | required | Data Frame with one-dimensional row matrix with N columns where N>1. |\n+| cMask | Matrix[Double] | required | 0/1 row vector for identifying numeric (0) and categorical features (1) with one-dimensional row matrix with column = ncol(F). |\n+| iter | Integer | `3` | Number of iteration for multiple imputations. |\n+| complete | Integer | `3` | A complete dataset generated though a specific iteration. |\n+| verbose | Boolean | `FALSE` | Boolean value. |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Frame[String] | imputed dataset. |\n+| Frame[String] | A complete dataset generated though a specific iteration. |\n+\n+### Example\n+```r\n+F = as.frame(matrix(\"4 3 2 8 7 8 5\", rows=1, cols=7))\n+cMask = round(rand(rows=1,cols=ncol(F),min=0,max=1))\n+[dataset, singleSet] = mice(F, cMask, iter = 3, complete = 3, verbose = FALSE)\n+```\n+\n## `scale`-Function\nThe scale function is a generic function whose default method centers or scales the column of a numeric matrix.\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/mice.dml",
"new_path": "scripts/builtin/mice.dml",
"diff": "# singleSet Double --- A complete dataset generated though a specific iteration\n# Assumption missing value are represented with empty string i.e \",,\" in csv file\n-# variables with suffix n are storing continous/numeric data and variables with suffix c are storing categorical data\n+# variables with suffix n are storing continuous/numeric data and variables with suffix c are storing categorical data\ns_mice= function(Frame[String] F, Matrix[Double] cMask, Integer iter = 3, Integer complete = 3, Boolean verbose = FALSE)\nreturn(Frame[String] dataset, Frame[String] singleSet)\n{\nif(ncol(F) == 1)\n- stop(\"invalid aregument: can not apply mice on single column\")\n+ stop(\"invalid argument: can not apply mice on single column\")\nif(complete > iter)\ncomplete = iter\n@@ -78,7 +78,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\nXO = replace(target=X, pattern=NaN, replacement=0);\n- # remove categorical features and impute continous features with mean\n+ # remove categorical features and impute continuous features with mean\neX_n = removeEmpty(target=X, margin=\"cols\", select=(cMask==0))\ncol_n = ncol(eX_n);\n# storing the mask/address of missing values\n@@ -150,7 +150,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\n{\nMask_Filled_n = Mask_n;\nMask_Filled_c = Mask_c\n- in_n = 1; in_c = 1; i=1; j=1; # varibales for index selection\n+ in_n = 1; in_c = 1; i=1; j=1; # variables for index selection\nwhile(i <= ncol(dX))\n{\nif(as.scalar(dXMask[1,i]) == 0)\n@@ -197,7 +197,7 @@ return(Frame[String] dataset, Frame[String] singleSet)\ntest_X = removeEmpty(target = slice2, margin = \"cols\", select = selX);\ntest_Y = slice2a[,in_c]\n- # train clasification model\n+ # train classification model\nbeta = multiLogReg(X=train_X, Y=train_Y, icpt = 1, tol = 0.00000001, reg = 0.001, maxi = 100, maxii=0, verbose=FALSE)\n# predicting missing values\n[prob,pred,acc] = multiLogRegPredict(X=test_X, B=beta, Y = test_Y)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin mice function
Closes #953. |
49,744 | 08.06.2020 15:08:54 | -19,080 | e0c87ccfe39cca0103e7190ff93ab7fd63d9d3ae | [DOC] Documentation for builtin winsorize function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -41,6 +41,7 @@ limitations under the License.\n* [`naivebayes`-Function](#naivebayes-function)\n* [`outlier`-Function](#outlier-function)\n* [`toOneHot`-Function](#toOneHOt-function)\n+ * [`winsorize`-Function](#winsorize-function)\n# Introduction\n@@ -705,3 +706,30 @@ X = rand(rows = 50, cols = 10)\ny = round(X %*% rand(rows=ncol(X), cols=1))\nmodel = msvm(X = X, Y = y, intercept = FALSE, epsilon = 0.005, lambda = 1.0, maxIterations = 100, verbose = FALSE)\n```\n+\n+## `winsorize`-Function\n+\n+The `winsorize`-function removes outliers from the data. It does so by computing upper and lower quartile range\n+of the given data then it replaces any value that falls outside this range (less than lower quartile range or more\n+than upper quartile range).\n+\n+### Usage\n+```r\n+winsorize(X)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------- | :------------- | :--------| :---------- |\n+| X | Matrix[Double] | required | recorded data set with possible outlier values |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | Matrix without outlier values |\n+\n+### Example\n+```r\n+X = rand(rows=10, cols=10,min = 1, max=9)\n+Y = winsorize(X=X)\n+```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin winsorize function
Closes #955. |
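Clamping to quantile cutoffs captures the documented behavior; a NumPy sketch where the 5%/95% cutoffs are an assumption — the builtin's exact quartile-based bounds may differ:

```python
import numpy as np

def winsorize(X, lo=0.05, hi=0.95):
    ql = np.quantile(X, lo, axis=0)        # per-column lower cutoff (assumed)
    qh = np.quantile(X, hi, axis=0)        # per-column upper cutoff (assumed)
    return np.clip(X, ql, qh)              # pull values outside [ql, qh] in
```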
49,726 | 08.06.2020 15:25:49 | -19,080 | efd89db58b10166e7045ad9125228ee15c1e9b89 | [DOC] Documentation for builtin pnmf function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -31,6 +31,7 @@ limitations under the License.\n* [`lmCG`-Function](#lmcg-function)\n* [`lmpredict`-Function](#lmpredict-function)\n* [`mice`-Function](#mice-function)\n+ * [`pnmf`-Function](#pnmf-function)\n* [`scale`-Function](#scale-function)\n* [`sigmoid`-Function](#sigmoid-function)\n* [`steplm`-Function](#steplm-function)\n@@ -386,6 +387,39 @@ cMask = round(rand(rows=1,cols=ncol(F),min=0,max=1))\n[dataset, singleSet] = mice(F, cMask, iter = 3, complete = 3, verbose = FALSE)\n```\n+## `pnmf`-Function\n+\n+The `pnmf`-function implements Poisson Non-negative Matrix Factorization (PNMF). Matrix `X` is factorized into\n+two non-negative matrices, `W` and `H` based on Poisson probabilistic assumption. This non-negativity makes the\n+resulting matrices easier to inspect.\n+\n+### Usage\n+```r\n+pnmf(X, rnk, eps = 10^-8, maxi = 10, verbose = TRUE)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Matrix of feature vectors. |\n+| rnk | Integer | required | Number of components into which matrix X is to be factored. |\n+| eps | Double | `10^-8` | Tolerance |\n+| maxi | Integer | `10` | Maximum number of conjugate gradient iterations. |\n+| verbose | Boolean | TRUE | If TRUE, 'iter' and 'obj' are printed.|\n+\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | List of pattern matrices, one for each repetition. |\n+| Matrix[Double] | List of amplitude matrices, one for each repetition. |\n+\n+### Example\n+```r\n+X = rand(rows = 50, cols = 10)\n+[W, H] = pnmf(X = X, rnk = 2, eps = 10^-8, maxi = 10, verbose = TRUE)\n+```\n+\n## `scale`-Function\nThe scale function is a generic function whose default method centers or scales the column of a numeric matrix.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin pnmf function
Closes #960. |
49,689 | 12.06.2020 00:49:14 | -7,200 | f813fc9d5099bf32a7525855652c00b2afd82f1f | Cache MultiReturnBuiltin instructions
This patch enables caching of multi-return instructions like eigen.
Furthermore, this includes a new partial rewrite and adds PCA
as a test case. | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/Tasks.txt",
"new_path": "dev/docs/Tasks.txt",
"diff": "@@ -318,6 +318,8 @@ SYSTEMDS-400 Spark Backend Improvements\nSYSTEMDS-410 Lineage Tracing, Reuse and Integration II\n* 411 Improved handling of multi-level cache duplicates OK\n+ * 412 Robust lineage tracing (non-recursive, parfor) OK\n+ * 413 Cache and reuse MultiReturnBuiltin instructions OK\nOthers:\n* Break append instruction to cbind and rbind\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/MultiReturnBuiltinCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/MultiReturnBuiltinCPInstruction.java",
"diff": "@@ -98,6 +98,10 @@ public class MultiReturnBuiltinCPInstruction extends ComputationCPInstruction {\n}\n+ public int getNumOutputs() {\n+ return _outputs.size();\n+ }\n+\n@Override\npublic void processInstruction(ExecutionContext ec) {\nif(!LibCommonsMath.isSupportedMultiReturnOperation(getOpcode()))\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -37,6 +37,7 @@ import org.apache.sysds.runtime.instructions.cp.CPInstruction.CPType;\nimport org.apache.sysds.runtime.instructions.cp.ComputationCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.Data;\nimport org.apache.sysds.runtime.instructions.cp.MMTSJCPInstruction;\n+import org.apache.sysds.runtime.instructions.cp.MultiReturnBuiltinCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ParameterizedBuiltinCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig.LineageCacheStatus;\n@@ -79,13 +80,25 @@ public class LineageCache\n// will always fit in memory and hence can be pinned unconditionally\nif (LineageCacheConfig.isReusable(inst, ec)) {\nComputationCPInstruction cinst = (ComputationCPInstruction) inst;\n- LineageItem item = cinst.getLineageItem(ec).getValue();\n+ LineageItem instLI = cinst.getLineageItem(ec).getValue();\n+ HashMap<LineageItem, LineageCacheEntry> liMap = new HashMap<>();\n+ if (inst instanceof MultiReturnBuiltinCPInstruction) {\n+ MultiReturnBuiltinCPInstruction mrInst = (MultiReturnBuiltinCPInstruction)inst;\n+ for (int i=0; i<mrInst.getNumOutputs(); i++) {\n+ String opcode = instLI.getOpcode() + String.valueOf(i);\n+ liMap.put(new LineageItem(opcode, instLI.getInputs()), null);\n+ }\n+ }\n+ else\n+ liMap.put(instLI, null);\n//atomic try reuse full/partial and set placeholder, without\n//obtaining value to avoid blocking in critical section\nLineageCacheEntry e = null;\n+ Boolean reuseAll = true;\nsynchronized( _cache ) {\n//try to reuse full or partial intermediates\n+ for (LineageItem item : liMap.keySet()) {\nif (LineageCacheConfig.getCacheType().isFullReuse())\ne = LineageCache.probe(item) ? getIntern(item) : null;\n//TODO need to also move execution of compensation plan out of here\n@@ -93,24 +106,39 @@ public class LineageCache\nif (e == null && LineageCacheConfig.getCacheType().isPartialReuse())\nif( LineageRewriteReuse.executeRewrites(inst, ec) )\ne = getIntern(item);\n- reuse = (e != null);\n+ //TODO: MultiReturnBuiltin and partial rewrites\n+ reuseAll &= (e != null);\n+ liMap.put(item, e);\n//create a placeholder if no reuse to avoid redundancy\n//(e.g., concurrent threads that try to start the computation)\n- if(!reuse && isMarkedForCaching(inst, ec)) {\n+ if(e == null && isMarkedForCaching(inst, ec)) {\nputIntern(item, cinst.output.getDataType(), null, null, 0);\n+ //FIXME: different o/p datatypes for MultiReturnBuiltins.\n+ }\n}\n}\n+ reuse = reuseAll;\nif(reuse) { //reuse\n//put reuse value into symbol table (w/ blocking on placeholders)\n+ for (Map.Entry<LineageItem, LineageCacheEntry> entry : liMap.entrySet()) {\n+ e = entry.getValue();\n+ String outName = null;\n+ if (inst instanceof MultiReturnBuiltinCPInstruction)\n+ outName = ((MultiReturnBuiltinCPInstruction)inst).\n+ getOutput(entry.getKey().getOpcode().charAt(entry.getKey().getOpcode().length()-1)-'0').getName();\n+ else\n+ outName = cinst.output.getName();\n+\nif (e.isMatrixValue())\n- ec.setMatrixOutput(cinst.output.getName(), e.getMBValue());\n+ ec.setMatrixOutput(outName, e.getMBValue());\nelse\n- ec.setScalarOutput(cinst.output.getName(), e.getSOValue());\n+ ec.setScalarOutput(outName, e.getSOValue());\n+ reuse = true;\n+ }\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementInstHits();\n- reuse = true;\n}\n}\n@@ -220,9 +248,23 @@ public class LineageCache\nreturn;\nif (LineageCacheConfig.isReusable(inst, ec) ) {\n//if (!isMarkedForCaching(inst, ec)) return;\n- 
LineageItem item = ((LineageTraceable) inst).getLineageItem(ec).getValue();\n- Data data = ec.getVariable(((ComputationCPInstruction) inst).output);\n+ HashMap<LineageItem, Data> liDataMap = new HashMap<>();\n+ LineageItem instLI = ((LineageTraceable) inst).getLineageItem(ec).getValue();\n+ if (inst instanceof MultiReturnBuiltinCPInstruction) {\n+ MultiReturnBuiltinCPInstruction mrInst = (MultiReturnBuiltinCPInstruction)inst;\n+ for (int i=0; i<mrInst.getNumOutputs(); i++) {\n+ String opcode = instLI.getOpcode() + String.valueOf(i);\n+ LineageItem li = new LineageItem(opcode, instLI.getInputs());\n+ Data value = ec.getVariable(mrInst.getOutput(i));\n+ liDataMap.put(li, value);\n+ }\n+ }\n+ else\n+ liDataMap.put(instLI, ec.getVariable(((ComputationCPInstruction) inst).output));\nsynchronized( _cache ) {\n+ for (Map.Entry<LineageItem, Data> entry : liDataMap.entrySet()) {\n+ LineageItem item = entry.getKey();\n+ Data data = entry.getValue();\nif (data instanceof MatrixObject)\n_cache.get(item).setValue(((MatrixObject)data).acquireReadAndRelease(), computetime);\nelse if (data instanceof ScalarObject)\n@@ -240,6 +282,7 @@ public class LineageCache\n}\n}\n}\n+ }\npublic static void putValue(List<DataIdentifier> outputs, LineageItem[] liInputs,\nString name, ExecutionContext ec, long computetime)\n@@ -258,6 +301,7 @@ public class LineageCache\nboundLI.resetVisitStatusNR();\nif (boundLI == null || !LineageCache.probe(li) || !LineageCache.probe(boundLI)) {\nAllOutputsCacheable = false;\n+ //FIXME: if boundLI is for a MultiReturnBuiltin instruction\n}\nFuncLIMap.put(li, boundLI);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -37,7 +37,8 @@ public class LineageCacheConfig\n\"tsmm\", \"ba+*\", \"*\", \"/\", \"+\", \"||\", \"nrow\", \"ncol\", \"round\", \"exp\", \"log\",\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\",\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n- \"^\", \"uamax\", \"uark+\"\n+ \"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n+ \"^2\", \"uack+\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -71,6 +71,7 @@ public class LineageRewriteReuse\nprivate static final String LR_VAR = \"__lrwrt\";\nprivate static BasicProgramBlock _lrPB = null;\nprivate static ExecutionContext _lrEC = null;\n+ private static boolean _disableReuse = true;\nprivate static final Log LOG = LogFactory.getLog(LineageRewriteReuse.class.getName());\nprivate static boolean LDEBUG = false; //internal debugging\n@@ -111,6 +112,8 @@ public class LineageRewriteReuse\nnewInst = (newInst == null) ? rewriteElementMulCbind(curr, ec, lrwec) : newInst;\n//aggregate(target=cbind(X, deltaX,...) = cbind(aggregate(target=X,...), aggregate(target=deltaX,...)) for same agg function\nnewInst = (newInst == null) ? rewriteAggregateCbind(curr, ec, lrwec) : newInst;\n+ //A %*% B[,1:k] = (A %*% B)[,1:k];\n+ newInst = (newInst == null) ? rewriteIndexingMatMul(curr, ec, lrwec) : newInst;\nif (newInst == null)\nreturn false;\n@@ -221,6 +224,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteTsmmCbindOnes APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -262,6 +266,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteTsmmRbind APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -322,6 +327,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteTsmm2Cbind APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -364,6 +370,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteMetMulRbindLeft APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -406,6 +413,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteMatMulCbindRight APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -437,6 +445,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteMatMulCbindRightOnes APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -490,6 +499,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteElementMulRbind APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -543,6 +553,7 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteElementMulCbind APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -596,6 +607,58 @@ public class LineageRewriteReuse\nif (LOG.isDebugEnabled())\nLOG.debug(\"LINEAGE REWRITE rewriteElementMulCbind APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\n+\n+ if 
(DMLScript.STATISTICS)\n+ LineageCacheStatistics.incrementPRewrites();\n+ return inst;\n+ }\n+\n+ private static ArrayList<Instruction> rewriteIndexingMatMul (Instruction curr, ExecutionContext ec, ExecutionContext lrwec)\n+ {\n+ /* This rewrite replaces the indexed matrix with its source as an\n+ * input to matrix multiplication, with the hope that in future\n+ * iterations all the outputs can be sliced out from the cached\n+ * result (e.g. PCA in a loop)\n+ * Note: this particular rewrite needs to cache the compensation plan\n+ * execution results (unlike other rewrites) to be effective.\n+ * TODO: Generalize for all cases and move to compiler\n+ */\n+ // Check the applicability of this rewrite.\n+ Map<String, MatrixBlock> inCache = new HashMap<>();\n+ if (!isIndexingMatMul (curr, ec, inCache))\n+ return null;\n+\n+ // Create a transient read op over the input to rightIndex\n+ MatrixBlock indexSource = inCache.get(\"indexSource\");\n+ lrwec.setVariable(\"indexSource\", convMBtoMO(indexSource));\n+ DataOp input2Index = HopRewriteUtils.createTransientRead(\"indexSource\", indexSource);\n+ // Create or read the matrix multiplication\n+ Hop matMultRes;\n+ MatrixObject moL = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n+ if (inCache.containsKey(\"BigMatMult\")) {\n+ MatrixBlock BigMatMult = inCache.get(\"BigMatMult\");\n+ lrwec.setVariable(\"BigMatMult\", convMBtoMO(BigMatMult));\n+ matMultRes = HopRewriteUtils.createTransientRead(\"BigMatMult\", BigMatMult);\n+ }\n+ else {\n+ lrwec.setVariable(\"left\", moL);\n+ DataOp leftMatrix = HopRewriteUtils.createTransientRead(\"left\", moL);\n+ matMultRes = HopRewriteUtils.createMatrixMultiply(leftMatrix, input2Index);\n+ // Perform the multiplication once and cache for future iterations.\n+ }\n+ // Gather the indexing parameters.\n+ MatrixObject moR = ec.getMatrixObject(((ComputationCPInstruction)curr).input2);\n+ IndexingOp lrwHop = HopRewriteUtils.createIndexingOp(matMultRes, new LiteralOp(1),\n+ new LiteralOp(moL.getNumRows()), new LiteralOp(1), new LiteralOp(moR.getNumColumns()));\n+ DataOp lrwWrite = HopRewriteUtils.createTransientWrite(LR_VAR, lrwHop);\n+\n+ // generate runtime instructions\n+ if (LOG.isDebugEnabled())\n+ LOG.debug(\"LINEAGE REWRITE rewriteIndexingMatMul APPLIED\");\n+ ArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ // Keep reuse enabled\n+ _disableReuse = false;\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\n@@ -873,6 +936,33 @@ public class LineageRewriteReuse\nreturn inCache.containsKey(\"lastMatrix\") ? 
true : false;\n}\n+\n+ private static boolean isIndexingMatMul(Instruction curr, ExecutionContext ec, Map<String, MatrixBlock> inCache) {\n+ if (!LineageCacheConfig.isReusable(curr, ec)) {\n+ return false;\n+ }\n+ /* rightIndex -> ba+* is too generic.\n+ * Use ba+* -> rightIndex -> ba+* to avoid false positives.\n+ * TODO: generalized but robust applicability function\n+ */\n+\n+ // Check if the right input of ba+* came from rightIndex\n+ LineageItem item = ((ComputationCPInstruction) curr).getLineageItem(ec).getValue();\n+ if (curr.getOpcode().equalsIgnoreCase(\"ba+*\")) {\n+ LineageItem left = item.getInputs()[0];\n+ LineageItem right = item.getInputs()[1];\n+ if (right.getOpcode().equalsIgnoreCase(\"rightIndex\")) {\n+ LineageItem indexSource = right.getInputs()[0];\n+ if (LineageCache.probe(indexSource) && indexSource.getOpcode().equalsIgnoreCase(\"ba+*\"))\n+ inCache.put(\"indexSource\", LineageCache.getMatrix(indexSource));\n+ LineageItem tmp = new LineageItem(item.getOpcode(), new LineageItem[] {left, indexSource});\n+ if (LineageCache.probe(tmp))\n+ inCache.put(\"BigMatMult\", LineageCache.getMatrix(tmp));\n+ }\n+ }\n+ // return true only if the input to rightIndex is found\n+ return inCache.containsKey(\"indexSource\") ? true : false;\n+ }\n+\n/*----------------------INSTRUCTIONS GENERATION & EXECUTION-----------------------*/\nprivate static ArrayList<Instruction> genInst(Hop hops, ExecutionContext ec) {\n@@ -896,8 +986,10 @@ public class LineageRewriteReuse\nBasicProgramBlock pb = getProgramBlock();\npb.setInstructions(newInst);\nReuseCacheType oldReuseOption = DMLScript.LINEAGE_REUSE;\n+ if (_disableReuse)\nLineageCacheConfig.shutdownReuse();\npb.execute(lrwec);\n+ if (_disableReuse)\nLineageCacheConfig.restartReuse(oldReuseOption);\n}\ncatch (Exception e) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"diff": "@@ -39,7 +39,7 @@ public class LineageReuseAlg extends AutomatedTestBase {\nprotected static final String TEST_DIR = \"functions/lineage/\";\nprotected static final String TEST_NAME = \"LineageReuseAlg\";\n- protected static final int TEST_VARIANTS = 3;\n+ protected static final int TEST_VARIANTS = 4;\nprotected String TEST_CLASS_DIR = TEST_DIR + LineageReuseAlg.class.getSimpleName() + \"/\";\n@Override\n@@ -64,6 +64,11 @@ public class LineageReuseAlg extends AutomatedTestBase {\ntestLineageTrace(TEST_NAME+\"3\", ReuseCacheType.REUSE_HYBRID);\n}\n+ @Test\n+ public void testPCAHybrid() {\n+ testLineageTrace(TEST_NAME+\"4\", ReuseCacheType.REUSE_HYBRID);\n+ }\n+\n@Test\npublic void testStepLMFull() {\ntestLineageTrace(TEST_NAME+\"1\", ReuseCacheType.REUSE_FULL);\n@@ -79,6 +84,11 @@ public class LineageReuseAlg extends AutomatedTestBase {\ntestLineageTrace(TEST_NAME+\"3\", ReuseCacheType.REUSE_FULL);\n}\n+ @Test\n+ public void testPCAFull() {\n+ testLineageTrace(TEST_NAME+\"4\", ReuseCacheType.REUSE_FULL);\n+ }\n+\npublic void testLineageTrace(String testname, ReuseCacheType reuseType) {\nboolean old_simplification = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\nboolean old_sum_product = OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/LineageReuseAlg4.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# INPUT String --- Location to read the matrix A of feature vectors\n+# K Int --- Indicates dimension of the new vector space constructed from eigen vector\n+# CENTER Int 0 Indicates whether or not to center data\n+# SCALE Int 0 Indicates whether or not to scale data\n+# PROJDATA Int 0 This argument indicates if the data should be projected or not\n+# ---------------------------------------------------------------------------------------------\n+\n+PCA = function(Matrix[Double] A, Integer K = ncol(A), Integer center = 1, Integer scale = 1,\n+ Integer projectData = 1) return(Matrix[Double] newA)\n+{\n+ evec_dominant = matrix(0,cols=1,rows=1);\n+\n+ N = nrow(A);\n+ D = ncol(A);\n+\n+ # perform z-scoring (centering and scaling)\n+ A = scale(A, center==1, scale==1);\n+\n+ # co-variance matrix\n+ mu = colSums(A)/N;\n+ C = (t(A) %*% A)/(N-1) - (N/(N-1))*t(mu) %*% mu;\n+\n+ # compute eigen vectors and values\n+ [evalues, evectors] = eigen(C);\n+\n+ decreasing_Idx = order(target=evalues,by=1,decreasing=TRUE,index.return=TRUE);\n+ diagmat = table(seq(1,D),decreasing_Idx);\n+ # sorts eigenvalues by decreasing order\n+ evalues = diagmat %*% evalues;\n+ # sorts eigenvectors column-wise in the order of decreasing eigenvalues\n+ evectors = evectors %*% diagmat;\n+\n+\n+ # select K dominant eigen vectors\n+ nvec = ncol(evectors);\n+\n+ eval_dominant = evalues[1:K, 1];\n+ evec_dominant = evectors[,1:K];\n+\n+ # the square root of eigenvalues\n+ eval_stdev_dominant = sqrt(eval_dominant);\n+\n+ if (projectData == 1){\n+ # Construct new data set by treating computed dominant eigenvectors as the basis vectors\n+ newA = A %*% evec_dominant;\n+ }\n+}\n+\n+A = rand(rows=100, cols=10, seed=42);\n+R = matrix(0, rows=1, cols=ncol(A));\n+for (i in 1:ncol(A)) {\n+ newA = PCA(A=A, K=i);\n+ while(FALSE){}\n+ R[,i] = sum(newA);\n+}\n+write(R, $1, format=\"text\");\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-413] Cache MultiReturnBuiltin instructions
This patch enables caching of multi-return instructions like eigen.
Furthermore, this includes a new partial rewrite and adds PCA
as a test case. |
49,744 | 13.06.2020 14:31:56 | -19,080 | 30b6f2746682100fab4cfe785d777604902f8da2 | [DOC] Documentation for builtin glm function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -24,6 +24,7 @@ limitations under the License.\n* [`tensor`-Function](#tensor-function)\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n* [`confusionMatrix`-Function](#confusionmatrix-function)\n+ * [`glm`-Function](#glm-function)\n* [`gridSearch`-Function](#gridSearch-function)\n* [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n@@ -158,6 +159,45 @@ y = toOneHot(X, numClasses)\n[ConfusionSum, ConfusionAvg] = confusionMatrix(P=z, Y=y)\n```\n+## `glm`-Function\n+\n+The `glm`-function is a flexible generalization of ordinary linear regression that allows for response variables that have\n+error distribution models.\n+\n+### Usage\n+```r\n+glm(X,Y)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :--- | :------------- | :------- | :---------- |\n+| X | Matrix[Double] | required | matrix X of feature vectors |\n+| Y | Matrix[Double] | required | matrix Y with either 1 or 2 columns: if dfam = 2, Y is 1-column Bernoulli or 2-column Binomial (#pos, #neg) |\n+| dfam | Int | `1` | Distribution family code: 1 = Power, 2 = Binomial |\n+| vpow | Double | `0.0` | Power for Variance defined as (mean)^power (ignored if dfam != 1): 0.0 = Gaussian, 1.0 = Poisson, 2.0 = Gamma, 3.0 = Inverse Gaussian |\n+| link | Int | `0` | Link function code: 0 = canonical (depends on distribution), 1 = Power, 2 = Logit, 3 = Probit, 4 = Cloglog, 5 = Cauchit |\n+| lpow | Double | `1.0` | Power for Link function defined as (mean)^power (ignored if link != 1): -2.0 = 1/mu^2, -1.0 = reciprocal, 0.0 = log, 0.5 = sqrt, 1.0 = identity |\n+| yneg | Double | `0.0` | Response value for Bernoulli \"No\" label, usually 0.0 or -1.0 |\n+| icpt | Int | `0` | Intercept presence, X columns shifting and rescaling: 0 = no intercept, no shifting, no rescaling; 1 = add intercept, but neither shift nor rescale X; 2 = add intercept, shift & rescale X columns to mean = 0, variance = 1 |\n+| reg | Double | `0.0` | Regularization parameter (lambda) for L2 regularization |\n+| tol | Double | `1e-6` | Tolerance (epislon) value. |\n+| disp | Double | `0.0` | (Over-)dispersion value, or 0.0 to estimate it from data |\n+| moi | Int | `200` | Maximum number of outer (Newton / Fisher Scoring) iterations |\n+| mii | Int | `0` | Maximum number of inner (Conjugate Gradient) iterations, 0 = no maximum |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :--------------- |\n+| Matrix[Double] | Matrix whose size depends on icpt ( icpt=0: ncol(X) x 1; icpt=1: (ncol(X) + 1) x 1; icpt=2: (ncol(X) + 1) x 2) |\n+\n+### Example\n+```r\n+X = rand (rows = 5, cols = 5 )\n+y = X %*% rand(rows = ncol(X), cols = 1)\n+beta = glm(X=X,Y=y)\n+```\n+\n## `gridSearch`-Function\nThe `gridSearch`-function is used to find the optimal hyper-parameters of a model which results in the most _accurate_\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin glm function
Closes #968. |
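A hedged DML sketch of the documented `glm` builtin; the Poisson/log-link parameterization (`dfam=1, vpow=1.0, link=1, lpow=0.0`) follows the argument table above, while the synthetic data is an illustrative assumption:

```r
X = rand(rows=100, cols=5, seed=1)
# non-negative counts for a Poisson response
y = round(3 + X %*% rand(rows=ncol(X), cols=1, seed=2))
beta = glm(X=X, Y=y, dfam=1, vpow=1.0, link=1, lpow=0.0)
print(toString(beta))
```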
49,715 | 13.06.2020 14:53:27 | -19,080 | 752b4a2975a3e890e2e220bc43f7d537fb9afbd2 | [DOC] Builtin image data augmentation functions
* img_brightness()
* img_crop()
* img_mirror()
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -26,6 +26,9 @@ limitations under the License.\n* [`confusionMatrix`-Function](#confusionmatrix-function)\n* [`glm`-Function](#glm-function)\n* [`gridSearch`-Function](#gridSearch-function)\n+ * [`img_brightness`-Function](#img_brightness-function)\n+ * [`img_crop`-Function](#img_crop-function)\n+ * [`img_mirror`-Function](#img_mirror-function)\n* [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n* [`lmDS`-Function](#lmds-function)\n@@ -234,6 +237,91 @@ paramRanges = list(10^seq(0,-4), 10^seq(-5,-9), 10^seq(1,3))\n[B, opt]= gridSearch(X=X, y=y, train=\"lm\", predict=\"lmPredict\", params=params, paramValues=paramRanges, verbose = TRUE)\n```\n+## `img_brightness`-Function\n+\n+The `img_brightness`-function is an image data augumentation function.\n+It changes the brightness of the image.\n+\n+### Usage\n+```r\n+img_brightness(img_in, value, channel_max)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :---------- | :------------- | -------- | :---------- |\n+| img_in | Matrix[Double] | --- | Input matrix/image |\n+| value | Double | --- | The amount of brightness to be changed for the image |\n+| channel_max | Integer | --- | Maximum value of the brightness of the image |\n+\n+### Returns\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| img_out | Matrix[Double] | --- | Output matrix/image |\n+\n+### Example\n+```r\n+A = rand(rows = 3, cols = 3, min = 0, max = 255)\n+B = img_brightness(img_in = A, value = 128, channel_max = 255)\n+```\n+\n+## `img_crop`-Function\n+\n+The `img_crop`-function is an image data augumentation function.\n+It cuts out a subregion of an image.\n+\n+### Usage\n+```r\n+img_crop(img_in, w, h, x_offset, y_offset)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| img_in | Matrix[Double] | --- | Input matrix/image |\n+| w | Integer | --- | The width of the subregion required |\n+| h | Integer | --- | The height of the subregion required |\n+| x_offset | Integer | --- | The horizontal coordinate in the image to begin the crop operation |\n+| y_offset | Integer | --- | The vertical coordinate in the image to begin the crop operation |\n+\n+### Returns\n+| Name | Type | Default | Description |\n+| :------ | :------------- | ------- | :---------- |\n+| img_out | Matrix[Double] | --- | Cropped matrix/image |\n+\n+### Example\n+```r\n+A = rand(rows = 3, cols = 3, min = 0, max = 255)\n+B = img_crop(img_in = A, w = 20, h = 10, x_offset = 0, y_offset = 0)\n+```\n+\n+## `img_mirror`-Function\n+\n+The `img_mirror`-function is an image data augumentation function.\n+It flips an image on the `X` (horizontal) or `Y` (vertical) axis.\n+\n+### Usage\n+```r\n+img_mirror(img_in, horizontal_axis)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :-------------- | :------------- | -------- | :---------- |\n+| img_in | Matrix[Double] | --- | Input matrix/image |\n+| horizontal_axis | Boolean | --- | If TRUE, the image is flipped with respect to horizontal axis otherwise vertical axis |\n+\n+### Returns\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| img_out | Matrix[Double] | --- | Flipped matrix/image |\n+\n+### Example\n+```r\n+A = rand(rows = 3, cols = 3, min = 0, max = 255)\n+B = img_mirror(img_in = A, horizontal_axis = TRUE)\n+```\n+\n## `KMeans`-Function\nThe kmeans() implements the KMeans Clustering algorithm.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Builtin image data augmentation functions
* img_brightness()
* img_crop()
* img_mirror()
Closes #959. |
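A compact DML sketch chaining the three documented augmentation builtins; the 32x32 image and all parameter values are illustrative assumptions:

```r
img = round(rand(rows=32, cols=32, min=0, max=255, seed=42))
bright = img_brightness(img_in=img, value=64, channel_max=255)
patch = img_crop(img_in=img, w=16, h=16, x_offset=4, y_offset=4)
flip = img_mirror(img_in=img, horizontal_axis=TRUE)
print(sum(bright) + sum(patch) + sum(flip))
```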
49,744 | 13.06.2020 15:11:34 | -19,080 | fb9a480261e7abf6d0610ae04e9dc6d751eb6206 | [DOC] Documentation for builtin cvlm function
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -24,6 +24,7 @@ limitations under the License.\n* [`tensor`-Function](#tensor-function)\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n* [`confusionMatrix`-Function](#confusionmatrix-function)\n+ * [`cvlm`-Function](#cvlm-function)\n* [`glm`-Function](#glm-function)\n* [`gridSearch`-Function](#gridSearch-function)\n* [`img_brightness`-Function](#img_brightness-function)\n@@ -162,6 +163,39 @@ y = toOneHot(X, numClasses)\n[ConfusionSum, ConfusionAvg] = confusionMatrix(P=z, Y=y)\n```\n+## `cvlm`-Function\n+\n+The `cvlm`-function is used for cross-validation of the provided data model. This function follows a non-exhaustive\n+cross validation method. It uses [`lm`](#lm-function) and [`lmpredict`](#lmpredict-function) functions to solve the linear\n+regression and to predict the class of a feature vector with no intercept, shifting, and rescaling.\n+\n+### Usage\n+```r\n+cvlm(X, y, k)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :--- | :------------- | :------- | :---------- |\n+| X | Matrix[Double] | required | Recorded Data set into matrix |\n+| y | Matrix[Double] | required | 1-column matrix of response values. |\n+| k | Integer | required | Number of subsets needed, It should always be more than `1` and less than `nrow(X)` |\n+| icpt | Integer | `0` | Intercept presence, shifting and rescaling the columns of X |\n+| reg | Double | `1e-7` | Regularization constant (lambda) for L2-regularization. set to nonzero for highly dependant/sparse/numerous features |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | Response values |\n+| Matrix[Double] | Validated data set |\n+\n+### Example\n+```r\n+X = rand (rows = 5, cols = 5)\n+y = X %*% rand(rows = ncol(X), cols = 1)\n+[predict, beta] = cvlm(X = X, y = y, k = 4)\n+```\n+\n## `glm`-Function\nThe `glm`-function is a flexible generalization of ordinary linear regression that allows for response variables that have\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] Documentation for builtin cvlm function
Closes #962. |
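A minimal DML sketch of the documented `cvlm` cross-validation; the 5 folds and synthetic data are illustrative assumptions:

```r
X = rand(rows=100, cols=10, seed=42)
y = X %*% rand(rows=ncol(X), cols=1, seed=43)
# 5-fold cross-validation, no intercept, default regularization
[yhat, beta] = cvlm(X=X, y=y, k=5, icpt=0, reg=1e-7)
print(nrow(yhat))
```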
49,733 | 14.06.2020 09:00:29 | -19,080 | 0c2a2b393ae76da4068288c2be02fb753289b45b | [DOC][1/2] imputeByFD and discoverFD builtin func.
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -25,11 +25,13 @@ limitations under the License.\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n* [`confusionMatrix`-Function](#confusionmatrix-function)\n* [`cvlm`-Function](#cvlm-function)\n+ * [`discoverFD`-Function](#discoverFD-function)\n* [`glm`-Function](#glm-function)\n* [`gridSearch`-Function](#gridSearch-function)\n* [`img_brightness`-Function](#img_brightness-function)\n* [`img_crop`-Function](#img_crop-function)\n* [`img_mirror`-Function](#img_mirror-function)\n+ * [`imputeByFD`-Function](#imputeByFD-function)\n* [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n* [`lmDS`-Function](#lmds-function)\n@@ -196,6 +198,28 @@ y = X %*% rand(rows = ncol(X), cols = 1)\n[predict, beta] = cvlm(X = X, y = y, k = 4)\n```\n+## `discoverFD`-Function\n+\n+The `discoverFD`-function finds the functional dependencies.\n+\n+### Usage\n+```r\n+discoverFD(X, Mask, threshold)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :-------- | :----- | ------- | :---------- |\n+| X | Double | -- | Input Matrix X, encoded Matrix if data is categorical |\n+| Mask | Double | -- | A row vector for interested features i.e. Mask =[1, 0, 1] will exclude the second column from processing |\n+| threshold | Double | -- | threshold value in interval [0, 1] for robust FDs |\n+\n+### Returns\n+| Type | Description |\n+| :----- | :---------- |\n+| Double | matrix of functional dependencies |\n+\n+\n## `glm`-Function\nThe `glm`-function is a flexible generalization of ordinary linear regression that allows for response variables that have\n@@ -356,6 +380,30 @@ A = rand(rows = 3, cols = 3, min = 0, max = 255)\nB = img_mirror(img_in = A, horizontal_axis = TRUE)\n```\n+## `imputeByFD`-Function\n+\n+The `imputeByFD`-function imputes missing values from observed values (if exist)\n+using robust functional dependencies.\n+\n+### Usage\n+```r\n+imputeByFD(F, sourceAttribute, targetAttribute, threshold)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :-------- | :------ | -------- | :---------- |\n+| F | String | -- | A data frame |\n+| source | Integer | -- | Source attribute to use for imputation and error correction |\n+| target | Integer | -- | Attribute to be fixed |\n+| threshold | Double | -- | threshold value in interval [0, 1] for robust FDs |\n+\n+### Returns\n+| Type | Description |\n+| :----- | :---------- |\n+| String | Frame with possible imputations |\n+\n+\n## `KMeans`-Function\nThe kmeans() implements the KMeans Clustering algorithm.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC][1/2] imputeByFD and discoverFD builtin func.
Closes #969. |
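A small DML sketch of the documented `discoverFD` builtin; the low-cardinality integer codes mimic an encoded categorical data set, and the mask and threshold are illustrative assumptions:

```r
X = round(rand(rows=200, cols=4, min=1, max=5, seed=42))
Mask = matrix("1 0 1 1", rows=1, cols=4) # exclude the second column
FD = discoverFD(X=X, Mask=Mask, threshold=0.9)
print(toString(FD))
```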
49,733 | 14.06.2020 09:56:12 | -19,080 | ade2bd7c07403c64e7a46327f14c636da3da4ae9 | [DOC] multiLogReg and intersect builtin func.
Closes
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/builtins-reference.md",
"new_path": "dev/docs/builtins-reference.md",
"diff": "@@ -32,12 +32,14 @@ limitations under the License.\n* [`img_crop`-Function](#img_crop-function)\n* [`img_mirror`-Function](#img_mirror-function)\n* [`imputeByFD`-Function](#imputeByFD-function)\n+ * [`intersect`-Function](#intersect-function)\n* [`KMeans`-Function](#KMeans-function)\n* [`lm`-Function](#lm-function)\n* [`lmDS`-Function](#lmds-function)\n* [`lmCG`-Function](#lmcg-function)\n* [`lmpredict`-Function](#lmpredict-function)\n* [`mice`-Function](#mice-function)\n+ * [`multiLogReg`-Function](#multiLogReg-function)\n* [`pnmf`-Function](#pnmf-function)\n* [`scale`-Function](#scale-function)\n* [`sigmoid`-Function](#sigmoid-function)\n@@ -475,6 +477,27 @@ y = X %*% rand(rows = ncol(X), cols = 1)\nlm(X = X, y = y)\n```\n+## `intersect`-Function\n+\n+The `intersect`-function implements set intersection for numeric data.\n+\n+### Usage\n+```r\n+intersect(X, Y)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :--- | :----- | -------- | :---------- |\n+| X | Double | -- | matrix X, set A |\n+| Y | Double | -- | matrix Y, set B |\n+\n+### Returns\n+| Type | Description |\n+| :----- | :---------- |\n+| Double | intersection matrix, set of intersecting items |\n+\n+\n## `lmDS`-Function\nThe `lmDS`-function solves linear regression by directly solving the *linear system*.\n@@ -597,6 +620,40 @@ cMask = round(rand(rows=1,cols=ncol(F),min=0,max=1))\n[dataset, singleSet] = mice(F, cMask, iter = 3, complete = 3, verbose = FALSE)\n```\n+## `multiLogReg`-Function\n+\n+The `multiLogReg`-function solves Multinomial Logistic Regression using Trust Region method.\n+(See: Trust Region Newton Method for Logistic Regression, Lin, Weng and Keerthi, JMLR 9 (2008) 627-650)\n+\n+### Usage\n+```r\n+multiLogReg(X, Y, icpt, reg, tol, maxi, maxii, verbose)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :---- | :----- | ------- | :---------- |\n+| X | Double | -- | The matrix of feature vectors |\n+| Y | Double | -- | The matrix with category labels |\n+| icpt | Int | `0` | Intercept presence, shifting and rescaling X columns: 0 = no intercept, no shifting, no rescaling; 1 = add intercept, but neither shift nor rescale X; 2 = add intercept, shift & rescale X columns to mean = 0, variance = 1 |\n+| reg | Double | `0` | regularization parameter (lambda = 1/C); intercept is not regularized |\n+| tol | Double | `1e-6` | tolerance (\"epsilon\") |\n+| maxi | Int | `100` | max. number of outer newton interations |\n+| maxii | Int | `0` | max. number of inner (conjugate gradient) iterations |\n+\n+### Returns\n+| Type | Description |\n+| :----- | :---------- |\n+| Double | Regression betas as output for prediction |\n+\n+### Example\n+```r\n+X = rand(rows = 50, cols = 30)\n+Y = X %*% rand(rows = ncol(X), cols = 1)\n+betas = multiLogReg(X = X, Y = Y, icpt = 2, tol = 0.000001, reg = 1.0, maxi = 100, maxii = 20, verbose = TRUE)\n+```\n+\n+\n## `pnmf`-Function\nThe `pnmf`-function implements Poisson Non-negative Matrix Factorization (PNMF). Matrix `X` is factorized into\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC] multiLogReg and intersect builtin func.
Closes #954.
Closes #961. |
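A hedged DML sketch of the documented `multiLogReg` builtin; the class labels in {1,2,3} and the hyper-parameters are illustrative assumptions:

```r
X = rand(rows=200, cols=10, seed=42)
Y = round(rand(rows=200, cols=1, min=1, max=3, seed=43)) # labels in {1,2,3}
betas = multiLogReg(X=X, Y=Y, icpt=1, reg=0.1, tol=1e-6,
  maxi=50, maxii=0, verbose=FALSE)
print(toString(betas))
```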
49,689 | 15.06.2020 01:14:02 | -7,200 | 875139055af14e0939641cb2d572458ff39ccc8f | Fix bugs in cache eviction.
This patch fixes bugs in handling of multi-level cache duplicates,
eviction and reading from disk. This also adds a new test, l2svm. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/DataGenCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/DataGenCPInstruction.java",
"diff": "@@ -409,7 +409,7 @@ public class DataGenCPInstruction extends UnaryCPInstruction {\n}\ncase SEQ: {\n//replace output variable name with a placeholder\n- //tmpInstStr = InstructionUtils.replaceOperandName(tmpInstStr);\n+ tmpInstStr = InstructionUtils.replaceOperandName(tmpInstStr);\ntmpInstStr = replaceNonLiteral(tmpInstStr, seq_from, 5, ec);\ntmpInstStr = replaceNonLiteral(tmpInstStr, seq_to, 6, ec);\ntmpInstStr = replaceNonLiteral(tmpInstStr, seq_incr, 7, ec);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -380,14 +380,14 @@ public class LineageCache\nelse\ne.setValue(oe.getSOValue(), computetime);\ne._origItem = probeItem;\n- // Add the SB/func entry to the end of the list of items pointing to the same data.\n+ // Add itself as original item to navigate the list.\n+ oe._origItem = probeItem;\n+\n+ // Add the SB/func entry to the list of items pointing to the same data.\n// No cache size update is necessary.\n- LineageCacheEntry tmp = oe;\n// Maintain _origItem as head.\n- while (tmp._nextEntry != null)\n- tmp = tmp._nextEntry;\n- // FIXME: No need add at the end; add just after head.\n- tmp._nextEntry = e;\n+ e._nextEntry = oe._nextEntry;\n+ oe._nextEntry = e;\n//maintain order for eviction\nLineageCacheEviction.addEntry(e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -38,7 +38,7 @@ public class LineageCacheConfig\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\",\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n\"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n- \"^2\", \"uack+\"\n+ \"^2\", \"uack+\", \"tak+*\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"diff": "@@ -101,8 +101,12 @@ public class LineageCacheEviction\nprivate static void removeOrSpillEntry(Map<LineageItem, LineageCacheEntry> cache, LineageCacheEntry e, boolean spill) {\nif (e._origItem == null) {\n// Single entry. Remove or spill.\n- if (spill)\n- spillToLocalFS(cache, e);\n+ if (spill) {\n+ updateSize(e.getSize(), false); //Release memory\n+ spillToLocalFS(cache, e); //Spill to disk\n+ e.setNullValues(); //Set null\n+ e.setCacheStatus(LineageCacheStatus.SPILLED); //Set status to spilled\n+ }\nelse\nremoveEntry(cache, e);\nreturn;\n@@ -129,7 +133,9 @@ public class LineageCacheEviction\nif (write) {\n// Spill to disk if at least one entry has status TOSPILL.\nspillToLocalFS(cache, cache.get(e._origItem));\n- LineageCacheEntry h = cache.get(e._origItem);\n+ // Reduce cachesize once for all the entries.\n+ updateSize(e.getSize(), false);\n+ LineageCacheEntry h = cache.get(e._origItem); //head\nwhile (h != null) {\n// Set values to null for all the entries.\nh.setNullValues();\n@@ -137,8 +143,6 @@ public class LineageCacheEviction\nh.setCacheStatus(LineageCacheStatus.SPILLED);\nh = h._nextEntry;\n}\n- // Reduce cachesize once for all the entries.\n- updateSize(e.getSize(), false);\n// Keep them in cache.\nreturn;\n}\n@@ -359,9 +363,10 @@ public class LineageCacheEviction\nh.setValue(mb);\nh = h._nextEntry;\n}\n+ }\n+\n// Increase cachesize once for all the entries.\nupdateSize(e.getSize(), true);\n- }\n// Adjust disk reading speed\nadjustReadWriteSpeed(e, ((double)(t1-t0))/1000000000, true);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"diff": "@@ -39,7 +39,7 @@ public class LineageReuseAlg extends AutomatedTestBase {\nprotected static final String TEST_DIR = \"functions/lineage/\";\nprotected static final String TEST_NAME = \"LineageReuseAlg\";\n- protected static final int TEST_VARIANTS = 4;\n+ protected static final int TEST_VARIANTS = 5;\nprotected String TEST_CLASS_DIR = TEST_DIR + LineageReuseAlg.class.getSimpleName() + \"/\";\n@Override\n@@ -69,6 +69,11 @@ public class LineageReuseAlg extends AutomatedTestBase {\ntestLineageTrace(TEST_NAME+\"4\", ReuseCacheType.REUSE_HYBRID);\n}\n+ @Test\n+ public void testGridSearchL2svmHybrid() {\n+ testLineageTrace(TEST_NAME+\"5\", ReuseCacheType.REUSE_HYBRID);\n+ }\n+\n@Test\npublic void testStepLMFull() {\ntestLineageTrace(TEST_NAME+\"1\", ReuseCacheType.REUSE_FULL);\n@@ -106,7 +111,6 @@ public class LineageReuseAlg extends AutomatedTestBase {\n// Without lineage-based reuse enabled\nList<String> proArgs = new ArrayList<>();\nproArgs.add(\"-stats\");\n- proArgs.add(\"-lineage\");\nproArgs.add(\"-args\");\nproArgs.add(output(\"X\"));\nprogramArgs = proArgs.toArray(new String[proArgs.size()]);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/LineageReuseAlg5.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#hyperparameter(lambda, intercept) grid search for l2svm\n+\n+l2norm = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] B, Boolean icpt)\n+return (Matrix[Double] loss) {\n+ if (icpt)\n+ X = cbind(X, matrix(1, nrow(X), 1));\n+ loss = as.matrix(sum((y - X%*%B)^2));\n+}\n+\n+N = 1000;\n+no_lamda = 10;\n+stp = (0.1 - 0.0001)/no_lamda;\n+lamda = 0.0001;\n+Rbeta = matrix(0, rows=N+1, cols=no_lamda*2);\n+Rloss = matrix(0, rows=no_lamda*2, cols=1);\n+i = 1;\n+\n+X = rand(rows=1000, cols=N, sparsity=1.0, seed=42);\n+y = rand(rows=1000, cols=1, min=0, max=2, seed=42);\n+y = ceil(y);\n+\n+for (l in 1:no_lamda)\n+{\n+ beta = l2svm(X=X, Y=y, intercept=FALSE, epsilon=1e-12,\n+ lambda = lamda, verbose=FALSE);\n+ Rbeta[1:nrow(beta),i] = beta;\n+ Rloss[i,] = l2norm(X, y, beta, FALSE);\n+ i = i + 1;\n+\n+ beta = l2svm(X=X, Y=y, intercept=TRUE, epsilon=1e-12,\n+ lambda = lamda, verbose=FALSE);\n+ Rbeta[1:nrow(beta),i] = beta;\n+ Rloss[i,] = l2norm(X, y, beta, TRUE);\n+ i = i + 1;\n+\n+ lamda = lamda + stp;\n+}\n+\n+leastLoss = rowIndexMin(t(Rloss));\n+bestModel = Rbeta[,as.scalar(leastLoss)];\n+\n+write(bestModel, $1, format=\"text\");\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-411] Fix bugs in cache eviction.
This patch fixes bugs in handling of multi-level cache duplicates,
eviction and reading from disk. This also adds a new test, l2svm. |
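A condensed DML sketch of the reuse pattern the new l2svm test exercises: a hyper-parameter sweep whose per-iteration intermediates share lineage and therefore stress cache insertion, eviction, and spilling; the lambda grid is an illustrative assumption:

```r
X = rand(rows=500, cols=50, seed=42)
y = ceil(rand(rows=500, cols=1, min=0, max=2, seed=43))
lambdas = matrix("0.0001 0.001 0.01 0.1", rows=4, cols=1)
for (i in 1:nrow(lambdas)) {
  beta = l2svm(X=X, Y=y, intercept=FALSE, epsilon=1e-12,
    lambda=as.scalar(lambdas[i,1]), verbose=FALSE)
  print(sum(beta^2))
}
```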
49,706 | 15.06.2020 10:20:03 | -7,200 | 027f46ccad3d6ffc8140577212889dae1d8f98ca | [MINOR] Fix code coverage tool
Minor change to pom to update and re-enable code coverage in testing.
when testing using the following command (replace ??? with package name)
`mvn test -DskipTests=false -Dtest=org.apache.sysds.???`
This uses jacoco to produce a folder containing a webpage in
`target/site` that show coverage.
Closes | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<enableGPU>false</enableGPU>\n<jcuda.scope>provided</jcuda.scope>\n<jcuda.version>10.2.0</jcuda.version>\n+ <!-->Testing settings<!-->\n<skipTests>true</skipTests>\n+ <argLine>-Xms4g -Xmx4g</argLine>\n</properties>\n<repositories>\n<threadCount>12</threadCount>\n<!-- 1C means the number of threads times 1 possible maximum forks for testing-->\n<forkCount>1C</forkCount>\n- <argLine>-Xms4g -Xmx4g</argLine>\n<reuseForks>false</reuseForks>\n<reportFormat>brief</reportFormat>\n<trimStackTrace>true</trimStackTrace>\n<plugin>\n<groupId>org.jacoco</groupId>\n<artifactId>jacoco-maven-plugin</artifactId>\n- <version>0.7.6.201602180812</version>\n+ <version>0.8.5</version>\n<executions>\n<execution>\n- <id>prepare-agent</id>\n<goals>\n<goal>prepare-agent</goal>\n</goals>\n</execution>\n+ <execution>\n+ <id>generate-code-coverage-report</id>\n+ <phase>test</phase>\n+ <goals>\n+ <goal>report</goal>\n+ </goals>\n+ </execution>\n</executions>\n</plugin>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix code coverage tool
Minor change to pom to update and re-enable code coverage in testing.
When testing, use the following command (replace ??? with the package name):
`mvn test -DskipTests=false -Dtest=org.apache.sysds.???`
This uses jacoco to produce a folder containing a webpage in
`target/site` that shows coverage.
Closes #956 |
49,706 | 16.06.2020 10:35:29 | -7,200 | a4853cf98b62cadce0b53600ad720821dd134527 | [MINOR] Add fed prefix for stats
Adds Federated prefix to instructions, so the statistics returned
show federated instruction executions just like Spark or GPU
instructions.
Minor fix in Startup of worker allowing log4j to work again.
Closes | [
{
"change_type": "MODIFY",
"old_path": "bin/systemds",
"new_path": "bin/systemds",
"diff": "@@ -289,7 +289,7 @@ if [ $WORKER == 1 ]; then\nCMD=\" \\\njava $SYSTEMDS_STANDALONE_OPTS \\\n-cp $CLASSPATH \\\n- -Dlog4j.configuration=file:$LOG4JPROP \\\n+ $LOG4JPROP \\\norg.apache.sysds.api.DMLScript \\\n-w $PORT\"\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/Instruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/Instruction.java",
"diff": "@@ -57,6 +57,7 @@ public abstract class Instruction\npublic static final String INSTRUCTION_DELIM = Lop.INSTRUCTION_DELIMITOR;\npublic static final String SP_INST_PREFIX = \"sp_\";\npublic static final String GPU_INST_PREFIX = \"gpu_\";\n+ public static final String FEDERATED_INST_PREFIX = \"fed_\";\n//basic instruction meta data\nprotected String instString = null;\n@@ -197,6 +198,8 @@ public abstract class Instruction\nextendedOpcode = SP_INST_PREFIX + getOpcode();\nelse if( getType() == IType.GPU )\nextendedOpcode = GPU_INST_PREFIX + getOpcode();\n+ else if( getType() == IType.FEDERATED)\n+ extendedOpcode = FEDERATED_INST_PREFIX + getOpcode();\nelse\nextendedOpcode = getOpcode();\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add fed prefix for stats
Adds a Federated prefix to instructions, so the statistics returned
show federated instruction executions just like Spark or GPU
instructions.
Minor fix in worker startup, allowing log4j to work again.
Closes #970 |
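A hedged DML sketch of where the new `fed_` opcode prefix becomes visible: operations on a federated matrix, run with `-stats`, are reported with a `fed_` prefix analogous to `sp_`/`gpu_`; the worker address and file path below are hypothetical:

```r
# assumes a federated worker is already listening on localhost:8001
X = federated(addresses=list("localhost:8001/tmp/X.bin"),
  ranges=list(list(0, 0), list(100, 10)))
print(sum(X)) # shows up as a fed_-prefixed instruction in -stats output
```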
49,706 | 18.06.2020 17:08:23 | -7,200 | 7326e6f64e7047f18e4f3e97fb5ad3a970b75bbf | [MINOR] Fix paths to resources docs | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -28,11 +28,11 @@ systems - that either provide homogeneous tensors or 2D Datasets - and in order\nthe underlying data model are DataTensors, i.e., tensors (multi-dimensional arrays) whose first dimension may have a\nheterogeneous and nested schema.\n-**Quick Start** [Install, Quick Start and Hello World](/bin/README.md)\n+**Quick Start** [Install, Quick Start and Hello World](https://apache.github.io/systemml/site/install.html)\n-**Documentation:** [SystemDS Documentation](/docs/README.md)\n+**Documentation:** [SystemDS Documentation](https://apache.github.io/systemml/)\n-**Python Documentation** [Python SystemDS Documentation](https://damslab.github.io/docs/sysdspython/index.html)\n+**Python Documentation** [Python SystemDS Documentation](https://apache.github.io/systemml/api/python/index.html)\n**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from\n[**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2018. We will continue to support linear algebra\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/_includes/scripts.html",
"diff": "+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+{% if site.analytics_on == true %} {% case site.analytics_provider %} {% when \"google_universal\" %}\n+<!-- Analytics -->\n+<script>\n+ (function(i, s, o, g, r, a, m) {\n+ i['GoogleAnalyticsObject'] = r;\n+ i[r] = i[r] || function() {\n+ (i[r].q = i[r].q || []).push(arguments)\n+ }, i[r].l = 1 * new Date();\n+ a = s.createElement(o),\n+ m = s.getElementsByTagName(o)[0];\n+ a.async = 1;\n+ a.src = g;\n+ m.parentNode.insertBefore(a, m)\n+ })(window, document, 'script', '//www.google-analytics.com/analytics.js', 'ga');\n+ ga('create', '{{ site.analytics_google_universal_tracking_id }}', 'auto');\n+ ga('send', 'pageview');\n+</script>\n+{% endcase %} {% endif %}\n+\n+<!-- MathJax Section -->\n+<script type=\"text/x-mathjax-config\">\n+ MathJax.Hub.Config({ TeX: { equationNumbers: { autoNumber: \"AMS\" } } });\n+</script>\n+<script>\n+ // Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.\n+ // We could use \"//cdn.mathjax...\", but that won't support \"file://\".\n+ (function(d, script) {\n+ script = d.createElement('script');\n+ script.type = 'text/javascript';\n+ script.async = true;\n+ script.onload = function() {\n+ MathJax.Hub.Config({\n+ tex2jax: {\n+ inlineMath: [\n+ [\"$\", \"$\"],\n+ [\"\\\\\\\\(\", \"\\\\\\\\)\"]\n+ ],\n+ displayMath: [\n+ [\"$$\", \"$$\"],\n+ [\"\\\\[\", \"\\\\]\"]\n+ ],\n+ processEscapes: true,\n+ skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']\n+ }\n+ });\n+ };\n+ script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +\n+ 'cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML';\n+ d.getElementsByTagName('head')[0].appendChild(script);\n+ }(document));\n+</script>\n+<!-- Algolia search section -->\n+<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js\"></script>\n+<script>\n+ // Crawler configuration for the search indexing is available at:\n+ // https://github.com/algolia/docsearch-configs/blob/master/configs/apache_systemml.json\n+\n+ docsearch({\n+ apiKey: '78c19564c220d4642a41197baae304ef',\n+ indexName: 'apache_systemml',\n+ inputSelector: \"#s-bar\",\n+ // For custom styling for the dropdown, please set debug to true\n+ // so that the dropdown won't disappear when the inspect tools are\n+ // open.\n+ debug: false\n+ });\n+</script>\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/_layouts/base.html",
"diff": "+<!DOCTYPE html>\n+<!--[if lt IE 7]> <html class=\"no-js lt-ie9 lt-ie8 lt-ie7\"> <![endif]-->\n+<!--[if IE 7]> <html class=\"no-js lt-ie9 lt-ie8\"> <![endif]-->\n+<!--[if IE 8]> <html class=\"no-js lt-ie9\"> <![endif]-->\n+<!--[if gt IE 8]><!-->\n+<html class=\"no-js\">\n+<!--<![endif]-->\n+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+\n+<head>\n+ <title>{{ page.title }} - SystemDS {{site.SYSTEMDS_VERSION}}</title>\n+ <meta charset=\"utf-8\">\n+ <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge,chrome=1\">\n+ <meta name=\"description\" content=\"{{page.description | replace: 'SYSTEMDS_VERSION', site.SYSTEMDS_VERSION}}\">\n+ <meta name=\"viewport\" content=\"width=device-width\">\n+ <link rel=\"stylesheet\" href=\"./css/bootstrap.min.css\">\n+ <link rel=\"stylesheet\" href=\"./css/main.css\">\n+ <link rel=\"stylesheet\" href=\"./css/pygments-default.css\">\n+ <link rel=\"shortcut icon\" href=\"./img/favicon.png\">\n+ <link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css\" />\n+ <script src=\"./js/vendor/jquery-1.12.0.min.js\"></script>\n+ <script src=\"./js/vendor/bootstrap.min.js\"></script>\n+ <script src=\"./js/vendor/anchor.min.js\"></script>\n+ <script src=\"./js/main.js\"></script>\n+</head>\n+\n+<body>\n+ {% include header.html %}\n+ <div class=\"container\" id=\"content\">\n+ <h1 class=\"title\">{{ page.title }}</h1>\n+ {{ content }}\n+ </div>\n+ {% include scripts.html %}\n+</body>\n+\n+</html>\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/_layouts/site.html",
"diff": "+<!DOCTYPE html>\n+<!--[if lt IE 7]> <html class=\"no-js lt-ie9 lt-ie8 lt-ie7\"> <![endif]-->\n+<!--[if IE 7]> <html class=\"no-js lt-ie9 lt-ie8\"> <![endif]-->\n+<!--[if IE 8]> <html class=\"no-js lt-ie9\"> <![endif]-->\n+<!--[if gt IE 8]><!-->\n+<html class=\"no-js\">\n+<!--<![endif]-->\n+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+\n+<head>\n+ <title>{{ page.title }} - SystemDS {{site.SYSTEMDS_VERSION}}</title>\n+ <meta charset=\"utf-8\">\n+ <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge,chrome=1\">\n+ <meta name=\"description\" content=\"{{page.description | replace: 'SYSTEMDS_VERSION', site.SYSTEMDS_VERSION}}\">\n+ <meta name=\"viewport\" content=\"width=device-width\">\n+ <link rel=\"stylesheet\" href=\"./../css/bootstrap.min.css\">\n+ <link rel=\"stylesheet\" href=\"./../css/main.css\">\n+ <link rel=\"stylesheet\" href=\"./../css/pygments-default.css\">\n+ <link rel=\"shortcut icon\" href=\"./../img/favicon.png\">\n+ <link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css\" />\n+ <script src=\"./../js/vendor/jquery-1.12.0.min.js\"></script>\n+ <script src=\"./../js/vendor/bootstrap.min.js\"></script>\n+ <script src=\"./../js/vendor/anchor.min.js\"></script>\n+ <script src=\"./../js/main.js\"></script>\n+</head>\n+\n+<body>\n+ {% include header.html %}\n+ <div class=\"container\" id=\"content\">\n+ <h1 class=\"title\">{{ page.title }}</h1>\n+ {{ content }}\n+ </div>\n+ {% include scripts.html %}\n+</body>\n+\n+</html>\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/index.md",
"new_path": "docs/index.md",
"diff": "---\n-layout: global\n-displayTitle: SystemDS Documentation\n+layout: base\ntitle: SystemDS Documentation\n-description: SystemDS Documentation\n---\n<!--\n{% comment %}\n@@ -36,11 +34,11 @@ This version of SystemDS supports: Java 8+, Python 3.5+, Hadoop 2.6+ (Not 3.X),\nVarious forms of documentation for SystemDS are available.\n-- a [DML language reference](/site/dml-language-reference) for an list of operations possible inside SystemDS.\n-- [builtin functions](/site/builtins-reference) contains a collection of builtin functions providing an high level abstraction on complex machine learning algorithms.\n-- [Run SystemDS](/site/run) contains an Helloworld example along with an environment setup guide.\n-- Instructions on python can be found at [Python Documentation](/api/python/index)\n-- The [javadoc API](/api/java/index) contains internal documentation of the system source code.\n-- [Install from Source](/site/install) guides through setup from git download to running system.\n+- a [DML language reference](./site/dml-language-reference) for an list of operations possible inside SystemDS.\n+- [builtin functions](./site/builtins-reference) contains a collection of builtin functions providing an high level abstraction on complex machine learning algorithms.\n+- [Run SystemDS](./site/run) contains an Helloworld example along with an environment setup guide.\n+- Instructions on python can be found at [Python Documentation](./api/python/index)\n+- The [javadoc API](./api/java/index) contains internal documentation of the system source code.\n+- [Install from Source](./site/install) guides through setup from git download to running system.\n- If you want to contribute take a look at [Contributing](https://github.com/apache/systemml/blob/master/CONTRIBUTING.md)\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/site/builtins-reference.md",
"new_path": "docs/site/builtins-reference.md",
"diff": "---\n-layout: global\n+layout: site\ntitle: Buildin Reference\n---\n<!--\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/site/dml-language-reference.md",
"new_path": "docs/site/dml-language-reference.md",
"diff": "---\n-layout: global\n+layout: site\ntitle: DML Language Reference\n---\n<!--\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/site/install.md",
"new_path": "docs/site/install.md",
"diff": "---\n-layout: global\n+layout: site\ntitle: SystemDS Install from source\n---\n<!--\n@@ -81,4 +81,4 @@ The first time you package the system it will take longer since maven will downl\nBut successive compiles should become faster.\nNow everything is setup and ready to go!\n-To execute dml scripts i suggest to take a look at [Execute SystemDS](/site/run)\n\\ No newline at end of file\n+To execute dml scripts i suggest to take a look at [Execute SystemDS](run)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/site/run.md",
"new_path": "docs/site/run.md",
"diff": "---\n-layout: global\n+layout: site\ntitle: Running SystemDS\n---\n<!--\n@@ -21,7 +21,7 @@ limitations under the License.\n{% endcomment %}\n-->\n-If you want to execute from source code follow the [Install from source](/site/install) guide first.\n+If you want to execute from source code follow the [Install from source](install) guide first.\n## Setting SYSTEMDS_ROOT environment variable\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix paths to resources docs |
49,689 | 21.06.2020 18:23:55 | -7,200 | 2671a55fedb826a8e560401e467d372b6dc70b34 | [MINOR] Reuse rand and others, bug fixes
This patch enables reuse for rand(matrix) and a few more
instructions. Furthermore, it fixes a bug in eviction
logic that was forming cycles in the linked lists. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/DataGenCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/DataGenCPInstruction.java",
"diff": "@@ -173,6 +173,10 @@ public class DataGenCPInstruction extends UnaryCPInstruction {\nreturn minValue == maxValue && minValue == 1 && sparsity == 1 && getCols() == 1;\n}\n+ public boolean isMatrixCall() {\n+ return minValue == maxValue && sparsity == 1;\n+ }\n+\npublic long getFrom() {\nreturn seq_from.isLiteral() ? UtilFunctions.parseToLong(seq_from.getName()) : -1;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -375,6 +375,7 @@ public class LineageCache\nif (LineageCache.probe(probeItem)) {\nLineageCacheEntry oe = getIntern(probeItem);\nLineageCacheEntry e = _cache.get(item);\n+ boolean exists = !e.isNullVal();\nif (oe.isMatrixValue())\ne.setValue(oe.getMBValue(), computetime);\nelse\n@@ -386,8 +387,10 @@ public class LineageCache\n// Add the SB/func entry to the list of items pointing to the same data.\n// No cache size update is necessary.\n// Maintain _origItem as head.\n+ if (!exists) {\ne._nextEntry = oe._nextEntry;\noe._nextEntry = e;\n+ }\n//maintain order for eviction\nLineageCacheEviction.addEntry(e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -24,6 +24,7 @@ import org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.instructions.Instruction;\nimport org.apache.sysds.runtime.instructions.cp.ComputationCPInstruction;\n+import org.apache.sysds.runtime.instructions.cp.DataGenCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ListIndexingCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.MatrixIndexingCPInstruction;\n@@ -38,7 +39,7 @@ public class LineageCacheConfig\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\",\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n\"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n- \"^2\", \"uack+\", \"tak+*\"\n+ \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n@@ -152,7 +153,8 @@ public class LineageCacheConfig\nboolean insttype = inst instanceof ComputationCPInstruction\n&& !(inst instanceof ListIndexingCPInstruction);\nboolean rightop = (ArrayUtils.contains(REUSE_OPCODES, inst.getOpcode())\n- || (inst.getOpcode().equals(\"append\") && isVectorAppend(inst, ec)));\n+ || (inst.getOpcode().equals(\"append\") && isVectorAppend(inst, ec))\n+ || (inst instanceof DataGenCPInstruction) && ((DataGenCPInstruction) inst).isMatrixCall());\nboolean updateInplace = (inst instanceof MatrixIndexingCPInstruction)\n&& ec.getMatrixObject(((ComputationCPInstruction)inst).input1).getUpdateType().isInPlace();\nreturn insttype && rightop && !updateInplace;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageReuseAlg3.dml",
"new_path": "src/test/scripts/functions/lineage/LineageReuseAlg3.dml",
"diff": "@@ -23,7 +23,7 @@ findBetas = function(Matrix[double] X, Matrix[double] y)\nreturn (Matrix[double] all_betas)\n{\nR = matrix(0, rows=10*(ncol(X)+1), cols=5);\n- for (lamda in 20:25) {\n+ for (lamda in 20:39) {\n#betas = multiLogReg(X=X, Y=y, maxii=0, verbose=FALSE);\nbetas = multiLogReg(X=X, Y=y, icpt=2, tol=0.000001,\nreg=lamda, maxi=100, maxii=0, verbose=FALSE);\n@@ -36,7 +36,7 @@ findIcpt = function(Matrix[double] X, Matrix[double] y)\nreturn (Matrix[double] all_betas)\n{\nR = matrix(0, rows=12*(ncol(X)+2), cols=5);\n- for (lamda in 20:22) {\n+ for (lamda in 20:29) {\nfor (icpt in 1:2) {\n#Function level reuse of 3 out of 6 calls.\nbetas = multiLogReg(X=X, Y=y, icpt=icpt, tol=0.000001,\n@@ -49,7 +49,7 @@ findIcpt = function(Matrix[double] X, Matrix[double] y)\n}\n-X = rand(rows=1000, cols=1000, sparsity=1.0, seed=42);\n+X = rand(rows=1000, cols=100, sparsity=1.0, seed=42);\ny = rand(rows=1000, cols=1, min=0, max=6, sparsity=1.0, seed=42);\ny = floor(y);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/RewriteTest2.dml",
"new_path": "src/test/scripts/functions/lineage/RewriteTest2.dml",
"diff": "X = read($1);\nsum = 0;\n-tmp = X[,1];\n+tmp = matrix(0, rows=nrow(X), cols=0);\nR = matrix(0, 1, ncol(X));\n-for (i in 2:ncol(X)) {\n- Res1 = t(tmp) %*% tmp;\n+for (i in 1:ncol(X)) {\ntmp = cbind(tmp, X[,i]);\n+ Res1 = t(tmp) %*% tmp;\nwhile(FALSE) {}\nR[1,i] = sum(Res1);\nsum = sum + sum(Res1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Reuse rand and others, bug fixes
This patch enables reuse for rand(matrix) and a few more
instructions. Furthermore, it fixes a bug in eviction
logic that was forming cycles in the linked lists. |
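The eviction fix above is easiest to see on a toy linked list: if an entry that is already linked behind the head gets linked again, `e.next = head.next` followed by `head.next = e` degenerates into a self-loop. A minimal sketch of the guarded relink — `Entry` and `setValueAndLink` are hypothetical stand-ins, not the actual SystemDS `LineageCacheEntry` API:

```java
// Minimal sketch of the guarded relink; Entry is a hypothetical stand-in
// for the cache entry type, not the real SystemDS class.
final class Entry {
    Object value;   // null until the cached data is set
    Entry next;     // next entry sharing the same cached data
    boolean isNullVal() { return value == null; }
}

final class MultiLevelList {
    // Link e behind head only on first materialization. Without the
    // 'exists' guard, a repeated call with e == head.next would execute
    // e.next = head.next (== e) and create a self-cycle.
    static void setValueAndLink(Entry head, Entry e, Object data) {
        boolean exists = !e.isNullVal();
        e.value = data;
        if (!exists) {
            e.next = head.next;
            head.next = e;
        }
    }
}
```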
49,689 | 21.06.2020 19:20:12 | -7,200 | 882760be365a4b47a33ed3f9ab46b97a9bd853fd | New rewrite for PCA -> lmDS pipeline
This patch contains a rewrite to reuse tsmm results in lmDS when
called after PCA incrementally with an increasing number of columns. | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -339,6 +339,7 @@ SYSTEMDS-410 Lineage Tracing, Reuse and Integration II\n* 411 Improved handling of multi-level cache duplicates OK\n* 412 Robust lineage tracing (non-recursive, parfor) OK\n* 413 Cache and reuse MultiReturnBuiltin instructions OK\n+ * 414 New rewrite for PCA --> lmDS pipeline OK\nSYSTEMDS-500 Documentation Webpage Reintroduction\n* 501 Make Documentation webpage framework OK\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageReuseAlg.java",
"diff": "@@ -39,7 +39,7 @@ public class LineageReuseAlg extends AutomatedTestBase {\nprotected static final String TEST_DIR = \"functions/lineage/\";\nprotected static final String TEST_NAME = \"LineageReuseAlg\";\n- protected static final int TEST_VARIANTS = 5;\n+ protected static final int TEST_VARIANTS = 6;\nprotected String TEST_CLASS_DIR = TEST_DIR + LineageReuseAlg.class.getSimpleName() + \"/\";\n@Override\n@@ -74,6 +74,11 @@ public class LineageReuseAlg extends AutomatedTestBase {\ntestLineageTrace(TEST_NAME+\"5\", ReuseCacheType.REUSE_HYBRID);\n}\n+ @Test\n+ public void testPCA_LM_pipeline() {\n+ testLineageTrace(TEST_NAME+\"6\", ReuseCacheType.REUSE_HYBRID);\n+ }\n+\n@Test\npublic void testStepLMFull() {\ntestLineageTrace(TEST_NAME+\"1\", ReuseCacheType.REUSE_FULL);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/LineageReuseAlg6.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#PCA -> LM pipeline\n+\n+checkR2 = function(Matrix[double] X, Matrix[double] y, Matrix[double] y_p,\n+ Matrix[double] beta, Integer icpt) return (Double R2_ad)\n+{\n+ n = nrow(X);\n+ m = ncol(X);\n+ m_ext = m;\n+ if (icpt == 1|icpt == 2)\n+ m_ext = m+1; #due to extra column ones\n+ avg_tot = sum(y)/n;\n+ ss_tot = sum(y^2);\n+ ss_avg_tot = ss_tot - n*avg_tot^2;\n+ y_res = y - y_p;\n+ avg_res = sum(y - y_p)/n;\n+ ss_res = sum((y - y_p)^2);\n+ R2 = 1 - ss_res/ss_avg_tot;\n+ dispersion = ifelse(n>m_ext, ss_res/(n-m_ext), NaN);\n+ R2_ad = ifelse(n>m_ext, 1-dispersion/(ss_avg_tot/(n-1)), NaN);\n+}\n+\n+PCA = function(Matrix[Double] A, Integer K = ncol(A), Integer center = 1, Integer scale = 1,\n+ Integer projectData = 1) return(Matrix[Double] newA)\n+{\n+ N = nrow(A);\n+ D = ncol(A);\n+\n+ # perform z-scoring (centering and scaling)\n+ A = scale(A, center==1, scale==1);\n+\n+ # co-variance matrix\n+ mu = colSums(A)/N;\n+ C = (t(A) %*% A)/(N-1) - (N/(N-1))*t(mu) %*% mu;\n+\n+ # compute eigen vectors and values\n+ [evalues, evectors] = eigen(C);\n+\n+ decreasing_Idx = order(target=evalues,by=1,decreasing=TRUE,index.return=TRUE);\n+ diagmat = table(seq(1,D),decreasing_Idx);\n+ # sorts eigenvalues by decreasing order\n+ evalues = diagmat %*% evalues;\n+ # sorts eigenvectors column-wise in the order of decreasing eigenvalues\n+ evectors = evectors %*% diagmat;\n+\n+\n+ # select K dominant eigen vectors\n+ nvec = ncol(evectors);\n+\n+ eval_dominant = evalues[1:K, 1];\n+ evec_dominant = evectors[,1:K];\n+\n+ # the square root of eigenvalues\n+ eval_stdev_dominant = sqrt(eval_dominant);\n+\n+ if (projectData == 1){\n+ # Construct new data set by treating computed dominant eigenvectors as the basis vectors\n+ newA = A %*% evec_dominant;\n+ }\n+}\n+\n+M = 1000;\n+A = rand(rows=M, cols=100, seed=42);\n+y = rand(rows=M, cols=1, seed=1);\n+R = matrix(0, rows=1, cols=20);\n+\n+Kc = floor(ncol(A) * 0.8);\n+\n+for (i in 1:10) {\n+ newA1 = PCA(A=A, K=Kc+i);\n+ beta1 = lm(X=newA1, y=y, icpt=1, reg=0.0001, verbose=FALSE);\n+ y_predict1 = lmpredict(X=newA1, w=beta1, icpt=1);\n+ R2_ad1 = checkR2(newA1, y, y_predict1, beta1, 1);\n+ R[,i] = R2_ad1;\n+}\n+\n+write(R, $1, format=\"text\");\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-414] New rewrite for PCA -> lmDS pipeline
This patch contains a rewrite to reuse tsmm results in lmDS when
called after PCA incrementally with an increasing number of columns. |
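The rewrite rests on a block identity of tsmm: with G = t(X) %*% X already cached, t(cbind(X, dx)) %*% cbind(X, dx) reuses G and only adds one bordering row/column, t(cbind(X, dx)) %*% dx. A plain-array sketch of that assembly (illustrative only; the actual rewrite builds equivalent HOP DAGs in LineageRewriteReuse):

```java
// Illustrative assembly of tsmm(cbind(X, dx)) from a cached G = t(X) %*% X.
// Plain double[][] arrays; the real rewrite emits equivalent HOPs.
final class IncTsmm {
    static double[][] incrementalTsmm(double[][] G, double[][] X, double[] dx) {
        int n = X.length;        // rows
        int k = G.length;        // cached columns, G is k x k
        double[][] R = new double[k + 1][k + 1];
        for (int i = 0; i < k; i++)
            System.arraycopy(G[i], 0, R[i], 0, k);   // reuse cached block
        for (int j = 0; j <= k; j++) {               // new border: t(X|dx) %*% dx
            double s = 0;
            for (int r = 0; r < n; r++)
                s += (j < k ? X[r][j] : dx[r]) * dx[r];
            R[j][k] = s;
            R[k][j] = s;                             // tsmm output is symmetric
        }
        return R;
    }
}
```

Only the border costs O(n*k) work per appended column, instead of O(n*k^2) for recomputing the full tsmm.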
49,738 | 23.06.2020 22:46:05 | -7,200 | c6d7a52e2e4259fa62ba8e0b15cdfe1397baac0f | [MINOR] Additional lineage parfor remote tests, and cleanups
This patch adds msvm w/ remote_spark parfor workers to the test suite
and fixes missing support for tak+ operators in the recompute-by-lineage
utility. | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/l2svm.dml",
"new_path": "scripts/builtin/l2svm.dml",
"diff": "@@ -72,7 +72,7 @@ m_l2svm = function(Matrix[Double] X, Matrix[Double] Y, Boolean intercept = FALSE\n# TODO make this a stop condition for l2svm instead of just printing.\nif(num_min + num_max != nrow(Y))\n- print(\"L2SVM: WARNING invalid number of labels in Y\")\n+ print(\"L2SVM: WARNING invalid number of labels in Y: \"+num_min+\" \"+num_max)\n# Scale inputs to -1 for negative, and 1 for positive classification\nif(check_min != -1 | check_max != +1)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/ipa/FunctionCallSizeInfo.java",
"new_path": "src/main/java/org/apache/sysds/hops/ipa/FunctionCallSizeInfo.java",
"diff": "@@ -233,14 +233,11 @@ public class FunctionCallSizeInfo\n&& h1.getDim1()==h2.getDim1()\n&& h1.getDim2()==h2.getDim2()\n&& h1.getNnz()==h2.getNnz() );\n- //check literal values (equi value)\n- if( h1 instanceof LiteralOp ) {\n- consistent &= (h2 instanceof LiteralOp\n+ //check literal values (both needs to be literals and same value)\n+ if( h1 instanceof LiteralOp || h2 instanceof LiteralOp ) {\n+ consistent &= (h1 instanceof LiteralOp && h2 instanceof LiteralOp\n&& HopRewriteUtils.isEqualValue((LiteralOp)h1, (LiteralOp)h2));\n}\n- else if(h2 instanceof LiteralOp) {\n- consistent = false; //h2 literal, but h1 not\n- }\n}\n}\nif( consistent )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItemUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItemUtils.java",
"diff": "@@ -278,6 +278,24 @@ public class LineageItemUtils {\noperands.put(item.getId(), aggunary);\nbreak;\n}\n+ case AggregateBinary: {\n+ Hop input1 = operands.get(item.getInputs()[0].getId());\n+ Hop input2 = operands.get(item.getInputs()[1].getId());\n+ Hop aggbinary = HopRewriteUtils.createMatrixMultiply(input1, input2);\n+ operands.put(item.getId(), aggbinary);\n+ break;\n+ }\n+ case AggregateTernary: {\n+ Hop input1 = operands.get(item.getInputs()[0].getId());\n+ Hop input2 = operands.get(item.getInputs()[1].getId());\n+ Hop input3 = operands.get(item.getInputs()[2].getId());\n+ Hop aggternary = HopRewriteUtils.createSum(\n+ HopRewriteUtils.createBinary(\n+ HopRewriteUtils.createBinary(input1, input2, OpOp2.MULT),\n+ input3, OpOp2.MULT));\n+ operands.put(item.getId(), aggternary);\n+ break;\n+ }\ncase Unary:\ncase Builtin: {\nHop input = operands.get(item.getInputs()[0].getId());\n@@ -308,13 +326,6 @@ public class LineageItemUtils {\noperands.put(item.getId(), binary);\nbreak;\n}\n- case AggregateBinary: {\n- Hop input1 = operands.get(item.getInputs()[0].getId());\n- Hop input2 = operands.get(item.getInputs()[1].getId());\n- Hop aggbinary = HopRewriteUtils.createMatrixMultiply(input1, input2);\n- operands.put(item.getId(), aggbinary);\n- break;\n- }\ncase Ternary: {\noperands.put(item.getId(), HopRewriteUtils.createTernary(\noperands.get(item.getInputs()[0].getId()),\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageTraceParforTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageTraceParforTest.java",
"diff": "@@ -46,6 +46,7 @@ public class LineageTraceParforTest extends AutomatedTestBase {\nprotected static final String TEST_NAME3 = \"LineageTraceParfor3\"; //rand - matrix result - remote spark parfor\nprotected static final String TEST_NAME4 = \"LineageTraceParforSteplm\"; //rand - steplm\nprotected static final String TEST_NAME5 = \"LineageTraceParforKmeans\"; //rand - kmeans\n+ protected static final String TEST_NAME6 = \"LineageTraceParforMSVM\"; //rand - msvm remote parfor\nprotected String TEST_CLASS_DIR = TEST_DIR + LineageTraceParforTest.class.getSimpleName() + \"/\";\n@@ -63,6 +64,7 @@ public class LineageTraceParforTest extends AutomatedTestBase {\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] {\"R\"}) );\naddTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] {\"R\"}) );\naddTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] {\"R\"}) );\n+ addTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] {\"R\"}) );\n}\n@Test\n@@ -135,6 +137,11 @@ public class LineageTraceParforTest extends AutomatedTestBase {\ntestLineageTraceParFor(32, TEST_NAME5);\n}\n+ @Test\n+ public void testLineageTraceMSVM_Remote64() {\n+ testLineageTraceParFor(64, TEST_NAME6);\n+ }\n+\nprivate void testLineageTraceParFor(int ncol, String testname) {\ntry {\nSystem.out.println(\"------------ BEGIN \" + testname + \"------------\");\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/LineageTraceParforMSVM.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+msvm2 = function(Matrix[Double] X, Matrix[Double] Y, Boolean intercept = FALSE,\n+ Double epsilon = 0.001, Double lambda = 1.0, Integer maxIterations = 100, Boolean verbose = FALSE)\n+ return(Matrix[Double] model)\n+{\n+ if(min(Y) < 0)\n+ stop(\"MSVM: Invalid Y input, containing negative values\")\n+\n+ if(verbose)\n+ print(\"Running Multiclass-SVM\")\n+\n+ num_rows_in_w = ncol(X)\n+ if(intercept) {\n+ num_rows_in_w = num_rows_in_w + 1\n+ }\n+\n+ if(ncol(Y) > 1)\n+ Y = rowMaxs(Y * t(seq(1,ncol(Y))))\n+\n+ # Assuming number of classes to be max contained in Y\n+ w = matrix(0, rows=num_rows_in_w, cols=max(Y))\n+\n+ parfor(class in 1:max(Y), opt=CONSTRAINED, par=4, mode=REMOTE_SPARK) {\n+ Y_local = 2 * (Y == class) - 1\n+ w[,class] = l2svm(X=X, Y=Y_local, intercept=intercept,\n+ epsilon=epsilon, lambda=lambda, maxIterations=maxIterations,\n+ verbose= verbose, columnId=class)\n+ }\n+\n+ model = w\n+}\n+\n+nclass = 10;\n+\n+X = rand(rows=$2, cols=$3, seed=1);\n+y = rand(rows=$2, cols=1, min=0, max=nclass, seed=2);\n+y = ceil(y);\n+\n+model = msvm2(X=X, Y=y, intercept=FALSE);\n+\n+write(model, $1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Additional lineage parfor remote tests, and cleanups
This patch adds msvm w/ remote_spark parfor workers to the test suite
and fixes missing support for tak+ operators in the recompute-by-lineage
utility. |
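The AggregateTernary case added above reconstructs the fused tak+* operator as sum((X*Y)*Z). Its scalar semantics, spelled out on dense arrays for reference (a sketch, not the SystemDS kernel):

```java
// Reference semantics of tak+* on dense inputs of equal shape:
// a cell-wise triple product followed by a full aggregate,
// i.e., sum((X * Y) * Z) as rebuilt by the new AggregateTernary case.
final class TakPlusMult {
    static double takPlusMult(double[][] X, double[][] Y, double[][] Z) {
        double sum = 0;
        for (int i = 0; i < X.length; i++)
            for (int j = 0; j < X[i].length; j++)
                sum += X[i][j] * Y[i][j] * Z[i][j];
        return sum;
    }
}
```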
49,738 | 27.06.2020 00:26:01 | -7,200 | dfb36d102ff76a55d130b33fad7791dd2108db9c | Initial mlcontext lineage support (tracing, reuse)
This patch adds basic lineage support to the MLContext API. Since
in-memory objects are directly bound to the symbol table, lineage
tracing viewed these objects as literals and incorrectly reused
intermediates even if different in-memory objects were used in
subsequent MLContext invocations. | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -340,6 +340,7 @@ SYSTEMDS-410 Lineage Tracing, Reuse and Integration II\n* 412 Robust lineage tracing (non-recursive, parfor) OK\n* 413 Cache and reuse MultiReturnBuiltin instructions OK\n* 414 New rewrite for PCA --> lmDS pipeline OK\n+ * 415 MLContext lineage support (tracing and reuse correctness) OK\nSYSTEMDS-420 Compiler Improvements\n* 421 Fix invalid IPA scalar propagation into functions OK\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/mlcontext/MLContext.java",
"new_path": "src/main/java/org/apache/sysds/api/mlcontext/MLContext.java",
"diff": "@@ -41,6 +41,8 @@ import org.apache.sysds.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysds.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysds.runtime.instructions.cp.Data;\nimport org.apache.sysds.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.meta.MetaDataFormat;\nimport org.apache.sysds.utils.MLContextProxy;\nimport org.apache.sysds.utils.Explain.ExplainType;\n@@ -386,13 +388,34 @@ public class MLContext implements ConfigurableAPI\n* to standard output.\n*\n* @param explain\n- * {@code true} if explanation should be output, {@code false}\n- * otherwise\n+ * {@code true} if explanation should be output, {@code false} otherwise\n*/\npublic void setExplain(boolean explain) {\nthis.explain = explain;\n}\n+ /**\n+ * Set whether or not lineage should be traced\n+ *\n+ * @param lineage\n+ * {@code true} if lineage should be traced, {@code false} otherwise\n+ */\n+ public void setLineage(boolean lineage) {\n+ DMLScript.LINEAGE = lineage;\n+ }\n+\n+ /**\n+ * Set type of lineage-based reuse caching and enable lineage tracing\n+ *\n+ * @param reuse\n+ * reuse cache type to use\n+ */\n+ public void setLineage(ReuseCacheType reuse) {\n+ DMLScript.LINEAGE_REUSE = reuse;\n+ setLineage(true);\n+ LineageCacheConfig.setConfig(reuse);\n+ }\n+\n/**\n* Obtain whether or not all values should be maintained in the symbol table\n* after execution.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysds/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -50,6 +50,7 @@ import org.apache.sysds.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysds.runtime.controlprogram.Program;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContextFactory;\n+import org.apache.sysds.runtime.lineage.LineageItemUtils;\nimport org.apache.sysds.utils.Explain;\nimport org.apache.sysds.utils.Statistics;\nimport org.apache.sysds.utils.Explain.ExplainCounts;\n@@ -214,8 +215,11 @@ public class ScriptExecutor {\nprotected void createAndInitializeExecutionContext() {\nexecutionContext = ExecutionContextFactory.createContext(runtimeProgram);\nLocalVariableMap symbolTable = script.getSymbolTable();\n- if (symbolTable != null)\n+ if (symbolTable != null) {\nexecutionContext.setVariables(symbolTable);\n+ if( DMLScript.LINEAGE )\n+ LineageItemUtils.addAllDataLineage(executionContext);\n+ }\n//attach registered outputs (for dynamic recompile)\nexecutionContext.getVariables().setRegisteredOutputs(\nnew HashSet<>(script.getOutputVariables()));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/lops/compile/Dag.java",
"new_path": "src/main/java/org/apache/sysds/lops/compile/Dag.java",
"diff": "@@ -775,7 +775,7 @@ public class Dag<N extends Lop>\n//String createInst = prepareVariableInstruction(\"createvar\", node);\n//out.addPreInstruction(CPInstructionParser.parseSingleInstruction(createInst));\nint blen = (int) oparams.getBlocksize();\n- Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(\n+ Instruction createvarInst = VariableCPInstruction.prepCreatevarInstruction(\noparams.getLabel(), oparams.getFile_name(), true, node.getDataType(),\ngetOutputFileFormat(node, false).toString(),\nnew MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), blen, oparams.getNnz()),\n@@ -803,7 +803,7 @@ public class Dag<N extends Lop>\nfor( Lop fnOut: fcall.getFunctionOutputs()) {\nOutputParameters fnOutParams = fnOut.getOutputParameters();\n//OutputInfo oinfo = getOutputInfo((N)fnOut, false);\n- Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(\n+ Instruction createvarInst = VariableCPInstruction.prepCreatevarInstruction(\nfnOutParams.getLabel(), getFilePath() + fnOutParams.getLabel(),\ntrue, fnOut.getDataType(), getOutputFileFormat(fnOut, false).toString(),\nnew MatrixCharacteristics(fnOutParams.getNumRows(), fnOutParams.getNumCols(), (int)fnOutParams.getBlocksize(), fnOutParams.getNnz()),\n@@ -913,7 +913,7 @@ public class Dag<N extends Lop>\nString tempFileName = getNextUniqueFilename();\nint blen = (int) oparams.getBlocksize();\n- Instruction createvarInst = VariableCPInstruction.prepareCreateVariableInstruction(\n+ Instruction createvarInst = VariableCPInstruction.prepCreatevarInstruction(\ntempVarName, tempFileName, true, node.getDataType(), out.getOutInfo().toString(),\nnew MatrixCharacteristics(oparams.getNumRows(), oparams.getNumCols(), blen, oparams.getNnz()),\noparams.getUpdateType());\n@@ -946,7 +946,7 @@ public class Dag<N extends Lop>\n// Generate a single mvvar instruction (e.g., mvvar tempA A)\n// instead of two instructions \"cpvar tempA A\" and \"rmvar tempA\"\n- Instruction currInstr = VariableCPInstruction.prepareMoveInstruction(tempVarName, constVarName);\n+ Instruction currInstr = VariableCPInstruction.prepMoveInstruction(tempVarName, constVarName);\ncurrInstr.setLocation(node);\n@@ -1010,7 +1010,7 @@ public class Dag<N extends Lop>\n&& ((VariableCPInstruction)inst2).isRemoveVariableNoFile()\n&& inst1.getInput1().getName().equals(\n((VariableCPInstruction)inst2).getInput1().getName()) ) {\n- ret.add(VariableCPInstruction.prepareMoveInstruction(\n+ ret.add(VariableCPInstruction.prepMoveInstruction(\ninst1.getInput1().getName(), inst1.getInput2().getName()));\n}\nelse {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -215,7 +215,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n*/\nprotected CacheableData(DataType dt, ValueType vt) {\nsuper (dt, vt);\n- _uniqueID = isCachingActive() ? _seq.getNextID() : -1;\n+ _uniqueID = _seq.getNextID();\n_cacheStatus = CacheStatus.EMPTY;\n_numReadThreads = 0;\n_gpuObjects = DMLScript.USE_ACCELERATOR ? new HashMap<>() : null;\n@@ -271,6 +271,10 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nreturn _hdfsFileName;\n}\n+ public long getUniqueID() {\n+ return _uniqueID;\n+ }\n+\npublic synchronized void setFileName( String file ) {\nif( _hdfsFileName!=null && !_hdfsFileName.equals(file) )\nif( !isEmpty(true) )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"diff": "@@ -1126,7 +1126,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nreturn parseInstruction(sb.toString());\n}\n- public static Instruction prepareMoveInstruction(String srcVar, String destFileName, String format) {\n+ public static Instruction prepMoveInstruction(String srcVar, String destFileName, String format) {\nStringBuilder sb = new StringBuilder();\nsb.append(\"CP\");\nsb.append(Lop.OPERAND_DELIMITOR);\n@@ -1141,7 +1141,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nreturn parseInstruction(str);\n}\n- public static Instruction prepareMoveInstruction(String srcVar, String destVar) {\n+ public static Instruction prepMoveInstruction(String srcVar, String destVar) {\n// example: mvvar tempA A\nStringBuilder sb = new StringBuilder();\nsb.append(\"CP\");\n@@ -1155,7 +1155,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nreturn parseInstruction(str);\n}\n- private static String getBasicCreateVarString(String varName, String fileName, boolean fNameOverride, DataType dt, String format) {\n+ private static String getBasicCreatevarString(String varName, String fileName, boolean fNameOverride, DataType dt, String format) {\n//note: the filename override property leads to concatenation of unique ids in order to\n//ensure conflicting filenames for objects that originate from the same instruction\nboolean lfNameOverride = fNameOverride && !ConfigurationManager\n@@ -1179,13 +1179,13 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nreturn sb.toString();\n}\n- public static Instruction prepareCreateMatrixVariableInstruction(String varName, String fileName, boolean fNameOverride, String format) {\n- return parseInstruction(getBasicCreateVarString(varName, fileName, fNameOverride, DataType.MATRIX, format));\n+ public static Instruction prepCreatevarInstruction(String varName, String fileName, boolean fNameOverride, String format) {\n+ return parseInstruction(getBasicCreatevarString(varName, fileName, fNameOverride, DataType.MATRIX, format));\n}\n- public static Instruction prepareCreateVariableInstruction(String varName, String fileName, boolean fNameOverride, DataType dt, String format, DataCharacteristics mc, UpdateType update) {\n+ public static Instruction prepCreatevarInstruction(String varName, String fileName, boolean fNameOverride, DataType dt, String format, DataCharacteristics mc, UpdateType update) {\nStringBuilder sb = new StringBuilder();\n- sb.append(getBasicCreateVarString(varName, fileName, fNameOverride, dt, format));\n+ sb.append(getBasicCreatevarString(varName, fileName, fNameOverride, dt, format));\nsb.append(Lop.OPERAND_DELIMITOR);\nsb.append(mc.getRows());\n@@ -1203,9 +1203,9 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\nreturn parseInstruction(str);\n}\n- public static Instruction prepareCreateVariableInstruction(String varName, String fileName, boolean fNameOverride, DataType dt, String format, DataCharacteristics mc, UpdateType update, boolean hasHeader, String delim, boolean sparse) {\n+ public static Instruction prepCreatevarInstruction(String varName, String fileName, boolean fNameOverride, DataType dt, String format, DataCharacteristics mc, UpdateType update, boolean hasHeader, String delim, boolean sparse) {\nStringBuilder sb = new StringBuilder();\n- sb.append(getBasicCreateVarString(varName, fileName, fNameOverride, dt, format));\n+ sb.append(getBasicCreatevarString(varName, 
fileName, fNameOverride, dt, format));\nsb.append(Lop.OPERAND_DELIMITOR);\nsb.append(mc.getRows());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItemUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItemUtils.java",
"diff": "@@ -64,6 +64,7 @@ import org.apache.sysds.parser.Statement;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.BasicProgramBlock;\nimport org.apache.sysds.runtime.controlprogram.Program;\n+import org.apache.sysds.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysds.runtime.instructions.Instruction;\n@@ -86,6 +87,7 @@ import java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.LinkedHashMap;\nimport java.util.Map;\n+import java.util.Map.Entry;\nimport java.util.Set;\nimport java.util.Stack;\nimport java.util.stream.Collectors;\n@@ -825,4 +827,18 @@ public class LineageItemUtils {\nreturn(CPOpInputs != null ? LineageItemUtils.getLineage(ec,\nCPOpInputs.toArray(new CPOperand[CPOpInputs.size()])) : null);\n}\n+\n+ public static void addAllDataLineage(ExecutionContext ec) {\n+ for( Entry<String, Data> e : ec.getVariables().entrySet() ) {\n+ if( e.getValue() instanceof CacheableData<?> ) {\n+ CacheableData<?> cdata = (CacheableData<?>) e.getValue();\n+ //only createvar instruction with pREAD prefix added to lineage\n+ String fromVar = org.apache.sysds.lops.Data.PREAD_PREFIX+e.getKey();\n+ ec.traceLineage(VariableCPInstruction.prepCreatevarInstruction(\n+ fromVar, \"CacheableData::\"+cdata.getUniqueID(), false, \"binary\"));\n+ //move from pREADx to x\n+ ec.traceLineage(VariableCPInstruction.prepMoveInstruction(fromVar, e.getKey()));\n+ }\n+ }\n+ }\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageMLContextTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.lineage;\n+\n+import static org.apache.sysds.api.mlcontext.ScriptFactory.dml;\n+\n+import java.util.ArrayList;\n+import java.util.List;\n+\n+import org.apache.spark.api.java.JavaRDD;\n+import org.junit.Test;\n+import org.apache.sysds.api.mlcontext.MatrixFormat;\n+import org.apache.sysds.api.mlcontext.MatrixMetadata;\n+import org.apache.sysds.api.mlcontext.Script;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\n+import org.apache.sysds.test.functions.mlcontext.MLContextTestBase;\n+\n+public class LineageMLContextTest extends MLContextTestBase {\n+\n+ @Test\n+ public void testPrintLineage() {\n+ System.out.println(\"LineageMLContextTest - JavaRDD<String> IJV sum DML\");\n+\n+ List<String> list = new ArrayList<>();\n+ list.add(\"1 1 5\");\n+ list.add(\"2 2 5\");\n+ list.add(\"3 3 5\");\n+ JavaRDD<String> javaRDD = sc.parallelize(list);\n+ MatrixMetadata mm = new MatrixMetadata(MatrixFormat.IJV, 3, 3);\n+\n+ Script script = dml(\n+ \"print('sum: '+sum(M+M));\"\n+ +\"print(lineage(M+M));\"\n+ ).in(\"M\", javaRDD, mm);\n+ setExpectedStdOut(\"sum: 30.0\");\n+\n+ ml.setLineage(ReuseCacheType.NONE);\n+ ml.execute(script);\n+ }\n+\n+ @Test\n+ public void testReuseSameRDD() {\n+ System.out.println(\"LineageMLContextTest - JavaRDD<String> IJV sum DML\");\n+\n+ List<String> list = new ArrayList<>();\n+ list.add(\"1 1 5\");\n+ list.add(\"2 2 5\");\n+ list.add(\"3 3 5\");\n+ JavaRDD<String> javaRDD = sc.parallelize(list);\n+ MatrixMetadata mm = new MatrixMetadata(MatrixFormat.IJV, 3, 3);\n+\n+ Script script = dml(\n+ \"print('sum: '+sum(M+M));\"\n+ +\"print(lineage(M+M));\"\n+ ).in(\"M\", javaRDD, mm);\n+ setExpectedStdOut(\"sum: 30.0\");\n+\n+ ml.setLineage(ReuseCacheType.REUSE_FULL);\n+ ml.execute(script);\n+ ml.execute(script); //w/ reuse\n+ }\n+\n+ @Test\n+ public void testNoReuseDifferentRDD() {\n+ System.out.println(\"LineageMLContextTest - JavaRDD<String> IJV sum DML\");\n+\n+ List<String> list = new ArrayList<>();\n+ list.add(\"1 1 5\");\n+ list.add(\"2 2 5\");\n+ list.add(\"3 3 5\");\n+ JavaRDD<String> javaRDD = sc.parallelize(list);\n+ MatrixMetadata mm = new MatrixMetadata(MatrixFormat.IJV, 3, 3);\n+\n+ Script script = dml(\n+ \"print('sum: '+sum(M+M));\"\n+ +\"print(lineage(M+M));\"\n+ ).in(\"M\", javaRDD, mm);\n+\n+ ml.setLineage(ReuseCacheType.REUSE_FULL);\n+\n+ setExpectedStdOut(\"sum: 30.0\");\n+ ml.execute(script);\n+\n+ list.add(\"4 4 5\");\n+ JavaRDD<String> javaRDD2 = sc.parallelize(list);\n+ MatrixMetadata mm2 = new MatrixMetadata(MatrixFormat.IJV, 4, 4);\n+ script.in(\"M\", javaRDD2, mm2);\n+\n+ setExpectedStdOut(\"sum: 40.0\");\n+ ml.execute(script); //w/o reuse\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/recompile/IPAConstantPropagationFunTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/recompile/IPAConstantPropagationFunTest.java",
"diff": "@@ -30,27 +30,38 @@ import org.apache.sysds.test.TestUtils;\npublic class IPAConstantPropagationFunTest extends AutomatedTestBase\n{\n- private final static String TEST_NAME1 = \"IPAFunctionArgs\";\n+ private final static String TEST_NAME1 = \"IPAFunctionArgsFor\";\n+ private final static String TEST_NAME2 = \"IPAFunctionArgsParfor\";\n+\nprivate final static String TEST_DIR = \"functions/recompile/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + IPAConstantPropagationFunTest.class.getSimpleName() + \"/\";\n@Override\npublic void setUp() {\naddTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[]{\"R\"}));\n+ addTestConfiguration(TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[]{\"R\"}));\n}\n@Test\n- public void runIPAConstantPropagationTest()\n- {\n+ public void runIPAConstantPropagationForTest() {\n+ runIPAConstantPropagationTest(TEST_NAME1);\n+ }\n+\n+ @Test\n+ public void runIPAConstantPropagationParForTest() {\n+ runIPAConstantPropagationTest(TEST_NAME2);\n+ }\n+\n+ private void runIPAConstantPropagationTest(String testname) {\nboolean oldFlagIPA = OptimizerUtils.ALLOW_INTER_PROCEDURAL_ANALYSIS;\ntry\n{\n- TestConfiguration config = getTestConfiguration(TEST_NAME1);\n+ TestConfiguration config = getTestConfiguration(testname);\nloadTestConfiguration(config);\nString HOME = SCRIPT_DIR + TEST_DIR;\n- fullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\n+ fullDMLScriptName = HOME + testname + \".dml\";\nprogramArgs = new String[]{\"-args\", output(\"R\") };\nOptimizerUtils.ALLOW_INTER_PROCEDURAL_ANALYSIS = true;\n"
},
{
"change_type": "RENAME",
"old_path": "src/test/scripts/functions/recompile/IPAFunctionArgs.dml",
"new_path": "src/test/scripts/functions/recompile/IPAFunctionArgsFor.dml",
"diff": ""
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/recompile/IPAFunctionArgsParfor.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+checkR2 = function(Matrix[double] X, Matrix[double] y, Matrix[double] y_p,\n+ Matrix[double] beta, Integer icpt) return (Double R2_ad)\n+{\n+ n = nrow(X);\n+ m = ncol(X);\n+ m_ext = m;\n+ if (icpt == 1|icpt == 2)\n+ m_ext = m+1; #due to extra column ones\n+ avg_tot = sum(y)/n;\n+ ss_tot = sum(y^2);\n+ ss_avg_tot = ss_tot - n*avg_tot^2;\n+ y_res = y - y_p;\n+ avg_res = sum(y - y_p)/n;\n+ ss_res = sum((y - y_p)^2);\n+ R2 = 1 - ss_res/ss_avg_tot;\n+ dispersion = ifelse(n>m_ext, ss_res/(n-m_ext), NaN);\n+ R2_ad = ifelse(n>m_ext, 1-dispersion/(ss_avg_tot/(n-1)), NaN);\n+}\n+\n+\n+PCA = function(Matrix[Double] A, Integer K = ncol(A), Integer center = 1, Integer scale = 1,\n+ Integer projectData = 1) return(Matrix[Double] newA)\n+{\n+ evec_dominant = matrix(0,cols=1,rows=1);\n+\n+ N = nrow(A);\n+ D = ncol(A);\n+ print(\"K = \"+K);\n+\n+ # perform z-scoring (centering and scaling)\n+ A = scale(A, center==1, scale==1);\n+\n+ # co-variance matrix\n+ mu = colSums(A)/N;\n+ C = (t(A) %*% A)/(N-1) - (N/(N-1))*t(mu) %*% mu;\n+\n+ # compute eigen vectors and values\n+ [evalues, evectors] = eigen(C);\n+\n+ decreasing_Idx = order(target=evalues,by=1,decreasing=TRUE,index.return=TRUE);\n+ diagmat = table(seq(1,D),decreasing_Idx);\n+ # sorts eigenvalues by decreasing order\n+ evalues = diagmat %*% evalues;\n+ # sorts eigenvectors column-wise in the order of decreasing eigenvalues\n+ evectors = evectors %*% diagmat;\n+\n+\n+ # select K dominant eigen vectors\n+ nvec = ncol(evectors);\n+\n+ eval_dominant = evalues[1:K, 1];\n+ evec_dominant = evectors[,1:K];\n+\n+ # the square root of eigenvalues\n+ eval_stdev_dominant = sqrt(eval_dominant);\n+\n+ if (projectData == 1){\n+ # Construct new data set by treating computed dominant eigenvectors as the basis vectors\n+ newA = A %*% evec_dominant;\n+ }\n+}\n+\n+# Get the dataset\n+M = 1000;\n+A = rand(rows=M, cols=100, seed=1);\n+y = rand(rows=M, cols=1, seed=2);\n+R = matrix(0, rows=1, cols=20);\n+\n+Kc = floor(ncol(A) * 0.8);\n+\n+for (i in 1:10) {\n+ newA1 = PCA(A=A, K=Kc+i);\n+ beta1 = lm(X=newA1, y=y, icpt=1, reg=0.0001, verbose=FALSE);\n+ y_predict1 = lmpredict(X=newA1, w=beta1, icpt=1);\n+ R2_ad1 = checkR2(newA1, y, y_predict1, beta1, 1);\n+ R[,i] = R2_ad1;\n+}\n+\n+parfor (i in 1:10) {\n+ newA3 = PCA(A=A, K=Kc+5);\n+ beta3 = lm(X=newA3, y=y, icpt=1, reg=0.001*i, verbose=FALSE);\n+ y_predict3 = lmpredict(X=newA3, w=beta3, icpt=1);\n+ R2_ad3 = checkR2(newA3, y, y_predict3, beta3, 1);\n+ R[,10+i] = R2_ad3;\n+}\n+\n+\n+write(R, $1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-415] Initial mlcontext lineage support (tracing, reuse)
This patch adds basic lineage support to the MLContext API. Since
in-memory objects are directly bound to the symbol table, lineage
tracing viewed these objects as literals and incorrectly reused
intermediates even if different in-memory objects were used in
subsequent MLContext invocations. |
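A usage sketch of the new API surface, mirroring the added test: enabling lineage-based reuse on an MLContext session. The local Spark setup and the IJV input are illustrative; the MLContext, ScriptFactory, and ReuseCacheType calls follow the patch.

```java
import static org.apache.sysds.api.mlcontext.ScriptFactory.dml;

import java.util.Arrays;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.apache.sysds.api.mlcontext.MLContext;
import org.apache.sysds.api.mlcontext.MatrixFormat;
import org.apache.sysds.api.mlcontext.MatrixMetadata;
import org.apache.sysds.api.mlcontext.Script;
import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;

public class LineageReuseSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
            .master("local").appName("lineage-sketch").getOrCreate();
        MLContext ml = new MLContext(spark);
        ml.setLineage(ReuseCacheType.REUSE_FULL); // trace lineage + enable reuse

        JavaRDD<String> rdd = new JavaSparkContext(spark.sparkContext())
            .parallelize(Arrays.asList("1 1 5", "2 2 5", "3 3 5"));
        MatrixMetadata mm = new MatrixMetadata(MatrixFormat.IJV, 3, 3);

        Script s = dml("print('sum: '+sum(M+M));").in("M", rdd, mm);
        ml.execute(s); // traces a pREAD createvar over CacheableData::<uniqueID>
        ml.execute(s); // same in-memory object: cached intermediates reused
    }
}
```

Because the traced createvar encodes the object's unique ID rather than the variable name, binding a different RDD to `M` yields a different lineage trace and correctly avoids reuse.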
49,689 | 27.06.2020 22:59:35 | -7,200 | ba075affc2798e9d804440b154840e8f88f2ecf0 | [MINOR] Improve robustness in partial reuse. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -39,7 +39,7 @@ public class LineageCacheConfig\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\",\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n\"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n- \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\"\n+ \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\", \"n+\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Improve robustness in partial reuse. |
49,689 | 29.06.2020 15:23:40 | -7,200 | 93e0930e97cf11c35ee6796aff2ae6e415a3d1f2 | Add a new rewrite for StepLM
This patch adds a new rewrite to partially reuse tsmm
results in StepLM (forward). | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -342,6 +342,7 @@ SYSTEMDS-410 Lineage Tracing, Reuse and Integration II\n* 414 New rewrite for PCA --> lmDS pipeline OK\n* 415 MLContext lineage support (tracing and reuse correctness) OK\n* 416 Lineage deduplication while, nested if, loop sequences OK\n+ * 417 New rewrite for partial reuse in StepLM OK\nSYSTEMDS-420 Compiler Improvements\n* 421 Fix invalid IPA scalar propagation into functions OK\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -99,6 +99,8 @@ public class LineageRewriteReuse\nnewInst = (newInst == null) ? rewriteTsmmCbind(curr, ec, lrwec) : newInst;\n//tsmm(cbind(cbind(X, deltaX), ones)) -> TODO\nnewInst = (newInst == null) ? rewriteTsmm2Cbind(curr, ec, lrwec) : newInst;\n+ //tsmm(cbind(cbind(X, deltaX), ones)) -> TODO\n+ newInst = (newInst == null) ? rewriteTsmm2CbindSameLeft(curr, ec, lrwec) : newInst;\n//tsmm(rbind(X, deltaX)) -> tsmm(X) + tsmm(deltaX)\nnewInst = (newInst == null) ? rewriteTsmmRbind(curr, ec, lrwec) : newInst;\n//rbind(X,deltaX) %*% Y -> rbind(X %*% Y, deltaX %*% Y)\n@@ -337,6 +339,74 @@ public class LineageRewriteReuse\nreturn inst;\n}\n+ private static ArrayList<Instruction> rewriteTsmm2CbindSameLeft (Instruction curr, ExecutionContext ec, ExecutionContext lrwec)\n+ {\n+ /* The difference between rewriteTsmm2Cbind and this rewrite is that the former applies\n+ * when columns are increasingly appended where the later applies when different columns\n+ * are appended to a single base matrix.\n+ */\n+ // Check the applicability of this rewrite.\n+ Map<String, MatrixBlock> inCache = new HashMap<>();\n+ if (!isTsmm2CbindSameLeft(curr, ec, inCache))\n+ return null;\n+\n+ // Create a transient read op over the last tsmm result\n+ MatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n+ MatrixObject newmo = convMBtoMO(cachedEntry);\n+ lrwec.setVariable(\"cachedEntry\", newmo);\n+ DataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n+\n+ // Create a transient read op over the input to this tsmm\n+ MatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n+ lrwec.setVariable(\"oldMatrix\", mo);\n+ DataOp newMatrix = HopRewriteUtils.createTransientRead(\"oldMatrix\", mo);\n+\n+ // pull out the newly added column(2nd last) from the input matrix\n+ Hop lastCol;\n+ // Use deltaX from cache, or create rightIndex\n+ if (inCache.containsKey(\"deltaX\")) {\n+ MatrixBlock cachedRI = inCache.get(\"deltaX\");\n+ lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lastCol = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n+ }\n+ else\n+ lastCol = HopRewriteUtils.createIndexingOp(newMatrix, new LiteralOp(1), new LiteralOp(mo.getNumRows()),\n+ new LiteralOp(mo.getNumColumns()-1), new LiteralOp(mo.getNumColumns()-1));\n+\n+ // apply t(lastCol) on i/p matrix to get the result vectors.\n+ ReorgOp tlastCol = HopRewriteUtils.createTranspose(lastCol);\n+ AggBinaryOp newCol = HopRewriteUtils.createMatrixMultiply(tlastCol, newMatrix);\n+ ReorgOp tnewCol = HopRewriteUtils.createTranspose(newCol);\n+\n+ // Replace the 2nd last row and column of the last tsmm resutl with the result vector.\n+ IndexingOp topLeft = HopRewriteUtils.createIndexingOp(lastRes, new LiteralOp(1), new LiteralOp(newmo.getNumRows()-2),\n+ new LiteralOp(1), new LiteralOp(newmo.getNumColumns()-2));\n+ IndexingOp topRight = HopRewriteUtils.createIndexingOp(lastRes, new LiteralOp(1), new LiteralOp(newmo.getNumRows()-2),\n+ new LiteralOp(newmo.getNumColumns()), new LiteralOp(newmo.getNumColumns()));\n+ IndexingOp bottomLeft = HopRewriteUtils.createIndexingOp(lastRes, new LiteralOp(newmo.getNumRows()),\n+ new LiteralOp(newmo.getNumRows()), new LiteralOp(1), new LiteralOp(newmo.getNumColumns()-2));\n+ IndexingOp bottomRight = HopRewriteUtils.createIndexingOp(lastRes, new LiteralOp(newmo.getNumRows()),\n+ new LiteralOp(newmo.getNumRows()), new LiteralOp(newmo.getNumColumns()), new LiteralOp(newmo.getNumColumns()));\n+ IndexingOp topCol = HopRewriteUtils.createIndexingOp(tnewCol, 
new LiteralOp(1), new LiteralOp(mo.getNumColumns()-2),\n+ new LiteralOp(1), new LiteralOp(1));\n+ IndexingOp bottomCol = HopRewriteUtils.createIndexingOp(tnewCol, new LiteralOp(mo.getNumColumns()),\n+ new LiteralOp(mo.getNumColumns()), new LiteralOp(1), new LiteralOp(1));\n+ NaryOp rowOne = HopRewriteUtils.createNary(OpOpN.CBIND, topLeft, topCol, topRight);\n+ NaryOp rowTwo = HopRewriteUtils.createNary(OpOpN.CBIND, bottomLeft, bottomCol, bottomRight);\n+ NaryOp lrwHop = HopRewriteUtils.createNary(OpOpN.RBIND, rowOne, newCol, rowTwo);\n+ DataOp lrwWrite = HopRewriteUtils.createTransientWrite(LR_VAR, lrwHop);\n+\n+ // generate runtime instructions\n+ if (LOG.isDebugEnabled())\n+ LOG.debug(\"LINEAGE REWRITE rewriteTsmm2CbindSameLeft APPLIED\");\n+ ArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ _disableReuse = true;\n+\n+ if (DMLScript.STATISTICS)\n+ LineageCacheStatistics.incrementPRewrites();\n+ return inst;\n+ }\n+\nprivate static ArrayList<Instruction> rewriteMatMulRbindLeft (Instruction curr, ExecutionContext ec, ExecutionContext lrwec)\n{\n// Check the applicability of this rewrite.\n@@ -837,6 +907,42 @@ public class LineageRewriteReuse\nreturn inCache.containsKey(\"lastMatrix\") ? true : false;\n}\n+ private static boolean isTsmm2CbindSameLeft (Instruction curr, ExecutionContext ec, Map<String, MatrixBlock> inCache)\n+ {\n+ if (!LineageCacheConfig.isReusable(curr, ec))\n+ return false;\n+\n+ //TODO: support nary cbind\n+ // If the input to tsmm came from cbind, look for both the inputs in cache.\n+ LineageItem item = ((ComputationCPInstruction) curr).getLineageItem(ec).getValue();\n+ // look for two consecutive cbinds\n+ if (curr.getOpcode().equalsIgnoreCase(\"tsmm\")) {\n+ LineageItem source = item.getInputs()[0];\n+ if (source.getOpcode().equalsIgnoreCase(\"cbind\")) {\n+ LineageItem input = source.getInputs()[0];\n+ if (input.getOpcode().equalsIgnoreCase(\"cbind\")) {\n+ LineageItem L2appin1 = input.getInputs()[0];\n+ if (!L2appin1.getOpcode().equalsIgnoreCase(\"rightIndex\"))\n+ return false;\n+ LineageItem RI = input.getInputs()[1];\n+ if (LineageCache.probe(RI))\n+ inCache.put(\"deltaX\", LineageCache.getMatrix(RI));\n+ LineageItem cu = RI.getInputs()[4];\n+ LineageItem old_cu = reduceColByOne(cu);\n+ LineageItem old_RI = new LineageItem(\"rightIndex\", new LineageItem[] {RI.getInputs()[0],\n+ RI.getInputs()[1], RI.getInputs()[2], old_cu, old_cu});\n+ LineageItem old_cbind = new LineageItem(\"cbind\", new LineageItem[] {L2appin1, old_RI});\n+ LineageItem tmp = new LineageItem(\"cbind\", new LineageItem[] {old_cbind, source.getInputs()[1]});\n+ LineageItem toProbe = new LineageItem(curr.getOpcode(), new LineageItem[] {tmp});\n+ if (LineageCache.probe(toProbe))\n+ inCache.put(\"lastMatrix\", LineageCache.getMatrix(toProbe));\n+ }\n+ }\n+ }\n+ // return true only if the last tsmm is found\n+ return inCache.containsKey(\"lastMatrix\") ? true : false;\n+ }\n+\nprivate static boolean isMatMulRbindLeft(Instruction curr, ExecutionContext ec, Map<String, MatrixBlock> inCache)\n{\nif (!LineageCacheConfig.isReusable(curr, ec))\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-417] Add a new rewrite for StepLM
This patch adds a new rewrite to partially reuse tsmm
results in StepLM (forward). |
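This cached tsmm result differs from the PCA case: the base matrix and the trailing ones column stay fixed while only the second-to-last (delta) column changes, so just that one row/column of the old output needs recomputing, via a single t(newCol) %*% M product. A plain-array sketch of the reassembly (illustrative; the rewrite emits equivalent indexing/cbind/rbind HOPs):

```java
// Rebuild tsmm(M) for M = cbind(X, newCol, ones) from the cached
// G = tsmm(cbind(X, oldCol, ones)): only row/column k-2 (0-based)
// changes, and it equals t(newCol) %*% M. Here k = ncol(M), G is k x k.
final class TsmmSameLeft {
    static double[][] rebuild(double[][] G, double[][] M) {
        int n = M.length, k = G.length;
        double[][] R = new double[k][k];
        for (int i = 0; i < k; i++)
            System.arraycopy(G[i], 0, R[i], 0, k);   // start from cached result
        int d = k - 2;                               // index of the delta column
        for (int j = 0; j < k; j++) {                // recompute its border only
            double s = 0;
            for (int r = 0; r < n; r++)
                s += M[r][d] * M[r][j];
            R[d][j] = s;
            R[j][d] = s;                             // symmetric output
        }
        return R;
    }
}
```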
49,706 | 29.06.2020 23:11:58 | -7,200 | dd742abe095829690fb43208f2bf7b04cfc41910 | [MINOR] Fix JDocs
exclude protobuf in Jdocs
Closes | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<groupId>org.apache.maven.plugins</groupId>\n<artifactId>maven-javadoc-plugin</artifactId>\n<version>3.1.1</version>\n+ <configuration>\n+ <excludePackageNames>org.apache.systeds.protobuf.*</excludePackageNames>\n+ </configuration>\n<executions>\n<execution>\n<id>attach-javadocs</id>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -1875,7 +1875,7 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n* if data value in any cell is greater than the specified threshold of that attribute\n* the output frame will store a null on that cell position, thus removing the length-violating values.\n*\n- * @param row vector of valid lengths\n+ * @param feaLen vector of valid lengths\n* @return FrameBlock with invalid values converted into missing values (null)\n*/\npublic FrameBlock invalidByLength(MatrixBlock feaLen) {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix JDocs
exclude protobuf in Jdocs
Closes #923 |
49,700 | 30.06.2020 10:10:04 | -7,200 | 433f638d04b65dbccbd1f59d468ed00bc6b7ae2f | Privacy Runtime Extended
Add FederatedWorkerHandlerException and improve handling of
exceptions in FederatedWorkerHandler | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -47,6 +47,7 @@ import org.apache.sysds.runtime.meta.DataCharacteristics;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\nimport org.apache.sysds.runtime.meta.MetaData;\nimport org.apache.sysds.runtime.meta.MetaDataFormat;\n+import org.apache.sysds.runtime.privacy.CheckedConstraintsLog;\nimport org.apache.sysds.runtime.privacy.PrivacyConstraint;\nimport org.apache.sysds.runtime.util.HDFSTool;\nimport org.apache.sysds.runtime.util.LocalFileUtils;\n@@ -322,6 +323,8 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\npublic void setPrivacyConstraints(PrivacyConstraint pc) {\n_privacyConstraint = pc;\n+ if ( DMLScript.CHECK_PRIVACY && pc != null )\n+ CheckedConstraintsLog.addLoadedConstraint(pc.getPrivacyLevel());\n}\npublic PrivacyConstraint getPrivacyConstraint() {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/MatrixObject.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/MatrixObject.java",
"diff": "package org.apache.sysds.runtime.controlprogram.caching;\n+import static org.apache.sysds.runtime.util.UtilFunctions.requestFederatedData;\n+\n+import java.io.IOException;\n+import java.lang.ref.SoftReference;\n+import java.util.List;\n+import java.util.concurrent.Future;\n+\nimport org.apache.commons.lang.mutable.MutableBoolean;\nimport org.apache.commons.lang3.tuple.Pair;\nimport org.apache.sysds.api.DMLScript;\n@@ -41,18 +48,10 @@ import org.apache.sysds.runtime.meta.DataCharacteristics;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\nimport org.apache.sysds.runtime.meta.MetaData;\nimport org.apache.sysds.runtime.meta.MetaDataFormat;\n-import org.apache.sysds.runtime.privacy.DMLPrivacyException;\nimport org.apache.sysds.runtime.util.DataConverter;\nimport org.apache.sysds.runtime.util.HDFSTool;\nimport org.apache.sysds.runtime.util.IndexRange;\n-import java.io.IOException;\n-import java.lang.ref.SoftReference;\n-import java.util.List;\n-import java.util.concurrent.Future;\n-\n-import static org.apache.sysds.runtime.util.UtilFunctions.requestFederatedData;\n-\n/**\n* Represents a matrix in control program. This class contains method to read\n* matrices from HDFS and convert them to a specific format/representation. It\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedRequest.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedRequest.java",
"diff": "@@ -35,7 +35,7 @@ public class FederatedRequest implements Serializable {\nprivate FedMethod _method;\nprivate List<Object> _data;\n- private boolean checkPrivacy;\n+ private boolean _checkPrivacy;\npublic FederatedRequest(FedMethod method, List<Object> data) {\n_method = method;\n@@ -82,7 +82,7 @@ public class FederatedRequest implements Serializable {\n}\npublic void setCheckPrivacy(boolean checkPrivacy){\n- this.checkPrivacy = checkPrivacy;\n+ this._checkPrivacy = checkPrivacy;\n}\npublic void setCheckPrivacy(){\n@@ -90,6 +90,6 @@ public class FederatedRequest implements Serializable {\n}\npublic boolean checkPrivacy(){\n- return checkPrivacy;\n+ return _checkPrivacy;\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedResponse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedResponse.java",
"diff": "@@ -65,7 +65,11 @@ public class FederatedResponse implements Serializable {\n}\npublic String getErrorMessage() {\n- return ExceptionUtils.getFullStackTrace( (Exception) _data[0] );\n+ if (_data[0] instanceof Throwable )\n+ return ExceptionUtils.getFullStackTrace( (Throwable) _data[0] );\n+ else if (_data[0] instanceof String)\n+ return (String) _data[0];\n+ else return \"No readable error message\";\n}\npublic Object[] getData() throws Exception {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -48,6 +48,7 @@ import org.apache.sysds.runtime.matrix.operators.AggregateUnaryOperator;\nimport org.apache.sysds.runtime.matrix.operators.ScalarOperator;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\nimport org.apache.sysds.runtime.meta.MetaDataFormat;\n+import org.apache.sysds.runtime.privacy.DMLPrivacyException;\nimport org.apache.sysds.runtime.privacy.PrivacyMonitor;\nimport org.apache.sysds.runtime.privacy.PrivacyPropagator;\nimport org.apache.sysds.utils.JSONHelper;\n@@ -114,12 +115,17 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nreturn executeScalarOperation(request);\ndefault:\nString message = String.format(\"Method %s is not supported.\", method);\n- return new FederatedResponse(FederatedResponse.Type.ERROR, message);\n+ return new FederatedResponse(FederatedResponse.Type.ERROR, new FederatedWorkerHandlerException(message));\n}\n}\n- catch (Exception exception) {\n+ catch (DMLPrivacyException | FederatedWorkerHandlerException exception) {\nreturn new FederatedResponse(FederatedResponse.Type.ERROR, exception);\n}\n+ catch (Exception exception) {\n+ return new FederatedResponse(FederatedResponse.Type.ERROR,\n+ new FederatedWorkerHandlerException(\"Exception of type \"\n+ + exception.getClass() + \" thrown when processing request\"));\n+ }\n}\nprivate FederatedResponse readData(FederatedRequest request, Types.DataType dataType) {\n@@ -141,7 +147,8 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nbreak;\ndefault:\n// should NEVER happen (if we keep request codes in sync with actual behaviour)\n- return new FederatedResponse(FederatedResponse.Type.ERROR, \"Could not recognize datatype\");\n+ return new FederatedResponse(FederatedResponse.Type.ERROR,\n+ new FederatedWorkerHandlerException(\"Could not recognize datatype\"));\n}\n// read metadata\n@@ -153,7 +160,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\ntry (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)))) {\nJSONObject mtd = JSONHelper.parse(br);\nif (mtd == null)\n- return new FederatedResponse(FederatedResponse.Type.ERROR, \"Could not parse metadata file\");\n+ return new FederatedResponse(FederatedResponse.Type.ERROR, new FederatedWorkerHandlerException(\"Could not parse metadata file\"));\nmc.setRows(mtd.getLong(DataExpression.READROWPARAM));\nmc.setCols(mtd.getLong(DataExpression.READCOLPARAM));\ncd = PrivacyPropagator.parseAndSetPrivacyConstraint(cd, mtd);\n@@ -224,7 +231,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\n// TODO rest of the possible datatypes\ndefault:\nreturn new FederatedResponse(FederatedResponse.Type.ERROR,\n- \"FederatedWorkerHandler: Not possible to send datatype \" + dataObject.getDataType().name());\n+ new FederatedWorkerHandlerException(\"Not possible to send datatype \" + dataObject.getDataType().name()));\n}\n}\n@@ -239,8 +246,8 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nData dataObject = _vars.get(varID);\nif (dataObject.getDataType() != Types.DataType.MATRIX) {\nreturn new FederatedResponse(FederatedResponse.Type.ERROR,\n- \"FederatedWorkerHandler: Aggregation only supported for matrices, not for \"\n- + dataObject.getDataType().name());\n+ new FederatedWorkerHandlerException(\"Aggregation only supported for matrices, not for \"\n+ + dataObject.getDataType().name()));\n}\nMatrixObject matrixObject = (MatrixObject) dataObject;\nmatrixObject = 
PrivacyMonitor.handlePrivacy(matrixObject);\n@@ -261,12 +268,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\noutNumCols += numMissing;\n}\nMatrixBlock ret = new MatrixBlock(outNumRows, outNumCols, operator.aggOp.initialValue);\n- try {\nLibMatrixAgg.aggregateUnaryMatrix(matrixBlock, ret, operator);\n- }\n- catch (Exception e) {\n- return new FederatedResponse(FederatedResponse.Type.ERROR, \"FederatedWorkerHandler: \" + e);\n- }\n// result block without correction\nret.dropLastRowsOrColumns(operator.aggOp.correction);\nreturn new FederatedResponse(FederatedResponse.Type.SUCCESS, ret);\n@@ -284,8 +286,8 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\ndataObject = PrivacyMonitor.handlePrivacy(dataObject);\nif (dataObject.getDataType() != Types.DataType.MATRIX) {\nreturn new FederatedResponse(FederatedResponse.Type.ERROR,\n- \"FederatedWorkerHandler: ScalarOperator dont support \"\n- + dataObject.getDataType().name());\n+ new FederatedWorkerHandlerException(\"FederatedWorkerHandler: ScalarOperator dont support \"\n+ + dataObject.getDataType().name()));\n}\nMatrixObject matrixObject = (MatrixObject) dataObject;\n"
},
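The handler rework above is an exception-sanitization pattern: exception types with curated messages (DMLPrivacyException, FederatedWorkerHandlerException) are forwarded to the federated master verbatim, while any other exception is replaced by a wrapper that reveals only its type, so file names or data values never leave the worker. A minimal self-contained sketch of the same pattern (class and method names are illustrative, not the actual SystemDS API):

```java
public class SanitizingHandler {
	// stands in for FederatedWorkerHandlerException: messages are curated and safe
	static class SafeException extends RuntimeException {
		private static final long serialVersionUID = 1L;
		SafeException(String msg) { super(msg); }
	}

	public String handle(Runnable request) {
		try {
			request.run();
			return "SUCCESS";
		}
		catch (SafeException e) {
			// curated message, safe to propagate verbatim
			return "ERROR: " + e.getMessage();
		}
		catch (Exception e) {
			// arbitrary exception: expose only the type, never the message
			return "ERROR: exception of type " + e.getClass().getSimpleName()
				+ " thrown when processing request";
		}
	}

	public static void main(String[] args) {
		SanitizingHandler h = new SanitizingHandler();
		System.out.println(h.handle(() -> { throw new SafeException("matrix is private"); }));
		System.out.println(h.handle(() -> { throw new java.io.UncheckedIOException(
			new java.io.IOException("/secret/path/X.mtd not found")); }));
	}
}
```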
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandlerException.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.runtime.controlprogram.federated;\n+\n+/**\n+ * Exception to throw when an exception occurs in FederatedWorkerHandler during handling of FederatedRequest. The\n+ * purpose of FederatedWorkerHandlerException is to propagate useful information from the federated workers to the\n+ * federated master without exposing details that are usually included in exceptions, for instance name of files that\n+ * were not found or data points that could not be handled correctly.\n+ */\n+public class FederatedWorkerHandlerException extends RuntimeException {\n+\n+ private static final long serialVersionUID = 1L;\n+\n+ /**\n+ * Create new instance of FederatedWorkerHandlerException with a message.\n+ *\n+ * @param msg message describing the exception\n+ */\n+ public FederatedWorkerHandlerException(String msg) {\n+ super(msg);\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/privacy/CheckedConstraintsLog.java",
"new_path": "src/main/java/org/apache/sysds/runtime/privacy/CheckedConstraintsLog.java",
"diff": "@@ -26,7 +26,15 @@ import java.util.function.BiFunction;\nimport org.apache.sysds.runtime.privacy.PrivacyConstraint.PrivacyLevel;\n+/**\n+ * Class counting the checked privacy constraints and the loaded privacy constraints.\n+ */\npublic class CheckedConstraintsLog {\n+ private static Map<PrivacyLevel,LongAdder> loadedConstraintsTotal = new EnumMap<PrivacyLevel,LongAdder>(PrivacyLevel.class);\n+ static {\n+ for ( PrivacyLevel level : PrivacyLevel.values() )\n+ loadedConstraintsTotal.put(level, new LongAdder());\n+ }\nprivate static Map<PrivacyLevel,LongAdder> checkedConstraintsTotal = new EnumMap<PrivacyLevel,LongAdder>(PrivacyLevel.class);\nprivate static BiFunction<LongAdder, LongAdder, LongAdder> mergeLongAdders = (v1, v2) -> {\nv1.add(v2.longValue() );\n@@ -45,23 +53,40 @@ public class CheckedConstraintsLog {\n}\n/**\n- * Remove all elements from checked constraints log.\n+ * Add an occurence of the given privacy level to the loaded constraints log total.\n+ * @param level privacy level from loaded privacy constraint\n+ */\n+ public static void addLoadedConstraint(PrivacyLevel level){\n+ if (level != null)\n+ loadedConstraintsTotal.get(level).increment();\n+ }\n+\n+ /**\n+ * Remove all elements from checked constraints log and loaded constraints log.\n*/\npublic static void reset(){\ncheckedConstraintsTotal.clear();\n+ loadedConstraintsTotal.replaceAll((k,v)->new LongAdder());\n}\npublic static Map<PrivacyLevel,LongAdder> getCheckedConstraints(){\nreturn checkedConstraintsTotal;\n}\n+ public static Map<PrivacyLevel, LongAdder> getLoadedConstraints(){\n+ return loadedConstraintsTotal;\n+ }\n+\n/**\n* Get string representing all contents of the checked constraints log.\n* @return string representation of checked constraints log.\n*/\npublic static String display(){\nStringBuilder sb = new StringBuilder();\n+ sb.append(\"Checked Privacy Constraints:\\n\");\ncheckedConstraintsTotal.forEach((k,v)->sb.append(\"\\t\" + k + \": \" + v + \"\\n\"));\n+ sb.append(\"Loaded Privacy Constraints:\\n\");\n+ loadedConstraintsTotal.forEach((k,v)->sb.append(\"\\t\" + k + \": \" + v + \"\\n\"));\nreturn sb.toString();\n}\n}\n"
},
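The counting idiom above pre-populates an EnumMap with one LongAdder per privacy level in a static initializer, so addLoadedConstraint is a lock-free increment with no missing-key checks, and reset() keeps the key set while swapping in fresh adders via replaceAll. A standalone sketch of that idiom (the enum values are illustrative):

```java
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.atomic.LongAdder;

public class LevelCounter {
	enum Level { None, PrivateAggregation, Private }

	// one adder per level, created once, so increments never synchronize
	private static final Map<Level, LongAdder> counts = new EnumMap<>(Level.class);
	static {
		for (Level l : Level.values())
			counts.put(l, new LongAdder());
	}

	public static void add(Level l) {
		if (l != null) // mirrors the null guard in addLoadedConstraint
			counts.get(l).increment();
	}

	// keep the keys, swap in fresh adders (mirrors the replaceAll-based reset)
	public static void reset() {
		counts.replaceAll((k, v) -> new LongAdder());
	}

	public static void main(String[] args) {
		add(Level.Private); add(Level.Private); add(Level.None);
		counts.forEach((k, v) -> System.out.println(k + ": " + v));
		reset();
		System.out.println("after reset: " + counts.get(Level.Private)); // 0
	}
}
```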
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/privacy/PrivacyMonitor.java",
"new_path": "src/main/java/org/apache/sysds/runtime/privacy/PrivacyMonitor.java",
"diff": "package org.apache.sysds.runtime.privacy;\nimport java.util.EnumMap;\n-import java.util.HashMap;\nimport java.util.concurrent.atomic.LongAdder;\nimport org.apache.sysds.runtime.controlprogram.caching.CacheableData;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/utils/Statistics.java",
"new_path": "src/main/java/org/apache/sysds/utils/Statistics.java",
"diff": "@@ -999,7 +999,7 @@ public class Statistics\n}\nif (DMLScript.CHECK_PRIVACY)\n- sb.append(\"Checked Privacy Constraints:\\n\" + CheckedConstraintsLog.display());\n+ sb.append(CheckedConstraintsLog.display());\nreturn sb.toString();\n}\n"
},
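With the headings moved into display() itself, a -stats run with privacy checking enabled should end with a block shaped like the following; the counts and ordering are invented for illustration, but the layout follows the StringBuilder logic in the CheckedConstraintsLog diff above:

```
Checked Privacy Constraints:
	Private: 450
	PrivateAggregation: 150
Loaded Privacy Constraints:
	None: 3
	PrivateAggregation: 0
	Private: 12
```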
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/BuiltinGLMTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.privacy;\n+\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.Collection;\n+import java.util.HashMap;\n+import java.util.List;\n+import java.util.Random;\n+\n+import org.apache.sysds.api.DMLException;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.hops.OptimizerUtils;\n+import org.apache.sysds.lops.LopProperties;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.privacy.PrivacyConstraint;\n+import org.apache.sysds.runtime.privacy.PrivacyConstraint.PrivacyLevel;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Parameterized;\n+\n+/**\n+ * Adapted from org.apache.sysds.test.functions.builtin.BuiltinGLMTest.\n+ * Different privacy constraints are added to the input.\n+ */\n+\n+@RunWith(value = Parameterized.class)\[email protected]\n+public class BuiltinGLMTest extends AutomatedTestBase\n+{\n+ protected final static String TEST_NAME = \"glmTest\";\n+ protected final static String TEST_DIR = \"functions/builtin/\";\n+ protected String TEST_CLASS_DIR = TEST_DIR + BuiltinGLMTest.class.getSimpleName() + \"/\";\n+ double eps = 1e-4;\n+\n+ protected int numRecords, numFeatures, distFamilyType, linkType, intercept;\n+ protected double distParam, linkPower, logFeatureVarianceDisbalance, avgLinearForm, stdevLinearForm, dispersion;\n+\n+ public BuiltinGLMTest(int numRecords_, int numFeatures_, int distFamilyType_, double distParam_,\n+ int linkType_, double linkPower_, double logFeatureVarianceDisbalance_,\n+ double avgLinearForm_, double stdevLinearForm_, double dispersion_)\n+ {\n+ this.numRecords = numRecords_;\n+ this.numFeatures = numFeatures_;\n+ this.distFamilyType = distFamilyType_;\n+ this.distParam = distParam_;\n+ this.linkType = linkType_;\n+ this.linkPower = linkPower_;\n+ this.logFeatureVarianceDisbalance = logFeatureVarianceDisbalance_;\n+ this.avgLinearForm = avgLinearForm_;\n+ this.stdevLinearForm = stdevLinearForm_;\n+ this.dispersion = dispersion_;\n+ }\n+\n+ private void setIntercept(int intercept_)\n+ {\n+ intercept = intercept_/100;\n+ }\n+\n+ @Override\n+ public void setUp()\n+ {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_CLASS_DIR, TEST_NAME);\n+ }\n+\n+ // Private\n+ @Test\n+ public void glmTestIntercept_0_CP_Private() {\n+ setIntercept(0);\n+ runtestGLM(new PrivacyConstraint(PrivacyLevel.Private), DMLException.class);\n+ }\n+\n+ // PrivateAggregation\n+ @Test\n+ public void 
glmTestIntercept_0_CP_PrivateAggregation() {\n+ setIntercept(0);\n+ runtestGLM(new PrivacyConstraint(PrivacyLevel.PrivateAggregation), null);\n+ }\n+\n+ // None\n+ @Test\n+ public void glmTestIntercept_0_CP_None() {\n+ setIntercept(0);\n+ runtestGLM(new PrivacyConstraint(PrivacyLevel.None), null);\n+ }\n+\n+ public void runtestGLM(PrivacyConstraint privacyConstraint, Class<?> expectedException) {\n+ Types.ExecMode platformOld = setExecMode(LopProperties.ExecType.CP);\n+ try {\n+ int rows = numRecords; // # of rows in the training data\n+ int cols = numFeatures; // # of features in the training data\n+ System.out.println(\"------------ BEGIN \" + TEST_NAME + \" TEST WITH {\" + rows + \", \" + cols\n+ + \", \" + distFamilyType + \", \" + distParam + \", \" + linkType + \", \" + linkPower + \", \"\n+ + intercept + \", \" + logFeatureVarianceDisbalance + \", \" + avgLinearForm + \", \" + stdevLinearForm\n+ + \", \" + dispersion + \"} ------------\");\n+\n+ TestUtils.GLMDist glmdist = new TestUtils.GLMDist(distFamilyType, distParam, linkType, linkPower);\n+ glmdist.set_dispersion(dispersion);\n+\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+\n+ // prepare training data set\n+ Random r = new Random(314159265);\n+ double[][] X = TestUtils.generateUnbalancedGLMInputDataX(rows, cols, logFeatureVarianceDisbalance);\n+ double[] beta = TestUtils.generateUnbalancedGLMInputDataB(X, cols, intercept, avgLinearForm, stdevLinearForm, r);\n+ double[][] y = TestUtils.generateUnbalancedGLMInputDataY(X, beta, rows, cols, glmdist, intercept, dispersion, r);\n+\n+ int defaultBlockSize = OptimizerUtils.DEFAULT_BLOCKSIZE;\n+\n+ MatrixCharacteristics mc_X = new MatrixCharacteristics(rows, cols, defaultBlockSize, -1);\n+ writeInputMatrixWithMTD(\"X\", X, true, mc_X, privacyConstraint);\n+\n+ MatrixCharacteristics mc_y = new MatrixCharacteristics(rows, y[0].length, defaultBlockSize, -1);\n+ writeInputMatrixWithMTD(\"Y\", y, true, mc_y, privacyConstraint);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ List<String> proArgs = new ArrayList<>();\n+ proArgs.add(\"-exec\");\n+ proArgs.add(\" singlenode\");\n+ proArgs.add(\"-nvargs\");\n+ proArgs.add(\"X=\" + input(\"X\"));\n+ proArgs.add(\"Y=\" + input(\"Y\"));\n+ proArgs.add(\"dfam=\" + String.valueOf(distFamilyType));\n+ proArgs.add(((distFamilyType == 2 && distParam != 1.0) ? \"yneg=\" : \"vpow=\") + String.valueOf(distParam));\n+ proArgs.add((distFamilyType == 2 && distParam != 1.0) ? 
\"vpow=0.0\" : \"yneg=0.0\");\n+ proArgs.add(\"link=\" + String.valueOf(linkType));\n+ proArgs.add(\"lpow=\" + String.valueOf(linkPower));\n+ proArgs.add(\"icpt=\" + String.valueOf(intercept)); // INTERCEPT - CHANGE THIS AS NEEDED\n+ proArgs.add(\"disp=0.0\"); // DISPERSION (0.0: ESTIMATE)\n+ proArgs.add(\"reg=0.0\"); // LAMBDA REGULARIZER\n+ proArgs.add(\"tol=0.000000000001\"); // TOLERANCE (EPSILON)\n+ proArgs.add(\"moi=300\");\n+ proArgs.add(\"mii=0\");\n+ proArgs.add(\"B=\" + output(\"betas_SYSTEMDS\"));\n+ programArgs = proArgs.toArray(new String[proArgs.size()]);\n+\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = getRCmd(input(\"X.mtx\"), input(\"Y.mtx\"),\n+ String.valueOf(distFamilyType),\n+ String.valueOf(distParam),\n+ String.valueOf(linkType),\n+ String.valueOf(linkPower),\n+ String.valueOf(intercept),\n+ \"0.000000000001\",\n+ expected(\"betas_R\"));\n+\n+ runTest(true, (expectedException != null), expectedException, -1);\n+\n+ if ( expectedException == null ){\n+\n+ double max_abs_beta = 0.0;\n+ HashMap<MatrixValue.CellIndex, Double> wTRUE = new HashMap<>();\n+ for (int j = 0; j < cols; j++) {\n+ wTRUE.put(new MatrixValue.CellIndex(j + 1, 1), Double.valueOf(beta[j]));\n+ max_abs_beta = (max_abs_beta >= Math.abs(beta[j]) ? max_abs_beta : Math.abs(beta[j]));\n+ }\n+\n+ HashMap<MatrixValue.CellIndex, Double> wSYSTEMDS_raw = readDMLMatrixFromHDFS(\"betas_SYSTEMDS\");\n+ HashMap<MatrixValue.CellIndex, Double> wSYSTEMDS = new HashMap<>();\n+ for (MatrixValue.CellIndex key : wSYSTEMDS_raw.keySet())\n+ if (key.column == 1)\n+ wSYSTEMDS.put(key, wSYSTEMDS_raw.get(key));\n+\n+ runRScript(true);\n+\n+ HashMap<MatrixValue.CellIndex, Double> wR = readRMatrixFromFS(\"betas_R\");\n+\n+ if ((distParam == 0 && linkType == 1)) { // Gaussian.*\n+ //NOTE MB: Gaussian.log was the only test failing when we introduced multi-threaded\n+ //matrix multplications (mmchain). After discussions with Sasha, we decided to change the eps\n+ //because accuracy is anyway affected by various rewrites like binary to unary (-1*x->-x),\n+ //transpose-matrixmult, and dot product sum. Disabling these rewrites led to a successful\n+ //test result. Even without multi-threaded matrix mult this test was failing for different number\n+ //of rows if these rewrites are enabled. Users can turn off rewrites if high accuracy is required.\n+ //However, in the future we might also consider to use Kahan plus for aggregations in matrix mult\n+ //(at least for the final aggregation of partial results from individual threads).\n+\n+ //NOTE MB: similar issues occurred with other tests when moving to github action tests\n+ eps *= (linkPower == -1) ? 
4 : 2; //Gaussian.inverse vs Gaussian.*;\n+ }\n+ TestUtils.compareMatrices(wR, wSYSTEMDS, eps * max_abs_beta, \"wR\", \"wSYSTEMDS\");\n+ }\n+ }\n+ finally {\n+ resetExecMode(platformOld);\n+ }\n+ }\n+\n+ @Parameterized.Parameters\n+ public static Collection<Object[]> data() {\n+ // SCHEMA:\n+ // #RECORDS, #FEATURES, DISTRIBUTION_FAMILY, VARIANCE_POWER or BERNOULLI_NO, LINK_TYPE, LINK_POWER,\n+ // LOG_FEATURE_VARIANCE_DISBALANCE, AVG_LINEAR_FORM, ST_DEV_LINEAR_FORM, DISPERSION\n+ Object[][] data = new Object[][] {\n+ // #RECS #FTRS DFM VPOW LNK LPOW LFVD AVGLT STDLT DISP\n+ // Both DML and R work and compute close results:\n+ { 10000, 50, 1, 0.0, 1, 0.0, 3.0, 10.0, 2.0, 2.5 }, // Gaussian.log\n+ { 1000, 100, 1, 1.0, 1, 0.0, 3.0, 0.0, 1.0, 2.5 }, // Poisson.log\n+ { 10000, 50, 1, 2.0, 1, 0.0, 3.0, 0.0, 2.0, 2.5 }, // Gamma.log\n+\n+ { 10000, 50, 2, -1.0, 1, 0.0, 3.0, -5.0, 1.0, 1.0 }, // Bernoulli {-1, 1}.log // Note: Y is sparse\n+ { 1000, 100, 2, -1.0, 2, 0.0, 3.0, 0.0, 2.0, 1.0 }, // Bernoulli {-1, 1}.logit\n+ { 2000, 100, 2, -1.0, 3, 0.0, 3.0, 0.0, 2.0, 1.0 }, // Bernoulli {-1, 1}.probit\n+\n+ { 10000, 50, 2, 1.0, 1, 0.0, 3.0, -5.0, 1.0, 2.5 }, // Binomial two-column.log // Note: Y is sparse\n+ { 1000, 100, 2, 1.0, 2, 0.0, 3.0, 0.0, 2.0, 2.5 }, // Binomial two-column.logit\n+ { 2000, 100, 2, 1.0, 3, 0.0, 3.0, 0.0, 2.0, 2.5 }, // Binomial two-column.probit\n+ };\n+ return Arrays.asList(data);\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/privacy/CheckedConstraintsLogTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/CheckedConstraintsLogTest.java",
"diff": "package org.apache.sysds.test.functions.privacy;\n+import static org.junit.Assert.assertEquals;\n+import static org.junit.Assert.assertTrue;\n+\nimport java.util.EnumMap;\nimport java.util.concurrent.atomic.LongAdder;\n@@ -32,27 +35,27 @@ public class CheckedConstraintsLogTest extends AutomatedTestBase {\n@Override\npublic void setUp() {\n- CheckedConstraintsLog.getCheckedConstraints().clear();\n+ CheckedConstraintsLog.reset();\n}\n@Test\npublic void addCheckedConstraintsNull(){\nCheckedConstraintsLog.addCheckedConstraints(null);\n- assert(CheckedConstraintsLog.getCheckedConstraints() != null && CheckedConstraintsLog.getCheckedConstraints().isEmpty());\n+ assertTrue(CheckedConstraintsLog.getCheckedConstraints() != null && CheckedConstraintsLog.getCheckedConstraints().isEmpty());\n}\n@Test\npublic void addCheckedConstraintsEmpty(){\nEnumMap<PrivacyLevel,LongAdder> checked = new EnumMap<>(PrivacyLevel.class);\nCheckedConstraintsLog.addCheckedConstraints(checked);\n- assert(CheckedConstraintsLog.getCheckedConstraints() != null && CheckedConstraintsLog.getCheckedConstraints().isEmpty());\n+ assertTrue(CheckedConstraintsLog.getCheckedConstraints() != null && CheckedConstraintsLog.getCheckedConstraints().isEmpty());\n}\n@Test\npublic void addCheckedConstraintsSingleValue(){\nEnumMap<PrivacyLevel,LongAdder> checked = getMap(PrivacyLevel.Private, 300);\nCheckedConstraintsLog.addCheckedConstraints(checked);\n- assert(CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.Private).longValue() == 300);\n+ assertTrue(CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.Private).longValue() == 300);\n}\n@Test\n@@ -61,7 +64,7 @@ public class CheckedConstraintsLogTest extends AutomatedTestBase {\nCheckedConstraintsLog.addCheckedConstraints(checked);\nEnumMap<PrivacyLevel,LongAdder> checked2 = getMap(PrivacyLevel.Private, 150);\nCheckedConstraintsLog.addCheckedConstraints(checked2);\n- assert(CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.Private).longValue() == 450);\n+ assertTrue(CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.Private).longValue() == 450);\n}\n@Test\n@@ -72,7 +75,7 @@ public class CheckedConstraintsLogTest extends AutomatedTestBase {\nCheckedConstraintsLog.addCheckedConstraints(checked2);\nEnumMap<PrivacyLevel,LongAdder> checked3 = getMap(PrivacyLevel.PrivateAggregation, 150);\nCheckedConstraintsLog.addCheckedConstraints(checked3);\n- assert(CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.Private).longValue() == 450\n+ assertTrue(CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.Private).longValue() == 450\n&& CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.PrivateAggregation).longValue() == 150);\n}\n@@ -83,4 +86,12 @@ public class CheckedConstraintsLogTest extends AutomatedTestBase {\nchecked.put(level, valueAdder);\nreturn checked;\n}\n+\n+ @Test\n+ public void addLoadedConstraintsSingleValue(){\n+ Integer n = 12;\n+ for (int i = 0; i < n; i++)\n+ CheckedConstraintsLog.addLoadedConstraint(PrivacyLevel.Private);\n+ assertEquals(n.longValue(), CheckedConstraintsLog.getLoadedConstraints().get(PrivacyLevel.Private).longValue());\n+ }\n}\n"
},
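One detail worth calling out in this test change: replacing the bare assert keyword with JUnit's assertTrue is more than style, because plain Java assertions are no-ops unless the JVM is started with -ea, so the old checks could silently pass without ever being evaluated. A short demonstration:

```java
public class AssertDemo {
	public static void main(String[] args) {
		// skipped entirely under `java AssertDemo`; only `java -ea AssertDemo`
		// evaluates the condition and throws an AssertionError
		assert 1 == 2 : "never evaluated without -ea";
		System.out.println("reached: assertions were disabled");
	}
}
```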
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/privacy/GLMTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/GLMTest.java",
"diff": "@@ -39,6 +39,11 @@ import org.apache.sysds.runtime.privacy.PrivacyConstraint.PrivacyLevel;\nimport org.apache.sysds.test.AutomatedTestBase;\nimport org.apache.sysds.test.TestUtils;\n+/**\n+ * Adapted from org.apache.sysds.test.applications.GLMTest.\n+ * Different privacy constraints are added to the input.\n+ */\n+\n@RunWith(value = Parameterized.class)\[email protected]\npublic class GLMTest extends AutomatedTestBase\n@@ -183,46 +188,8 @@ public class GLMTest extends AutomatedTestBase\n@Test\npublic void TestGLMPrivateX(){\n-\nPrivacyConstraint pc = new PrivacyConstraint(PrivacyLevel.Private);\n- Class<?> expectedException = null;\n- switch ( glmType ){\n- case Gaussianinverse:\n- case Poissonlog1:\n- case Poissonlog2:\n- case Poissonsqrt:\n- case Poissonid:\n- case Gammalog:\n- case Gammainverse:\n- case InvGaussian1mu:\n- case InvGaussianinverse:\n- case InvGaussianlog:\n- case InvGaussianid:\n- case Binomialid:\n- case Binomialcauchit:\n- case Gaussianlog:\n- case Gaussianid:\n- case Bernoullilog:\n- case Bernoulliid:\n- case Bernoullisqrt:\n- case Bernoullilogit1:\n- case Bernoullilogit2:\n- case Bernoulliprobit1:\n- case Bernoulliprobit2:\n- case Bernoullicloglog1:\n- case Bernoullicloglog2:\n- case Bernoullicauchit:\n- case Binomiallog:\n- case Binomialsqrt:\n- case Binomiallogit:\n- case Binomialprobit:\n- case Binomialcloglog:\n- expectedException = DMLException.class;\n- break;\n- default:\n- expectedException = null;\n- break;\n- }\n+ Class<?> expectedException = DMLException.class;\ntestGLM(pc, null, expectedException);\n}\n@@ -243,44 +210,7 @@ public class GLMTest extends AutomatedTestBase\n@Test\npublic void TestGLMPrivateY(){\nPrivacyConstraint pc = new PrivacyConstraint(PrivacyLevel.Private);\n- Class<?> expectedException = null;\n- switch ( glmType ){\n- case Gaussianinverse:\n- case Poissonlog1:\n- case Poissonlog2:\n- case Poissonsqrt:\n- case Poissonid:\n- case Gammalog:\n- case Gammainverse:\n- case InvGaussian1mu:\n- case InvGaussianinverse:\n- case InvGaussianlog:\n- case InvGaussianid:\n- case Binomialid:\n- case Binomialcauchit:\n- case Gaussianlog:\n- case Gaussianid:\n- case Bernoullilog:\n- case Bernoulliid:\n- case Bernoullisqrt:\n- case Bernoullilogit1:\n- case Bernoullilogit2:\n- case Bernoulliprobit1:\n- case Bernoulliprobit2:\n- case Bernoullicloglog1:\n- case Bernoullicloglog2:\n- case Bernoullicauchit:\n- case Binomiallog:\n- case Binomialsqrt:\n- case Binomiallogit:\n- case Binomialprobit:\n- case Binomialcloglog:\n- expectedException = DMLException.class;\n- break;\n- default:\n- expectedException = null;\n- break;\n- }\n+ Class<?> expectedException = DMLException.class;\ntestGLM(null, pc, expectedException);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2568] Privacy Runtime Extended
Add FederatedWorkerHandlerException And Improved Handling of
Exceptions in FederatedWorkerHandler |
49,738 | 05.07.2020 00:24:45 | -7,200 | aee05c0f25947db19a29773959d5b790ce65a09c | Performance lineage-based reuse (partial rewrites)
This patch makes a minor performance improvement to the important
partial rewrite tsmm(cbind(X,v)) to tsmm(X) + compensation plan, by
avoiding cbind(X, v)[,1:n-1] to extract X if X is still available in the
lineage cache. This avoids unnecessary allocation and copies. | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -345,6 +345,7 @@ SYSTEMDS-410 Lineage Tracing, Reuse and Integration II\n* 416 Lineage deduplication while, nested if, loop sequences OK\n* 417 New rewrite for partial reuse in StepLM OK\n* 418 Performance lineage tracing and reuse probing small data OK\n+ * 419 Performance and robustness partial rewrites\nSYSTEMDS-420 Compiler Improvements\n* 421 Fix invalid IPA scalar propagation into functions OK\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/rewrite/HopRewriteUtils.java",
"new_path": "src/main/java/org/apache/sysds/hops/rewrite/HopRewriteUtils.java",
"diff": "@@ -704,6 +704,10 @@ public class HopRewriteUtils\nreturn createIndexingOp(input, row, row, col, col);\n}\n+ public static IndexingOp createIndexingOp(Hop input, long rl, long ru, long cl, long cu) {\n+ return createIndexingOp(input, new LiteralOp(rl), new LiteralOp(ru), new LiteralOp(cl), new LiteralOp(cu));\n+ }\n+\npublic static IndexingOp createIndexingOp(Hop input, Hop rl, Hop ru, Hop cl, Hop cu) {\nIndexingOp ix = new IndexingOp(\"tmp\", DataType.MATRIX, ValueType.FP64, input, rl, ru, cl, cu, rl==ru, cl==cu);\nix.setBlocksize(input.getBlocksize());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -158,25 +158,23 @@ public class LineageRewriteReuse\nMatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\nlrwec.setVariable(\"oldMatrix\", mo);\nDataOp newMatrix = HopRewriteUtils.createTransientRead(\"oldMatrix\", mo);\n- IndexingOp oldMatrix = HopRewriteUtils.createIndexingOp(newMatrix, new LiteralOp(1),\n- new LiteralOp(mo.getNumRows()), new LiteralOp(1), new LiteralOp(mo.getNumColumns()-1));\n- Hop lastCol;\n+\n+ // Use X from cache, or create rightIndex\n+ Hop oldMatrix = inCache.containsKey(\"X\") ?\n+ setupTReadCachedInput(\"X\", inCache, lrwec) :\n+ HopRewriteUtils.createIndexingOp(newMatrix, 1L, mo.getNumRows(), 1L, mo.getNumColumns()-1);\n+\n// Use deltaX from cache, or create rightIndex\n- if (inCache.containsKey(\"deltaX\")) {\n- MatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n- lastCol = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n- }\n- else\n- lastCol = HopRewriteUtils.createIndexingOp(newMatrix, new LiteralOp(1), new LiteralOp(mo.getNumRows()),\n- new LiteralOp(mo.getNumColumns()), new LiteralOp(mo.getNumColumns()));\n- // cell topRight = t(oldMatrix) %*% lastCol\n- ReorgOp tOldM = HopRewriteUtils.createTranspose(oldMatrix);\n- AggBinaryOp topRight = HopRewriteUtils.createMatrixMultiply(tOldM, lastCol);\n- // cell bottomLeft = t(lastCol) %*% oldMatrix = t(topRight)\n- ReorgOp bottomLeft = HopRewriteUtils.createTranspose(topRight);\n- // bottomRight = t(lastCol) %*% lastCol\n+ Hop lastCol = inCache.containsKey(\"deltaX\") ?\n+ setupTReadCachedInput(\"deltaX\", inCache, lrwec) :\n+ HopRewriteUtils.createIndexingOp(newMatrix, 1L, mo.getNumRows(), mo.getNumColumns(), mo.getNumColumns());\n+\n+ // cell bottomLeft = t(lastCol) %*% oldMatrix\nReorgOp tLastCol = HopRewriteUtils.createTranspose(lastCol);\n+ AggBinaryOp bottomLeft = HopRewriteUtils.createMatrixMultiply(tLastCol, oldMatrix);\n+ // cell topRight = t(oldMatrix) %*% lastCol = t(bottomLeft)\n+ ReorgOp topRight = HopRewriteUtils.createTranspose(bottomLeft);\n+ // bottomRight = t(lastCol) %*% lastCol\nAggBinaryOp bottomRight = HopRewriteUtils.createMatrixMultiply(tLastCol, lastCol);\n// rowOne = cbind(lastRes, topRight)\nBinaryOp rowOne = HopRewriteUtils.createBinary(lastRes, topRight, OpOp2.CBIND);\n@@ -810,12 +808,14 @@ public class LineageRewriteReuse\nif (curr.getOpcode().equalsIgnoreCase(\"tsmm\")) {\nLineageItem source = item.getInputs()[0];\nif (source.getOpcode().equalsIgnoreCase(\"cbind\")) {\n- //for (LineageItem input : source.getInputs()) {\n// create tsmm lineage on top of the input of last append\nLineageItem input1 = source.getInputs()[0];\nLineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {input1});\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", LineageCache.getMatrix(tmp));\n+ // look for the old matrix in cache\n+ if( LineageCache.probe(input1) )\n+ inCache.put(\"X\", LineageCache.getMatrix(input1));\n// look for the appended column in cache\nif (LineageCache.probe(source.getInputs()[1]))\ninCache.put(\"deltaX\", LineageCache.getMatrix(source.getInputs()[1]));\n@@ -846,6 +846,8 @@ public class LineageRewriteReuse\n// create tsmm lineage on top of the input of last append\nLineageItem input1 = source.getInputs()[0];\nLineageItem tmp = new LineageItem(curr.getOpcode(), new LineageItem[] {input1});\n+ if( LineageCache.probe(input1) )\n+ inCache.put(\"X\", LineageCache.getMatrix(input1));\nif (LineageCache.probe(tmp))\ninCache.put(\"lastMatrix\", 
LineageCache.getMatrix(tmp));\n}\n@@ -1178,6 +1180,12 @@ public class LineageRewriteReuse\nreturn newInst;\n}\n+ private static DataOp setupTReadCachedInput(String name, Map<String, MatrixBlock> inCache, ExecutionContext ec) {\n+ MatrixBlock cachedRI = inCache.get(name);\n+ ec.setVariable(name, convMBtoMO(cachedRI));\n+ return HopRewriteUtils.createTransientRead(name, cachedRI);\n+ }\n+\nprivate static void executeInst (ArrayList<Instruction> newInst, ExecutionContext lrwec)\n{\n// Disable explain not to print unnecessary logs\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-419] Performance lineage-based reuse (partial rewrites)
This patch makes a minor performance improvement to the important
partial rewrite tsmm(cbind(X,v)) to tsmm(X) + compensation plan, by
avoiding cbind(X, v)[,1:n-1] to extract X if X is still available in the
lineage cache. This avoids unnecessary allocations and copies. |
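The compensation plan behind this rewrite follows directly from blockwise multiplication: writing the appended matrix as $[X\;v]$,

$$
\operatorname{tsmm}([X\;v]) \;=\; [X\;v]^\top [X\;v] \;=\;
\begin{bmatrix} X^\top X & X^\top v \\ v^\top X & v^\top v \end{bmatrix},
$$

so with $X^\top X$ cached, only the border $v^\top X$ (whose transpose gives $X^\top v$, exactly as the reordered plan in the diff computes it) and the scalar $v^\top v$ remain to be computed; the patch additionally fetches $X$ itself from the lineage cache instead of slicing it back out of $[X\;v]$.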
49,689 | 01.07.2020 22:00:55 | -7,200 | 5e9aefd999e946717f5ee58a4f1ab12c33b8b00f | Add lineage option exposing eviction policies.
This patch exposes cache eviction policies. In addition,
this patch tunes the weights for the scoring function, adds new
reusable instructions and sanity checks. | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -358,3 +358,6 @@ Others:\nSYSTEMDS-510 IO formats\n* 511 Add protobuf support to write and read FrameBlocks to HDFS OK\n+\n+SYSTEMDS-520 Lineage Tracing, Reuse and Integration III\n+ * 521 New lineage option exposing cache policies OK\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLOptions.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLOptions.java",
"diff": "@@ -31,6 +31,7 @@ import org.apache.commons.cli.Options;\nimport org.apache.commons.cli.PosixParser;\nimport org.apache.sysds.common.Types.ExecMode;\nimport org.apache.sysds.hops.OptimizerUtils;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.LineageCachePolicy;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.utils.Explain;\nimport org.apache.sysds.utils.Explain.ExplainType;\n@@ -60,7 +61,8 @@ public class DMLOptions {\npublic boolean help = false; // whether to print the usage option\npublic boolean lineage = false; // whether compute lineage trace\npublic boolean lineage_dedup = false; // whether deduplicate lineage items\n- public ReuseCacheType linReuseType = ReuseCacheType.NONE;\n+ public ReuseCacheType linReuseType = ReuseCacheType.NONE; // reuse type (full, partial, hybrid)\n+ public LineageCachePolicy linCachePolicy= LineageCachePolicy.HYBRID; // lineage cache eviction policy\npublic boolean fedWorker = false;\npublic int fedWorkerPort = -1;\npublic boolean checkPrivacy = false; // Check which privacy constraints are loaded and checked during federated execution\n@@ -127,6 +129,12 @@ public class DMLOptions {\ndmlOptions.linReuseType = ReuseCacheType.REUSE_HYBRID;\nelse if (lineageType.equalsIgnoreCase(\"none\"))\ndmlOptions.linReuseType = ReuseCacheType.NONE;\n+ else if (lineageType.equalsIgnoreCase(\"policy_lru\"))\n+ dmlOptions.linCachePolicy = LineageCachePolicy.LRU;\n+ else if (lineageType.equalsIgnoreCase(\"policy_weighted\"))\n+ dmlOptions.linCachePolicy = LineageCachePolicy.WEIGHTED;\n+ else if (lineageType.equalsIgnoreCase(\"policy_hybrid\"))\n+ dmlOptions.linCachePolicy = LineageCachePolicy.HYBRID;\nelse\nthrow new org.apache.commons.cli.ParseException(\n\"Invalid argument specified for -lineage option: \" + lineageType);\n"
},
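Given this parsing, the eviction policy rides along as an extra token of the existing -lineage option, the same form the CacheEvictionTest below passes programmatically. A command-line invocation would presumably look like this (launcher and script name shown schematically, not taken from this diff):

```
systemds MyScript.dml -lineage reuse_full policy_lru -stats
```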
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"diff": "@@ -64,6 +64,7 @@ import org.apache.sysds.runtime.controlprogram.parfor.util.IDHandler;\nimport org.apache.sysds.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysds.runtime.io.IOUtilFunctions;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.LineageCachePolicy;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.privacy.CheckedConstraintsLog;\nimport org.apache.sysds.runtime.util.LocalFileUtils;\n@@ -93,6 +94,7 @@ public class DMLScript\npublic static boolean LINEAGE = DMLOptions.defaultOptions.lineage; // whether compute lineage trace\npublic static boolean LINEAGE_DEDUP = DMLOptions.defaultOptions.lineage_dedup; // whether deduplicate lineage items\npublic static ReuseCacheType LINEAGE_REUSE = DMLOptions.defaultOptions.linReuseType; // whether lineage-based reuse\n+ public static LineageCachePolicy LINEAGE_POLICY = DMLOptions.defaultOptions.linCachePolicy; // lineage cache eviction policy\npublic static boolean CHECK_PRIVACY = DMLOptions.defaultOptions.checkPrivacy; // Check which privacy constraints are loaded and checked during federated execution\npublic static boolean USE_ACCELERATOR = DMLOptions.defaultOptions.gpu;\n@@ -194,6 +196,7 @@ public class DMLScript\nLINEAGE = dmlOptions.lineage;\nLINEAGE_DEDUP = dmlOptions.lineage_dedup;\nLINEAGE_REUSE = dmlOptions.linReuseType;\n+ LINEAGE_POLICY = dmlOptions.linCachePolicy;\nCHECK_PRIVACY = dmlOptions.checkPrivacy;\nString fnameOptConfig = dmlOptions.configFile;\n@@ -219,6 +222,7 @@ public class DMLScript\n}\nLineageCacheConfig.setConfig(LINEAGE_REUSE);\n+ LineageCacheConfig.setCachePolicy(LINEAGE_POLICY);\nString dmlScriptStr = readDMLScript(isFile, fileOrScript);\nMap<String, String> argVals = dmlOptions.argVals;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -280,10 +280,18 @@ public class LineageCache\nelse\nthrow new DMLRuntimeException(\"Lineage Cache: unsupported data: \"+data.getDataType());\n+ long size = centry.getSize();\n+ //remove the entry if the entry is bigger than the cache.\n+ //FIXME: the resumed threads will enter into infinite wait as the entry\n+ //is removed. Need to add support for graceful remove (placeholder) and resume.\n+ if (size > LineageCacheEviction.getCacheLimit()) {\n+ _cache.remove(item);\n+ continue;\n+ }\n+\n//maintain order for eviction\nLineageCacheEviction.addEntry(centry);\n- long size = centry.getSize();\nif (!LineageCacheEviction.isBelowThreshold(size))\nLineageCacheEviction.makeSpace(_cache, size);\nLineageCacheEviction.updateSize(size, true);\n"
},
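The new guard rejects any entry larger than the entire cache budget before eviction is even attempted, since no amount of evicting could make it fit; the FIXME notes the open issue that threads already blocked on that entry (a placeholder waiting to be filled) would wait forever once it is removed. A toy admission guard illustrating the size check (names and the victim-selection policy are placeholders, not the real API):

```java
import java.util.HashMap;
import java.util.Map;

public class AdmissionGuard {
	private final long cacheLimit;
	private long usedSize = 0;
	private final Map<String, Long> cache = new HashMap<>();

	AdmissionGuard(long limit) { cacheLimit = limit; }

	/** Returns true if the entry was admitted, false if dropped. */
	boolean put(String key, long size) {
		// an entry bigger than the whole cache can never fit:
		// drop it instead of evicting everything else in vain
		if (size > cacheLimit) {
			cache.remove(key);
			return false;
		}
		while (usedSize + size > cacheLimit && !cache.isEmpty()) {
			String victim = cache.keySet().iterator().next(); // placeholder policy
			usedSize -= cache.remove(victim);
		}
		cache.put(key, size);
		usedSize += size;
		return true;
	}

	public static void main(String[] args) {
		AdmissionGuard g = new AdmissionGuard(100);
		System.out.println(g.put("a", 60));  // true
		System.out.println(g.put("b", 50));  // true, after evicting "a"
		System.out.println(g.put("c", 500)); // false: larger than the cache
	}
}
```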
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -39,7 +39,7 @@ public class LineageCacheConfig\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\",\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n\"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n- \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\", \"n+\"\n+ \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\", \"n+\", \"uarimax\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n@@ -223,7 +223,7 @@ public class LineageCacheConfig\nWEIGHTS[0] = 1; WEIGHTS[1] = 0;\nbreak;\ncase HYBRID:\n- WEIGHTS[0] = 1; WEIGHTS[1] = 1;\n+ WEIGHTS[0] = 1; WEIGHTS[1] = 0.0033;\nbreak;\n}\n_cachepolicy = policy;\n@@ -233,9 +233,9 @@ public class LineageCacheConfig\nreturn _cachepolicy;\n}\n- public static boolean isLRU() {\n+ public static boolean isTimeBased() {\n// Check the LRU component of weights array.\n- return (WEIGHTS[1] == 1);\n+ return (WEIGHTS[1] > 0);\n}\npublic static void setSpill(boolean toSpill) {\n"
},
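The tuned weights suggest a composite eviction score blending a compute-per-size utility term with a recency term, consistent with isTimeBased() testing WEIGHTS[1] > 0 and with HYBRID shrinking the recency weight to 0.0033. The actual scoring formula is not part of this diff, so the following is only a speculative sketch of how such a two-term score could be combined:

```java
public class WeightedScore {
	// w[0]: utility term (e.g., compute time per byte), w[1]: recency term
	private static final double[] WEIGHTS = new double[2];

	static void setWeighted() { WEIGHTS[0] = 1; WEIGHTS[1] = 0; }
	static void setLRU()      { WEIGHTS[0] = 0; WEIGHTS[1] = 1; }
	static void setHybrid()   { WEIGHTS[0] = 1; WEIGHTS[1] = 0.0033; }

	static boolean isTimeBased() {
		// any positive recency weight means timestamps must be maintained
		return WEIGHTS[1] > 0;
	}

	// hypothetical composite score: higher means more worth keeping
	static double score(double computeTime, double sizeBytes, double lastUseTs) {
		return WEIGHTS[0] * (computeTime / sizeBytes) + WEIGHTS[1] * lastUseTs;
	}

	public static void main(String[] args) {
		setHybrid();
		System.out.println(score(200.0, 1e6, 1e3)); // 3.3002
		System.out.println(isTimeBased());          // true under HYBRID
	}
}
```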
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"diff": "@@ -80,7 +80,7 @@ public class LineageCacheEviction\nprotected static void getEntry(LineageCacheEntry entry) {\n// Reset the timestamp to maintain the LRU component of the scoring function\n- if (!LineageCacheConfig.isLRU())\n+ if (!LineageCacheConfig.isTimeBased())\nreturn;\nif (weightedQueue.remove(entry)) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/CacheEvictionTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/CacheEvictionTest.java",
"diff": "@@ -92,11 +92,11 @@ public class CacheEvictionTest extends AutomatedTestBase {\nproArgs.add(\"-stats\");\nproArgs.add(\"-lineage\");\nproArgs.add(ReuseCacheType.REUSE_FULL.name().toLowerCase());\n+ proArgs.add(\"policy_lru\");\nproArgs.add(\"-args\");\nproArgs.add(String.valueOf(cacheSize));\nproArgs.add(output(\"R\"));\nprogramArgs = proArgs.toArray(new String[proArgs.size()]);\n- LineageCacheConfig.setCachePolicy(LineageCacheConfig.LineageCachePolicy.LRU);\nrunTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\nHashMap<MatrixValue.CellIndex, Double> R_lru = readDMLMatrixFromHDFS(\"R\");\nlong expCount_lru = Statistics.getCPHeavyHitterCount(\"exp\");\n@@ -108,12 +108,12 @@ public class CacheEvictionTest extends AutomatedTestBase {\nproArgs.add(\"-stats\");\nproArgs.add(\"-lineage\");\nproArgs.add(ReuseCacheType.REUSE_FULL.name().toLowerCase());\n+ proArgs.add(\"policy_weighted\");\nproArgs.add(\"-args\");\nproArgs.add(String.valueOf(cacheSize));\nproArgs.add(output(\"R\"));\nprogramArgs = proArgs.toArray(new String[proArgs.size()]);\nLineage.resetInternalState();\n- LineageCacheConfig.setCachePolicy(LineageCacheConfig.LineageCachePolicy.WEIGHTED);\nrunTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\nHashMap<MatrixValue.CellIndex, Double> R_weighted= readDMLMatrixFromHDFS(\"R\");\nlong expCount_wt = Statistics.getCPHeavyHitterCount(\"exp\");\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-521] Add lineage option exposing eviction policies.
This patch exposes cache eviction policies. In addition,
this patch tunes the weights for the scoring function, adds new
reusable instructions and sanity checks. |
49,738 | 05.07.2020 20:39:02 | -7,200 | 65ef6e4ce575b344a54a2aef5813b3ea1b82102a | Fix bufferpool leak in partial rewrites of lineage cache
This patch fixes a bufferpool leak in the partial rewrites of the
lineage cache, where temporarily constructed matrix objects from cached
matrix blocks were handed over to the bufferpool but never deleted,
leading to lots of unnecessary evictions. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ParForProgramBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ParForProgramBlock.java",
"diff": "@@ -652,7 +652,7 @@ public class ParForProgramBlock extends ForProgramBlock\nfor( String var : _variablesDPOriginal.keySet() ) {\n//cleanup partitioned matrix (if not reused)\nif( !_variablesDPReuse.keySet().contains(var) )\n- VariableCPInstruction.processRemoveVariableInstruction(ec, var);\n+ VariableCPInstruction.processRmvarInstruction(ec, var);\n//reset to original matrix\nMatrixObject mo = (MatrixObject) _variablesDPOriginal.get( var );\nec.setVariable(var, mo);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/EvalNaryCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/EvalNaryCPInstruction.java",
"diff": "@@ -133,7 +133,7 @@ public class EvalNaryCPInstruction extends BuiltinNaryCPInstruction {\n//7. cleanup of variable expanded from list\nif( boundInputs2 != null ) {\nfor( CPOperand op : boundInputs2 )\n- VariableCPInstruction.processRemoveVariableInstruction(ec, op.getName());\n+ VariableCPInstruction.processRmvarInstruction(ec, op.getName());\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"diff": "@@ -538,7 +538,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\ncase RemoveVariable:\nfor( CPOperand input : inputs )\n- processRemoveVariableInstruction(ec, input.getName());\n+ processRmvarInstruction(ec, input.getName());\nbreak;\ncase RemoveVariableAndFile:\n@@ -597,7 +597,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\n//PRE: for robustness we cleanup existing variables, because a setVariable\n//would cause a buffer pool memory leak as these objects would never be removed\nif(ec.containsVariable(getInput1()))\n- processRemoveVariableInstruction(ec, getInput1().getName());\n+ processRmvarInstruction(ec, getInput1().getName());\nswitch(getInput1().getDataType()) {\ncase MATRIX: {\n@@ -992,7 +992,7 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\n* @param ec execution context\n* @param varname variable name\n*/\n- public static void processRemoveVariableInstruction( ExecutionContext ec, String varname ) {\n+ public static void processRmvarInstruction( ExecutionContext ec, String varname ) {\n// remove variable from symbol table\nData dat = ec.removeVariable(varname);\n//cleanup matrix data on fs/hdfs (if necessary)\n"
},
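The renamed processRmvarInstruction is the hook the partial-rewrite cleanup in the next file relies on: a temporary MatrixObject registered in the symbol table stays pinned in the buffer pool until it is explicitly removed, so rewritten instruction sequences must end by removing their scratch variables. A simplified register-use-remove sketch (plain maps and arrays stand in for the actual execution-context and buffer-pool types):

```java
import java.util.HashMap;
import java.util.Map;

public class SymbolTableCleanup {
	// stand-in for the execution context's variable map / buffer pool
	private final Map<String, double[]> vars = new HashMap<>();

	void setVariable(String name, double[] data) { vars.put(name, data); }

	// analogous to processRmvarInstruction: remove the binding so the
	// buffer pool can release (and stop evicting around) the object
	void rmvar(String... names) {
		for (String n : names)
			vars.remove(n);
	}

	double use(String name) { return vars.get(name)[0]; }

	public static void main(String[] args) {
		SymbolTableCleanup ec = new SymbolTableCleanup();
		ec.setVariable("cachedEntry", new double[]{42});
		ec.setVariable("deltaX", new double[]{7});
		System.out.println(ec.use("cachedEntry") + ec.use("deltaX")); // 49.0
		ec.rmvar("cachedEntry", "deltaX"); // without this: a buffer-pool leak
		System.out.println(ec.vars.isEmpty()); // true
	}
}
```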
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -60,6 +60,7 @@ import org.apache.sysds.runtime.instructions.InstructionUtils;\nimport org.apache.sysds.runtime.instructions.cp.ComputationCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.DataGenCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ParameterizedBuiltinCPInstruction;\n+import org.apache.sysds.runtime.instructions.cp.VariableCPInstruction;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.lineage.LineageItem.LineageItemType;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\n@@ -149,8 +150,8 @@ public class LineageRewriteReuse\nreturn null;\n// Create a transient read op over the cached tsmm result\n- MatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ MatrixObject cachedEntry = toMatrixObject(inCache.get(\"lastMatrix\"));\n+ lrwec.setVariable(\"cachedEntry\", cachedEntry);\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n// Create rightIndex op to find the last matrix\n// TODO: For now assumption is that a single column is being appended in a loop\n@@ -189,6 +190,9 @@ public class LineageRewriteReuse\nLOG.debug(\"LINEAGE REWRITE rewriteTsmmCbind APPLIED\");\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"X\", \"deltaX\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -205,7 +209,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the cached tsmm result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n// Create a transient read op over current input\nMatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n@@ -229,6 +233,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -243,7 +250,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last tsmm result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n//TODO: support for block of rows\nMatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n@@ -253,7 +260,7 @@ public class LineageRewriteReuse\n// Use deltaX from cache, or create rightIndex\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastRow = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nelse\n@@ -271,6 +278,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"deltaX\");\n+\nif 
(DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -285,7 +295,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last tsmm result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- MatrixObject newmo = convMBtoMO(cachedEntry);\n+ MatrixObject newmo = toMatrixObject(cachedEntry);\nlrwec.setVariable(\"cachedEntry\", newmo);\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\nMatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n@@ -297,7 +307,7 @@ public class LineageRewriteReuse\n// Use deltaX from cache, or create rightIndex\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastCol = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nelse\n@@ -332,6 +342,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"deltaX\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -350,7 +363,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last tsmm result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- MatrixObject newmo = convMBtoMO(cachedEntry);\n+ MatrixObject newmo = toMatrixObject(cachedEntry);\nlrwec.setVariable(\"cachedEntry\", newmo);\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n@@ -364,7 +377,7 @@ public class LineageRewriteReuse\n// Use deltaX from cache, or create rightIndex\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastCol = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nelse\n@@ -400,6 +413,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"deltaX\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -414,7 +430,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last ba+* result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n//TODO: support for block of rows\nMatrixObject moL = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n@@ -427,7 +443,7 @@ public class LineageRewriteReuse\n// Use deltaX from cache, or create rightIndex\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastRow = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nlastRow = HopRewriteUtils.createIndexingOp(leftMatrix, new LiteralOp(moL.getNumRows()),\n@@ -443,6 +459,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", 
\"deltaX\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -457,7 +476,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last ba+* result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n//TODO: support for block of rows\nMatrixObject moL = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n@@ -470,7 +489,7 @@ public class LineageRewriteReuse\n// Use deltaY from cache, or create rightIndex\nif (inCache.containsKey(\"deltaY\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaY\");\n- lrwec.setVariable(\"deltaY\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaY\", toMatrixObject(cachedRI));\nlastCol = HopRewriteUtils.createTransientRead(\"deltaY\", cachedRI);\n}\nlastCol = HopRewriteUtils.createIndexingOp(rightMatrix, new LiteralOp(1), new LiteralOp(moR.getNumRows()),\n@@ -486,6 +505,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"deltaY\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -502,7 +524,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last ba+* result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\nMatrixObject moL = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\nlrwec.setVariable(\"leftMatrix\", moL);\n@@ -518,6 +540,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -532,14 +557,14 @@ public class LineageRewriteReuse\n// Create a transient read op over the last * result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n//TODO: support for block of rows\nHop lastRowL, lastRowR;\n// Use deltaX and deltaY from cache, or create rightIndices\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastRowL = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nelse {\n@@ -551,7 +576,7 @@ public class LineageRewriteReuse\n}\nif (inCache.containsKey(\"deltaY\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaY\");\n- lrwec.setVariable(\"deltaY\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaY\", toMatrixObject(cachedRI));\nlastRowR = HopRewriteUtils.createTransientRead(\"deltaY\", cachedRI);\n}\nelse {\n@@ -572,6 +597,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, 
\"cachedEntry\", \"deltaX\", \"deltaY\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -586,14 +614,14 @@ public class LineageRewriteReuse\n// Create a transient read op over the last * result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n//TODO: support for block of rows\nHop lastColL, lastColR;\n// Use deltaX and deltaY from cache, or create rightIndices\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastColL = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nelse {\n@@ -605,7 +633,7 @@ public class LineageRewriteReuse\n}\nif (inCache.containsKey(\"deltaY\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaY\");\n- lrwec.setVariable(\"deltaY\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaY\", toMatrixObject(cachedRI));\nlastColR = HopRewriteUtils.createTransientRead(\"deltaY\", cachedRI);\n}\nelse {\n@@ -626,6 +654,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"deltaX\", \"deltaY\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -640,7 +671,7 @@ public class LineageRewriteReuse\n// Create a transient read op over the last * result\nMatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- lrwec.setVariable(\"cachedEntry\", convMBtoMO(cachedEntry));\n+ lrwec.setVariable(\"cachedEntry\", toMatrixObject(cachedEntry));\nDataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n//TODO: support for block of rows\nHashMap<String, String> params = ((ParameterizedBuiltinCPInstruction)curr).getParameterMap();\n@@ -657,7 +688,7 @@ public class LineageRewriteReuse\n// Use deltaX from cache, or create rightIndex\nif (inCache.containsKey(\"deltaX\")) {\nMatrixBlock cachedRI = inCache.get(\"deltaX\");\n- lrwec.setVariable(\"deltaX\", convMBtoMO(cachedRI));\n+ lrwec.setVariable(\"deltaX\", toMatrixObject(cachedRI));\nlastCol = HopRewriteUtils.createTransientRead(\"deltaX\", cachedRI);\n}\nelse\n@@ -680,6 +711,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"deltaX\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -702,14 +736,14 @@ public class LineageRewriteReuse\n// Create a transient read op over the input to rightIndex\nMatrixBlock indexSource = inCache.get(\"indexSource\");\n- lrwec.setVariable(\"indexSource\", convMBtoMO(indexSource));\n+ lrwec.setVariable(\"indexSource\", toMatrixObject(indexSource));\nDataOp input2Index = HopRewriteUtils.createTransientRead(\"indexSource\", indexSource);\n// Create or read the matrix multiplication\nHop matMultRes;\nMatrixObject moL = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\nif (inCache.containsKey(\"BigMatMult\")) {\n- MatrixBlock BigMatMult = inCache.get(\"BigMatMult\");\n- lrwec.setVariable(\"BigMatMult\", convMBtoMO(BigMatMult));\n+ MatrixObject BigMatMult = 
toMatrixObject(inCache.get(\"BigMatMult\"));\n+ lrwec.setVariable(\"BigMatMult\", BigMatMult);\nmatMultRes = HopRewriteUtils.createTransientRead(\"BigMatMult\", BigMatMult);\n}\nelse {\n@@ -731,6 +765,9 @@ public class LineageRewriteReuse\n// Keep reuse enabled\n_disableReuse = false;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"indexSource\", \"BigMatMult\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -743,10 +780,9 @@ public class LineageRewriteReuse\nreturn null;\n// Create a transient read op over the last tsmm result\n- MatrixBlock cachedEntry = inCache.get(\"lastMatrix\");\n- MatrixObject newmo = convMBtoMO(cachedEntry);\n+ MatrixObject newmo = toMatrixObject(inCache.get(\"lastMatrix\"));\nlrwec.setVariable(\"cachedEntry\", newmo);\n- DataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", cachedEntry);\n+ DataOp lastRes = HopRewriteUtils.createTransientRead(\"cachedEntry\", newmo);\n// Create a transient read op over this tsmm's input\nMatrixObject mo = ec.getMatrixObject(((ComputationCPInstruction)curr).input1);\n@@ -754,8 +790,7 @@ public class LineageRewriteReuse\nDataOp newMatrix = HopRewriteUtils.createTransientRead(\"oldMatrix\", mo);\n// Index out the added column from the projected matrix\n- MatrixBlock projected = inCache.get(\"projected\");\n- MatrixObject projmo = convMBtoMO(projected);\n+ MatrixObject projmo = toMatrixObject(inCache.get(\"projected\"));\nlrwec.setVariable(\"projected\", projmo);\nDataOp projRes = HopRewriteUtils.createTransientRead(\"projected\", projmo);\nIndexingOp lastCol = HopRewriteUtils.createIndexingOp(projRes, new LiteralOp(1), new LiteralOp(projmo.getNumRows()),\n@@ -790,6 +825,9 @@ public class LineageRewriteReuse\nArrayList<Instruction> inst = genInst(lrwWrite, lrwec);\n_disableReuse = true;\n+ // cleanup buffer pool\n+ addRmvarInstructions(inst, lrwec, \"cachedEntry\", \"projected\");\n+\nif (DMLScript.STATISTICS)\nLineageCacheStatistics.incrementPRewrites();\nreturn inst;\n@@ -1182,7 +1220,7 @@ public class LineageRewriteReuse\nprivate static DataOp setupTReadCachedInput(String name, Map<String, MatrixBlock> inCache, ExecutionContext ec) {\nMatrixBlock cachedRI = inCache.get(name);\n- ec.setVariable(name, convMBtoMO(cachedRI));\n+ ec.setVariable(name, toMatrixObject(cachedRI));\nreturn HopRewriteUtils.createTransientRead(name, cachedRI);\n}\n@@ -1210,14 +1248,23 @@ public class LineageRewriteReuse\n/*-------------------------------UTILITY METHODS----------------------------------*/\n- private static MatrixObject convMBtoMO (MatrixBlock cachedEntry) {\n- MetaData md = new MetaData(cachedEntry.getDataCharacteristics());\n- MatrixObject mo = new MatrixObject(ValueType.FP64, \"cachedEntry\", md);\n- mo.acquireModify(cachedEntry);\n+ private static MatrixObject toMatrixObject(MatrixBlock mb) {\n+ MetaData md = new MetaData(mb.getDataCharacteristics());\n+ MatrixObject mo = new MatrixObject(ValueType.FP64, null, md);\n+ mo.acquireModify(mb);\nmo.release();\nreturn mo;\n}\n+ private static void addRmvarInstructions(ArrayList<Instruction> inst, ExecutionContext ec, String... 
varnames) {\n+ //note: we can't directly call rmvar because the instructions are not executed yet\n+ ArrayList<String> tmp = new ArrayList<>();\n+ for(String varname : varnames)\n+ if(ec.containsVariable(varname))\n+ tmp.add(varname);\n+ inst.add(VariableCPInstruction.prepareRemoveInstruction(tmp.toArray(new String[0])));\n+ }\n+\nprivate static LineageItem reduceColByOne(LineageItem cu) {\nString old_data = null;\ntry {\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/PartialReuseTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.lineage;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.hops.recompile.Recompiler;\n+import org.apache.sysds.runtime.controlprogram.caching.CacheStatistics;\n+import org.apache.sysds.runtime.lineage.Lineage;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+\n+import java.util.ArrayList;\n+import java.util.HashMap;\n+import java.util.List;\n+\n+public class PartialReuseTest extends AutomatedTestBase {\n+\n+ protected static final String TEST_DIR = \"functions/lineage/\";\n+ protected static final String TEST_NAME1 = \"PartialReuse1\";\n+ protected String TEST_CLASS_DIR = TEST_DIR + PartialReuseTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1));\n+ }\n+\n+ @Test\n+ public void testLineageTrace1CP() {\n+ //test partial reuse in CP (i.e., w/o reuse-aware recompilation)\n+ testLineageTraceReuse(TEST_NAME1, ExecMode.SINGLE_NODE);\n+ }\n+\n+// @Test\n+// public void testLineageTrace1Hybrid() {\n+// //test partial reuse in Hybrid (i.e., w/ reuse-aware recompilation)\n+// testLineageTraceReuse(TEST_NAME1, ExecMode.HYBRID);\n+// }\n+\n+\n+ public void testLineageTraceReuse(String testname, ExecMode et) {\n+ ExecMode execModeOld = setExecMode(et);\n+\n+ try {\n+ getAndLoadTestConfiguration(testname);\n+ fullDMLScriptName = getScript();\n+\n+ // Without lineage-based reuse enabled\n+ List<String> proArgs = new ArrayList<>();\n+ proArgs.add(\"-stats\");\n+ proArgs.add(\"-lineage\");\n+ proArgs.add(\"-args\");\n+ proArgs.add(output(\"X\"));\n+ programArgs = proArgs.toArray(new String[proArgs.size()]);\n+ Lineage.resetInternalState();\n+ runTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n+ HashMap<MatrixValue.CellIndex, Double> X_orig = readDMLMatrixFromHDFS(\"X\");\n+\n+ // With lineage-based reuse enabled\n+ proArgs.clear();\n+ proArgs.add(\"-stats\");\n+ proArgs.add(\"-lineage\");\n+ proArgs.add(ReuseCacheType.REUSE_HYBRID.name().toLowerCase());\n+ proArgs.add(\"-args\");\n+ proArgs.add(output(\"X\"));\n+ programArgs = proArgs.toArray(new String[proArgs.size()]);\n+ Lineage.resetInternalState();\n+ Lineage.setLinReuseFull();\n+ runTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n+ HashMap<MatrixValue.CellIndex, Double> X_reused = readDMLMatrixFromHDFS(\"X\");\n+ 
Lineage.setLinReuseNone();\n+\n+ //compare matrices\n+ TestUtils.compareMatrices(X_orig, X_reused, 1e-6, \"Origin\", \"Reused\");\n+\n+ //check no evictions (previously buffer pool leak)\n+ Assert.assertEquals(0, CacheStatistics.getFSWrites());\n+ }\n+ finally {\n+ resetExecMode(execModeOld);\n+ Recompiler.reinitRecompiler();\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/PartialReuse1.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Increase k for better performance gains\n+\n+X = rand(rows=20000, cols=300, seed=42);\n+sum = 0;\n+tmp = matrix(0, rows=nrow(X), cols=0);\n+R = matrix(0, 1, ncol(X));\n+\n+for (i in 1:ncol(X)) {\n+ tmp = cbind(tmp, X[,i]);\n+ Res1 = t(tmp) %*% tmp;\n+ while(FALSE) {}\n+ R[1,i] = sum(Res1);\n+}\n+\n+write(R, $1, format=\"text\");\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-419] Fix bufferpool leak in partial rewrites of lineage cache
This patch fixes a bufferpool leak in the partial rewrites of the
lineage cache, where temporarily constructed matrix objects from cached
matrix blocks were handed over to the bufferpool but never deleted,
leading to lots of unnecessary evictions. |
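For readability, the core of this fix, condensed from the large diff above, is the pattern of appending an explicit rmvar instruction for the temporarily constructed matrix objects. Only the wrapping class name below is invented for illustration; all calls appear verbatim in the diff:

```java
import java.util.ArrayList;
import org.apache.sysds.runtime.controlprogram.context.ExecutionContext;
import org.apache.sysds.runtime.instructions.Instruction;
import org.apache.sysds.runtime.instructions.cp.VariableCPInstruction;

// Sketch: temporary MatrixObjects wrapping cached MatrixBlocks are bound into
// the rewrite execution context; scheduling an rmvar as the last generated
// instruction releases them from the buffer pool and prevents the leak.
class RmvarCleanupSketch {
  static void addRmvarInstructions(ArrayList<Instruction> inst, ExecutionContext ec, String... varnames) {
    // rmvar cannot run here directly because 'inst' has not been executed yet
    ArrayList<String> tmp = new ArrayList<>();
    for (String varname : varnames)
      if (ec.containsVariable(varname)) // remove only variables actually bound
        tmp.add(varname);
    inst.add(VariableCPInstruction.prepareRemoveInstruction(tmp.toArray(new String[0])));
  }
}
```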
49,700 | 07.07.2020 10:57:17 | -7,200 | da6dd12426302329c58d7b0bbd4f5e86346fef3c | [MINOR] Fix Task Numbers And Completion
A slight discrepancy has crept into the numbering of the tasks in
`/dev/Tasks.txt`; this minor commit fixes that.
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -314,15 +314,15 @@ SYSTEMDS-360 Privacy/Data Exchange Constraints\n* 361 Initial privacy meta data (compiler/runtime) OK\n* 362 Runtime privacy propagation\n* 363 Compile-time privacy propagation\n- * 364 Error handling violated privacy constraints\n- * 365 Extended privacy/data exchange constraints\n+ * 364 Error handling violated privacy constraints OK\n+ * 365 Extended privacy/data exchange constraints OK\nSYSTEMDS-370 Lossy Compression Blocks\n- * 361 ColGroup Quantization\n- * 362 ColGroup Base Data change (from Double to ??)\n+ * 371 ColGroup Quantization\n+ * 372 ColGroup Base Data change (from Double to ??)\nSYSTEMDS-380 Memory Footprint\n- * 371 Matrix Block Memory footprint update\n+ * 381 Matrix Block Memory footprint update\nSYSTEMDS-390 New Builtin Functions IV\n* 391 New GLM builtin function (from algorithms) OK\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix Task Numbers And Completion
A slight discrepancy has crept into the numbering of the tasks in
`/dev/Tasks.txt`; this minor commit fixes that.
Closes #983 |
49,698 | 04.06.2020 10:06:34 | -19,080 | 7cdb51d18ee1a9d7a957f366a71d04ca818bdf91 | [DOC][2/2] Builtin KMeans function example added
* Example of invoking the KMeans function
* Update the KMeans default number of centroids in the dml script.
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/site/builtins-reference.md",
"new_path": "docs/site/builtins-reference.md",
"diff": "@@ -435,6 +435,11 @@ kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = F\n| String | The mapping of records to centroids |\n| String | The output matrix with the centroids |\n+### Example\n+```r\n+X = rand (rows = 3972, cols = 972)\n+kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = FALSE, avg_sample_size_per_centroid = 50)\n+```\n## `lm`-Function\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/kmeans.dml",
"new_path": "scripts/builtin/kmeans.dml",
"diff": "# ----------------------------------------------------------------------------\n-m_kmeans = function(Matrix[Double] X, Integer k = 0, Integer runs = 10, Integer max_iter = 1000,\n+m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer max_iter = 1000,\nDouble eps = 0.000001, Boolean is_verbose = FALSE, Integer avg_sample_size_per_centroid = 50)\nreturn (Matrix[Double] C, Matrix[Double] Y)\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOC][2/2] Builtin KMeans function example added
* Example of invoking the KMeans function
* Update the KMeans default number of centroids in the dml script.
Closes #947. |
49,738 | 10.07.2020 22:26:49 | -7,200 | 4d1e90079e118cea1ec27789d2cd273f650d3e41 | [MINOR] Cleanups (lineage, java warnings, tasks, formatting, seeds) | [
{
"change_type": "MODIFY",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks.txt",
"diff": "@@ -357,8 +357,11 @@ Others:\n* Break append instruction to cbind and rbind\nSYSTEMDS-510 IO formats\n- * 511 Add protobuf support to write and read FrameBlocks to HDFS OKSYSTEMDS-520 Lineage Tracing, Reuse and Integration III\n+ * 511 Add protobuf support to write and read FrameBlocks to HDFS OK\n+\n+SYSTEMDS-520 Lineage Tracing, Reuse and Integration III\n* 521 New lineage option exposing cache policies OK\nSYSTEMDS-610 Cleaning Pipelines\n* 611 Initial Brute force execution OK\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/executePipeline.dml",
"new_path": "scripts/builtin/executePipeline.dml",
"diff": "#\n#-------------------------------------------------------------\n-s_executePipeline = function(Frame[String] pipeline, Matrix[Double] X, List[Unknown] hyperParameters, Boolean verbose)\n+s_executePipeline = function(Frame[String] pipeline, Matrix[Double] X,\n+ List[Unknown] hyperParameters, Boolean verbose)\nreturn (Matrix[Double] X)\n{\nfor(i in 1: ncol(pipeline)) {\nhp = matrixToList(X, as.matrix(hyperParameters[i]))\nX = eval(as.scalar(pipeline[1,i]), hp)\n}\n-\n}\n-\n# This function will convert the matrix row-vector into list\nmatrixToList = function(Matrix[Double] X, Matrix[Double] p)\nreturn (List[Unknown] l)\n{\nl = list()\nl = append(l, X)\n- if(sum(p) != -1)\n- {\n+ if(sum(p) != -1) {\nfor(i in 1:ncol(p))\nl = append(l, as.scalar(p[1,i]))\n}\nl = append(l, FALSE) #verbose parameter value\n}\n-\n-\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/pipelines/enumerator.dml",
"new_path": "scripts/staging/pipelines/enumerator.dml",
"diff": "@@ -259,7 +259,6 @@ return (Double psum)\nelse\npsum = 0.0\n}\n-\n}\n# This function will compute the top k pipelines from the results of all executions\n@@ -323,7 +322,6 @@ return(Matrix[Double] X)\nfclassify = function(Matrix[Double] X)\nreturn (Double accuracy)\n{\n-\nif(min(X[,1]) < 1)\nstop(\"Y should contain value greater than zero\")\n@@ -344,7 +342,6 @@ return (Double accuracy)\nnTest = nrow(testSet)\ndTest = ncol(testSet)\n-\ntrain_X = trainSet[, 2:dTrain]\ntrain_Y = trainSet[, 1]\n@@ -353,12 +350,9 @@ return (Double accuracy)\nbetas = multiLogReg(X=train_X, Y=train_Y, icpt=2, tol=1e-9, reg=1.2, maxi=100, maxii=0, verbose=FALSE)\n[prob, yhat, accuracy] = multiLogRegPredict(test_X, betas, test_Y, FALSE)\n-\n}\n-\n-\n##########################################\n## Call the function Enumerator\n#########################################\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedResponse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedResponse.java",
"diff": "@@ -101,7 +101,7 @@ public class FederatedResponse implements Serializable {\n*/\npublic void setCheckedConstraints(Map<PrivacyLevel,LongAdder> checkedConstraints){\nif ( checkedConstraints != null && !checkedConstraints.isEmpty() ){\n- this.checkedConstraints = new EnumMap<PrivacyLevel, LongAdder>(PrivacyLevel.class);\n+ this.checkedConstraints = new EnumMap<>(PrivacyLevel.class);\nthis.checkedConstraints.putAll(checkedConstraints);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -92,7 +92,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\n}\n}\n- private void conditionalAddCheckedConstraints(FederatedRequest request, FederatedResponse response){\n+ private static void conditionalAddCheckedConstraints(FederatedRequest request, FederatedResponse response){\nif ( request.checkPrivacy() )\nresponse.setCheckedConstraints(PrivacyMonitor.getCheckedConstraints());\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -371,8 +371,8 @@ public class LineageCache\nprivate static LineageCacheEntry getIntern(LineageItem key) {\n// This method is called only when entry is present either in cache or in local FS.\n- if (_cache.containsKey(key) && _cache.get(key).getCacheStatus() != LineageCacheStatus.SPILLED) {\nLineageCacheEntry e = _cache.get(key);\n+ if (e != null && e.getCacheStatus() != LineageCacheStatus.SPILLED) {\n// Maintain order for eviction\nLineageCacheEviction.getEntry(e);\nif (DMLScript.STATISTICS)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItem.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItem.java",
"diff": "@@ -77,6 +77,9 @@ public class LineageItem {\n_opcode = opcode;\n_data = data;\n_inputs = inputs;\n+ // materialize hash on construction\n+ // (constant time operation if input hashes constructed)\n+ _hash = hashCode();\n}\npublic LineageItem[] getInputs() {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/privacy/CheckedConstraintsLog.java",
"new_path": "src/main/java/org/apache/sysds/runtime/privacy/CheckedConstraintsLog.java",
"diff": "@@ -30,12 +30,12 @@ import org.apache.sysds.runtime.privacy.PrivacyConstraint.PrivacyLevel;\n* Class counting the checked privacy constraints and the loaded privacy constraints.\n*/\npublic class CheckedConstraintsLog {\n- private static Map<PrivacyLevel,LongAdder> loadedConstraintsTotal = new EnumMap<PrivacyLevel,LongAdder>(PrivacyLevel.class);\n+ private static Map<PrivacyLevel,LongAdder> loadedConstraintsTotal = new EnumMap<>(PrivacyLevel.class);\nstatic {\nfor ( PrivacyLevel level : PrivacyLevel.values() )\nloadedConstraintsTotal.put(level, new LongAdder());\n}\n- private static Map<PrivacyLevel,LongAdder> checkedConstraintsTotal = new EnumMap<PrivacyLevel,LongAdder>(PrivacyLevel.class);\n+ private static Map<PrivacyLevel,LongAdder> checkedConstraintsTotal = new EnumMap<>(PrivacyLevel.class);\nprivate static BiFunction<LongAdder, LongAdder, LongAdder> mergeLongAdders = (v1, v2) -> {\nv1.add(v2.longValue() );\nreturn v1;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/privacy/PrivacyMonitor.java",
"new_path": "src/main/java/org/apache/sysds/runtime/privacy/PrivacyMonitor.java",
"diff": "@@ -34,7 +34,7 @@ public class PrivacyMonitor\nprivate static EnumMap<PrivacyLevel,LongAdder> checkedConstraints;\nstatic {\n- checkedConstraints = new EnumMap<PrivacyLevel,LongAdder>(PrivacyLevel.class);\n+ checkedConstraints = new EnumMap<>(PrivacyLevel.class);\nfor ( PrivacyLevel level : PrivacyLevel.values() ){\ncheckedConstraints.put(level, new LongAdder());\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMulticlassSVMTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMulticlassSVMTest.java",
"diff": "@@ -113,8 +113,8 @@ public class BuiltinMulticlassSVMTest extends AutomatedTestBase {\nInteger.toString(run),\nexpectedDir());\n- double[][] X = getRandomMatrix(rows, colsX, 0, 1, sparsity, -1);\n- double[][] Y = getRandomMatrix(rows, 1, 0, 10, 1, -1);\n+ double[][] X = getRandomMatrix(rows, colsX, 0, 1, sparsity, 2);\n+ double[][] Y = getRandomMatrix(rows, 1, 0, 10, 1, 3);\nY = TestUtils.round(Y);\nwriteInputMatrixWithMTD(\"X\", X, true);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/frame/FrameDropInvalidLengthTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/frame/FrameDropInvalidLengthTest.java",
"diff": "@@ -186,14 +186,15 @@ public class FrameDropInvalidLengthTest extends AutomatedTestBase {\n}\n}\n- private ArrayList<Integer> getBadIndexes(int length) {\n- ArrayList<Integer> list = new ArrayList();\n+ private static ArrayList<Integer> getBadIndexes(int length) {\n+ ArrayList<Integer> list = new ArrayList<>();\nfor(int i =0; i<length; i++)\n{\nint r = ThreadLocalRandom.current().nextInt(0, rows);\nlist.add(r);\n}\n- return (ArrayList) list.stream().distinct().collect(Collectors.toList());\n+ return (ArrayList<Integer>) list.stream()\n+ .distinct().collect(Collectors.toList());\n}\npublic static void initFrameDataString(FrameBlock frame1, double[][] data, Types.ValueType[] lschema) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/privacy/CheckedConstraintsLogTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/CheckedConstraintsLogTest.java",
"diff": "@@ -79,7 +79,7 @@ public class CheckedConstraintsLogTest extends AutomatedTestBase {\n&& CheckedConstraintsLog.getCheckedConstraints().get(PrivacyLevel.PrivateAggregation).longValue() == 150);\n}\n- private EnumMap<PrivacyLevel,LongAdder> getMap(PrivacyLevel level, long value){\n+ private static EnumMap<PrivacyLevel,LongAdder> getMap(PrivacyLevel level, long value){\nEnumMap<PrivacyLevel,LongAdder> checked = new EnumMap<>(PrivacyLevel.class);\nLongAdder valueAdder = new LongAdder();\nvalueAdder.add(value);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Cleanups (lineage, java warnings, tasks, formatting, seeds) |
49,695 | 14.07.2020 12:57:24 | -7,200 | bbd698ca394ad0a143ac6c54f45a7bd158db60ba | [MINOR] Check for extra values in test matrix comparison.
In the case of expected matrices with zero values, TestUtils was not
correctly checking whether the actual test output matrix has
values at these positions.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/TestUtils.java",
"new_path": "src/test/java/org/apache/sysds/test/TestUtils.java",
"diff": "@@ -218,9 +218,13 @@ public class TestUtils\nreadValuesFromFileStreamAndPut(outIn, actualValues);\n}\n+ Set<CellIndex> allKeys = new HashSet<>();\n+ allKeys.addAll(expectedValues.keySet());\n+ if(expectedValues.size() != actualValues.size())\n+ allKeys.addAll(actualValues.keySet());\nint countErrors = 0;\n- for (CellIndex index : expectedValues.keySet()) {\n+ for (CellIndex index : allKeys) {\nDouble expectedValue = expectedValues.get(index);\nDouble actualValue = actualValues.get(index);\nif (expectedValue == null)\n@@ -346,8 +350,12 @@ public class TestUtils\nreadActualAndExpectedFile(null, expectedFile, actualDir, expectedValues, actualValues);\n+ Set<CellIndex> allKeys = new HashSet<>();\n+ allKeys.addAll(expectedValues.keySet());\n+ if(expectedValues.size() != actualValues.size())\n+ allKeys.addAll(actualValues.keySet());\nint countErrors = 0;\n- for(CellIndex index : expectedValues.keySet()) {\n+ for(CellIndex index : allKeys) {\nDouble expectedValue = (Double) expectedValues.get(index);\nDouble actualValue = (Double) actualValues.get(index);\nif(expectedValue == null)\n@@ -383,8 +391,12 @@ public class TestUtils\nreadActualAndExpectedFile(schema, expectedFile, actualDir, expectedValues, actualValues);\n+ Set<CellIndex> allKeys = new HashSet<>();\n+ allKeys.addAll(expectedValues.keySet());\n+ if(expectedValues.size() != actualValues.size())\n+ allKeys.addAll(actualValues.keySet());\nint countErrors = 0;\n- for(CellIndex index : expectedValues.keySet()) {\n+ for(CellIndex index : allKeys) {\nObject expectedValue = expectedValues.get(index);\nObject actualValue = actualValues.get(index);\n@@ -1107,9 +1119,13 @@ public class TestUtils\nFSDataInputStream fsout = fs.open(file.getPath());\nreadValuesFromFileStream(fsout, actualValues);\n}\n+ Set<CellIndex> allKeys = new HashSet<>();\n+ allKeys.addAll(expectedValues.keySet());\n+ if(expectedValues.size() != actualValues.size())\n+ allKeys.addAll(actualValues.keySet());\nint countErrors = 0;\n- for (CellIndex index : expectedValues.keySet()) {\n+ for (CellIndex index : allKeys) {\nDouble expectedValue = expectedValues.get(index);\nDouble actualValue = actualValues.get(index);\nif (expectedValue == null)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Check for extra values in test matrix comparison.
In the case of expected matrices with zero values, TestUtils was not
correctly checking whether the actual test output matrix has
values at these positions.
Closes #987 |
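The reasoning behind this fix is easy to reproduce in isolation: iterating only the expected map silently skips cells that exist solely in the actual output, whereas iterating the union of both key sets catches them. Below is a minimal, self-contained sketch of that check; the class name and string cell keys are hypothetical, missing cells are treated as implicit zeros to match sparse text matrix semantics, and the actual commit additionally short-circuits the union when both maps have equal size:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical helper mirroring the union-of-keys comparison from the diff.
public class SparseCompareSketch {
  public static int countErrors(Map<String, Double> expected, Map<String, Double> actual, double eps) {
    Set<String> allKeys = new HashSet<>(expected.keySet());
    allKeys.addAll(actual.keySet()); // cells present only in 'actual' are checked too
    int countErrors = 0;
    for (String index : allKeys) {
      double e = expected.getOrDefault(index, 0.0); // absent cell == zero
      double a = actual.getOrDefault(index, 0.0);
      if (Math.abs(e - a) > eps)
        countErrors++;
    }
    return countErrors;
  }

  public static void main(String[] args) {
    Map<String, Double> exp = new HashMap<>();
    exp.put("1,1", 2.0); // cell (2,2) is an implicit zero in 'expected'
    Map<String, Double> act = new HashMap<>();
    act.put("1,1", 2.0);
    act.put("2,2", 5.0); // extra non-zero value the old check missed
    System.out.println(countErrors(exp, act, 1e-9)); // prints 1
  }
}
```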
49,706 | 14.07.2020 12:29:22 | -7,200 | e20b5ad1f55025260323ee8b40467ffdca72bf77 | [MINOR] fix license on dev/docs/windows-source-installation.md
Closes | [
{
"change_type": "MODIFY",
"old_path": "dev/docs/windows-source-installation.md",
"new_path": "dev/docs/windows-source-installation.md",
"diff": "+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+\n## Developing Apache SystemDS on Windows Platform\nThese instructions will help you build Apache SystemDS from source code, which is the basis for the engine\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] fix license on dev/docs/windows-source-installation.md
Closes #989 |
49,706 | 16.07.2020 17:06:43 | -7,200 | 520d8bb117b0f596b752b07855954439c442fdce | [MINOR] Fix docs at currently failing locations | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItemUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageItemUtils.java",
"diff": "@@ -659,6 +659,10 @@ public class LineageItemUtils {\n/**\n* Non-recursive equivalent of {@link #rReplace(LineageItem, LineageItem, LineageItem)}\n* for robustness with regard to stack overflow errors.\n+ *\n+ * @param current Current lineage item\n+ * @param liOld Old lineage item\n+ * @param liNew New Lineage item.\n*/\npublic static void rReplaceNR(LineageItem current, LineageItem liOld, LineageItem liNew) {\nStack<LineageItem> q = new Stack<>();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/privacy/PrivacyMonitor.java",
"new_path": "src/main/java/org/apache/sysds/runtime/privacy/PrivacyMonitor.java",
"diff": "@@ -116,7 +116,8 @@ public class PrivacyMonitor\n/**\n* Throw DMLPrivacyException if privacy is activated for the input variable\n- * @param input variable for which the privacy constraint is checked\n+ * @param input Variable for which the privacy constraint is checked\n+ * @param ec The execution context associated with the operand.\n*/\npublic static void handlePrivacyScalarOutput(CPOperand input, ExecutionContext ec) {\nData data = ec.getVariable(input);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix docs at currently failing locations |
49,689 | 17.07.2020 13:06:35 | -7,200 | cc6bffd37e3d6a7275807bef9292950d9b52fd2c | Rename lineage cache eviction policies
This patch renames the WEIGHTED policy to COSTNSIZE. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLOptions.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLOptions.java",
"diff": "@@ -132,7 +132,7 @@ public class DMLOptions {\nelse if (lineageType.equalsIgnoreCase(\"policy_lru\"))\ndmlOptions.linCachePolicy = LineageCachePolicy.LRU;\nelse if (lineageType.equalsIgnoreCase(\"policy_weighted\"))\n- dmlOptions.linCachePolicy = LineageCachePolicy.WEIGHTED;\n+ dmlOptions.linCachePolicy = LineageCachePolicy.COSTNSIZE;\nelse if (lineageType.equalsIgnoreCase(\"policy_hybrid\"))\ndmlOptions.linCachePolicy = LineageCachePolicy.HYBRID;\nelse\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -116,7 +116,7 @@ public class LineageCacheConfig\npublic enum LineageCachePolicy {\nLRU,\n- WEIGHTED,\n+ COSTNSIZE,\nHYBRID;\n}\n@@ -219,7 +219,7 @@ public class LineageCacheConfig\ncase LRU:\nWEIGHTS[0] = 0; WEIGHTS[1] = 1;\nbreak;\n- case WEIGHTED:\n+ case COSTNSIZE:\nWEIGHTS[0] = 1; WEIGHTS[1] = 0;\nbreak;\ncase HYBRID:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/CacheEvictionTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/CacheEvictionTest.java",
"diff": "@@ -103,19 +103,19 @@ public class CacheEvictionTest extends AutomatedTestBase {\nlong hitCount_lru = LineageCacheStatistics.getInstHits();\nlong evictedCount_lru = LineageCacheStatistics.getMemDeletes();\n- // Weighted scheme (computationTime/Size)\n+ // costnsize scheme (computationTime/Size)\nproArgs.clear();\nproArgs.add(\"-stats\");\nproArgs.add(\"-lineage\");\nproArgs.add(ReuseCacheType.REUSE_FULL.name().toLowerCase());\n- proArgs.add(\"policy_weighted\");\n+ proArgs.add(\"policy_costnsize\");\nproArgs.add(\"-args\");\nproArgs.add(String.valueOf(cacheSize));\nproArgs.add(output(\"R\"));\nprogramArgs = proArgs.toArray(new String[proArgs.size()]);\nLineage.resetInternalState();\nrunTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n- HashMap<MatrixValue.CellIndex, Double> R_weighted= readDMLMatrixFromHDFS(\"R\");\n+ HashMap<MatrixValue.CellIndex, Double> R_costnsize= readDMLMatrixFromHDFS(\"R\");\nlong expCount_wt = Statistics.getCPHeavyHitterCount(\"exp\");\nlong hitCount_wt = LineageCacheStatistics.getInstHits();\nlong evictedCount_wt = LineageCacheStatistics.getMemDeletes();\n@@ -123,7 +123,7 @@ public class CacheEvictionTest extends AutomatedTestBase {\n// Compare results\nLineage.setLinReuseNone();\n- TestUtils.compareMatrices(R_lru, R_weighted, 1e-6, \"LRU\", \"Weighted\");\n+ TestUtils.compareMatrices(R_lru, R_costnsize, 1e-6, \"LRU\", \"costnsize\");\n// Compare reused instructions\nAssert.assertTrue(expCount_lru > expCount_wt);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-335] Rename lineage cache eviction policies
This patch renames the WEIGHTED policy to COSTNSIZE. |
49,689 | 18.07.2020 09:33:37 | -7,200 | 1b3e86292631c62a532030144be26fbe6eb46f2d | Fix bug in exposed eviction policy names | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLOptions.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLOptions.java",
"diff": "@@ -131,7 +131,7 @@ public class DMLOptions {\ndmlOptions.linReuseType = ReuseCacheType.NONE;\nelse if (lineageType.equalsIgnoreCase(\"policy_lru\"))\ndmlOptions.linCachePolicy = LineageCachePolicy.LRU;\n- else if (lineageType.equalsIgnoreCase(\"policy_weighted\"))\n+ else if (lineageType.equalsIgnoreCase(\"policy_costnsize\"))\ndmlOptions.linCachePolicy = LineageCachePolicy.COSTNSIZE;\nelse if (lineageType.equalsIgnoreCase(\"policy_hybrid\"))\ndmlOptions.linCachePolicy = LineageCachePolicy.HYBRID;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-335] Fix bug in exposed eviction policy names |
49,721 | 20.07.2020 18:51:36 | -7,200 | 586e910a1f6d728ab2fc80ea716cf0545946bb19 | Hyperband built-in function (hyper-param optimization)
AMLS project SS2020.
Closes | [
{
"change_type": "RENAME",
"old_path": "dev/Tasks.txt",
"new_path": "dev/Tasks-obsolete.txt",
"diff": "@@ -365,3 +365,4 @@ SYSTEMDS-520 Lineage Tracing, Reuse and Integration III\nSYSTEMDS-610 Cleaning Pipelines\n* 611 Initial Brute force execution OK\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/site/builtins-reference.md",
"new_path": "docs/site/builtins-reference.md",
"diff": "@@ -32,6 +32,7 @@ limitations under the License.\n* [`discoverFD`-Function](#discoverFD-function)\n* [`glm`-Function](#glm-function)\n* [`gridSearch`-Function](#gridSearch-function)\n+ * [`hyperband`-Function](#hyperband-function)\n* [`img_brightness`-Function](#img_brightness-function)\n* [`img_crop`-Function](#img_crop-function)\n* [`img_mirror`-Function](#img_mirror-function)\n@@ -301,6 +302,55 @@ paramRanges = list(10^seq(0,-4), 10^seq(-5,-9), 10^seq(1,3))\n[B, opt]= gridSearch(X=X, y=y, train=\"lm\", predict=\"lmPredict\", params=params, paramValues=paramRanges, verbose = TRUE)\n```\n+## `hyperband`-Function\n+\n+The `hyperband`-function is used for hyper parameter optimization and is based on multi-armed bandits and early elimination.\n+Through multiple parallel brackets and consecutive trials it will return the hyper parameter combination which performed best\n+on a validation dataset. A set of hyper parameter combinations is drawn from uniform distributions with given ranges; Those\n+make up the candidates for `hyperband`.\n+Notes:\n+* `hyperband` is hard-coded for `lmCG`, and uses `lmpredict` for validation\n+* `hyperband` is hard-coded to use the number of iterations as a resource\n+* `hyperband` can only optimize continuous hyperparameters\n+\n+### Usage\n+```r\n+hyperband(X_train, y_train, X_val, y_val, params, paramRanges, R, eta, verbose)\n+```\n+\n+### Arguments\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X_train | Matrix[Double] | required | Input Matrix of training vectors. |\n+| y_train | Matrix[Double] | required | Labels for training vectors. |\n+| X_val | Matrix[Double] | required | Input Matrix of validation vectors. |\n+| y_val | Matrix[Double] | required | Labels for validation vectors. |\n+| params | List[String] | required | List of parameters to optimize. |\n+| paramRanges | Matrix[Double] | required | The min and max values for the uniform distributions to draw from. One row per hyper parameter, first column specifies min, second column max value. |\n+| R | Scalar[int] | 81 | Controls number of candidates evaluated. |\n+| eta | Scalar[int] | 3 | Determines fraction of candidates to keep after each trial. |\n+| verbose | Boolean | `TRUE` | If `TRUE` print messages are activated. |\n+\n+### Returns\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | 1-column matrix of weights of best performing candidate |\n+| Frame[Unknown] | hyper parameters of best performing candidate |\n+\n+### Example\n+```r\n+X_train = rand(rows=50, cols=10);\n+y_train = rowSums(X_train) + rand(rows=50, cols=1);\n+X_val = rand(rows=50, cols=10);\n+y_val = rowSums(X_val) + rand(rows=50, cols=1);\n+\n+params = list(\"reg\");\n+paramRanges = matrix(\"0 20\", rows=1, cols=2);\n+\n+[bestWeights, optHyperParams] = hyperband(X_train=X_train, y_train=y_train,\n+ X_val=X_val, y_val=y_val, params=params, paramRanges=paramRanges);\n+```\n+\n## `img_brightness`-Function\nThe `img_brightness`-function is an image data augumentation function.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/hyperband.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+m_hyperband = function(Matrix[Double] X_train, Matrix[Double] y_train,\n+ Matrix[Double] X_val, Matrix[Double] y_val, List[String] params,\n+ Matrix[Double] paramRanges, Scalar[int] R = 81, Scalar[int] eta = 3,\n+ Boolean verbose = TRUE)\n+ return (Matrix[Double] bestWeights, Frame[Unknown] bestHyperParams)\n+{\n+ # variable names follow publication where algorithm is introduced\n+\n+ numParams = length(params);\n+\n+ assert(numParams == nrow(paramRanges));\n+ assert(ncol(paramRanges) == 2);\n+ assert(nrow(X_train) == nrow(y_train));\n+ assert(nrow(X_val) == nrow(y_val));\n+ assert(ncol(X_train) == ncol(X_val));\n+ assert(ncol(y_train) == ncol(y_val));\n+\n+ s_max = floor(log(R,eta));\n+ B = (s_max + 1) * R;\n+ bracketWinners = matrix(0, s_max+1, numParams+1);\n+ winnerWeights = matrix(0, s_max+1, ncol(X_train));\n+\n+ parfor( s in s_max:0 ) {\n+ debugMsgs = \"--------------------------\";\n+\n+ if( verbose ) {\n+ debugMsgs = append(debugMsgs, \"BRACKET s = \" + s + \"\\n\");\n+ }\n+\n+ n = ceil(floor(B/R/(s+1)) * eta^s);\n+ r = R * eta^(-s);\n+\n+ scoreboard = matrix(0,n,1+numParams);\n+ candidateWeights = matrix(0,n,ncol(X_train));\n+ # candidateWeights is not read until last round, as models are retrained\n+ # from zero in every trial at the moment\n+\n+ # draw parameter values from uniform distribution\n+ # draw e.g. regularisation factor for all the candidates at once\n+ for( curParam in 1:numParams ) {\n+ scoreboard[,curParam+1] =\n+ rand(rows=n, cols=1, min=as.scalar(paramRanges[curParam, 1]),\n+ max=as.scalar(paramRanges[curParam, 2]), pdf=\"uniform\");\n+ }\n+\n+ for( i in 0:s ) {\n+ n_i = as.integer(floor(n * eta^(-i)));\n+ r_i = as.integer(floor(r * eta^i));\n+ # when using number of iterations as a resource, r_i has to be an\n+ # integer; when using other types of resources, like portion of the\n+ # dataset, this is not the case This implementation hard-coded\n+ # iterations as the resource. 
floor() for r_i is not included in\n+ # publication of hyperband\n+\n+ if( verbose ) {\n+ debugMsgs = append(debugMsgs, \"+++++++++++++++\");\n+ debugMsgs = append(debugMsgs, \"i: \" + i + \" (current round)\");\n+ debugMsgs = append(debugMsgs, \"n_i: \" + n_i + \" (number of configurations evaluated)\");\n+ debugMsgs = append(debugMsgs, \"r_i: \" + r_i + \" (maximum number of iterations)\\n\");\n+ }\n+\n+ parfor( curCandidate in 1:n_i ) {\n+ # TODO argument list has to be passed from outside as well\n+ # args is a residue from the implementation with eval(\"lmCG\", args)\n+ # init argument list\n+ args = list(X=X_train, y=y_train, icpt=0, reg=1e-7,\n+ tol=1e-7, maxi=r_i, verbose=TRUE);\n+\n+ for( curParam in 1:numParams ) {\n+ # replace default values with values of the candidate at the\n+ # corresponding location\n+ args[as.scalar(params[curParam])] =\n+ as.scalar(scoreboard[curCandidate,curParam+1]);\n+ }\n+ # original version\n+ # weights = eval(learnAlgo, arguments);\n+\n+ # would be better to pass the whole list at once, this solution is error\n+ # prone depending on the order of the list. hyper parameters to optimize\n+ # are taken from args, as there they are reordered to be invariant to the\n+ # order used at calling hyperband\n+ weights = lmCG(X=X_train, y=y_train, tol=as.scalar(args[1]),\n+ reg=as.scalar(args[2]), maxi=r_i, verbose=FALSE);\n+\n+ candidateWeights[curCandidate] = t(weights)\n+ preds = lmpredict(X=X_val, w=weights);\n+ scoreboard[curCandidate,1] = as.matrix(sum((y_val - preds)^2));\n+ }\n+\n+ # reorder both matrices by same order\n+ reorder = order(target=scoreboard, index.return=TRUE);\n+ P = table(seq(1,n_i), reorder); # permutation matrix\n+ scoreboard = P %*% scoreboard;\n+ candidateWeights = P %*% candidateWeights;\n+\n+ if( verbose ) {\n+ debugMsgs = append(debugMsgs, \"validation loss | parameter values:\");\n+ debugMsgs = append(debugMsgs, toString(scoreboard));\n+ }\n+\n+ numToKeep = floor(n_i/eta);\n+\n+ # in some cases, the list of remaining candidates would get emptied\n+ if( numToKeep >= 1 ) {\n+ scoreboard = scoreboard[1:numToKeep]\n+ candidateWeights = candidateWeights[1:numToKeep];\n+ }\n+ }\n+\n+ if( verbose ) {\n+ debugMsgs = append(debugMsgs, \"Winner of Bracket: \");\n+ debugMsgs = append(debugMsgs, toString(scoreboard[1]));\n+ print(debugMsgs); # make print atomic because of parfor\n+ }\n+ bracketWinners[s+1] = scoreboard[1];\n+ winnerWeights[s+1] = candidateWeights[1];\n+ }\n+\n+ if( verbose ) {\n+ print(\"--------------------------\");\n+ print(\"WINNERS OF EACH BRACKET (from s = 0 to s_max):\");\n+ print(\"validation loss | parameter values:\");\n+ print(toString(bracketWinners));\n+ }\n+\n+ # reorder both matrices by same order\n+ reorder2 = order(target=bracketWinners, index.return=TRUE);\n+ P2 = table(seq(1,s_max+1), reorder2); # permutation matrix\n+ bracketWinners = P2 %*% bracketWinners;\n+ winnerWeights = P2 %*% winnerWeights;\n+\n+ bestHyperParams = as.frame(t(bracketWinners[1,2:1+numParams]));\n+ bestWeights = t(winnerWeights[1]);\n+\n+ if( verbose ) {\n+ print(\"Hyper parameters returned:\");\n+ print(toString(bestHyperParams));\n+ print(\"Weights returned:\");\n+ print(toString(t(bestWeights)));\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -96,6 +96,7 @@ public enum Builtins {\nGLM(\"glm\", true),\nGNMF(\"gnmf\", true),\nGRID_SEARCH(\"gridSearch\", true),\n+ HYPERBAND(\"hyperband\", true),\nIFELSE(\"ifelse\", false),\nIMG_MIRROR(\"img_mirror\", true),\nIMG_BRIGHTNESS(\"img_brightness\", true),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinHyperbandTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+\n+public class BuiltinHyperbandTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME = \"HyperbandLM\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + BuiltinHyperbandTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 300;\n+ private final static int cols = 20;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME,new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,new String[]{\"R\"}));\n+ }\n+\n+ @Test\n+ public void testHyperbandCP() {\n+ runHyperband(ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testHyperbandSpark() {\n+ runHyperband(ExecType.SPARK);\n+ }\n+\n+ private void runHyperband(ExecType et) {\n+ ExecMode modeOld = setExecMode(et);\n+ try {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-args\", input(\"X\"), input(\"y\"), output(\"R\")};\n+ double[][] X = getRandomMatrix(rows, cols, 0, 1, 0.8, 3);\n+ double[][] y = getRandomMatrix(rows, 1, 0, 1, 0.8, 7);\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+ writeInputMatrixWithMTD(\"y\", y, true);\n+\n+ runTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n+\n+ //expected loss smaller than default invocation\n+ Assert.assertTrue(TestUtils.readDMLBoolean(output(\"R\")));\n+ }\n+ finally {\n+ resetExecMode(modeOld);\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/HyperbandLM.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+l2norm = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] B) return (Matrix[Double] loss) {\n+ loss = as.matrix(sum((y - X%*%B)^2));\n+}\n+\n+X = read($1);\n+y = read($2);\n+\n+# size of dataset chosen such that number of maximum iterations influences the\n+# performance of candidates\n+numTrSamples = 100;\n+numValSamples = 100;\n+\n+X_train = X[1:numTrSamples,];\n+y_train = y[1:numTrSamples,];\n+X_val = X[(numTrSamples+1):(numTrSamples+numValSamples+1),];\n+y_val = y[(numTrSamples+1):(numTrSamples+numValSamples+1),];\n+X_test = X[(numTrSamples+numValSamples+2):nrow(X),];\n+y_test = y[(numTrSamples+numValSamples+2):nrow(X),];\n+\n+params = list(\"reg\", \"tol\");\n+\n+# only works with continuous hyper parameters in this implementation\n+paramRanges = matrix(0, rows=2, cols=2);\n+\n+paramRanges[1,1] = 0;\n+paramRanges[1,2] = 20;\n+paramRanges[2,1] = 10^-10;\n+paramRanges[2,2] = 10^-12;\n+\n+# use lmCG, because this implementation of hyperband only makes sense with\n+# iterative algorithms\n+[B1, optHyperParams] = hyperband(X_train=X_train, y_train=y_train, X_val=X_val,\n+ y_val=y_val, params=params, paramRanges=paramRanges, R=50, eta=3, verbose=TRUE);\n+\n+# train reference with default values\n+B2 = lmCG(X=X_train, y=y_train, verbose=FALSE);\n+\n+l1 = l2norm(X_test, y_test, B1);\n+l2 = l2norm(X_test, y_test, B2);\n+R = as.scalar(l1 <= l2);\n+\n+write(R, $3)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2573] Hyperband built-in function (hyper-param optimization)
AMLS project SS2020.
Closes #996. |
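As a sanity check of the bracket arithmetic in the hyperband.dml added above, the following standalone sketch (not SystemDS code; the class name and the epsilon guard against floating-point round-off are assumptions) prints the schedule for the documented defaults R=81, eta=3:

```java
// Sketch of the bracket schedule from scripts/builtin/hyperband.dml:
// s_max+1 brackets, each running successive-halving rounds of n_i
// candidates trained for at most r_i iterations.
public class HyperbandScheduleSketch {
  public static void main(String[] args) {
    int R = 81, eta = 3; // documented defaults
    double epsFP = 1e-9; // assumed guard against floating-point round-off
    int s_max = (int) Math.floor(Math.log(R) / Math.log(eta) + epsFP); // = 4
    int B = (s_max + 1) * R; // per-bracket budget
    for (int s = s_max; s >= 0; s--) {
      int n = (int) Math.ceil(Math.floor((double) B / R / (s + 1)) * Math.pow(eta, s));
      double r = R * Math.pow(eta, -s);
      System.out.println("bracket s=" + s);
      for (int i = 0; i <= s; i++) {
        int n_i = (int) Math.floor(n * Math.pow(eta, -i) + epsFP); // configurations evaluated
        int r_i = (int) Math.floor(r * Math.pow(eta, i) + epsFP);  // maximum iterations
        System.out.println("  round i=" + i + ": n_i=" + n_i + ", r_i=" + r_i);
      }
    }
  }
}
```

For s=4 this yields the familiar (n_i, r_i) schedule (81,1), (27,3), (9,9), (3,27), (1,81): many cheap trials first, few expensive ones at the end.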
49,698 | 20.07.2020 12:06:57 | -19,080 | d1a1492c2da608f7be0a5458beaadabb44b06c2b | Full MLContext test for LinearReg
* Takes advantage of existing R algorithm scripts used for
codegen testing.
* This would improve the testing by allowing us to provide all
the necessary inputs to the script. | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -1649,6 +1649,8 @@ public abstract class AutomatedTestBase {\n}\nprotected String getRScript() {\n+ if(fullRScriptName != null)\n+ return fullRScriptName;\nreturn sourceDirectory + selectedTest + \".R\";\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextLinregTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextLinregTest.java",
"diff": "@@ -22,8 +22,13 @@ package org.apache.sysds.test.functions.mlcontext;\nimport static org.apache.sysds.api.mlcontext.ScriptFactory.dmlFromFile;\nimport org.apache.log4j.Logger;\n-import org.junit.Test;\nimport org.apache.sysds.api.mlcontext.Script;\n+import org.apache.sysds.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\npublic class MLContextLinregTest extends MLContextTestBase {\nprotected static Logger log = Logger.getLogger(MLContextLinregTest.class);\n@@ -37,6 +42,11 @@ public class MLContextLinregTest extends MLContextTestBase {\nCG, DS,\n}\n+ private final static double eps = 1e-3;\n+\n+ private final static int rows = 2468;\n+ private final static int cols = 507;\n+\n@Test\npublic void testLinregCGSparse() {\nrunLinregTestMLC(LinregType.CG, true);\n@@ -59,24 +69,42 @@ public class MLContextLinregTest extends MLContextTestBase {\nprivate void runLinregTestMLC(LinregType type, boolean sparse) {\n- double[][] X = getRandomMatrix(10, 3, 0, 1, sparse ? sparsity2 : sparsity1, 7);\n- double[][] Y = getRandomMatrix(10, 1, 0, 10, 1.0, 3);\n+ double[][] X = getRandomMatrix(rows, cols, 0, 1, sparse ? sparsity2 : sparsity1, 7);\n+ double[][] Y = getRandomMatrix(rows, 1, 0, 10, 1.0, 3);\n+\n+ // Hack Alert\n+ // overwrite baseDirectory to the place where test data is stored.\n+ baseDirectory = \"target/testTemp/functions/mlcontext/\";\n+\n+ fullRScriptName = \"src/test/scripts/functions/codegenalg/Algorithm_LinregCG.R\";\n+\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+ writeInputMatrixWithMTD(\"y\", Y, true);\n+\n+ rCmd = getRCmd(inputDir(), \"0\", \"0.000001\", \"0\", \"0.001\", expectedDir());\n+ runRScript(true);\n+\n+ MatrixBlock outmat = new MatrixBlock();\nswitch (type) {\ncase CG:\nScript lrcg = dmlFromFile(TEST_SCRIPT_CG);\nlrcg.in(\"X\", X).in(\"y\", Y).in(\"$icpt\", \"0\").in(\"$tol\", \"0.000001\").in(\"$maxi\", \"0\").in(\"$reg\", \"0.000001\")\n.out(\"beta_out\");\n- ml.execute(lrcg);\n+ outmat = ml.execute(lrcg).getMatrix(\"beta_out\").toMatrixBlock();\nbreak;\ncase DS:\nScript lrds = dmlFromFile(TEST_SCRIPT_DS);\nlrds.in(\"X\", X).in(\"y\", Y).in(\"$icpt\", \"0\").in(\"$reg\", \"0.000001\").out(\"beta_out\");\n- ml.execute(lrds);\n+ outmat = ml.execute(lrds).getMatrix(\"beta_out\").toMatrixBlock();\nbreak;\n}\n+\n+ //compare matrices\n+ HashMap<MatrixValue.CellIndex, Double> rfile = readRMatrixFromFS(\"w\");\n+ TestUtils.compareMatrices(rfile, outmat, eps);\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-1863] Full MLContext test for LinearReg
* Takes advantage of existing R algorithm scripts used for
codegen testing.
* This would improve the testing by allowing us to provide all
the necessary inputs to the script. |
49,706 | 22.07.2020 10:45:50 | -7,200 | 836db709b6d1f8fd8848b66dc617b2a1491dcfe7 | [MINOR] Disable MacOS Building Test
Disabling the macOS building test due to GitHub Actions having inconsistent
macOS testing. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build.yml",
"new_path": ".github/workflows/build.yml",
"diff": "@@ -29,7 +29,11 @@ jobs:\nstrategy:\nfail-fast: false\nmatrix:\n- os: [ubuntu-latest, macOS-latest, windows-latest]\n+ os: [\n+ ubuntu-latest,\n+ # macOS-latest,\n+ windows-latest\n+ ]\nsteps:\n- name: Checkout Repository\nuses: actions/checkout@v2\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Disable MacOS Building Test
Disabling the macOS building test due to GitHub Actions having inconsistent
macOS testing. |
49,720 | 23.07.2020 14:09:40 | -7,200 | e93758d639cc22bf8e8193e3164698320664164e | [SYSTEMDS-611][MINOR] Refactoring structure
Rewriting the function for generating all possible combinations of physical pipelines out of each logical pipeline.
Simplified the generation of the top-k pipelines. | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/pipelines/enumerator.dml",
"new_path": "scripts/staging/pipelines/enumerator.dml",
"diff": "#\n#-------------------------------------------------------------\n+# import\n+source(\"./scripts/staging/pipelines/permutations.dml\") as perm;\n+source(\"./scripts/staging/pipelines/utils.dml\") as utils;\n+\nenumerator = function(Matrix[Double] X, Matrix[Double] Y, Frame[String] logical, Frame[String] outlierPrimitives,\nFrame[String] mviPrimitives, Frame[String] param, Integer k, Boolean verbose = TRUE)\nreturn(Frame[String] Kpipeline)\n{\n- for(i in 1:1) {#nrow(logical)\n+ for(i in 1:2) {#nrow(logical)\noperator = as.frame(matrix(0,nrow(outlierPrimitives),1)) #combine all logical primitives\nfor(j in 1:ncol(logical))\n{\n@@ -33,47 +37,53 @@ return(Frame[String] Kpipeline)\noperator = cbind(operator, mviPrimitives);\n}\noperator = operator[,2:ncol(operator)]\n- intermediates = generatePermutations(operator) # get the all possible combination of pysical primitives\n+ intermediates = perm::getPermutations(operator) # get the all possible combination of physical primitives\n# for ith logical pipeline\nif(verbose)\nprint(\" pipelines \\n\"+toString(intermediates))\n[p, h] = executeAll(X, Y,intermediates, param, verbose);\n+\nKpipeline = getFinalTopK(p, h, k)\n+ print(\"top k pipelines of \"+i+\"th logical pipeline \"+toString(Kpipeline))\n+ # str = \"top k pipelines of iteration \"+i\n+ # str = append(str, toString(Kpipeline))\n}\n# if(verbose)\n- print(\"final top k pipelines \\n\"+toString(Kpipeline))\n+ # print(\"final top k pipelines \\n\"+toString(Kpipeline))\n+ # write(str, \"D:/Workspace/Pipelines/output/kpipeline.txt\")\n}\n-\n# The pipeline execution functions\n###################################################\nexecuteAll = function(Matrix[Double] X, Matrix[Double] Y, Frame[String] intermediates, Frame[String] param, Boolean verbose)\n-return(Frame[String] topP, Matrix[Double] topH)\n+return(Frame[String] opt, Matrix[Double] hyper_param)\n{\n- topP = as.frame(\"\")\n- topH = matrix(0,1,1)\n- p = as.frame(\"\")\n- hp = matrix(0,1,1)\n- clone_X = X;\n+\n+ clone_X = X;\n+ # initialize output variables\n+ opt = as.frame(\"NA\")\n+ hyper_param = matrix(0,0,1)\nif(verbose)\nprint(\"total pipelines to be executed \"+nrow(intermediates))\nfor(i in 1:nrow(intermediates)) {\nparaList = list()\n- paraList = getInstanceParam(intermediates[i,], param)\n+ op = intermediates[i,]\n+\n+ paraList = getInstanceParam(op, param)\nsum = 1\n+ print(\"executing \"+toString(op))\nwhile(sum > 0) #condition to terminate when all hyper parameters are executed\n{\nparamL = list()\n- tmp_hp = matrix(0,1,1)\n- tmp_p = intermediates[i,]\n+ hp_temp = matrix(0,1,0)\n+ opt_temp = op\nfor(j in 1: length(paraList))\n{\nsingleHp = as.matrix(paraList[j])\n+ hp_temp = cbind(hp_temp, as.matrix(ncol(singleHp)))\n+ hp_temp = cbind(hp_temp, singleHp[1,])\nparamL = append(paramL, singleHp[1, ])\n- tmp_hp = cbind(tmp_hp, as.matrix(ncol(p)))\n- tmp_hp = cbind(tmp_hp, p[1,])\n-\nif(nrow(singleHp) > 1)\n{\nsingleHp = singleHp[2:nrow(singleHp),]\n@@ -81,95 +91,26 @@ return(Frame[String] topP, Matrix[Double] topH)\nsum = sum(singleHp)\n}\n}\n- X = executePipeline(intermediates[i,], X, paramL, FALSE)\n+ X = executePipeline(op, X, paramL, FALSE)\ndata = cbind(Y, X)\nacc = eval(\"fclassify\", data)\n+ hp_temp = cbind(hp_temp, acc)\nX = clone_X\n- tmp_hp = cbind(tmp_hp,acc)\n- if(ncol(p) == 1 & sum(hp) == 0){\n- p = tmp_p\n- hp = tmp_hp\n- } else {\n- p = rbind(p, tmp_p)\n- hp = rbind(hp, tmp_hp)\n- }\n- }\n-\n- if(ncol(topP) == 1 & sum(topH) == 0){\n- topP = p\n- topH = hp\n+ if(as.scalar(opt[1,1]) == \"NA\" & 
nrow(hyper_param) == 0)\n+ {\n+ opt = opt_temp\n+ hyper_param = hp_temp\n}\nelse {\n- if(ncol(p) < ncol(topP)){\n- margin = ncol(topP) - ncol(p)\n- toAppend = topP[1,1:margin]\n- toAppend[1,] = \"\"\n- p = cbind(p, toAppend)\n+ opt = rbind(opt, opt_temp)\n+ hyper_param = rbind(hyper_param, hp_temp)\n}\n- else if(ncol(hp) < ncol(topH))\n- hp = cbind(matrix(0,nrow(hp),ncol(topH) - ncol(hp)), hp)\n- else if(ncol(hp) > ncol(topH))\n- topH = cbind(matrix(0,nrow(topH),ncol(hp) - ncol(topH)), topH)\n-\n- topP = rbind(topP, p)\n- topH = rbind(topH, hp)\n}\nX = clone_X\n}\n}\n-# The below functions will generate the all possible\n-# physical pipelines for a given logical pipeline\n-###################################################\n-generatePermutations = function(Frame[String] operators)\n-return (Frame[String] combinations)\n-{\n- if(ncol(operators) == 1)\n- stop(\"invalid number of columns\")\n-\n- if(ncol(operators) > 2 ) {\n- com2 = generatePermutationsOf2(operators[,1:2])\n- operators = operators[,3:ncol(operators)]\n- for(out in 1: ncol(operators)) {\n- temp = com2[,1]\n- temp1 = com2[1,1]\n- comTemp = com2[1,]\n- for(i in 1:nrow(operators)) {\n- for(j in 1:nrow(com2))\n- temp[j,1] = operators[i,out]\n- temp1 = rbind(temp1, temp)\n- comTemp = rbind(comTemp, com2)\n- }\n- comTemp = cbind(comTemp, temp1)\n- com2 = comTemp[2:nrow(comTemp),]\n- }\n- combinations = com2\n- }\n- else\n- combinations = generatePermutationsOf2(operators)\n-}\n-\n-\n-generatePermutationsOf2 = function(Frame[String] operators )\n-return(Frame[String] output)\n-{\n- jspecR = \"{ids:true, recode:[1,2]}\";\n- [X, M] = transformencode(target=operators, spec=jspecR);\n- out = matrix(0,0,2)\n- for(i in 1:nrow(X[,2])) {\n- broadcast = matrix(as.scalar(X[i,2]), nrow(X), 1)\n- if(nrow(out) == 0){\n- out = cbind(X[,1], broadcast)\n- }\n- else {\n- output_tmp = cbind(X[,1], broadcast)\n- out = rbind(out, output_tmp)\n- }\n- }\n- output = transformdecode(target=out, spec=jspecR, meta=M);\n-}\n-\n# The below functions will generate the all possible\n# combinations for different hyper parameter values\n###################################################\n@@ -266,35 +207,23 @@ return (Double psum)\ngetFinalTopK = function(Frame[String] pipeline, Matrix[Double] hparameter, Integer k)\nreturn (Frame[String] pipeline)\n{\n-\n- s=\"\"\n- for(i in 1: ncol(pipeline), check =0)\n- s = s+i+\",\";\n+ if(nrow(pipeline) < k)\n+ stop(\"the top k should be less than the total pipelines\")\n+ # combine all parameter i.e., operation and hyper-parameter values\n+ allParam = cbind(pipeline, as.frame(hparameter))\n+ # get the indexes of columns for recode transformation\n+ idx = seq(1, ncol(pipeline))\n+ index = utils::vectorToCsv(idx)\n# encoding categorical columns using recode transformation\n- jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n- [X, M] = transformencode(target=pipeline, spec=jspecR);\n-\n- nColPip = ncol(pipeline)\n- allParam = cbind(X, hparameter)\n- clone_Param = allParam\n- emptyR = matrix(0,0,ncol(allParam))\n- while(nrow(emptyR) <= k)\n- {\n- maxFirst = clone_Param[, ncol(clone_Param)] == max(clone_Param[, ncol(clone_Param)])\n- clone_Param = clone_Param * (maxFirst == 0)\n- emptyR = removeEmpty(target = clone_Param, margin = \"rows\", select = (clone_Param[, ncol(clone_Param)] == 0) )\n- }\n- top = removeEmpty(target = allParam, margin = \"rows\", select = (clone_Param[, ncol(clone_Param)] == 0) )\n- X = top[,1:nColPip]\n- hparameter = top[,nColPip+1:ncol(top)]\n- pipeline = transformdecode(target=X, spec=jspecR, meta=M);\n- 
pipeline = cbind(pipeline, as.frame(hparameter))\n+ jspecR = \"{ids:true, recode:[\"+index+\"]}\";\n+ [X, M] = transformencode(target=allParam, spec=jspecR);\n+ top = order(target=X, by=ncol(X), decreasing=TRUE, index.return=FALSE);\n+ pipeline = transformdecode(target=top, spec=jspecR, meta=M);\n# TODO if k+n pipelines have same accuracy then how to return k pipelines\npipeline = pipeline[1:k,]\n}\n-\n-# These private function are used to impute values and classification\n+# These private function are used to impute values by mean and by median\n##################################################################################\nimputeByMean = function(Matrix[Double] X, Boolean verbose = FALSE)\nreturn(Matrix[Double] X)\n@@ -310,8 +239,8 @@ return(Matrix[Double] X)\n{\ncols = ncol(X)\ncolMedian = matrix(0, 1, cols)\n- X = replace(target=X, pattern=NaN, replacement=0)\nMask = is.nan(X)\n+ X = replace(target=X, pattern=NaN, replacement=0)\nparfor(i in 1:cols)\ncolMedian[, i] = median(X[,i])\nMask = Mask * colMedian\n@@ -319,6 +248,8 @@ return(Matrix[Double] X)\n}\n+# Function to evaluate the pipeline using classification accuracy\n+##################################################################################\nfclassify = function(Matrix[Double] X)\nreturn (Double accuracy)\n{\n@@ -352,14 +283,11 @@ return (Double accuracy)\n[prob, yhat, accuracy] = multiLogRegPredict(test_X, betas, test_Y, FALSE)\n}\n-\n-##########################################\n-## Call the function Enumerator\n-#########################################\n+# Enumeration call\n+##################################################################################\nX = read($1, data_type=\"matrix\", format=\"csv\", header=TRUE);\nY = X[,1]+1\nX = X[,2:ncol(X)]\n-\nL = read($2, data_type=\"frame\", format=\"csv\");\nOP = read($3, data_type=\"frame\", format=\"csv\");\nMVIP = read($4, data_type=\"frame\", format=\"csv\");\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/pipelines/permutations.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+source(\"./scripts/staging/pipelines/utils.dml\") as utils;\n+\n+\n+# The below functions will generate the all possible\n+# physical pipelines for a given logical pipeline\n+###################################################\n+\n+getPermutations = function(Frame[String] opt)\n+return(Frame[String] output)\n+{\n+ idx = seq(1, ncol(opt))\n+ # get the indexes of columns for recode transformation\n+ index = utils::vectorToCsv(idx)\n+ # recode logical pipelines for easy handling\n+ jspecR = \"{ids:true, recode:[\"+index+\"]}\";\n+ [X, M] = transformencode(target=opt, spec=jspecR);\n+ # initialize output matrix\n+ n = nrow(opt)\n+ d = ncol(opt)\n+ outC = matrix(0, n^d, d)\n+\n+ parfor(i in 1 : d) {\n+ # matrix for storing rows of ith columns\n+ outR = matrix(0, 0, 1)\n+ j = n^i\n+ rowIdx = 1\n+ for(k in 1:j) {\n+ valDup = matrix(as.scalar(X[rowIdx, i]), n^(d-i), 1)\n+ outR = rbind(outR, valDup)\n+ rowIdx = rowIdx + 1\n+ rowIdx = ifelse(((rowIdx)%%(n+1)) == 0, 1, rowIdx)\n+ }\n+ outC[,i] = outR\n+ }\n+ output = transformdecode(target=outC, spec=jspecR, meta=M);\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/pipelines/utils.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+\n+\n+# Utility function to convert vector into csv\n+##################################################################################\n+vectorToCsv = function(Matrix[Double] vector)\n+return (String indexes){\n+ if(nrow(vector) > ncol(vector))\n+ vector = t(vector)\n+ s = \"\"\n+ for(i in 1:ncol(vector)-1)\n+ {\n+ s = s+i+\",\"\n+ }\n+ indexes = s+ncol(vector)\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-611][MINOR] Refactoring structure
Rewriting the function for generating all possible combinations of physical pipelines out of each logical pipeline.
Simplified the generation of top-k pipelines. |
49,738 | 24.07.2020 22:04:55 | -7,200 | 8a602caa135e7720024d95b7192cb7008678fd57 | Improved error handling of parser syntax errors | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/dml/CustomErrorListener.java",
"new_path": "src/main/java/org/apache/sysds/parser/dml/CustomErrorListener.java",
"diff": "@@ -136,19 +136,17 @@ public class CustomErrorListener extends BaseErrorListener {\n* Syntax error occurred. Add the error to the list of parse issues.\n*/\n@Override\n- public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line, int charPositionInLine,\n- String msg, RecognitionException e) {\n+ public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,\n+ int line, int charPositionInLine, String msg, RecognitionException e)\n+ {\n+ msg = msg + \" (\"+offendingSymbol.toString()+\")\";\nparseIssues.add(new ParseIssue(line, charPositionInLine, msg, currentFileName, ParseIssueType.SYNTAX_ERROR));\ntry {\nsetAtLeastOneError(true);\n- // Print error messages with file name\n- if (currentFileName == null)\n- log.error(\"line \" + line + \":\" + charPositionInLine + \" \" + msg);\n- else {\n- String fileName = currentFileName;\n- log.error(fileName + \" line \" + line + \":\" + charPositionInLine + \" \" + msg);\n+ String out = (currentFileName != null) ? (currentFileName + \", \") : \"\";\n+ log.error(out + \"line \" + line + \":\" + charPositionInLine + \" \" + msg);\n}\n- } catch (Exception e1) {\n+ catch (Exception e1) {\nlog.error(\"ERROR: while customizing error message:\" + e1);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"diff": "@@ -1495,6 +1495,26 @@ public class MLContextTest extends MLContextTestBase {\nml.execute(script);\n}\n+ @Test\n+ public void testErrorHandlingTwoIdentifiers() {\n+ try {\n+ System.out.println(\"MLContextTest - error handling two identifiers\");\n+ Script script = dml(\"foo bar\");\n+ ml.execute(script);\n+ }\n+ catch(Exception ex) {\n+ Throwable t = ex;\n+ while( t.getCause() != null )\n+ t = t.getCause();\n+ System.out.println(t.getMessage());\n+ Assert.assertTrue(t.getMessage().contains(\"foo bar\"));\n+ //unfortunately, the generated antlr parser creates the concatenated msg\n+ //we do a best effort error reporting here, by adding the offending symbol\n+ //Assert.assertFalse(t.getMessage().contains(\"foobar\"));\n+ Assert.assertTrue(t.getMessage().contains(\"'bar'\"));\n+ }\n+ }\n+\n@Test\npublic void testInputVariablesAddLongsDML() {\nSystem.out.println(\"MLContextTest - input variables add longs DML\");\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-615] Improved error handling of parser syntax errors |
49,738 | 24.07.2020 23:08:27 | -7,200 | 8d61ae6f46f0a8ce21f9ad7c3a617023f6983778 | Additional mlcontext test for nn-library imports
The bug reported in SYSTEMDS-2572 was non-reproducible both in a local
environment as well as through spark-shell. However, as the mlcontext
tests did not include a test for sourcing (importing) dml scripts, we
add the related test script accordingly. | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTest.java",
"diff": "@@ -1905,4 +1905,16 @@ public class MLContextTest extends MLContextTestBase {\nAssert.assertEquals(\"yes it's TRUE\", d);\n}\n+ @Test\n+ public void testNNImport() {\n+ System.out.println(\"MLContextTest - NN import\");\n+ String s = \"source(\\\"scripts/nn/layers/relu.dml\\\") as relu;\\n\"\n+ + \"X = rand(rows=100, cols=10, min=-1, max=1);\\n\"\n+ + \"R1 = relu::forward(X);\\n\"\n+ + \"R2 = max(X, 0);\\n\"\n+ + \"R = sum(R1==R2);\\n\";\n+ double ret = ml.execute(dml(s).out(\"R\"))\n+ .getScalarObject(\"R\").getDoubleValue();\n+ Assert.assertEquals(1000, ret, 1e-20);\n+ }\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2572] Additional mlcontext test for nn-library imports
The bug reported in SYSTEMDS-2572 was non-reproducible both in a local
environment as well as through spark-shell. However, as the mlcontext
tests did not include a test for sourcing (importing) dml scripts, we
add the related test script accordingly. |
49,706 | 25.07.2020 22:15:12 | -7,200 | 1e626c02c1d74ec3ee6080ec54971f87d66a5279 | Simplified log4j.properties files
Testing Log4J file simplification
add default log4j file
Closes #980. | [
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -56,8 +56,6 @@ src/main/python/tests/onnx_systemds/test_models/*.onnx\n# User configuration files\nconf/SystemDS-config.xml\n-conf/log4j.properties\n-conf/systemds-env.sh\n# Documentation artifacts\ndocs/_site\n"
},
{
"change_type": "DELETE",
"old_path": "conf/log4j-silent.properties",
"new_path": null,
"diff": "-#-------------------------------------------------------------\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-#\n-#-------------------------------------------------------------\n-\n-# Define some default values that can be overridden by system properties\n-hadoop.root.logger=INFO,console\n-hadoop.log.dir=.\n-hadoop.log.file=hadoop.log\n-hadoop.security.logger=OFF\n-\n-# Security appender\n-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\n-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n-\n-log4j.logger.org.apache.spark=ERROR\n-\n-#\n-# Job Summary Appender\n-#\n-# Use following logger to send summary to separate file defined by\n-# hadoop.mapreduce.jobsummary.log.file rolled daily:\n-# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n-#\n-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\n-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\n-\n-# Define the root logger to the system property \"hadoop.root.logger\".\n-log4j.rootLogger=${hadoop.root.logger}, EventCounter\n-\n-# Logging Threshold\n-log4j.threshold=ALL\n-\n-#\n-# Guardim Proxy setup - HDFS, MapReduce and Hadoop RPC\n-#\n-log4j.appender.GuardiumProxyAppender=org.apache.log4j.net.SocketAppender\n-log4j.appender.GuardiumProxyAppender.RemoteHost=\n-log4j.appender.GuardiumProxyAppender.Port=\n-log4j.appender.GuardiumProxyAppender.RecoveryFile=audit-${hadoop.log.file}\n-log4j.appender.GuardiumProxyAppender.Threshold=INFO\n-\n-# Hdfs audit logs\n-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\n-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hadoop.security.logger}\n-\n-hdfs.audit.logger=INFO,NullAppender\n-hdfs.audit.log.maxfilesize=256MB\n-hdfs.audit.log.maxbackupindex=20\n-log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\n-log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n-log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}\n-log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}\n-\n-# MapReduce audit 
logs\n-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\n-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${hadoop.security.logger}\n-\n-mapred.audit.logger=INFO,NullAppender\n-mapred.audit.log.maxfilesize=256MB\n-mapred.audit.log.maxbackupindex=20\n-log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender\n-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\n-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\n-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n-log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}\n-log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}\n-\n-# Hadoop RPC audit logs\n-log4j.additivity.SecurityLogger=false\n-log4j.logger.SecurityLogger=${hadoop.security.logger}\n-\n-log4j.appender.hadoopaudit=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.hadoopaudit.DatePattern='.'yyyy-MM-dd\n-log4j.appender.hadoopaudit.File=${hadoop.log.dir}/audit-${hadoop.log.file}\n-log4j.appender.hadoopaudit.Append=true\n-log4j.appender.hadoopaudit.layout=org.apache.log4j.PatternLayout\n-log4j.appender.hadoopaudit.layout.ConversionPattern=%d{ISO8601} %5p %c - %m%n\n-\n-#\n-# Daily Rolling File Appender\n-#\n-\n-#log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n-#log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-\n-# Rollver at midnight\n-#log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n-\n-# 30-day backup\n-#log4j.appender.DRFA.MaxBackupIndex=30\n-#log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n-\n-# Pattern format: Date LogLevel LoggerName LogMessage\n-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-# Debugging Pattern format\n-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-#\n-# console\n-# Add \"console\" to rootlogger above if you want to use this\n-#\n-\n-log4j.appender.console=org.apache.log4j.ConsoleAppender\n-log4j.appender.console.target=System.err\n-log4j.appender.console.layout=org.apache.log4j.PatternLayout\n-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n-\n-#\n-# TaskLog Appender\n-#\n-\n-#Default values\n-hadoop.tasklog.taskid=null\n-hadoop.tasklog.iscleanup=false\n-hadoop.tasklog.noKeepSplits=4\n-hadoop.tasklog.totalLogFileSize=100\n-hadoop.tasklog.purgeLogSplits=true\n-hadoop.tasklog.logsRetainHours=12\n-\n-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogSocketAppender\n-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\n-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\n-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n-\n-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-\n-#\n-#Security audit appender\n-#\n-\n-hadoop.security.log.file=SecurityAuth.audit\n-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\n-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n-\n-#\n-# Rolling File Appender\n-#\n-\n-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-# Logfile size and and 30-day 
backups\n-#log4j.appender.RFA.MaxFileSize=1MB\n-#log4j.appender.RFA.MaxBackupIndex=30\n-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-\n-#\n-# Rolling File Appender\n-#\n-\n-log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-# Logfile size and and 30-day backups\n-log4j.appender.RFA.MaxFileSize=10MB\n-log4j.appender.RFA.MaxBackupIndex=3\n-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-#\n-#Logger for streaming Job Configuration\n-#\n-log4j.logger.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=INFO,${SAJC}\n-log4j.additivity.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=false\n-\n-#\n-#Socket Appender for streaming Job Configuration\n-#\n-log4j.appender.job.conf=org.apache.log4j.net.SocketAppender\n-log4j.appender.job.conf.RemoteHost=localhost\n-log4j.appender.job.conf.Port=${JOBCONF_LOGGING_PORT}\n-log4j.appender.job.conf.layout=org.apache.log4j.PatternLayout\n-log4j.appender.job.conf.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.job.conf.appender.ReconnectionDelay=120000\n-\n-\n-#\n-#Logger for streaming task attempt logs\n-#\n-log4j.logger.org.apache.hadoop.mapred.TaskLogSocketAppender=INFO,${SATA}\n-log4j.additivity.org.apache.hadoop.mapred.TaskLogSocketAppender=false\n-\n-#\n-#Socket appender for streaming task attempt logs\n-#\n-log4j.appender.task.attempt.log=org.apache.log4j.net.SocketAppender\n-log4j.appender.task.attempt.log.RemoteHost=localhost\n-log4j.appender.task.attempt.log.Port=${TASKATTEMPT_LOGGING_PORT}\n-log4j.appender.task.attempt.log.layout=org.apache.log4j.PatternLayout\n-log4j.appender.task.attempt.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.task.attempt.log.appender.ReconnectionDelay=120000\n-\n-#\n-#Socket Appender for Streaming NameNode,SecondaryNameNode and JobTracker Logs\n-#\n-log4j.appender.socket.appender=org.apache.log4j.net.SocketAppender\n-log4j.appender.socket.appender.RemoteHost=localhost\n-log4j.appender.socket.appender.Port=${HADOOP_LOGGING_PORT}\n-log4j.appender.socket.appender.layout=org.apache.log4j.PatternLayout\n-log4j.appender.socket.appender.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.socket.appender.ReconnectionDelay=120000\n-\n-#\n-#Logger for streaming Job History Logs\n-#\n-log4j.logger.JobHistoryLogs=INFO,${SAJH}\n-log4j.additivity.JobHistoryLogs=false\n-\n-#\n-#Socket Appender for Job History Logs\n-#\n-log4j.appender.job.history.log=org.apache.log4j.net.SocketAppender\n-log4j.appender.job.history.log.RemoteHost=localhost\n-log4j.appender.job.history.log.Port=${JOBHISTORY_LOGGING_PORT}\n-log4j.appender.job.history.log.layout=org.apache.log4j.PatternLayout\n-log4j.appender.job.history.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.job.history.appender.ReconnectionDelay=120000\n-\n-\n-\n-# Custom Logging levels\n-\n-hadoop.metrics.log.level=INFO\n-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n-\n-# Jets3t 
library\n-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n-\n-#\n-# Null Appender\n-# Trap security logger on the hadoop client side\n-#\n-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n-\n-#\n-# Event Counter Appender\n-# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n-#\n-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n-\n-#\n-# Job Summary Appender\n-#\n-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}\n-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n-log4j.appender.JSA.DatePattern=.yyyy-MM-dd\n-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}\n-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false\n"
},
{
"change_type": "RENAME",
"old_path": "conf/systemds-env.sh.template",
"new_path": "conf/log4j.properties",
"diff": "-#!/usr/bin/env bash\n#-------------------------------------------------------------\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n#\n#-------------------------------------------------------------\n-# This file is sourced when running the bin/systemds-standalone script.\n-# Copy it as systemds-env.sh and edit it to configure SystemDS.\n+log4j.rootLogger=ERROR,console\n-# Example of adding additional Java execution options, which will\n-# override defaults as necessary.\n-#SYSTEMDS_JAVA_OPTS=\"-Xmx12g -Xms8g\"\n+log4j.logger.org.apache.sysds=ERROR\n+log4j.logger.org.apache.spark=OFF\n+log4j.logger.org.apache.hadoop=OFF\n+log4j.appender.console=org.apache.log4j.ConsoleAppender\n+log4j.appender.console.target=System.err\n+log4j.appender.console.layout=org.apache.log4j.PatternLayout\n+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "conf/log4j.properties.template",
"new_path": "conf/log4j.properties.template",
"diff": "#\n#-------------------------------------------------------------\n+log4j.rootLogger=ERROR,console\n-# Define some default values that can be overridden by system properties\n-hadoop.root.logger=INFO,console\n-hadoop.log.dir=.\n-hadoop.log.file=hadoop.log\n-hadoop.security.logger=OFF\n-\n-# Security appender\n-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\n-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n-\n-#\n-# Job Summary Appender\n-#\n-# Use following logger to send summary to separate file defined by\n-# hadoop.mapreduce.jobsummary.log.file rolled daily:\n-# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n-#\n-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\n-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\n-\n-# Define the root logger to the system property \"hadoop.root.logger\".\n-log4j.rootLogger=${hadoop.root.logger}, EventCounter\n-\n-# Logging Threshold\n-log4j.threshold=ALL\n-\n-#\n-# Guardim Proxy setup - HDFS, MapReduce and Hadoop RPC\n-#\n-log4j.appender.GuardiumProxyAppender=org.apache.log4j.net.SocketAppender\n-log4j.appender.GuardiumProxyAppender.RemoteHost=\n-log4j.appender.GuardiumProxyAppender.Port=\n-log4j.appender.GuardiumProxyAppender.RecoveryFile=audit-${hadoop.log.file}\n-log4j.appender.GuardiumProxyAppender.Threshold=INFO\n-\n-# Hdfs audit logs\n-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\n-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hadoop.security.logger}\n-\n-hdfs.audit.logger=INFO,NullAppender\n-hdfs.audit.log.maxfilesize=256MB\n-hdfs.audit.log.maxbackupindex=20\n-log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\n-log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n-log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}\n-log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}\n-\n-# MapReduce audit logs\n-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\n-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${hadoop.security.logger}\n-\n-mapred.audit.logger=INFO,NullAppender\n-mapred.audit.log.maxfilesize=256MB\n-mapred.audit.log.maxbackupindex=20\n-log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender\n-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\n-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\n-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n-log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}\n-log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}\n-\n-# Hadoop RPC audit logs\n-log4j.additivity.SecurityLogger=false\n-log4j.logger.SecurityLogger=${hadoop.security.logger}\n-\n-log4j.appender.hadoopaudit=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.hadoopaudit.DatePattern='.'yyyy-MM-dd\n-log4j.appender.hadoopaudit.File=${hadoop.log.dir}/audit-${hadoop.log.file}\n-log4j.appender.hadoopaudit.Append=true\n-log4j.appender.hadoopaudit.layout=org.apache.log4j.PatternLayout\n-log4j.appender.hadoopaudit.layout.ConversionPattern=%d{ISO8601} %5p %c - 
%m%n\n-\n-#\n-# Daily Rolling File Appender\n-#\n-\n-#log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n-#log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-\n-# Rollver at midnight\n-#log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n-\n-# 30-day backup\n-#log4j.appender.DRFA.MaxBackupIndex=30\n-#log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n-\n-# Pattern format: Date LogLevel LoggerName LogMessage\n-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-# Debugging Pattern format\n-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-#\n-# console\n-# Add \"console\" to rootlogger above if you want to use this\n-#\n+log4j.logger.org.apache.sysds=ERROR\n+log4j.logger.org.apache.spark=OFF\n+log4j.logger.org.apache.hadoop=OFF\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\\ No newline at end of file\n-\n-#\n-# TaskLog Appender\n-#\n-\n-#Default values\n-hadoop.tasklog.taskid=null\n-hadoop.tasklog.iscleanup=false\n-hadoop.tasklog.noKeepSplits=4\n-hadoop.tasklog.totalLogFileSize=100\n-hadoop.tasklog.purgeLogSplits=true\n-hadoop.tasklog.logsRetainHours=12\n-\n-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogSocketAppender\n-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\n-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\n-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n-\n-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-\n-#\n-#Security audit appender\n-#\n-\n-hadoop.security.log.file=SecurityAuth.audit\n-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\n-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n-\n-#\n-# Rolling File Appender\n-#\n-\n-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-# Logfile size and and 30-day backups\n-#log4j.appender.RFA.MaxFileSize=1MB\n-#log4j.appender.RFA.MaxBackupIndex=30\n-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-\n-#\n-# Rolling File Appender\n-#\n-\n-log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-# Logfile size and and 30-day backups\n-log4j.appender.RFA.MaxFileSize=10MB\n-log4j.appender.RFA.MaxBackupIndex=3\n-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-#\n-#Logger for streaming Job Configuration\n-#\n-log4j.logger.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=INFO,${SAJC}\n-log4j.additivity.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=false\n-\n-#\n-#Socket Appender for streaming Job 
Configuration\n-#\n-log4j.appender.job.conf=org.apache.log4j.net.SocketAppender\n-log4j.appender.job.conf.RemoteHost=localhost\n-log4j.appender.job.conf.Port=${JOBCONF_LOGGING_PORT}\n-log4j.appender.job.conf.layout=org.apache.log4j.PatternLayout\n-log4j.appender.job.conf.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.job.conf.appender.ReconnectionDelay=120000\n-\n-\n-#\n-#Logger for streaming task attempt logs\n-#\n-log4j.logger.org.apache.hadoop.mapred.TaskLogSocketAppender=INFO,${SATA}\n-log4j.additivity.org.apache.hadoop.mapred.TaskLogSocketAppender=false\n-\n-#\n-#Socket appender for streaming task attempt logs\n-#\n-log4j.appender.task.attempt.log=org.apache.log4j.net.SocketAppender\n-log4j.appender.task.attempt.log.RemoteHost=localhost\n-log4j.appender.task.attempt.log.Port=${TASKATTEMPT_LOGGING_PORT}\n-log4j.appender.task.attempt.log.layout=org.apache.log4j.PatternLayout\n-log4j.appender.task.attempt.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.task.attempt.log.appender.ReconnectionDelay=120000\n-\n-#\n-#Socket Appender for Streaming NameNode,SecondaryNameNode and JobTracker Logs\n-#\n-log4j.appender.socket.appender=org.apache.log4j.net.SocketAppender\n-log4j.appender.socket.appender.RemoteHost=localhost\n-log4j.appender.socket.appender.Port=${HADOOP_LOGGING_PORT}\n-log4j.appender.socket.appender.layout=org.apache.log4j.PatternLayout\n-log4j.appender.socket.appender.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.socket.appender.ReconnectionDelay=120000\n-\n-#\n-#Logger for streaming Job History Logs\n-#\n-log4j.logger.JobHistoryLogs=INFO,${SAJH}\n-log4j.additivity.JobHistoryLogs=false\n-\n-#\n-#Socket Appender for Job History Logs\n-#\n-log4j.appender.job.history.log=org.apache.log4j.net.SocketAppender\n-log4j.appender.job.history.log.RemoteHost=localhost\n-log4j.appender.job.history.log.Port=${JOBHISTORY_LOGGING_PORT}\n-log4j.appender.job.history.log.layout=org.apache.log4j.PatternLayout\n-log4j.appender.job.history.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.job.history.appender.ReconnectionDelay=120000\n-\n-\n-\n-# Custom Logging levels\n-\n-hadoop.metrics.log.level=INFO\n-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n-\n-# Jets3t library\n-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n-\n-#\n-# Null Appender\n-# Trap security logger on the hadoop client side\n-#\n-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n-\n-#\n-# Event Counter Appender\n-# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n-#\n-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n-\n-#\n-# Job Summary Appender\n-#\n-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}\n-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n-log4j.appender.JSA.DatePattern=.yyyy-MM-dd\n-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}\n-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/resources/log4j.properties",
"new_path": "src/test/resources/log4j.properties",
"diff": "#\n#-------------------------------------------------------------\n-# Define some default values that can be overridden by system properties\n-hadoop.root.logger=ERROR,console\n-hadoop.log.dir=.\n-hadoop.log.file=hadoop.log\n-hadoop.security.logger=OFF\n+log4j.rootLogger=ERROR,console\n-# Security appender\n-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\n-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n-\n-#\n-# Job Summary Appender\n-#\n-# Use following logger to send summary to separate file defined by\n-# hadoop.mapreduce.jobsummary.log.file rolled daily:\n-# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n-#\n-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\n-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\n-\n-# Define the root logger to the system property \"hadoop.root.logger\".\n-log4j.rootLogger=${hadoop.root.logger}, EventCounter\n-\n-# Logging Threshold\n-log4j.threshold=ALL\n-\n-#\n-# Guardim Proxy setup - HDFS, MapReduce and Hadoop RPC\n-#\n-log4j.appender.GuardiumProxyAppender=org.apache.log4j.net.SocketAppender\n-log4j.appender.GuardiumProxyAppender.RemoteHost=\n-log4j.appender.GuardiumProxyAppender.Port=\n-log4j.appender.GuardiumProxyAppender.RecoveryFile=audit-${hadoop.log.file}\n-log4j.appender.GuardiumProxyAppender.Threshold=INFO\n-\n-# Hdfs audit logs\n-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\n-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hadoop.security.logger}\n-\n-hdfs.audit.logger=INFO,NullAppender\n-hdfs.audit.log.maxfilesize=256MB\n-hdfs.audit.log.maxbackupindex=20\n-log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\n-log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n-log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}\n-log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}\n-\n-# MapReduce audit logs\n-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\n-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${hadoop.security.logger}\n-\n-mapred.audit.logger=INFO,NullAppender\n-mapred.audit.log.maxfilesize=256MB\n-mapred.audit.log.maxbackupindex=20\n-log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender\n-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\n-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\n-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\n-log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}\n-log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}\n-\n-# Hadoop RPC audit logs\n-log4j.additivity.SecurityLogger=false\n-log4j.logger.SecurityLogger=${hadoop.security.logger}\n-\n-log4j.appender.hadoopaudit=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.hadoopaudit.DatePattern='.'yyyy-MM-dd\n-log4j.appender.hadoopaudit.File=${hadoop.log.dir}/audit-${hadoop.log.file}\n-log4j.appender.hadoopaudit.Append=true\n-log4j.appender.hadoopaudit.layout=org.apache.log4j.PatternLayout\n-log4j.appender.hadoopaudit.layout.ConversionPattern=%d{ISO8601} %5p %c - 
%m%n\n-\n-#\n-# Daily Rolling File Appender\n-#\n-\n-#log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n-#log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-\n-# Rollver at midnight\n-#log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n-\n-# 30-day backup\n-#log4j.appender.DRFA.MaxBackupIndex=30\n-#log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n-\n-# Pattern format: Date LogLevel LoggerName LogMessage\n-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-# Debugging Pattern format\n-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-#\n-# console\n-# Add \"console\" to rootlogger above if you want to use this\n-#\n+log4j.logger.org.apache.sysds=ERROR\n+log4j.logger.org.apache.spark=OFF\n+log4j.logger.org.apache.hadoop=OFF\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n-\n-#\n-# TaskLog Appender\n-#\n-\n-#Default values\n-hadoop.tasklog.taskid=null\n-hadoop.tasklog.iscleanup=false\n-hadoop.tasklog.noKeepSplits=4\n-hadoop.tasklog.totalLogFileSize=100\n-hadoop.tasklog.purgeLogSplits=true\n-hadoop.tasklog.logsRetainHours=12\n-\n-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogSocketAppender\n-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\n-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\n-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n-\n-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-\n-#\n-#Security audit appender\n-#\n-\n-hadoop.security.log.file=SecurityAuth.audit\n-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\n-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\n-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n-\n-#\n-# Rolling File Appender\n-#\n-\n-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-# Logfile size and and 30-day backups\n-#log4j.appender.RFA.MaxFileSize=1MB\n-#log4j.appender.RFA.MaxBackupIndex=30\n-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-\n-#\n-# Rolling File Appender\n-#\n-\n-log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n-# Logfile size and and 30-day backups\n-log4j.appender.RFA.MaxFileSize=10MB\n-log4j.appender.RFA.MaxBackupIndex=3\n-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n-\n-#\n-#Logger for streaming Job Configuration\n-#\n-log4j.logger.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=INFO,${SAJC}\n-log4j.additivity.org.apache.hadoop.mapred.JobTrackerConfLogStreaming=false\n-\n-#\n-#Socket Appender for streaming Job 
Configuration\n-#\n-log4j.appender.job.conf=org.apache.log4j.net.SocketAppender\n-log4j.appender.job.conf.RemoteHost=localhost\n-log4j.appender.job.conf.Port=${JOBCONF_LOGGING_PORT}\n-log4j.appender.job.conf.layout=org.apache.log4j.PatternLayout\n-log4j.appender.job.conf.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.job.conf.appender.ReconnectionDelay=120000\n-\n-\n-#\n-#Logger for streaming task attempt logs\n-#\n-log4j.logger.org.apache.hadoop.mapred.TaskLogSocketAppender=INFO,${SATA}\n-log4j.additivity.org.apache.hadoop.mapred.TaskLogSocketAppender=false\n-\n-#\n-#Socket appender for streaming task attempt logs\n-#\n-log4j.appender.task.attempt.log=org.apache.log4j.net.SocketAppender\n-log4j.appender.task.attempt.log.RemoteHost=localhost\n-log4j.appender.task.attempt.log.Port=${TASKATTEMPT_LOGGING_PORT}\n-log4j.appender.task.attempt.log.layout=org.apache.log4j.PatternLayout\n-log4j.appender.task.attempt.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.task.attempt.log.appender.ReconnectionDelay=120000\n-\n-#\n-#Socket Appender for Streaming NameNode,SecondaryNameNode and JobTracker Logs\n-#\n-log4j.appender.socket.appender=org.apache.log4j.net.SocketAppender\n-log4j.appender.socket.appender.RemoteHost=localhost\n-log4j.appender.socket.appender.Port=${HADOOP_LOGGING_PORT}\n-log4j.appender.socket.appender.layout=org.apache.log4j.PatternLayout\n-log4j.appender.socket.appender.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.socket.appender.ReconnectionDelay=120000\n-\n-#\n-#Logger for streaming Job History Logs\n-#\n-log4j.logger.JobHistoryLogs=INFO,${SAJH}\n-log4j.additivity.JobHistoryLogs=false\n-\n-#\n-#Socket Appender for Job History Logs\n-#\n-log4j.appender.job.history.log=org.apache.log4j.net.SocketAppender\n-log4j.appender.job.history.log.RemoteHost=localhost\n-log4j.appender.job.history.log.Port=${JOBHISTORY_LOGGING_PORT}\n-log4j.appender.job.history.log.layout=org.apache.log4j.PatternLayout\n-log4j.appender.job.history.log.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n-log4j.appender.job.history.appender.ReconnectionDelay=120000\n-\n-\n-\n-# Custom Logging levels\n-\n-hadoop.metrics.log.level=INFO\n-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n-\n-# Jets3t library\n-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n-\n-#\n-# Null Appender\n-# Trap security logger on the hadoop client side\n-#\n-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n-\n-#\n-# Event Counter Appender\n-# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n-#\n-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n-\n-#\n-# Job Summary Appender\n-#\n-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n-log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}\n-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout\n-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n-log4j.appender.JSA.DatePattern=.yyyy-MM-dd\n-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}\n-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-135] Simplified log4j.properties files
- Testing Log4J file simplification
- add default log4j file
Closes #980. |
49,722 | 28.07.2020 14:06:05 | -7,200 | c14430a1895855422c4ced0b81089eb16eed2d00 | New built-in function cor (correlation matrix)
Closes #1002. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/site/run_issues.md",
"diff": "+Error: Could not find or load main class org.apache.sysds.api.DMLScript\n+\n+Solution for macOS: Install `realpath` with Homebrew\n+```bash\n+brew install coreutils\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<exclude>src/main/python/docs/build/**/*</exclude>\n<exclude>docs/api/**/*</exclude>\n<exclude>docs/_site/**/*</exclude>\n+ <exclude>docs/site/run_issues.md</exclude>\n<exclude>docs/.jekyll-cache/**/*</exclude>\n<exclude>docs/css/bootstrap.min.css</exclude>\n<exclude>docs/css/pygments-default.css</exclude>\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/cor.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+\n+m_cor = function(Matrix[Double] X) return (Matrix[Double] Y) {\n+ # compute correlation matrix in vectorized form\n+ Xc = X - colMeans(X);\n+ Y = ((t(Xc) %*% Xc)/(nrow(X)-1)) / (t(colSds(X)) %*% colSds(X));\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -82,6 +82,7 @@ public enum Builtins {\nCUMSUM(\"cumsum\", false),\nCUMSUMPROD(\"cumsumprod\", false),\nCONFUSIONMATRIX(\"confusionMatrix\", true),\n+ COR(\"cor\", true),\nDETECTSCHEMA(\"detectSchema\", false),\nDIAG(\"diag\", false),\nDISCOVER_FD(\"discoverFD\", true),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinCorrelationMatrixTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+public class BuiltinCorrelationMatrixTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME = \"correlationMatrix\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinCorrelationMatrixTest.class.getSimpleName() + \"/\";\n+\n+ private final static double eps = 1e-3;\n+ private final static int rows = 1765;\n+ private final static double spDense = 0.99;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME,new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,new String[]{\"B\"}));\n+ }\n+\n+ @Test\n+ public void testCorrelationMatrixDefaultCP() {\n+ runCorrelationMatrix(true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testCorrelationMatrixDefaultSP() {\n+ runCorrelationMatrix(true, ExecType.SPARK);\n+ }\n+\n+ private void runCorrelationMatrix(boolean defaultProb, ExecType instType)\n+ {\n+ ExecMode platformOld = setExecMode(instType);\n+\n+ try\n+ {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", input(\"A\"), output(\"B\") };\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + inputDir() + \" \" + expectedDir();\n+\n+ //generate actual dataset\n+ double[][] A = getRandomMatrix(rows, 10, -1, 1, spDense, 7);\n+ writeInputMatrixWithMTD(\"A\", A, true);\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"B\");\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/correlationMatrix.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = as.matrix(readMM(paste(args[1], \"A.mtx\", sep=\"\")))\n+R = cor(X);\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[2], \"B\", sep=\"\"));\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/correlationMatrix.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1);\n+Y = cor(X);\n+write(Y, $2);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2592] New built-in function cor (correlation matrix)
Closes #1002. |
49,738 | 01.08.2020 14:10:58 | -7,200 | 1cf7bb54cc86472ec93f9b4bdf89bbece94e15b3 | Fix bufferpool leak in mvvar instructions, part 2
This additional patch fixes edge cases of the variable cleanup in mvvar
instructions: (1) for lists of matrices/frames, and (2) scenarios where
the src and target are the same object. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/VariableCPInstruction.java",
"diff": "@@ -682,9 +682,11 @@ public class VariableCPInstruction extends CPInstruction implements LineageTrace\n// remove existing variable bound to target name and\n// cleanup matrix/frame/list data if necessary\n- Data tgt = ec.removeVariable(getInput2().getName());\n- if( tgt != null)\n- ec.cleanupDataObject(tgt);\n+ if( srcData.getDataType().isMatrix() || srcData.getDataType().isFrame() ) {\n+ Data tgtData = ec.removeVariable(getInput2().getName());\n+ if( tgtData != null && srcData != tgtData )\n+ ec.cleanupDataObject(tgtData);\n+ }\n// do the actual move\nec.setVariable(getInput2().getName(), srcData);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -1196,6 +1196,8 @@ public abstract class AutomatedTestBase {\nfail(\"expected exception which has not been raised: \" + expectedException);\n}\ncatch(Exception e) {\n+ if( !outputBuffering )\n+ e.printStackTrace();\nif(errMessage != null && !errMessage.equals(\"\")) {\nboolean result = rCompareException(exceptionExpected, errMessage, e, false);\nif(exceptionExpected && !result) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/applications/GLMTest.java",
"new_path": "src/test/java/org/apache/sysds/test/applications/GLMTest.java",
"diff": "@@ -264,6 +264,7 @@ public class GLMTest extends AutomatedTestBase\nint expectedNumberOfJobs = -1; // 31;\n+ setOutputBuffering(false);\nrunTest(true, EXCEPTION_NOT_EXPECTED, null, expectedNumberOfJobs);\ndouble max_abs_beta = 0.0;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2594] Fix bufferpool leak in mvvar instructions, part 2
This additional patch fixes edge cases of the variable cleanup in mvvar
instructions: (1) for lists of matrices/frames, and (2) scenarios where
the src and target are the same object. |
49,706 | 04.08.2020 10:04:05 | -7,200 | 378d09286d3a047fe734f98e01d36db7a7e2e3c4 | [MINOR] Add license to notebook | [
{
"change_type": "MODIFY",
"old_path": "notebooks/databricks/MLContext.scala",
"new_path": "notebooks/databricks/MLContext.scala",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n// Databricks notebook source\n// MAGIC %md # Apache SystemDS on Databricks in 5 minutes\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add license to notebook |
49,706 | 04.08.2020 10:19:38 | -7,200 | f2c1593566e6e23d210dfa9478793b1ec68b1492 | [MINOR] Update readme and fix links in docs | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -34,6 +34,8 @@ heterogeneous and nested schema.\n**Python Documentation** [Python SystemDS Documentation](https://apache.github.io/systemds/api/python/index.html)\n+**Issue Tracker** [Jira Dashboard](https://issues.apache.org/jira/secure/Dashboard.jspa?selectPageId=12335852)\n+\n**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from Apache SystemML 1.2 in\nSeptember 2018. We will continue to support linear algebra programs over matrices, while replacing the underlying data model\nand compiler, as well as substantially extending the supported functionalities. Until the first release, you can build your own\n"
},
{
"change_type": "RENAME",
"old_path": "docs/img/systemml-logo.png",
"new_path": "docs/img/systemds-logo.png",
"diff": ""
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update readme and fix links in docs |
49,720 | 06.08.2020 20:03:27 | -7,200 | 76fbe254a11f62828d248da6dbf78194cb8d595e | Update MICE implementation to use matrix intermediates
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/mice.dml",
"new_path": "scripts/builtin/mice.dml",
"diff": "#\n#-------------------------------------------------------------\n-# Builtin function Implements Multiple Imputation using Chained Equations (MICE) for nominal data\n+# Built-in function Implements Multiple Imputation using Chained Equations (MICE)\n#\n# INPUT PARAMETERS:\n# ---------------------------------------------------------------------------------------------\n# NAME TYPE DEFAULT MEANING\n# ---------------------------------------------------------------------------------------------\n-# F String --- Data Frame\n-# cMask Double --- A 0/1 row vector for identifying numeric (0) adn categorical features (1)\n+# X String --- Data Matrix (Recoded Matrix for categorical features)\n+# cMask Double --- A 0/1 row vector for identifying numeric (0) and categorical features (1)\n# iter Integer 3 Number of iteration for multiple imputations\n-# complete Integer 3 A complete dataset generated though a specific iteration\n# ---------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------\n# NAME TYPE DEFAULT MEANING\n# ---------------------------------------------------------------------------------------------\n-# dataset Double --- imputed dataset\n-# singleSet Double --- A complete dataset generated though a specific iteration\n+# output Double --- imputed dataset\n-# Assumption missing value are represented with empty string i.e \",,\" in csv file\n-# variables with suffix n are storing continuous/numeric data and variables with suffix c are storing categorical data\n-s_mice= function(Frame[String] F, Matrix[Double] cMask, Integer iter = 3, Integer complete = 3, Boolean verbose = FALSE)\n-return(Frame[String] dataset, Frame[String] singleSet)\n-{\n-\n- if(ncol(F) == 1)\n- stop(\"invalid argument: can not apply mice on single column\")\n-\n- if(complete > iter)\n- complete = iter\n-\n-\n- # adding a temporary feature (in-case all attributes are of same type)\n- F = cbind(F, as.frame(matrix(1,nrow(F), 1)))\n- cMask = cbind(cMask, matrix(1,1,1))\n-\n- n = nrow(F)\n- row = n*complete;\n- col = ncol(F)\n- Result = matrix(0, rows=1, cols = col)\n- Mask_Result = matrix(0, rows=1, cols=col)\n- scat = seq(1, ncol(cMask))\n- cat = removeEmpty(target=scat, margin=\"rows\", select=t(cMask))\n-\n- if(nrow(cat) == ncol(F))\n- cMask[1,ncol(cMask)] = 0\n-\n- s=\"\"\n- for(i in 1: nrow(cat), check =0)\n- s = s+as.integer(as.scalar(cat[i, 1]))+\",\";\n-\n-\n- # encoding categorical columns using recode transformation\n- jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n- [X, M] = transformencode(target=F, spec=jspecR);\n-\n- XO = replace(target=X, pattern=NaN, replacement=0);\n-\n- # remove categorical features and impute continuous features with mean\n- eX_n = removeEmpty(target=X, margin=\"cols\", select=(cMask==0))\n- col_n = ncol(eX_n);\n- # storing the mask/address of missing values\n- Mask_n = is.na(eX_n);\n- inverseMask_n = 1 - Mask_n;\n- # replacing the empty cells in encoded data with 0\n- eX_n = replace(target=eX_n, pattern=NaN, replacement=0);\n- # filling the missing data with their means\n- X2_n = eX_n+(Mask_n*colMeans(eX_n))\n- # matrices for computing actul data\n- p_n = table(seq(1, ncol(eX_n)), removeEmpty(target=scat, margin=\"rows\", select=t(cMask==0)))\n- if(ncol(p_n) < ncol(cMask))\n- p_n = cbind(p_n, matrix(0, nrow(p_n), ncol(cMask)-ncol(p_n)))\n- q = XO * cMask\n- # Taking out the categorical features for initial imputation by mode\n- eX_c = 
removeEmpty(target = q, margin = \"cols\")\n- col_c = ncol(eX_c);\n- eX_c2 = removeEmpty(target = eX_c, margin = \"rows\", select = (rowSums(eX_c != 0)==col_c))\n- colMod = matrix(0, 1, ncol(eX_c))\n- # compute columnwise mode\n- parfor(i in 1: col_c) {\n- f = eX_c2[, i] # adding one in data for dealing with zero category\n- cat_counts = table(f, 1, n, 1); # counts for each category\n- mode = as.scalar(rowIndexMax(t(cat_counts)));\n- colMod[1,i] = mode\n+# Assumption missing value are represented with empty string i.e \",,\" in CSV file\n+# variables with suffix n are storing continuos/numeric data and variables with suffix c are storing categorical data\n+m_mice= function(Matrix[Double] X, Matrix[Double] cMask, Integer iter = 3, Boolean verbose = FALSE)\n+ return(Matrix[Double] output)\n+{\n+ lastIndex = ncol(X);\n+ sumMax = sum(cMask);\n+\n+ # if all features are numeric add a categorical features\n+ # if all features are categorical add a numeric features\n+ if(sumMax == 0 | sumMax == ncol(cMask)) {\n+ X = cbind(X, matrix(1, nrow(X), 1))\n+ cMask = cbind(cMask, matrix(ifelse(sumMax==0, 1, 0), 1, 1))\n}\n- # find the mask of missing values\n- tmpMask_c = (eX_c==0) * colMod # fill missing values with mode\n-\n- # Generate a matrix of actual length\n- p_c = table(seq(1, ncol(tmpMask_c)), removeEmpty(target=scat, margin =\"rows\", select=t(cMask)), ncol(tmpMask_c), ncol(cMask))\n-\n- Mask_c = tmpMask_c %*% p_c\n- inverseMask_c = Mask_c == 0\n- r = X2_n %*% p_n\n- qr = q + r\n- X2_c = qr + Mask_c\n- Mask_c = Mask_c != 0\n-\n-\n- # one-hot encoding of categorical features\n+ # separate categorical and continuous features\n+ nX = removeEmpty(target=X, margin=\"cols\", select=(cMask==0))\n+ cX = removeEmpty(target=X, margin=\"cols\", select= cMask)\n+\n+ # store the mask of numeric missing values\n+ Mask_n = is.na(nX);\n+ nX = replace(target=nX, pattern=NaN, replacement=0);\n+ # initial mean imputation\n+ X_n = nX+(Mask_n*colMeans(nX))\n+\n+ # store the mask of categorical missing values\n+ Mask_c = is.na(cX);\n+ cX = replace(target=cX, pattern=NaN, replacement=0);\n+ colMode = colMode(cX)\n+ # initial mode imputation\n+ X_c = cX+(Mask_c*colMode)\n+\n+ # reconstruct original matrix using sparse matrices p and q\n+ p = table(seq(1, ncol(nX)), removeEmpty(target=seq(1, ncol(cMask)), margin=\"rows\", select=t(cMask==0)), ncol(nX), ncol(X))\n+ q = table(seq(1, ncol(cX)), removeEmpty(target=seq(1, ncol(cMask)), margin=\"rows\", select=t(cMask)), ncol(cX), ncol(X))\n+ X1 = (X_n %*% p) + (X_c %*% q)\n+ Mask1 = is.na(X)\n+\n+ X = replace(target=X, pattern=NaN, replacement=0);\n+ d = ncol(X1)\n+ n = nrow(X1)\n+\n+ # compute index of categorical features\n+ encodeIndex = removeEmpty(target=t(seq(1, ncol(X1))), margin=\"cols\", select=cMask)\n+\n+ s = \"\";\n+ for(i in 1:ncol(encodeIndex))\n+ s = s + as.integer(as.scalar(encodeIndex[1, i])) + \",\";\n+\n+ # specifications for one-hot encoding of categorical features\njspecDC = \"{ids:true, dummycode:[\"+s+\"]}\";\n- [dX, dM] = transformencode(target=as.frame(X2_c), spec=jspecDC);\n- # recoding of metadata of OHE features to get the number of distinct elements\n- [metaTransform, metaTransformMeta] = transformencode(target=dM, spec=jspecR);\n- metaTransform = replace(target=metaTransform, pattern=NaN, replacement=0)\n- # counting distinct elements in each categorical feature\n- dcDistincts = colMaxs(metaTransform)\n- dist = dcDistincts + (1-cMask)\n-\n- # creating a mask matrix of OHE features\n- dXMask = matrix(0, 1, ncol(dX))\n- index = 1\n- for(k in 
1:col) {\n- nDistk = as.scalar(dcDistincts[1,k]);\n- if(nDistk != 0) {\n- dXMask[1,index:(index+nDistk-1)] = matrix(1,1,nDistk)\n- index += nDistk;\n- }\n- else\n- index += 1\n- }\n-\n- #multiple imputations\n- for(k in 1:iter)\n+ for(k in 1:iter) # start iterative imputation\n{\n- Mask_Filled_n = Mask_n;\n- Mask_Filled_c = Mask_c\n- in_n = 1; in_c = 1; i=1; j=1; # variables for index selection\n- while(i <= ncol(dX))\n+ Mask_Filled = Mask1\n+ inverseMask = Mask1 == 0\n+ # OHE of categorical features\n+ [dX, dM] = transformencode(target=as.frame(X1), spec=jspecDC);\n+ dist = (colMaxs(X1) * cMask) + (cMask == 0) # number of distinct items in categorical features\n+ i=1; j=1; in_c=1;\n+\n+ while(i < ncol(dX))\n{\n- if(as.scalar(dXMask[1,i]) == 0)\n+ j = (i + as.scalar(dist[1,in_c])) - 1 # index value for iterating OHE columns\n+ if(sum(Mask1[, in_c]) > 0 & as.scalar(cMask[, in_c]) == 0) # impute numeric features\n{\n# construct column selector\n- sel = cbind(matrix(1,1,i-1), as.matrix(0), matrix(1,1,ncol(dX)-i));\n+ selX = matrix(1,1,ncol(dX))\n+ selX[1,i:j] = matrix(0,1,as.scalar(dist[1,in_c]))\n+ selY = cbind(matrix(1,1,in_c-1), as.matrix(0), matrix(1,1,d-in_c));\n# prepare train data set X and Y\n- slice1 = removeEmpty(target = dX, margin = \"rows\", select = inverseMask_n[,in_n])\n- train_X = removeEmpty(target = slice1, margin = \"cols\", select = sel);\n- train_Y = slice1[,i]\n+ slice1 = removeEmpty(target = dX, margin = \"rows\", select = inverseMask[,in_c])\n+ slice1a = removeEmpty(target = X1, margin = \"rows\", select = inverseMask[,in_c])\n+ train_X = removeEmpty(target = slice1, margin = \"cols\", select = selX);\n+ train_Y = slice1a[,in_c]\n+\n# prepare score data set X and Y for imputing Y\n- slice2 = removeEmpty(target = dX, margin = \"rows\", select = Mask_n[,in_n])\n- test_X = removeEmpty(target = slice2, margin = \"cols\", select = sel);\n- test_Y = slice2[,i]\n- # learning a regression line\n+ slice2 = removeEmpty(target = dX, margin = \"rows\", select = Mask1[,in_c])\n+ slice2a = removeEmpty(target = X1, margin = \"rows\", select = Mask1[,in_c])\n+ test_X = removeEmpty(target = slice2, margin = \"cols\", select = selX);\n+ test_Y = slice2a[,in_c]\n+\n+ # learn a regression line\nbeta = lm(X=train_X, y=train_Y, verbose=FALSE, icpt=1, reg = 1e-7, tol = 1e-7);\n# predicting missing values\npred = lmpredict(X=test_X, w=beta, icpt=1)\n# imputing missing column values (assumes Mask_Filled being 0/1-matrix)\n- R = removeEmpty(target=Mask_Filled_n[,in_n] * seq(1,n), margin=\"rows\");\n+ R = removeEmpty(target=Mask_Filled[, in_c] * seq(1,nrow(X1)), margin=\"rows\");\n# TODO modify removeEmpty to return zero row and n columns\nif(!(nrow(R) == 1 & as.scalar(R[1,1] == 0)))\n- Mask_Filled_n[,in_n] = table(R, 1, pred, n, 1);\n- in_n = in_n + 1;\n+ Mask_Filled[,in_c] = table(R, 1, pred, nrow(X1), 1);\n}\n-\n- if( (as.scalar(dXMask[1,i]) == 1) & (sum(Mask_c[, in_c]) != 0) )\n+ else if (sum(Mask1[, in_c]) > 0 & as.scalar(cMask[, in_c]) != 0) # impute categorical features\n{\n- j = (i + as.scalar(dist[1,in_c])) - 1\n-\n# construct column selector\nselX = matrix(1,1,ncol(dX))\nselX[1,i:j] = matrix(0,1,as.scalar(dist[1,in_c]))\n- selY = cbind(matrix(1,1,in_c-1), as.matrix(0), matrix(1,1,col-in_c));\n+ selY = cbind(matrix(1,1,in_c-1), as.matrix(0), matrix(1,1,d-in_c));\n# prepare train data set X and Y\n- slice1 = removeEmpty(target = dX, margin = \"rows\", select = inverseMask_c[,in_c])\n- slice1a = removeEmpty(target = X2_c, margin = \"rows\", select = inverseMask_c[,in_c])\n+ slice1 = 
removeEmpty(target = dX, margin = \"rows\", select = inverseMask[,in_c])\n+ slice1a = removeEmpty(target = X1, margin = \"rows\", select = inverseMask[,in_c])\ntrain_X = removeEmpty(target = slice1, margin = \"cols\", select = selX);\ntrain_Y = slice1a[,in_c]\n# prepare score data set X and Y for imputing Y\n- slice2 = removeEmpty(target = dX, margin = \"rows\", select = Mask_c[,in_c])\n- slice2a = removeEmpty(target = X2_c, margin = \"rows\", select = Mask_c[,in_c])\n+ slice2 = removeEmpty(target = dX, margin = \"rows\", select = Mask1[,in_c])\n+ slice2a = removeEmpty(target = X1, margin = \"rows\", select = Mask1[,in_c])\ntest_X = removeEmpty(target = slice2, margin = \"cols\", select = selX);\ntest_Y = slice2a[,in_c]\n@@ -202,83 +153,27 @@ return(Frame[String] dataset, Frame[String] singleSet)\n# predicting missing values\n[prob,pred,acc] = multiLogRegPredict(X=test_X, B=beta, Y = test_Y)\n# imputing missing column values (assumes Mask_Filled being 0/1-matrix)\n- R = removeEmpty(target=Mask_Filled_c[,in_c] * seq(1,n), margin=\"rows\");\n+ R = removeEmpty(target=Mask_Filled[,in_c] * seq(1,n), margin=\"rows\");\n#TODO modify removeEmpty to return zero row and n columns\nif(!(nrow(R) == 1 & as.scalar(R[1,1] == 0)))\n- Mask_Filled_c[,in_c] = table(R, 1, pred, n, 1);\n- i = as.integer(j)\n+ Mask_Filled[,in_c] = table(R, 1, pred, n, 1);\n}\n- if(in_c < col)\n+ i = as.integer(j)+1\nin_c = in_c + 1\n- i = i+1;\n}\n-\n- nM = ((Mask_Filled_n) %*% p_n) + Mask_Filled_c\n- Result = rbind(Result, nM+XO)\n- Mask_Result = rbind(Mask_Result, nM)\n- [dX, dM] = transformencode(target=as.frame(nM+XO), spec=jspecDC);\n+ X1 = X + Mask_Filled\n}\n-\n- # compute output indices\n- Result = Result[2: n*iter+1, ]\n- Mask_Result = Mask_Result[2: n*iter+1, ]\n- index = (((complete*n)-n)+1)\n- # voting for aggregation of categorical imputations\n- agg = cAggregate(Mask_Result*cMask, iter, n)\n-\n- # aggregating the results\n- Agg_Matrix = matrix(0,n, col)\n- for(d in 1:iter)\n- Agg_Matrix = Agg_Matrix + Mask_Result[(((d-1)*n)+1):(n*d),]\n- Agg_Matrix = (Agg_Matrix/iter)\n-\n- Agg_Matrix = Agg_Matrix * (cMask == 0)\n- Agg_Matrix = Agg_Matrix + agg\n-\n- dataset = XO + Agg_Matrix\n- singleSet = Result[index:row, ]\n-\n- # decoding nominal columns\n- dataset = transformdecode(target=dataset, spec=jspecR, meta=M);\n- singleSet = transformdecode(target=singleSet, spec=jspecR, meta=M);\n-\n- # removing extra categorical column\n- dataset = dataset[,1:col-1]\n- singleSet = singleSet[,1:col-1]\n+ output = X1[,1:lastIndex]\n}\n-\n-cAggregate = function(Matrix[Double] Mask_Result, Integer iter, Integer n)\n-return (Matrix[Double] agg)\n-{\n- conflict = matrix(0, n, ncol(Mask_Result))\n- uCount = 0\n- vCount = 0\n- for(d in seq(1,(iter-1), 1))\n- {\n- u = Mask_Result[(((d-1)*n)+1):(n*d),]\n- v = Mask_Result[(((d)*n)+1):(n*(d+1)),]\n- if(sum(u != v) > 0) {\n- conflict = u != v\n- u1 = conflict * u;\n- v1 = conflict * v;\n- for(i in 1: iter)\n- {\n- s = Mask_Result[(((i-1)*n)+1):(n*i),]\n- s = s * conflict\n- if(sum(u1 != s ) == 0)\n- uCount = uCount + 1\n- if(sum(v1 != s) == 0)\n- vCount = vCount + 1\n- }\n- # copy the results of u in v\n- if(uCount > vCount)\n- Mask_Result[(((d)*n)+1):(n*(d+1)),] = Mask_Result[(((d-1)*n)+1):(n*d),]\n- # copy the results of v in u\n- else\n- Mask_Result[(((d-1)*n)+1):(n*d),] = Mask_Result[(((d)*n)+1):(n*(d+1)),]\n- d = 1\n- }\n+colMode = function (Matrix[Double] X) return (Matrix[Double] colMode) {\n+ d = ncol(X)\n+ n = nrow(X)\n+ colMode = matrix(0, 1, ncol(X))\n+ # compute column wise mode\n+ 
parfor(i in 1: d) {\n+ X_c = removeEmpty(target=X, margin = \"rows\", select=(rowSums(X != 0)==d))\n+ cat_counts = table(X_c[, i], 1, n, 1); # counts for each category\n+ colMode[1,i] = as.scalar(rowIndexMax(t(cat_counts))) # mode\n}\n- agg = Mask_Result[1:n,]\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMiceTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMiceTest.java",
"diff": "@@ -36,9 +36,8 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nprivate static final String TEST_CLASS_DIR = TEST_DIR + BuiltinMiceTest.class.getSimpleName() + \"/\";\nprivate final static String DATASET = SCRIPT_DIR +\"functions/transform/input/ChickWeight.csv\";\n- private final static double eps = 0.2;\n+ private final static double eps = 0.16;\nprivate final static int iter = 3;\n- private final static int com = 2;\n@Override\npublic void setUp() {\n@@ -50,20 +49,34 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nrunMiceNominalTest(mask, 1, LopProperties.ExecType.CP);\n}\n+// @Test\n+// public void testMiceMixSpark() {\n+// double[][] mask = {{ 0.0, 0.0, 1.0, 1.0, 0.0}};\n+// runMiceNominalTest(mask, 1, LopProperties.ExecType.SPARK);\n+// }\n+\n@Test\npublic void testMiceNumberCP() {\ndouble[][] mask = {{ 0.0, 0.0, 0.0, 0.0, 0.0}};\nrunMiceNominalTest(mask, 2, LopProperties.ExecType.CP);\n}\n+// @Test\n+// public void testMiceNumberSpark() {\n+// double[][] mask = {{ 0.0, 0.0, 0.0, 0.0, 0.0}};\n+// runMiceNominalTest(mask, 2, LopProperties.ExecType.SPARK);\n+// }\n+\n@Test\npublic void testMiceCategoricalCP() {\ndouble[][] mask = {{ 1.0, 1.0, 1.0, 1.0, 1.0}};\nrunMiceNominalTest(mask, 3, LopProperties.ExecType.CP);\n}\n+\n// @Test\n- // public void testMiceSpark() {\n- // runMiceNominalTest( LopProperties.ExecType.SPARK);\n+// public void testMiceCategoricalSpark() {\n+// double[][] mask = {{ 1.0, 1.0, 1.0, 1.0, 1.0}};\n+// runMiceNominalTest(mask, 3, LopProperties.ExecType.SPARK);\n// }\nprivate void runMiceNominalTest(double[][] mask, int testType, LopProperties.ExecType instType) {\n@@ -72,18 +85,17 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nloadTestConfiguration(getTestConfiguration(TEST_NAME));\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + TEST_NAME + \".dml\";\n- programArgs = new String[]{\"-nvargs\", \"X=\" + DATASET, \"Mask=\"+input(\"M\"), \"iteration=\" + iter, \"com=\" + com, \"dataN=\" + output(\"N\"), \"dataC=\" + output(\"C\")};\n+ programArgs = new String[]{\"-nvargs\", \"X=\" + DATASET, \"Mask=\"+input(\"M\"), \"iteration=\" + iter, \"dataN=\" + output(\"N\"), \"dataC=\" + output(\"C\")};\nwriteInputMatrixWithMTD(\"M\", mask, true);\nfullRScriptName = HOME + TEST_NAME + \".R\";\n- rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" +DATASET+ \" \" +inputDir() + \" \" + expectedDir();\n+ rCmd = getRCmd(DATASET, inputDir(), expectedDir());\n+ setOutputBuffering(false);\nrunTest(true, false, null, -1);\nrunRScript(true);\n-\n- switch (testType)\n- {\n+ switch (testType) {\ncase 1:\ntestCategoricalOutput();\ntestNumericOutput();\n@@ -128,6 +140,6 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nif(countTrue / (double)dmlfileC.size() > 0.98)\nAssert.assertTrue(true);\nelse\n- Assert.fail();\n+ Assert.fail(\"categorical test fails, the true value count is less than 98%\");\n}\n}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/mice.R",
"new_path": "src/test/scripts/functions/builtin/mice.R",
"diff": "@@ -26,34 +26,23 @@ library(dplyr)\nd <- read.csv(args[1], header=FALSE )\nmass <- as.matrix(readMM(paste(args[2], \"M.mtx\", sep=\"\")));\n-\nif(sum(mass) == ncol(d))\n{\nd = d[,3:4]\n+ d[] <- lapply(d, factor)\n+ d\nmass = mass[1,3:4]\n-meth=\"\"\n- for(i in 1: 2) {\n- d[[names(d)[i]]] = as.factor(d[[names(d)[i]]]);\n- meth = c(meth, \"polyreg\")\n- }\n-\n- meth=meth[-1]\n-\n+ meth = meth= rep(\"polyreg\", ncol(d))\n#impute\nimputeD <- mice(d,where = is.na(d), method = meth, m=3)\n- R = data.frame(complete(imputeD,3))\n- c = select_if(R, is.factor)\n-\n- # convert factor into numeric before casting to matrix\n- c = sapply(c, function(x) as.numeric(as.character(x)))\n- writeMM(as(as.matrix(c), \"CsparseMatrix\"), paste(args[3], \"C\", sep=\"\"));\n+ imputeD\n+ R = as.matrix(complete(imputeD,3))\n+ writeMM(as(R, \"CsparseMatrix\"), paste(args[3], \"C\", sep=\"\"));\n} else if (sum(mass) == 0)\n{\n- print(\"Generating R witout cat\")\nimputeD <- mice(d,where = is.na(d), method = \"norm.predict\", m=3)\n- R = data.frame(complete(imputeD,3))\n- n = select_if(R, is.numeric)\n- writeMM(as(as.matrix(n), \"CsparseMatrix\"), paste(args[3], \"N\", sep=\"\"));\n+ R = as.matrix(complete(imputeD,3))\n+ writeMM(as(as.matrix(R), \"CsparseMatrix\"), paste(args[3], \"N\", sep=\"\"));\n} else {\nmeth=\"\"\nfor(i in 1: ncol(mass)) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/mice.dml",
"new_path": "src/test/scripts/functions/builtin/mice.dml",
"diff": "#\n#-------------------------------------------------------------\n-X = read($X, data_type=\"frame\", format=\"csv\");\n-M = read($Mask)\n-[dataset, singleSet]= mice(F=X, cMask=M, iter=$iteration, complete=$com, verbose = FALSE)\n+# read data frame\n+F = read($X, data_type=\"frame\", format=\"csv\");\n+# the mask for identifying categorical columns\n+Mask = read($Mask)\n-if(sum(M) == ncol(X))\n+# Test cases\n+# case 1: if all columns are categorical\n+if(sum(Mask) == ncol(F))\n{\n- c = as.matrix(singleSet[,3:4]) # comparing only selected columns with R results because dataset is continuos and\n- write(c, $dataC) # for categorical imputation R polyreg only support upto 50 distinct items (50 categories/feature)\n+ scat = seq(1, ncol(Mask))\n+ s = \"1\";\n+ for(i in 2:ncol(F))\n+ s = s + \",\" + i;\n+ # encoding categorical columns using recode transformation\n+ jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n+ [X, M] = transformencode(target=F, spec=jspecR);\n+ # call mice\n+ dataset = mice(X=X,cMask=Mask, iter=$iteration, verbose = FALSE )\n+ # decode data back to original format\n+ output = as.matrix(transformdecode(target=dataset, spec=jspecR, meta=M));\n+ # cherry picking columns to compare with R results\n+ output = output[, 3:4]\n+ write(output, $dataC)\n}\n-else if (sum(M) == 0)\n-{\n- n = as.matrix(dataset) * (1-M)\n- n = removeEmpty(target=n, margin = \"cols\")\n- write(n, $dataN)\n+# case 2: if all data is numeric\n+else if(sum(Mask) == 0){\n+ # no transformation is required, cast the frame into matrix and call mice\n+ # as.matrix() will convert the null values into zeros, so explicitly replace zeros with NaN\n+ X = replace(target = as.matrix(F), pattern = 0, replacement = NaN)\n+ output = mice(X=X, cMask=Mask, iter=$iteration, verbose = FALSE )\n+ write(output, $dataN)\n}\n+# case 3: if the data is combination of numeric and categorical columns\nelse\n{\n- c = as.matrix(dataset) * (M)\n+ scat = seq(1, ncol(Mask))\n+ cat = removeEmpty(target=scat, margin=\"rows\", select=t(Mask))\n+ s = \"\" + as.integer(as.scalar(cat[1, 1]))\n+ for(i in 2:nrow(cat))\n+ s = s + \",\" + as.integer(as.scalar(cat[i, 1]));\n+\n+ # encoding categorical columns using recode transformation\n+ jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n+ [X, M] = transformencode(target=F, spec=jspecR);\n+ # call mice\n+ dataset = mice(X=X,cMask=Mask, iter=$iteration, verbose = FALSE )\n+ # decode data into original format\n+ output = as.matrix(transformdecode(target=dataset, spec=jspecR, meta=M));\n+ # below lines are only for testing purpose\n+ c = output * (Mask)\nc = removeEmpty(target=c, margin = \"cols\")\n- n = as.matrix(dataset) * (1-M)\n+ n = output * (1-Mask)\nn = removeEmpty(target=n, margin = \"cols\")\nwrite(n, $dataN)\nwrite(c, $dataC)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/caching/BufferpoolLeak.dml",
"new_path": "src/test/scripts/functions/caching/BufferpoolLeak.dml",
"diff": "X = rand(rows=$1, cols=$2, min=1, max=10);\nfor(i in 1:500) {\n# print(\"executed iteration \"+i)\n- [m1,m2] = mice(as.frame(X), matrix(0,1,ncol(X)),3,3, FALSE)\n+ m1 = mice(X, matrix(0,1,ncol(X)), 3, FALSE)\n}\nif( ncol(X) > $2 )\nprint(toString(m1));\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2596] Update MICE implementation to use matrix intermediates
Closes #972. |
49,698 | 06.08.2020 12:03:17 | -19,080 | f66da4272afa28c3dafc25575c46510508d8de9c | Verify PageRank script works with MLContext
* Tests PageRank script with MLContext against an R scripts
* keeps consistency of fullRScriptName throughout `AutomatedTestBase`
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -905,7 +905,9 @@ public abstract class AutomatedTestBase {\n*/\nprotected void runRScript(boolean newWay) {\n- String executionFile = sourceDirectory + selectedTest + \".R\";\n+ String executionFile = sourceDirectory + selectedTest + \".R\";;\n+ if(fullRScriptName != null)\n+ executionFile = fullRScriptName;\n// *** HACK ALERT *** HACK ALERT *** HACK ALERT ***\n// Some of the R scripts will fail if the \"expected\" directory doesn't exist.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextPageRankTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.mlcontext;\n+\n+import org.apache.log4j.Logger;\n+import org.apache.sysds.api.mlcontext.Script;\n+import org.apache.sysds.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+import static org.apache.sysds.api.mlcontext.ScriptFactory.dmlFromFile;\n+\n+public class MLContextPageRankTest extends MLContextTestBase {\n+ protected static Logger log = Logger.getLogger(MLContextPageRankTest.class);\n+\n+ protected final static String TEST_SCRIPT_PAGERANK = \"scripts/staging/PageRank.dml\";\n+ private final static double sparsity1 = 0.41; // dense\n+ private final static double sparsity2 = 0.05; // sparse\n+\n+ private final static double eps = 0.1;\n+\n+ private final static int rows = 1468;\n+ private final static int cols = 1468;\n+ private final static double alpha = 0.85;\n+ private final static double maxiter = 10;\n+\n+ @Test\n+ public void testPageRankSparse() {\n+ runPageRankTestMLC(true);\n+ }\n+\n+ @Test\n+ public void testPageRankDense() {\n+ runPageRankTestMLC(false);\n+ }\n+\n+\n+ private void runPageRankTestMLC(boolean sparse) {\n+\n+ //generate actual datasets\n+ double[][] G = getRandomMatrix(rows, cols, 1, 1, sparse?sparsity2:sparsity1, 234);\n+ double[][] p = getRandomMatrix(cols, 1, 0, 1e-14, 1, 71);\n+ double[][] e = getRandomMatrix(rows, 1, 0, 1e-14, 1, 72);\n+ double[][] u = getRandomMatrix(1, cols, 0, 1e-14, 1, 73);\n+ writeInputMatrixWithMTD(\"G\", G, true);\n+ writeInputMatrixWithMTD(\"p\", p, true);\n+ writeInputMatrixWithMTD(\"e\", e, true);\n+ writeInputMatrixWithMTD(\"u\", u, true);\n+\n+\n+ fullRScriptName = \"src/test/scripts/functions/codegenalg/Algorithm_PageRank.R\";\n+\n+ rCmd = getRCmd(inputDir(), String.valueOf(alpha),\n+ String.valueOf(maxiter), expectedDir());\n+ runRScript(true);\n+\n+ MatrixBlock outmat = new MatrixBlock();\n+\n+ Script pr = dmlFromFile(TEST_SCRIPT_PAGERANK);\n+ pr.in(\"G\", G).in(\"p\", p).in(\"e\", e).in(\"u\", u)\n+ .in(\"$5\", alpha).in(\"$6\", maxiter)\n+ .out(\"p\");\n+ outmat = ml.execute(pr).getMatrix(\"p\").toMatrixBlock();\n+\n+\n+ //compare matrices\n+ HashMap<MatrixValue.CellIndex, Double> rfile = readRMatrixFromFS(\"p\");\n+ TestUtils.compareMatrices(rfile, outmat, eps);\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextTestBase.java",
"diff": "@@ -67,7 +67,7 @@ public abstract class MLContextTestBase extends AutomatedTestBase {\n@Override\npublic void setUp() {\nClass<? extends MLContextTestBase> clazz = this.getClass();\n- String dir = (testDir == null) ? \"functions/mlcontext\" : testDir;\n+ String dir = (testDir == null) ? \"functions/mlcontext/\" : testDir;\nString name = (testName == null) ? clazz.getSimpleName() : testName;\naddTestConfiguration(dir, name);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2597] Verify PageRank script works with MLContext
* Tests PageRank script with MLContext against an R scripts
* keeps consistency of fullRScriptName throughout `AutomatedTestBase`
Closes #1005. |
49,706 | 08.08.2020 09:46:41 | -7,200 | 7af2ae04f28ddcb36158719a25a7fa34b22d3266 | [MINOR] Update docker images organization
Changes the docker images to use the docker organization systemds
add install dependency for R dbScan
Change the tests to use the new organizations docker images
Closes | [
{
"change_type": "MODIFY",
"old_path": ".github/action/Dockerfile",
"new_path": ".github/action/Dockerfile",
"diff": "#\n#-------------------------------------------------------------\n-FROM sebaba/testingsysds:2.0\n+FROM systemds/testingsysds:latest\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/build.sh",
"new_path": "docker/build.sh",
"diff": "# Build the docker containers\n# The first build is for running systemds through docker.\n-docker image build -f docker/sysds.Dockerfile -t sebaba/sysds:2.0 .\n+docker image build -f docker/sysds.Dockerfile -t systemds/sysds:latest .\n# The second build is for testing systemds. This image installs the R dependencies needed to run the tests.\n-docker image build -f docker/testsysds.Dockerfile -t sebaba/testingsysds:2.0 .\n+docker image build -f docker/testsysds.Dockerfile -t systemds/testingsysds:latest .\n# The third build is python docker for systemds.\n-docker image build -f docker/pythonsysds.Dockerfile -t sebaba/pythonsysds:2.0 .\n+docker image build -f docker/pythonsysds.Dockerfile -t systemds/pythonsysds:latest .\n# You might want to prune the docker system afterwards using\n# docker system prune\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docker/push.sh",
"diff": "+#/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+docker push systemds/sysds:latest\n+docker push systemds/testingsysds:latest\n+docker push systemds/pythonsysds:latest\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/installDependencies.R",
"new_path": "src/test/scripts/installDependencies.R",
"diff": "@@ -56,6 +56,7 @@ custom_install(\"sigmoid\");\ncustom_install(\"DescTools\");\ncustom_install(\"mice\");\ncustom_install(\"mclust\");\n+custom_install(\"dbscan\");\nprint(\"Installation Done\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update docker images organization
Changes the docker images to use the docker organization systemds
add install dependency for R dbScan
Change the tests to use the new organizations docker images
Closes #1008 |
49,698 | 09.08.2020 09:50:17 | -19,080 | 0fce525f85b1d9a7828b5c164eb17c1faf7b1934 | Verify PNMF script work with MLContext
- Input matrices X, W, H
- Output matrices W, H
- R vs dml result comparison (~1e-5)
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/mlcontext/MLContextPNMFTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.mlcontext;\n+\n+import org.apache.log4j.Logger;\n+import org.apache.sysds.api.mlcontext.MLResults;\n+import org.apache.sysds.api.mlcontext.Script;\n+import org.apache.sysds.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+import static org.apache.sysds.api.mlcontext.ScriptFactory.dmlFromFile;\n+\n+public class MLContextPNMFTest extends MLContextTestBase {\n+ protected static Logger log = Logger.getLogger(MLContextPNMFTest.class);\n+\n+ protected final static String TEST_SCRIPT_PNMF = \"scripts/staging/PNMF.dml\";\n+ private final static double sparsity1 = 0.7; // dense\n+ private final static double sparsity2 = 0.1; // sparse\n+\n+ private final static double eps = 1e-5;\n+\n+ private final static int rows = 1468;\n+ private final static int cols = 1207;\n+ private final static int rank = 20;\n+\n+ private final static double epsilon = 0.000000001;//1e-9\n+ private final static double maxiter = 10;\n+\n+ @Test\n+ public void testPNMFSparse() {\n+ runPNMFTestMLC(true);\n+ }\n+\n+ @Test\n+ public void testPNMFDense() {\n+ runPNMFTestMLC(false);\n+ }\n+\n+\n+ private void runPNMFTestMLC(boolean sparse) {\n+\n+ //generate actual datasets\n+ double[][] X = getRandomMatrix(rows, cols, 0, 1, sparse?sparsity2:sparsity1, 234);\n+ double[][] W = getRandomMatrix(rows, rank, 0, 1e-14, 1, 71);\n+ double[][] H = getRandomMatrix(rank, cols, 0, 1e-14, 1, 72);\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+ writeInputMatrixWithMTD(\"W\", W, true);\n+ writeInputMatrixWithMTD(\"H\", H, true);\n+\n+\n+ fullRScriptName = \"src/test/scripts/functions/codegenalg/Algorithm_PNMF.R\";\n+\n+ rCmd = getRCmd(inputDir(), String.valueOf(rank),\n+ String.valueOf(epsilon), String.valueOf(maxiter), expectedDir());\n+ runRScript(true);\n+\n+\n+ Script pnmf = dmlFromFile(TEST_SCRIPT_PNMF);\n+ pnmf.in(\"X\", X).in(\"W\", W).in(\"H\", H).in(\"$4\", rank)\n+ .in(\"$5\", epsilon).in(\"$6\", maxiter)\n+ .out(\"W\").out(\"H\");\n+ MLResults outres = ml.execute(pnmf);\n+ MatrixBlock dmlW = outres.getMatrix(\"W\").toMatrixBlock();\n+ MatrixBlock dmlH = outres.getMatrix(\"H\").toMatrixBlock();\n+\n+ //compare matrices\n+ HashMap<MatrixValue.CellIndex, Double> rW = readRMatrixFromFS(\"W\");\n+ HashMap<MatrixValue.CellIndex, Double> rH = readRMatrixFromFS(\"H\");\n+ TestUtils.compareMatrices(rW, dmlW, eps);\n+ TestUtils.compareMatrices(rH, dmlH, eps);\n+ }\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2602] Verify PNMF script work with MLContext
- Input matrices X, W, H
- Output matrices W, H
- R vs dml result comparison (~1e-5)
Closes #1013. |
49,689 | 10.08.2020 19:47:53 | -7,200 | 8afd129ca97e98cf7a9d10d3e624105c11c707a7 | [SYSTEMDS-2607,2608] Use relative timestamp for scoring
This patch changes the following:
- use timestamp relative to execution start time instead of epoch
- remove SpilledItem class and related data structures
- re-reading of spilled items (no deletion from disk) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCache.java",
"diff": "@@ -62,6 +62,7 @@ public class LineageCache\nstatic {\nlong maxMem = InfrastructureAnalyzer.getLocalMaxMemory();\nLineageCacheEviction.setCacheLimit((long)(CACHE_FRAC * maxMem));\n+ LineageCacheEviction.setStartTimestamp();\n}\n// Cache Synchronization Approach:\n@@ -220,7 +221,7 @@ public class LineageCache\npublic static boolean probe(LineageItem key) {\n//TODO problematic as after probe the matrix might be kicked out of cache\n- boolean p = (_cache.containsKey(key) || LineageCacheEviction.spillListContains(key));\n+ boolean p = _cache.containsKey(key); // in cache or in disk\nif (!p && DMLScript.STATISTICS && LineageCacheEviction._removelist.contains(key))\n// The sought entry was in cache but removed later\nLineageCacheStatistics.incrementDelHits();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -224,6 +224,11 @@ public class LineageCacheConfig\nbreak;\ncase HYBRID:\nWEIGHTS[0] = 1; WEIGHTS[1] = 0.0033;\n+ // FIXME: Relative timestamp fix reduces the absolute\n+ // value of the timestamp component of the scoring function\n+ // to a comparatively much smaller number. W[1] needs to be\n+ // re-tuned accordingly.\n+ // TODO: Automatic tuning of weights.\nbreak;\n}\n_cachepolicy = policy;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEntry.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEntry.java",
"diff": "@@ -35,6 +35,7 @@ public class LineageCacheEntry {\nprotected LineageCacheStatus _status;\nprotected LineageCacheEntry _nextEntry;\nprotected LineageItem _origItem;\n+ private String _outfile = null;\nprotected double score;\npublic LineageCacheEntry(LineageItem key, DataType dt, MatrixBlock Mval, ScalarObject Sval, long computetime) {\n@@ -122,8 +123,18 @@ public class LineageCacheEntry {\n_status = LineageCacheStatus.EMPTY;\n}\n+ protected synchronized void setOutfile(String outfile) {\n+ _outfile = outfile;\n+ }\n+\n+ protected synchronized String getOutfile() {\n+ return _outfile;\n+ }\n+\nprotected synchronized void setTimestamp() {\n- _timestamp = System.currentTimeMillis();\n+ _timestamp = System.currentTimeMillis() - LineageCacheEviction.getStartTimestamp();\n+ if (_timestamp < 0)\n+ throw new DMLRuntimeException (\"Execution timestamp shouldn't be -ve. Key: \"+_key);\nrecomputeScore();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheEviction.java",
"diff": "package org.apache.sysds.runtime.lineage;\nimport java.io.IOException;\n-import java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.Map;\nimport java.util.Set;\n@@ -37,8 +36,8 @@ public class LineageCacheEviction\n{\nprivate static long _cachesize = 0;\nprivate static long CACHE_LIMIT; //limit in bytes\n+ private static long _startTimestamp = 0;\nprotected static final Set<LineageItem> _removelist = new HashSet<>();\n- private static final Map<LineageItem, SpilledItem> _spillList = new HashMap<>();\nprivate static String _outdir = null;\nprivate static TreeSet<LineageCacheEntry> weightedQueue = new TreeSet<>(LineageCacheConfig.LineageCacheComparator);\n@@ -46,7 +45,6 @@ public class LineageCacheEviction\n// reset cache size, otherwise the cache clear leads to unusable\n// space which means evictions could run into endless loops\n_cachesize = 0;\n- _spillList.clear();\nweightedQueue.clear();\n_outdir = null;\nif (DMLScript.STATISTICS)\n@@ -240,6 +238,14 @@ public class LineageCacheEviction\n//---------------- COSTING RELATED METHODS -----------------\n+ protected static void setStartTimestamp() {\n+ _startTimestamp = System.currentTimeMillis();\n+ }\n+\n+ protected static long getStartTimestamp() {\n+ return _startTimestamp;\n+ }\n+\nprivate static double getDiskSpillEstimate(LineageCacheEntry e) {\nif (!e.isMatrixValue() || e.isNullVal())\nreturn 0;\n@@ -299,6 +305,15 @@ public class LineageCacheEviction\nif (entry.isNullVal())\nthrow new DMLRuntimeException (\"Cannot spill null value to disk. Key: \"+entry._key);\n+ // Do nothing if the entry is already spilled before.\n+ if (entry._origItem == null && entry.getOutfile() != null)\n+ return;\n+ if (entry._origItem != null) {\n+ LineageCacheEntry tmp = cache.get(entry._origItem); //head\n+ if (tmp.getOutfile() != null)\n+ return;\n+ }\n+\nlong t0 = System.nanoTime();\nif (_outdir == null) {\n_outdir = LocalFileUtils.getUniqueWorkingDir(LocalFileUtils.CATEGORY_LINEAGE);\n@@ -316,12 +331,12 @@ public class LineageCacheEviction\n// Add all the entries associated with this matrix to spillList.\nif (entry._origItem == null) {\n- _spillList.put(entry._key, new SpilledItem(outfile));\n+ entry.setOutfile(outfile);\n}\nelse {\nLineageCacheEntry h = cache.get(entry._origItem); //head\nwhile (h != null) {\n- _spillList.put(h._key, new SpilledItem(outfile));\n+ h.setOutfile(outfile);\nh = h._nextEntry;\n}\n}\n@@ -336,19 +351,20 @@ public class LineageCacheEviction\nif (cache.get(key) == null)\nthrow new DMLRuntimeException (\"Spilled item should present in cache. 
Key: \"+key);\n+ LineageCacheEntry e = cache.get(key);\nlong t0 = System.nanoTime();\nMatrixBlock mb = null;\n// Read from local FS\ntry {\n- mb = LocalFileUtils.readMatrixBlockFromLocal(_spillList.get(key)._outfile);\n- } catch (IOException e) {\n- throw new DMLRuntimeException (\"Read from \" + _spillList.get(key)._outfile + \" failed.\", e);\n+ mb = LocalFileUtils.readMatrixBlockFromLocal(e.getOutfile());\n+ } catch (IOException exp) {\n+ throw new DMLRuntimeException (\"Read from \" + e.getOutfile() + \" failed.\", exp);\n}\n- LocalFileUtils.deleteFileIfExists(_spillList.get(key)._outfile, true);\n+ // Keep the entry in disk to save re-spilling.\n+ //LocalFileUtils.deleteFileIfExists(_spillList.get(key)._outfile, true);\nlong t1 = System.nanoTime();\n// Restore to cache\n- LineageCacheEntry e = cache.get(key);\ne.setValue(mb);\nif (e._origItem != null) {\n// Restore to all the entries having the same data.\n@@ -365,30 +381,10 @@ public class LineageCacheEviction\n// Adjust disk reading speed\nadjustReadWriteSpeed(e, ((double)(t1-t0))/1000000000, true);\n// TODO: set cache status as RELOADED for this entry\n- _spillList.remove(key);\nif (DMLScript.STATISTICS) {\nLineageCacheStatistics.incrementFSReadTime(t1-t0);\nLineageCacheStatistics.incrementFSHits();\n}\nreturn cache.get(key);\n}\n-\n- protected static boolean spillListContains(LineageItem key) {\n- return _spillList.containsKey(key);\n- }\n-\n- // ---------------- INTERNAL DATA STRUCTURES FOR EVICTION -----------------\n-\n- // TODO: Remove this class, and add outfile to LineageCacheEntry.\n- private static class SpilledItem {\n- String _outfile;\n- //long _computeTime;\n- //protected LineageItem _origItem;\n-\n- public SpilledItem(String outfile) {\n- _outfile = outfile;\n- //_computeTime = computetime;\n- //_origItem = origItem;\n- }\n- }\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2607,2608] Use relative timestamp for scoring
This patch changes the following:
- use timestamp relative to execution start time instead of epoch
- remove SpilledItem class and related data structures
- re-reading of spilled items (no deletion from disk) |
49,720 | 10.08.2020 21:55:31 | -7,200 | 98ea24d9f7d713bdcc1c0898ee79eaa493b096b3 | Comparison operators for frame-frame ops (CP, Spark)
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/DMLTranslator.java",
"new_path": "src/main/java/org/apache/sysds/parser/DMLTranslator.java",
"diff": "@@ -1812,6 +1812,10 @@ public class DMLTranslator\ntarget.setDataType(DataType.MATRIX);\ntarget.setValueType(ValueType.FP64);\n}\n+ else if(left.getDataType() == DataType.FRAME || right.getDataType() == DataType.FRAME) {\n+ target.setDataType(DataType.FRAME);\n+ target.setValueType(ValueType.BOOLEAN);\n+ }\nelse {\n// Added to support scalar relational comparison\ntarget.setDataType(DataType.SCALAR);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/RelationalExpression.java",
"new_path": "src/main/java/org/apache/sysds/parser/RelationalExpression.java",
"diff": "@@ -141,6 +141,8 @@ public class RelationalExpression extends Expression\nboolean isLeftMatrix = (_left.getOutput() != null && _left.getOutput().getDataType() == DataType.MATRIX);\nboolean isRightMatrix = (_right.getOutput() != null && _right.getOutput().getDataType() == DataType.MATRIX);\n+ boolean isLeftFrame = (_left.getOutput() != null && _left.getOutput().getDataType() == DataType.FRAME);\n+ boolean isRightFrame = (_right.getOutput() != null && _right.getOutput().getDataType() == DataType.FRAME);\nif(isLeftMatrix || isRightMatrix) {\n// Added to support matrix relational comparison\nif(isLeftMatrix && isRightMatrix) {\n@@ -155,6 +157,15 @@ public class RelationalExpression extends Expression\n//double; once we support boolean matrices this needs to change\noutput.setValueType(ValueType.FP64);\n}\n+ else if(isLeftFrame && isRightFrame) {\n+ output.setDataType(DataType.FRAME);\n+ output.setDimensions(_left.getOutput().getDim1(), _left.getOutput().getDim2());\n+ output.setValueType(ValueType.BOOLEAN);\n+ }\n+ else if( isLeftFrame || isRightFrame ) {\n+ raiseValidateError(\"Unsupported relational expression for mixed types \"\n+ +_left.getOutput().getDataType().name()+\" \"+_right.getOutput().getDataType().name());\n+ }\nelse {\noutput.setBooleanProperties();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/BinaryFrameFrameCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/BinaryFrameFrameCPInstruction.java",
"diff": "@@ -21,6 +21,7 @@ package org.apache.sysds.runtime.instructions.cp;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.matrix.data.FrameBlock;\n+import org.apache.sysds.runtime.matrix.operators.BinaryOperator;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\npublic class BinaryFrameFrameCPInstruction extends BinaryCPInstruction\n@@ -32,16 +33,26 @@ public class BinaryFrameFrameCPInstruction extends BinaryCPInstruction\n@Override\npublic void processInstruction(ExecutionContext ec) {\n- // Read input matrices\n+ // get input frames\nFrameBlock inBlock1 = ec.getFrameInput(input1.getName());\nFrameBlock inBlock2 = ec.getFrameInput(input2.getName());\n+ if(getOpcode().equals(\"dropInvalidType\")) {\n// Perform computation using input frames, and produce the result frame\nFrameBlock retBlock = inBlock1.dropInvalid(inBlock2);\n+ // Attach result frame with FrameBlock associated with output_name\n+ ec.setFrameOutput(output.getName(), retBlock);\n+ }\n+ else {\n+ // Execute binary operations\n+ BinaryOperator dop = (BinaryOperator) _optr;\n+ FrameBlock outBlock = inBlock1.binaryOperations(dop, inBlock2, null);\n+ // Attach result frame with FrameBlock associated with output_name\n+ ec.setFrameOutput(output.getName(), outBlock);\n+ }\n+\n// Release the memory occupied by input frames\nec.releaseFrameInput(input1.getName());\nec.releaseFrameInput(input2.getName());\n- // Attach result frame with FrameBlock associated with output_name\n- ec.setFrameOutput(output.getName(), retBlock);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/BinaryFrameFrameSPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/BinaryFrameFrameSPInstruction.java",
"diff": "@@ -29,7 +29,9 @@ import org.apache.sysds.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysds.runtime.instructions.InstructionUtils;\nimport org.apache.sysds.runtime.instructions.cp.CPOperand;\nimport org.apache.sysds.runtime.matrix.data.FrameBlock;\n+import org.apache.sysds.runtime.matrix.operators.BinaryOperator;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\n+import scala.Tuple2;\npublic class BinaryFrameFrameSPInstruction extends BinarySPInstruction {\nprotected BinaryFrameFrameSPInstruction(Operator op, CPOperand in1, CPOperand in2, CPOperand out, String opcode, String istr) {\n@@ -55,16 +57,33 @@ public class BinaryFrameFrameSPInstruction extends BinarySPInstruction {\n@Override\npublic void processInstruction(ExecutionContext ec) {\nSparkExecutionContext sec = (SparkExecutionContext)ec;\n+\n// Get input RDDs\nJavaPairRDD<Long, FrameBlock> in1 = sec.getFrameBinaryBlockRDDHandleForVariable(input1.getName());\n+ JavaPairRDD<Long, FrameBlock> out = null;\n+\n+ if(getOpcode().equals(\"dropInvalidType\")) {\n// get schema frame-block\nBroadcast<FrameBlock> fb = sec.getSparkContext().broadcast(sec.getFrameInput(input2.getName()));\n- JavaPairRDD<Long, FrameBlock> out = in1.mapValues(new isCorrectbySchema(fb.getValue()));\n+ out = in1.mapValues(new isCorrectbySchema(fb.getValue()));\n//release input frame\nsec.releaseFrameInput(input2.getName());\n- //set output RDD\n+ }\n+ else {\n+ JavaPairRDD<Long, FrameBlock> in2 = sec.getFrameBinaryBlockRDDHandleForVariable(input2.getName());\n+ // create output frame\n+ BinaryOperator dop = (BinaryOperator) _optr;\n+ // check for binary operations\n+ out = in1.join(in2).mapValues(new FrameComparison(dop));\n+ }\n+\n+ //set output RDD and maintain dependencies\nsec.setRDDHandleForVariable(output.getName(), out);\nsec.addLineageRDD(output.getName(), input1.getName());\n+ if( getOpcode().equals(\"dropInvalidType\") )\n+ sec.addLineageBroadcast(output.getName(), input2.getName());\n+ else\n+ sec.addLineageRDD(output.getName(), input2.getName());\n}\nprivate static class isCorrectbySchema implements Function<FrameBlock,FrameBlock> {\n@@ -81,4 +100,17 @@ public class BinaryFrameFrameSPInstruction extends BinarySPInstruction {\nreturn arg0.dropInvalid(schema_frame);\n}\n}\n+\n+ private static class FrameComparison implements Function<Tuple2<FrameBlock, FrameBlock>, FrameBlock> {\n+ private static final long serialVersionUID = 5850400295183766401L;\n+ private final BinaryOperator bop;\n+ public FrameComparison(BinaryOperator op){\n+ bop = op;\n+ }\n+\n+ @Override\n+ public FrameBlock call(Tuple2<FrameBlock, FrameBlock> arg0) throws Exception {\n+ return arg0._1().binaryOperations(bop, arg0._2(), null);\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -41,7 +41,10 @@ import org.apache.sysds.api.DMLException;\nimport org.apache.sysds.common.Types.ValueType;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.caching.CacheBlock;\n+import org.apache.sysds.runtime.functionobjects.ValueComparisonFunction;\n+import org.apache.sysds.runtime.instructions.cp.*;\nimport org.apache.sysds.runtime.io.IOUtilFunctions;\n+import org.apache.sysds.runtime.matrix.operators.BinaryOperator;\nimport org.apache.sysds.runtime.transform.encode.EncoderRecode;\nimport org.apache.sysds.runtime.util.IndexRange;\nimport org.apache.sysds.runtime.util.UtilFunctions;\n@@ -277,6 +280,7 @@ public class FrameBlock implements CacheBlock, Externalizable\ncase BOOLEAN: _coldata[j] = new BooleanArray(new boolean[numRows]); break;\ncase INT32: _coldata[j] = new IntegerArray(new int[numRows]); break;\ncase INT64: _coldata[j] = new LongArray(new long[numRows]); break;\n+ case FP32: _coldata[j] = new FloatArray(new float[numRows]); break;\ncase FP64: _coldata[j] = new DoubleArray(new double[numRows]); break;\ndefault: throw new RuntimeException(\"Unsupported value type: \"+_schema[j]);\n}\n@@ -702,6 +706,8 @@ public class FrameBlock implements CacheBlock, Externalizable\ncase BOOLEAN: arr = new BooleanArray(new boolean[_numRows]); break;\ncase INT64: arr = new LongArray(new long[_numRows]); break;\ncase FP64: arr = new DoubleArray(new double[_numRows]); break;\n+ case INT32: arr = new IntegerArray(new int[_numRows]); break;\n+ case FP32: arr = new FloatArray(new float[_numRows]); break;\ndefault: throw new IOException(\"Unsupported value type: \"+vt);\n}\narr.readFields(in);\n@@ -837,6 +843,79 @@ public class FrameBlock implements CacheBlock, Externalizable\n+ 32 + value.length(); //char array\n}\n+ /**\n+ * This method performs the value comparison on two frames\n+ * if the values in both frames are equal, not equal, less than, greater than, less than/greater than and equal to\n+ * the output frame will store boolean value for each each comparison\n+ *\n+ * @param bop binary operator\n+ * @param that frame block of rhs of m * n dimensions\n+ * @param out output frame block\n+ * @return a boolean frameBlock\n+ */\n+ public FrameBlock binaryOperations(BinaryOperator bop, FrameBlock that, FrameBlock out) {\n+ if(getNumColumns() != that.getNumColumns() && getNumRows() != that.getNumColumns())\n+ throw new DMLRuntimeException(\"Frame dimension mismatch \"+getNumRows()+\" * \"+getNumColumns()+\n+ \" != \"+that.getNumRows()+\" * \"+that.getNumColumns());\n+ String[][] outputData = new String[getNumRows()][getNumColumns()];\n+\n+ //compare output value, incl implicit type promotion if necessary\n+ if( !(bop.fn instanceof ValueComparisonFunction) )\n+ throw new DMLRuntimeException(\"Unsupported binary operation on frames (only comparisons supported)\");\n+ ValueComparisonFunction vcomp = (ValueComparisonFunction) bop.fn;\n+\n+ for (int i = 0; i < getNumColumns(); i++) {\n+ if (getSchema()[i] == ValueType.STRING || that.getSchema()[i] == ValueType.STRING) {\n+ for (int j = 0; j < getNumRows(); j++) {\n+ if(checkAndSetEmpty(this, that, outputData, j, i))\n+ continue;\n+ String v1 = UtilFunctions.objectToString(get(j, i));\n+ String v2 = UtilFunctions.objectToString(that.get(j, i));\n+ outputData[j][i] = String.valueOf(vcomp.compare(v1, v2));\n+ }\n+ }\n+ else if (getSchema()[i] == ValueType.FP64 || that.getSchema()[i] == ValueType.FP64 ||\n+ getSchema()[i] == ValueType.FP32 || that.getSchema()[i] == ValueType.FP32) {\n+ for (int 
j = 0; j < getNumRows(); j++) {\n+ if(checkAndSetEmpty(this, that, outputData, j, i))\n+ continue;\n+ ScalarObject so1 = new DoubleObject(Double.parseDouble(get(j, i).toString()));\n+ ScalarObject so2 = new DoubleObject(Double.parseDouble(that.get(j, i).toString()));\n+ outputData[j][i] = String.valueOf(vcomp.compare(so1.getDoubleValue(), so2.getDoubleValue()));\n+ }\n+ }\n+ else if (getSchema()[i] == ValueType.INT64 || that.getSchema()[i] == ValueType.INT64 ||\n+ getSchema()[i] == ValueType.INT32 || that.getSchema()[i] == ValueType.INT32) {\n+ for (int j = 0; j < this.getNumRows(); j++) {\n+ if(checkAndSetEmpty(this, that, outputData, j, i))\n+ continue;\n+ ScalarObject so1 = new IntObject(Integer.parseInt(get(j, i).toString()));\n+ ScalarObject so2 = new IntObject(Integer.parseInt(that.get(j, i).toString()));\n+ outputData[j][i] = String.valueOf(vcomp.compare(so1.getLongValue(), so2.getLongValue()));\n+ }\n+ }\n+ else {\n+ for (int j = 0; j < getNumRows(); j++) {\n+ if(checkAndSetEmpty(this, that, outputData, j, i))\n+ continue;\n+ ScalarObject so1 = new BooleanObject( Boolean.parseBoolean(get(j, i).toString()));\n+ ScalarObject so2 = new BooleanObject( Boolean.parseBoolean(that.get(j, i).toString()));\n+ outputData[j][i] = String.valueOf(vcomp.compare(so1.getBooleanValue(), so2.getBooleanValue()));\n+ }\n+ }\n+ }\n+\n+ return new FrameBlock(UtilFunctions.nCopies(this.getNumColumns(), ValueType.BOOLEAN), outputData);\n+ }\n+\n+ private static boolean checkAndSetEmpty(FrameBlock fb1, FrameBlock fb2, String[][] out, int r, int c) {\n+ if(fb1.get(r, c) == null || fb2.get(r, c) == null) {\n+ out[r][c] = (fb1.get(r, c) == null && fb2.get(r, c) == null) ? \"true\" : \"false\";\n+ return true;\n+ }\n+ return false;\n+ }\n+\n///////\n// indexing and append operations\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/binary/frame/FrameEqualTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.binary.frame;\n+\n+import org.apache.sysds.api.DMLScript;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.common.Types.FileFormat;\n+import org.apache.sysds.hops.OptimizerUtils;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.AfterClass;\n+import org.junit.BeforeClass;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+public class FrameEqualTest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"frameComparisonTest\";\n+ private final static String TEST_DIR = \"functions/binary/frame/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + FrameEqualTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 100;\n+ private final static Types.ValueType[] schemaStrings1 = {Types.ValueType.FP64, Types.ValueType.BOOLEAN, Types.ValueType.INT64, Types.ValueType.STRING, Types.ValueType.STRING, Types.ValueType.FP64};\n+ private final static Types.ValueType[] schemaStrings2 = {Types.ValueType.INT64, Types.ValueType.BOOLEAN, Types.ValueType.FP32, Types.ValueType.FP64, Types.ValueType.STRING, Types.ValueType.FP32};\n+\n+ public enum TestType {\n+ GREATER, LESS, EQUALS, NOT_EQUALS, GREATER_EQUALS, LESS_EQUALS,\n+ }\n+\n+ @BeforeClass\n+ public static void init() {\n+ TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);\n+ }\n+\n+ @AfterClass\n+ public static void cleanUp() {\n+ if (TEST_CACHE_ENABLED) {\n+ TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);\n+ }\n+ }\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"D\"}));\n+ if (TEST_CACHE_ENABLED) {\n+ setOutAndExpectedDeletionDisabled(true);\n+ }\n+ }\n+\n+ @Test\n+ public void testFrameEqualCP() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.EQUALS, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testFrameEqualSpark() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.EQUALS, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameNotEqualCP() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.NOT_EQUALS, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testFrameNotEqualSpark() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.NOT_EQUALS, ExecType.SPARK);\n+ }\n+\n+ 
@Test\n+ public void testFrameLessThanCP() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.LESS, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testFrameLessThanSpark() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.LESS, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameGreaterEqualsCP() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.GREATER_EQUALS, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testFrameGreaterEqualsSpark() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.GREATER_EQUALS, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameLessEqualsCP() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.LESS_EQUALS, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testFrameLessEqualsSpark() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.LESS_EQUALS, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameGreaterThanCP() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.GREATER, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testFrameGreaterThanSpark() {\n+ runComparisonTest(schemaStrings1, schemaStrings2, rows, schemaStrings1.length, TestType.GREATER, ExecType.SPARK);\n+ }\n+\n+ private void runComparisonTest(Types.ValueType[] schema1, Types.ValueType[] schema2, int rows, int cols,\n+ TestType type, ExecType et)\n+ {\n+ Types.ExecMode platformOld = setExecMode(et);\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+\n+ try {\n+ getAndLoadTestConfiguration(TEST_NAME);\n+\n+ double[][] A = getRandomMatrix(rows, cols, 2, 3, 1, 2);\n+ double[][] B = getRandomMatrix(rows, cols, 10, 20, 1, 0);\n+\n+ writeInputFrameWithMTD(\"A\", A, true, schemaStrings1, FileFormat.BINARY);\n+ writeInputFrameWithMTD(\"B\", B, true, schemaStrings2, FileFormat.BINARY);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-explain\", \"recompile_runtime\", \"-nvargs\", \"A=\" + input(\"A\"), \"B=\" + input(\"B\"),\n+ \"rows=\" + String.valueOf(rows), \"cols=\" + Integer.toString(cols), \"type=\" + String.valueOf(type), \"C=\" + output(\"C\")};\n+\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + inputDir() + \" \" + String.valueOf(type) + \" \" + expectedDir();\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<MatrixValue.CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"C\");\n+ HashMap<MatrixValue.CellIndex, Double> rfile = readRMatrixFromFS(\"C\");\n+\n+ double eps = 0.0001;\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ }\n+ catch (Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/binary/frame/frameComparisonTest.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A=read.csv(paste(args[1], \"A.csv\", sep=\"\"), header = FALSE, stringsAsFactors=FALSE)\n+B=read.csv(paste(args[1], \"B.csv\", sep=\"\"), header = FALSE, stringsAsFactors=FALSE)\n+\n+test = args[2]\n+\n+if(test == \"GREATER\")\n+{\n+C = A > B\n+} else if (test == \"LESS\") {\n+C = A < B\n+} else if (test == \"EQUALS\") {\n+C = A == B\n+} else if (test == \"NOT_EQUALS\") {\n+C = A != B\n+} else if(test == \"GREATER_EQUALS\") {\n+C = A >= B\n+} else if(test == \"LESS_EQUALS\") {\n+C = A <= B\n+}\n+\n+writeMM(as(C, \"CsparseMatrix\"), paste(args[3], \"C\", sep=\"\"));\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/binary/frame/frameComparisonTest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($A, rows=$rows, cols=$cols, data_type=\"frame\", format=\"binary\", header=FALSE);\n+B = read($B, rows=$rows, cols=$cols, data_type=\"frame\", format=\"binary\", header=FALSE);\n+\n+test = $type\n+\n+if(test == \"GREATER\")\n+ C = A > B\n+else if (test == \"LESS\")\n+ C = A < B\n+else if (test == \"EQUALS\")\n+ C = A == B\n+else if (test == \"NOT_EQUALS\")\n+ C = A != B\n+else if (test == \"GREATER_EQUALS\")\n+ C = A >= B\n+else if (test == \"LESS_EQUALS\")\n+ C = A <= B\n+\n+C = as.matrix(C)\n+# print(\"this is C \"+toString(C))\n+\n+write(C, $C);\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2601] Comparison operators for frame-frame ops (CP, Spark)
Closes #1009. |
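For readers of the record above: the new `FrameBlock.binaryOperations` dispatches on a `ValueComparisonFunction`, so it can also be exercised directly from Java. The sketch below follows only the signatures visible in the diff; the `Equals.getEqualsFnObject()` function object, the `FrameBlock(schema, data)` constructor, and passing `null` for the `out` block are assumptions about the surrounding SystemDS API of that era, not guarantees quoted from this commit.

```java
// Hedged sketch: exercises FrameBlock.binaryOperations as added in the diff.
// Assumption: Equals.getEqualsFnObject() is the standard SystemDS function
// object for '==' (it implements ValueComparisonFunction, so the method's
// type check passes); only the calls visible in the diff are certain.
import org.apache.sysds.common.Types.ValueType;
import org.apache.sysds.runtime.functionobjects.Equals;
import org.apache.sysds.runtime.matrix.data.FrameBlock;
import org.apache.sysds.runtime.matrix.operators.BinaryOperator;

public class FrameEqualsSketch {
  public static void main(String[] args) {
    ValueType[] schema = {ValueType.STRING, ValueType.INT64};
    String[][] a = {{"a", "1"}, {"b", "2"}};
    String[][] b = {{"a", "3"}, {"c", "2"}};
    FrameBlock fa = new FrameBlock(schema, a);
    FrameBlock fb = new FrameBlock(schema, b);
    // '==' comparison; per the diff, the result is an all-BOOLEAN frame
    FrameBlock out = fa.binaryOperations(
      new BinaryOperator(Equals.getEqualsFnObject()), fb, null);
    System.out.println(out.get(0, 0)); // expected: true  ("a" == "a")
    System.out.println(out.get(1, 1)); // expected: true  (2 == 2)
  }
}
```

Per the diff, mixed column schemas promote per comparison (string, then FP, then INT, falling back to boolean), and a cell where both sides are null compares as true.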
49,684 | 12.08.2020 14:11:34 | -7,200 | b653df43a62628baa1c5c9783d166a2ecf80bc8d | [MINOR] Fixed link to CodeStyle_eclipse.xml | [
{
"change_type": "MODIFY",
"old_path": "CONTRIBUTING.md",
"new_path": "CONTRIBUTING.md",
"diff": "@@ -48,7 +48,7 @@ let's make sure the changes are consistent with the guidelines and coding style.\nBefore contributing a pull request, we highly suggest applying a code formatter to the written code.\n-We have provided at profile for java located in [Codestyle File ./docs/CodeStyle.eclipse.xml](dev/docs/CodeStyle_eclipse.xml). This can be loaded in most editors e.g.:\n+We have provided at profile for java located in [Codestyle File ./docs/CodeStyle.eclipse.xml](dev/CodeStyle_eclipse.xml). This can be loaded in most editors e.g.:\n- [Eclipse](https://stackoverflow.com/questions/10432538/eclipse-import-conf-xml-files#10433986)\n- [IntelliJ](https://imagej.net/Eclipse_code_style_profiles_and_IntelliJ)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fixed link to CodeStyle_eclipse.xml (#1015) |
49,684 | 11.08.2020 10:35:42 | -7,200 | 301c15fe3af8588e7965af2ec0d56c51d00615f4 | [MINOR] Marked nn ex deprecated & new MNIST ex
Updated examples and replicated the setup from (McMahan et al., 2017)
Added Nesterov momentum SGD to the example and bumped the layers up.
Closes #1014. | [
{
"change_type": "RENAME",
"old_path": "scripts/nn/examples/Example - MNIST LeNet.ipynb",
"new_path": "scripts/nn/examples/Example - MNIST LeNet deprecated.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"## Quick Setup\"\n+ \"## Quick Setup - Warning: Deprecated\"\n]\n},\n{\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/nn/examples/Example - MNIST Softmax Classifier.ipynb",
"new_path": "scripts/nn/examples/Example - MNIST Softmax Classifier deprecated.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"## Quick Setup\"\n+ \"## Quick Setup - Warning: Deprecated\"\n]\n},\n{\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/examples/Example-MNIST_2NN_ReLu_Softmax.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * The MNIST Data can be downloaded as follows:\n+ * mkdir -p data/mnist/\n+ * cd data/mnist/\n+ * curl -O https://pjreddie.com/media/files/mnist_train.csv\n+ * curl -O https://pjreddie.com/media/files/mnist_test.csv\n+ */\n+\n+source(\"nn/examples/mnist_2NN.dml\") as mnist_2NN\n+\n+# Read training data\n+data = read(\"mnist_data/mnist_train.csv\", format=\"csv\")\n+n = nrow(data)\n+\n+# Extract images and labels\n+images = data[,2:ncol(data)]\n+labels = data[,1]\n+\n+# Scale images to [0,1], and one-hot encode the labels\n+images = images / 255.0\n+labels = table(seq(1, n), labels+1, n, 10)\n+\n+# Split into training (55,000 examples) and validation (5,000 examples)\n+X = images[5001:nrow(images),]\n+X_val = images[1:5000,]\n+y = labels[5001:nrow(images),]\n+y_val = labels[1:5000,]\n+\n+# Train\n+epochs = 5\n+[W_1, b_1, W_2, b_2, W_3, b_3] = mnist_2NN::train(X, y, X_val, y_val, epochs)\n+\n+# Read test data\n+data = read(\"mnist_data/mnist_test.csv\", format=\"csv\")\n+n = nrow(data)\n+\n+# Extract images and labels\n+X_test = data[,2:ncol(data)]\n+y_test = data[,1]\n+\n+# Scale images to [0,1], and one-hot encode the labels\n+X_test = X_test / 255.0\n+y_test = table(seq(1, n), y_test+1, n, 10)\n+\n+# Eval on test set\n+probs = mnist_2NN::predict(X_test, W_1, b_1, W_2, b_2, W_3, b_3)\n+[loss, accuracy] = mnist_2NN::eval(probs, y_test)\n+\n+print(\"Test Accuracy: \" + accuracy)\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/examples/Example-MNIST_Softmax.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+* The MNIST Data can be downloaded as follows:\n+* mkdir -p data/mnist/\n+* cd data/mnist/\n+* curl -O https://pjreddie.com/media/files/mnist_train.csv\n+* curl -O https://pjreddie.com/media/files/mnist_test.csv\n+*/\n+\n+source(\"nn/examples/mnist_softmax.dml\") as mnist_softmax\n+\n+# Read training data\n+data = read(\"mnist_data/mnist_train.csv\", format=\"csv\")\n+n = nrow(data)\n+\n+# Extract images and labels\n+images = data[,2:ncol(data)]\n+labels = data[,1]\n+\n+# Scale images to [0,1], and one-hot encode the labels\n+images = images / 255.0\n+labels = table(seq(1, n), labels+1, n, 10)\n+\n+# Split into training (55,000 examples) and validation (5,000 examples)\n+X = images[5001:nrow(images),]\n+X_val = images[1:5000,]\n+y = labels[5001:nrow(images),]\n+y_val = labels[1:5000,]\n+\n+# Train\n+epochs = 1\n+[W, b] = mnist_softmax::train(X, y, X_val, y_val, epochs)\n+\n+# Read test data\n+data = read(\"mnist_data/mnist_test.csv\", format=\"csv\")\n+n = nrow(data)\n+\n+# Extract images and labels\n+X_test = data[,2:ncol(data)]\n+y_test = data[,1]\n+\n+# Scale images to [0,1], and one-hot encode the labels\n+X_test = X_test / 255.0\n+y_test = table(seq(1, n), y_test+1, n, 10)\n+\n+# Eval on test set\n+probs = mnist_softmax::predict(X_test, W, b)\n+[loss, accuracy] = mnist_softmax::eval(probs, y_test)\n+\n+print(\"Test Accuracy: \" + accuracy)\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/examples/mnist_2NN.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * MNIST 2NN Relu Example\n+ */\n+\n+# Imports\n+source(\"nn/layers/affine.dml\") as affine\n+source(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\n+source(\"nn/layers/relu.dml\") as relu\n+source(\"nn/layers/softmax.dml\") as softmax\n+source(\"nn/optim/sgd_nesterov.dml\") as sgd_nesterov\n+\n+train = function(matrix[double] X, matrix[double] Y,\n+ matrix[double] X_val, matrix[double] Y_val,\n+ int epochs)\n+ return (matrix[double] W_1, matrix[double] b_1,\n+ matrix[double] W_2, matrix[double] b_2,\n+ matrix[double] W_3, matrix[double] b_3) {\n+ /*\n+ * Trains a 2NN relu softmax classifier.\n+ *\n+ * The input matrix, X, has N examples, each with D features.\n+ * The targets, Y, have K classes, and are one-hot encoded.\n+ *\n+ * Inputs:\n+ * - X: Input data matrix, of shape (N, D).\n+ * - Y: Target matrix, of shape (N, K).\n+ * - X_val: Input validation data matrix, of shape (N, C*Hin*Win).\n+ * - Y_val: Target validation matrix, of shape (N, K).\n+ * - epochs: Total number of full training loops over the full data set.\n+ *\n+ * Outputs:\n+ * - W: Weights (parameters) matrix, of shape (D, M, 3).\n+ * - b: Biases vector, of shape (1, M, 3).\n+ */\n+ N = nrow(X) # num examples\n+ D = ncol(X) # num features\n+ K = ncol(Y) # num classes\n+\n+ # Create the network:\n+ # input -> 200 neuron affine -> relu -> 200 neuron affine -> relu -> K neurons affine -> softmax\n+ [W_1, b_1] = affine::init(D, 200)\n+ [W_2, b_2] = affine::init(200, 200)\n+ [W_3, b_3] = affine::init(200, K)\n+\n+ # Initialize SGD\n+ lr = 0.2 # learning rate\n+ mu = 0 # momentum\n+ decay = 0.99 # learning rate decay constant\n+ vW_1 = sgd_nesterov::init(W_1) # optimizer momentum state for W_1\n+ vb_1 = sgd_nesterov::init(b_1) # optimizer momentum state for b_1\n+ vW_2 = sgd_nesterov::init(W_2) # optimizer momentum state for W_2\n+ vb_2 = sgd_nesterov::init(b_2) # optimizer momentum state for b_2\n+ vW_3 = sgd_nesterov::init(W_3) # optimizer momentum state for W_3\n+ vb_3 = sgd_nesterov::init(b_3) # optimizer momentum state for b_3\n+\n+ # Optimize\n+ print(\"Starting optimization\")\n+ batch_size = 50\n+ iters = 1000\n+ for (e in 1:epochs) {\n+ for(i in 1:iters) {\n+ # Get next batch\n+ beg = ((i-1) * batch_size) %% N + 1\n+ end = min(N, beg + batch_size - 1)\n+ X_batch = X[beg:end,]\n+ y_batch = Y[beg:end,]\n+\n+ # Compute forward pass\n+ ## input D -> 200 neuron affine -> relu -> 200 neuron affine -> relu -> K neurons affine -> softmax\n+ out_1 = affine::forward(X_batch, W_1, b_1)\n+ out_1_relu = relu::forward(out_1)\n+ out_2 = affine::forward(out_1_relu, 
W_2, b_2)\n+ out_2_relu = relu::forward(out_2)\n+ out_3 = affine::forward(out_2_relu, W_3, b_3)\n+ probs = softmax::forward(out_3)\n+\n+ # Compute loss & accuracy for training & validation data\n+ loss = cross_entropy_loss::forward(probs, y_batch)\n+ accuracy = mean(rowIndexMax(probs) == rowIndexMax(y_batch))\n+ probs_val = predict(X_val, W_1, b_1, W_2, b_2, W_3, b_3)\n+ loss_val = cross_entropy_loss::forward(probs_val, Y_val)\n+ accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))\n+ print(\"Epoch: \" + e + \", Iter: \" + i + \", Train Loss: \" + loss + \", Train Accuracy: \" +\n+ accuracy + \", Val Loss: \" + loss_val + \", Val Accuracy: \" + accuracy_val)\n+\n+ # Compute backward pass\n+ ## loss:\n+ dprobs = cross_entropy_loss::backward(probs, y_batch)\n+ dout_3 = softmax::backward(dprobs, out_3)\n+ [dout_2_relu, dW_3, db_3] = affine::backward(dout_3, out_2_relu, W_3, b_3)\n+ dout_2 = relu::backward(dout_2_relu, out_2)\n+ [dout_1_relu, dW_2, db_2] = affine::backward(dout_2, out_1_relu, W_2, b_2)\n+ dout_1 = relu::backward(dout_1_relu, out_1)\n+ [dX_batch, dW_1, db_1] = affine::backward(dout_1, X_batch, W_1, b_1)\n+\n+ # Optimize with SGD\n+ [W_3, vW_3] = sgd_nesterov::update(W_3, dW_3, lr, mu, vW_3)\n+ [b_3, vb_3] = sgd_nesterov::update(b_3, db_3, lr, mu, vb_3)\n+ [W_2, vW_2] = sgd_nesterov::update(W_2, dW_2, lr, mu, vW_2)\n+ [b_2, vb_2] = sgd_nesterov::update(b_2, db_2, lr, mu, vb_2)\n+ [W_1, vW_1] = sgd_nesterov::update(W_1, dW_1, lr, mu, vW_1)\n+ [b_1, vb_1] = sgd_nesterov::update(b_1, db_1, lr, mu, vb_1)\n+ }\n+ # Anneal momentum towards 0.999\n+ mu = mu + (0.999 - mu)/(1+epochs-e)\n+ # Decay learning rate\n+ lr = lr * decay\n+ }\n+}\n+\n+predict = function(matrix[double] X,\n+ matrix[double] W_1, matrix[double] b_1,\n+ matrix[double] W_2, matrix[double] b_2,\n+ matrix[double] W_3, matrix[double] b_3)\n+ return (matrix[double] probs) {\n+ /*\n+ * Computes the class probability predictions of a softmax classifier.\n+ *\n+ * The input matrix, X, has N examples, each with D features.\n+ *\n+ * Inputs:\n+ * - X: Input data matrix, of shape (N, D).\n+ * - W: Weights (parameters) matrix, of shape (D, M).\n+ * - b: Biases vector, of shape (1, M).\n+ *\n+ * Outputs:\n+ * - probs: Class probabilities, of shape (N, K).\n+ */\n+ # Compute forward pass\n+ ## input -> 200 neuron affine -> relu -> 200 neuron affine -> relu -> K neurons affine -> softmax\n+ out_1_relu = relu::forward(affine::forward(X, W_1, b_1))\n+ out_2_relu = relu::forward(affine::forward(out_1_relu, W_2, b_2))\n+ probs = softmax::forward(affine::forward(out_2_relu, W_3, b_3))\n+}\n+\n+eval = function(matrix[double] probs, matrix[double] Y)\n+ return (double loss, double accuracy) {\n+ /*\n+ * Evaluates a classifier.\n+ *\n+ * The probs matrix contains the class probability predictions\n+ * of K classes over N examples. The targets, Y, have K classes,\n+ * and are one-hot encoded.\n+ *\n+ * Inputs:\n+ * - probs: Class probabilities, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n+ *\n+ * Outputs:\n+ * - loss: Scalar loss, of shape (1).\n+ * - accuracy: Scalar accuracy, of shape (1).\n+ */\n+ # Compute loss & accuracy\n+ loss = cross_entropy_loss::forward(probs, Y)\n+ correct_pred = rowIndexMax(probs) == rowIndexMax(Y)\n+ accuracy = mean(correct_pred)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/applications/NNTest.java",
"new_path": "src/test/java/org/apache/sysds/test/applications/NNTest.java",
"diff": "@@ -38,6 +38,7 @@ public class NNTest extends MLContextTestBase {\npublic void testNNLibrary() {\nScript script = dmlFromFile(TEST_SCRIPT);\nString stdOut = executeAndCaptureStdOut(ml, script).getRight();\n+ System.out.println(stdOut);\nassertTrue(stdOut, !stdOut.contains(ERROR_STRING));\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Marked nn ex deprecated & new MNIST ex
Updated examples and replicated the setup from (McMahan et al., 2017)
Added Nesterov momentum SGD to the example and bumped the layers up.
Closes #1014. |
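The training loop in this record delegates the parameter step to `sgd_nesterov::update`. Below is a plain-Java sketch of that update rule; the `v_prev`/look-ahead formulation is an assumption based on the standard `nn/optim` library (it is not quoted from this diff), the array types are purely illustrative (the DML version operates on matrices), and the momentum value is a placeholder — the example script actually starts at `mu = 0` and anneals it toward 0.999.

```java
// Hedged sketch of the Nesterov-momentum SGD update behind sgd_nesterov::update.
// Assumed formulation: v = mu*v_prev - lr*dX;  X = X - mu*v_prev + (1+mu)*v.
public class NesterovSketch {
  // Updates X in place and returns the new velocity vector.
  static double[] update(double[] X, double[] dX, double lr, double mu, double[] v) {
    double[] vNew = new double[v.length];
    for (int i = 0; i < X.length; i++) {
      vNew[i] = mu * v[i] - lr * dX[i];          // velocity with momentum (v[i] is v_prev)
      X[i] += -mu * v[i] + (1 + mu) * vNew[i];   // Nesterov look-ahead correction
    }
    return vNew;
  }

  public static void main(String[] args) {
    double[] W  = {0.5, -0.25};
    double[] dW = {0.1, -0.2};
    double[] vW = new double[2];          // sgd_nesterov::init -> zeros
    vW = update(W, dW, 0.2, 0.9, vW);     // lr = 0.2 as in the example; mu illustrative
    System.out.println(W[0] + ", " + W[1]);
  }
}
```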
49,706 | 13.08.2020 11:59:24 | -7,200 | 8a654f73642301a7439c5a1ae6470ad8c1720f33 | [MINOR] System Usability printing
This commit changes the printing of errors and traces for SystemDS
so that it does not print the Java stack trace, but only a trace of
messages.
Also, it prints them in a cool red colour. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"diff": "@@ -33,6 +33,7 @@ import java.util.Scanner;\nimport org.apache.commons.cli.AlreadySelectedException;\nimport org.apache.commons.cli.HelpFormatter;\n+import org.apache.commons.lang.StringUtils;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.hadoop.conf.Configuration;\n@@ -148,16 +149,43 @@ public class DMLScript\n}\n/**\n+ * Main entry point for systemDS dml script execution\n*\n* @param args command-line arguments\n- * @throws IOException if an IOException occurs in the hadoop GenericOptionsParser\n*/\npublic static void main(String[] args)\n- throws IOException, ParseException, DMLScriptException\n{\n+ try{\nConfiguration conf = new Configuration(ConfigurationManager.getCachedJobConf());\nString[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();\nDMLScript.executeScript(conf, otherArgs);\n+ } catch(Exception e){\n+ for(String s: args){\n+ if(s.trim().contains(\"-debug\")){\n+ e.printStackTrace();\n+ }\n+ }\n+ final String ANSI_RED = \"\\u001B[31m\";\n+ final String ANSI_RESET = \"\\u001B[0m\";\n+ StringBuilder sb = new StringBuilder();\n+ sb.append(ANSI_RED);\n+ sb.append(\"An Error Occured : \");\n+ sb.append(\"\\n\" );\n+ sb.append(StringUtils.leftPad(e.getClass().getSimpleName(),25));\n+ sb.append(\" -- \");\n+ sb.append(e.getMessage());\n+ Throwable s = e.getCause();\n+ while(s != null){\n+ sb.append(\"\\n\" );\n+ sb.append(StringUtils.leftPad(s.getClass().getSimpleName(),25));\n+ sb.append(\" -- \");\n+ sb.append(s.getMessage());\n+ s = s.getCause();\n+ }\n+ sb.append(ANSI_RESET);\n+ System.out.println(sb.toString());\n+ }\n+\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -38,6 +38,8 @@ import org.apache.commons.io.FileUtils;\nimport org.apache.commons.io.IOUtils;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.hadoop.conf.Configuration;\n+import org.apache.hadoop.util.GenericOptionsParser;\nimport org.apache.spark.sql.SparkSession;\nimport org.apache.spark.sql.SparkSession.Builder;\nimport org.apache.sysds.api.DMLScript;\n@@ -45,12 +47,15 @@ import org.apache.sysds.common.Types.DataType;\nimport org.apache.sysds.common.Types.ExecMode;\nimport org.apache.sysds.common.Types.FileFormat;\nimport org.apache.sysds.common.Types.ValueType;\n+import org.apache.sysds.conf.ConfigurationManager;\nimport org.apache.sysds.conf.DMLConfig;\nimport org.apache.sysds.hops.OptimizerUtils;\nimport org.apache.sysds.lops.Lop;\nimport org.apache.sysds.lops.LopProperties.ExecType;\nimport org.apache.sysds.parser.DataExpression;\n+import org.apache.sysds.parser.ParseException;\nimport org.apache.sysds.runtime.DMLRuntimeException;\n+import org.apache.sysds.runtime.DMLScriptException;\nimport org.apache.sysds.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysds.runtime.io.FileFormatPropertiesCSV;\nimport org.apache.sysds.runtime.io.FrameReader;\n@@ -192,13 +197,10 @@ public abstract class AutomatedTestBase {\nprivate boolean isOutAndExpectedDeletionDisabled = false;\n- private String expectedStdOut;\nprivate int iExpectedStdOutState = 0;\n- private String unexpectedStdOut;\nprivate int iUnexpectedStdOutState = 0;\n// private PrintStream originalPrintStreamStd = null;\n- private String expectedStdErr;\nprivate int iExpectedStdErrState = 0;\n// private PrintStream originalErrStreamStd = null;\n@@ -1208,7 +1210,7 @@ public abstract class AutomatedTestBase {\nString[] dmlScriptArgs = args.toArray(new String[args.size()]);\nif( LOG.isTraceEnabled() )\nLOG.trace(\"arguments to DMLScript: \" + Arrays.toString(dmlScriptArgs));\n- DMLScript.main(dmlScriptArgs);\n+ main(dmlScriptArgs);\nif(maxSparkInst > -1 && maxSparkInst < Statistics.getNoOfCompiledSPInst())\nfail(\"Limit of Spark jobs is exceeded: expected: \" + maxSparkInst + \", occurred: \"\n@@ -1244,6 +1246,19 @@ public abstract class AutomatedTestBase {\nreturn buff;\n}\n+ /**\n+ *\n+ * @param args command-line arguments\n+ * @throws IOException if an IOException occurs in the hadoop GenericOptionsParser\n+ */\n+ public static void main(String[] args)\n+ throws IOException, ParseException, DMLScriptException\n+ {\n+ Configuration conf = new Configuration(ConfigurationManager.getCachedJobConf());\n+ String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();\n+ DMLScript.executeScript(conf, otherArgs);\n+ }\n+\nprivate void addProgramIndependentArguments(ArrayList<String> args) {\n// program-independent parameters\n@@ -1525,11 +1540,11 @@ public abstract class AutomatedTestBase {\npublic void tearDown() {\nLOG.trace(\"Duration: \" + (System.currentTimeMillis() - lTimeBeforeTest) + \"ms\");\n- assertTrue(\"expected String did not occur: \" + expectedStdOut,\n- iExpectedStdOutState == 0 || iExpectedStdOutState == 2);\n- assertTrue(\"expected String did not occur (stderr): \" + expectedStdErr,\n- iExpectedStdErrState == 0 || iExpectedStdErrState == 2);\n- assertFalse(\"unexpected String occurred: \" + unexpectedStdOut, iUnexpectedStdOutState == 1);\n+// assertTrue(\"expected String did not occur: \" + expectedStdOut,\n+// iExpectedStdOutState == 0 || iExpectedStdOutState == 2);\n+// assertTrue(\"expected String did not occur 
(stderr): \" + expectedStdErr,\n+// iExpectedStdErrState == 0 || iExpectedStdErrState == 2);\n+// assertFalse(\"unexpected String occurred: \" + unexpectedStdOut, iUnexpectedStdOutState == 1);\nTestUtils.displayAssertionBuffer();\nif(!isOutAndExpectedDeletionDisabled()) {\n@@ -1740,8 +1755,8 @@ public abstract class AutomatedTestBase {\n*\n* @param name directory name\n* @param data two dimensional frame data\n- * @param schema\n- * @param oi\n+ * @param schema The schema of the frame\n+ * @param fmt The format of the frame\n* @throws IOException\n*/\nprotected double[][] writeInputFrame(String name, double[][] data, ValueType[] schema, FileFormat fmt)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] System Usability printing
This commit changes the printing of errors and traces for SystemDS
so that it does not print the Java stack trace, but only a trace of
messages.
Also, it prints them in a cool red colour. |
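The red error trace this commit adds to `DMLScript.main` is, at its core, a walk over the `Throwable` cause chain. Below is a self-contained distillation of exactly that pattern, reusing the ANSI codes and 25-character left-padding from the diff; the class name and the demo exceptions are hypothetical, introduced only so the sketch runs standalone.

```java
// Standalone distillation of the error-trace printing added to DMLScript.main:
// walk the cause chain and print "SimpleName -- message" lines in red.
import org.apache.commons.lang.StringUtils;

public class ErrorTraceSketch {
  static final String ANSI_RED = "\u001B[31m";
  static final String ANSI_RESET = "\u001B[0m";

  static String format(Throwable e) {
    StringBuilder sb = new StringBuilder(ANSI_RED).append("An Error Occured : ");
    for (Throwable t = e; t != null; t = t.getCause()) {
      sb.append('\n')
        .append(StringUtils.leftPad(t.getClass().getSimpleName(), 25))
        .append(" -- ").append(t.getMessage());
    }
    return sb.append(ANSI_RESET).toString();
  }

  public static void main(String[] args) {
    Exception inner = new IllegalArgumentException("bad frame schema");
    System.out.println(format(new RuntimeException("script failed", inner)));
  }
}
```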
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.