author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
49,738 | 28.05.2017 15:34:19 | 25,200 | fd393e460cedeb04be4cee3b57ecc1e14dc99884 | Fix codegen of double scalars in indexing expressions
Our indexing runtime uses safe casts (according to machine precision)
to handle double scalars in indexing expressions. This patch fixes the
code generation runtime to similarly provide indexing abstractions
for double scalars in such indexing expressions. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java",
"diff": "@@ -29,6 +29,7 @@ import org.apache.sysml.runtime.compress.CompressedMatrixBlock;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class SpoofOperator implements Serializable\n{\n@@ -134,20 +135,42 @@ public abstract class SpoofOperator implements Serializable\n//abstraction for safely accessing sideways matrices without the need\n//to allocate empty matrices as dense, see prepInputMatrices\n+ protected static double getValue(double[] data, double index) {\n+ int iindex = UtilFunctions.toInt(index);\n+ return getValue(data, iindex);\n+ }\n+\nprotected static double getValue(double[] data, int index) {\nreturn (data!=null) ? data[index] : 0;\n}\n+ protected static double getValue(double[] data, int n, double rowIndex, double colIndex) {\n+ int irowIndex = UtilFunctions.toInt(rowIndex);\n+ int icolIndex = UtilFunctions.toInt(colIndex);\n+ return getValue(data, n, irowIndex, icolIndex);\n+ }\n+\nprotected static double getValue(double[] data, int n, int rowIndex, int colIndex) {\nreturn (data!=null) ? data[rowIndex*n+colIndex] : 0;\n}\n+ protected static double getValue(SideInput data, double rowIndex) {\n+ int irowIndex = UtilFunctions.toInt(rowIndex);\n+ return getValue(data, irowIndex);\n+ }\n+\nprotected static double getValue(SideInput data, int rowIndex) {\n//note: wrapper sideinput guaranteed to exist\nreturn (data.dBlock!=null) ? data.dBlock[rowIndex] :\n(data.mBlock!=null) ? data.mBlock.quickGetValue(rowIndex, 0) : 0;\n}\n+ protected static double getValue(SideInput data, int n, double rowIndex, double colIndex) {\n+ int irowIndex = UtilFunctions.toInt(rowIndex);\n+ int icolIndex = UtilFunctions.toInt(colIndex);\n+ return getValue(data, n, irowIndex, icolIndex);\n+ }\n+\nprotected static double getValue(SideInput data, int n, int rowIndex, int colIndex) {\n//note: wrapper sideinput guaranteed to exist\nreturn (data.dBlock!=null) ? data.dBlock[rowIndex*n+colIndex] :\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1638] Fix codegen of double scalars in indexing expressions
Our indexing runtime uses safe casts (accordingly to machine precision)
to handling double scalars in indexing expressions. This patch fixes the
code generation runtime to similarly provide indexing abstractions for
for double scalars in such indexing expressions. |
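To illustrate the safe-cast behavior described in the commit above, here is a minimal, self-contained Java sketch. The `toInt` helper and its epsilon are illustrative assumptions; the actual `UtilFunctions.toInt` implementation in SystemML may differ in detail.

```java
public class SafeIndexCast {
    private static final double EPS = 1e-10; // assumed tolerance, not SystemML's constant

    // Illustrative stand-in for UtilFunctions.toInt: converts a double index
    // to int while tolerating tiny floating-point error just below an integer.
    static int toInt(double index) {
        return (int) Math.floor(index + EPS);
    }

    // Mirrors the getValue(double[], double) overload added by the patch:
    // safely cast the double index, then delegate to the int variant.
    static double getValue(double[] data, double index) {
        int iindex = toInt(index);
        return (data != null) ? data[iindex] : 0;
    }

    public static void main(String[] args) {
        double idx = (0.7 + 0.1) * 10;   // 7.999999999999999 in binary floating point
        System.out.println((int) idx);   // 7 -- a naive cast hits the wrong cell
        System.out.println(toInt(idx));  // 8 -- the safe cast recovers the intended index
    }
}
```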
49,738 | 28.05.2017 16:04:32 | 25,200 | cb2d9ac434aec0f0be535ff278e41fa080355d36 | Fix codegen outer product closing conditions
So far, the outer product template accepted subsequent matrix products
such as (A %*% B) %*% C, where A %*% B is an outer-product-like
operation. We now mark such operators as invalid on close due to a
missing sparse driver binary operation. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateOuterProduct.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateOuterProduct.java",
"diff": "@@ -85,7 +85,9 @@ public class TemplateOuterProduct extends TemplateBase {\n@Override\npublic CloseType close(Hop hop) {\n// close on second matrix multiply (after open) or unary aggregate\n- if( hop instanceof AggUnaryOp && HopRewriteUtils.isOuterProductLikeMM(hop.getInput().get(0)) )\n+ if( hop instanceof AggUnaryOp && HopRewriteUtils.isOuterProductLikeMM(hop.getInput().get(0))\n+ || (hop instanceof AggBinaryOp && (HopRewriteUtils.isOuterProductLikeMM(hop.getInput().get(0))\n+ || HopRewriteUtils.isOuterProductLikeMM(hop.getInput().get(1)))) )\nreturn CloseType.CLOSED_INVALID;\nelse if( (hop instanceof AggUnaryOp)\n|| (hop instanceof AggBinaryOp && !HopRewriteUtils.isOuterProductLikeMM(hop)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1639] Fix codegen outer product closing conditions
So far, the outer product template accepted subsequent matrix products
such as (A %*% B) %*% C, where A %*% B is an outer-product-like
operation. We now mark such operators as invalid on close due to missing
sparse driver binary operation. |
49,768 | 30.05.2017 00:27:47 | 25,200 | 3f0c12ecedcee3cb97d30ab54f421e7c523cd9e0 | Automate Release Artifact verification
Updated Python3/Python2 test cases. | [
{
"change_type": "MODIFY",
"old_path": "dev/release/src/test/bin/verifyBuild.sh",
"new_path": "dev/release/src/test/bin/verifyBuild.sh",
"diff": "@@ -180,6 +180,10 @@ runCommand \"$SPARK_HOME/bin/spark-submit target/release/incubator-systemml/src/m\nrunCommand \"$SPARK_HOME/bin/spark-submit target/release/incubator-systemml/src/main/python/tests/test_mllearn_df.py\"\nrunCommand \"$SPARK_HOME/bin/spark-submit target/release/incubator-systemml/src/main/python/tests/test_mllearn_numpy.py\"\n+# Specifying python2 to be used\n+runCommand \"PYSPARK_PYTHON=python2 spark-submit --master local[*] target/release/incubator-systemml/src/main/python/tests/test_mlcontext.py\"\n+# Specifying python3 to be used\n+runCommand \"PYSPARK_PYTHON=python3 spark-submit --master local[*] target/release/incubator-systemml/src/main/python/tests/test_mlcontext.py\"\necho \"`date +%Y-%m-%dT%H:%M:%S`: INFO: Verification of binary files completed successfully.\"\n# echo \"================================================================================\"\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1440] Automate Release Artifact verification
Updated Python3/Python2 test cases. |
49,768 | 30.05.2017 22:47:57 | 25,200 | ceeec4bbffd76c20e8bd97f3f48d08df1e0cc8ab | Update license with current code change
Removed the opencsv entry and updated the protobuf version in the LICENSE file | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/bin/LICENSE",
"new_path": "src/assembly/bin/LICENSE",
"diff": "@@ -230,12 +230,11 @@ hadoop-yarn-server-web-proxy-2.6.0.jar\njackson-core-asl-1.9.13.jar\njackson-mapper-asl-1.9.13.jar\nlog4j-1.2.15.jar\n-opencsv-2.3.jar\n-\n+-------------------------------------------------------------------------------\nThe following compile-scope dependencies come under the Apache Software License 2.0.\nApache Wink :: JSON4J (http://www.apache.org/wink/wink-json4j/) org.apache.wink:wink-json4j:1.4\n-\n+-------------------------------------------------------------------------------\nThe following windows-only components come under the Apache Software License 2.0.\nhadoop.dll\n@@ -245,7 +244,6 @@ hadoop.pdb\nlibwinutils.lib\nwinutils.exe\nwinutils.pdb\n-\n===============================================================================\nThe following ANTLR dependency is distributed under the BSD license.\n@@ -290,7 +288,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nThe following Protocol Buffer Java API dependencies are distributed under the BSD license.\n-Protocol Buffer Java API (http://code.google.com/p/protobuf) com.google.protobuf:protobuf-java:2.5.0 (protobuf-java-2.5.0.jar)\n+Protocol Buffer Java API (http://code.google.com/p/protobuf) com.google.protobuf:protobuf-java:3.2.0 (protobuf-java-3.2.0.jar)\nCopyright 2014, Google Inc. All rights reserved.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1642] Update license with current code change
Removed opecsv file and updated version for protobuf file in LICENSE file |
49,772 | 02.06.2017 14:49:25 | 25,200 | aaca80061adae1fbf4d27b7466840daf852a0d54 | [MINOR] Updating deprecated Spark config. | [
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/README.md",
"new_path": "projects/breast_cancer/README.md",
"diff": "@@ -100,7 +100,7 @@ References:\n# Remove the max result size constraint.\nspark.driver.maxResultSize 0\n# Increase the message size.\n- spark.akka.frameSize 128\n+ spark.rpc.message.maxSize 128\n# Extend the network timeout threshold.\nspark.network.timeout 1000s\n# Setup some extra Java options for performance.\n@@ -136,3 +136,4 @@ References:\n- Host on server:\n- `python3 path/to/openslide-python/examples/deepzoom/deepzoom_multiserver.py -Q 100 -l HOSTING_URL_HERE path/to/data/`\n- Open local browser to `HOSTING_URL_HERE:5000`.\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Updating deprecated Spark config. |
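The same replacement can also be applied programmatically rather than via `spark-defaults.conf`; a hedged Java sketch (the app name is a placeholder, the values mirror the README snippet):

```java
import org.apache.spark.SparkConf;

public class SparkConfExample {
    public static void main(String[] args) {
        // spark.akka.frameSize was retired when Spark moved its RPC layer off Akka;
        // spark.rpc.message.maxSize (in MB) is the documented replacement.
        SparkConf conf = new SparkConf()
            .setAppName("breast_cancer")              // placeholder app name
            .set("spark.driver.maxResultSize", "0")
            .set("spark.rpc.message.maxSize", "128")
            .set("spark.network.timeout", "1000s");
    }
}
```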
49,736 | 02.06.2017 17:19:14 | 28,800 | 33ebe969ba1fd3d68941cf0e8c1299daf5df15b4 | [MINOR] [DOC] Updated the documentation to clarify the common misconceptions regarding Caffe2DML and Python DSL. | [
{
"change_type": "MODIFY",
"old_path": "docs/beginners-guide-caffe2dml.md",
"new_path": "docs/beginners-guide-caffe2dml.md",
"diff": "@@ -110,6 +110,29 @@ For more detail on enabling native BLAS, please see the documentation for the [n\n## Frequently asked questions\n+#### What is the purpose of Caffe2DML API ?\n+\n+Most deep learning experts are more likely to be familiar with the Caffe's specification\n+rather than DML language. For these users, the Caffe2DML API reduces the learning curve to using SystemML.\n+Instead of requiring the users to write a DML script for training, fine-tuning and testing the model,\n+Caffe2DML takes as an input a network and solver specified in the Caffe specification\n+and automatically generates the corresponding DML.\n+\n+#### With Caffe2DML, does SystemML now require Caffe to be installed ?\n+\n+Absolutely not. We only support Caffe's API for convenience of the user as stated above.\n+Since the Caffe's API is specified in the protobuf format, we are able to generate the java parser files\n+and donot require Caffe to be installed. This is also true for Tensorboard feature of Caffe2DML.\n+\n+```\n+Dml.g4 ---> antlr ---> DmlLexer.java, DmlListener.java, DmlParser.java ---> parse foo.dml\n+caffe.proto ---> protoc ---> target/generated-sources/caffe/Caffe.java ---> parse caffe_network.proto, caffe_solver.proto\n+```\n+\n+Again, the SystemML engine doesnot invoke (or depend on) Caffe and TensorFlow for any of its runtime operators.\n+Since the grammar files for the respective APIs (i.e. `caffe.proto`) are used by SystemML,\n+we include their licenses in our jar files.\n+\n#### How can I speedup the training with Caffe2DML ?\n- Enable native BLAS to improve the performance of CP convolution and matrix multiplication operators.\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/python-reference.md",
"new_path": "docs/python-reference.md",
"diff": "@@ -48,6 +48,9 @@ It implements basic matrix operators, matrix functions as well as converters to\ntypes (for example: Numpy arrays, PySpark DataFrame and Pandas\nDataFrame).\n+The primary reason for supporting this API is to reduce the learning curve for an average Python user,\n+who is more likely to know Numpy library, rather than the DML language.\n+\n### Operators\nThe operators supported are:\n@@ -107,6 +110,64 @@ Since matrix is backed by lazy evaluation and uses a recursive Depth First Searc\nyou may run into `RuntimeError: maximum recursion depth exceeded`.\nPlease see below [troubleshooting steps](http://apache.github.io/incubator-systemml/python-reference#maximum-recursion-depth-exceeded)\n+### Dealing with the loops\n+\n+It is important to note that this API doesnot pushdown loop, which means the\n+SystemML engine essentially gets an unrolled DML script.\n+This can lead to two issues:\n+\n+1. Since matrix is backed by lazy evaluation and uses a recursive Depth First Search (DFS),\n+you may run into `RuntimeError: maximum recursion depth exceeded`.\n+Please see below [troubleshooting steps](http://apache.github.io/incubator-systemml/python-reference#maximum-recursion-depth-exceeded)\n+\n+2. Significant parsing/compilation overhead of potentially large unrolled DML script.\n+\n+The unrolling of the for loop can be demonstrated by the below example:\n+\n+```python\n+>>> import systemml as sml\n+>>> import numpy as np\n+>>> m1 = sml.matrix(np.ones((3,3)) + 2)\n+\n+Welcome to Apache SystemML!\n+\n+>>> m2 = sml.matrix(np.ones((3,3)) + 3)\n+>>> m3 = m1\n+>>> for i in range(5):\n+... m3 = m1 * m3 + m1\n+...\n+>>> m3\n+# This matrix (mVar12) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.\n+mVar1 = load(\" \", format=\"csv\")\n+mVar3 = mVar1 * mVar1\n+mVar4 = mVar3 + mVar1\n+mVar5 = mVar1 * mVar4\n+mVar6 = mVar5 + mVar1\n+mVar7 = mVar1 * mVar6\n+mVar8 = mVar7 + mVar1\n+mVar9 = mVar1 * mVar8\n+mVar10 = mVar9 + mVar1\n+mVar11 = mVar1 * mVar10\n+mVar12 = mVar11 + mVar1\n+save(mVar12, \" \")\n+```\n+\n+We can reduce the impact of this unrolling by eagerly evaluating the variables inside the loop:\n+\n+```python\n+>>> import systemml as sml\n+>>> import numpy as np\n+>>> m1 = sml.matrix(np.ones((3,3)) + 2)\n+\n+Welcome to Apache SystemML!\n+\n+>>> m2 = sml.matrix(np.ones((3,3)) + 3)\n+>>> m3 = m1\n+>>> for i in range(5):\n+... m3 = m1 * m3 + m1\n+... sml.eval(m3)\n+\n+```\n### Built-in functions\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] [DOC] Updated the documentation to clarify the common misconceptions regarding Caffe2DML and Python DSL. |
49,772 | 02.06.2017 18:37:06 | 25,200 | 766cc48c00adba37fde9a34a3a06f6df8708205d | [MINOR] Updating the MNIST LeNet example notebook.
This improves the nomenclature & documentation, adds more cleanup,
removes the unnecessary saving of the MNIST data to CSV files, and
makes everything more Pythonic. | [
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Deep_Learning_Image_Classification.ipynb",
"new_path": "samples/jupyter-notebooks/Deep_Learning_Image_Classification.ipynb",
"diff": "\"source\": [\n\"# Deep Learning Image Classification using Apache SystemML\\n\",\n\"\\n\",\n- \"This notebook shows SystemML Deep Learning functionality to map images of single digit numbers to their corresponding numeric representations. See [Getting Started with Deep Learning and Python](http://www.pyimagesearch.com/2014/09/22/getting-started-deep-learning-python/) for an explanation of the used deep learning concepts and assumptions.\\n\",\n+ \"This notebook demonstrates how to train a deep learning model on SystemML for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) problem of mapping images of single digit numbers to their corresponding numeric representations, using a classic [LeNet](http://yann.lecun.com/exdb/lenet/)-like convolutional neural network model. See [Neural Networks and Deep Learning](http://neuralnetworksanddeeplearning.com/chap6.html) for more information on neural networks and deep learning.\\n\",\n\"\\n\",\n- \"The downloaded MNIST dataset contains labeled images of handwritten digits, where each example is a 28x28 pixel image of grayscale values in the range [0,255] stretched out as 784 pixels, and each label is one of 10 possible digits in [0,9]. We download 60,000 training examples, and 10,000 test examples, where the format is \\\"label, pixel_1, pixel_2, ..., pixel_n\\\". We train a SystemML LeNet model. The results of the learning algorithms have an accuracy of 98 percent.\\n\",\n+ \"The downloaded MNIST dataset contains labeled images of handwritten digits, where each example is a 28x28 pixel image of grayscale values in the range [0,255] stretched out as 784 pixels, and each label is one of 10 possible digits in [0,9]. We download 60,000 training examples, and 10,000 test examples, where the images and labels are stored in separate matrices. We then train a SystemML LeNet-like convolutional neural network (i.e. \\\"convnet\\\", \\\"CNN\\\") model. The resulting trained model has an accuracy of 98.6% on the test dataset.\\n\",\n\"\\n\",\n- \"1. [Download and Access MNIST data](#access_data)\\n\",\n+ \"1. [Download the MNIST data](#download_data)\\n\",\n\"1. [Train a CNN classifier for MNIST handwritten digits](#train)\\n\",\n- \"1. [Detect handwritten Digits](#predict)\\n\"\n+ \"1. 
[Detect handwritten Digits](#predict)\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"source\": [\n\"<div style=\\\"text-align:center\\\" markdown=\\\"1\\\">\\n\",\n\"\\n\",\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"### This notebook is supported with SystemML 0.14.0 and above.\"\n+ \"### Note: This notebook is supported with SystemML 0.14.0 and above.\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"scrolled\": false\n+ \"collapsed\": true\n},\n\"outputs\": [],\n\"source\": [\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n- \"from systemml import MLContext, dml\\n\",\n- \"\\n\",\n- \"ml = MLContext(sc)\\n\",\n+ \"%matplotlib inline\\n\",\n\"\\n\",\n- \"print (\\\"Spark Version:\\\" + sc.version)\\n\",\n- \"print (\\\"SystemML Version:\\\" + ml.version())\\n\",\n- \"print (\\\"SystemML Built-Time:\\\" + ml.buildTime())\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"import warnings\\n\",\n- \"warnings.filterwarnings(\\\"ignore\\\")\\n\",\n- \"from sklearn import datasets\\n\",\n- \"from sklearn.cross_validation import train_test_split\\n\",\n- \"from sklearn.metrics import classification_report\\n\",\n- \"import pandas as pd\\n\",\n- \"import numpy as np\\n\",\n\"import matplotlib.pyplot as plt\\n\",\n- \"#import matplotlib.image as mpimg\\n\",\n- \"%matplotlib inline\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"Create data directory.\"\n+ \"import numpy as np\\n\",\n+ \"import pandas as pd\\n\",\n+ \"from sklearn import datasets\\n\",\n+ \"from sklearn.cross_validation import train_test_split # module deprecated in 0.18\\n\",\n+ \"#from sklearn.model_selection import train_test_split # use this module for >=0.18\\n\",\n+ \"from sklearn import metrics\\n\",\n+ \"from systemml import MLContext, dml\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n- \"%%sh\\n\",\n- \"mkdir -p data/mnist/\\n\",\n- \"cd data/mnist/\"\n+ \"ml = MLContext(sc)\\n\",\n+ \"print(\\\"Spark Version: {}\\\".format(sc.version))\\n\",\n+ \"print(\\\"SystemML Version: {}\\\".format(ml.version()))\\n\",\n+ \"print(\\\"SystemML Built-Time: {}\\\".format(ml.buildTime()))\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"<a id=\\\"access_data\\\"></a>\\n\",\n- \"## Download and Access MNIST data\\n\",\n+ \"<a id=\\\"download_data\\\"></a>\\n\",\n+ \"## Download the MNIST data\\n\",\n\"\\n\",\n\"Download the [MNIST data from the MLData repository](http://mldata.org/repository/data/viewslug/mnist-original/), and then split and save.\"\n]\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"mnist = datasets.fetch_mldata(\\\"MNIST Original\\\")\\n\",\n\"\\n\",\n- \"print (\\\"Mnist data features:\\\" + str(mnist.data.shape))\\n\",\n- \"print (\\\"Mnist data label:\\\" + str(mnist.target.shape))\\n\",\n- \"\\n\",\n- \"trainX, testX, trainY, testY = train_test_split(mnist.data, mnist.target.astype(\\\"int0\\\"), test_size = 0.142857)\\n\",\n- \"\\n\",\n- \"trainD = np.concatenate((trainY.reshape(trainY.size, 1), trainX),axis=1)\\n\",\n- \"testD = np.concatenate((testY.reshape 
(testY.size, 1), testX),axis=1)\\n\",\n+ \"print(\\\"MNIST data features: {}\\\".format(mnist.data.shape))\\n\",\n+ \"print(\\\"MNIST data labels: {}\\\".format(mnist.target.shape))\\n\",\n\"\\n\",\n- \"print (\\\"Images for training:\\\" + str(trainD.shape))\\n\",\n- \"print (\\\"Images used for testing:\\\" + str(testD.shape))\\n\",\n- \"pix = int(np.sqrt(trainD.shape[1]))\\n\",\n- \"print (\\\"Each image is: \\\" + str(pix) + \\\" by \\\" + str(pix) + \\\" pixels\\\")\\n\",\n+ \"X_train, X_test, y_train, y_test = train_test_split(\\n\",\n+ \" mnist.data, mnist.target.astype(np.uint8).reshape(-1, 1),\\n\",\n+ \" test_size = 10000)\\n\",\n\"\\n\",\n- \"np.savetxt('data/mnist/mnist_train.csv', trainD, fmt='%u', delimiter=\\\",\\\")\\n\",\n- \"np.savetxt('data/mnist/mnist_test.csv', testD, fmt='%u', delimiter=\\\",\\\")\"\n+ \"print(\\\"Training images, labels: {}, {}\\\".format(X_train.shape, y_train.shape))\\n\",\n+ \"print(\\\"Testing images, labels: {}, {}\\\".format(X_test.shape, y_test.shape))\\n\",\n+ \"print(\\\"Each image is: {0:d}x{0:d} pixels\\\".format(int(np.sqrt(X_train.shape[1]))))\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Alternatively get the data from here. (Uncomment curl commands from following cell if you want to download using following approach)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"%%sh\\n\",\n- \"cd data/mnist\\n\",\n- \"# curl -O https://pjreddie.com/media/files/mnist_train.csv\\n\",\n- \"# curl -O https://pjreddie.com/media/files/mnist_test.csv\\n\",\n- \"wc -l mnist*\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### Read the data.\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"trainData = np.genfromtxt('data/mnist/mnist_train.csv', delimiter=\\\",\\\")\\n\",\n- \"testData = np.genfromtxt('data/mnist/mnist_test.csv', delimiter=\\\",\\\")\\n\",\n- \"\\n\",\n- \"print (\\\"Training data: \\\" + str(trainData.shape))\\n\",\n- \"print (\\\"Test data: \\\" + str(testData.shape))\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"pd.set_option('display.max_columns', 200)\\n\",\n- \"pd.DataFrame(testData[1:10,],dtype='uint')\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### Following command is not required for code above SystemML 0.14 (master branch dated 05/15/2017 or later)\"\n+ \"### Note: The following command is not required for code above SystemML 0.14 (master branch dated 05/15/2017 or later).\"\n]\n},\n{\n\"metadata\": {},\n\"source\": [\n\"<a id=\\\"train\\\"></a>\\n\",\n- \"## Develop LeNet CNN classifier on Training Data\"\n+ \"## Train a LeNet-like CNN classifier on the training data\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"### Train Model using SystemML LeNet CNN.\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"(on a Mac Book, this takes approx. 
5-6 mins for 1 epoch)\"\n+ \"### Train a LeNet-like CNN model using SystemML\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n- \"scrolled\": true\n+ \"collapsed\": true\n},\n\"outputs\": [],\n\"source\": [\n\"script = \\\"\\\"\\\"\\n\",\n\" source(\\\"nn/examples/mnist_lenet.dml\\\") as mnist_lenet\\n\",\n\"\\n\",\n- \" # Bind training data\\n\",\n- \" n = nrow(data)\\n\",\n- \"\\n\",\n- \" # Extract images and labels\\n\",\n- \" images = data[,2:ncol(data)]\\n\",\n- \" labels = data[,1]\\n\",\n- \"\\n\",\n\" # Scale images to [-1,1], and one-hot encode the labels\\n\",\n- \" images = (images / 255.0) * 2 - 1\\n\",\n+ \" images = (images / 255) * 2 - 1\\n\",\n+ \" n = nrow(images)\\n\",\n\" labels = table(seq(1, n), labels+1, n, 10)\\n\",\n\"\\n\",\n\" # Split into training (55,000 examples) and validation (5,000 examples)\\n\",\n\" y = labels[5001:nrow(images),]\\n\",\n\" y_val = labels[1:5000,]\\n\",\n\"\\n\",\n- \" # Train the model using channel, height, and width to produce weights/biases.\\n\",\n+ \" # Train the model to produce weights & biases.\\n\",\n\" [W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win, epochs)\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"rets = ('W1', 'b1','W2','b2','W3','b3','W4','b4')\\n\",\n+ \"out = ('W1', 'b1', 'W2', 'b2', 'W3', 'b3', 'W4', 'b4')\\n\",\n+ \"prog = (dml(script).input(images=X_train, labels=y_train, epochs=1, C=1, Hin=28, Win=28)\\n\",\n+ \" .output(*out))\\n\",\n\"\\n\",\n- \"script = (dml(script).input(data=trainData, epochs=1, C=1, Hin=28, Win=28)\\n\",\n- \" .output(*rets)) \\n\",\n- \"\\n\",\n- \"W1, b1, W2, b2, W3, b3, W4, b4 = (ml.execute(script).get(*rets))\"\n+ \"W1, b1, W2, b2, W3, b3, W4, b4 = ml.execute(prog).get(*out)\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"source\": [\n- \"Use trained model and predict on test data, and evaluate the quality of the predictions for each digit.\"\n+ \"Use the trained model to make predictions for the test data, and evaluate the quality of the predictions.\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n- \"scriptPredict = \\\"\\\"\\\"\\n\",\n+ \"script_predict = \\\"\\\"\\\"\\n\",\n\" source(\\\"nn/examples/mnist_lenet.dml\\\") as mnist_lenet\\n\",\n\"\\n\",\n- \" # Separate images from lables and scale images to [-1,1]\\n\",\n- \" X_test = data[,2:ncol(data)]\\n\",\n- \" X_test = (X_test / 255.0) * 2 - 1\\n\",\n+ \" # Scale images to [-1,1]\\n\",\n+ \" X_test = (X_test / 255) * 2 - 1\\n\",\n\"\\n\",\n\" # Predict\\n\",\n- \" probs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)\\n\",\n- \" predictions = rowIndexMax(probs) - 1\\n\",\n+ \" y_prob = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)\\n\",\n+ \" y_pred = rowIndexMax(y_prob) - 1\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"script = (dml(scriptPredict).input(data=testData, C=1, Hin=28, Win=28, W1=W1, b1=b1, W2=W2, b2=b2, W3=W3, b3=b3, W4=W4, b4=b4)\\n\",\n- \" .output(\\\"predictions\\\"))\\n\",\n+ \"prog = (dml(script_predict).input(X_test=X_test, C=1, Hin=28, Win=28, W1=W1, b1=b1,\\n\",\n+ \" W2=W2, b2=b2, W3=W3, b3=b3, W4=W4, b4=b4)\\n\",\n+ \" .output(\\\"y_pred\\\"))\\n\",\n\"\\n\",\n- \"predictions = ml.execute(script).get(\\\"predictions\\\").toNumPy()\\n\",\n- \"\\n\",\n- \"print (classification_report(testData[:,0], 
predictions))\"\n+ \"y_pred = ml.execute(prog).get(\\\"y_pred\\\").toNumPy()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"print(metrics.accuracy_score(y_test, y_pred))\\n\",\n+ \"print(metrics.classification_report(y_test, y_pred))\"\n]\n},\n{\n\"metadata\": {},\n\"source\": [\n\"<a id=\\\"predict\\\"></a>\\n\",\n- \"## Detect handwritten Digits\"\n+ \"## Detect handwritten digits\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Define a function that randomly selects a test image, display the image, and scores it.\"\n+ \"Define a function that randomly selects a test image, displays the image, and scores it.\"\n]\n},\n{\n},\n\"outputs\": [],\n\"source\": [\n- \"img_size = int(np.sqrt(testData.shape[1] - 1))\\n\",\n+ \"img_size = int(np.sqrt(X_test.shape[1]))\\n\",\n\"\\n\",\n\"def displayImage(i):\\n\",\n- \" image = (testData[i,1:]).reshape((img_size, img_size)).astype(\\\"uint8\\\")\\n\",\n+ \" image = (X_test[i]).reshape(img_size, img_size).astype(np.uint8)\\n\",\n\" imgplot = plt.imshow(image, cmap='gray') \"\n]\n},\n\"outputs\": [],\n\"source\": [\n\"def predictImage(i):\\n\",\n- \" image = testData[i,:].reshape(1,testData.shape[1])\\n\",\n- \" prog = dml(scriptPredict).input(data=image, C=1, Hin=28, Win=28, W1=W1, b1=b1, W2=W2, b2=b2, W3=W3, b3=b3, W4=W4, b4=b4) \\\\\\n\",\n- \" .output(\\\"predictions\\\")\\n\",\n- \" result = ml.execute(prog)\\n\",\n- \" return (result.get(\\\"predictions\\\").toNumPy())[0]\"\n+ \" image = X_test[i].reshape(1, -1)\\n\",\n+ \" out = \\\"y_pred\\\"\\n\",\n+ \" prog = (dml(script_predict).input(X_test=image, C=1, Hin=28, Win=28, W1=W1, b1=b1,\\n\",\n+ \" W2=W2, b2=b2, W3=W3, b3=b3, W4=W4, b4=b4)\\n\",\n+ \" .output(out))\\n\",\n+ \" pred = int(ml.execute(prog).get(out).toNumPy())\\n\",\n+ \" return pred\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n- \"i = np.random.choice(np.arange(0, len(testData)), size = (1,))\\n\",\n- \"\\n\",\n+ \"i = np.random.randint(len(X_test))\\n\",\n\"p = predictImage(i)\\n\",\n\"\\n\",\n- \"print (\\\"Image \\\" + str(i) + \\\"\\\\nPredicted digit: \\\" + str(p) + \\\"\\\\nActual digit: \\\" + str(testData[i,0]) + \\\"\\\\nResult: \\\" + str(p == testData[i,0]))\\n\",\n+ \"print(\\\"Image {}\\\\nPredicted digit: {}\\\\nActual digit: {}\\\\nResult: {}\\\".format(\\n\",\n+ \" i, p, int(y_test[i]), p == int(y_test[i])))\\n\",\n\"\\n\",\n- \"p\\n\",\n\"displayImage(i)\"\n]\n},\n\"outputs\": [],\n\"source\": [\n\"pd.set_option('display.max_columns', 28)\\n\",\n- \"pd.DataFrame((testData[i,1:]).reshape(img_size, img_size),dtype='uint')\"\n+ \"pd.DataFrame((X_test[i]).reshape(img_size, img_size), dtype='uint')\"\n]\n}\n],\n\"metadata\": {\n\"kernelspec\": {\n- \"display_name\": \"Python 2\",\n+ \"display_name\": \"Python 3 + Spark 2.x + SystemML\",\n\"language\": \"python\",\n- \"name\": \"python2\"\n+ \"name\": \"pyspark3_2.x\"\n},\n\"language_info\": {\n\"codemirror_mode\": {\n\"name\": \"ipython\",\n- \"version\": 2\n+ \"version\": 3\n},\n\"file_extension\": \".py\",\n\"mimetype\": \"text/x-python\",\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n- \"pygments_lexer\": \"ipython2\",\n- \"version\": \"2.7.13\"\n+ \"pygments_lexer\": \"ipython3\",\n+ \"version\": \"3.6.1\"\n}\n},\n\"nbformat\": 4,\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Updating the MNIST LeNet example notebook.
This improves the nomenclature & documentation, adds more cleanup,
removes the unnecessary saving of the MNIST data to CSV files, and
makes everything more Pythonic. |
49,738 | 02.06.2017 21:47:59 | 25,200 | 50d211baa91e6a74b32cd8c1780758608d33c7c8 | New simplification rewrite 'aggregate elimination'
This new static algebraic simplification rewrite removes unnecessary
row- or column-wise aggregates which are directly fed into a full
row/column aggregate. For example, we now rewrite sum(rowSums(X)), as it
appears in nn-cross_entropy_loss::forward, to sum(X). | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"diff": "@@ -148,6 +148,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nhi = simplifyDistributiveBinaryOperation(hop, hi, i);//e.g., (X-Y*X) -> (1-Y)*X\nhi = simplifyBushyBinaryOperation(hop, hi, i); //e.g., (X*(Y*(Z%*%v))) -> (X*Y)*(Z%*%v)\nhi = simplifyUnaryAggReorgOperation(hop, hi, i); //e.g., sum(t(X)) -> sum(X)\n+ hi = removeUnnecessaryAggregates(hi); //e.g., sum(rowSums(X)) -> sum(X)\nhi = simplifyBinaryMatrixScalarOperation(hop, hi, i);//e.g., as.scalar(X*s) -> as.scalar(X)*s;\nhi = pushdownUnaryAggTransposeOperation(hop, hi, i); //e.g., colSums(t(X)) -> t(rowSums(X))\nhi = pushdownCSETransposeScalarOperation(hop, hi, i);//e.g., a=t(X), b=t(X^2) -> a=t(X), b=t(X)^2 for CSE t(X)\n@@ -817,6 +818,33 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nreturn hi;\n}\n+ private Hop removeUnnecessaryAggregates(Hop hi)\n+ {\n+ //sum(rowSums(X)) -> sum(X), sum(colSums(X)) -> sum(X)\n+ //min(rowMins(X)) -> min(X), min(colMins(X)) -> min(X)\n+ //max(rowMaxs(X)) -> max(X), max(colMaxs(X)) -> max(X)\n+ //sum(rowSums(X^2)) -> sum(X), sum(colSums(X^2)) -> sum(X)\n+ if( hi instanceof AggUnaryOp && hi.getInput().get(0) instanceof AggUnaryOp\n+ && ((AggUnaryOp)hi).getDirection()==Direction.RowCol\n+ && hi.getInput().get(0).getParent().size()==1 )\n+ {\n+ AggUnaryOp au1 = (AggUnaryOp) hi;\n+ AggUnaryOp au2 = (AggUnaryOp) hi.getInput().get(0);\n+ if( (au1.getOp()==AggOp.SUM && (au2.getOp()==AggOp.SUM || au2.getOp()==AggOp.SUM_SQ))\n+ || (au1.getOp()==AggOp.MIN && au2.getOp()==AggOp.MIN)\n+ || (au1.getOp()==AggOp.MAX && au2.getOp()==AggOp.MAX) )\n+ {\n+ Hop input = au2.getInput().get(0);\n+ HopRewriteUtils.removeAllChildReferences(au2);\n+ HopRewriteUtils.replaceChildReference(au1, au2, input);\n+\n+ LOG.debug(\"Applied removeUnnecessaryAggregates (line \"+hi.getBeginLine()+\").\");\n+ }\n+ }\n+\n+ return hi;\n+ }\n+\nprivate Hop simplifyBinaryMatrixScalarOperation( Hop parent, Hop hi, int pos )\nthrows HopsException\n{\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteEliminateAggregatesTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+import java.util.HashMap;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class RewriteEliminateAggregatesTest extends AutomatedTestBase\n+{\n+ private static final String TEST_NAME = \"RewriteEliminateAggregate\";\n+ private static final String TEST_DIR = \"functions/misc/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + RewriteEliminateAggregatesTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testEliminateSumSumNoRewrite() {\n+ testRewriteEliminateAggregate(1, false);\n+ }\n+\n+ @Test\n+ public void testEliminateMinMinNoRewrite() {\n+ testRewriteEliminateAggregate(2, false);\n+ }\n+\n+ @Test\n+ public void testEliminateMaxMaxNoRewrite() {\n+ testRewriteEliminateAggregate(3, false);\n+ }\n+\n+ @Test\n+ public void testEliminateSumSqSumNoRewrite() {\n+ testRewriteEliminateAggregate(4, false);\n+ }\n+\n+ @Test\n+ public void testEliminateMinSumNoRewrite() {\n+ testRewriteEliminateAggregate(5, false);\n+ }\n+\n+ @Test\n+ public void testEliminateSumSumRewrite() {\n+ testRewriteEliminateAggregate(1, true);\n+ }\n+\n+ @Test\n+ public void testEliminateMinMinRewrite() {\n+ testRewriteEliminateAggregate(2, true);\n+ }\n+\n+ @Test\n+ public void testEliminateMaxMaxRewrite() {\n+ testRewriteEliminateAggregate(3, true);\n+ }\n+\n+ @Test\n+ public void testEliminateSumSqSumRewrite() {\n+ testRewriteEliminateAggregate(4, true);\n+ }\n+\n+ @Test\n+ public void testEliminateMinSumRewrite() {\n+ testRewriteEliminateAggregate(5, true);\n+ }\n+\n+ private void testRewriteEliminateAggregate(int type, boolean rewrites)\n+ {\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+\n+ try\n+ {\n+ TestConfiguration config = getTestConfiguration(TEST_NAME);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{ \"-stats\",\"-args\",\n+ input(\"A\"), String.valueOf(type), output(\"Scalar\") };\n+\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = getRCmd(inputDir(), String.valueOf(type), expectedDir());\n+\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = 
rewrites;\n+\n+ //generate actual dataset\n+ double[][] A = getRandomMatrix(123, 12, 0, 1, 0.9, 7);\n+ writeInputMatrixWithMTD(\"A\", A, true);\n+\n+ //run test\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare scalars\n+ HashMap<CellIndex, Double> dmlfile = readDMLScalarFromHDFS(\"Scalar\");\n+ HashMap<CellIndex, Double> rfile = readRScalarFromFS(\"Scalar\");\n+ TestUtils.compareScalars(dmlfile.toString(), rfile.toString());\n+\n+ //check for applied rewrites\n+ if( rewrites ) {\n+ Assert.assertEquals(type==5,\n+ heavyHittersContainsSubString(\"uar\", \"uac\"));\n+ }\n+ }\n+ finally {\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/misc/RewriteEliminateAggregate.R",
"diff": "+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+A = as.matrix(readMM(paste(args[1], \"A.mtx\", sep=\"\")))\n+type = args[2]\n+\n+if( type==1 ) {\n+ agg = sum(rowSums(A));\n+} else if( type==2 ) {\n+ agg = min(rowMins(A));\n+} else if( type==3 ) {\n+ agg = max(rowMaxs(A));\n+} else if( type==4 ) {\n+ agg = sum(rowSums(A^2));\n+} else if( type==5 ) {\n+ agg = sum(rowMins(A));\n+}\n+\n+write(agg, paste(args[3], \"Scalar\",sep=\"\"))\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/misc/RewriteEliminateAggregate.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+type = $2;\n+\n+if( type==1 ) {\n+ agg = sum(rowSums(A));\n+}\n+else if( type==2 ) {\n+ agg = min(rowMins(A));\n+}\n+else if( type==3 ) {\n+ agg = max(rowMaxs(A));\n+}\n+else if( type==4 ) {\n+ agg = sum(rowSums(A^2));\n+}\n+else if( type==5 ) {\n+ agg = sum(rowMins(A));\n+}\n+\n+write(agg, $3);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java",
"diff": "@@ -49,6 +49,7 @@ import org.junit.runners.Suite;\nReadAfterWriteTest.class,\nRewriteCSETransposeScalarTest.class,\nRewriteCTableToRExpandTest.class,\n+ RewriteEliminateAggregatesTest.class,\nRewriteFusedRandTest.class,\nRewriteLoopVectorization.class,\nRewriteMatrixMultChainOptTest.class,\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1659] New simplification rewrite 'aggregate elimination'
This new static algebraic simplification rewrite removes unnecessary
row- or column-wise aggregates which are directly fed into a full
row/column aggregate. For example, we now rewrite sum(rowSums(X)), as it
appears in nn-cross_entropy_loss::forward, to sum(X). |
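The identity behind the rewrite above is easy to check numerically; the following stand-alone Java sketch (illustrative, not SystemML code) shows that a full aggregate over a row-wise partial aggregate equals the full aggregate over the input:

```java
public class AggElimination {
    // Demonstrates sum(rowSums(X)) == sum(X), the algebraic identity
    // the rewrite exploits (analogous for min/rowMins and max/rowMaxs).
    public static void main(String[] args) {
        double[][] X = {{1, 2, 3}, {4, 5, 6}};

        // sum(rowSums(X)): aggregate each row first, then sum the row totals
        double sumOfRowSums = 0;
        for (double[] row : X) {
            double rowSum = 0;
            for (double v : row) rowSum += v;
            sumOfRowSums += rowSum;
        }

        // sum(X): aggregate all cells directly -- the rewritten form
        double sum = 0;
        for (double[] row : X)
            for (double v : row) sum += v;

        System.out.println(sumOfRowSums + " == " + sum); // 21.0 == 21.0
    }
}
```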
49,738 | 03.06.2017 11:26:19 | 25,200 | 10727e5d03711d59675d2adf15e04d98b5acf2b7 | [MINOR] Update maven test suites (consistency w/ actual test suite) | [
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/applications/parfor/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/applications/parfor/ZPackageSuite.java",
"diff": "@@ -27,12 +27,12 @@ import org.junit.runners.Suite;\n@RunWith(Suite.class)\[email protected]({\nParForBivariateStatsTest.class,\n- ParForCVMulticlassSVMTest.class,\n- ParForUnivariateStatsTest.class,\n-\nParForCorrelationTest.class,\nParForCorrelationTestLarge.class,\n- ParForNaiveBayesTest.class\n+ ParForCVMulticlassSVMTest.class,\n+ ParForNaiveBayesTest.class,\n+ ParForSampleTest.class,\n+ ParForUnivariateStatsTest.class,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/conversion/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/conversion/ZPackageSuite.java",
"diff": "@@ -26,7 +26,7 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n- org.apache.sysml.test.integration.conversion.RDDConverterUtilsExtTest.class\n+ RDDConverterUtilsExtTest.class\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/aggregate/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/aggregate/ZPackageSuite.java",
"diff": "@@ -27,33 +27,31 @@ import org.junit.runners.Suite;\n@RunWith(Suite.class)\[email protected]({\nAggregateInfTest.class,\n+ ColStdDevsTest.class,\n+ ColSumsSqTest.class,\nColSumTest.class,\n+ ColVariancesTest.class,\n+ FullAggregateTest.class,\n+ FullColAggregateTest.class,\n+ FullGroupedAggregateMatrixTest.class,\n+ FullGroupedAggregateTest.class,\n+ FullRowAggregateTest.class,\nLengthTest.class,\nMaxTest.class,\nMinTest.class,\nNColTest.class,\nNRowTest.class,\nProdTest.class,\n+ PushdownSumBinaryTest.class,\n+ RowStdDevsTest.class,\n+ RowSumsSqTest.class,\nRowSumTest.class,\n- SumTest.class,\n+ RowVariancesTest.class,\n+ StdDevTest.class,\nSumSqTest.class,\n- RowSumsSqTest.class,\n- ColSumsSqTest.class,\n+ SumTest.class,\nTraceTest.class,\n- StdDevTest.class,\n- RowStdDevsTest.class,\n- ColStdDevsTest.class,\nVarianceTest.class,\n- RowVariancesTest.class,\n- ColVariancesTest.class,\n-\n- FullAggregateTest.class,\n- FullColAggregateTest.class,\n- FullGroupedAggregateTest.class,\n- FullGroupedAggregateMatrixTest.class,\n- FullRowAggregateTest.class,\n-\n- PushdownSumBinaryTest.class,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/append/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/append/ZPackageSuite.java",
"diff": "@@ -26,11 +26,11 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n+ AppendChainTest.class,\n+ AppendMatrixTest.class,\nAppendVectorTest.class,\n- StringAppendTest.class,\nRBindCBindMatrixTest.class,\n- AppendChainTest.class,\n- AppendMatrixTest.class\n+ StringAppendTest.class,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix/ZPackageSuite.java",
"diff": "@@ -27,10 +27,8 @@ import org.junit.runners.Suite;\n@RunWith(Suite.class)\[email protected]({\nBinUaggChainTest.class,\n-\nCentralMomentTest.class,\nCovarianceTest.class,\n-\nDiagMatrixMultiplicationTest.class,\nElementwiseAdditionMultiplicationTest.class,\nElementwiseAdditionTest.class,\n@@ -38,7 +36,6 @@ import org.junit.runners.Suite;\nElementwiseModulusTest.class,\nElementwiseMultiplicationTest.class,\nElementwiseSubtractionTest.class,\n-\nMapMultChainTest.class,\nMapMultLimitTest.class,\nMatrixMultiplicationTest.class,\n@@ -54,7 +51,6 @@ import org.junit.runners.Suite;\nUaggOuterChainTest.class,\nUltraSparseMRMatrixMultiplicationTest.class,\nZipMMSparkMatrixMultiplicationTest.class\n-\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java",
"diff": "@@ -26,18 +26,18 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n-\n- FullMatrixMultiplicationUltraSparseTest.class,\n+ FullDistributedMatrixMultiplicationTest.class,\nFullIntegerDivisionTest.class,\nFullMatrixMultiplicationTest.class,\n- FullMatrixMultiplicationTransposeSelfTest.class,\nFullMatrixMultiplicationTransposeSelf2Test.class,\n+ FullMatrixMultiplicationTransposeSelfTest.class,\n+ FullMatrixMultiplicationUltraSparseTest.class,\nFullMinMaxComparisonTest.class,\nFullPowerTest.class,\nFullPPredMatrixTest.class,\nFullPPredScalarLeftTest.class,\nFullPPredScalarRightTest.class,\n- FullDistributedMatrixMultiplicationTest.class\n+ MatrixMultShortLhsTest.class,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegen/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegen/ZPackageSuite.java",
"diff": "@@ -31,6 +31,7 @@ import org.junit.runners.Suite;\nAlgorithmL2SVM.class,\nAlgorithmLinregCG.class,\nAlgorithmMLogreg.class,\n+ AlgorithmMSVM.class,\nAlgorithmPNMF.class,\nCellwiseTmplTest.class,\nCompressedCellwiseTest.class,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/compress/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/compress/ZPackageSuite.java",
"diff": "@@ -39,6 +39,7 @@ import org.junit.runners.Suite;\nBasicTransposeSelfLeftMatrixMultTest.class,\nBasicUnaryAggregateTest.class,\nBasicVectorMatrixMultTest.class,\n+ CompressedL2SVM.class,\nCompressedLinregCG.class,\nCompressedSerializationTest.class,\nLargeCompressionTest.class,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/data/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/data/ZPackageSuite.java",
"diff": "@@ -28,6 +28,7 @@ import org.junit.runners.Suite;\[email protected]({\nFullReblockTest.class,\nFullStringInitializeTest.class,\n+ RandRuntimePlatformTest.class,\nRandTest1.class,\nRandTest2.class,\nRandTest3.class,\n@@ -42,8 +43,6 @@ import org.junit.runners.Suite;\nVariableTest.class,\nWriteMMTest.class,\nWriteTest.class,\n-\n- RandRuntimePlatformTest.class\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java",
"diff": "@@ -34,10 +34,10 @@ import org.junit.runners.Suite;\nIfTest.class,\nInvalidFunctionAssignmentTest.class,\nInvalidFunctionSignatureTest.class,\n+ IPAConstantFoldingScalarVariablePropagationTest.class,\nIPALiteralReplacementTest.class,\nIPAScalarRecursionTest.class,\nIPAScalarVariablePropagationTest.class,\n- IPAConstantFoldingScalarVariablePropagationTest.class,\nIPAUnknownRecursionTest.class,\nLongOverflowTest.class,\nNegativeLoopIncrementsTest.class,\n@@ -50,6 +50,7 @@ import org.junit.runners.Suite;\nRewriteCSETransposeScalarTest.class,\nRewriteCTableToRExpandTest.class,\nRewriteEliminateAggregatesTest.class,\n+ RewriteFuseBinaryOpChainTest.class,\nRewriteFusedRandTest.class,\nRewriteLoopVectorization.class,\nRewriteMatrixMultChainOptTest.class,\n@@ -58,7 +59,6 @@ import org.junit.runners.Suite;\nRewritePushdownUaggTest.class,\nRewriteSimplifyRowColSumMVMultTest.class,\nRewriteSlicedMatrixMultTest.class,\n- RewriteFuseBinaryOpChainTest.class,\nScalarAssignmentTest.class,\nScalarFunctionTest.class,\nScalarMatrixUnaryBinaryTermTest.class,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/parfor/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/parfor/ZPackageSuite.java",
"diff": "@@ -28,19 +28,19 @@ import org.junit.runners.Suite;\[email protected]({\nForLoopPredicateTest.class,\nParForAdversarialLiteralsTest.class,\n+ ParForBlockwiseDataPartitioningTest.class,\nParForColwiseDataPartitioningTest.class,\nParForDataPartitionLeftIndexingTest.class,\nParForDependencyAnalysisTest.class,\nParForFunctionSerializationTest.class,\nParForMultipleDataPartitioningTest.class,\nParForNaNResultMergeTest.class,\n+ ParForParallelRemoteResultMergeTest.class,\n+ ParForRepeatedOptimizationTest.class,\nParForReplaceThreadIDRecompileTest.class,\nParForRowwiseDataPartitioningTest.class,\n-\n- ParForParallelRemoteResultMergeTest.class,\n+ ParForRulebasedOptimizerTest.class,\nParForSerialRemoteResultMergeTest.class,\n- ParForRepeatedOptimizationTest.class,\n- ParForRulebasedOptimizerTest.class\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/quaternary/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/quaternary/ZPackageSuite.java",
"diff": "@@ -29,8 +29,8 @@ import org.junit.runners.Suite;\nRewritesWeightedSigmoidTest.class,\nWeightedCrossEntropyTest.class,\nWeightedDivMatrixMultTest.class,\n- WeightedSquaredLossTest.class,\nWeightedSigmoidTest.class,\n+ WeightedSquaredLossTest.class,\nWeightedUnaryMatrixMultTest.class\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/tensor/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/tensor/ZPackageSuite.java",
"diff": "@@ -30,7 +30,8 @@ import org.junit.runners.Suite;\nConv2DBackwardTest.class,\nConv2DTest.class,\nPoolBackwardTest.class,\n- PoolTest.class\n+ PoolTest.class,\n+ ReluBackwardTest.class,\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/unary/scalar/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/unary/scalar/ZPackageSuite.java",
"diff": "@@ -40,9 +40,9 @@ import org.junit.runners.Suite;\nRoundTest.class,\nSinTest.class,\nSqrtTest.class,\n+ StopTest.class,\nStopTest2.class,\nStopTestCtrlStr.class,\n- StopTest.class,\nTanTest.class\n})\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/updateinplace/ZPackageSuite.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.updateinplace;\n+\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Suite;\n+\n+/** Group together the tests in this package into a single suite so that the Maven build\n+ * won't run two of them at once. */\n+@RunWith(Suite.class)\[email protected]({\n+ UpdateInPlaceTest.class,\n+})\n+\n+\n+/** This class is just a holder for the above JUnit annotations. */\n+public class ZPackageSuite {\n+\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update maven test suites (consistency w/ actual test suite) |
49,736 | 03.06.2017 17:05:38 | 25,200 | 88f4a468f48081031d926d917ebc4f3e9014fc7f | Added the documentation for bias_add and bias_multiply
builtin functions. | [
{
"change_type": "MODIFY",
"old_path": "docs/dml-language-reference.md",
"new_path": "docs/dml-language-reference.md",
"diff": "@@ -1508,24 +1508,27 @@ Hence, the images are internally represented as a matrix with dimension (N, C *\n| Function name | Input matrices | Input Parameters | Notes |\n-|------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------|\n+|------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------|\n| conv2d | input, filter | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Performs 2D convolution operation |\n| conv2d_backward_filter | input, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Computes the gradients wrt filter of 2D convolution |\n| conv2d_backward_data | filter, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Computes the gradients wrt input of 2D convolution |\n| max_pool | input | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Performs max pooling operation |\n| max_pool_backward | input, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Computes the gradients wrt input of 2D maxpooling |\n+| bias_add | input, bias | | Adds the bias (row vector of size numChannels) to input with the given numChannels |\n+| bias_multiply | input, bias | | Multiplies the bias (row vector of size numChannels) to input with the given numChannels |\nExamples:\n-| Function | Parameters | Visualization |\n+| Function | Parameters | Visualization / Equivalent DML |\n|----------------------|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| conv2d | stride=[1,1] |  |\n| conv2d | stride=[2,2] |  |\n| conv2d_backward_data | stride=[1,1] |  |\n| conv2d_backward_data | stride=[2,2] |  |\n| conv2d_backward_data | stride=[2,2] and 2x2 filter |  |\n-\n+| bias_add | | `ones = matrix(1, rows=1, cols=height*width); output = input + matrix(bias %*% ones, rows=1, cols=numChannels*height*width)` |\n+| bias_multiply | | `ones = matrix(1, rows=1, cols=height*width); output = input * matrix(bias %*% ones, rows=1, cols=numChannels*height*width)` |\n### Other Built-In Functions\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1661] Added the documentation for bias_add and bias_multiply
builtin functions. |
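For readers skimming the table above: `bias_add` broadcasts a length-`numChannels` bias over the flattened NCHW layout, adding `bias[c]` to every spatial position of channel `c` in every image (`bias_multiply` is the same with multiplication). A minimal Java sketch of that semantics — the array layout and names here are illustrative assumptions, not SystemML internals:

```java
public class BiasAddSketch {
    // input: N x (C*H*W) row-major matrix; bias: one value per channel
    static double[][] biasAdd(double[][] input, double[] bias, int C, int H, int W) {
        int N = input.length, HW = H * W;
        double[][] out = new double[N][C * HW];
        for (int n = 0; n < N; n++)
            for (int c = 0; c < C; c++)
                for (int i = 0; i < HW; i++)
                    out[n][c * HW + i] = input[n][c * HW + i] + bias[c];
        return out;
    }

    public static void main(String[] args) {
        double[][] x = {{1, 2, 3, 4}};   // N=1, C=2, H=1, W=2
        double[] b = {10, 100};
        System.out.println(java.util.Arrays.toString(biasAdd(x, b, 2, 1, 2)[0]));
        // prints [11.0, 12.0, 103.0, 104.0] -- bias[0] on channel 0, bias[1] on channel 1
    }
}
```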
49,738 | 05.06.2017 08:56:32 | 25,200 | 7c24948130c552f06fb34e3614945a2b60ebde1f | [HOTFIX][SYSTEMML-1664] Fix scala dl-utils access to removed internals | [
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/Utils.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/Utils.scala",
"diff": "@@ -237,14 +237,10 @@ object Utils {\nif(filePath == null)\nthrow new LanguageException(\"file path was not specified!\");\nif(filePath.startsWith(\"hdfs:\") || filePath.startsWith(\"gpfs:\")) {\n- if( !LocalFileUtils.validateExternalFilename(filePath, true) )\n- throw new LanguageException(\"Invalid (non-trustworthy) hdfs filename.\");\nval fs = FileSystem.get(ConfigurationManager.getCachedJobConf());\nreturn new InputStreamReader(fs.open(new Path(filePath)));\n}\nelse {\n- if( !LocalFileUtils.validateExternalFilename(filePath, false) )\n- throw new LanguageException(\"Invalid (non-trustworthy) local filename.\");\nreturn new InputStreamReader(new FileInputStream(new File(filePath)), \"ASCII\");\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX][SYSTEMML-1664] Fix scala dl-utils access to removed internals |
49,772 | 05.06.2017 12:32:31 | 25,200 | 882fa843ef26f37f34a6b631868daf81f58cc3da | [MINOR] Update ReLU layer now that has been fixed. | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/relu.dml",
"new_path": "scripts/nn/layers/relu.dml",
"diff": "@@ -36,7 +36,7 @@ forward = function(matrix[double] X)\n* Outputs:\n* - out: Outputs, of same shape as `X`.\n*/\n- out = max(X, 0)\n+ out = max(0, X)\n}\nbackward = function(matrix[double] dout, matrix[double] X)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update ReLU layer now that SYSTEMML-1621 has been fixed. |
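The one-character reordering above is behavior-preserving: both `max(X, 0)` and `max(0, X)` denote an elementwise maximum with the scalar zero. Stated as a plain Java sketch (illustrative, not the actual SystemML kernel):

```java
// Elementwise ReLU: out[i][j] = max(0, X[i][j])
static double[][] relu(double[][] X) {
    double[][] out = new double[X.length][];
    for (int i = 0; i < X.length; i++) {
        out[i] = new double[X[i].length];
        for (int j = 0; j < X[i].length; j++)
            out[i][j] = Math.max(0.0, X[i][j]);
    }
    return out;
}
```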
49,737 | 06.06.2017 22:18:51 | 25,200 | 6b377319e205734c8f26ba28bd18d9e720151d7e | [Doc] Change PCA scale value in documentation
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/algorithms-matrix-factorization.md",
"new_path": "docs/algorithms-matrix-factorization.md",
"diff": "@@ -113,7 +113,7 @@ SystemML Language Reference for details.\n-nvargs INPUT=/user/ml/input.mtx\nK=10\nCENTER=1\n- SCALE=1O\n+ SCALE=1\nFMT=csv\nPROJDATA=1\nOUTPUT=/user/ml/pca_output/\n@@ -129,7 +129,7 @@ SystemML Language Reference for details.\n-nvargs INPUT=/user/ml/input.mtx\nK=10\nCENTER=1\n- SCALE=1O\n+ SCALE=1\nFMT=csv\nPROJDATA=1\nOUTPUT=/user/ml/pca_output/\n@@ -142,7 +142,7 @@ SystemML Language Reference for details.\n-nvargs INPUT=/user/ml/test_input.mtx\nK=10\nCENTER=1\n- SCALE=1O\n+ SCALE=1\nFMT=csv\nPROJDATA=1\nMODEL=/user/ml/pca_output/\n@@ -159,7 +159,7 @@ SystemML Language Reference for details.\n-nvargs INPUT=/user/ml/test_input.mtx\nK=10\nCENTER=1\n- SCALE=1O\n+ SCALE=1\nFMT=csv\nPROJDATA=1\nMODEL=/user/ml/pca_output/\n"
}
] | Java | Apache License 2.0 | apache/systemds | [Doc] Change PCA scale value in documentation
Closes #530 |
49,701 | 08.06.2017 16:22:28 | 25,200 | d0c5c5d29d4cc299649573fed3a2e112e828412d | Fix rbind size estimates in BinaryOp
In BinaryOp.inferOutputCharacteristics, RBIND is not checked and CBIND is checked twice. Fix the second case to refer to RBIND.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"diff": "@@ -844,7 +844,7 @@ public class BinaryOp extends Hop\nif( ldim1 > 0 || ldim2 > 0 || lnnz >= 0 )\nreturn new long[]{ldim1, ldim2, lnnz};\n}\n- else if( op== OpOp2.CBIND ) {\n+ else if( op == OpOp2.RBIND ) {\nlong ldim1 = -1, ldim2 = -1, lnnz = -1;\nif( mc[0].colsKnown() || mc[1].colsKnown() )\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1668] Fix rbind size estimates in BinaryOp
In BinaryOp.inferOutputCharacteristics, RBIND is not checked and CBIND is checked twice. Fix the second case to refer to RBIND.
Closes #533. |
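To see why the corrected branch matters: for `rbind(A, B)` the output has `rows(A) + rows(B)` rows and the shared column count, whereas `cbind` adds columns. A hedged Java sketch of the worst-case inference in the spirit of the fixed method — the signature and names are simplified, not the actual Hop API:

```java
// Worst-case output characteristics for rbind(A, B); -1 means unknown.
static long[] inferRbind(long rows1, long cols1, long nnz1,
                         long rows2, long cols2, long nnz2) {
    long cols = (cols1 > 0) ? cols1 : cols2;                  // either input may know ncol
    long rows = (rows1 > 0 && rows2 > 0) ? rows1 + rows2 : -1; // rows add up
    long nnz  = (nnz1 >= 0 && nnz2 >= 0) ? nnz1 + nnz2 : -1;   // so do non-zeros
    return new long[]{rows, cols, nnz};
}
```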
49,701 | 09.06.2017 11:55:49 | 25,200 | 2869d53cec7b6032452c7dd40b4a6c2c5a5d9b5c | [MINOR] Fix ternary test to read proper files.
Some tests in TernaryAggregateTest refer to the wrong input program.
This fixes them to the correct one.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/ternary/TernaryAggregateTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/ternary/TernaryAggregateTest.java",
"diff": "@@ -164,22 +164,22 @@ public class TernaryAggregateTest extends AutomatedTestBase\n@Test\npublic void testTernaryAggregateRCDenseVectorCPNoRewrite() {\n- runTernaryAggregateTest(TEST_NAME2, false, true, false, ExecType.CP);\n+ runTernaryAggregateTest(TEST_NAME1, false, true, false, ExecType.CP);\n}\n@Test\npublic void testTernaryAggregateRCSparseVectorCPNoRewrite() {\n- runTernaryAggregateTest(TEST_NAME2, true, true, false, ExecType.CP);\n+ runTernaryAggregateTest(TEST_NAME1, true, true, false, ExecType.CP);\n}\n@Test\npublic void testTernaryAggregateRCDenseMatrixCPNoRewrite() {\n- runTernaryAggregateTest(TEST_NAME2, false, false, false, ExecType.CP);\n+ runTernaryAggregateTest(TEST_NAME1, false, false, false, ExecType.CP);\n}\n@Test\npublic void testTernaryAggregateRCSparseMatrixCPNoRewrite() {\n- runTernaryAggregateTest(TEST_NAME2, true, false, false, ExecType.CP);\n+ runTernaryAggregateTest(TEST_NAME1, true, false, false, ExecType.CP);\n}\n@Test\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix ternary test to read proper files.
Some tests in TernaryAggregateTest refer to the wrong input program.
This fixes them to the correct one.
Closes #539. |
49,738 | 11.06.2017 21:58:01 | 25,200 | e54ed71479e30372b1f8b9f4283ba1d5d77f322a | [MINOR] Extended rewrite 'fuse datagen and binary' by div operations | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"diff": "@@ -337,6 +337,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\n* @return high-level operator\n* @throws HopsException if HopsException occurs\n*/\n+ @SuppressWarnings(\"incomplete-switch\")\nprivate Hop fuseDatagenAndBinaryOperation( Hop hi )\nthrows HopsException\n{\n@@ -360,17 +361,18 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHop max = left.getInput().get(params.get(DataExpression.RAND_MAX));\ndouble sval = ((LiteralOp)right).getDoubleValue();\n- if( (bop.getOp()==OpOp2.MULT || bop.getOp()==OpOp2.PLUS || bop.getOp() == OpOp2.MINUS)\n+ if( HopRewriteUtils.isBinary(bop, OpOp2.MULT, OpOp2.PLUS, OpOp2.MINUS, OpOp2.DIV)\n&& min instanceof LiteralOp && max instanceof LiteralOp && pdf instanceof LiteralOp\n&& DataExpression.RAND_PDF_UNIFORM.equals(((LiteralOp)pdf).getStringValue()) )\n{\n//create fused data gen operator\nDataGenOp gen = null;\n- if( bop.getOp()==OpOp2.MULT )\n- gen = HopRewriteUtils.copyDataGenOp(inputGen, sval, 0);\n- else { //OpOp2.PLUS | OpOp2.MINUS\n- sval *= (bop.getOp()==OpOp2.MINUS) ? -1 : 1;\n- gen = HopRewriteUtils.copyDataGenOp(inputGen, 1, sval);\n+ switch( bop.getOp() ) { //fuse via scale and shift\n+ case MULT: gen = HopRewriteUtils.copyDataGenOp(inputGen, sval, 0); break;\n+ case PLUS:\n+ case MINUS: gen = HopRewriteUtils.copyDataGenOp(inputGen,\n+ 1, sval * ((bop.getOp()==OpOp2.MINUS)?-1:1)); break;\n+ case DIV: gen = HopRewriteUtils.copyDataGenOp(inputGen, 1/sval, 0); break;\n}\n//rewire all parents (avoid anomalies with replicated datagen)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Extended rewrite 'fuse datagen and binary' by div operations |
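The new `DIV` case folds `rand(min, max) / s` into a single datagen by setting the internal scale factor to `1/s` (shift 0), since dividing every uniform sample by a scalar is the same as scaling it. A small hypothetical Java check of that identity, using one shared uniform sample per draw:

```java
import java.util.Random;

public class FuseDatagenDiv {
    public static void main(String[] args) {
        double min = 2.0, max = 6.0, s = 4.0;
        Random r = new Random(7);
        for (int i = 0; i < 3; i++) {
            double u = r.nextDouble();                        // one shared uniform sample
            double unfused = (min + u * (max - min)) / s;     // rand(min, max) / s
            double fused = (1.0 / s) * (min + u * (max - min)); // scale = 1/s, shift = 0
            System.out.println(unfused + " == " + fused);     // identical by construction
        }
    }
}
```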
49,738 | 13.06.2017 13:00:58 | 25,200 | 14b4d548723a01eb58047993153fa4845ec1450b | Worst-case size propagation for fused codegen operators
This patch extends the generic codegen hop with the ability to propagate
worst-case size information according to its propagation types. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofFusedOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofFusedOp.java",
"diff": "@@ -32,6 +32,7 @@ import org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.lops.SpoofFused;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\npublic class SpoofFusedOp extends Hop implements MultiThreadedHop\n{\n@@ -91,11 +92,6 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\nreturn 0;\n}\n- @Override\n- protected long[] inferOutputCharacteristics(MemoTable memo) {\n- return null;\n- }\n-\n@Override\npublic Lop constructLops() throws HopsException, LopsException {\nif( getLops() != null )\n@@ -141,6 +137,60 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\nreturn \"spoof(\"+_class.getSimpleName()+\")\";\n}\n+ @Override\n+ protected long[] inferOutputCharacteristics( MemoTable memo )\n+ {\n+ long[] ret = null;\n+\n+ //get statistics of main input\n+ MatrixCharacteristics mc = memo.getAllInputStats(getInput().get(0));\n+\n+ if( mc.dimsKnown() ) {\n+ switch(_dimsType)\n+ {\n+ case ROW_DIMS:\n+ ret = new long[]{mc.getRows(), 1, -1};\n+ break;\n+ case ROW_DIMS2:\n+ ret = new long[]{mc.getRows(), 2, -1};\n+ break;\n+ case COLUMN_DIMS_ROWS:\n+ ret = new long[]{mc.getCols(), 1, -1};\n+ break;\n+ case COLUMN_DIMS_COLS:\n+ ret = new long[]{1, mc.getCols(), -1};\n+ break;\n+ case INPUT_DIMS:\n+ ret = new long[]{mc.getRows(), mc.getCols(), -1};\n+ break;\n+ case SCALAR:\n+ ret = new long[]{0, 0, -1};\n+ break;\n+ case MULTI_SCALAR:\n+ //dim2 statically set from outside\n+ ret = new long[]{1, _dim2, -1};\n+ break;\n+ case ROW_RANK_DIMS: {\n+ MatrixCharacteristics mc2 = memo.getAllInputStats(getInput().get(1));\n+ if( mc2.dimsKnown() )\n+ ret = new long[]{mc.getRows(), mc2.getCols(), -1};\n+ break;\n+ }\n+ case COLUMN_RANK_DIMS: {\n+ MatrixCharacteristics mc2 = memo.getAllInputStats(getInput().get(1));\n+ if( mc2.dimsKnown() )\n+ ret = new long[]{mc.getCols(), mc2.getCols(), -1};\n+ break;\n+ }\n+ default:\n+ throw new RuntimeException(\"Failed to infer worst-case size information \"\n+ + \"for type: \"+_dimsType.toString());\n+ }\n+ }\n+\n+ return ret;\n+ }\n+\n@Override\npublic void refreshSizeInformation() {\nswitch(_dimsType)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1687] Worst-case size propagation for fused codegen operators
This patch extends the generic codegen hop with the ability to propagate
worst-case size information according to its propagation types. |
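As a quick reference for the propagation types handled above: for a main input of size m x n (and a second input m2 x n2 where a rank is needed), each type yields the dimensions below. This is the same mapping as the new `inferOutputCharacteristics`, restated as plain Java without the Hop/MemoTable machinery:

```java
// Worst-case output dims by propagation type; nnz is always unknown (-1).
static long[] dims(String type, long m, long n, long n2, long dim2) {
    switch (type) {
        case "ROW_DIMS":         return new long[]{m, 1, -1};
        case "ROW_DIMS2":        return new long[]{m, 2, -1};
        case "COLUMN_DIMS_ROWS": return new long[]{n, 1, -1};
        case "COLUMN_DIMS_COLS": return new long[]{1, n, -1};
        case "INPUT_DIMS":       return new long[]{m, n, -1};
        case "SCALAR":           return new long[]{0, 0, -1};
        case "MULTI_SCALAR":     return new long[]{1, dim2, -1}; // dim2 set statically
        case "ROW_RANK_DIMS":    return new long[]{m, n2, -1};   // needs second input
        case "COLUMN_RANK_DIMS": return new long[]{n, n2, -1};   // needs second input
        default: throw new IllegalArgumentException(type);
    }
}
```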
49,738 | 14.06.2017 18:17:23 | 25,200 | 7602af94fbd9554f097a573bee87d1b51e709ccb | Fix validate conv2d_backward_data (size propagation) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassRemoveUnusedFunctions.java",
"new_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassRemoveUnusedFunctions.java",
"diff": "@@ -25,7 +25,6 @@ import java.util.Set;\nimport java.util.Map.Entry;\nimport org.apache.sysml.hops.HopsException;\n-import org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.FunctionStatementBlock;\nimport org.apache.sysml.parser.LanguageException;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"diff": "@@ -1125,10 +1125,13 @@ public class BuiltinFunctionExpression extends DataIdentifier\noutput.setBlockDimensions(input.getOutput().getRowsInBlock(), input.getOutput().getColumnsInBlock());\n// stride1, stride2, padding1, padding2, numImg, numChannels, imgSize, imgSize,\n// filter_shape1=1, filter_shape2=1, filterSize/poolSize1, filterSize/poolSize1\n- if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD ||\n- this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA) {\n+ if( getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD ) {\noutput.setDimensions(input.getOutput().getDim1(), input.getOutput().getDim2());\n}\n+ else if( getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA ) {\n+ //args[0] .. filter, args[1] .. input\n+ output.setDimensions(_args[1].getOutput().getDim1(), -1);\n+ }\nelse if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_FILTER) {\noutput.setDimensions(filter.getOutput().getDim1(), filter.getOutput().getDim2());\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1686] Fix validate conv2d_backward_data (size propagation) |
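The corrected branch reflects that `conv2d_backward_data(filter, dout, ...)` produces a gradient with the shape of the original convolution input, so its row count (the batch size N) comes from the second argument `dout`, not from the filter. In shape terms — a hedged sketch of the arithmetic, not the parser code:

```java
// Flattened NCHW shapes:
//   filter : F x (C*Hf*Wf)      (args[0])
//   dout   : N x (F*Hout*Wout)  (args[1])
//   output : N x (C*Hin*Win)    -- rows taken from dout
static long[] backwardDataDims(long doutRows) {
    return new long[]{doutRows, -1}; // -1: columns left unknown at validate time, as in the fix
}
```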
49,772 | 15.06.2017 15:20:47 | 25,200 | d49ab98116054c98aa65a1f0d173a9c181b1f32f | [MINOR] Update `nn` library formatting | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_lenet.dml",
"new_path": "scripts/nn/examples/mnist_lenet.dml",
"diff": "@@ -114,8 +114,8 @@ train = function(matrix[double] X, matrix[double] Y,\n# Compute forward pass\n## layer 1: conv1 -> relu1 -> pool1\n- [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride,\n- pad, pad)\n+ [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf,\n+ stride, stride, pad, pad)\noutr1 = relu::forward(outc1)\n[outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,\nstrideh=2, stridew=2, pad=0, pad=0)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/conv2d_transpose.dml",
"new_path": "scripts/nn/layers/conv2d_transpose.dml",
"diff": "@@ -63,25 +63,24 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b,\nHout = strideh * (Hin-1) - 2*padh + Hf + out_padh\nWout = stridew * (Win-1) - 2*padw + Wf + out_padw\n- /*\n- * Transpose convolution aims to go in the other direction of\n- * (direct) convolution, i.e., given input X, produce output O such\n- * that running convolution on O recovers X. This is achieved by\n- * conv2d_backward_data (since the derivative wrt data must produce\n- * output of same size as the input to conv2d). By reusing a built-in\n- * operator we achieve efficiency and restrict the number of built-in\n- * operators to manageable levels. Plus, most other deep-learning\n- * packages make use of the same strategy which means this\n- * implementation of transpose convolution is 'in-sync' with them.\n- *\n- * One potential downside of reusing conv2d_backward_data is the fact\n- * that it rotates the filter by 180 degrees before applying it. This\n- * needs to be kept in mind when interpreting the output of transpose\n- * convolution.\n- */\n+ # Transpose convolution aims to go in the other direction of\n+ # (direct) convolution, i.e., given input X, produce output O such\n+ # that running convolution on O recovers X. This is achieved by\n+ # conv2d_backward_data (since the derivative wrt data must produce\n+ # output of same size as the input to conv2d). By reusing a built-in\n+ # operator we achieve efficiency and restrict the number of built-in\n+ # operators to manageable levels. Plus, most other deep-learning\n+ # packages make use of the same strategy which means this\n+ # implementation of transpose convolution is 'in-sync' with them.\n+ #\n+ # One potential downside of reusing conv2d_backward_data is the fact\n+ # that it rotates the filter by 180 degrees before applying it. This\n+ # needs to be kept in mind when interpreting the output of transpose\n+ # convolution.\nout = conv2d_backward_data(W, X, stride=[strideh,stridew], padding=[padh,padw],\ninput_shape=[N,F,Hout,Wout], filter_shape=[C,F,Hf,Wf])\n+ # Add bias term to each output filter\nout = bias_add(out, b)\n}\n@@ -120,36 +119,33 @@ backward = function(matrix[double] dout, int Hout, int Wout,\nN = nrow(X)\nF = nrow(b)\n- /*\n- * conv2d_backward_filter takes the input and delta map as first and\n- * second args, respectively. Given that we need to compute the\n- * grad (wrt to filter) for transpose convolution where the roles of\n- * the input and output are reversed, we reverse the order of the\n- * args (along with setting input_shape to the delta map shape).\n- * Effectively, we are running a direct convolution with X as the\n- * filter and the dout as the input. To convince oneself that the\n- * interconnections between the cells of the filter, input and delta\n- * map are preserved please keep in mind that the forward of\n- * convolution transpose rotates the filter by 180 degrees before\n- * applying it.\n- */\n+ # conv2d_backward_filter takes the input and delta map as first and\n+ # second args, respectively. Given that we need to compute the\n+ # grad (wrt to filter) for transpose convolution where the roles of\n+ # the input and output are reversed, we reverse the order of the\n+ # args (along with setting input_shape to the delta map shape).\n+ # Effectively, we are running a direct convolution with X as the\n+ # filter and the dout as the input. 
To convince oneself that the\n+ # interconnections between the cells of the filter, input and delta\n+ # map are preserved please keep in mind that the forward of\n+ # convolution transpose rotates the filter by 180 degrees before\n+ # applying it.\ndW = conv2d_backward_filter(dout, X, stride=[strideh,stridew], padding=[padh,padw],\ninput_shape=[N,F,Hout,Wout], filter_shape=[C,F,Hf,Wf])\n- /*\n- * Since the forward for transpose convolution makes a call to\n- * conv2d_backward_data, to compute its derivative wrt to data\n- * we can run conv2d by applying the filter on the delta\n- * map (this makes sense because convolution transpose is the\n- * 'reverse' of convolution). Its easy to see that this will produce\n- * output of the required size. To convince oneself that conv2d will\n- * respect the interconnections between the cells in the delta map\n- * and the filter, keep in mind that the forward function rotates the\n- * filter by 180 degrees before applying it.\n- */\n+ # Since the forward for transpose convolution makes a call to\n+ # conv2d_backward_data, to compute its derivative wrt to data\n+ # we can run conv2d by applying the filter on the delta\n+ # map (this makes sense because convolution transpose is the\n+ # 'reverse' of convolution). Its easy to see that this will produce\n+ # output of the required size. To convince oneself that conv2d will\n+ # respect the interconnections between the cells in the delta map\n+ # and the filter, keep in mind that the forward function rotates the\n+ # filter by 180 degrees before applying it.\ndX = conv2d(dout, W, input_shape=[N,F,Hout,Wout], filter_shape=[C,F,Hf,Wf],\nstride=[strideh,stridew], padding=[padh,padw])\n+ # Partial derivatives for bias vector\ndb = rowSums(matrix(colSums(dout), rows=F, cols=Hout*Wout))\n}\n@@ -202,47 +198,40 @@ init_bilinear = function(int C, int K)\nvect = 1 - abs(seq(0, K-1) / factor_up - center)\nweights = matrix(vect %*% t(vect), rows=1, cols=K*K)\n- /*\n- * To create a multi-channel channel-independent upsampling filter,\n- * we need to intersperse the filter weights with 0s. For instance,\n- * consider the case of 2X upsampling. In this case, K=4 and we have\n- * K^2=16 weights to include into the 3D tensor representing the\n- * filter which should look like the following (assuming 3 channels):\n- *\n- * <-16 weights-> <---------32 0s--------->\n- * X X ...... X X 0 0 0 ............. 0 0 0\n- * 0 .......... 0 X X .... X X 0 ...... 0 0\n- * 0 0 0 ............... 0 0 0 X X .... X X\n- *\n- * To be clear, the second row should have 16 0s followed by 16\n- * weights followed by 16 0s.\n- *\n- * To create the above filter, we take advantage of the fact that\n- * between two sets of non-zero weights, there is always a sequence\n- * of C*K*K 0s. In the above example, C*K^2 = 48 (e.g., 32 trailing\n- * 0s in the first row and 16 leading 0s in the second row).\n- *\n- * Note that, in the special case of C=1 we do not need to\n- * intersperse with 0s (no question of being channel-wise independent\n- * since we have only 1 channel).\n- */\n- #if(C > 1){\n- /*\n- * Append C*K*K trailing 0s to the K*K kernel and replicate the\n- * resulting row C times\n- */\n+ # To create a multi-channel channel-independent upsampling filter,\n+ # we need to intersperse the filter weights with 0s. For instance,\n+ # consider the case of 2X upsampling. 
In this case, K=4 and we have\n+ # K^2=16 weights to include into the 3D tensor representing the\n+ # filter which should look like the following (assuming 3 channels):\n+ #\n+ # <-16 weights-> <---------32 0s--------->\n+ # X X ...... X X 0 0 0 ............. 0 0 0\n+ # 0 .......... 0 X X .... X X 0 ...... 0 0\n+ # 0 0 0 ............... 0 0 0 X X .... X X\n+ #\n+ # To be clear, the second row should have 16 0s followed by 16\n+ # weights followed by 16 0s.\n+ #\n+ # To create the above filter, we take advantage of the fact that\n+ # between two sets of non-zero weights, there is always a sequence\n+ # of C*K*K 0s. In the above example, C*K^2 = 48 (e.g., 32 trailing\n+ # 0s in the first row and 16 leading 0s in the second row).\n+ #\n+ # Note that, in the special case of C=1 we do not need to\n+ # intersperse with 0s (no question of being channel-wise independent\n+ # since we have only 1 channel).\n+\n+ # Append C*K*K trailing 0s to the K*K kernel and replicate the\n+ # resulting row C times\nrepl_weights = matrix(1, rows=C, cols=1) %*% cbind(weights, matrix(0, rows=1, cols=C*K*K))\n- /*\n- * The above operation added extra C*K*K trailing 0s in the last row\n- * that we do not need. Thus, we need to:\n- * 1) reshape the resulting matrix into a row\n- * 2) 'Clip off' the last few 0s using indexing and reshape the\n- * result into the expected filter shape ([C, C, K, K])\n- */\n+ # The above operation added extra C*K*K trailing 0s in the last row\n+ # that we do not need. Thus, we need to:\n+ # 1) reshape the resulting matrix into a row\n+ # 2) 'Clip off' the last few 0s using indexing and reshape the\n+ # result into the expected filter shape ([C, C, K, K])\nrepl_weights_row = matrix(repl_weights, rows=1, cols=C*(C+1)*K^2)\nW = matrix(repl_weights_row[1,1:(C*K)^2], rows=C, cols=C*K^2)\n- #}else W = weights\nb = matrix(0, rows=C, cols=1)\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update `nn` library formatting |
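The reflowed comments above rely on the transpose-convolution output size `Hout = strideh*(Hin-1) - 2*padh + Hf + out_padh` (and analogously for width). A quick numeric illustration of that formula in Java:

```java
// Output size of conv2d_transpose along one dimension
static long transposeOutSize(long in, long stride, long pad, long filter, long outPad) {
    return stride * (in - 1) - 2 * pad + filter + outPad;
}
// e.g. 2x upsampling: in=8, stride=2, pad=1, filter=4, outPad=0  ->  2*7 - 2 + 4 = 16
```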
49,772 | 15.06.2017 15:20:49 | 25,200 | 17838a3d34d6e3fa860222ff9af2e5c17f6c3a77 | [MINOR] Make use of util::channel_sums function in conv2d_builtin | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/conv2d_builtin.dml",
"new_path": "scripts/nn/layers/conv2d_builtin.dml",
"diff": "*\n* This implementation uses a built-in operator for higher performance.\n*/\n+source(\"nn/util.dml\") as util\nforward = function(matrix[double] X, matrix[double] W, matrix[double] b,\nint C, int Hin, int Win, int Hf, int Wf,\n@@ -127,7 +128,7 @@ backward = function(matrix[double] dout, int Hout, int Wout,\ninput_shape=[N,C,Hin,Win], filter_shape=[F,C,Hf,Wf])\n# Partial derivatives for bias vector\n- db = rowSums(matrix(colSums(dout), rows=F, cols=Hout*Wout))\n+ db = util::channel_sums(dout, F, Hout, Wout)\n}\ninit = function(int F, int C, int Hf, int Wf)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Make use of util::channel_sums function in conv2d_builtin |
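For reference, `util::channel_sums(dout, F, Hout, Wout)` computes, per output channel, the sum over all examples and spatial positions — the same value as the `rowSums(matrix(colSums(dout), ...))` expression it replaces. A minimal Java equivalent (names illustrative):

```java
// dout: N x (F*Hout*Wout) row-major; returns one sum per channel f
static double[] channelSums(double[][] dout, int F, int Hout, int Wout) {
    int HW = Hout * Wout;
    double[] db = new double[F];
    for (double[] row : dout)               // sum over examples N ...
        for (int f = 0; f < F; f++)
            for (int hw = 0; hw < HW; hw++) // ... and spatial positions
                db[f] += row[f * HW + hw];
    return db;
}
```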
49,710 | 15.06.2017 17:09:33 | 25,200 | c30757e909611705cf26b3bbbb71aff2123b5993 | Add snapshot version number to docs header
Closes Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/_config.yml",
"new_path": "docs/_config.yml",
"diff": "@@ -15,7 +15,7 @@ exclude:\n- lang-ref\n# These allow the documentation to be updated with newer releases\n-SYSTEMML_VERSION: Latest\n+SYSTEMML_VERSION: 1.0.0-SNAPSHOT\n# if 'analytics_on' is true, analytics section will be rendered on the HTML pages\nanalytics_on: true\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1694] Add snapshot version number to docs header
Closes #544. Closes #545. |
49,728 | 16.06.2017 09:57:55 | 25,200 | 948943d17f5dd97d0678cc9bd36224b41b0b9d97 | Update StepLinearReg to work with MLContext
Address issues with write statements and the reordering of coefficients.
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/StepLinearRegDS.dml",
"new_path": "scripts/algorithms/StepLinearRegDS.dml",
"diff": "# thr Double 0.01 Threshold to stop the algorithm: if the decrease in the value of AIC falls below thr\n# no further features are being checked and the algorithm stops\n# fmt String \"text\" Matrix output format for B (the betas) only, usually \"text\" or \"csv\"\n+# write_beta Boolean TRUE Should the beta's be returned?\n+# 0 = no\n+# 1 = yes\n# --------------------------------------------------------------------------------------------\n# OUTPUT: Matrix of regression parameters (the betas) and its size depend on icpt input value:\n# OUTPUT SIZE: OUTPUT CONTENTS: HOW TO PREDICT Y FROM X AND B:\n#\n# HOW TO INVOKE THIS SCRIPT - EXAMPLE:\n# hadoop jar SystemML.jar -f StepLinearRegDS.dml -nvargs X=INPUT_DIR/X Y=INPUT_DIR/Y B=OUTPUT_DIR/betas\n-# O=OUTPUT_DIR/stats S=OUTPUT_DIR/selected icpt=2 thr=0.01 fmt=csv\n+# O=OUTPUT_DIR/stats S=OUTPUT_DIR/selected icpt=2 thr=0.01 fmt=csv write_beta=TRUE\nfileX = $X;\nfileY = $Y;\nfileB = $B;\nfileS = $S;\n+write_beta = ifdef($write_beta, TRUE);\n+\n# currently only the forward selection strategy in supported: start from one feature and iteratively add\n# features until AIC improves\ndir = \"forward\";\nfmt = ifdef ($fmt, \"text\");\n-intercept_status = ifdef ($icpt, 0);\n-thr = ifdef ($thr, 0.01);\n+intercept_status = ifdef ($icpt, 1);\n+thr = ifdef ($thr, 0.001);\nprint (\"BEGIN STEPWISE LINEAR REGRESSION SCRIPT\");\nprint (\"Reading X and Y...\");\n@@ -96,7 +101,6 @@ m_orig = ncol (X_orig);\n# BEGIN STEPWISE LINEAR REGRESSION\nif (dir == \"forward\") {\n-\ncontinue = TRUE;\ncolumns_fixed = matrix (0, rows = 1, cols = m_orig);\ncolumns_fixed_ordered = matrix (0, rows = 1, cols = 1);\n@@ -108,16 +112,33 @@ if (dir == \"forward\") {\nbeta = mean (y);\nAIC_best = 2 + n * log(sum((beta - y)^2) / n);\n} else {\n- beta = 0;\n+ beta = 0.0;\nAIC_best = n * log(sum(y^2) / n);\n}\n+\nAICs = matrix (AIC_best, rows = 1, cols = m_orig);\nprint (\"Best AIC without any features: \" + AIC_best);\n+ boa_ncol = ncol(X_orig)\n+ if (intercept_status != 0) {\n+ boa_ncol = boa_ncol + 1\n+ }\n+\n+ beta_out_all = matrix(0, rows = boa_ncol, cols = m_orig * 1);\n+\n+ y_ncol = 1;\n+\n# First pass to examine single features\n- parfor (i in 1:m_orig) {\n- [AIC_1] = linear_regression (X_orig[,i], y, m_orig, columns_fixed_ordered, \" \");\n+ parfor (i in 1:m_orig, check = 0) {\n+ columns_fixed_ordered_1 = matrix(i, rows=1, cols=1);\n+\n+ [AIC_1, beta_out_i] = linear_regression (X_orig[, i], y, m_orig, columns_fixed_ordered_1,\n+ write_beta, 0);\n+\nAICs[1, i] = AIC_1;\n+\n+ beta_out_all[, (i - 1) * y_ncol + 1 : i * y_ncol] = beta_out_i[, 1:1];\n+\n}\n# Determine the best AIC\n@@ -130,9 +151,13 @@ if (dir == \"forward\") {\n}\n}\n+ # beta best so far\n+ beta_best = beta_out_all[, (column_best-1) * y_ncol + 1: column_best * y_ncol];\n+\nif (column_best == 0) {\n- print (\"AIC of an empty model is \" + AIC_best + \" and adding no feature achieves more than \" + (thr * 100) + \"% decrease in AIC!\");\n- S = matrix (0, rows=1, cols=1);\n+ print (\"AIC of an empty model is \" + AIC_best + \" and adding no feature achieves more than \" +\n+ (thr * 100) + \"% decrease in AIC!\");\n+ Selected = matrix (0, rows = 1, cols = 1);\nif (intercept_status == 0) {\nB = matrix (beta, rows = m_orig, cols = 1);\n} else {\n@@ -140,8 +165,12 @@ if (dir == \"forward\") {\nB_tmp[m_orig + 1, ] = beta;\nB = B_tmp;\n}\n- write (S, fileS, format=fmt);\n- write (B, fileB, format=fmt);\n+\n+ beta_out = B;\n+\n+ write(Selected, fileS, format=fmt);\n+ write(beta_out, fileB, format=fmt);\n+\nstop 
(\"\");\n}\nprint (\"Best AIC \" + AIC_best + \" achieved with feature: \" + column_best);\n@@ -151,13 +180,20 @@ if (dir == \"forward\") {\nwhile (continue) {\n# Subsequent passes over the features\n- parfor (i in 1:m_orig) {\n+ beta_out_all_2 = matrix(0, rows = boa_ncol, cols = m_orig * 1);\n+\n+ parfor (i in 1:m_orig, check = 0) {\nif (as.scalar(columns_fixed[1, i]) == 0) {\n# Construct the feature matrix\nX = cbind (X_global, X_orig[, i]);\n- [AIC_2] = linear_regression (X, y, m_orig, columns_fixed_ordered, \" \");\n+ tmp = matrix(0, rows=1, cols=1);\n+ tmp[1, 1] = i;\n+ columns_fixed_ordered_2 = append(columns_fixed_ordered, tmp )\n+ [AIC_2, beta_out_i] = linear_regression (X, y, m_orig, columns_fixed_ordered_2, write_beta, 0);\n+ beta_out_all_2[, (i - 1) * y_ncol + 1 : i * y_ncol] = beta_out_i[,1:1];\n+\nAICs[1, i] = AIC_2;\n}\n}\n@@ -165,17 +201,22 @@ if (dir == \"forward\") {\n# Determine the best AIC\nfor (k in 1:m_orig) {\nAIC_cur = as.scalar (AICs[1, k]);\n- if ( (AIC_cur < AIC_best) & ((AIC_best - AIC_cur) > abs (thr * AIC_best)) & (as.scalar(columns_fixed[1,k]) == 0) ) {\n+ if ( (AIC_cur < AIC_best) & ((AIC_best - AIC_cur) > abs (thr * AIC_best)) &\n+ (as.scalar(columns_fixed[1, k]) == 0) ) {\ncolumn_best = k;\nAIC_best = as.scalar(AICs[1, k]);\n}\n}\n- # cbind best found features (i.e., columns) to X_global\n+ # have the best beta store in the matrix\n+ beta_best = beta_out_all_2[, (column_best - 1) * y_ncol + 1 : column_best * y_ncol];\n+\n+ # Append best found features (i.e., columns) to X_global\nif (as.scalar(columns_fixed[1, column_best]) == 0) { # new best feature found\nprint (\"Best AIC \" + AIC_best + \" achieved with feature: \" + column_best);\ncolumns_fixed[1, column_best] = 1;\ncolumns_fixed_ordered = cbind (columns_fixed_ordered, as.matrix(column_best));\n+\nif (ncol(columns_fixed_ordered) == m_orig) { # all features examined\nX_global = cbind (X_global, X_orig[, column_best]);\ncontinue = FALSE;\n@@ -185,22 +226,33 @@ if (dir == \"forward\") {\n} else {\ncontinue = FALSE;\n}\n+\n}\n# run linear regression with selected set of features\nprint (\"Running linear regression with selected features...\");\n- [AIC] = linear_regression (X_global, y, m_orig, columns_fixed_ordered, fileB);\n+ [AIC, beta_out] = linear_regression (X_global, y, m_orig, columns_fixed_ordered, write_beta, 1);\n+\n+ Selected = columns_fixed_ordered;\n+ if (intercept_status != 0) {\n+ Selected = cbind(Selected, matrix(boa_ncol, rows=1, cols=1))\n+ }\n+\n+ beta_out = reorder_matrix(boa_ncol, beta_out, Selected);\n+\n+ write(Selected, fileS, format=fmt);\n+ write(beta_out, fileB, format=fmt);\n} else {\nstop (\"Currently only forward selection strategy is supported!\");\n}\n+# Computes linear regression using a direct solver for (X^T X) beta = X^T y.\n+# It also outputs the AIC of the computed model.\n-/*\n-* Computes linear regression using a direct solver for (X^T X) beta = X^T y.\n-* It also outputs the AIC of the computed model.\n-*/\n-linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig, Matrix[Double] Selected, String fileB) return (Double AIC) {\n+linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\n+ Matrix[Double] Selected, Boolean write_beta, Boolean writeStats)\n+ return (Double AIC, Matrix[Double] beta) {\nintercept_status = ifdef ($icpt, 0);\nfmt = ifdef ($fmt, \"text\");\n@@ -213,6 +265,7 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\nX = cbind (X, ones_n);\nm = m - 1;\n}\n+\nm_ext = 
ncol(X);\nif (intercept_status == 2) { # scale-&-shift X columns to mean 0, variance 1\n@@ -256,8 +309,7 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\neq_deg_of_freedom = m_ext;\nAIC = (2 * eq_deg_of_freedom) + n * log (ss_res / n);\n- if (fileB != \" \") {\n-\n+ if(write_beta == 1) {\nfileO = ifdef ($O, \" \");\nfileS = $S;\n@@ -312,78 +364,46 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\nstr = append (str, \"ADJUSTED_R2_VS_0,\" + adjusted_R2_vs_0); # Adjusted R^2 of residual with bias included vs. zero constant\n}\n- if (fileO != \" \") {\n+ if (fileO != \" \" & writeStats != 0) {\nwrite(str, fileO);\n} else {\nprint (str);\n+ print (\"\");\n}\n- # Prepare the output matrix\n- print (\"Writing the output matrix...\");\n- if (intercept_status == 2) {\n- beta_out = cbind (beta, beta_unscaled);\n- } else {\n- beta_out = beta;\n+ # TODO IMP NOTE: with the fix in PR-22, we have not accounted for\n+ # intercept=2 and # the code before # was not matching so we have removed it\n+ # for now. Pl see the git revision history and diff to see the changes.\n+ # in future we will have this feature. For now it is disabled\n}\n-\n- # Output which features give the best AIC and are being used for linear regression\n- write (Selected, fileS, format=fmt);\n-\n- no_selected = ncol (Selected);\n- max_selected = max (Selected);\n- last = max_selected + 1;\n-\n- if (intercept_status != 0) {\n-\n- Selected_ext = cbind (Selected, as.matrix (last));\n- P1 = table (seq (1, ncol (Selected_ext)), t(Selected_ext));\n-\n- if (intercept_status == 2) {\n-\n- P1_beta = P1 * beta;\n- P2_beta = colSums (P1_beta);\n- P1_beta_unscaled = P1 * beta_unscaled;\n- P2_beta_unscaled = colSums(P1_beta_unscaled);\n-\n- if (max_selected < m_orig) {\n- P2_beta = cbind (P2_beta, matrix (0, rows=1, cols=(m_orig - max_selected)));\n- P2_beta_unscaled = cbind (P2_beta_unscaled, matrix (0, rows=1, cols=(m_orig - max_selected)));\n-\n- P2_beta[1, m_orig+1] = P2_beta[1, max_selected + 1];\n- P2_beta[1, max_selected + 1] = 0;\n-\n- P2_beta_unscaled[1, m_orig+1] = P2_beta_unscaled[1, max_selected + 1];\n- P2_beta_unscaled[1, max_selected + 1] = 0;\n}\n- beta_out = cbind (t(P2_beta), t(P2_beta_unscaled));\n-\n- } else {\n- P1_beta = P1 * beta;\n- P2_beta = colSums (P1_beta);\n- if (max_selected < m_orig) {\n- P2_beta = cbind (P2_beta, matrix (0, rows=1, cols=(m_orig - max_selected)));\n- P2_beta[1, m_orig+1] = P2_beta[1, max_selected + 1] ;\n- P2_beta[1, max_selected + 1] = 0;\n- }\n- beta_out = t(P2_beta);\n+reorder_matrix = function(\n+ double ncolX, # number of column in X, inlcuding the intercept column\n+ matrix[double] B, # beta\n+ matrix[double] S # Selected\n+) return (matrix[double] Y) {\n+ # This function assumes that B and S have same number of elements.\n+ # if the intercept is included in the model, all inputs should be adjusted\n+ # appropriately before calling this function.\n+ S = t(S);\n+ num_empty_B = ncolX - nrow(B);\n+ if (num_empty_B < 0) {\n+ stop(\"Error: unable to re-order the matrix. 
Reason: B more than matrix X\");\n}\n- } else {\n- P1 = table (seq (1, no_selected), t(Selected));\n- P1_beta = P1 * beta;\n- P2_beta = colSums (P1_beta);\n-\n- if (max_selected < m_orig) {\n- P2_beta = cbind (P2_beta, matrix (0, rows=1, cols=(m_orig - max_selected)));\n+ if (num_empty_B > 0) {\n+ pad_zeros = matrix(0, rows = num_empty_B, cols=1);\n+ B = rbind(B, pad_zeros);\n+ S = rbind(S, pad_zeros);\n}\n- beta_out = t(P2_beta);\n- }\n+ # since the table won't accept zeros as index we hack it.\n+ S0 = replace(target = S, pattern = 0, replacement = ncolX+1);\n+ seqS = seq(1, nrow(S0));\n+ P = table(seqS, S0, ncolX, ncolX);\n- write ( beta_out, fileB, format=fmt );\n- }\n+ Y = t(P) %*% B;\n}\n-\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1647] Update StepLinearReg to work with MLContext
Address issues with write statements and the reordering of coefficients.
Closes #525. |
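The `reorder_matrix` helper added above scatters the betas of the selected columns back into their original positions of a length-`ncolX` coefficient vector, padding the rest with zeros (the intercept, if any, is mapped to the last slot, and the `replace`-to-`ncolX+1` trick drops the zero-padding entries of `Selected`). A hedged Java rendition of the same scatter:

```java
// B[k] is the coefficient of original column S[k] (1-based); all others become 0.
static double[] reorder(int ncolX, double[] B, int[] S) {
    double[] y = new double[ncolX];
    for (int k = 0; k < S.length; k++)
        if (S[k] >= 1 && S[k] <= ncolX)  // S[k] == 0 padding entries are dropped,
            y[S[k] - 1] = B[k];          // mirroring the out-of-range table() index
    return y;
}
```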
49,738 | 15.06.2017 23:57:44 | 25,200 | ddcb9e0190989ea1837af8dd44676d52497c3e15 | Fix robustness codegen row-wise (unknowns, scalar/vect) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -75,7 +75,7 @@ public class TemplateCell extends TemplateBase\n@Override\npublic boolean open(Hop hop) {\n- return isValidOperation(hop)\n+ return hop.dimsKnown() && isValidOperation(hop)\n|| (hop instanceof IndexingOp && (((IndexingOp)hop)\n.isColLowerEqualsUpper() || hop.getDim2()==1));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"diff": "@@ -73,9 +73,9 @@ public class TemplateRow extends TemplateBase\n@Override\npublic boolean open(Hop hop) {\n- return (hop instanceof BinaryOp && hop.getInput().get(0).getDim2()>1\n+ return (hop instanceof BinaryOp && hop.dimsKnown() && hop.getInput().get(0).getDim2()>1\n&& hop.getInput().get(1).getDim2()==1 && TemplateCell.isValidOperation(hop))\n- || (hop instanceof AggBinaryOp && hop.getDim2()==1\n+ || (hop instanceof AggBinaryOp && hop.dimsKnown() && hop.getDim2()==1\n&& hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1)\n|| (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol\n&& hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1\n@@ -164,7 +164,7 @@ public class TemplateRow extends TemplateBase\nMemoTableEntry me = memo.getBest(hop.getHopID(), TemplateType.RowTpl);\nfor( int i=0; i<hop.getInput().size(); i++ ) {\nHop c = hop.getInput().get(i);\n- if( me.isPlanRef(i) )\n+ if( me!=null && me.isPlanRef(i) )\nrConstructCplan(c, memo, tmp, inHops, inHops2, compileLiterals);\nelse {\nCNodeData cdata = TemplateUtils.createCNodeData(c, compileLiterals);\n@@ -258,7 +258,8 @@ public class TemplateRow extends TemplateBase\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\n// if one input is a matrix then we need to do vector by scalar operations\n- if(hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1 )\n+ if( (hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1)\n+ || (hop.getInput().get(1).getDim1() > 1 && hop.getInput().get(1).getDim2() > 1))\n{\nif( HopRewriteUtils.isBinary(hop, SUPPORTED_VECT_BINARY) ) {\nif( TemplateUtils.isMatrix(cdata1) && TemplateUtils.isMatrix(cdata2) ) {\n@@ -267,6 +268,8 @@ public class TemplateRow extends TemplateBase\n}\nelse {\nString opname = \"VECT_\"+((BinaryOp)hop).getOp().name()+\"_SCALAR\";\n+ if( TemplateUtils.isColVector(cdata1) )\n+ cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\nif( TemplateUtils.isColVector(cdata2) )\ncdata2 = new CNodeUnary(cdata2, UnaryType.LOOKUP_R);\nout = new CNodeBinary(cdata1, cdata2, BinType.valueOf(opname));\n@@ -281,7 +284,9 @@ public class TemplateRow extends TemplateBase\nString primitiveOpName = ((BinaryOp)hop).getOp().toString();\nif( TemplateUtils.isColVector(cdata1) )\ncdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- if( TemplateUtils.isColVector(cdata2) )\n+ if( TemplateUtils.isColVector(cdata2) //vector or vector can be inferred from lhs\n+ || (TemplateUtils.isColVector(hop.getInput().get(0)) && cdata2 instanceof CNodeData\n+ && hop.getInput().get(1).getDataType().isMatrix()))\ncdata2 = new CNodeUnary(cdata2, UnaryType.LOOKUP_R);\nout = new CNodeBinary(cdata1, cdata2, BinType.valueOf(primitiveOpName));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -65,6 +65,11 @@ public class TemplateUtils\n|| hop.getDim1() == 1 && hop.getDim2() != 1 ) );\n}\n+ public static boolean isColVector(Hop hop) {\n+ return (hop.getDataType() == DataType.MATRIX\n+ && hop.getDim1() != 1 && hop.getDim2() == 1 );\n+ }\n+\npublic static boolean isColVector(CNode hop) {\nreturn (hop.getDataType() == DataType.MATRIX\n&& hop.getNumRows() != 1 && hop.getNumCols() == 1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1702] Fix robustness codegen row-wise (unknowns, scalar/vect) |
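The guards added above all reduce to simple shape tests: templates only open on hops with known dimensions, a binary op falls back to a vector-scalar kernel whenever either operand is a full matrix, and column vectors get an extra per-row lookup (`LOOKUP_R`) so they can be consumed as scalars inside a row operation. The tests themselves, restated as a standalone Java sketch of the `TemplateUtils`-style helpers:

```java
static boolean dimsKnown(long rows, long cols)   { return rows > 0 && cols > 0; }
static boolean isMatrix(long rows, long cols)    { return rows > 1 && cols > 1; }
static boolean isColVector(long rows, long cols) { return rows != 1 && cols == 1; }
// matrix op matrix     -> vector-vector kernel
// matrix op col-vector -> vector-scalar kernel, with a row lookup on the vector side
```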
49,736 | 16.06.2017 10:14:44 | 28,800 | 933a17c5f38ac71d2aa3e855da1e38797fe6603d | Allow for `allreduce_parallel_batches` for multi-GPU training
Introduced `allreduce_parallel_batches` for multi-GPU training as per
Mike's suggestion.
Moved `train_algo` and `test_algo` from solver specification to Python API
to conform with Caffe as per Berthold's suggestion.
Updated the documentation for Caffe2DML.
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/beginners-guide-caffe2dml.md",
"new_path": "docs/beginners-guide-caffe2dml.md",
"diff": "@@ -94,6 +94,8 @@ lenet.setStatistics(True).setExplain(True)\n# If you want to force GPU execution. Please make sure the required dependency are available.\n# lenet.setGPU(True).setForceGPU(True)\n+# Example usage of train_algo, test_algo. Assume 2 gpus on driver\n+# lenet.set(train_algo=\"allreduce_parallel_batches\", test_algo=\"minibatch\", parallel_batches=2)\n# (Optional but recommended) Enable native BLAS.\nlenet.setConfigProperty(\"native.blas\", \"auto\")\n@@ -108,6 +110,16 @@ lenet.predict(X_test)\nFor more detail on enabling native BLAS, please see the documentation for the [native backend](http://apache.github.io/systemml/native-backend).\n+Common settings for `train_algo` and `test_algo` parameters:\n+\n+| | PySpark script | Changes to Network/Solver |\n+|--------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|\n+| Single-node CPU execution (similar to Caffe with solver_mode: CPU) | `caffe2dml.set(train_algo=\"minibatch\", test_algo=\"minibatch\")` | Ensure that `batch_size` is set to appropriate value (for example: 64) |\n+| Single-node single-GPU execution | `caffe2dml.set(train_algo=\"minibatch\", test_algo=\"minibatch\").setGPU(True).setForceGPU(True)` | Ensure that `batch_size` is set to appropriate value (for example: 64) |\n+| Single-node multi-GPU execution (similar to Caffe with solver_mode: GPU) | `caffe2dml.set(train_algo=\"allreduce_parallel_batches\", test_algo=\"minibatch\", parallel_batches=num_gpu).setGPU(True).setForceGPU(True)` | Ensure that `batch_size` is set to appropriate value (for example: 64) |\n+| Distributed prediction | `caffe2dml.set(test_algo=\"allreduce\")` | |\n+| Distributed synchronous training | `caffe2dml.set(train_algo=\"allreduce_parallel_batches\", parallel_batches=num_cluster_cores)` | Ensure that `batch_size` is set to appropriate value (for example: 64) |\n+\n## Frequently asked questions\n#### What is the purpose of Caffe2DML API ?\n@@ -283,3 +295,12 @@ train_df.write.parquet('kaggle-cats-dogs.parquet')\nThough we recommend using Caffe2DML via its Python interfaces, it is possible to use it by creating an object of the class\n`org.apache.sysml.api.dl.Caffe2DML`. It is important to note that Caffe2DML's scala API is packaged in `systemml-*-extra.jar`.\n+\n+\n+#### How can I view the script generated by Caffe2DML ?\n+\n+To view the generated DML script (and additional debugging information), please set the `debug` parameter to True.\n+\n+```python\n+caffe2dmlObject.set(debug=True)\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/proto/caffe/caffe.proto",
"new_path": "src/main/proto/caffe/caffe.proto",
"diff": "@@ -98,7 +98,7 @@ message NetParameter {\n// NOTE\n// Update the next available ID when you add a new SolverParameter field.\n//\n-// SolverParameter next available ID: 43 (last added: test_algo)\n+// SolverParameter next available ID: 42 (last added: layer_wise_reduce)\nmessage SolverParameter {\n//////////////////////////////////////////////////////////////////////////////\n// Specifying the train and test networks\n@@ -114,10 +114,6 @@ message SolverParameter {\n// A test_level and/or a test_stage may also be specified for each test_net.\n//////////////////////////////////////////////////////////////////////////////\n- // SystemML extension\n- optional string train_algo = 41 [default = \"minibatch\"];\n- optional string test_algo = 42 [default = \"minibatch\"];\n-\n// Proto filename for the train net, possibly combined with one or more\n// test nets.\noptional string net = 24;\n@@ -132,8 +128,7 @@ message SolverParameter {\n// The states for the train/test nets. Must be unspecified or\n// specified once per net.\n//\n- // By default, all states will have solver = true;\n- // train_state will have phase = TRAIN,\n+ // By default, train_state will have phase = TRAIN,\n// and all test_state's will have phase = TEST.\n// Other defaults are set according to the NetState defaults.\noptional NetState train_state = 26;\n@@ -243,6 +238,9 @@ message SolverParameter {\n}\n// DEPRECATED: use type instead of solver_type\noptional SolverType solver_type = 30 [default = SGD];\n+\n+ // Overlap compute and communication for data parallel training\n+ optional bool layer_wise_reduce = 41 [default = true];\n}\n// A message that stores the solver snapshots\n@@ -422,7 +420,7 @@ message TransformationParameter {\noptional uint32 crop_size = 3 [default = 0];\n// mean_file and mean_value cannot be specified at the same time\noptional string mean_file = 4;\n- // if specified can be repeated once (would substract it from all the channels)\n+ // if specified can be repeated once (would subtract it from all the channels)\n// or can be repeated the same number of times as channels\n// (would subtract them from the corresponding channel)\nrepeated float mean_value = 5;\n@@ -438,7 +436,7 @@ message LossParameter {\noptional int32 ignore_label = 1;\n// How to normalize the loss for loss layers that aggregate across batches,\n// spatial dimensions, or other dimensions. Currently only implemented in\n- // SoftmaxWithLoss layer.\n+ // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers.\nenum NormalizationMode {\n// Divide by the number of examples in the batch times spatial dimensions.\n// Outputs that receive the ignore label will NOT be ignored in computing\n@@ -452,6 +450,8 @@ message LossParameter {\n// Do not normalize the loss.\nNONE = 3;\n}\n+ // For historical reasons, the default normalization for\n+ // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID.\noptional NormalizationMode normalization = 3 [default = VALID];\n// Deprecated. Ignored if normalization is specified. If normalization\n// is not specified, then setting this to false will be equivalent to\n@@ -502,11 +502,21 @@ message ConcatParameter {\n}\nmessage BatchNormParameter {\n- // If false, accumulate global mean/variance values via a moving average. 
If\n- // true, use those accumulated values instead of computing mean/variance\n- // across the batch.\n+ // If false, normalization is performed over the current mini-batch\n+ // and global statistics are accumulated (but not yet used) by a moving\n+ // average.\n+ // If true, those accumulated mean and variance values are used for the\n+ // normalization.\n+ // By default, it is set to false when the network is in the training\n+ // phase and true when the network is in the testing phase.\noptional bool use_global_stats = 1;\n- // How much does the moving average decay each iteration?\n+ // What fraction of the moving average remains each iteration?\n+ // Smaller values make the moving average decay faster, giving more\n+ // weight to the recent values.\n+ // Each iteration updates the moving average @f$S_{t-1}@f$ with the\n+ // current mean @f$ Y_t @f$ by\n+ // @f$ S_t = (1-\\beta)Y_t + \\beta \\cdot S_{t-1} @f$, where @f$ \\beta @f$\n+ // is the moving_average_fraction parameter.\noptional float moving_average_fraction = 2 [default = .999];\n// Small value to add to the variance estimate so that we don't divide by\n// zero.\n@@ -657,8 +667,8 @@ message DataParameter {\noptional bool mirror = 6 [default = false];\n// Force the encoded image to have 3 color channels\noptional bool force_encoded_color = 9 [default = false];\n- // Prefetch queue (Number of batches to prefetch to host memory, increase if\n- // data access bandwidth varies).\n+ // Prefetch queue (Increase if data feeding bandwidth varies, within the\n+ // limit of device memory for GPU training)\noptional uint32 prefetch = 10 [default = 4];\n}\n@@ -805,6 +815,7 @@ message ImageDataParameter {\nmessage InfogainLossParameter {\n// Specify the infogain matrix source.\noptional string source = 1;\n+ optional int32 axis = 2 [default = 1]; // axis of prob\n}\nmessage InnerProductParameter {\n@@ -927,9 +938,7 @@ message PythonParameter {\n// string, dictionary in Python dict format, JSON, etc. You may parse this\n// string in `setup` method and use it in `forward` and `backward`.\noptional string param_str = 3 [default = ''];\n- // Whether this PythonLayer is shared among worker solvers during data parallelism.\n- // If true, each worker solver sequentially run forward from this layer.\n- // This value should be set true if you are using it as a data layer.\n+ // DEPRECATED\noptional bool share_in_parallel = 4 [default = false];\n}\n@@ -1398,6 +1407,6 @@ message PReLUParameter {\n// Initial value of a_i. Default is a_i=0.25 for all i.\noptional FillerParameter filler = 1;\n- // Whether or not slope paramters are shared across channels.\n+ // Whether or not slope parameters are shared across channels.\noptional bool channel_shared = 2 [default = false];\n}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mllearn/estimators.py",
"new_path": "src/main/python/systemml/mllearn/estimators.py",
"diff": "@@ -737,15 +737,20 @@ class Caffe2DML(BaseSystemMLClassifier):\nif ignore_weights is not None:\nself.estimator.setWeightsToIgnore(ignore_weights)\n- def set(self, num_classes=None, debug=None):\n+ def set(self, debug=None, train_algo=None, test_algo=None, parallel_batches=None):\n\"\"\"\nSet input to Caffe2DML\nParameters\n----------\ndebug: to add debugging DML code such as classification report, print DML script, etc (default: False)\n+ train_algo: can be minibatch, batch, allreduce_parallel_batches or allreduce (default: minibatch)\n+ test_algo: can be minibatch, batch, allreduce_parallel_batches or allreduce (default: minibatch)\n\"\"\"\nif debug is not None: self.estimator.setInput(\"$debug\", str(debug).upper())\n+ if train_algo is not None: self.estimator.setInput(\"$train_algo\", str(train_algo).lower())\n+ if test_algo is not None: self.estimator.setInput(\"$test_algo\", str(test_algo).lower())\n+ if parallel_batches is not None: self.estimator.setInput(\"$parallel_batches\", str(parallel_batches))\nreturn self\ndef visualize(self, layerName=None, varType='weight', aggFn='mean'):\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala",
"diff": "@@ -182,6 +182,9 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\n// Method called by Python mllearn to visualize variable of certain layer\ndef visualizeLayer(layerName:String, varType:String, aggFn:String): Unit = visualizeLayer(net, layerName, varType, aggFn)\n+ def getTrainAlgo():String = if(inputs.containsKey(\"$train_algo\")) inputs.get(\"$train_algo\") else \"minibatch\"\n+ def getTestAlgo():String = if(inputs.containsKey(\"$test_algo\")) inputs.get(\"$test_algo\") else \"minibatch\"\n+\n// ================================================================================================\n// The below method parses the provided network and solver file and generates DML script.\ndef getTrainingScript(isSingleNode:Boolean):(Script, String, String) = {\n@@ -209,47 +212,97 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\n// ----------------------------------------------------------------------------\n// Main logic\nforBlock(\"e\", \"1\", \"max_epochs\") {\n- solverParam.getTrainAlgo.toLowerCase match {\n+ getTrainAlgo.toLowerCase match {\ncase \"minibatch\" =>\nforBlock(\"i\", \"1\", \"num_iters_per_epoch\") {\ngetTrainingBatch(tabDMLScript)\n- tabDMLScript.append(\"iter = start_iter + i\\n\")\n+ tabDMLScript.append(\"iter = iter + 1\\n\")\n+ // -------------------------------------------------------\n+ // Perform forward, backward and update on minibatch\nforward; backward; update\n+ // -------------------------------------------------------\ndisplayLoss(lossLayers(0), shouldValidate)\nperformSnapshot\n}\ncase \"batch\" => {\n- tabDMLScript.append(\"iter = start_iter + i\\n\")\n+ tabDMLScript.append(\"iter = iter + 1\\n\")\n+ // -------------------------------------------------------\n+ // Perform forward, backward and update on entire dataset\nforward; backward; update\n+ // -------------------------------------------------------\ndisplayLoss(lossLayers(0), shouldValidate)\nperformSnapshot\n+ }\n+ case \"allreduce_parallel_batches\" => {\n+ // This setting uses the batch size provided by the user\n+ if(!inputs.containsKey(\"$parallel_batches\")) {\n+ throw new RuntimeException(\"The parameter parallel_batches is required for allreduce_parallel_batches\")\n+ }\n+ // The user specifies the number of parallel_batches\n+ // This ensures that the user of generated script remembers to provide the commandline parameter $parallel_batches\n+ assign(tabDMLScript, \"parallel_batches\", \"$parallel_batches\")\n+ assign(tabDMLScript, \"group_batch_size\", \"parallel_batches*\" + Caffe2DML.batchSize)\n+ assign(tabDMLScript, \"groups\", \"as.integer(ceil(\" + Caffe2DML.numImages + \"/group_batch_size))\")\n+ // Grab groups of mini-batches\n+ forBlock(\"g\", \"1\", \"groups\") {\n+ tabDMLScript.append(\"iter = iter + 1\\n\")\n+ // Get next group of mini-batches\n+ assign(tabDMLScript, \"group_beg\", \"((g-1) * group_batch_size) %% \" + Caffe2DML.numImages + \" + 1\")\n+ assign(tabDMLScript, \"group_end\", \"min(\" + Caffe2DML.numImages + \", group_beg + group_batch_size - 1)\")\n+ assign(tabDMLScript, \"X_group_batch\", Caffe2DML.X + \"[group_beg:group_end,]\")\n+ assign(tabDMLScript, \"y_group_batch\", Caffe2DML.y + \"[group_beg:group_end,]\")\n+ initializeGradients(\"parallel_batches\")\n+ parForBlock(\"j\", \"1\", \"parallel_batches\") {\n+ // Get a mini-batch in this group\n+ assign(tabDMLScript, \"beg\", \"((j-1) * \" + Caffe2DML.batchSize + \") %% nrow(X_group_batch) + 1\")\n+ assign(tabDMLScript, \"end\", 
\"min(nrow(X_group_batch), beg + \" + Caffe2DML.batchSize + \" - 1)\")\n+ assign(tabDMLScript, \"Xb\", \"X_group_batch[beg:end,]\")\n+ assign(tabDMLScript, \"yb\", \"y_group_batch[beg:end,]\")\n+ forward; backward\n+ flattenGradients\n+ }\n+ aggregateAggGradients\n+ update\n+ // -------------------------------------------------------\n+ assign(tabDMLScript, \"Xb\", \"X_group_batch\")\n+ assign(tabDMLScript, \"yb\", \"y_group_batch\")\n+ displayLoss(lossLayers(0), shouldValidate)\n+ performSnapshot\n+ }\n}\ncase \"allreduce\" => {\n+ // This is distributed synchronous gradient descent\nforBlock(\"i\", \"1\", \"num_iters_per_epoch\") {\n- getTrainingBatch(tabDMLScript)\n- assign(tabDMLScript, \"X_group_batch\", \"Xb\")\n- assign(tabDMLScript, \"y_group_batch\", \"yb\")\n- tabDMLScript.append(\"iter = start_iter + i\\n\")\n- initAggGradients\n- parForBlock(\"j\", \"1\", \"nrow(y_group_batch)\") {\n+ tabDMLScript.append(\"iter = iter + 1\\n\")\n+ // -------------------------------------------------------\n+ // Perform forward, backward and update on minibatch in parallel\n+ assign(tabDMLScript, \"beg\", \"((i-1) * \" + Caffe2DML.batchSize + \") %% \" + Caffe2DML.numImages + \" + 1\")\n+ assign(tabDMLScript, \"end\", \" min(beg + \" + Caffe2DML.batchSize + \" - 1, \" + Caffe2DML.numImages + \")\")\n+ assign(tabDMLScript, \"X_group_batch\", Caffe2DML.X + \"[beg:end,]\")\n+ assign(tabDMLScript, \"y_group_batch\", Caffe2DML.y + \"[beg:end,]\")\n+ tabDMLScript.append(\"local_batch_size = nrow(y_group_batch)\\n\")\n+ val localBatchSize = \"local_batch_size\"\n+ initializeGradients(localBatchSize)\n+ parForBlock(\"j\", \"1\", localBatchSize) {\nassign(tabDMLScript, \"Xb\", \"X_group_batch[j,]\")\nassign(tabDMLScript, \"yb\", \"y_group_batch[j,]\")\n- forward; backward(\"_agg\")\n- flattenAndStoreAggGradients_j\n+ forward; backward\n+ flattenGradients\n}\naggregateAggGradients\n- tabDMLScript.append(\"iter = start_iter + parallel_batches\\n\")\nupdate\n+ // -------------------------------------------------------\n+ assign(tabDMLScript, \"Xb\", \"X_group_batch\")\n+ assign(tabDMLScript, \"yb\", \"y_group_batch\")\ndisplayLoss(lossLayers(0), shouldValidate)\nperformSnapshot\n}\n}\n- case _ => throw new DMLRuntimeException(\"Unsupported train algo:\" + solverParam.getTrainAlgo)\n+ case _ => throw new DMLRuntimeException(\"Unsupported train algo:\" + getTrainAlgo)\n}\n// After every epoch, update the learning rate\ntabDMLScript.append(\"# Learning rate\\n\")\nlrPolicy.updateLearningRate(tabDMLScript)\n- tabDMLScript.append(\"start_iter = start_iter + num_iters_per_epoch\\n\")\n}\n// ----------------------------------------------------------------------------\n@@ -308,6 +361,8 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\nprivate def displayLoss(lossLayer:IsLossLayer, shouldValidate:Boolean):Unit = {\nif(solverParam.getDisplay > 0) {\n// Append the DML to compute training loss\n+ if(!getTrainAlgo.toLowerCase.startsWith(\"allreduce\")) {\n+ // Compute training loss for allreduce\ntabDMLScript.append(\"# Compute training loss & accuracy\\n\")\nifBlock(\"iter %% \" + solverParam.getDisplay + \" == 0\") {\nassign(tabDMLScript, \"loss\", \"0\"); assign(tabDMLScript, \"accuracy\", \"0\")\n@@ -318,19 +373,26 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\nappendTrainingVisualizationBody(dmlScript, numTabs)\nprintClassificationReport\n}\n+ }\n+ else {\n+ Caffe2DML.LOG.info(\"Training loss is not printed for train_algo=\" + getTrainAlgo)\n+ 
}\nif(shouldValidate) {\n+ if( getTrainAlgo.toLowerCase.startsWith(\"allreduce\") &&\n+ getTestAlgo.toLowerCase.startsWith(\"allreduce\")) {\n+ Caffe2DML.LOG.warn(\"The setting: train_algo=\" + getTrainAlgo + \" and test_algo=\" + getTestAlgo + \" is not recommended. Consider changing test_algo=minibatch\")\n+ }\n// Append the DML to compute validation loss\nval numValidationBatches = if(solverParam.getTestIterCount > 0) solverParam.getTestIter(0) else 0\ntabDMLScript.append(\"# Compute validation loss & accuracy\\n\")\nifBlock(\"iter %% \" + solverParam.getTestInterval + \" == 0\") {\nassign(tabDMLScript, \"loss\", \"0\"); assign(tabDMLScript, \"accuracy\", \"0\")\n- solverParam.getTestAlgo.toLowerCase match {\n+ getTestAlgo.toLowerCase match {\ncase \"minibatch\" => {\nassign(tabDMLScript, \"validation_loss\", \"0\")\nassign(tabDMLScript, \"validation_accuracy\", \"0\")\nforBlock(\"iVal\", \"1\", \"num_iters_per_epoch\") {\ngetValidationBatch(tabDMLScript)\n- tabDMLScript.append(\"iter = start_iter + i\\n\")\nforward; lossLayer.computeLoss(dmlScript, numTabs)\ntabDMLScript.append(\"validation_loss = validation_loss + loss\\n\")\ntabDMLScript.append(\"validation_accuracy = validation_accuracy + accuracy\\n\")\n@@ -344,7 +406,60 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\nassign(tabDMLScript, \"validation_loss\", \"loss\"); assign(tabDMLScript, \"validation_accuracy\", \"accuracy\")\n}\n- case _ => throw new DMLRuntimeException(\"Unsupported test algo:\" + solverParam.getTestAlgo)\n+ case \"allreduce_parallel_batches\" => {\n+ // This setting uses the batch size provided by the user\n+ if(!inputs.containsKey(\"$parallel_batches\")) {\n+ throw new RuntimeException(\"The parameter parallel_batches is required for allreduce_parallel_batches\")\n+ }\n+ // The user specifies the number of parallel_batches\n+ // This ensures that the user of generated script remembers to provide the commandline parameter $parallel_batches\n+ assign(tabDMLScript, \"parallel_batches_val\", \"$parallel_batches\")\n+ assign(tabDMLScript, \"group_batch_size_val\", \"parallel_batches_val*\" + Caffe2DML.batchSize)\n+ assign(tabDMLScript, \"groups_val\", \"as.integer(ceil(\" + Caffe2DML.numValidationImages + \"/group_batch_size_val))\")\n+ assign(tabDMLScript, \"validation_accuracy\", \"0\")\n+ assign(tabDMLScript, \"validation_loss\", \"0\")\n+ // Grab groups of mini-batches\n+ forBlock(\"g_val\", \"1\", \"groups_val\") {\n+ assign(tabDMLScript, \"group_beg_val\", \"((g_val-1) * group_batch_size_val) %% \" + Caffe2DML.numValidationImages + \" + 1\")\n+ assign(tabDMLScript, \"group_end_val\", \"min(\" + Caffe2DML.numValidationImages + \", group_beg_val + group_batch_size_val - 1)\")\n+ assign(tabDMLScript, \"X_group_batch_val\", Caffe2DML.XVal + \"[group_beg_val:group_end_val,]\")\n+ assign(tabDMLScript, \"y_group_batch_val\", Caffe2DML.yVal + \"[group_beg_val:group_end_val,]\")\n+ assign(tabDMLScript, \"group_validation_loss\", matrix(\"0\", \"parallel_batches_val\", \"1\"))\n+ assign(tabDMLScript, \"group_validation_accuracy\", matrix(\"0\", \"parallel_batches_val\", \"1\"))\n+ // Run graph on each mini-batch in this group in parallel (ideally on multiple GPUs)\n+ parForBlock(\"iVal\", \"1\", \"parallel_batches_val\") {\n+ assign(tabDMLScript, \"beg_val\", \"((iVal-1) * \" + Caffe2DML.batchSize + \") %% nrow(y_group_batch_val) + 1\")\n+ assign(tabDMLScript, \"end_val\", \"min(nrow(y_group_batch_val), beg_val + \" + Caffe2DML.batchSize + \" - 1)\")\n+ assign(tabDMLScript, \"Xb\", 
\"X_group_batch_val[beg_val:end_val,]\")\n+ assign(tabDMLScript, \"yb\", \"y_group_batch_val[beg_val:end_val,]\")\n+ net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))\n+ lossLayer.computeLoss(dmlScript, numTabs)\n+ assign(tabDMLScript, \"group_validation_loss[iVal,1]\", \"loss\")\n+ assign(tabDMLScript, \"group_validation_accuracy[iVal,1]\", \"accuracy\")\n+ }\n+ assign(tabDMLScript, \"validation_loss\", \"validation_loss + sum(group_validation_loss)\")\n+ assign(tabDMLScript, \"validation_accuracy\", \"validation_accuracy + sum(group_validation_accuracy)\")\n+ }\n+ assign(tabDMLScript, \"validation_accuracy\", \"validation_accuracy/groups_val\")\n+ }\n+ case \"allreduce\" => {\n+ // This setting doesnot use the batch size for validation and allows the parfor optimizer to select plan\n+ // by minimizing the memory requirement (i.e. batch size = 1)\n+ assign(tabDMLScript, \"group_validation_loss\", matrix(\"0\", Caffe2DML.numValidationImages, \"1\"))\n+ assign(tabDMLScript, \"group_validation_accuracy\", matrix(\"0\", Caffe2DML.numValidationImages, \"1\"))\n+ parForBlock(\"iVal\", \"1\", Caffe2DML.numValidationImages) {\n+ assign(tabDMLScript, \"Xb\", Caffe2DML.XVal + \"[iVal,]\")\n+ assign(tabDMLScript, \"yb\", Caffe2DML.yVal + \"[iVal,]\")\n+ net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))\n+ lossLayer.computeLoss(dmlScript, numTabs)\n+ assign(tabDMLScript, \"group_validation_loss[iVal,1]\", \"loss\")\n+ assign(tabDMLScript, \"group_validation_accuracy[iVal,1]\", \"accuracy\")\n+ }\n+ assign(tabDMLScript, \"validation_loss\", \"sum(group_validation_loss)\")\n+ assign(tabDMLScript, \"validation_accuracy\", \"mean(group_validation_accuracy)\")\n+ }\n+\n+ case _ => throw new DMLRuntimeException(\"Unsupported test algo:\" + getTestAlgo)\n}\ntabDMLScript.append(print( dmlConcat( asDMLString(\"Iter:\"), \"iter\",\nasDMLString(\", validation loss:\"), \"validation_loss\", asDMLString(\", validation accuracy:\"), \"validation_accuracy\" )))\n@@ -368,23 +483,22 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\ntabDMLScript.append(\"# Perform forward pass\\n\")\nnet.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))\n}\n- private def backward():Unit = backward(\"\")\n- private def backward(suffix:String):Unit = {\n+ private def backward():Unit = {\ntabDMLScript.append(\"# Perform backward pass\\n\")\n- net.getLayers.reverse.map(layer => net.getCaffeLayer(layer).backward(tabDMLScript, suffix))\n+ net.getLayers.reverse.map(layer => net.getCaffeLayer(layer).backward(tabDMLScript, \"\"))\n}\nprivate def update():Unit = {\ntabDMLScript.append(\"# Update the parameters\\n\")\nnet.getLayers.map(layer => solver.update(tabDMLScript, net.getCaffeLayer(layer)))\n}\n- private def initAggGradients():Unit = {\n- tabDMLScript.append(\"# Data structure to store gradients computed in parallel\")\n+ private def initializeGradients(parallel_batches:String):Unit = {\n+ tabDMLScript.append(\"# Data structure to store gradients computed in parallel\\n\")\nnet.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {\n- if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + \"_agg\", matrix(\"0\", \"parallel_batches\", multiply(nrow(l.weight), ncol(l.weight))))\n- if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias + \"_agg\", matrix(\"0\", \"parallel_batches\", multiply(nrow(l.bias), ncol(l.bias))))\n+ if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + \"_agg\", matrix(\"0\", 
parallel_batches, multiply(nrow(l.weight), ncol(l.weight))))\n+ if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias + \"_agg\", matrix(\"0\", parallel_batches, multiply(nrow(l.bias), ncol(l.bias))))\n})\n}\n- private def flattenAndStoreAggGradients_j():Unit = {\n+ private def flattenGradients():Unit = {\ntabDMLScript.append(\"# Flatten and store gradients for this parallel execution\\n\")\nnet.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {\nif(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + \"_agg[j,]\",\n@@ -404,7 +518,7 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\n}\n// Set iteration-related variables such as max_epochs, num_iters_per_epoch, lr, etc.\ndef setIterationVariables():Unit = {\n- solverParam.getTrainAlgo.toLowerCase match {\n+ getTrainAlgo.toLowerCase match {\ncase \"batch\" =>\nassign(tabDMLScript, \"max_epochs\", solverParam.getMaxIter.toString)\ncase _ => {\n@@ -412,14 +526,13 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\nceilDivide(tabDMLScript, \"max_epochs\", solverParam.getMaxIter.toString, \"num_iters_per_epoch\")\n}\n}\n- assign(tabDMLScript, \"start_iter\", \"0\")\n+ assign(tabDMLScript, \"iter\", \"0\")\nassign(tabDMLScript, \"lr\", solverParam.getBaseLr.toString)\n}\n// -------------------------------------------------------------------------------------------\n}\n-class Caffe2DMLModel(val mloutput: MLResults,\n- val numClasses:String, val sc: SparkContext, val solver:CaffeSolver,\n+class Caffe2DMLModel(val numClasses:String, val sc: SparkContext, val solver:CaffeSolver,\nval net:CaffeNetwork, val lrPolicy:LearningRatePolicy,\nval estimator:Caffe2DML)\nextends Model[Caffe2DMLModel] with HasMaxOuterIter with BaseSystemMLClassifierModel with DMLGenerator {\n@@ -427,14 +540,14 @@ class Caffe2DMLModel(val mloutput: MLResults,\n// Invoked by Python, MLPipeline\nval uid:String = \"caffe_model_\" + (new Random).nextLong\ndef this(estimator:Caffe2DML) = {\n- this(null, Utils.numClasses(estimator.net), estimator.sc, estimator.solver,\n+ this(Utils.numClasses(estimator.net), estimator.sc, estimator.solver,\nestimator.net,\n// new CaffeNetwork(estimator.solverParam.getNet, caffe.Caffe.Phase.TEST, estimator.numChannels, estimator.height, estimator.width),\nestimator.lrPolicy, estimator)\n}\noverride def copy(extra: org.apache.spark.ml.param.ParamMap): Caffe2DMLModel = {\n- val that = new Caffe2DMLModel(mloutput, numClasses, sc, solver, net, lrPolicy, estimator)\n+ val that = new Caffe2DMLModel(numClasses, sc, solver, net, lrPolicy, estimator)\ncopyValues(that, extra)\n}\n// --------------------------------------------------------------\n@@ -459,11 +572,9 @@ class Caffe2DMLModel(val mloutput: MLResults,\nassign(tabDMLScript, \"X\", \"X_full\")\n// Initialize the layers and solvers. 
Reads weights and bias if readWeights is true.\n- val readWeights = {\n- if(mloutput == null && estimator.inputs.containsKey(\"$weights\")) true\n- else if(mloutput == null) throw new DMLRuntimeException(\"Cannot call predict/score without calling either fit or by providing weights\")\n- else false\n- }\n+ if(!estimator.inputs.containsKey(\"$weights\") && estimator.mloutput == null)\n+ throw new DMLRuntimeException(\"Cannot call predict/score without calling either fit or by providing weights\")\n+ val readWeights = estimator.inputs.containsKey(\"$weights\") || estimator.mloutput != null\ninitWeights(net, solver, readWeights)\n// Donot update mean and variance in batchnorm\n@@ -472,7 +583,7 @@ class Caffe2DMLModel(val mloutput: MLResults,\nval lossLayers = getLossLayers(net)\nassign(tabDMLScript, \"Prob\", matrix(\"0\", Caffe2DML.numImages, numClasses))\n- estimator.solverParam.getTestAlgo.toLowerCase match {\n+ estimator.getTestAlgo.toLowerCase match {\ncase \"minibatch\" => {\nceilDivide(tabDMLScript(), \"num_iters\", Caffe2DML.numImages, Caffe2DML.batchSize)\nforBlock(\"i\", \"1\", \"num_iters\") {\n@@ -486,15 +597,41 @@ class Caffe2DMLModel(val mloutput: MLResults,\nnet.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))\nassign(tabDMLScript, \"Prob\", lossLayers(0).out)\n}\n- case \"allreduce\" => {\n- ceilDivide(tabDMLScript(), \"num_iters\", Caffe2DML.numImages, Caffe2DML.batchSize)\n- parForBlock(\"i\", \"1\", \"num_iters\") {\n- getTestBatch(tabDMLScript)\n+ case \"allreduce_parallel_batches\" => {\n+ // This setting uses the batch size provided by the user\n+ if(!estimator.inputs.containsKey(\"$parallel_batches\")) {\n+ throw new RuntimeException(\"The parameter parallel_batches is required for allreduce_parallel_batches\")\n+ }\n+ // The user specifies the number of parallel_batches\n+ // This ensures that the user of generated script remembers to provide the commandline parameter $parallel_batches\n+ assign(tabDMLScript, \"parallel_batches\", \"$parallel_batches\")\n+ assign(tabDMLScript, \"group_batch_size\", \"parallel_batches*\" + Caffe2DML.batchSize)\n+ assign(tabDMLScript, \"groups\", \"as.integer(ceil(\" + Caffe2DML.numImages + \"/group_batch_size))\")\n+ // Grab groups of mini-batches\n+ forBlock(\"g\", \"1\", \"groups\") {\n+ assign(tabDMLScript, \"group_beg\", \"((g-1) * group_batch_size) %% \" + Caffe2DML.numImages + \" + 1\")\n+ assign(tabDMLScript, \"group_end\", \"min(\" + Caffe2DML.numImages + \", group_beg + group_batch_size - 1)\")\n+ assign(tabDMLScript, \"X_group_batch\", \"X_full[group_beg:group_end,]\")\n+ // Run graph on each mini-batch in this group in parallel (ideally on multiple GPUs)\n+ parForBlock(\"j\", \"1\", \"parallel_batches\") {\n+ assign(tabDMLScript, \"beg\", \"((j-1) * \" + Caffe2DML.batchSize + \") %% nrow(X_group_batch) + 1\")\n+ assign(tabDMLScript, \"end\", \"min(nrow(X_group_batch), beg + \" + Caffe2DML.batchSize + \" - 1)\")\n+ assign(tabDMLScript, \"Xb\", \"X_group_batch[beg:end,]\")\nnet.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))\nassign(tabDMLScript, \"Prob[beg:end,]\", lossLayers(0).out)\n}\n}\n- case _ => throw new DMLRuntimeException(\"Unsupported test algo:\" + estimator.solverParam.getTestAlgo)\n+ }\n+ case \"allreduce\" => {\n+ // This setting doesnot use the batch size for scoring and allows the parfor optimizer to select plan\n+ // by minimizing the memory requirement (i.e. 
batch size = 1)\n+ parForBlock(\"i\", \"1\", Caffe2DML.numImages) {\n+ assign(tabDMLScript, \"Xb\", \"X_full[i,]\")\n+ net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))\n+ assign(tabDMLScript, \"Prob[i,]\", lossLayers(0).out)\n+ }\n+ }\n+ case _ => throw new DMLRuntimeException(\"Unsupported test algo:\" + estimator.getTestAlgo)\n}\nval predictionScript = dmlScript.toString()\n@@ -505,10 +642,10 @@ class Caffe2DMLModel(val mloutput: MLResults,\nupdateMeanVarianceForBatchNorm(net, true)\nval script = dml(predictionScript).out(\"Prob\").in(estimator.inputs)\n- if(mloutput != null) {\n+ if(estimator.mloutput != null) {\n// fit was called\n- net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.in(l.weight, mloutput.getMatrix(l.weight)))\n- net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.in(l.bias, mloutput.getMatrix(l.bias)))\n+ net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.in(l.weight, estimator.mloutput.getMatrix(l.weight)))\n+ net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.in(l.bias, estimator.mloutput.getMatrix(l.bias)))\n}\n(script, \"X_full\")\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] Allow for `allreduce_parallel_batches` for multi-GPU training
- Introduced `allreduce_parallel_batches` for multi-GPU training as per
Mike's suggestion.
- Moved `train_algo` and `test_algo` from solver specification to Python API
to conform with Caffe as per Berthold's suggestion.
- Updated the documentation for Caffe2DML.
Closes #543. |
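For intuition, the batch-grouping index arithmetic that `allreduce_parallel_batches` emits as DML can be sketched in plain Java (the class and the constant values below are illustrative stand-ins, not SystemML code):

```java
// Illustrative sketch of the allreduce_parallel_batches index math; the
// real implementation generates equivalent DML via forBlock/parForBlock.
public class BatchGroupingSketch {
    public static void main(String[] args) {
        int numImages = 1000, batchSize = 64, parallelBatches = 4; // assumed values
        int groupBatchSize = parallelBatches * batchSize;
        int groups = (int) Math.ceil((double) numImages / groupBatchSize);
        for (int g = 1; g <= groups; g++) {
            // 1-based boundaries of the current group of mini-batches (as in DML)
            int groupBeg = ((g - 1) * groupBatchSize) % numImages + 1;
            int groupEnd = Math.min(numImages, groupBeg + groupBatchSize - 1);
            int groupRows = groupEnd - groupBeg + 1;
            // each parallel batch processes one mini-batch of the group; the
            // per-batch gradients are flattened and aggregated before the update
            for (int j = 1; j <= parallelBatches; j++) {
                int beg = ((j - 1) * batchSize) % groupRows + 1;
                int end = Math.min(groupRows, beg + batchSize - 1);
                System.out.println("group " + g + ", batch " + j + ": rows [" + beg + "," + end + "]");
            }
        }
    }
}
```

Note that within the last group the mini-batch ranges wrap modulo the group size, mirroring the `%% nrow(X_group_batch)` term in the generated DML.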
49,738 | 16.06.2017 21:21:12 | 25,200 | 1001b3faac4accdfbb5f98a39ac3233befe2d1d7 | [MINOR] Fix performance dnn matrix multiply integration (nnz handling)
This minor patch avoids unnecessary recomputation of the number of
non-zeros for dense temporary inputs to our Java matrix multiplications.
On a single epoch of lenet over mnist, this improved performance
slightly from 298s to 290s (average over 3 runs, w/ codegen enabled). | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNHelper.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNHelper.java",
"diff": "@@ -302,10 +302,8 @@ public class LibMatrixDNNHelper {\nstatic void singleThreadedMatMult(MatrixBlock m1, MatrixBlock m2, MatrixBlock ret,\nboolean recomputeNNZM1, boolean recomputeNNZM2, ConvolutionParameters params) throws DMLRuntimeException {\nif(!params.enableNative || m1.isInSparseFormat() || m2.isInSparseFormat()) {\n- if(recomputeNNZM1)\n- m1.recomputeNonZeros();\n- if(recomputeNNZM2)\n- m2.recomputeNonZeros();\n+ prepNonZerosForMatrixMult(m1, recomputeNNZM1);\n+ prepNonZerosForMatrixMult(m2, recomputeNNZM2);\nLibMatrixMult.matrixMult(m1, m2, ret, false);\n}\nelse {\n@@ -539,4 +537,15 @@ public class LibMatrixDNNHelper {\n}\n}\n}\n+\n+ private static void prepNonZerosForMatrixMult(MatrixBlock mb, boolean update) {\n+ if( !update )\n+ return;\n+ //non-zeros are not evaluated for dense matrix multiplies\n+ //so we simply need to ensure the block is not marked empty\n+ if( !mb.isInSparseFormat() )\n+ mb.setNonZeros(mb.getNumRows() * mb.getNumColumns());\n+ else\n+ mb.recomputeNonZeros();\n+ }\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix performance dnn matrix multiply integration (nnz handling)
This minor patch avoids unnecessary recomputation of the number of
non-zeros for dense temporary inputs to our Java matrix multiplications.
On a single epoch of lenet over mnist, this improved performance
slightly from 298s to 290s (average over 3 runs, w/ codegen enabled). |
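To see why the change matters, a hypothetical micro-benchmark (not SystemML code): an exact non-zero count of a dense block costs a full scan per multiply, whereas the fix only marks the block non-empty with an O(1) upper bound:

```java
// Hypothetical micro-benchmark: recomputing nnz of a dense block is a full
// O(rows*cols) scan, while the fix merely sets an upper bound, which is O(1).
public class NnzPrepSketch {
    public static void main(String[] args) {
        double[] dense = new double[2000 * 2000];
        java.util.Arrays.fill(dense, 1.0);

        long t0 = System.nanoTime();
        long nnz = 0; // old behavior: exact count on every matrix multiply
        for (double v : dense)
            if (v != 0) nnz++;
        long scanUs = (System.nanoTime() - t0) / 1000;

        // new behavior: dense multiply kernels only need "not empty",
        // so rows*cols is a sufficient (and free) non-zero bound
        long upperBound = dense.length;

        System.out.println("scanned nnz=" + nnz + " in " + scanUs
            + " us; O(1) upper bound=" + upperBound);
    }
}
```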
49,738 | 16.06.2017 22:12:55 | 25,200 | 723a7517ab937096135e911631c18188a634a922 | Fix parfor data partitioning (access pattern analysis)
This patch fixes null pointer exceptions on parfor data partitioning
rewrites w/ complex indexing expressions that cannot be parsed into a
linear function for analysis (e.g., in case of variables). | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java",
"new_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java",
"diff": "@@ -1894,7 +1894,7 @@ public class ParForStatementBlock extends ForStatementBlock\n{\nret = rParseBinaryExpression((BinaryExpression) l);\nLong cvalR = parseLongConstant(r);\n- if( cvalR != null )\n+ if( ret != null && cvalR != null )\nret.addConstant(cvalR);\nelse\nreturn null;\n@@ -1903,7 +1903,7 @@ public class ParForStatementBlock extends ForStatementBlock\n{\nret = rParseBinaryExpression((BinaryExpression) r);\nLong cvalL = parseLongConstant(l);\n- if( cvalL != null )\n+ if( ret != null && cvalL != null )\nret.addConstant(cvalL);\nelse\nreturn null;\n@@ -1926,20 +1926,20 @@ public class ParForStatementBlock extends ForStatementBlock\nif( l instanceof BinaryExpression)\n{\nret = rParseBinaryExpression((BinaryExpression) l);\n- //change to plus\n- Long cvalR = parseLongConstant(r);\n- ret.addConstant(cvalR*(-1));\n+ if( ret != null ) //change to plus\n+ ret.addConstant(parseLongConstant(r)*(-1));\n}\nelse if (r instanceof BinaryExpression)\n{\nret = rParseBinaryExpression((BinaryExpression) r);\n- //change to plus\n+ if( ret != null ) { //change to plus\nret._a*=(-1);\nfor( int i=0; i<ret._b.length; i++ )\nret._b[i]*=(-1);\nLong cvalL = parseLongConstant(l);\nret.addConstant(cvalL);\n}\n+ }\nelse // atomic case\n{\n//change everything to plus\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java",
"diff": "@@ -116,7 +116,7 @@ import org.apache.sysml.yarn.ropt.YarnClusterAnalyzer;\n* the independent iterations in parallel. See ParForStatementBlock for the loop dependency\n* analysis. At runtime level, iterations are guaranteed to be completely independent.\n*\n- * NEW FUNCTIONALITIES (not for BI 2.0 release)\n+ * NEW FUNCTIONALITIES\n* TODO: reduction variables (operations: +=, -=, /=, *=, min, max)\n* TODO: papply(A,1:2,FUN) language construct (compiled to ParFOR) via DML function repository => modules OK, but second-order functions required\n*\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1715] Fix parfor data partitioning (access pattern analysis)
This patch fixes null pointer exceptions on parfor data partitioning
rewrites w/ complex indexing expressions that cannot be parsed into a
linear function for analysis (e.g., in case of variables). |
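The essence of the fix is a null-guard in a recursive expression parser; the self-contained sketch below uses hypothetical stand-ins for SystemML's expression and linear-function classes:

```java
// Hedged sketch of the null-guard pattern: a recursive parse that can fail
// (return null) must be checked before folding constants into its result.
public class LinearParseSketch {
    static class Expr {}
    static class Const extends Expr { final long v; Const(long v) { this.v = v; } }
    static class Var extends Expr {} // e.g., a variable index: not parseable
    static class Plus extends Expr {
        final Expr l, r;
        Plus(Expr l, Expr r) { this.l = l; this.r = r; }
    }
    static class LinearFn {
        long constant;
        LinearFn(long c) { constant = c; }
        void addConstant(long c) { constant += c; }
    }

    static LinearFn parse(Expr e) {
        if (e instanceof Plus) {
            Plus p = (Plus) e;
            LinearFn ret = parse(p.l);
            Long cval = (p.r instanceof Const) ? Long.valueOf(((Const) p.r).v) : null;
            if (ret != null && cval != null) { // the fix: guard ret as well
                ret.addConstant(cval);
                return ret;
            }
            return null; // unparseable subtree -> whole expression unknown
        }
        return (e instanceof Const) ? new LinearFn(((Const) e).v) : null;
    }

    public static void main(String[] args) {
        System.out.println(parse(new Plus(new Var(), new Const(3))));             // null, no NPE
        System.out.println(parse(new Plus(new Const(2), new Const(3))).constant); // 5
    }
}
```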
49,738 | 17.06.2017 14:12:03 | 25,200 | 72645d391782096b222b7568077a99ba185cfbf6 | [MINOR] Fix frame read/write test base directory | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/frame/FrameReadWriteTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/frame/FrameReadWriteTest.java",
"diff": "@@ -219,20 +219,11 @@ public class FrameReadWriteTest extends AutomatedTestBase\n}\n}\n- /**\n- *\n- * @param frame1\n- * @param frame2\n- * @param fprop\n- * @return\n- * @throws DMLRuntimeException, IOException\n- */\n-\nvoid writeAndVerifyData(OutputInfo oinfo, FrameBlock frame1, FrameBlock frame2, CSVFileFormatProperties fprop)\nthrows DMLRuntimeException, IOException\n{\n- String fname1 = TEST_DIR + \"/frameData1\";\n- String fname2 = TEST_DIR + \"/frameData2\";\n+ String fname1 = SCRIPT_DIR + TEST_DIR + \"/frameData1\";\n+ String fname2 = SCRIPT_DIR + TEST_DIR + \"/frameData2\";\n//Create reader/writer\nFrameWriter writer = FrameWriterFactory.createFrameWriter(oinfo, fprop);\n@@ -252,5 +243,4 @@ public class FrameReadWriteTest extends AutomatedTestBase\nMapReduceTool.deleteFileIfExistOnHDFS(fname1);\nMapReduceTool.deleteFileIfExistOnHDFS(fname2);\n}\n-\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix frame read/write test base directory |
49,738 | 17.06.2017 14:41:35 | 25,200 | a625c6423655210fa91b41162734c9214d3aaa0d | Fix IPA nnz propagation w/ multiple function calls | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ipa/FunctionCallSizeInfo.java",
"new_path": "src/main/java/org/apache/sysml/hops/ipa/FunctionCallSizeInfo.java",
"diff": "@@ -55,8 +55,8 @@ public class FunctionCallSizeInfo\n//indicators for which function arguments of valid functions it\n//is safe to propagate the number of non-zeros\n- //(mapping from function keys to set of function input HopIDs)\n- private final Map<String, Set<Long>> _fcandSafeNNZ;\n+ //(mapping from function keys to set of function input positions)\n+ private final Map<String, Set<Integer>> _fcandSafeNNZ;\n//indicators which literal function arguments can be safely\n//propagated into and replaced in the respective functions\n@@ -90,7 +90,7 @@ public class FunctionCallSizeInfo\n_fgraph = fgraph;\n_fcand = new HashSet<String>();\n_fcandUnary = new HashSet<String>();\n- _fcandSafeNNZ = new HashMap<String, Set<Long>>();\n+ _fcandSafeNNZ = new HashMap<String, Set<Integer>>();\n_fSafeLiterals = new HashMap<String, Set<Integer>>();\nconstructFunctionCallSizeInfo();\n@@ -176,12 +176,12 @@ public class FunctionCallSizeInfo\n* number of non-zeros.\n*\n* @param fkey function key\n- * @param inputHopID hop ID of the input\n+ * @param pos function input position\n* @return true if nnz can safely be propagated\n*/\n- public boolean isSafeNnz(String fkey, long inputHopID) {\n+ public boolean isSafeNnz(String fkey, int pos) {\nreturn _fcandSafeNNZ.containsKey(fkey)\n- && _fcandSafeNNZ.get(fkey).contains(inputHopID);\n+ && _fcandSafeNNZ.get(fkey).contains(pos);\n}\n/**\n@@ -254,12 +254,13 @@ public class FunctionCallSizeInfo\n//(considered for valid functions only)\nfor( String fkey : _fcand ) {\nFunctionOp first = _fgraph.getFunctionCalls(fkey).get(0);\n- HashSet<Long> tmp = new HashSet<Long>();\n- for( Hop input : first.getInput() ) {\n+ HashSet<Integer> tmp = new HashSet<Integer>();\n+ for( int j=0; j<first.getInput().size(); j++ ) {\n//if nnz known it is safe to propagate those nnz because for multiple calls\n//we checked of equivalence and hence all calls have the same nnz\n+ Hop input = first.getInput().get(0);\nif( input.getNnz()>=0 )\n- tmp.add(input.getHopID());\n+ tmp.add(j);\n}\n_fcandSafeNNZ.put(fkey, tmp);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java",
"new_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java",
"diff": "@@ -541,7 +541,7 @@ public class InterProceduralAnalysis\nMatrixObject mo = new MatrixObject(ValueType.DOUBLE, null);\nMatrixCharacteristics mc = new MatrixCharacteristics( input.getDim1(), input.getDim2(),\nConfigurationManager.getBlocksize(), ConfigurationManager.getBlocksize(),\n- fcallSizes.isSafeNnz(fkey, input.getHopID())?input.getNnz():-1 );\n+ fcallSizes.isSafeNnz(fkey, i)?input.getNnz():-1 );\nMatrixFormatMetaData meta = new MatrixFormatMetaData(mc,null,null);\nmo.setMetaData(meta);\nvars.put(dat.getName(), mo);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/IPANnzPropagationTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n+\n+\n+public class IPANnzPropagationTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"IPANnzPropagation1\";\n+ private final static String TEST_NAME2 = \"IPANnzPropagation2\";\n+ private final static String TEST_DIR = \"functions/misc/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + IPANnzPropagationTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[]{}));\n+ addTestConfiguration(TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[]{}));\n+ }\n+\n+ @Test\n+ public void testNnzPropgationPositive() {\n+ runIPANnzPropgationTest(TEST_NAME1);\n+ }\n+\n+ @Test\n+ public void testNnzPropgationNegative() {\n+ runIPANnzPropgationTest(TEST_NAME2);\n+ }\n+\n+\n+ private void runIPANnzPropgationTest(String testname)\n+ {\n+ // Save old settings\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+\n+ try\n+ {\n+ // Setup test\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{\"-stats\", \"-explain\", \"recompile_hops\"};\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+ rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK;\n+\n+ runTest(true, false, null, -1);\n+\n+ //check for propagated nnz\n+ checkNumCompiledSparkInst(testname.equals(TEST_NAME1) ? 0 : 1);\n+ checkNumExecutedSparkInst(0);\n+ }\n+ finally {\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/misc/IPANnzPropagation1.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo = function(matrix[double] X) return (double sum) {\n+ if( 1==1 ) {}\n+ sum = sum(X);\n+}\n+\n+X = rand(rows=1000, cols=1000000000, sparsity=1e-6)\n+s1 = foo(X);\n+s2 = foo(X);\n+print(s1+\" \"+s2);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/misc/IPANnzPropagation2.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+foo = function(matrix[double] X) return (double sum) {\n+ if( 1==1 ) {}\n+ sum = sum(X);\n+}\n+\n+X = rand(rows=1000, cols=1000000000, sparsity=1e-6)\n+s1 = foo(X);\n+X = rand(rows=1000, cols=1000000000, sparsity=1e-7)\n+s2 = foo(X);\n+print(s1+\" \"+s2);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java",
"diff": "@@ -36,6 +36,7 @@ import org.junit.runners.Suite;\nInvalidFunctionSignatureTest.class,\nIPAConstantFoldingScalarVariablePropagationTest.class,\nIPALiteralReplacementTest.class,\n+ IPANnzPropagationTest.class,\nIPAScalarRecursionTest.class,\nIPAScalarVariablePropagationTest.class,\nIPAUnknownRecursionTest.class,\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1699] Fix IPA nnz propagation w/ multiple function calls |
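A condensed sketch of the corrected bookkeeping: safety of nnz propagation is now keyed by argument position rather than by the hop IDs of the first call's inputs, so it resolves consistently across all calls of a function (names are illustrative):

```java
import java.util.*;

// Illustrative: record per function the argument POSITIONS whose nnz is
// safe to propagate; positions (unlike hop IDs) match every call site.
public class SafeNnzSketch {
    private final Map<String, Set<Integer>> safeNnz = new HashMap<>();

    void analyze(String fkey, long[] inputNnz) {
        Set<Integer> safe = new HashSet<>();
        for (int j = 0; j < inputNnz.length; j++)
            if (inputNnz[j] >= 0)          // nnz known (and equal across calls)
                safe.add(j);
        safeNnz.put(fkey, safe);
    }

    boolean isSafeNnz(String fkey, int pos) {
        return safeNnz.containsKey(fkey) && safeNnz.get(fkey).contains(pos);
    }

    public static void main(String[] args) {
        SafeNnzSketch s = new SafeNnzSketch();
        s.analyze("foo", new long[]{1000, -1});  // nnz known for arg 0 only
        System.out.println(s.isSafeNnz("foo", 0) + " " + s.isSafeNnz("foo", 1));
    }
}
```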
49,738 | 17.06.2017 19:51:30 | 25,200 | 23a164a83c480dc78b2df9da099a5140c7572b7e | Fix codegen rowwise correctness scalar-vector ops
This patch fixes result correctness issues of the codegen row-wise
template for scalar-vector operations if the vector is extracted from
side inputs. An example where the underlying issue led to wrong
algorithm results was lenet over mnist. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"diff": "@@ -67,7 +67,7 @@ public class CNodeBinary extends CNode\nreturn ssComm || vsComm || vvComm;\n}\n- public String getTemplate(boolean sparse) {\n+ public String getTemplate(boolean sparse, boolean scalarVector) {\nswitch (this) {\ncase DOT_PRODUCT:\nreturn sparse ? \" double %TMP% = LibSpoofPrimitives.dotProduct(%IN1v%, %IN2%, %IN1i%, %POS1%, %POS2%, %LEN%);\\n\" :\n@@ -88,6 +88,10 @@ public class CNodeBinary extends CNode\ncase VECT_GREATER_ADD:\ncase VECT_GREATEREQUAL_ADD: {\nString vectName = getVectorPrimitiveName();\n+ if( scalarVector )\n+ return sparse ? \" LibSpoofPrimitives.vect\"+vectName+\"Add(%IN1%, %IN2v%, %OUT%, %IN2i%, %POS2%, %POSOUT%, %LEN%);\\n\" :\n+ \" LibSpoofPrimitives.vect\"+vectName+\"Add(%IN1%, %IN2%, %OUT%, %POS2%, %POSOUT%, %LEN%);\\n\";\n+ else\nreturn sparse ? \" LibSpoofPrimitives.vect\"+vectName+\"Add(%IN1v%, %IN2%, %OUT%, %IN1i%, %POS1%, %POSOUT%, %LEN%);\\n\" :\n\" LibSpoofPrimitives.vect\"+vectName+\"Add(%IN1%, %IN2%, %OUT%, %POS1%, %POSOUT%, %LEN%);\\n\";\n}\n@@ -107,6 +111,10 @@ public class CNodeBinary extends CNode\ncase VECT_GREATER_SCALAR:\ncase VECT_GREATEREQUAL_SCALAR: {\nString vectName = getVectorPrimitiveName();\n+ if( scalarVector )\n+ return sparse ? \" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1%, %IN2v%, %IN2i%, %POS2%, %LEN%);\\n\" :\n+ \" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1%, %IN2%, %POS2%, %LEN%);\\n\";\n+ else\nreturn sparse ? \" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1v%, %IN2%, %IN1i%, %POS1%, %LEN%);\\n\" :\n\" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1%, %IN2%, %POS1%, %LEN%);\\n\";\n}\n@@ -239,8 +247,10 @@ public class CNodeBinary extends CNode\nboolean lsparse = sparse && (_inputs.get(0) instanceof CNodeData\n&& !_inputs.get(0).getVarname().startsWith(\"b\")\n&& !_inputs.get(0).isLiteral());\n+ boolean scalarVector = (_inputs.get(0).getDataType().isScalar()\n+ && _inputs.get(1).getDataType().isMatrix());\nString var = createVarname();\n- String tmp = _type.getTemplate(lsparse);\n+ String tmp = _type.getTemplate(lsparse, scalarVector);\ntmp = tmp.replaceAll(\"%TMP%\", var);\n//replace input references and start indexes\n@@ -346,8 +356,9 @@ public class CNodeBinary extends CNode\ncase VECT_LESSEQUAL_ADD:\ncase VECT_GREATER_ADD:\ncase VECT_GREATEREQUAL_ADD:\n- _rows = _inputs.get(1)._rows;\n- _cols = _inputs.get(1)._cols;\n+ boolean vectorScalar = _inputs.get(1).getDataType()==DataType.SCALAR;\n+ _rows = _inputs.get(vectorScalar ? 0 : 1)._rows;\n+ _cols = _inputs.get(vectorScalar ? 0 : 1)._cols;\n_dataType= DataType.MATRIX;\nbreak;\n@@ -377,8 +388,9 @@ public class CNodeBinary extends CNode\ncase VECT_LESSEQUAL:\ncase VECT_GREATER:\ncase VECT_GREATEREQUAL:\n- _rows = _inputs.get(0)._rows;\n- _cols = _inputs.get(0)._cols;\n+ boolean scalarVector = (_inputs.get(0).getDataType()==DataType.SCALAR);\n+ _rows = _inputs.get(scalarVector ? 1 : 0)._rows;\n+ _cols = _inputs.get(scalarVector ? 1 : 0)._cols;\n_dataType= DataType.MATRIX;\nbreak;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1714] Fix codegen rowwise correctness scalar-vector ops
This patch fixes result correctness issues of the codegen row-wise
template for scalar-vector operations if the vector is extracted from
side inputs. An example where the underlying issue led to wrong
algorithm results was lenet over mnist. |
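For intuition, a hypothetical dense scalar-vector primitive in the spirit of LibSpoofPrimitives: with a scalar on the left, the generated call must index into the side input (the second operand's data and position), which is exactly what the new template variants arrange:

```java
public class ScalarVectorSketch {
    // Hypothetical dense scalar-vector primitive: out[i] = s * b[bi + i].
    // Note the offset bi refers to the SIDE INPUT, not the main input row.
    static double[] vectMultWrite(double s, double[] b, int bi, int len) {
        double[] out = new double[len];
        for (int i = 0; i < len; i++)
            out[i] = s * b[bi + i];
        return out;
    }

    public static void main(String[] args) {
        double[] r = vectMultWrite(2.0, new double[]{1, 2, 3}, 0, 3);
        System.out.println(java.util.Arrays.toString(r)); // [2.0, 4.0, 6.0]
    }
}
```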
49,737 | 19.06.2017 11:39:00 | 25,200 | 3cde999c09941d7fc9b4b03f733071b85ec8a343 | python launch script for spark-submit
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "bin/systemml-spark-submit.py",
"diff": "+#!/usr/bin/env python\n+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\n+import os\n+import sys\n+from os.path import join, exists, abspath\n+from os import environ\n+import glob\n+import argparse\n+import shutil\n+import platform\n+\n+if environ.get('SPARK_HOME') is None:\n+ print('SPARK_HOME not set')\n+ sys.exit(1)\n+else:\n+ spark_home = environ.get('SPARK_HOME')\n+ spark_path = join(spark_home, 'bin', 'spark-submit')\n+\n+\n+# error help print\n+def print_usage_and_exit():\n+ print('Usage: ./systemml-spark-submit.py -f <dml-filename> [arguments]')\n+ sys.exit(1)\n+\n+cparser = argparse.ArgumentParser(description='System-ML Spark Submit Script')\n+\n+# SPARK-SUBMIT Options\n+cparser.add_argument('--master', default='local[*]', help='local, yarn-client, yarn-cluster', metavar='')\n+cparser.add_argument('--driver-memory', default='5G', help='Memory for driver (e.g. 
512M)', metavar='')\n+cparser.add_argument('--num-executors', default='2', help='Number of executors to launch', metavar='')\n+cparser.add_argument('--executor-memory', default='2G', help='Memory per executor', metavar='')\n+cparser.add_argument('--executor-cores', default='1', help='Number of cores', metavar='')\n+cparser.add_argument('--conf', help='Spark configuration file', nargs='+', metavar='')\n+\n+# SYSTEM-ML Options\n+cparser.add_argument('-nvargs', help='List of attributeName-attributeValue pairs', nargs='+', metavar='')\n+cparser.add_argument('-args', help='List of positional argument values', metavar='', nargs='+')\n+cparser.add_argument('-config', help='System-ML configuration file (e.g SystemML-config.xml)', metavar='')\n+cparser.add_argument('-exec', default='hybrid_spark', help='System-ML backend (e.g spark, spark-hybrid)', metavar='')\n+cparser.add_argument('-explain', help='explains plan levels can be hops, runtime, '\n+ 'recompile_hops, recompile_runtime', nargs='?', const='runtime', metavar='')\n+cparser.add_argument('-debug', help='runs in debug mode', action='store_true')\n+cparser.add_argument('-stats', help='Monitor and report caching/recompilation statistics, '\n+ 'heavy hitter <count> is 10 unless overridden', nargs='?', const='10', metavar='')\n+cparser.add_argument('-gpu', help='uses CUDA instructions when reasonable, '\n+ 'set <force> option to skip conservative memory estimates '\n+ 'and use GPU wherever possible', nargs='?')\n+cparser.add_argument('-f', required=True, help='specifies dml/pydml file to execute; '\n+ 'path can be local/hdfs/gpfs', metavar='')\n+\n+args = cparser.parse_args()\n+\n+# Optional arguments\n+ml_options = []\n+if args.nvargs is not None:\n+ ml_options.append('-nvargs')\n+ ml_options.append(' '.join(args.nvargs))\n+if args.args is not None:\n+ ml_options.append('-args')\n+ ml_options.append(' '.join(args.args))\n+if args.debug is not False:\n+ ml_options.append('-debug')\n+if args.explain is not None:\n+ ml_options.append('-explain')\n+ ml_options.append(args.explain)\n+if args.gpu is not None:\n+ ml_options.append('-gpu')\n+ ml_options.append(args.gpu)\n+if args.stats is not None:\n+ ml_options.append('-stats')\n+ ml_options.append(args.stats)\n+\n+# Assign script file to name received from argparse module\n+script_file = args.f\n+\n+# find the systemML root path which contains the bin folder, the script folder and the target folder\n+# tolerate path with spaces\n+script_dir = os.path.dirname(os.path.realpath(__file__))\n+project_root_dir = os.path.dirname(script_dir)\n+user_dir = os.getcwd()\n+\n+scripts_dir = join(project_root_dir, 'scripts')\n+build_dir = join(project_root_dir, 'target')\n+lib_dir = join(build_dir, 'lib')\n+\n+systemml_jar = build_dir + os.sep + \"SystemML.jar\"\n+jcuda_jars = glob.glob(lib_dir + os.sep + \"jcu*.jar\")\n+target_jars = ','.join(jcuda_jars) # Include all JCuda Jars\n+\n+log4j_properties_path = join(project_root_dir, 'conf', 'log4j.properties.template')\n+\n+build_err_msg = 'You must build the project before running this script.'\n+build_dir_err_msg = 'Could not find target directory ' + build_dir + '. 
' + build_err_msg\n+\n+# check if the project had been built and the jar files exist\n+if not (exists(build_dir)):\n+ print(build_dir_err_msg)\n+ sys.exit(1)\n+\n+print('================================================================================')\n+\n+# if the present working directory is the project root or bin folder, then use the temp folder as user.dir\n+if user_dir == project_root_dir or user_dir == join(project_root_dir, 'bin'):\n+ user_dir = join(project_root_dir, 'temp')\n+ print('Output dir: ' + user_dir)\n+\n+# if the SystemML-config.xml does not exist, create it from the template\n+systemml_config_path = join(project_root_dir, 'conf', 'SystemML-config.xml')\n+systemml_template_config_path = join(project_root_dir, 'conf', 'SystemML-config.xml.template')\n+if not (exists(systemml_config_path)):\n+ shutil.copyfile(systemml_template_config_path, systemml_config_path)\n+ print('... created ' + systemml_config_path)\n+\n+# if SystemML-config.xml is provided as arguments\n+if args.config is None:\n+ systemml_config_path_arg = systemml_config_path\n+else:\n+ systemml_config_path_arg = args.config\n+\n+\n+# from http://stackoverflow.com/questions/1724693/find-a-file-in-python\n+def find_file(name, path):\n+ for root, dirs, files in os.walk(path):\n+ if name in files:\n+ return join(root, name)\n+ return None\n+\n+# if the script file path was omitted, try to complete the script path\n+if not (exists(script_file)):\n+ script_file_name = abspath(script_file)\n+ script_file_found = find_file(script_file, scripts_dir)\n+ if script_file_found is None:\n+ print('Could not find DML script: ' + script_file)\n+ print_usage_and_exit()\n+ else:\n+ script_file = script_file_found\n+ print('DML Script:' + script_file)\n+\n+default_conf = 'spark.driver.extraJavaOptions=-Dlog4j.configuration=file:{}'.format(log4j_properties_path)\n+\n+# Backslash problem in windows.\n+if platform.system() == 'Windows':\n+ default_conf = default_conf.replace('\\\\', '//')\n+\n+if args.conf is not None:\n+ conf = ' --conf '.join(args.conf + [default_conf])\n+else:\n+ conf = default_conf\n+\n+cmd_spark = [spark_path, '--class', 'org.apache.sysml.api.DMLScript',\n+ '--master', args.master, '--driver-memory', args.driver_memory,\n+ '--num-executors', args.num_executors, '--executor-memory', args.executor_memory,\n+ '--executor-cores', args.executor_cores, '--conf', conf, '--jars', target_jars,\n+ systemml_jar]\n+\n+cmd_system_ml = ['-config', systemml_config_path_arg,\n+ '-exec', vars(args)['exec'], '-f', script_file, ' '.join(ml_options)]\n+\n+cmd = cmd_spark + cmd_system_ml\n+\n+return_code = os.system(' '.join(cmd))\n+# For debugging\n+# print(' '.join(cmd))\n+\n+if return_code != 0:\n+ print('Failed to run SystemML. Exit code :' + str(return_code))\n+ print(' '.join(cmd))\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1532] python launch script for spark-submit
Closes #501 |
49,717 | 19.06.2017 11:44:22 | 25,200 | df8d4a63d8d09cae94b6ca2634e31da554302c72 | fix need to use -force for gpu
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java",
"diff": "@@ -49,6 +49,7 @@ import org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitionFormat;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.mapred.DistributedCacheInput;\n@@ -546,7 +547,8 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\nExecType et = ExecType.CP;\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)) {\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())) {\net = ExecType.GPU;\n}\n@@ -625,7 +627,8 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\n{\nLop matmultCP = null;\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)) {\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())) {\nHop h1 = getInput().get(0);\nHop h2 = getInput().get(1);\nLop left; Lop right;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java",
"diff": "@@ -39,6 +39,7 @@ import org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -149,7 +150,8 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\n}\nelse { //general case\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)) {\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())) {\n// Only implemented methods for GPU\nif ((_op == AggOp.SUM && (_direction == Direction.RowCol || _direction == Direction.Row || _direction == Direction.Col))\n|| (_op == AggOp.SUM_SQ && (_direction == Direction.RowCol || _direction == Direction.Row || _direction == Direction.Col))\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"diff": "@@ -53,6 +53,7 @@ import org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitionFormat;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.mapred.DistributedCacheInput;\n@@ -578,7 +579,8 @@ public class BinaryOp extends Hop\nelse //general case\not = HopsOpOp2LopsU.get(op);\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())\n&& (op == OpOp2.MULT || op == OpOp2.PLUS || op == OpOp2.MINUS || op == OpOp2.DIV || op == OpOp2.POW) ) {\net = ExecType.GPU;\n}\n@@ -596,7 +598,8 @@ public class BinaryOp extends Hop\nExecType et = optFindExecType();\nif ( et == ExecType.CP )\n{\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())\n&& (op == OpOp2.MULT || op == OpOp2.PLUS || op == OpOp2.MINUS || op == OpOp2.DIV || op == OpOp2.POW || op == OpOp2.SOLVE)) {\net = ExecType.GPU;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/Hop.java",
"new_path": "src/main/java/org/apache/sysml/hops/Hop.java",
"diff": "@@ -44,6 +44,7 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject.UpdateType;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDSequence;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -787,7 +788,8 @@ public abstract class Hop\n}\nprotected ExecType findGPUExecTypeByMemEstimate(ExecType et) {\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)) {\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())) {\nreturn ExecType.GPU;\n}\nreturn et;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java",
"diff": "@@ -200,9 +200,6 @@ public class OptimizerUtils\n*/\npublic static final boolean ALLOW_COMBINE_FILE_INPUT_FORMAT = true;\n-\n- public static long GPU_MEMORY_BUDGET = -1;\n-\n//////////////////////\n// Optimizer levels //\n//////////////////////\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java",
"diff": "@@ -35,6 +35,7 @@ import org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.lops.Transform.OperationTypes;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n/**\n@@ -151,7 +152,8 @@ public class ReorgOp extends Hop implements MultiThreadedHop\nsetLops(lin); //if input of size 1x1, avoid unnecessary transpose\nelse { //general case\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)) {\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget())) {\net = ExecType.GPU;\n}\nTransform transform1 = new Transform( lin,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/TernaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/TernaryOp.java",
"diff": "@@ -42,6 +42,7 @@ import org.apache.sysml.lops.PartialAggregate.CorrectionLocationType;\nimport org.apache.sysml.parser.Statement;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n/** Primary use cases for now, are\n@@ -649,7 +650,8 @@ public class TernaryOp extends Hop\nthrow new HopsException(\"Unexpected operation: \" + _op + \", expecting \" + OpOp3.PLUS_MULT + \" or\" + OpOp3.MINUS_MULT);\nExecType et = null;\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET) )\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n+ .initialGPUMemBudget()) )\net = ExecType.GPU;\nelse\net = optFindExecType();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContextPool.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContextPool.java",
"diff": "@@ -46,6 +46,9 @@ public class GPUContextPool {\n*/\npublic static int PER_PROCESS_MAX_GPUS = -1;\n+\n+ private static long INITIAL_GPU_MEMORY_BUDGET = -1;\n+\n/**\n* Whether cuda has been initialized\n*/\n@@ -80,6 +83,7 @@ public class GPUContextPool {\n* @throws DMLRuntimeException ?\n*/\npublic synchronized static void initializeGPU() throws DMLRuntimeException {\n+ initialized = true;\nGPUContext.LOG.info(\"Initializing CUDA\");\nlong start = System.nanoTime();\nJCuda.setExceptionsEnabled(true);\n@@ -110,7 +114,22 @@ public class GPUContextPool {\npool.add(gCtx);\n}\n+ // Initialize the initial memory budget\n+ // If there are heterogeneous GPUs on the machine (different memory sizes)\n+ // initially available memory is set to the GPU with the lowest memory\n+ // This is because at runtime, we wouldn't know which GPU a certain\n+ // operation gets scheduled on\n+ long minAvailableMemory = Integer.MAX_VALUE;\n+ for (GPUContext gCtx : pool) {\n+ gCtx.initializeThread();\n+ minAvailableMemory = Math.min(minAvailableMemory, gCtx.getAvailableMemory());\n+ }\n+ INITIAL_GPU_MEMORY_BUDGET = minAvailableMemory;\n+\n+\nGPUContext.LOG.info(\"Total number of GPUs on the machine: \" + deviceCount);\n+ GPUContext.LOG.info(\"Initial GPU memory: \" + initialGPUMemBudget());\n+\n//int[] device = {-1};\n//cudaGetDevice(device);\n//cudaDeviceProp prop = getGPUProperties(device[0]);\n@@ -119,7 +138,6 @@ public class GPUContextPool {\n//long sharedMemPerBlock = prop.sharedMemPerBlock;\n//LOG.debug(\"Active CUDA device number : \" + device[0]);\n//LOG.debug(\"Max Blocks/Threads/SharedMem on active device: \" + maxBlocks + \"/\" + maxThreadsPerBlock + \"/\" + sharedMemPerBlock);\n- initialized = true;\nGPUStatistics.cudaInitTime = System.nanoTime() - start;\n}\n@@ -187,4 +205,19 @@ public class GPUContextPool {\n}\n+ /**\n+ * Gets the initial GPU memory budget. This is the minimum of the\n+ * available memories across all the GPUs on the machine(s)\n+ * @return minimum available memory\n+ * @throws RuntimeException if error initializing the GPUs\n+ */\n+ public static synchronized long initialGPUMemBudget() throws RuntimeException {\n+ try {\n+ if (!initialized)\n+ initializeGPU();\n+ return INITIAL_GPU_MEMORY_BUDGET;\n+ } catch (DMLRuntimeException e){\n+ throw new RuntimeException(e);\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -3261,7 +3261,7 @@ public class LibMatrixCUDA {\n// step 4: compute QR factorization\nPointer work = gCtx.allocate(instName, lwork[0] * Sizeof.DOUBLE);\n- Pointer tau = gCtx.allocate(instName, Math.max(m, m) * Sizeof.DOUBLE);\n+ Pointer tau = gCtx.allocate(instName, m * Sizeof.DOUBLE);\nPointer devInfo = gCtx.allocate(Sizeof.INT);\nif (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nJCusolverDn.cusolverDnDgeqrf(gCtx.getCusolverDnHandle(), m, n, A, m, tau, work, lwork[0], devInfo);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1701] fix need to use -force for gpu
Closes #546 |
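The budget computation at the heart of this patch, reduced to a stand-alone sketch (the GPU interface is a stand-in; the sketch seeds the minimum with Long.MAX_VALUE so devices with more than 2 GB of free memory are handled without clamping):

```java
import java.util.List;

// Sketch of the initial GPU memory budget: since an operation may be
// scheduled on any device, planning must assume the least-capable GPU.
public class GpuBudgetSketch {
    interface Gpu { long availableMemory(); }

    static long initialBudget(List<Gpu> gpus) {
        long min = Long.MAX_VALUE; // long-typed seed avoids an int-range ceiling
        for (Gpu g : gpus)
            min = Math.min(min, g.availableMemory());
        return min;
    }

    public static void main(String[] args) {
        List<Gpu> gpus = List.<Gpu>of(() -> 8L << 30, () -> 4L << 30); // 8 GB, 4 GB
        System.out.println(initialBudget(gpus)); // 4294967296 (the 4 GB device)
    }
}
```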
49,738 | 18.06.2017 23:59:22 | 25,200 | 9389a5e1e0bd081ef0037321a7d1e7eac328cfbe | New codegen common-subexpression elimination for cplans
This patch introduces a general-purpose CSE rewrite for code generation
plans and applies it during cleanup of cplans before hop DAG
modification. The advantages are better generated code (without
unnecessary lookups as often encountered in multi-agg templates) and
better plan cache hit rates. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"diff": "@@ -46,6 +46,7 @@ import org.apache.sysml.hops.codegen.cplan.CNodeTpl;\nimport org.apache.sysml.hops.codegen.template.TemplateBase;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.CloseType;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\n+import org.apache.sysml.hops.codegen.template.CPlanCSERewriter;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable;\nimport org.apache.sysml.hops.codegen.template.PlanSelection;\nimport org.apache.sysml.hops.codegen.template.PlanSelectionFuseCostBased;\n@@ -347,7 +348,8 @@ public class SpoofCompiler\nHashMap<Long, Pair<Hop[],CNodeTpl>> cplans = constructCPlans(roots, compileLiterals);\n//cleanup codegen plans (remove unnecessary inputs, fix hop-cnodedata mapping,\n- //remove empty templates with single cnodedata input, remove spurious lookups)\n+ //remove empty templates with single cnodedata input, remove spurious lookups,\n+ //perform common subexpression elimination)\ncplans = cleanupCPlans(cplans);\n//explain before modification\n@@ -663,33 +665,26 @@ public class SpoofCompiler\n*\n* @param cplans set of cplans\n*/\n- private static HashMap<Long, Pair<Hop[],CNodeTpl>> cleanupCPlans(HashMap<Long, Pair<Hop[],CNodeTpl>> cplans) {\n+ private static HashMap<Long, Pair<Hop[],CNodeTpl>> cleanupCPlans(HashMap<Long, Pair<Hop[],CNodeTpl>> cplans)\n+ {\nHashMap<Long, Pair<Hop[],CNodeTpl>> cplans2 = new HashMap<Long, Pair<Hop[],CNodeTpl>>();\n+ CPlanCSERewriter cse = new CPlanCSERewriter();\n+\nfor( Entry<Long, Pair<Hop[],CNodeTpl>> e : cplans.entrySet() ) {\nCNodeTpl tpl = e.getValue().getValue();\nHop[] inHops = e.getValue().getKey();\n- //collect cplan leaf node names\n- HashSet<Long> leafs = new HashSet<Long>();\n- if( tpl instanceof CNodeMultiAgg )\n- for( CNode out : ((CNodeMultiAgg)tpl).getOutputs() )\n- rCollectLeafIDs(out, leafs);\n- else\n- rCollectLeafIDs(tpl.getOutput(), leafs);\n+ //perform common subexpression elimination\n+ tpl = cse.eliminateCommonSubexpressions(tpl);\n- //create clean cplan w/ minimal inputs\n- if( inHops.length == leafs.size() )\n- cplans2.put(e.getKey(), e.getValue());\n- else {\n- tpl.cleanupInputs(leafs);\n+ //update input hops (order-preserving)\n+ HashSet<Long> inputHopIDs = tpl.getInputHopIDs(false);\nArrayList<Hop> tmp = new ArrayList<Hop>();\n- for( Hop hop : inHops ) {\n- if( hop!= null && leafs.contains(hop.getHopID()) )\n- tmp.add(hop);\n- }\n- cplans2.put(e.getKey(), new Pair<Hop[],CNodeTpl>(\n- tmp.toArray(new Hop[0]),tpl));\n- }\n+ for( Hop input : inHops )\n+ if( inputHopIDs.contains(input.getHopID()) )\n+ tmp.add(input);\n+ inHops = tmp.toArray(new Hop[0]);\n+ cplans2.put(e.getKey(), new Pair<Hop[],CNodeTpl>(inHops, tpl));\n//remove invalid plans with column indexing on main input\nif( tpl instanceof CNodeCell ) {\n@@ -734,16 +729,6 @@ public class SpoofCompiler\nreturn cplans2;\n}\n- private static void rCollectLeafIDs(CNode node, HashSet<Long> leafs) {\n- //collect leaf variable names\n- if( node instanceof CNodeData && !((CNodeData)node).isLiteral() )\n- leafs.add(((CNodeData) node).getHopID());\n-\n- //recursively process cplan\n- for( CNode c : node.getInput() )\n- rCollectLeafIDs(c, leafs);\n- }\n-\nprivate static void rFindAndRemoveLookupMultiAgg(CNodeMultiAgg node, CNodeData mainInput) {\n//process all outputs individually\nfor( CNode output : node.getOutputs() )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNode.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNode.java",
"diff": "@@ -83,6 +83,10 @@ public abstract class CNode\n_generated = false;\n}\n+ public void resetHash() {\n+ _hash = 0;\n+ }\n+\npublic void setNumRows(long rows) {\n_rows = rows;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeData.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeData.java",
"diff": "@@ -28,6 +28,7 @@ public class CNodeData extends CNode\n{\nprotected final String _name;\nprotected final long _hopID;\n+ private boolean _strictEquals;\npublic CNodeData(Hop hop) {\nthis(hop, hop.getDim1(), hop.getDim2(), hop.getDataType());\n@@ -36,6 +37,7 @@ public class CNodeData extends CNode\npublic CNodeData(Hop hop, long rows, long cols, DataType dt) {\n//note: previous rewrites might have created hops with equal name\n//hence, we also keep the hopID to uniquely identify inputs\n+ super();\n_name = hop.getName();\n_hopID = hop.getHopID();\n_rows = rows;\n@@ -67,6 +69,11 @@ public class CNodeData extends CNode\nreturn _hopID;\n}\n+ public void setStrictEquals(boolean flag) {\n+ _strictEquals = flag;\n+ _hash = 0;\n+ }\n+\n@Override\npublic String codegen(boolean sparse) {\nreturn \"\";\n@@ -97,6 +104,7 @@ public class CNodeData extends CNode\nreturn (o instanceof CNodeData\n&& super.equals(o)\n&& isLiteral() == ((CNodeData)o).isLiteral()\n- && (isLiteral() ? _name.equals(((CNodeData)o)._name) : true));\n+ && (isLiteral() ? _name.equals(((CNodeData)o)._name) :\n+ _strictEquals ? _hopID == ((CNodeData)o)._hopID : true));\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTpl.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTpl.java",
"diff": "@@ -51,14 +51,6 @@ public abstract class CNodeTpl extends CNode implements Cloneable\n_inputs.add(in);\n}\n- public void cleanupInputs(HashSet<Long> filter) {\n- ArrayList<CNode> tmp = new ArrayList<CNode>();\n- for( CNode in : _inputs )\n- if( in instanceof CNodeData && filter.contains(((CNodeData) in).getHopID()) )\n- tmp.add(in);\n- _inputs = tmp;\n- }\n-\npublic String[] getInputNames() {\nString[] ret = new String[_inputs.size()];\nfor( int i=0; i<_inputs.size(); i++ )\n@@ -66,6 +58,14 @@ public abstract class CNodeTpl extends CNode implements Cloneable\nreturn ret;\n}\n+ public HashSet<Long> getInputHopIDs(boolean inclLiterals) {\n+ HashSet<Long> ret = new HashSet<Long>();\n+ for( CNode input : _inputs )\n+ if( !input.isLiteral() || inclLiterals )\n+ ret.add(((CNodeData)input).getHopID());\n+ return ret;\n+ }\n+\npublic void resetVisitStatusOutputs() {\ngetOutput().resetVisitStatus();\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanCSERewriter.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.hops.codegen.template;\n+\n+import java.util.Arrays;\n+import java.util.HashMap;\n+import java.util.HashSet;\n+import java.util.List;\n+\n+import org.apache.sysml.hops.codegen.cplan.CNode;\n+import org.apache.sysml.hops.codegen.cplan.CNodeData;\n+import org.apache.sysml.hops.codegen.cplan.CNodeMultiAgg;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTpl;\n+\n+public class CPlanCSERewriter\n+{\n+ public CNodeTpl eliminateCommonSubexpressions(CNodeTpl tpl)\n+ {\n+ //Note: Compared to our traditional common subexpression elimination, on cplans,\n+ //we don't have any parent references, and hence cannot use a collect-merge approach.\n+ //In contrast, we exploit the hash signatures of cnodes as used in the plan cache.\n+ //However, note that these signatures ignore input hops by default (for better plan\n+ //cache hit rates), but are temporarily set to strict evaluation for this rewrite.\n+\n+ List<CNode> outputs = (tpl instanceof CNodeMultiAgg) ?\n+ ((CNodeMultiAgg)tpl).getOutputs() :\n+ Arrays.asList(tpl.getOutput());\n+\n+ //step 1: set data nodes to strict comparison\n+ HashSet<Long> memo = new HashSet<Long>();\n+ for( CNode out : outputs )\n+ rSetStrictDataNodeComparision(out, memo, true);\n+\n+ //step 2: perform common subexpression elimination\n+ HashMap<CNode,CNode> cseSet = new HashMap<CNode,CNode>();\n+ memo.clear();\n+ for( CNode out : outputs )\n+ rEliminateCommonSubexpression(out, cseSet, memo);\n+\n+ //step 3: reset data nodes to imprecise comparison\n+ memo.clear();\n+ for( CNode out : outputs )\n+ rSetStrictDataNodeComparision(out, memo, true);\n+\n+ return tpl;\n+ }\n+\n+ private void rEliminateCommonSubexpression(CNode current, HashMap<CNode,CNode> cseSet, HashSet<Long> memo) {\n+ //avoid redundant re-evaluation\n+ if( memo.contains(current.getID()) )\n+ return;\n+\n+ //replace input with existing common subexpression\n+ for( int i=0; i<current.getInput().size(); i++ ) {\n+ CNode input = current.getInput().get(i);\n+ if( cseSet.containsKey(input) )\n+ current.getInput().set(i, cseSet.get(input));\n+ }\n+\n+ //process inputs recursively\n+ for( CNode input : current.getInput() )\n+ rEliminateCommonSubexpression(input, cseSet, memo);\n+\n+ //process node itself\n+ cseSet.put(current, current);\n+ memo.add(current.getID());\n+ }\n+\n+ private void rSetStrictDataNodeComparision(CNode current, HashSet<Long> memo, boolean flag) {\n+ //avoid redundant re-evaluation\n+ if( memo.contains(current.getID()) )\n+ return;\n+\n+ //process inputs recursively and node itself\n+ for( CNode input : current.getInput() ) {\n+ rSetStrictDataNodeComparision(input, memo, flag);\n+ input.resetHash();\n+ }\n+ if( current instanceof 
CNodeData )\n+ ((CNodeData)current).setStrictEquals(flag);\n+ memo.add(current.getID());\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -163,7 +163,8 @@ public class TemplateCell extends TemplateBase\nif( me!=null && me.isPlanRef(i) && !(c instanceof DataOp)\n&& (me.type!=TemplateType.MultiAggTpl || memo.contains(c.getHopID(), TemplateType.CellTpl)))\nrConstructCplan(c, memo, tmp, inHops, compileLiterals);\n- else if( me!=null && me.type==TemplateType.MultiAggTpl && HopRewriteUtils.isMatrixMultiply(hop) && i==0 )\n+ else if( me!=null && (me.type==TemplateType.MultiAggTpl || me.type==TemplateType.CellTpl)\n+ && HopRewriteUtils.isMatrixMultiply(hop) && i==0 ) //skip transpose\nrConstructCplan(c.getInput().get(0), memo, tmp, inHops, compileLiterals);\nelse {\nCNodeData cdata = TemplateUtils.createCNodeData(c, compileLiterals);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1719] New codegen common-subexpression elimination for cplans
This patch introduces a general-purpose CSE rewrite for code generation
plans and applies it during cleanup of cplans before hop dag
modification. The advantages are better generated code (without
unnecessary lookups as often encountered in multi-agg templates) and
better plan cache hit rates. |
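As an aside on the CPlanCSERewriter pattern above: because cplan nodes carry no parent references, CSE is done bottom-up with a hash map keyed on value-equal nodes. Below is a minimal, hypothetical Java sketch of that hash-map-based idea; the `Node` class and all names are illustrative, not SystemML's actual CNode API.

```java
import java.util.*;

// Minimal sketch of hash-based common-subexpression elimination over an
// expression DAG: nodes that compare equal by (op, inputs) collapse onto
// one canonical instance, mirroring the approach described above.
public class CseSketch {
    static final class Node {
        final String op;           // operator or leaf name (illustrative)
        final List<Node> inputs;   // already-canonicalized children
        Node(String op, Node... in) { this.op = op; this.inputs = Arrays.asList(in); }
        @Override public boolean equals(Object o) {
            return o instanceof Node && ((Node) o).op.equals(op)
                && ((Node) o).inputs.equals(inputs);
        }
        @Override public int hashCode() { return Objects.hash(op, inputs); }
    }

    // Bottom-up canonicalization: children first, then the node itself.
    static Node cse(Node n, Map<Node, Node> seen) {
        Node[] in = n.inputs.stream().map(c -> cse(c, seen)).toArray(Node[]::new);
        Node canon = new Node(n.op, in);
        return seen.computeIfAbsent(canon, k -> k); // reuse equal node if present
    }

    public static void main(String[] args) {
        Node a = new Node("A"), b = new Node("B");
        Node root = new Node("+", new Node("*", a, b), new Node("*", a, b));
        Node out = cse(root, new HashMap<>());
        // both children of '+' now reference the same canonical object
        System.out.println(out.inputs.get(0) == out.inputs.get(1)); // true
    }
}
```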
49,772 | 19.06.2017 13:52:47 | 25,200 | cbfb21cbcdcac699f93cdbb851138f17f6fcd9b6 | [MINOR] Cleanup in the `nn` library. | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/conv2d_transpose.dml",
"new_path": "scripts/nn/layers/conv2d_transpose.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * 2D Transpose convolutional layer.\n+ * 2D Transpose Convolutional layer.\n*\n* Utilizes built-in convolution operators for higher performance.\n*/\n+source(\"nn/util.dml\") as util\nforward = function(matrix[double] X, matrix[double] W, matrix[double] b,\nint C, int Hin, int Win, int Hf, int Wf,\n@@ -146,7 +147,7 @@ backward = function(matrix[double] dout, int Hout, int Wout,\nstride=[strideh,stridew], padding=[padh,padw])\n# Partial derivatives for bias vector\n- db = rowSums(matrix(colSums(dout), rows=F, cols=Hout*Wout))\n+ db = util::channel_sums(dout, F, Hout, Wout)\n}\ninit = function(int F, int C, int Hf, int Wf)\n@@ -235,3 +236,4 @@ init_bilinear = function(int C, int K)\nb = matrix(0, rows=C, cols=1)\n}\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/test.dml",
"new_path": "scripts/nn/test/test.dml",
"diff": "@@ -69,6 +69,97 @@ batch_norm1d = function() {\n}\n}\n+batch_norm2d = function() {\n+ /*\n+ * Test for the 2D (spatial) batch normalization function.\n+ */\n+ print(\"Testing the 2D (spatial) batch normalization function.\")\n+\n+ # Generate data\n+ N = 2 # Number of examples\n+ C = 3 # num channels\n+ Hin = 4 # input height\n+ Win = 5 # input width\n+ mode = 'train' # execution mode\n+ mu = 0.9 # momentum of moving averages\n+ eps = 1e-5 # smoothing term\n+ X = matrix(\"70 29 23 55 72\n+ 42 98 68 48 39\n+ 34 73 44 6 40\n+ 74 18 18 53 53\n+\n+ 63 85 72 61 72\n+ 32 36 23 29 63\n+ 9 43 43 49 43\n+ 31 43 89 94 50\n+\n+ 62 12 32 41 87\n+ 25 48 99 52 61\n+ 12 83 60 55 34\n+ 30 42 68 88 51\n+\n+\n+ 67 59 62 67 84\n+ 8 76 24 19 57\n+ 10 89 63 72 2\n+ 59 56 16 15 70\n+\n+ 32 69 55 39 93\n+ 84 36 4 30 40\n+ 70 100 36 76 59\n+ 69 15 40 24 34\n+\n+ 51 67 11 13 32\n+ 66 85 55 85 38\n+ 32 35 17 83 34\n+ 55 58 52 0 99\", rows=N, cols=C*Hin*Win)\n+\n+ # Create layer\n+ [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)\n+\n+ # Forward\n+ [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+\n+ # Equivalency check\n+ target = matrix(\"0.86215019 -0.76679718 -1.00517964 0.26619387 0.94161105\n+ -0.25030172 1.97460198 0.78268933 -0.01191914 -0.36949289\n+ -0.56814504 0.98134136 -0.17084086 -1.68059683 -0.32976246\n+ 1.02107191 -1.20383179 -1.20383179 0.18673301 0.18673301\n+\n+ 0.50426388 1.41921711 0.87856293 0.42108631 0.87856293\n+ -0.78498828 -0.61863315 -1.15928721 -0.90975463 0.50426388\n+ -1.74153018 -0.32751167 -0.32751167 -0.07797909 -0.32751167\n+ -0.82657707 -0.32751167 1.58557224 1.79351616 -0.0363903\n+\n+ 0.4607178 -1.49978399 -0.71558321 -0.36269283 1.44096887\n+ -0.99005347 -0.08822262 1.91148913 0.06861746 0.42150795\n+ -1.49978399 1.28412855 0.38229787 0.18624771 -0.63716316\n+ -0.79400325 -0.32348287 0.69597805 1.48017895 0.0294075\n+\n+\n+ 0.74295878 0.42511559 0.54430676 0.74295878 1.41837597\n+ -1.60113597 1.10053277 -0.96544927 -1.16410136 0.34565473\n+ -1.52167511 1.61702824 0.5840373 0.94161105 -1.83951855\n+ 0.42511559 0.30592418 -1.28329265 -1.32302308 0.86215019\n+\n+ -0.78498828 0.75379658 0.17155361 -0.4938668 1.75192738\n+ 1.37762833 -0.61863315 -1.9494741 -0.86816585 -0.45227802\n+ 0.79538536 2.04304862 -0.61863315 1.04491806 0.33790874\n+ 0.75379658 -1.49199748 -0.45227802 -1.11769855 -0.70181072\n+\n+ 0.0294075 0.65676796 -1.53899395 -1.46057391 -0.71558321\n+ 0.61755812 1.36254871 0.18624771 1.36254871 -0.48032296\n+ -0.71558321 -0.59795308 -1.30373383 1.28412855 -0.63716316\n+ 0.18624771 0.30387771 0.06861746 -1.97030437 1.91148913\",\n+ rows=1, cols=N*C*Hin*Win)\n+ out = matrix(out, rows=1, cols=N*C*Hin*Win)\n+ for (i in 1:length(out)) {\n+ rel_error = test_util::check_rel_error(as.scalar(out[1,i]),\n+ as.scalar(target[1,i]), 1e-3, 1e-4)\n+ }\n+}\n+\nconv2d = function() {\n/*\n* Test for the 2D convolution functions.\n@@ -491,97 +582,6 @@ max_pool2d = function() {\ntmp = test_util::check_all_equal(out_builtin, target)\n}\n-batch_norm2d = function() {\n- /*\n- * Test for the 2D (spatial) batch normalization function.\n- */\n- print(\"Testing the 2D (spatial) batch normalization function.\")\n-\n- # Generate data\n- N = 2 # Number of examples\n- C = 3 # num channels\n- Hin = 4 # input height\n- Win = 5 # input width\n- mode = 'train' # execution mode\n- mu = 0.9 # momentum of moving averages\n- eps = 1e-5 # smoothing term\n- X = matrix(\"70 29 23 55 
72\n- 42 98 68 48 39\n- 34 73 44 6 40\n- 74 18 18 53 53\n-\n- 63 85 72 61 72\n- 32 36 23 29 63\n- 9 43 43 49 43\n- 31 43 89 94 50\n-\n- 62 12 32 41 87\n- 25 48 99 52 61\n- 12 83 60 55 34\n- 30 42 68 88 51\n-\n-\n- 67 59 62 67 84\n- 8 76 24 19 57\n- 10 89 63 72 2\n- 59 56 16 15 70\n-\n- 32 69 55 39 93\n- 84 36 4 30 40\n- 70 100 36 76 59\n- 69 15 40 24 34\n-\n- 51 67 11 13 32\n- 66 85 55 85 38\n- 32 35 17 83 34\n- 55 58 52 0 99\", rows=N, cols=C*Hin*Win)\n-\n- # Create layer\n- [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)\n-\n- # Forward\n- [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n-\n- # Equivalency check\n- target = matrix(\"0.86215019 -0.76679718 -1.00517964 0.26619387 0.94161105\n- -0.25030172 1.97460198 0.78268933 -0.01191914 -0.36949289\n- -0.56814504 0.98134136 -0.17084086 -1.68059683 -0.32976246\n- 1.02107191 -1.20383179 -1.20383179 0.18673301 0.18673301\n-\n- 0.50426388 1.41921711 0.87856293 0.42108631 0.87856293\n- -0.78498828 -0.61863315 -1.15928721 -0.90975463 0.50426388\n- -1.74153018 -0.32751167 -0.32751167 -0.07797909 -0.32751167\n- -0.82657707 -0.32751167 1.58557224 1.79351616 -0.0363903\n-\n- 0.4607178 -1.49978399 -0.71558321 -0.36269283 1.44096887\n- -0.99005347 -0.08822262 1.91148913 0.06861746 0.42150795\n- -1.49978399 1.28412855 0.38229787 0.18624771 -0.63716316\n- -0.79400325 -0.32348287 0.69597805 1.48017895 0.0294075\n-\n-\n- 0.74295878 0.42511559 0.54430676 0.74295878 1.41837597\n- -1.60113597 1.10053277 -0.96544927 -1.16410136 0.34565473\n- -1.52167511 1.61702824 0.5840373 0.94161105 -1.83951855\n- 0.42511559 0.30592418 -1.28329265 -1.32302308 0.86215019\n-\n- -0.78498828 0.75379658 0.17155361 -0.4938668 1.75192738\n- 1.37762833 -0.61863315 -1.9494741 -0.86816585 -0.45227802\n- 0.79538536 2.04304862 -0.61863315 1.04491806 0.33790874\n- 0.75379658 -1.49199748 -0.45227802 -1.11769855 -0.70181072\n-\n- 0.0294075 0.65676796 -1.53899395 -1.46057391 -0.71558321\n- 0.61755812 1.36254871 0.18624771 1.36254871 -0.48032296\n- -0.71558321 -0.59795308 -1.30373383 1.28412855 -0.63716316\n- 0.18624771 0.30387771 0.06861746 -1.97030437 1.91148913\",\n- rows=1, cols=N*C*Hin*Win)\n- out = matrix(out, rows=1, cols=N*C*Hin*Win)\n- for (i in 1:length(out)) {\n- rel_error = test_util::check_rel_error(as.scalar(out[1,i]),\n- as.scalar(target[1,i]), 1e-3, 1e-4)\n- }\n-}\n-\ntanh = function() {\n/*\n* Test for the `tanh` forward function.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Cleanup in the `nn` library. |
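The channel_sums call introduced above computes the bias gradient as a per-channel sum over a tensor stored as an (N, F*Hout*Wout) row-major matrix, equivalent to the rowSums(matrix(colSums(dout), ...)) pattern it replaces. Below is a plain-Java sketch of that reduction, under the assumption that this is indeed the utility's semantics (its DML body is not part of this diff).

```java
// Per-channel sum over a tensor stored as an N x (F*Hout*Wout) row-major
// matrix: db[f] = sum over all examples and spatial positions of channel f.
// Illustrative sketch only, not SystemML code.
static double[] channelSums(double[][] dout, int F, int Hout, int Wout) {
    int hw = Hout * Wout;
    double[] db = new double[F];
    for (double[] row : dout)              // examples
        for (int f = 0; f < F; f++)        // channels
            for (int i = 0; i < hw; i++)   // spatial positions
                db[f] += row[f * hw + i];
    return db;
}
```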
49,772 | 19.06.2017 13:52:54 | 25,200 | 585b85fe0ff7753b69e8027bf6f8df0a8b594d30 | [MINOR] Update docs for softmax | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/softmax.dml",
"new_path": "scripts/nn/layers/softmax.dml",
"diff": "forward = function(matrix[double] scores)\nreturn (matrix[double] probs) {\n/*\n- * Computes the forward pass for a softmax classifier. The inputs\n- * are interpreted as unnormalized, log-probabilities for each of\n- * N examples, and the softmax function transforms them to normalized\n- * probabilities.\n+ * Computes the forward pass for a softmax classifier. The input\n+ * has N examples, each with D values that are interpreted as\n+ * unnormalized, log-probabilities for each of D classes. The softmax\n+ * function transforms these values to normalized probabilities across\n+ * the D classes, for every example.\n*\n* This can be interpreted as a generalization of the sigmoid\n* function to multiple classes.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update docs for softmax |
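For reference, the behavior the revised docstring describes is the standard per-example softmax. A generic, numerically stable sketch for one row is shown below; this is illustrative Java, not the nn library's DML implementation.

```java
// Softmax over one row of scores, with the usual max-subtraction trick for
// numerical stability: probs[j] = exp(scores[j] - max) / sum_k exp(scores[k] - max).
static double[] softmax(double[] scores) {
    double max = Double.NEGATIVE_INFINITY;
    for (double s : scores) max = Math.max(max, s);
    double[] probs = new double[scores.length];
    double sum = 0;
    for (int j = 0; j < scores.length; j++) {
        probs[j] = Math.exp(scores[j] - max); // shift for stability
        sum += probs[j];
    }
    for (int j = 0; j < scores.length; j++) probs[j] /= sum; // normalize
    return probs;
}
```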
49,738 | 19.06.2017 14:52:52 | 25,200 | 57cd6cd5ce0b79e750a7e47e609106a290a09cab | Fix mtd handling on multi-part read from object stores | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java",
"diff": "@@ -464,7 +464,7 @@ public class IOUtilFunctions\nFileStatus[] dStatus = fs.listStatus(file);\nfor( FileStatus fdStatus : dStatus )\nif( !fdStatus.getPath().getName().startsWith(\"_\") //skip internal files\n- && !fdStatus.getPath().equals(file.toString()+\".mtd\") ) //mtd file\n+ && !fdStatus.getPath().toString().equals(file.toString()+\".mtd\") ) //mtd file\ntmp.add(fdStatus.getPath());\nret = tmp.toArray(new Path[0]);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1697] Fix mtd handling on multi-part read from object stores |
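The one-character fix above works because Object.equals across unrelated types is always false: the old code compared a Hadoop Path against a String, so the .mtd companion file was never skipped. A minimal demonstration of the same pitfall, using java.nio.file.Path as a stand-in for Hadoop's Path (assumed to behave the same way here):

```java
import java.nio.file.Path;
import java.nio.file.Paths;

public class MtdFilterSketch {
    public static void main(String[] args) {
        Path p = Paths.get("/data/X.mtd");
        System.out.println(p.equals("/data/X.mtd"));            // false: Path vs String
        System.out.println(p.toString().equals("/data/X.mtd")); // true: String vs String
    }
}
```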
49,738 | 21.06.2017 00:05:32 | 25,200 | a5c834b27da9cfeffe0ad6e606c43fe3246831d2 | [HOTFIX][SYSTEMML-1663] Fix and disable element-wise mult chain rewrite
This patch fixes the custom hop comparator to find an ordering of
element-wise multiplication chains (scalars, vectors, matrices), which
fixes the test issue of PR549. Due to additional issues that could cause
result incorrectness or runtime errors, I'm temporarily disabling this
rewrite and related tests. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/ProgramRewriter.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/ProgramRewriter.java",
"diff": "@@ -96,8 +96,8 @@ public class ProgramRewriter\n_dagRuleSet.add( new RewriteRemoveUnnecessaryCasts() );\nif( OptimizerUtils.ALLOW_COMMON_SUBEXPRESSION_ELIMINATION )\n_dagRuleSet.add( new RewriteCommonSubexpressionElimination() );\n- if ( OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES)\n- _dagRuleSet.add( new RewriteElementwiseMultChainOptimization() ); //dependency: cse\n+ //if ( OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES)\n+ // _dagRuleSet.add( new RewriteElementwiseMultChainOptimization() ); //dependency: cse\nif( OptimizerUtils.ALLOW_CONSTANT_FOLDING )\n_dagRuleSet.add( new RewriteConstantFolding() ); //dependency: cse\nif( OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION )\n@@ -108,7 +108,7 @@ public class ProgramRewriter\n_dagRuleSet.add( new RewriteIndexingVectorization() ); //dependency: cse, simplifications\n_dagRuleSet.add( new RewriteInjectSparkPReadCheckpointing() ); //dependency: reblock\n- //add statment block rewrite rules\n+ //add statement block rewrite rules\nif( OptimizerUtils.ALLOW_BRANCH_REMOVAL )\n_sbRuleSet.add( new RewriteRemoveUnnecessaryBranches() ); //dependency: constant folding\nif( OptimizerUtils.ALLOW_SPLIT_HOP_DAGS )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteElementwiseMultChainOptimization.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteElementwiseMultChainOptimization.java",
"diff": "@@ -42,7 +42,7 @@ import com.google.common.collect.Multiset;\n*\n* Rewrite a chain of element-wise multiply hops that contain identical elements.\n* For example `(B * A) * B` is rewritten to `A * (B^2)` (or `(B^2) * A`), where `^` is element-wise power.\n- * The order of the multiplicands depends on their data types, dimentions (matrix or vector), and sparsity.\n+ * The order of the multiplicands depends on their data types, dimensions (matrix or vector), and sparsity.\n*\n* Does not rewrite in the presence of foreign parents in the middle of the e-wise multiply chain,\n* since foreign parents may rely on the individual results.\n@@ -136,6 +136,8 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\n// sorted contains all leaves, sorted by data type, stripped from their parents\n// Construct right-deep EMult tree\n+ // TODO compile binary outer mult for transition from row and column vectors to matrices\n+ // TODO compile subtree for column vectors to avoid blow-up of intermediates on row-col vector transition\nfinal Iterator<Map.Entry<Hop, Integer>> iterator = sorted.entrySet().iterator();\nHop first = constructPower(iterator.next());\n@@ -160,13 +162,15 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\n}\n/**\n- * A Comparator that orders Hops by their data type, dimention, and sparsity.\n+ * A Comparator that orders Hops by their data type, dimension, and sparsity.\n* The order is as follows:\n* scalars > row vectors > col vectors >\n* non-vector matrices ordered by sparsity (higher nnz first, unknown sparsity last) >\n* other data types.\n* Disambiguate by Hop ID.\n*/\n+ //TODO replace by ComparableHop wrapper around hop that implements equals and compareTo\n+ //in order to ensure comparisons that are 'consistent with equals'\nprivate static final Comparator<Hop> compareByDataType = new Comparator<Hop>() {\nprivate final int[] orderDataType = new int[Expression.DataType.values().length];\n{\n@@ -198,7 +202,7 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\nif (o2.getDim2() != 1) return 1; // col vectors greater than non-vectors\nreturn compareBySparsityThenId(o1, o2); // both col vectors\n} else if (o2.getDim2() == 1) { // 2 is col vector; 1 is not\n- return 1; // col vectors greater than non-vectors\n+ return -1; // col vectors greater than non-vectors\n} else { // both non-vectors\nreturn compareBySparsityThenId(o1, o2);\n}\n@@ -244,6 +248,9 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\n// Because RewriteCommonSubexpressionElimination already ran, it is safe to compare by equality.\nemults.add(root);\n+ // TODO proper handling of DAGs (avoid collecting the same leaf multiple times)\n+ // TODO exclude hops with unknown dimensions and move rewrites to dynamic rewrites\n+\nfinal ArrayList<Hop> inputs = root.getInput();\nfinal Hop left = inputs.get(0), right = inputs.get(1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationTest.java",
"diff": "@@ -61,6 +61,7 @@ public class RewriteElementwiseMultChainOptimizationTest extends AutomatedTestBa\ntestRewriteMatrixMultChainOp(TEST_NAME1, false, ExecType.SPARK);\n}\n+ /* TODO enable together with RewriteElementwiseMultChainOptimization\n@Test\npublic void testMatrixMultChainOptRewritesCP() {\ntestRewriteMatrixMultChainOp(TEST_NAME1, true, ExecType.CP);\n@@ -70,6 +71,7 @@ public class RewriteElementwiseMultChainOptimizationTest extends AutomatedTestBa\npublic void testMatrixMultChainOptRewritesSP() {\ntestRewriteMatrixMultChainOp(TEST_NAME1, true, ExecType.SPARK);\n}\n+ */\nprivate void testRewriteMatrixMultChainOp(String testname, boolean rewrites, ExecType et)\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX][SYSTEMML-1663] Fix and disable element-wise mult chain rewrite
This patch fixes the custom hop comparator to find an ordering of
element-wise multiplication chains (scalars, vectors, matrices), which
fixes the test issue of PR549. Due to additional issues that could cause
result incorrectness or runtime errors, I'm temporarily disabling this
rewrite and related tests. |
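One of the TODOs added above asks for comparisons "consistent with equals"; the behavioral fix in this diff (returning -1 instead of 1 when only the second operand is a column vector) restores the antisymmetry a Comparator must satisfy, sgn(compare(a,b)) == -sgn(compare(b,a)), without which sorted collections misbehave. A hypothetical miniature of just that ordering rule:

```java
import java.util.Comparator;

// Hypothetical miniature of the vector/matrix ordering fixed above: before
// the fix, both mixed-type branches effectively returned 1, violating
// antisymmetry; returning -1 in the mirrored branch restores it.
enum Shape { COL_VECTOR, MATRIX }

class ShapeOrder implements Comparator<Shape> {
    @Override public int compare(Shape a, Shape b) {
        if (a == b) return 0;
        // col vectors sort after non-vector matrices, consistently
        return (a == Shape.COL_VECTOR) ? 1 : -1; // the -1 is the fix
    }
}
```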
49,701 | 21.06.2017 20:50:33 | 25,200 | 87cc5ee67d055ed0628f3128b10dbdeb3ec149d5 | Remove Guava from compile-time dependencies
Remove addition of Guava compile-time dependency from
Replace with Java 8 and standard collections classes.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopDagValidator.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopDagValidator.java",
"diff": "@@ -22,10 +22,10 @@ package org.apache.sysml.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.HashSet;\nimport java.util.Set;\n+import java.util.stream.Collectors;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-\nimport org.apache.sysml.hops.DataOp;\nimport org.apache.sysml.hops.FunctionOp;\nimport org.apache.sysml.hops.Hop;\n@@ -35,8 +35,6 @@ import org.apache.sysml.parser.Expression;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.utils.Explain;\n-import com.google.common.collect.Lists;\n-\nimport static org.apache.sysml.hops.HopsException.check;\n/**\n@@ -90,9 +88,12 @@ public class HopDagValidator {\n//check visit status\nfinal boolean seen = !state.seen.add(id);\n- check(seen == hop.isVisited(), hop,\n- \"(parents: %s) seen previously is %b but does not match hop visit status\",\n- Lists.transform(hop.getParent(), Hop::getHopID), seen);\n+ if (seen != hop.isVisited()) {\n+ String parentIDs = hop.getParent().stream()\n+ .map(h -> Long.toString(h.getHopID())).collect(Collectors.joining(\", \"));\n+ //noinspection ConstantConditions\n+ check(false, hop, parentIDs, seen);\n+ }\nif (seen) return; // we saw the Hop previously, no need to re-validate\n//check parent linking\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteElementwiseMultChainOptimization.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteElementwiseMultChainOptimization.java",
"diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.Comparator;\n+import java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.Iterator;\nimport java.util.Map;\n@@ -34,9 +35,6 @@ import org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.parser.Expression;\n-import com.google.common.collect.HashMultiset;\n-import com.google.common.collect.Multiset;\n-\n/**\n* Prerequisite: RewriteCommonSubexpressionElimination must run before this rule.\n*\n@@ -79,7 +77,7 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\nif (isBinaryMult(root)) {\nfinal Hop left = root.getInput().get(0), right = root.getInput().get(1);\nfinal Set<BinaryOp> emults = new HashSet<>();\n- final Multiset<Hop> leaves = HashMultiset.create();\n+ final Map<Hop, Integer> leaves = new HashMap<>(); // poor man's HashMultiset\nfindEMultsAndLeaves((BinaryOp)root, emults, leaves);\n// 2. Ensure it is profitable to do a rewrite.\n@@ -101,7 +99,7 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\nfinal Hop newRoot = HopRewriteUtils.rewireAllParentChildReferences(root, replacement);\n// 6. Recurse at leaves (no need to repeat the interior emults)\n- for (final Hop leaf : leaves.elementSet()) {\n+ for (final Hop leaf : leaves.keySet()) {\nrecurseInputs(leaf);\n}\nreturn newRoot;\n@@ -123,15 +121,15 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\n}\n}\n- private static Hop constructReplacement(final Set<BinaryOp> emults, final Multiset<Hop> leaves) {\n+ private static Hop constructReplacement(final Set<BinaryOp> emults, final Map<Hop, Integer> leaves) {\n// Sort by data type\nfinal SortedMap<Hop,Integer> sorted = new TreeMap<>(compareByDataType);\n- for (final Multiset.Entry<Hop> entry : leaves.entrySet()) {\n- final Hop h = entry.getElement();\n+ for (final Map.Entry<Hop, Integer> entry : leaves.entrySet()) {\n+ final Hop h = entry.getKey();\n// unlink parents that are in the emult set(we are throwing them away)\n// keep other parents\nh.getParent().removeIf(parent -> parent instanceof BinaryOp && emults.contains(parent));\n- sorted.put(h, entry.getCount());\n+ sorted.put(h, entry.getValue());\n}\n// sorted contains all leaves, sorted by data type, stripped from their parents\n@@ -244,7 +242,8 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\n* @param emults Out parameter. The set of BinaryOp element-wise multiply hops in the emult chain (including root).\n* @param leaves Out parameter. 
The multiset of multiplicands in the emult chain.\n*/\n- private static void findEMultsAndLeaves(final BinaryOp root, final Set<BinaryOp> emults, final Multiset<Hop> leaves) {\n+ private static void findEMultsAndLeaves(final BinaryOp root, final Set<BinaryOp> emults,\n+ final Map<Hop, Integer> leaves) {\n// Because RewriteCommonSubexpressionElimination already ran, it is safe to compare by equality.\nemults.add(root);\n@@ -257,12 +256,16 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\nif (isBinaryMult(left))\nfindEMultsAndLeaves((BinaryOp) left, emults, leaves);\nelse\n- leaves.add(left);\n+ addMultiset(leaves, left);\nif (isBinaryMult(right))\nfindEMultsAndLeaves((BinaryOp) right, emults, leaves);\nelse\n- leaves.add(right);\n+ addMultiset(leaves, right);\n+ }\n+\n+ private static <K> void addMultiset(final Map<K,Integer> map, final K k) {\n+ map.put(k, map.getOrDefault(k, 0) + 1);\n}\n/**\n@@ -271,7 +274,7 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\n* @param leaves The multiset of multiplicands in the emult chain.\n* @return If the multiset is worth optimizing.\n*/\n- private static boolean isOptimizable(Set<BinaryOp> emults, final Multiset<Hop> leaves) {\n+ private static boolean isOptimizable(final Set<BinaryOp> emults, final Map<Hop, Integer> leaves) {\nreturn emults.size() >= 2;\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1724] Remove Guava from compile-time dependencies
Remove addition of Guava compile-time dependency from SYSTEMML-1663.
Replace with Java 8 and standard collections classes.
Closes #549. |
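The "poor man's HashMultiset" used above keeps counts as map values and increments them with Java 8's getOrDefault. A small self-contained usage sketch (names illustrative):

```java
import java.util.HashMap;
import java.util.Map;

// Map<K,Integer> as a multiset: add() increments the count for a key.
// For the emult chain B * A * B this yields {A=1, B=2}, i.e. A * B^2.
public class MultisetSketch {
    static <K> void add(Map<K, Integer> ms, K k) {
        ms.put(k, ms.getOrDefault(k, 0) + 1);
    }

    public static void main(String[] args) {
        Map<String, Integer> leaves = new HashMap<>();
        add(leaves, "B"); add(leaves, "A"); add(leaves, "B");
        System.out.println(leaves); // e.g. {A=1, B=2}
    }
}
```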
49,738 | 21.06.2017 22:29:17 | 25,200 | ea805c863cacba4a4b8ac5c02d4fbf9154a93277 | Fix correctness codegen vector primitives (abs, sm-div) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"diff": "@@ -228,7 +228,7 @@ public class LibSpoofPrimitives\npublic static double[] vectDivWrite(double bval, double[] a, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = bval / a[ai] / bval;\n+ c[j] = bval / a[ai];\nreturn c;\n}\n@@ -327,12 +327,11 @@ public class LibSpoofPrimitives\n//custom vector plus\npublic static void vectPlusAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n- for( int j = ai; j < ai+len; j++, ci++)\n- c[ci] += a[j] + bval;\n+ LibMatrixMult.vectAdd(a, bval, c, ai, ci, len);\n}\npublic static void vectPlusAdd(double bval, double[] a, double[] c, int ai, int ci, int len) {\n- vectPlusAdd(a, bval, c, ai, ci, len);\n+ LibMatrixMult.vectAdd(a, bval, c, ai, ci, len);\n}\npublic static void vectPlusAdd(double[] a, double bval, double[] c, int[] aix, int ai, int ci, int len) {\n@@ -610,20 +609,20 @@ public class LibSpoofPrimitives\npublic static void vectAbsAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\nfor( int j = ai; j < ai+len; j++ )\n- c[ci + aix[j]] += Math.log(a[j]);\n+ c[ci + aix[j]] += Math.abs(a[j]);\n}\npublic static double[] vectAbsWrite(double[] a, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = Math.log(a[ai]);\n+ c[j] = Math.abs(a[ai]);\nreturn c;\n}\npublic static double[] vectAbsWrite(double[] a, int[] aix, int ai, int len) {\ndouble[] c = allocVector(len, true);\nfor( int j = ai; j < ai+len; j++ )\n- c[aix[j]] = Math.log(a[j]);\n+ c[aix[j]] = Math.abs(a[j]);\nreturn c;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"diff": "@@ -3197,6 +3197,25 @@ public class LibMatrixMult\n}\n}\n+ //note: public for use by codegen for consistency\n+ public static void vectAdd( double[] a, double bval, double[] c, int ai, int ci, final int len ) {\n+ final int bn = len%8;\n+ //rest, not aligned to 8-blocks\n+ for( int j = 0; j < bn; j++, ai++, ci++)\n+ c[ ci ] += a[ ai ];\n+ //unrolled 8-block (for better ILP)\n+ for( int j = bn; j < len; j+=8, ai+=8, ci+=8) {\n+ c[ ci+0 ] += a[ ai+0 ] + bval;\n+ c[ ci+1 ] += a[ ai+1 ] + bval;\n+ c[ ci+2 ] += a[ ai+2 ] + bval;\n+ c[ ci+3 ] += a[ ai+3 ] + bval;\n+ c[ ci+4 ] += a[ ai+4 ] + bval;\n+ c[ ci+5 ] += a[ ai+5 ] + bval;\n+ c[ ci+6 ] += a[ ai+6 ] + bval;\n+ c[ ci+7 ] += a[ ai+7 ] + bval;\n+ }\n+ }\n+\n//note: public for use by codegen for consistency\npublic static void vectAdd( double[] a, double[] c, int ai, int ci, final int len )\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1730] Fix correctness codegen vector primitives (abs, sm-div) |
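Both fixes are visible in the diff: the scalar-divided-by-vector primitive computed bval / a[ai] / bval, and the abs primitives called Math.log. The corrected dense semantics, restated as a plain-array sketch (the real primitives also have sparse and accumulating "Add" variants):

```java
// Scalar divided element-wise by a vector segment of length len.
static double[] vectDivWrite(double bval, double[] a, int ai, int len) {
    double[] c = new double[len];
    for (int j = 0; j < len; j++, ai++)
        c[j] = bval / a[ai];          // was: bval / a[ai] / bval
    return c;
}

// Element-wise absolute value of a vector segment of length len.
static double[] vectAbsWrite(double[] a, int ai, int len) {
    double[] c = new double[len];
    for (int j = 0; j < len; j++, ai++)
        c[j] = Math.abs(a[ai]);       // was: Math.log(a[ai])
    return c;
}
```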
49,738 | 21.06.2017 23:07:52 | 25,200 | f516e4bdc9af606d2112564901ebb2e27467569d | Fix correctness matrix-scalar pow (sparse-safeness)
This patch fixes a correctness issue of right matrix-scalar pow operations and
the special case pow(X,0). This right scalar operator was marked
statically sparse-safe although it should be conditionally sparse-safe
if the given constant is not equal to 0. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/operators/RightScalarOperator.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/operators/RightScalarOperator.java",
"diff": "@@ -52,18 +52,12 @@ public class RightScalarOperator extends ScalarOperator\n|| (fn instanceof GreaterThanEquals && _constant>0)\n|| (fn instanceof LessThan && _constant<=0)\n|| (fn instanceof LessThanEquals && _constant<0)\n- || (fn instanceof Divide && _constant!=0));\n+ || (fn instanceof Divide && _constant!=0)\n+ || (fn instanceof Power && _constant!=0));\n}\n@Override\npublic double executeScalar(double in) throws DMLRuntimeException {\nreturn fn.execute(in, _constant);\n}\n-\n- @Override\n- protected boolean isSparseSafeStatic() {\n- //add power as only rhs op sparse safe (1^0=1 but 0^1=0).\n- return (super.isSparseSafeStatic()\n- || fn instanceof Power);\n- }\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1624] Fix correctness matrix-scalar pow (sparse-safeness)
This patch fixes a correctness issue of right matrix-scalar pow operations and
the special case pow(X,0). This right scalar operator was marked
statically sparse-safe although it should be conditionally sparse-safe
if the given constant is not equal to 0. |
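The `_constant!=0` guard added above encodes the sparse-safety rule for scalar ops: zero cells may be skipped only if op(0) == 0. A quick check of the special case named in the message:

```java
// The special case behind the fix: for pow(X, c), zeros stay zeros when
// e.g. c == 2, but 0^0 == 1, so pow(X, 0) must be treated as sparse-unsafe
// (every zero cell would otherwise silently keep the wrong value 0).
public class PowSparseSafety {
    public static void main(String[] args) {
        System.out.println(Math.pow(0, 0)); // 1.0 -> sparse-unsafe case
        System.out.println(Math.pow(0, 2)); // 0.0 -> zeros stay zero
    }
}
```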
49,767 | 22.06.2017 11:05:24 | 25,200 | 345682404c3fb1348484c375e811ee3f5805a691 | Reshape util to convert tensors in NCHW to CNHW format
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/run_tests.dml",
"new_path": "scripts/nn/test/run_tests.dml",
"diff": "@@ -97,6 +97,7 @@ test::max_pool2d()\ntest::padding()\ntest::tanh()\ntest::threshold()\n+test::transpose_NCHW_to_CNHW()\nprint(\"---\")\nprint(\"Other tests complete -- look for any ERRORs or WARNINGs.\")\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/test.dml",
"new_path": "scripts/nn/test/test.dml",
"diff": "@@ -488,6 +488,39 @@ padding = function() {\n}\n}\n+transpose_NCHW_to_CNHW = function() {\n+ /*\n+ * Test for `transpose_NCHW_to_CNHW` function.\n+ */\n+ print(\"Testing transpose_NCHW_to_CNHW function.\")\n+\n+ # Generate data\n+ N = 2\n+ C = 3\n+ H = 4\n+ W = 5\n+ X = matrix(seq(1, N*C*H*W), rows=N, cols=C*H*W)\n+\n+ out = util::transpose_NCHW_to_CNHW(X, C)\n+\n+ target =\n+ matrix(\"1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\n+ 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80\n+ 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40\n+ 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100\n+ 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60\n+ 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120\",\n+ rows=C, cols=N*H*W)\n+\n+ # Equivalency check\n+ for (i in 1:nrow(out)) {\n+ for(j in 1:ncol(out)) {\n+ rel_error = test_util::check_rel_error(as.scalar(out[i,j]),\n+ as.scalar(target[i,j]), 1e-10, 1e-12)\n+ }\n+ }\n+}\n+\nmax_pool2d = function() {\n/*\n* Test for the 2D max pooling functions.\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/util.dml",
"new_path": "scripts/nn/util.dml",
"diff": "@@ -216,3 +216,55 @@ threshold = function(matrix[double] X, double thresh)\nout = X > thresh\n}\n+/*\n+ * Reshape util for tensors in NCHW format.\n+ * Transposes the 1st and 2nd dimensions.\n+ */\n+transpose_NCHW_to_CNHW = function(matrix[double] X, int C) return (matrix[double] out){\n+ /*\n+ * Inputs:\n+ * - X: Input with N rows and channels flattened within each row in\n+ * channel-major format (NCHW).\n+ * - C: Number of channels (dimensionality of depth).\n+ *\n+ * Outputs:\n+ * - out: Transposed output with C rows.\n+ */\n+ N = nrow(X)\n+ D = ncol(X) / C\n+\n+ /*\n+ * This is an easy reshape because the channels remain intact. By\n+ * reshaping X to a matrix with N*C rows, we can reduce our task to\n+ * re-ordering rows (followed by the obvious reshape to achieve the\n+ * required output shape with C rows).\n+ *\n+ * The difficult part is to obtain the permutation matrix required\n+ * for re-ordering the rows. In this case, since we want to bring the\n+ * ith channels from all rows together, we will need a column vector\n+ * of the following form:\n+ * [1, 1+C, 1+2C, ..., 1+(N-1)C,\n+ * 2, 2+C, ..., 2+(N-1)C,\n+ * 3, 3+C, ..., 3+(N-1)C,\n+ * .\n+ * .\n+ * .\n+ * C, 2C, ..., NC]'\n+ * This vector can be produced via an outer call.\n+ */\n+ col_idx = outer(seq(1,C), C*t(seq(0,N-1)), \"+\")\n+\n+ /*\n+ * Generate the permutation matrix by:\n+ * - reshaping the result of outer into a col\n+ * - invoking table\n+ */\n+ permut = table(seq(1, N*C), matrix(col_idx, rows=N*C, cols=1), N*C, N*C)\n+\n+ /*\n+ * Generate the output by:\n+ * - pre-multiplying the (reshaped) X with the permutation matrix\n+ * - reshape to get the output shape with C rows\n+ */\n+ out = matrix(permut %*% matrix(X, rows=N*C, cols=D), rows=C, cols=N*D)\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1728] Reshape util to convert tensors in NCHW to CNHW format
Closes #552. |
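The DML utility above builds a permutation matrix via outer and table; the same NCHW-to-CNHW reshape can also be stated as plain index arithmetic. The following illustrative Java sketch matches the test's expected target (with N=2, C=3, rows 1..60 and 61..120 interleave per channel):

```java
// NCHW -> CNHW transpose: X has N rows of C*D values (D = H*W); the output
// has C rows of N*D values, with out[c][n*D + d] = X[n][c*D + d].
static double[][] transposeNCHWtoCNHW(double[][] X, int C) {
    int N = X.length, D = X[0].length / C;
    double[][] out = new double[C][N * D];
    for (int n = 0; n < N; n++)
        for (int c = 0; c < C; c++)
            System.arraycopy(X[n], c * D, out[c], n * D, D); // move channel c of row n
    return out;
}
```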
49,717 | 22.06.2017 17:04:49 | 25,200 | 57e11e99c3f110b68ad5e3397f10b30533ab9b79 | Prepare for GPU on Jenkins, disable expensive NN tests
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/release-process.md",
"new_path": "docs/release-process.md",
"diff": "@@ -259,6 +259,18 @@ For examples, see the [Spark MLContext Programming Guide](http://apache.github.i\nVerify that the performance suite located at scripts/perftest/ executes on Spark and Hadoop. Testing should\ninclude 80MB, 800MB, 8GB, and 80GB data sizes.\n+# Run NN Unit Tests for GPU\n+\n+<a href=\"#release-candidate-checklist\">Up to Checklist</a>\n+\n+The unit tests for NN operators for GPU take a long time to run and are therefor not run as part of the Jenkins build.\n+They must be run before a release. To run them, edit the\n+[NeuralNetworkOpTests.java|https://github.com/apache/systemml/blob/master/src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java]\n+file and remove all the `@Ignore` annotations from all the tests. Then run the NN unit tests using mvn verify:\n+```\n+mvn -Dit.test=org.apache.sysml.test.gpu.NeuralNetworkOpTests verify -PgpuTests\n+```\n+\n# Voting\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java",
"new_path": "src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java",
"diff": "@@ -35,6 +35,15 @@ import org.junit.Test;\n/**\n* Test neural network operations on the GPU\n+ * Because of the large number of cases that each test deals with, this class takes\n+ * very long to run. (It took about 9 hours to run the testMaxPoolBackward() to completion.\n+ * The recommended course of action before a release is\n+ * 1. Remove the @Ignore annotations\n+ * 2. Run just these test on a machine with CUDA 8 installed.\n+ * Only this class can be run like so:\n+ * <code>\n+ * mvn -Dit.test=org.apache.sysml.test.gpu.NeuralNetworkOpTests verify -PgpuTests\n+ * </code>\n*/\npublic class NeuralNetworkOpTests extends GPUTests {\n@@ -100,6 +109,7 @@ public class NeuralNetworkOpTests extends GPUTests {\nreturn 1e-5;\n}\n+ @Ignore\n@Test\npublic void testConv2d() {\nString scriptStr = \"O = conv2d(image, filter, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], filter_shape=[K,C,R,S])\";\n@@ -253,6 +263,7 @@ public class NeuralNetworkOpTests extends GPUTests {\nclearGPUMemory();\n}\n+ @Ignore\n@Test\npublic void testConv2dBackwardFilter() {\nString scriptStr = \"O = conv2d_backward_filter(image, dout, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], filter_shape=[K,C,R,S])\";\n@@ -298,9 +309,9 @@ public class NeuralNetworkOpTests extends GPUTests {\nfilterSizeInMB, N, K, P, Q, doutSizeInMB,\nstrideH, strideW, padH, padW);\nMatrix image = generateInputMatrix(spark, (int) N,\n- (int) (C * H * W), 0.-127.0, 127, sparsity, seed);\n+ (int) (C * H * W), -127.0, 127, sparsity, seed);\nMatrix dout = generateInputMatrix(spark, (int) N,\n- (int) (K * P * Q), 0.-127.0, 127, sparsity, seed);\n+ (int) (K * P * Q), -127.0, 127, sparsity, seed);\nHashMap<String, Object> inputs = new HashMap<>();\ninputs.put(\"N\", N);\ninputs.put(\"C\", C);\n@@ -336,6 +347,7 @@ public class NeuralNetworkOpTests extends GPUTests {\n}\n}\n+ @Ignore\n@Test\npublic void testConv2dBackwardData() {\nString scriptStr = \"O = conv2d_backward_data(filter, dout, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], filter_shape=[K,C,R,S])\";\n@@ -382,9 +394,9 @@ public class NeuralNetworkOpTests extends GPUTests {\nstrideH, strideW, padH, padW);\nMatrix filter = generateInputMatrix(spark, (int) K,\n- (int) (C * R * S), 0.-127.0, 127, sparsity, seed);\n+ (int) (C * R * S), -127.0, 127, sparsity, seed);\nMatrix dout = generateInputMatrix(spark, (int) N,\n- (int) (K * P * Q), 0.-127.0, 127, sparsity, seed);\n+ (int) (K * P * Q), -127.0, 127, sparsity, seed);\nHashMap<String, Object> inputs = new HashMap<>();\ninputs.put(\"N\", N);\ninputs.put(\"C\", C);\n@@ -420,6 +432,7 @@ public class NeuralNetworkOpTests extends GPUTests {\n}\n}\n+ @Ignore\n@Test\npublic void testMaxPool() {\nString scriptStr = \"O = max_pool(image, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], pool_size=[R,S])\";\n@@ -464,7 +477,7 @@ public class NeuralNetworkOpTests extends GPUTests {\nP, Q, doutSizeInMB, strideH, strideW, padH, padW);\nMatrix image = generateInputMatrix(spark, (int) N,\n- (int) (C * H * W), 0.-127.0, 127, sparsity, seed);\n+ (int) (C * H * W), -127.0, 127, sparsity, seed);\nHashMap<String, Object> inputs = new HashMap<>();\ninputs.put(\"N\", N);\ninputs.put(\"C\", C);\n@@ -497,6 +510,7 @@ public class NeuralNetworkOpTests extends GPUTests {\n}\n}\n+ @Ignore\n@Test\npublic void testMaxPoolBackward() {\nString scriptStr = \"O = max_pool_backward(image, dout, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], 
pool_size=[R,S])\";\n@@ -541,9 +555,9 @@ public class NeuralNetworkOpTests extends GPUTests {\nP, Q, doutSizeInMB, strideH, strideW, padH, padW);\nMatrix image = generateInputMatrix(spark, (int) N,\n- (int) (C * H * W), 0.-127.0, 127, sparsity, seed);\n+ (int) (C * H * W), -127.0, 127, sparsity, seed);\nMatrix dout = generateInputMatrix(spark, (int) N, (int) (C * P * Q),\n- 0.-127.0, 127, sparsity, seed);\n+ -127.0, 127, sparsity, seed);\nHashMap<String, Object> inputs = new HashMap<>();\ninputs.put(\"N\", N);\ninputs.put(\"C\", C);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-703] Prepare for GPU on Jenkins, disable expensive NN tests
Closes #550 |
49,738 | 24.06.2017 13:41:17 | 25,200 | e42133fecacc4c5b7e4192533e93a647abbb58b1 | Extended codegen row template (multiple matrix inputs)
Given the recent generalization of vector primitives for scalar-vector
and sparse-unsafe operations, this patch now enables codegen row-wise
operations over multiple input matrices, which helps to reduce the
number of intermediates due to template switches between row-wise and
cell-wise templates. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"diff": "@@ -441,7 +441,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nfor( Long hopID : R ) {\nMemoTableEntry me = memo.getBest(hopID, TemplateType.RowTpl);\nif( me.type == TemplateType.RowTpl && memo.contains(hopID, TemplateType.CellTpl)\n- && rIsRowTemplateWithoutAgg(memo, memo._hopRefs.get(hopID), new HashSet<Long>())) {\n+ && isRowTemplateWithoutAgg(memo, memo._hopRefs.get(hopID), new HashSet<Long>())) {\nList<MemoTableEntry> blacklist = memo.get(hopID, TemplateType.RowTpl);\nmemo.remove(memo._hopRefs.get(hopID), new HashSet<MemoTableEntry>(blacklist));\nif( LOG.isTraceEnabled() ) {\n@@ -523,6 +523,17 @@ public class PlanSelectionFuseCostBased extends PlanSelection\n}\n}\n+ private static boolean isRowTemplateWithoutAgg(CPlanMemoTable memo, Hop current, HashSet<Long> visited) {\n+ //consider all aggregations other than root operation\n+ MemoTableEntry me = memo.getBest(current.getHopID(), TemplateType.RowTpl);\n+ boolean ret = true;\n+ for(int i=0; i<3; i++)\n+ if( me.isPlanRef(i) )\n+ ret &= rIsRowTemplateWithoutAgg(memo,\n+ current.getInput().get(i), visited);\n+ return ret;\n+ }\n+\nprivate static boolean rIsRowTemplateWithoutAgg(CPlanMemoTable memo, Hop current, HashSet<Long> visited) {\nif( visited.contains(current.getHopID()) )\nreturn true;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateBase.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateBase.java",
"diff": "@@ -28,8 +28,8 @@ public abstract class TemplateBase\npublic enum TemplateType {\n//ordering specifies type preferences\nMultiAggTpl,\n- RowTpl,\nOuterProdTpl,\n+ RowTpl,\nCellTpl;\npublic int getRank() {\nreturn this.ordinal();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"diff": "@@ -71,8 +71,8 @@ public class TemplateRow extends TemplateBase\n@Override\npublic boolean open(Hop hop) {\n- return (hop instanceof BinaryOp && hop.dimsKnown() && hop.getInput().get(0).getDim2()>1\n- && hop.getInput().get(1).getDim2()==1 && TemplateCell.isValidOperation(hop))\n+ return (hop instanceof BinaryOp && hop.dimsKnown() && isValidBinaryOperation(hop)\n+ && hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1)\n|| (hop instanceof AggBinaryOp && hop.dimsKnown() && hop.getDim2()==1\n&& hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1)\n|| (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol\n@@ -83,10 +83,7 @@ public class TemplateRow extends TemplateBase\n@Override\npublic boolean fuse(Hop hop, Hop input) {\nreturn !isClosed() &&\n- ( (hop instanceof BinaryOp && TemplateUtils.isOperationSupported(hop)\n- && (HopRewriteUtils.isBinaryMatrixColVectorOperation(hop)\n- || HopRewriteUtils.isBinaryMatrixScalarOperation(hop)\n- || HopRewriteUtils.isBinaryMatrixMatrixOperationWithSharedInput(hop)) )\n+ ( (hop instanceof BinaryOp && isValidBinaryOperation(hop) )\n|| (HopRewriteUtils.isBinary(hop, OpOp2.CBIND) && hop.getInput().indexOf(input)==0\n&& input.getDim2()==1 && hop.getInput().get(1).getDim2()==1\n&& HopRewriteUtils.isEmpty(hop.getInput().get(1)))\n@@ -104,9 +101,7 @@ public class TemplateRow extends TemplateBase\npublic boolean merge(Hop hop, Hop input) {\n//merge rowagg tpl with cell tpl if input is a vector\nreturn !isClosed() &&\n- ((hop instanceof BinaryOp && TemplateUtils.isOperationSupported(hop)\n- && (input.getDim2()==1 //matrix-scalar/vector-vector ops )\n- || HopRewriteUtils.isBinaryMatrixMatrixOperationWithSharedInput(hop)))\n+ ((hop instanceof BinaryOp && isValidBinaryOperation(hop))\n||(hop instanceof AggBinaryOp && input.getDim2()==1\n&& HopRewriteUtils.isTransposeOperation(hop.getInput().get(0))));\n}\n@@ -122,6 +117,14 @@ public class TemplateRow extends TemplateBase\nreturn CloseType.OPEN;\n}\n+ private boolean isValidBinaryOperation(Hop hop) {\n+ //exclude unsupported and matrix-rowvector ops\n+ return TemplateUtils.isOperationSupported(hop)\n+ && (HopRewriteUtils.isBinaryMatrixScalarOperation(hop)\n+ || HopRewriteUtils.isBinaryMatrixColVectorOperation(hop)\n+ || HopRewriteUtils.isBinaryMatrixMatrixOperation(hop));\n+ }\n+\n@Override\npublic Pair<Hop[], CNodeTpl> constructCplan(Hop hop, CPlanMemoTable memo, boolean compileLiterals) {\n//recursively process required cplan output\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1714] Extended codegen row template (multiple matrix inputs)
Given the recent generalization of vector primitives for scalar-vector
and sparse-unsafe operations, this patch now enables codegen row-wise
operations over multiple input matrices, which helps to reduce the
number of intermediates due to template switches between row-wise and
cell-wise templates. |
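What fusing row-wise operations over multiple matrix inputs buys is fewer materialized intermediates: instead of producing (X + Y) and then its product with Z as full matrices, a fused row template streams one row at a time. A hypothetical example operation sketching the effect (not generated SystemML code):

```java
// Fused evaluation of rowSums((X + Y) * Z): one pass over the inputs,
// no intermediate matrices allocated for X+Y or (X+Y)*Z.
static double[] fusedRowSums(double[][] X, double[][] Y, double[][] Z) {
    int m = X.length, n = X[0].length;
    double[] out = new double[m];
    for (int i = 0; i < m; i++) {          // process one row at a time
        double sum = 0;
        for (int j = 0; j < n; j++)
            sum += (X[i][j] + Y[i][j]) * Z[i][j];
        out[i] = sum;
    }
    return out;
}
```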
49,738 | 26.06.2017 22:33:23 | 25,200 | 50dafa038ff3282f327260f2d413bdfd907bfe04 | Fix rewrite 'fuse axpy binary ops' for outer products
This patch fixes the dynamic simplification rewrite
fuseAxpyBinaryOperationChain to not trigger on outer products of vectors
and adds related negative test cases. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ipa/FunctionCallSizeInfo.java",
"new_path": "src/main/java/org/apache/sysml/hops/ipa/FunctionCallSizeInfo.java",
"diff": "@@ -345,6 +345,21 @@ public class FunctionCallSizeInfo\nsb.append(\"\\n\");\n}\n+ sb.append(\"Valid #non-zeros for propagation: \\n\");\n+ for( Entry<String, Set<Integer>> e : _fcandSafeNNZ.entrySet() ) {\n+ sb.append(\"--\");\n+ sb.append(e.getKey());\n+ sb.append(\": \");\n+ for( Integer pos : e.getValue() ) {\n+ sb.append(pos);\n+ sb.append(\":\");\n+ sb.append(_fgraph.getFunctionCalls(e.getKey())\n+ .get(0).getInput().get(pos).getName());\n+ sb.append(\" \");\n+ }\n+ sb.append(\"\\n\");\n+ }\n+\nreturn sb.toString();\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java",
"diff": "@@ -2131,7 +2131,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nprivate Hop fuseAxpyBinaryOperationChain(Hop parent, Hop hi, int pos)\n{\n//patterns: (a) X + s*Y -> X +* sY, (b) s*Y+X -> X +* sY, (c) X - s*Y -> X -* sY\n- if( hi instanceof BinaryOp\n+ if( hi instanceof BinaryOp && !((BinaryOp) hi).isOuterVectorOperator()\n&& (((BinaryOp)hi).getOp()==OpOp2.PLUS || ((BinaryOp)hi).getOp()==OpOp2.MINUS) )\n{\nBinaryOp bop = (BinaryOp) hi;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFuseBinaryOpChainTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFuseBinaryOpChainTest.java",
"diff": "@@ -32,7 +32,6 @@ import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\nimport org.apache.sysml.test.utils.TestUtils;\n-import org.apache.sysml.utils.Statistics;\n/**\n* Regression test for function recompile-once issue with literal replacement.\n@@ -43,6 +42,7 @@ public class RewriteFuseBinaryOpChainTest extends AutomatedTestBase\nprivate static final String TEST_NAME1 = \"RewriteFuseBinaryOpChainTest1\"; //+* (X+s*Y)\nprivate static final String TEST_NAME2 = \"RewriteFuseBinaryOpChainTest2\"; //-* (X-s*Y)\nprivate static final String TEST_NAME3 = \"RewriteFuseBinaryOpChainTest3\"; //+* (s*Y+X)\n+ private static final String TEST_NAME4 = \"RewriteFuseBinaryOpChainTest4\"; //outer(X, s*Y, \"+\") not applied\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RewriteFuseBinaryOpChainTest.class.getSimpleName() + \"/\";\n@@ -55,6 +55,7 @@ public class RewriteFuseBinaryOpChainTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n}\n@Test\n@@ -147,6 +148,18 @@ public class RewriteFuseBinaryOpChainTest extends AutomatedTestBase\ntestFuseBinaryChain( TEST_NAME3, true, ExecType.MR );\n}\n+ //negative tests\n+\n+ @Test\n+ public void testOuterBinaryPlusNoRewriteCP() {\n+ testFuseBinaryChain( TEST_NAME4, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testOuterBinaryPlusRewriteCP() {\n+ testFuseBinaryChain( TEST_NAME4, true, ExecType.CP);\n+ }\n+\n/**\n*\n* @param testname\n@@ -199,7 +212,10 @@ public class RewriteFuseBinaryOpChainTest extends AutomatedTestBase\nprefix = Instruction.SP_INST_PREFIX;\nString opcode = (testname.equals(TEST_NAME1)||testname.equals(TEST_NAME3)) ? prefix+\"+*\" : prefix+\"-*\";\n- Assert.assertTrue(\"Rewrite not applied.\",Statistics.getCPHeavyHitterOpCodes().contains(opcode));\n+ if( testname.equals(TEST_NAME4) )\n+ Assert.assertFalse(\"Rewrite applied.\", heavyHittersContainsSubString(opcode));\n+ else\n+ Assert.assertTrue(\"Rewrite not applied.\", heavyHittersContainsSubString(opcode));\n}\n}\nfinally\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/misc/RewriteFuseBinaryOpChainTest4.R",
"diff": "+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(1, 10, 1);\n+Y = matrix(2, 1, 10);\n+lambda = 7;\n+\n+S = outer(as.vector(X), as.vector(lambda*Y), \"+\");\n+\n+writeMM(as(S, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/misc/RewriteFuseBinaryOpChainTest4.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(1, 10, 1);\n+Y = matrix(2, 1, 10);\n+lambda = 7;\n+if(1==1){}\n+\n+S = outer(X, lambda*Y, \"+\");\n+\n+write(S,$1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1678] Fix rewrite 'fuse axpy binary ops' for outer products
This patch fixes the dynamic simplification rewrite
fuseAxpyBinaryOperationChain to not trigger on outer products of vectors
and adds related negative test cases. |
49,717 | 27.06.2017 17:14:41 | 25,200 | 9f808c43e380a90f814f2e5b7a78397edd1bbb90 | [HOTFIX] for | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/gpu/ScalarMatrixElementwiseOpTests.java",
"new_path": "src/test/java/org/apache/sysml/test/gpu/ScalarMatrixElementwiseOpTests.java",
"diff": "@@ -136,7 +136,7 @@ public class ScalarMatrixElementwiseOpTests extends GPUTests {\nList<Object> cpuOut = runOnCPU(spark, scriptStr, inputs, Arrays.asList(output));\nList<Object> gpuOut = runOnGPU(spark, scriptStr, inputs, Arrays.asList(output));\n//assertHeavyHitterPresent(heavyHitterOpCode);\n- assertEqualMatrices ((Matrix)cpuOut.get(0), (Matrix)gpuOut.get(0));\n+ assertEqualObjects (cpuOut.get(0), gpuOut.get(0));\n}\n@Test\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] for SYSTEMML-1731 |
49,772 | 30.06.2017 12:05:35 | 25,200 | cbde17babd2452c4f265e0b6c56296f246c007a4 | [MINOR] Add clarity to depthwise convolution documentation. | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/conv2d_depthwise.dml",
"new_path": "scripts/nn/layers/conv2d_depthwise.dml",
"diff": "@@ -42,6 +42,10 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b,\n* depth 1 that expand each input channel to M output channels, where\n* M is a \"depth multiplier\".\n*\n+ * Although there are C*M filters of depth 1, instead of storing W as\n+ * shape `(C*M, 1*Hf*Wf)`, we reshape it to `(C, M*Hf*Wf)` for\n+ * performance reasons.\n+ *\n* Inputs:\n* - X: Inputs, of shape (N, C*Hin*Win).\n* - W: Weights, of shape (C, M*Hf*Wf).\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/layers/conv2d_transpose_depthwise.dml",
"new_path": "scripts/nn/layers/conv2d_transpose_depthwise.dml",
"diff": "@@ -41,6 +41,20 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b,\n* The resulting C/M separate output channels are then concatenated\n* together channel-wise into a single volume of C/M output channels.\n*\n+ * For clarity, if we were to use the same terminology as a regular\n+ * depthwise convolution, a depthwise transpose convolution has the\n+ * ability to contract each group of M input channels (from a total of\n+ * C*M input channels) back to a single output channel, thus leading\n+ * to C output channels. Thus, this is the \"transpose\" of the regular\n+ * depthwise convolution. To keep the convention of always referring\n+ * to the number of input channels as C, in this depthwise transpose\n+ * layer we can reformulate the above by dividing by M. With this\n+ * reformulation, we can now state that there are C input channels,\n+ * and for each group of M inputs we output a single output channel,\n+ * for a total of C/M output channels. For this, we use 1 filter of\n+ * depth M for each group of M input channels, and we store W as\n+ * `(C/M, M*Hf*Wf)`.\n+ *\n* Inputs:\n* - X: Inputs, of shape (N, C*Hin*Win).\n* - W: Weights, of shape (C/M, M*Hf*Wf).\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add clarity to depthwise convolution documentation. |
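> Reader's aid (a quick parameter count, not part of the commit): the weight reshape described in this record is a pure relayout, since both shapes hold the same number of entries,
>
> $$(C \cdot M) \times (1 \cdot H_f \cdot W_f) \;=\; C \cdot M \cdot H_f \cdot W_f \;=\; C \times (M \cdot H_f \cdot W_f),$$
>
> so storing W as `(C, M*Hf*Wf)` instead of `(C*M, 1*Hf*Wf)` loses nothing and, as the doc text notes, is done purely for performance reasons.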
49,710 | 30.06.2017 12:34:59 | 25,200 | 42883995285981d420db6f8baa4133e4e1efbde1 | [MINOR] Fix typo in ALS notebook
Closes | [
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/ALS_python_demo.ipynb",
"new_path": "samples/jupyter-notebooks/ALS_python_demo.ipynb",
"diff": "{\n\"cell_type\": \"code\",\n\"execution_count\": 1,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"from pyspark.sql import SparkSession\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": 3,\n\"metadata\": {\n- \"collapsed\": false,\n\"scrolled\": true\n},\n\"outputs\": [\n{\n\"cell_type\": \"code\",\n\"execution_count\": 4,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": 5,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"#-----------------------------------------------------------------\\n\",\n\"source\": [\n\"### Running the Algorithm\\n\",\n\"\\n\",\n- \"We'll first create an MLContext object which the entry point for SystemML. Inputs and outputs are defined through a dml function.\"\n+ \"We'll first create an MLContext object which is the entry point for SystemML. Inputs and outputs are defined through a dml function.\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": 6,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"ml = MLContext(sc)\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": 7,\n\"metadata\": {\n- \"collapsed\": false,\n\"scrolled\": true\n},\n\"outputs\": [\n{\n\"cell_type\": \"code\",\n\"execution_count\": 8,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"predict_dml = \\\\\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": 9,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"# user for which we want to recommend movies\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": 13,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"def show_recommendations(userId, preds):\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": 17,\n\"metadata\": {\n- \"collapsed\": false,\n\"scrolled\": true\n},\n\"outputs\": [\n\"metadata\": {\n\"anaconda-cloud\": {},\n\"kernelspec\": {\n- \"display_name\": \"Python [default]\",\n+ \"display_name\": \"Python 3\",\n\"language\": \"python\",\n\"name\": \"python3\"\n},\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython3\",\n- \"version\": \"3.5.2\"\n+ \"version\": \"3.6.1\"\n}\n},\n\"nbformat\": 4,\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix typo in ALS notebook
Closes #558. |
49,772 | 30.06.2017 16:31:51 | 25,200 | 5da30b51b4afa9b88bb5b91941e7f956a23becc8 | [MINOR] Cleanup `nn` library. | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/test.dml",
"new_path": "scripts/nn/test/test.dml",
"diff": "@@ -835,19 +835,19 @@ top_k = function() {\n2 3 4 1\", rows=2, cols=4)\n# test top_1\n- print(\"Case 1: test top_1\")\n+ print(\" - Testing top_1.\")\n[values_top1, indices_top1] = util::top_k(X, 1)\ncheck_values_top1 = test_util::check_all_equal(values_top1, expected_values_top1)\ncheck_indices_top1 = test_util::check_all_equal(indices_top1, expected_indices_top1)\n# test top_2\n- print(\"Case 2: test top_2\")\n+ print(\" - Testing top_2.\")\n[values_top2, indices_top2] = util::top_k(X, 2)\ncheck_values_top2 = test_util::check_all_equal(values_top2, expected_values_top2)\ncheck_indices_top2 = test_util::check_all_equal(indices_top2, expected_indices_top2)\n# test top_All\n- print(\"Case 3: test top_All\")\n+ print(\" - Testing top_All.\")\n[values_topAll, indices_topAll] = util::top_k(X, 4)\ncheck_values_topAll = test_util::check_all_equal(values_topAll, expected_values_topAll)\ncheck_indices_topAll = test_util::check_all_equal(indices_topAll, expected_indices_topAll)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/util.dml",
"new_path": "scripts/nn/util.dml",
"diff": "@@ -216,57 +216,50 @@ threshold = function(matrix[double] X, double thresh)\nout = X > thresh\n}\n+transpose_NCHW_to_CNHW = function(matrix[double] X, int C)\n+ return (matrix[double] out) {\n/*\n* Reshape util for tensors in NCHW format.\n* Transposes the 1st and 2nd dimensions.\n- */\n-transpose_NCHW_to_CNHW = function(matrix[double] X, int C) return (matrix[double] out){\n- /*\n+ *\n* Inputs:\n- * - X: Input with N rows and channels flattened within each row in\n- * channel-major format (NCHW).\n+ * - X: Inputs, of shape (N, C*H*W).\n* - C: Number of channels (dimensionality of depth).\n*\n* Outputs:\n- * - out: Transposed output with C rows.\n+ * - out: Outputs with the N and C axes transposed, of\n+ * shape (C, N*H*W).\n*/\n-\nN = nrow(X)\nD = ncol(X) / C\n- /*\n- * This is an easy reshape because the channels remain intact. By\n- * reshaping X to a matrix with N*C rows, we can reduce our task to\n- * re-ordering rows (followed by the obvious reshape to achieve the\n- * required output shape with C rows).\n- *\n- * The difficult part is to obtain the permutation matrix required\n- * for re-ordering the rows. In this case, since we want to bring the\n- * ith channels from all rows together, we will need a column vector\n- * of the following form:\n- * [1, 1+C, 1+2C, ..., 1+(N-1)C,\n- * 2, 2+C, ..., 2+(N-1)C,\n- * 3, 3+C, ..., 3+(N-1)C,\n- * .\n- * .\n- * .\n- * C, 2C, ..., NC]'\n- * This vector can be produced via an outer call.\n- */\n+ # This is an easy reshape because the channels remain intact. By\n+ # reshaping X to a matrix with N*C rows, we can reduce our task to\n+ # re-ordering rows (followed by the obvious reshape to achieve the\n+ # required output shape with C rows).\n+ #\n+ # The difficult part is to obtain the permutation matrix required\n+ # for re-ordering the rows. In this case, since we want to bring the\n+ # ith channels from all rows together, we will need a column vector\n+ # of the following form:\n+ # [1, 1+C, 1+2C, ..., 1+(N-1)C,\n+ # 2, 2+C, ..., 2+(N-1)C,\n+ # 3, 3+C, ..., 3+(N-1)C,\n+ # .\n+ # .\n+ # .\n+ # C, 2C, ..., NC]'\n+ # This vector can be produced via an outer call.\ncol_idx = outer(seq(1,C), C*t(seq(0,N-1)), \"+\")\n- /*\n- * Generate the permutation matrix by:\n- * - reshaping the result of outer into a col\n- * - invoking table\n- */\n+ # Generate the permutation matrix by:\n+ # - reshaping the result of outer into a col\n+ # - invoking table\npermut = table(seq(1, N*C), matrix(col_idx, rows=N*C, cols=1), N*C, N*C)\n- /*\n- * Generate the output by:\n- * - pre-multiplying the (reshaped) X with the permutation matrix\n- * - reshape to get the output shape with C rows\n- */\n+ # Generate the output by:\n+ # - pre-multiplying the (reshaped) X with the permutation matrix\n+ # - reshape to get the output shape with C rows\nout = matrix(permut %*% matrix(X, rows=N*C, cols=D), rows=C, cols=N*D)\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Cleanup `nn` library. |
49,717 | 05.07.2017 13:59:35 | 25,200 | 33cb26ded9d28786159aba0d235db7ec25a442a5 | [MINOR] Available families and algorithms printed from perftest script | [
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/run_perftest.py",
"new_path": "scripts/perftest/python/run_perftest.py",
"diff": "@@ -264,9 +264,10 @@ if __name__ == '__main__':\n# Argparse Module\ncparser = argparse.ArgumentParser(description='SystemML Performance Test Script')\n- cparser.add_argument('--family', help='specify class of algorithms (e.g regression, binomial)',\n+ cparser.add_argument('--family', help='specify class of algorithms (available : ' + ', '.join(ML_ALGO.keys()) + ')',\nmetavar='', choices=ML_ALGO.keys(), nargs='+')\n- cparser.add_argument('--algo', help='specify the type of algorithm to run (Overrides --family)', metavar='',\n+ cparser.add_argument('--algo', help='specify the type of algorithm to run '\n+ '(Overrides --family, available : ' + ', '.join(all_algos) + ')', metavar='',\nchoices=all_algos, nargs='+')\ncparser.add_argument('--exec-type', default='singlenode', help='System-ML backend '\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Available families and algorithms printed from perftest script |
49,717 | 05.07.2017 14:50:38 | 25,200 | 1e1d3727f4d88c9ef053d56da7aec640e0b88424 | [MINOR] More updates to the perftest help message | [
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/run_perftest.py",
"new_path": "scripts/perftest/python/run_perftest.py",
"diff": "@@ -262,28 +262,32 @@ if __name__ == '__main__':\n# Remove duplicates algorithms and used as default inputs\nall_algos = set(reduce(lambda x, y: x + y, ML_ALGO.values()))\n+ # Families\n+ all_families = ML_ALGO.keys()\n+\n# Argparse Module\ncparser = argparse.ArgumentParser(description='SystemML Performance Test Script')\n- cparser.add_argument('--family', help='specify class of algorithms (available : ' + ', '.join(ML_ALGO.keys()) + ')',\n- metavar='', choices=ML_ALGO.keys(), nargs='+')\n- cparser.add_argument('--algo', help='specify the type of algorithm to run '\n- '(Overrides --family, available : ' + ', '.join(all_algos) + ')', metavar='',\n+ cparser.add_argument('--family', help='space separated list of classes of algorithms '\n+ '(available : ' + ', '.join(sorted(all_families)) + ')',\n+ metavar='', choices=all_families, nargs='+')\n+ cparser.add_argument('--algo', help='space separated list of algorithm to run '\n+ '(Overrides --family, available : ' + ', '.join(sorted(all_algos)) + ')', metavar='',\nchoices=all_algos, nargs='+')\ncparser.add_argument('--exec-type', default='singlenode', help='System-ML backend '\n- '(e.g singlenode, spark-hybrid)', metavar='',\n+ '(available : singlenode, spark-hybrid)', metavar='',\nchoices=default_execution_mode)\n- cparser.add_argument('--mat-type', default=default_mat_type, help='type of matrix to generate '\n- '(e.g dense or sparse)', metavar='', choices=default_mat_type,\n+ cparser.add_argument('--mat-type', default=default_mat_type, help='space separated list of types of matrix to generate '\n+ '(available : dense, sparse)', metavar='', choices=default_mat_type,\nnargs='+')\n- cparser.add_argument('--mat-shape', default=default_mat_shape, help='shape of matrix '\n- 'to generate (e.g 10k_1k)', metavar='', nargs='+')\n- cparser.add_argument('--temp-dir', default=default_temp_dir, help='specify temporary directory',\n- metavar='')\n- cparser.add_argument('--filename', default='perf_test', help='specify output file for the perf'\n- ' metics', metavar='')\n+ cparser.add_argument('--mat-shape', default=default_mat_shape, help='space separated list of shapes of matrices '\n+ 'to generate (e.g 10k_1k, 20M_4k)', metavar='', nargs='+')\n+ cparser.add_argument('--temp-dir', default=default_temp_dir, help='temporary directory '\n+ 'where generated, training and prediction data is put', metavar='')\n+ cparser.add_argument('--filename', default='perf_test', help='name of the output file for the perf'\n+ ' metrics', metavar='')\ncparser.add_argument('--mode', default=default_workload,\n- help='specify type of workload to run (e.g data-gen, train, predict)',\n+ help='space separated list of types of workloads to run (available: data-gen, train, predict)',\nmetavar='', choices=default_workload, nargs='+')\n# Args is a namespace\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] More updates to the perftest help message |
49,738 | 05.07.2017 23:46:11 | 25,200 | f0cb8cc86feae0d0b5825f01cf85b47337336fa7 | [MINOR] Fix consistency matrix/frame writers (crc files, part names) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterBinaryBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterBinaryBlock.java",
"diff": "@@ -68,6 +68,7 @@ public class FrameWriterBinaryBlock extends FrameWriter\n//sequential write to single file\nwriteBinaryBlockFrameToSequenceFile(path, job, fs, src, blen, 0, (int)rlen);\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, path);\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterBinaryBlockParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterBinaryBlockParallel.java",
"diff": "@@ -28,6 +28,7 @@ import java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\nimport org.apache.hadoop.fs.FileSystem;\n+import org.apache.hadoop.fs.LocalFileSystem;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.mapred.JobConf;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -45,7 +46,7 @@ import org.apache.sysml.runtime.util.MapReduceTool;\n*/\npublic class FrameWriterBinaryBlockParallel extends FrameWriterBinaryBlock\n{\n-\n+ @Override\nprotected void writeBinaryBlockFrameToHDFS( Path path, JobConf job, FrameBlock src, long rlen, long clen )\nthrows IOException, DMLRuntimeException\n{\n@@ -75,7 +76,7 @@ public class FrameWriterBinaryBlockParallel extends FrameWriterBinaryBlock\nArrayList<WriteFileTask> tasks = new ArrayList<WriteFileTask>();\nint blklen = (int)Math.ceil((double)rlen / blen / numThreads) * blen;\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteFileTask(newPath, job, fs, src, i*blklen, Math.min((i+1)*blklen, (int)rlen), blen));\n}\n@@ -86,6 +87,13 @@ public class FrameWriterBinaryBlockParallel extends FrameWriterBinaryBlock\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n+\n+ // delete crc files if written to local file system\n+ if (fs instanceof LocalFileSystem) {\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\ncatch (Exception e) {\nthrow new IOException(\"Failed parallel write of binary block input.\", e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCSV.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCSV.java",
"diff": "@@ -78,6 +78,7 @@ public class FrameWriterTextCSV extends FrameWriter\n//sequential write to single text file\nwriteCSVFrameToFile(path, job, fs, src, 0, (int)rlen, csvprops);\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, path);\n}\nprotected final void writeCSVFrameToFile( Path path, JobConf job, FileSystem fs, FrameBlock src, int rl, int ru, CSVFileFormatProperties props )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCSVParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCSVParallel.java",
"diff": "@@ -28,6 +28,7 @@ import java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\nimport org.apache.hadoop.fs.FileSystem;\n+import org.apache.hadoop.fs.LocalFileSystem;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.mapred.JobConf;\nimport org.apache.sysml.conf.DMLConfig;\n@@ -77,7 +78,7 @@ public class FrameWriterTextCSVParallel extends FrameWriterTextCSV\nArrayList<WriteFileTask> tasks = new ArrayList<WriteFileTask>();\nint blklen = (int)Math.ceil((double)rlen / numThreads);\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteFileTask(newPath, job, fs, src, i*blklen, (int)Math.min((i+1)*blklen, rlen), csvprops));\n}\n@@ -88,6 +89,13 @@ public class FrameWriterTextCSVParallel extends FrameWriterTextCSV\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n+\n+ // delete crc files if written to local file system\n+ if (fs instanceof LocalFileSystem) {\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\ncatch (Exception e) {\nthrow new IOException(\"Failed parallel write of csv output.\", e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCell.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCell.java",
"diff": "@@ -67,6 +67,7 @@ public class FrameWriterTextCell extends FrameWriter\n//sequential write to single text file\nwriteTextCellFrameToFile(path, job, fs, src, 0, (int)rlen);\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, path);\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCellParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameWriterTextCellParallel.java",
"diff": "@@ -28,6 +28,7 @@ import java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\nimport org.apache.hadoop.fs.FileSystem;\n+import org.apache.hadoop.fs.LocalFileSystem;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.mapred.JobConf;\nimport org.apache.sysml.conf.DMLConfig;\n@@ -43,7 +44,6 @@ import org.apache.sysml.runtime.util.MapReduceTool;\n*/\npublic class FrameWriterTextCellParallel extends FrameWriterTextCell\n{\n-\n@Override\nprotected void writeTextCellFrameToHDFS( Path path, JobConf job, FrameBlock src, long rlen, long clen )\nthrows IOException\n@@ -73,7 +73,7 @@ public class FrameWriterTextCellParallel extends FrameWriterTextCell\nArrayList<WriteFileTask> tasks = new ArrayList<WriteFileTask>();\nint blklen = (int)Math.ceil((double)rlen / numThreads);\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteFileTask(newPath, job, fs, src, i*blklen, (int)Math.min((i+1)*blklen, rlen)));\n}\n@@ -84,6 +84,13 @@ public class FrameWriterTextCellParallel extends FrameWriterTextCell\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n+\n+ // delete crc files if written to local file system\n+ if (fs instanceof LocalFileSystem) {\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\ncatch (Exception e) {\nthrow new IOException(\"Failed parallel write of text output.\", e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java",
"diff": "@@ -88,6 +88,10 @@ public class IOUtilFunctions\nreturn scheme.startsWith(\"s3\") || scheme.startsWith(\"swift\");\n}\n+ public static String getPartFileName(int pos) {\n+ return String.format(\"0-m-%05d\", pos);\n+ }\n+\npublic static void closeSilently( Closeable io ) {\ntry {\nif( io != null )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/WriterBinaryBlockParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/WriterBinaryBlockParallel.java",
"diff": "@@ -73,7 +73,7 @@ public class WriterBinaryBlockParallel extends WriterBinaryBlock\nArrayList<WriteFileTask> tasks = new ArrayList<WriteFileTask>();\nint blklen = (int)Math.ceil((double)rlen / brlen / numThreads) * brlen;\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteFileTask(newPath, job, fs, src, i*blklen, Math.min((i+1)*blklen, rlen), brlen, bclen));\n}\n@@ -84,18 +84,16 @@ public class WriterBinaryBlockParallel extends WriterBinaryBlock\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n- }\n- catch (Exception e) {\n- throw new IOException(\"Failed parallel write of binary block input.\", e);\n- }\n// delete crc files if written to local file system\nif (fs instanceof LocalFileSystem) {\n- int blklen = (int)Math.ceil((double)rlen / numThreads);\n- for(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n- IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, newPath);\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\n+ catch (Exception e) {\n+ throw new IOException(\"Failed parallel write of binary block input.\", e);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/WriterMatrixMarketParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/WriterMatrixMarketParallel.java",
"diff": "@@ -71,7 +71,7 @@ public class WriterMatrixMarketParallel extends WriterMatrixMarket\nArrayList<WriteMMTask> tasks = new ArrayList<WriteMMTask>();\nint blklen = (int)Math.ceil((double)rlen / numThreads);\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteMMTask(newPath, job, fs, src, i*blklen, (int)Math.min((i+1)*blklen, rlen)));\n}\n@@ -82,18 +82,16 @@ public class WriterMatrixMarketParallel extends WriterMatrixMarket\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n- }\n- catch (Exception e) {\n- throw new IOException(\"Failed parallel write of text output.\", e);\n- }\n// delete crc files if written to local file system\nif (fs instanceof LocalFileSystem) {\n- int blklen = (int)Math.ceil((double)rlen / numThreads);\n- for(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n- IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, newPath);\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\n+ catch (Exception e) {\n+ throw new IOException(\"Failed parallel write of text output.\", e);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/WriterTextCSVParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/WriterTextCSVParallel.java",
"diff": "@@ -74,7 +74,7 @@ public class WriterTextCSVParallel extends WriterTextCSV\nint rlen = src.getNumRows();\nint blklen = (int)Math.ceil((double)rlen / numThreads);\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteCSVTask(newPath, job, fs, src, i*blklen, (int)Math.min((i+1)*blklen, rlen), csvprops));\n}\n@@ -85,19 +85,16 @@ public class WriterTextCSVParallel extends WriterTextCSV\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n- }\n- catch (Exception e) {\n- throw new IOException(\"Failed parallel write of csv output.\", e);\n- }\n// delete crc files if written to local file system\nif (fs instanceof LocalFileSystem) {\n- int rlen = src.getNumRows();\n- int blklen = (int)Math.ceil((double)rlen / numThreads);\n- for(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n- IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, newPath);\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\n+ catch (Exception e) {\n+ throw new IOException(\"Failed parallel write of csv output.\", e);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/WriterTextCellParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/WriterTextCellParallel.java",
"diff": "@@ -70,7 +70,7 @@ public class WriterTextCellParallel extends WriterTextCell\nArrayList<WriteTextTask> tasks = new ArrayList<WriteTextTask>();\nint blklen = (int)Math.ceil((double)rlen / numThreads);\nfor(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n+ Path newPath = new Path(path, IOUtilFunctions.getPartFileName(i));\ntasks.add(new WriteTextTask(newPath, job, fs, src, i*blklen, (int)Math.min((i+1)*blklen, rlen)));\n}\n@@ -81,18 +81,16 @@ public class WriterTextCellParallel extends WriterTextCell\n//check for exceptions\nfor( Future<Object> task : rt )\ntask.get();\n- }\n- catch (Exception e) {\n- throw new IOException(\"Failed parallel write of text output.\", e);\n- }\n// delete crc files if written to local file system\nif (fs instanceof LocalFileSystem) {\n- int blklen = (int)Math.ceil((double)rlen / numThreads);\n- for(int i=0; i<numThreads & i*blklen<rlen; i++) {\n- Path newPath = new Path(path, String.format(\"0-m-%05d\",i));\n- IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, newPath);\n+ for(int i=0; i<numThreads & i*blklen<rlen; i++)\n+ IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs,\n+ new Path(path, IOUtilFunctions.getPartFileName(i)));\n+ }\n}\n+ catch (Exception e) {\n+ throw new IOException(\"Failed parallel write of text output.\", e);\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix consistency matrix/frame writers (crc files, part names) |
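> For readers unfamiliar with the convention this record unifies, the sketch below reproduces the part-file naming and task partitioning visible in the diffs. It mirrors `IOUtilFunctions.getPartFileName` and the writers' task-creation loop; the row count and thread count are made-up example values, and the Hadoop `Path`/crc handling is omitted.
>
> ```java
> // Minimal, self-contained sketch (no Hadoop dependency) of the part-file
> // naming this commit standardizes across matrix and frame writers.
> public class PartFileNamingSketch {
>     // Mirrors IOUtilFunctions.getPartFileName(int pos) from the diff.
>     static String getPartFileName(int pos) {
>         return String.format("0-m-%05d", pos);
>     }
>
>     public static void main(String[] args) {
>         long rlen = 1000;   // rows to write (example value, not a SystemML default)
>         int numThreads = 4; // write parallelism (example value)
>         int blklen = (int) Math.ceil((double) rlen / numThreads);
>         // One output file per active write task, exactly as in the writers'
>         // loops: new Path(path, IOUtilFunctions.getPartFileName(i)).
>         for (int i = 0; i < numThreads && i * blklen < rlen; i++)
>             System.out.println(getPartFileName(i)); // 0-m-00000 ... 0-m-00003
>     }
> }
> ```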
49,717 | 06.07.2017 15:18:35 | 25,200 | 66b28c6e356e894c7e6c21655dab85484bf4840a | JCuda jars in extra assembly jar
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/extra.xml",
"new_path": "src/assembly/extra.xml",
"diff": "<outputDirectory>.</outputDirectory>\n</fileSet>\n</fileSets>\n+\n+ <!-- Include platform specific JCuda Jars -->\n+ <dependencySets>\n+ <dependencySet>\n+ <includes>\n+ <include>org.jcuda:*</include>\n+ </includes>\n+ <unpack>true</unpack>\n+ <scope>compile</scope>\n+ </dependencySet>\n+ </dependencySets>\n+\n</assembly>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/assembly/extra/LICENSE",
"new_path": "src/assembly/extra/LICENSE",
"diff": "@@ -460,3 +460,51 @@ Copyright 2017 The TensorFlow Authors. All rights reserved.\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n+\n+===============================================================================\n+\n+The following compile-scope dependencies come under the MIT License\n+\n+JCuda (jcuda.org)\n+\n+org.jcuda:jcuda:0.8.0\n+org.jcuda:jcublas:0.8.0\n+org.jcuda:jcufft:0.8.0\n+org.jcuda:jcusparse:0.8.0\n+org.jcuda:jcusolver:0.8.0\n+org.jcuda:jcurand:0.8.0\n+org.jcuda:jnvgraph:0.8.0\n+org.jcuda:jcudnn:0.8.0\n+org.jcuda:jcuda-natives:0.8.0\n+org.jcuda:jcublas-natives:0.8.0\n+org.jcuda:jcufft-natives:0.8.0\n+org.jcuda:jcusparse-natives:0.8.0\n+org.jcuda:jcusolver-natives:0.8.0\n+org.jcuda:jcurand-natives:0.8.0\n+org.jcuda:jnvgraph-natives:0.8.0\n+org.jcuda:jcudnn-natives:0.8.0\n+\n+\n+The MIT License (MIT)\n+\n+Copyright (c) 2008-2016 Marco Hutter - http://www.jcuda.org\n+\n+Permission is hereby granted, free of charge, to any person obtaining a copy\n+of this software and associated documentation files (the \"Software\"), to deal\n+in the Software without restriction, including without limitation the rights\n+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n+copies of the Software, and to permit persons to whom the Software is\n+furnished to do so, subject to the following conditions:\n+\n+The above copyright notice and this permission notice shall be included in all\n+copies or substantial portions of the Software.\n+\n+THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n+SOFTWARE.\n+\n+===============================================================================\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1744] JCuda jars in extra assembly jar
Closes #559 |
49,717 | 07.07.2017 11:23:17 | 25,200 | 152eba1a7d5de2d34ab97db7d49596b41569aeb5 | Write output of systemml run from perf test scripts
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/run_perftest.py",
"new_path": "scripts/perftest/python/run_perftest.py",
"diff": "@@ -82,7 +82,7 @@ ML_PREDICT = {'Kmeans': 'Kmeans-predict',\n# Responsible for execution and metric logging\n-def algorithm_workflow(algo, exec_type, config_path, file_name, action_mode):\n+def algorithm_workflow(algo, exec_type, config_path, dml_file_name, action_mode):\n\"\"\"\nThis function is responsible for overall workflow. This does the following actions\nCheck if the input is key value argument or list of positional args\n@@ -99,7 +99,7 @@ def algorithm_workflow(algo, exec_type, config_path, file_name, action_mode):\nconfig_path : String\nPath to read the json file from\n- file_name : String\n+ dml_file_name : String\nDML file name to be used while processing the arguments give\naction_mode : String\n@@ -116,8 +116,8 @@ def algorithm_workflow(algo, exec_type, config_path, file_name, action_mode):\nlist_args = ' '.join(config_data)\nargs = {'-args': list_args}\n- folder_name = config_path.split('/')[-1]\n- mat_type, mat_shape, intercept = get_folder_metrics(folder_name, action_mode)\n+ config_file_name = config_path.split('/')[-1]\n+ mat_type, mat_shape, intercept = get_folder_metrics(config_file_name, action_mode)\nexit_flag_success = get_existence(config_path, action_mode)\n@@ -125,7 +125,7 @@ def algorithm_workflow(algo, exec_type, config_path, file_name, action_mode):\nprint('data already exists {}'.format(config_path))\ntime = 'data_exists'\nelse:\n- time = exec_dml_and_parse_time(exec_type, file_name, args)\n+ time = exec_dml_and_parse_time(exec_type, dml_file_name, config_file_name, args)\n# Write a _SUCCESS file only if time is found and in data-gen action_mode\nif len(time.split('.')) == 2 and action_mode == 'data-gen':\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/utils.py",
"new_path": "scripts/perftest/python/utils.py",
"diff": "@@ -138,7 +138,7 @@ def get_existence(path, action_mode):\nreturn exist\n-def exec_dml_and_parse_time(exec_type, file_name, args, Time=True):\n+def exec_dml_and_parse_time(exec_type, dml_file_name, execution_output_file, args, Time=True):\n\"\"\"\nThis function is responsible of execution of input arguments via python sub process,\nWe also extract time obtained from the output of this subprocess\n@@ -146,9 +146,12 @@ def exec_dml_and_parse_time(exec_type, file_name, args, Time=True):\nexec_type: String\nContains the execution type singlenode / hybrid_spark\n- file_name: String\n+ dml_file_name: String\nDML file name to be used while processing the arguments give\n+ execution_output_file: String\n+ Name of the file where the output of the DML run is written out\n+\nargs: Dictionary\nKey values pairs depending on the arg type\n@@ -156,7 +159,7 @@ def exec_dml_and_parse_time(exec_type, file_name, args, Time=True):\nBoolean argument used to extract time from raw output logs.\n\"\"\"\n- algorithm = file_name + '.dml'\n+ algorithm = dml_file_name + '.dml'\nif exec_type == 'singlenode':\nexec_script = join(os.environ.get('SYSTEMML_HOME'), 'bin', 'systemml-standalone.py')\n@@ -189,11 +192,15 @@ def exec_dml_and_parse_time(exec_type, file_name, args, Time=True):\nout1, err1 = proc1.communicate()\nif \"Error\" in str(err1):\n- print('Error Found in {}'.format(file_name))\n+ print('Error Found in {}'.format(dml_file_name))\ntotal_time = 'failure'\nelse:\ntotal_time = parse_time(proc1_log)\n+ with open(execution_output_file, 'w') as f:\n+ for row in proc1_log:\n+ f.write(\"%s\\n\" % str(row))\n+\nelse:\ntotal_time = 'not_specified'\n@@ -242,7 +249,8 @@ def exec_test_data(exec_type, path):\nargs = {'-args': ' '.join([X, Y, X_test, Y_test, 'csv'])}\n# Call the exec script without time\n- exec_dml_and_parse_time(exec_type, test_split_script, args, False)\n+ config_file_name = path.split('/')[-1]\n+ exec_dml_and_parse_time(exec_type, test_split_script, config_file_name, args, False)\ndef check_predict(current_algo, ML_PREDICT):\n"
}
] | Java | Apache License 2.0 | apache/systemds | Write output of systemml run from perf test scripts
Closes #561 |
49,738 | 07.07.2017 11:49:51 | 25,200 | 1db240767ebab1dd645af6debdf0742182ff078c | [MINOR] Fix misc javadoc issues and compiler warnings | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java",
"new_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java",
"diff": "@@ -131,7 +131,7 @@ public class Connection implements Closeable\n* Connection constructor, the starting point for any other JMLC API calls.\n* This variant allows to enable a set of boolean compiler configurations.\n*\n- * @param config one or many boolean compiler configurations to enable.\n+ * @param configs one or many boolean compiler configurations to enable.\n*/\npublic Connection(CompilerConfig.ConfigType... configs) {\n//basic constructor, which also constructs the compiler config\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -2751,7 +2751,7 @@ public class LibMatrixCUDA {\nif (ec.getGPUContext(0) != gCtx)\nthrow new DMLRuntimeException(\"GPU : Invalid internal state, the GPUContext set with the ExecutionContext is not the same used to run this LibMatrixCUDA function\");\nif(constant == 0) {\n- MatrixObject out = getSparseMatrixOutputForGPUInstruction(ec, 0, instName, outputName);\n+ getSparseMatrixOutputForGPUInstruction(ec, 0, instName, outputName);\n} else {\n//MatrixObject out = ec.getMatrixObject(outputName);\nMatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, instName, outputName); // Allocated the dense output matrix\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"diff": "@@ -49,7 +49,6 @@ import org.apache.sysml.runtime.util.UtilFunctions;\n* In general all implementations use internally dense outputs\n* for direct access, but change the final result to sparse if necessary.\n* The only exceptions are ultra-sparse matrix mult, wsloss and wsigmoid.\n- * <p>\n*/\npublic class LibMatrixMult\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix misc javadoc issues and compiler warnings |
49,737 | 07.07.2017 12:42:48 | 25,200 | 627a6c40d6db9c19e06cf0bc4a0e43089e5e4c5f | [MINOR] l2-svm.dml and l2-svm-predict.dml doc improvements
Add input parameter descriptions to l2-svm.dml and l2-svm-predict.dml
scripts.
Fix CsplineCG.dml input parameter description spacing issue.
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/l2-svm-predict.dml",
"new_path": "scripts/algorithms/l2-svm-predict.dml",
"diff": "# Given ground truth labels, the script will compute an\n# accuracy (%) for the predictions\n#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Location to read the matrix X of feature vectors\n+# model String --- Location of the existing model generated by l2-svm\n+# fmt String \"text\" The output format of the output, such as \"text\" or \"csv\"\n+# Y String --- [OPTIONAL] Location to read the true label matrix Y. Only needed\n+# for evaluating performance (accuracy, confusion) of the model.\n+# confusion String --- [OPTIONAL] Location to write confusion matrix, valid if Y supplied\n+# accuracy String --- [OPTIONAL] Location to write accuracy matrix, valid if Y supplied\n+# scores String --- [OPTIONAL] Location to write model predictions\n+# ---------------------------------------------------------------------------------------------\n+#\n# Example Usage:\n# hadoop jar SystemML.jar -f l2-svm-predict.dml -nvargs X=data Y=labels model=model scores=scores accuracy=accuracy confusion=confusion fmt=\"text\"\n#\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/l2-svm.dml",
"new_path": "scripts/algorithms/l2-svm.dml",
"diff": "# Assume input and output directories are on hdfs as INPUT_DIR and OUTPUT_DIR\n# Assume epsilon = 0.001, lambda = 1, maxiterations = 100\n#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Location to read the matrix X of feature vectors\n+# Y String --- Location to read response matrix Y\n+# icpt Int 0 Intercept presence\n+# 0 = no intercept\n+# 1 = add intercept;\n+# tol Double 0.001 Tolerance (epsilon);\n+# reg Double 1.0 Regularization parameter (lambda) for L2 regularization\n+# maxiter Int 100 Maximum number of conjugate gradient iterations\n+# model String --- Location to write model\n+# fmt String \"text\" The output format of the output, such as \"text\" or \"csv\"\n+# Log String --- [OPTIONAL] Location to write the log file\n+# ---------------------------------------------------------------------------------------------\n+\n# hadoop jar SystemML.jar -f $L2SVM_HOME/l2-svm.dml -nvargs X=$INPUT_DIR/X Y=$INPUT_DIR/Y icpt=0 tol=0.001 reg=1 maxiter=100 model=$OUPUT_DIR/w Log=$OUTPUT_DIR/Log fmt=\"text\"\n#\n# Note about inputs:\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] l2-svm.dml and l2-svm-predict.dml doc improvements
Add input parameter descriptions to l2-svm.dml and l2-svm-predict.dml
scripts.
Fix CsplineCG.dml input parameter description spacing issue.
Closes #560. |
49,738 | 07.07.2017 17:14:39 | 25,200 | 4a6165b796590a6388a9c182612761219731d77f | [MINOR] Performance frame transformencode (selective row iterators)
This patch adds selective row iterators to frame blocks, which allows
the transform recode encoder to iterate over rows of selected columns,
which avoids unnecessary string conversions for unused columns. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -504,6 +504,17 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nreturn new StringRowIterator(0, _numRows);\n}\n+ /**\n+ * Get a row iterator over the frame where all selected fields are\n+ * encoded as strings independent of their value types.\n+ *\n+ * @param cols column selection, 1-based\n+ * @return string array iterator\n+ */\n+ public Iterator<String[]> getStringRowIterator(int[] cols) {\n+ return new StringRowIterator(0, _numRows, cols);\n+ }\n+\n/**\n* Get a row iterator over the frame where all fields are encoded\n* as strings independent of their value types.\n@@ -516,6 +527,19 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nreturn new StringRowIterator(rl, ru);\n}\n+ /**\n+ * Get a row iterator over the frame where all selected fields are\n+ * encoded as strings independent of their value types.\n+ *\n+ * @param rl lower row index\n+ * @param ru upper row index\n+ * @param cols column selection, 1-based\n+ * @return string array iterator\n+ */\n+ public Iterator<String[]> getStringRowIterator(int rl, int ru, int[] cols) {\n+ return new StringRowIterator(rl, ru, cols);\n+ }\n+\n/**\n* Get a row iterator over the frame where all fields are encoded\n* as boxed objects according to their value types.\n@@ -526,6 +550,17 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nreturn new ObjectRowIterator(0, _numRows);\n}\n+ /**\n+ * Get a row iterator over the frame where all selected fields are\n+ * encoded as boxed objects according to their value types.\n+ *\n+ * @param cols column selection, 1-based\n+ * @return object array iterator\n+ */\n+ public Iterator<Object[]> getObjectRowIterator(int[] cols) {\n+ return new ObjectRowIterator(0, _numRows, cols);\n+ }\n+\n/**\n* Get a row iterator over the frame where all fields are encoded\n* as boxed objects according to their value types.\n@@ -538,6 +573,19 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nreturn new ObjectRowIterator(rl, ru);\n}\n+ /**\n+ * Get a row iterator over the frame where all selected fields are\n+ * encoded as boxed objects according to their value types.\n+ *\n+ * @param rl lower row index\n+ * @param ru upper row index\n+ * @param cols column selection, 1-based\n+ * @return object array iterator\n+ */\n+ public Iterator<Object[]> getObjectRowIterator(int rl, int ru, int[] cols) {\n+ return new ObjectRowIterator(rl, ru, cols);\n+ }\n+\n///////\n// serialization / deserialization (implementation of writable and externalizable)\n@@ -1111,14 +1159,20 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n// row iterators (over strings and boxed objects)\nprivate abstract class RowIterator<T> implements Iterator<T[]> {\n- protected T[] _curRow = null;\n+ protected final int[] _cols;\n+ protected final T[] _curRow;\n+ protected final int _maxPos;\nprotected int _curPos = -1;\n- protected int _maxPos = -1;\nprotected RowIterator(int rl, int ru) {\n- _curPos = rl;\n+ this(rl, ru, UtilFunctions.getSeqArray(1, getNumColumns(), 1));\n+ }\n+\n+ protected RowIterator(int rl, int ru, int[] cols) {\n+ _curRow = createRow(cols.length);\n+ _cols = cols;\n_maxPos = ru;\n- _curRow = createRow(getNumColumns());\n+ _curPos = rl;\n}\n@Override\n@@ -1139,6 +1193,10 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nsuper(rl, ru);\n}\n+ public StringRowIterator(int rl, int ru, int[] cols) {\n+ super(rl, ru, cols);\n+ }\n+\n@Override\nprotected String[] createRow(int size) 
{\nreturn new String[size];\n@@ -1146,8 +1204,8 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n@Override\npublic String[] next( ) {\n- for( int j=0; j<getNumColumns(); j++ ) {\n- Object tmp = get(_curPos, j);\n+ for( int j=0; j<_cols.length; j++ ) {\n+ Object tmp = get(_curPos, _cols[j]-1);\n_curRow[j] = (tmp!=null) ? tmp.toString() : null;\n}\n_curPos++;\n@@ -1160,6 +1218,10 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nsuper(rl, ru);\n}\n+ public ObjectRowIterator(int rl, int ru, int[] cols) {\n+ super(rl, ru, cols);\n+ }\n+\n@Override\nprotected Object[] createRow(int size) {\nreturn new Object[size];\n@@ -1167,8 +1229,8 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n@Override\npublic Object[] next( ) {\n- for( int j=0; j<getNumColumns(); j++ )\n- _curRow[j] = get(_curPos, j);\n+ for( int j=0; j<_cols.length; j++ )\n+ _curRow[j] = get(_curPos, _cols[j]-1);\n_curPos++;\nreturn _curRow;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/transform/decode/DecoderFactory.java",
"new_path": "src/main/java/org/apache/sysml/runtime/transform/decode/DecoderFactory.java",
"diff": "@@ -56,7 +56,7 @@ public class DecoderFactory\nTfMetaUtils.parseJsonIDList(jSpec, colnames, TfUtils.TXMETHOD_DUMMYCODE)));\nrcIDs = new ArrayList<Integer>(CollectionUtils.union(rcIDs, dcIDs));\nList<Integer> ptIDs = new ArrayList<Integer>(CollectionUtils\n- .subtract(UtilFunctions.getSequenceList(1, meta.getNumColumns(), 1), rcIDs));\n+ .subtract(UtilFunctions.getSeqList(1, meta.getNumColumns(), 1), rcIDs));\n//create default schema if unspecified (with double columns for pass-through)\nif( schema == null ) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderFactory.java",
"new_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderFactory.java",
"diff": "@@ -65,7 +65,7 @@ public class EncoderFactory\nrcIDs = new ArrayList<Integer>(CollectionUtils.union(rcIDs, dcIDs));\nList<Integer> binIDs = TfMetaUtils.parseBinningColIDs(jSpec, colnames);\nList<Integer> ptIDs = new ArrayList<Integer>(CollectionUtils.subtract(\n- CollectionUtils.subtract(UtilFunctions.getSequenceList(1, clen, 1), rcIDs), binIDs));\n+ CollectionUtils.subtract(UtilFunctions.getSeqList(1, clen, 1), rcIDs), binIDs));\nList<Integer> oIDs = Arrays.asList(ArrayUtils.toObject(\nTfMetaUtils.parseJsonIDList(jSpec, colnames, TfUtils.TXMETHOD_OMIT)));\nList<Integer> mvIDs = Arrays.asList(ArrayUtils.toObject(\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java",
"new_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java",
"diff": "@@ -112,7 +112,7 @@ public class EncoderRecode extends Encoder\nif( !isApplicable() )\nreturn;\n- Iterator<String[]> iter = in.getStringRowIterator();\n+ Iterator<String[]> iter = in.getStringRowIterator(_colList);\nwhile( iter.hasNext() ) {\nString[] row = iter.next();\nfor( int j=0; j<_colList.length; j++ ) {\n@@ -122,7 +122,7 @@ public class EncoderRecode extends Encoder\n_rcdMaps.put(colID, new HashMap<String,Long>());\n//probe and build column map\nHashMap<String,Long> map = _rcdMaps.get(colID);\n- String key = row[colID-1];\n+ String key = row[j];\nif( key!=null && !key.isEmpty() && !map.containsKey(key) )\nmap.put(key, Long.valueOf(map.size()+1));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/util/UtilFunctions.java",
"new_path": "src/main/java/org/apache/sysml/runtime/util/UtilFunctions.java",
"diff": "@@ -314,6 +314,37 @@ public class UtilFunctions\nreturn 1L + (long) Math.floor(to/incr - from/incr);\n}\n+ /**\n+ * Obtain sequence list\n+ *\n+ * @param low lower bound (inclusive)\n+ * @param up upper bound (inclusive)\n+ * @param incr increment\n+ * @return list of integers\n+ */\n+ public static List<Integer> getSeqList(int low, int up, int incr) {\n+ ArrayList<Integer> ret = new ArrayList<Integer>();\n+ for( int i=low; i<=up; i+=incr )\n+ ret.add(i);\n+ return ret;\n+ }\n+\n+ /**\n+ * Obtain sequence array\n+ *\n+ * @param low lower bound (inclusive)\n+ * @param up upper bound (inclusive)\n+ * @param incr increment\n+ * @return array of integers\n+ */\n+ public static int[] getSeqArray(int low, int up, int incr) {\n+ int len = (int) getSeqLength(low, up, incr);\n+ int[] ret = new int[len];\n+ for( int i=0, val=low; i<len; i++, val+=incr )\n+ ret[i] = val;\n+ return ret;\n+ }\n+\npublic static int roundToNext(int val, int factor) {\n//round up to next non-zero multiple of factor\nint pval = Math.max(val, factor);\n@@ -507,21 +538,6 @@ public class UtilFunctions\nreturn String.format(\"%d\", arg);\n}\n- /**\n- * Obtain sequence list\n- *\n- * @param low lower bound (inclusive)\n- * @param up upper bound (inclusive)\n- * @param incr increment\n- * @return list of integers\n- */\n- public static List<Integer> getSequenceList(int low, int up, int incr) {\n- ArrayList<Integer> ret = new ArrayList<Integer>();\n- for( int i=low; i<=up; i+=incr )\n- ret.add(i);\n- return ret;\n- }\n-\npublic static double getDouble(Object obj) {\nreturn (obj instanceof Double) ? (Double)obj :\nDouble.parseDouble(obj.toString());\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Performance frame transformencode (selective row iterators)
This patch adds selective row iterators to frame blocks, which allows
the transform recode encoder to iterate over rows of selected columns,
which avoids unnecessary string conversions for unused columns. |
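> To make the new API concrete, here is a small standalone sketch of the selective-iteration pattern. The mock table and helper below are illustrative stand-ins, not the actual FrameBlock implementation, but they follow the same 1-based column selection and null handling shown in the diff.
>
> ```java
> import java.util.Arrays;
> import java.util.Iterator;
>
> public class SelectiveRowIteratorSketch {
>     public static void main(String[] args) {
>         // Mock frame: 2 rows x 3 columns of boxed values.
>         Object[][] data = { {1, "a", 2.0}, {2, "b", 3.0} };
>         int[] cols = {2}; // 1-based selection, as in getStringRowIterator(int[] cols)
>
>         Iterator<String[]> iter = stringRowIterator(data, cols);
>         while (iter.hasNext()) {
>             // row[j] corresponds to cols[j]; compare EncoderRecode's switch
>             // from row[colID-1] to row[j] in the diff above.
>             System.out.println(Arrays.toString(iter.next())); // [a] then [b]
>         }
>     }
>
>     // Stand-in for FrameBlock.getStringRowIterator(int[] cols): only the
>     // selected columns are converted to strings.
>     static Iterator<String[]> stringRowIterator(Object[][] data, int[] cols) {
>         return new Iterator<String[]>() {
>             private int pos = 0;
>             @Override public boolean hasNext() { return pos < data.length; }
>             @Override public String[] next() {
>                 String[] out = new String[cols.length];
>                 for (int j = 0; j < cols.length; j++) {
>                     Object tmp = data[pos][cols[j] - 1]; // 1-based -> 0-based
>                     out[j] = (tmp != null) ? tmp.toString() : null;
>                 }
>                 pos++;
>                 return out;
>             }
>         };
>     }
> }
> ```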
49,738 | 09.07.2017 00:32:47 | 25,200 | 352c256a3d71bb587162120134f87e4a9a2df507 | Fix simplification rewrite binary matrix-scalar ops
This patch fixes the rewrite for simplifying matrix-scalar to
scalar-scalar operations to correctly check for binary operations that
are supported over scalars. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/Hop.java",
"new_path": "src/main/java/org/apache/sysml/hops/Hop.java",
"diff": "@@ -28,6 +28,8 @@ import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.lops.Binary;\n+import org.apache.sysml.lops.BinaryScalar;\nimport org.apache.sysml.lops.CSVReBlock;\nimport org.apache.sysml.lops.Checkpoint;\nimport org.apache.sysml.lops.Compression;\n@@ -1143,53 +1145,53 @@ public abstract class Hop\n}\n- protected static final HashMap<Hop.OpOp2, org.apache.sysml.lops.Binary.OperationTypes> HopsOpOp2LopsB;\n+ protected static final HashMap<Hop.OpOp2, Binary.OperationTypes> HopsOpOp2LopsB;\nstatic {\n- HopsOpOp2LopsB = new HashMap<Hop.OpOp2, org.apache.sysml.lops.Binary.OperationTypes>();\n- HopsOpOp2LopsB.put(OpOp2.PLUS, org.apache.sysml.lops.Binary.OperationTypes.ADD);\n- HopsOpOp2LopsB.put(OpOp2.MINUS, org.apache.sysml.lops.Binary.OperationTypes.SUBTRACT);\n- HopsOpOp2LopsB.put(OpOp2.MULT, org.apache.sysml.lops.Binary.OperationTypes.MULTIPLY);\n- HopsOpOp2LopsB.put(OpOp2.DIV, org.apache.sysml.lops.Binary.OperationTypes.DIVIDE);\n- HopsOpOp2LopsB.put(OpOp2.MODULUS, org.apache.sysml.lops.Binary.OperationTypes.MODULUS);\n- HopsOpOp2LopsB.put(OpOp2.INTDIV, org.apache.sysml.lops.Binary.OperationTypes.INTDIV);\n- HopsOpOp2LopsB.put(OpOp2.MINUS1_MULT, org.apache.sysml.lops.Binary.OperationTypes.MINUS1_MULTIPLY);\n- HopsOpOp2LopsB.put(OpOp2.LESS, org.apache.sysml.lops.Binary.OperationTypes.LESS_THAN);\n- HopsOpOp2LopsB.put(OpOp2.LESSEQUAL, org.apache.sysml.lops.Binary.OperationTypes.LESS_THAN_OR_EQUALS);\n- HopsOpOp2LopsB.put(OpOp2.GREATER, org.apache.sysml.lops.Binary.OperationTypes.GREATER_THAN);\n- HopsOpOp2LopsB.put(OpOp2.GREATEREQUAL, org.apache.sysml.lops.Binary.OperationTypes.GREATER_THAN_OR_EQUALS);\n- HopsOpOp2LopsB.put(OpOp2.EQUAL, org.apache.sysml.lops.Binary.OperationTypes.EQUALS);\n- HopsOpOp2LopsB.put(OpOp2.NOTEQUAL, org.apache.sysml.lops.Binary.OperationTypes.NOT_EQUALS);\n- HopsOpOp2LopsB.put(OpOp2.MIN, org.apache.sysml.lops.Binary.OperationTypes.MIN);\n- HopsOpOp2LopsB.put(OpOp2.MAX, org.apache.sysml.lops.Binary.OperationTypes.MAX);\n- HopsOpOp2LopsB.put(OpOp2.AND, org.apache.sysml.lops.Binary.OperationTypes.OR);\n- HopsOpOp2LopsB.put(OpOp2.OR, org.apache.sysml.lops.Binary.OperationTypes.AND);\n- HopsOpOp2LopsB.put(OpOp2.SOLVE, org.apache.sysml.lops.Binary.OperationTypes.SOLVE);\n- HopsOpOp2LopsB.put(OpOp2.POW, org.apache.sysml.lops.Binary.OperationTypes.POW);\n- HopsOpOp2LopsB.put(OpOp2.LOG, org.apache.sysml.lops.Binary.OperationTypes.NOTSUPPORTED);\n- }\n-\n- protected static final HashMap<Hop.OpOp2, org.apache.sysml.lops.BinaryScalar.OperationTypes> HopsOpOp2LopsBS;\n+ HopsOpOp2LopsB = new HashMap<Hop.OpOp2, Binary.OperationTypes>();\n+ HopsOpOp2LopsB.put(OpOp2.PLUS, Binary.OperationTypes.ADD);\n+ HopsOpOp2LopsB.put(OpOp2.MINUS, Binary.OperationTypes.SUBTRACT);\n+ HopsOpOp2LopsB.put(OpOp2.MULT, Binary.OperationTypes.MULTIPLY);\n+ HopsOpOp2LopsB.put(OpOp2.DIV, Binary.OperationTypes.DIVIDE);\n+ HopsOpOp2LopsB.put(OpOp2.MODULUS, Binary.OperationTypes.MODULUS);\n+ HopsOpOp2LopsB.put(OpOp2.INTDIV, Binary.OperationTypes.INTDIV);\n+ HopsOpOp2LopsB.put(OpOp2.MINUS1_MULT, Binary.OperationTypes.MINUS1_MULTIPLY);\n+ HopsOpOp2LopsB.put(OpOp2.LESS, Binary.OperationTypes.LESS_THAN);\n+ HopsOpOp2LopsB.put(OpOp2.LESSEQUAL, Binary.OperationTypes.LESS_THAN_OR_EQUALS);\n+ HopsOpOp2LopsB.put(OpOp2.GREATER, Binary.OperationTypes.GREATER_THAN);\n+ HopsOpOp2LopsB.put(OpOp2.GREATEREQUAL, 
Binary.OperationTypes.GREATER_THAN_OR_EQUALS);\n+ HopsOpOp2LopsB.put(OpOp2.EQUAL, Binary.OperationTypes.EQUALS);\n+ HopsOpOp2LopsB.put(OpOp2.NOTEQUAL, Binary.OperationTypes.NOT_EQUALS);\n+ HopsOpOp2LopsB.put(OpOp2.MIN, Binary.OperationTypes.MIN);\n+ HopsOpOp2LopsB.put(OpOp2.MAX, Binary.OperationTypes.MAX);\n+ HopsOpOp2LopsB.put(OpOp2.AND, Binary.OperationTypes.OR);\n+ HopsOpOp2LopsB.put(OpOp2.OR, Binary.OperationTypes.AND);\n+ HopsOpOp2LopsB.put(OpOp2.SOLVE, Binary.OperationTypes.SOLVE);\n+ HopsOpOp2LopsB.put(OpOp2.POW, Binary.OperationTypes.POW);\n+ HopsOpOp2LopsB.put(OpOp2.LOG, Binary.OperationTypes.NOTSUPPORTED);\n+ }\n+\n+ protected static final HashMap<Hop.OpOp2, BinaryScalar.OperationTypes> HopsOpOp2LopsBS;\nstatic {\n- HopsOpOp2LopsBS = new HashMap<Hop.OpOp2, org.apache.sysml.lops.BinaryScalar.OperationTypes>();\n- HopsOpOp2LopsBS.put(OpOp2.PLUS, org.apache.sysml.lops.BinaryScalar.OperationTypes.ADD);\n- HopsOpOp2LopsBS.put(OpOp2.MINUS, org.apache.sysml.lops.BinaryScalar.OperationTypes.SUBTRACT);\n- HopsOpOp2LopsBS.put(OpOp2.MULT, org.apache.sysml.lops.BinaryScalar.OperationTypes.MULTIPLY);\n- HopsOpOp2LopsBS.put(OpOp2.DIV, org.apache.sysml.lops.BinaryScalar.OperationTypes.DIVIDE);\n- HopsOpOp2LopsBS.put(OpOp2.MODULUS, org.apache.sysml.lops.BinaryScalar.OperationTypes.MODULUS);\n- HopsOpOp2LopsBS.put(OpOp2.INTDIV, org.apache.sysml.lops.BinaryScalar.OperationTypes.INTDIV);\n- HopsOpOp2LopsBS.put(OpOp2.LESS, org.apache.sysml.lops.BinaryScalar.OperationTypes.LESS_THAN);\n- HopsOpOp2LopsBS.put(OpOp2.LESSEQUAL, org.apache.sysml.lops.BinaryScalar.OperationTypes.LESS_THAN_OR_EQUALS);\n- HopsOpOp2LopsBS.put(OpOp2.GREATER, org.apache.sysml.lops.BinaryScalar.OperationTypes.GREATER_THAN);\n- HopsOpOp2LopsBS.put(OpOp2.GREATEREQUAL, org.apache.sysml.lops.BinaryScalar.OperationTypes.GREATER_THAN_OR_EQUALS);\n- HopsOpOp2LopsBS.put(OpOp2.EQUAL, org.apache.sysml.lops.BinaryScalar.OperationTypes.EQUALS);\n- HopsOpOp2LopsBS.put(OpOp2.NOTEQUAL, org.apache.sysml.lops.BinaryScalar.OperationTypes.NOT_EQUALS);\n- HopsOpOp2LopsBS.put(OpOp2.MIN, org.apache.sysml.lops.BinaryScalar.OperationTypes.MIN);\n- HopsOpOp2LopsBS.put(OpOp2.MAX, org.apache.sysml.lops.BinaryScalar.OperationTypes.MAX);\n- HopsOpOp2LopsBS.put(OpOp2.AND, org.apache.sysml.lops.BinaryScalar.OperationTypes.AND);\n- HopsOpOp2LopsBS.put(OpOp2.OR, org.apache.sysml.lops.BinaryScalar.OperationTypes.OR);\n- HopsOpOp2LopsBS.put(OpOp2.LOG, org.apache.sysml.lops.BinaryScalar.OperationTypes.LOG);\n- HopsOpOp2LopsBS.put(OpOp2.POW, org.apache.sysml.lops.BinaryScalar.OperationTypes.POW);\n- HopsOpOp2LopsBS.put(OpOp2.PRINT, org.apache.sysml.lops.BinaryScalar.OperationTypes.PRINT);\n+ HopsOpOp2LopsBS = new HashMap<Hop.OpOp2, BinaryScalar.OperationTypes>();\n+ HopsOpOp2LopsBS.put(OpOp2.PLUS, BinaryScalar.OperationTypes.ADD);\n+ HopsOpOp2LopsBS.put(OpOp2.MINUS, BinaryScalar.OperationTypes.SUBTRACT);\n+ HopsOpOp2LopsBS.put(OpOp2.MULT, BinaryScalar.OperationTypes.MULTIPLY);\n+ HopsOpOp2LopsBS.put(OpOp2.DIV, BinaryScalar.OperationTypes.DIVIDE);\n+ HopsOpOp2LopsBS.put(OpOp2.MODULUS, BinaryScalar.OperationTypes.MODULUS);\n+ HopsOpOp2LopsBS.put(OpOp2.INTDIV, BinaryScalar.OperationTypes.INTDIV);\n+ HopsOpOp2LopsBS.put(OpOp2.LESS, BinaryScalar.OperationTypes.LESS_THAN);\n+ HopsOpOp2LopsBS.put(OpOp2.LESSEQUAL, BinaryScalar.OperationTypes.LESS_THAN_OR_EQUALS);\n+ HopsOpOp2LopsBS.put(OpOp2.GREATER, BinaryScalar.OperationTypes.GREATER_THAN);\n+ HopsOpOp2LopsBS.put(OpOp2.GREATEREQUAL, BinaryScalar.OperationTypes.GREATER_THAN_OR_EQUALS);\n+ HopsOpOp2LopsBS.put(OpOp2.EQUAL, 
BinaryScalar.OperationTypes.EQUALS);\n+ HopsOpOp2LopsBS.put(OpOp2.NOTEQUAL, BinaryScalar.OperationTypes.NOT_EQUALS);\n+ HopsOpOp2LopsBS.put(OpOp2.MIN, BinaryScalar.OperationTypes.MIN);\n+ HopsOpOp2LopsBS.put(OpOp2.MAX, BinaryScalar.OperationTypes.MAX);\n+ HopsOpOp2LopsBS.put(OpOp2.AND, BinaryScalar.OperationTypes.AND);\n+ HopsOpOp2LopsBS.put(OpOp2.OR, BinaryScalar.OperationTypes.OR);\n+ HopsOpOp2LopsBS.put(OpOp2.LOG, BinaryScalar.OperationTypes.LOG);\n+ HopsOpOp2LopsBS.put(OpOp2.POW, BinaryScalar.OperationTypes.POW);\n+ HopsOpOp2LopsBS.put(OpOp2.PRINT, BinaryScalar.OperationTypes.PRINT);\n}\nprotected static final HashMap<Hop.OpOp2, org.apache.sysml.lops.Unary.OperationTypes> HopsOpOp2LopsU;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"diff": "@@ -846,8 +846,14 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nprivate Hop simplifyBinaryMatrixScalarOperation( Hop parent, Hop hi, int pos )\nthrows HopsException\n{\n+ // Note: This rewrite is not applicable for all binary operations because some of them\n+ // are undefined over scalars. We explicitly exclude potential conflicting matrix-scalar binary\n+ // operations; other operations like cbind/rbind will never occur as matrix-scalar operations.\n+\nif( HopRewriteUtils.isUnary(hi, OpOp1.CAST_AS_SCALAR)\n- && hi.getInput().get(0) instanceof BinaryOp )\n+ && hi.getInput().get(0) instanceof BinaryOp\n+ && !HopRewriteUtils.isBinary(hi.getInput().get(0), OpOp2.QUANTILE,\n+ OpOp2.CENTRALMOMENT, OpOp2.MINUS1_MULT, OpOp2.MINUS_NZ, OpOp2.LOG_NZ))\n{\nBinaryOp bin = (BinaryOp) hi.getInput().get(0);\nBinaryOp bout = null;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1755] Fix simplification rewrite binary matrix-scalar ops
This patch fixes the rewrite for simplifying matrix-scalar to
scalar-scalar operations to correctly check for binary operations that
are supported over scalars. |
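For context on why an exclusion list is needed here: the rewrite turns as.scalar(X binop s) over a 1x1 matrix into a pure scalar-scalar binop, which only exists for some operators. Below is a minimal, self-contained Java sketch of the guard idea; the class and enum are illustrative stand-ins mirroring the excluded ops from the diff, not SystemML's actual API.

```java
import java.util.EnumSet;

// Sketch only: some binary ops have no scalar-scalar form, so the
// matrix-scalar -> scalar-scalar rewrite must skip them.
public class ScalarRewriteGuard {
    enum OpOp2 { PLUS, MULT, QUANTILE, CENTRALMOMENT, MINUS1_MULT, MINUS_NZ, LOG_NZ }

    static final EnumSet<OpOp2> UNDEFINED_OVER_SCALARS = EnumSet.of(
        OpOp2.QUANTILE, OpOp2.CENTRALMOMENT,
        OpOp2.MINUS1_MULT, OpOp2.MINUS_NZ, OpOp2.LOG_NZ);

    static boolean rewriteApplicable(OpOp2 op) {
        return !UNDEFINED_OVER_SCALARS.contains(op);
    }

    public static void main(String[] args) {
        System.out.println(rewriteApplicable(OpOp2.MULT));     // true  -> safe to rewrite
        System.out.println(rewriteApplicable(OpOp2.QUANTILE)); // false -> keep matrix-scalar op
    }
}
```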
49,703 | 12.07.2017 10:54:31 | 25,200 | 757288a702b3fedb0cca4bee217f9b1218467657 | [MINOR] Update links on Contributing to SystemML page
Update main Jekyll GitHub Pages link.
Add link for installing Jekyll locally.
Add links to code style files. | [
{
"change_type": "MODIFY",
"old_path": "docs/contributing-to-systemml.md",
"new_path": "docs/contributing-to-systemml.md",
"diff": "@@ -179,9 +179,10 @@ the Pull Request, and the issue can be resolved and closed.\nDocumentation is one useful way to become involved with SystemML. SystemML online documentation\nis generated from markdown using Jekyll. For more information, please see GitHub's\n-[Using Jekyll with Pages](https://help.github.com/articles/using-jekyll-with-pages/).\n+[Using Jekyll as a static site generator with GitHub Pages](https://help.github.com/articles/using-jekyll-as-a-static-site-generator-with-github-pages/).\n-After installing Jekyll, Jekyll can be run from the `docs` folder via:\n+After installing Jekyll locally (see [Setting up your GitHub Pages site locally with Jekyll](https://help.github.com/articles/setting-up-your-github-pages-site-locally-with-jekyll/)),\n+Jekyll can be run from the `docs` folder via:\nbundle exec jekyll serve\n@@ -209,9 +210,10 @@ branch and perform the `subtree` command again.\n### Java Code Format\nJava in SystemML should be formatted using a standard format. The \"SystemML Format\" at\n-`dev/code-style/systemml-style-eclipse.xml` can be imported into Eclipse and\n-`dev/code-style/systemml-style-intellij.xml` can be imported into IntelliJ\n-for this purpose.\n+[`dev/code-style/systemml-style-eclipse.xml`](https://github.com/apache/systemml/blob/master/dev/code-style/systemml-style-eclipse.xml)\n+can be imported into Eclipse and\n+[`dev/code-style/systemml-style-intellij.xml`](https://github.com/apache/systemml/blob/master/dev/code-style/systemml-style-intellij.xml)\n+can be imported into IntelliJ for this purpose.\nNo trailing whitespace is preferred. IDEs such as Eclipse and IntelliJ can be configured\nfor this option.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update links on Contributing to SystemML page
Update main Jekyll GitHub Pages link.
Add link for installing Jekyll locally.
Add links to code style files. |
49,703 | 12.07.2017 18:28:15 | 25,200 | 0226899e32073a1f074fffce8e6b05e4615742c9 | Fix Explain countCompiledInstructions for CP
Increment the ExplainCounts numCPInst value in countCompiledInstructions
based on the value of the CP parameter.
Change countCompiledInstructions method return type to void.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/utils/Explain.java",
"new_path": "src/main/java/org/apache/sysml/utils/Explain.java",
"diff": "@@ -989,15 +989,30 @@ public class Explain\n}\n}\n- private static int countCompiledInstructions( ArrayList<Instruction> instSet, ExplainCounts counts, boolean MR, boolean CP, boolean SP )\n+ /**\n+ * Count the number of Hadoop instructions, CP instructions, Spark\n+ * instructions, and/or Spark reblock instructions in a list of\n+ * instructions.\n+ *\n+ * @param instSet\n+ * list of instructions\n+ * @param counts\n+ * explain counts\n+ * @param MR\n+ * if true, count Hadoop instructions\n+ * @param CP\n+ * if true, count CP instructions\n+ * @param SP\n+ * if true, count Spark instructions and Spark reblock\n+ * instructions\n+ */\n+ private static void countCompiledInstructions( ArrayList<Instruction> instSet, ExplainCounts counts, boolean MR, boolean CP, boolean SP )\n{\n- int ret = 0;\n-\nfor( Instruction inst : instSet )\n{\nif( MR && inst instanceof MRJobInstruction )\ncounts.numJobs++;\n- else if( SP && inst instanceof CPInstruction )\n+ else if( CP && inst instanceof CPInstruction )\ncounts.numCPInst++;\nelse if( SP && inst instanceof SPInstruction )\ncounts.numJobs++;\n@@ -1006,8 +1021,6 @@ public class Explain\nif( SP && (inst instanceof CSVReblockSPInstruction || inst instanceof ReblockSPInstruction) )\ncounts.numReblocks++;\n}\n-\n- return ret;\n}\nprivate static String explainFunctionCallGraph(FunctionCallGraph fgraph, HashSet<String> fstack, String fkey, int level)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1763] Fix Explain countCompiledInstructions for CP
Increment the ExplainCounts numCPInst value in countCompiledInstructions
based on the value of the CP parameter.
Change countCompiledInstructions method return type to void.
Closes #569. |
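The fix is a one-token change that is easy to miss in the diff: the CPInstruction branch was guarded by the SP flag. A toy reproduction under simplified, hypothetical types (not SystemML's real instruction hierarchy):

```java
import java.util.List;

// Toy reproduction of the counting bug fixed in SYSTEMML-1763.
public class CountSketch {
    interface Instruction {}
    static class CPInstruction implements Instruction {}
    static class SPInstruction implements Instruction {}

    static int countCP(List<Instruction> insts, boolean CP, boolean SP) {
        int numCPInst = 0;
        for (Instruction inst : insts) {
            // was: if (SP && inst instanceof CPInstruction) -- CP instructions
            // were silently dropped whenever the SP flag was false
            if (CP && inst instanceof CPInstruction)
                numCPInst++;
        }
        return numCPInst;
    }

    public static void main(String[] args) {
        List<Instruction> insts = List.of(new CPInstruction(), new SPInstruction());
        System.out.println(countCP(insts, true, false)); // 1 with the fix, 0 before
    }
}
```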
49,703 | 12.07.2017 18:35:59 | 25,200 | 4e3ebcaeb8e4679f65d0caf755cc3eec68c0da3c | Fix cbind value in AppendGAlignedSP constructor
Set the _cbind field to the value of the cbind parameter in the AppendGAlignedSP
constructor.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/lops/AppendGAlignedSP.java",
"new_path": "src/main/java/org/apache/sysml/lops/AppendGAlignedSP.java",
"diff": "@@ -36,7 +36,7 @@ public class AppendGAlignedSP extends Lop\nsuper(Lop.Type.Append, dt, vt);\ninit(input1, input2, input3, dt, vt);\n- _cbind = true;\n+ _cbind = cbind;\n}\npublic void init(Lop input1, Lop input2, Lop input3, DataType dt, ValueType vt)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1764] Fix cbind value in AppendGAlignedSP constructor
Set the _cbind field to the value of the cbind parameter in the AppendGAlignedSP
constructor.
Closes #571. |
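The underlying bug pattern is a constructor that accepts a parameter but ignores it in favor of a hard-coded value. A standalone sketch with a hypothetical class name, mirroring the one-line fix in the diff:

```java
// Hypothetical minimal reproduction of the bug class fixed here
// (not the real AppendGAlignedSP lop).
public class AppendSketch {
    private final boolean _cbind;

    public AppendSketch(boolean cbind) {
        // before the fix: _cbind = true;  -- every append acted as a column
        // append, so row-wise appends (rbind) were silently mis-planned
        _cbind = cbind;
    }

    public boolean isCbind() { return _cbind; }

    public static void main(String[] args) {
        System.out.println(new AppendSketch(false).isCbind()); // false, as requested
    }
}
```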
49,736 | 13.07.2017 13:12:09 | 28,800 | f046051d43478a8e850092b4303a85d1a86f5dbb | [HOTFIX] Bugfix in validation of convolution operations.
Also, updated the DML documentation as per Prithvi's recommendation.
This hotfix also contains a minor bugfix in the getP and getQ methods of
ConvolutionUtils and in the ConvolutionParameters wrapper class. | [
{
"change_type": "MODIFY",
"old_path": "docs/dml-language-reference.md",
"new_path": "docs/dml-language-reference.md",
"diff": "@@ -1508,15 +1508,15 @@ The images are assumed to be stored NCHW format, where N = batch size, C = #chan\nHence, the images are internally represented as a matrix with dimension (N, C * H * W).\n-| Function name | Input matrices | Input Parameters | Notes |\n-|------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------|\n-| conv2d | input, filter | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Performs 2D convolution operation |\n-| conv2d_backward_filter | input, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Computes the gradients wrt filter of 2D convolution |\n-| conv2d_backward_data | filter, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Computes the gradients wrt input of 2D convolution |\n-| max_pool | input | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Performs max pooling operation |\n-| max_pool_backward | input, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Computes the gradients wrt input of 2D maxpooling |\n-| bias_add | input, bias | | Adds the bias (row vector of size numChannels) to input with the given numChannels |\n-| bias_multiply | input, bias | | Multiplies the bias (row vector of size numChannels) to input with the given numChannels |\n+| Function name | Input matrices | Dimension of first input matrix | Dimension of second input matrix (if applicable) | Dimension of output matrix | Input Parameters | Notes |\n+|----------------------------|----------------|-----------------------------------------------------------|-----------------------------------------------------------|------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|\n+| conv2d | input, filter | [batch_size X num_channels* height_image* width_image] | [num_filters X num_channels* height_filter* width_filter] | [batch_size X num_channels_out* height_out* width_out] | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[num_filters, num_channels, height_filter, width_filter] | Performs 2D convolution operation |\n+| conv2d_backward_filter | input, dout | [batch_size X num_channels* height_image* width_image] | [batch_size X num_channels_out* height_out* width_out] | [num_filters X num_channels* height_filter* width_filter] | stride=[stride_h, stride_w], 
padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[num_filters, num_channels, height_filter, width_filter] | Computes the gradients wrt filter of 2D convolution |\n+| conv2d_backward_data | filter, dout | [num_filters X num_channels* height_filter* width_filter] | [batch_size X num_channels_out* height_out* width_out] | [batch_size X num_channels* height_image* width_image] | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[num_filters, num_channels, height_filter, width_filter] | Computes the gradients wrt input of 2D convolution |\n+| max_pool | input | [batch_size X num_channels* height_image* width_image] | | [batch_size X num_channels* height_out* width_out] | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Performs max pooling operation |\n+| max_pool_backward | input, dout | [batch_size X num_channels* height_image* width_image] | [batch_size X num_channels* height_out* width_out] | [batch_size X num_channels* height_image* width_image] | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Computes the gradients wrt input of 2D maxpooling |\n+| bias_add | input, bias | [batch_size X num_channels* height_image* width_image] | [num_channels X 1] | [batch_size X num_channels* height_image* width_image] | | Adds the bias (row vector of size num_channels) to input with the given num_channels |\n+| bias_multiply | input, bias | [batch_size X num_channels* height_image* width_image] | [num_channels X 1] | [batch_size X num_channels* height_image* width_image] | | Multiplies the bias (row vector of size num_channels) to input with the given num_channels |\nExamples:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"diff": "@@ -1116,61 +1116,57 @@ public class BuiltinFunctionExpression extends DataIdentifier\n// conv2d_backward_filter and conv2d_backward_data\nExpression input = _args[0]; // For conv2d_backward_filter, this is input and for conv2d_backward_data, this is filter\n- Expression filter = null;\n+ Expression input2 = null;\nif(!(this.getOpCode() == BuiltinFunctionOp.MAX_POOL || this.getOpCode() == BuiltinFunctionOp.AVG_POOL)) {\n- filter = _args[1]; // For conv2d_backward functions, this is dout\n- checkMatrixParam(filter);\n+ input2 = _args[1]; // For conv2d_backward functions, this is dout\n+ checkMatrixParam(input2);\n}\noutput.setDataType(DataType.MATRIX);\noutput.setValueType(ValueType.DOUBLE);\noutput.setBlockDimensions(input.getOutput().getRowsInBlock(), input.getOutput().getColumnsInBlock());\n// stride1, stride2, padding1, padding2, numImg, numChannels, imgSize, imgSize,\n// filter_shape1=1, filter_shape2=1, filterSize/poolSize1, filterSize/poolSize1\n- if( getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD ) {\n- output.setDimensions(input.getOutput().getDim1(), input.getOutput().getDim2());\n- }\n- else if( getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA ) {\n- //args[0] .. filter, args[1] .. input\n- output.setDimensions(_args[1].getOutput().getDim1(), -1);\n- }\n- else if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_FILTER) {\n- output.setDimensions(filter.getOutput().getDim1(), filter.getOutput().getDim2());\n- }\n- else if(this.getOpCode() == BuiltinFunctionOp.CONV2D || this.getOpCode() == BuiltinFunctionOp.MAX_POOL) {\ntry {\n- int start = 1;\n- if(this.getOpCode() == BuiltinFunctionOp.CONV2D) {\n- start = 2;\n+ int start = 2;\n+ if(!(this.getOpCode() == BuiltinFunctionOp.MAX_POOL || this.getOpCode() == BuiltinFunctionOp.AVG_POOL)) {\n+ start = 1;\n}\nlong stride_h = (long) getDoubleValue(_args[start++]);\nlong stride_w = (long) getDoubleValue(_args[start++]);\nlong pad_h = (long) getDoubleValue(_args[start++]);\nlong pad_w = (long) getDoubleValue(_args[start++]);\n- start++;\n+ long N = (long) getDoubleValue(_args[start++]);\nlong C = (long) getDoubleValue(_args[start++]);\nlong H = (long) getDoubleValue(_args[start++]);\nlong W = (long) getDoubleValue(_args[start++]);\nlong K = -1;\n- if(this.getOpCode() == BuiltinFunctionOp.CONV2D) {\n+ if(!(this.getOpCode() == BuiltinFunctionOp.MAX_POOL || this.getOpCode() == BuiltinFunctionOp.AVG_POOL)) {\nK = (long) getDoubleValue(_args[start]);\n}\n- start++; start++;\n+ start++; start++; // Increment index for K and C\nlong R = (long) getDoubleValue(_args[start++]);\nlong S = (long) getDoubleValue(_args[start++]);\nlong P = ConvolutionUtils.getP(H, R, stride_h, pad_h);\nlong Q = ConvolutionUtils.getP(W, S, stride_w, pad_w);\nif(this.getOpCode() == BuiltinFunctionOp.CONV2D)\n- output.setDimensions(input.getOutput().getDim1(), K*P*Q);\n+ output.setDimensions(N, K*P*Q);\n+ else if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_FILTER)\n+ output.setDimensions(K, C*R*S);\n+ else if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA)\n+ output.setDimensions(N, C*H*W);\n+ else if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL)\n+ output.setDimensions(N, C*P*Q);\n+ else if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD)\n+ output.setDimensions(N, C*H*W);\nelse\n- output.setDimensions(input.getOutput().getDim1(), C*P*Q);\n+ throw new LanguageException(\"\");\n}\ncatch(Exception e) {\noutput.setDimensions(input.getOutput().getDim1(), -1); // To make sure that output dimensions are not incorrect\n}\n- 
}\n- else\n- throw new LanguageException(\"Unsupported op: \" + this.getOpCode());\ncheckMatrixParam(input);\n+ if(input2 != null)\n+ checkMatrixParam(input2);\nbreak;\n}\ndefault:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java",
"diff": "@@ -103,7 +103,13 @@ public class ConvolutionParameters implements Serializable {\nthis.stride_w = stride_w;\nthis.pad_h = pad_h;\nthis.pad_w = pad_w;\n+ if(H <= 0 || R <= 0 || stride_h < 0 || pad_h < 0)\n+ P = -1;\n+ else\nP = (int) ConvolutionUtils.getP(H, R, stride_h, pad_h);\n+ if(W <= 0 || S <= 0 || stride_w < 0 || pad_w < 0)\n+ Q = -1;\n+ else\nQ = (int) ConvolutionUtils.getQ(W, S, stride_w, pad_w);\nthis.numThreads = numThreads;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java",
"new_path": "src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java",
"diff": "@@ -44,21 +44,16 @@ public class ConvolutionUtils {\n}\npublic static long getP(long H, long R, long verticalStride, long heightPadding) {\n- long ret = (H + 2 * heightPadding - R) / verticalStride + 1;\n- if(ret <= 0) {\n- throw new RuntimeException(\"Incorrect output patch size: \"\n- + \"(image_height + 2 * pad_h - filter_height) / verticalStride + 1) needs to be positive, but is \" + ret\n- + \" (\" + H + \" + 2 * \" + heightPadding + \" - \" + R + \") / \" + verticalStride + \" + 1))\");\n+ if(H <= 0 || R <= 0 || heightPadding < 0 || verticalStride < 0) {\n+ throw new RuntimeException(\"Incorrect parameters: height=\" + H + \" filter_height=\" + R + \" stride=\" + verticalStride + \" pad=\" + heightPadding);\n}\n- return ret;\n+ return (H + 2 * heightPadding - R) / verticalStride + 1;\n}\npublic static long getQ(long W, long S, long horizontalStride, long widthPadding) {\n- long ret = (W + 2 * widthPadding - S) / horizontalStride + 1;\n- if(ret <= 0) {\n- throw new RuntimeException(\"Incorrect output patch size: (image_width + 2 * pad_w - filter_width) / horizontalStride + 1) needs to be positive, but is \" + ret\n- + \" (\" + W + \" + 2 * \" + widthPadding + \" - \" + S + \") / \" + horizontalStride + \" + 1))\");\n+ if(W <= 0 || S <= 0 || widthPadding < 0 || horizontalStride < 0) {\n+ throw new RuntimeException(\"Incorrect parameters: width=\" + W + \" filter_width=\" + S + \" stride=\" + horizontalStride + \" pad=\" + widthPadding);\n}\n- return ret;\n+ return (W + 2 * widthPadding - S) / horizontalStride + 1;\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] [SYSTEMML-540] Bugfix in validation of convolution operations.
- Also, updated the DML documentation as per Prithvi's recommendation.
- This hotfix also contains a minor bugfix in the getP and getQ methods of
ConvolutionUtils and in the ConvolutionParameters wrapper class. |
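The getP/getQ validation in this hotfix guards the standard convolution output-size formula P = (H + 2*pad_h - R)/stride_h + 1. A small worked check with made-up dimensions (pure illustration, variable names follow the diff):

```java
// Worked check of the output-size formula validated by getP/getQ above.
public class ConvShapeSketch {
    public static void main(String[] args) {
        long H = 28, R = 5, stride_h = 1, pad_h = 2;
        long P = (H + 2 * pad_h - R) / stride_h + 1;
        System.out.println(P); // 28 -> "same" padding for a 5x5 filter at stride 1
        // After the hotfix, invalid inputs (e.g., H <= 0 or stride_h < 0) are
        // rejected up front instead of surfacing as a confusing size error.
    }
}
```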
49,737 | 13.07.2017 14:28:56 | 25,200 | cd1ae5b42499b3b97731de8b28a6d1db9cc9e7f3 | [MINOR] Performance test bug fixes
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/datagen.py",
"new_path": "scripts/perftest/python/datagen.py",
"diff": "import itertools\nfrom os.path import join\n-from utils import split_rowcol, config_writer\n+from utils import split_rowcol, config_writer, mat_type_check\n# This file contains configuration settings for data generation\nDATA_FORMAT = 'csv'\n@@ -181,8 +181,8 @@ def stats1_datagen(matrix_dim, matrix_type, datagen_dir):\nNC = int(int(col)/2)\nconfig = dict(R=row, C=col, NC=NC, MAXDOMAIN=MAXDOMAIN, DATA=DATA, TYPES=TYPES, SETSIZE=SETSIZE,\n- LABELSETSIZE=LABELSETSIZE, TYPES1=TYPES1, TYPES2=TYPES2, INDEX1=INDEX1, INDEX2=INDEX2,\n- fmt=DATA_FORMAT)\n+ LABELSETSIZE=LABELSETSIZE, TYPES1=TYPES1, TYPES2=TYPES2, INDEX1=INDEX1,\n+ INDEX2=INDEX2, fmt=DATA_FORMAT)\nconfig_writer(full_path + '.json', config)\n@@ -207,7 +207,7 @@ def stats2_datagen(matrix_dim, matrix_type, datagen_dir):\nreturn full_path\n-def config_packets_datagen(algo_payload, matrix_type, matrix_shape, datagen_dir):\n+def config_packets_datagen(algo_payload, matrix_type, matrix_shape, datagen_dir, dense_algos):\n\"\"\"\nThis function has two responsibilities. Generate the configuration files for\ndatagen algorithms and return a dictionary that will be used for execution.\n@@ -217,11 +217,17 @@ def config_packets_datagen(algo_payload, matrix_type, matrix_shape, datagen_dir)\nfamily type.\nmatrix_type: String\n- Type of matrix to generate e.g dense or sparse\n+ Type of matrix to generate e.g dense, sparse, all\nmatrix_shape: String\nShape of matrix to generate e.g 100k_10\n+ datagen_dir: String\n+ Path of the data generation directory\n+\n+ dense_algos: List\n+ Algorithms that support only dense matrix type\n+\nreturn: Dictionary {string: list}\nThis dictionary contains algorithms to be executed as keys and the path of configuration\njson files to be executed list of values.\n@@ -233,11 +239,8 @@ def config_packets_datagen(algo_payload, matrix_type, matrix_shape, datagen_dir)\n# Cross Product of all configurations\nfor current_family in distinct_families:\n- if current_family in FAMILY_NO_MATRIX_TYPE:\n- config = list(itertools.product(matrix_shape, ['dense']))\n- config_bundle[current_family] = config\n- else:\n- config = list(itertools.product(matrix_shape, matrix_type))\n+ current_matrix_type = mat_type_check(current_family, matrix_type, dense_algos)\n+ config = list(itertools.product(matrix_shape, current_matrix_type))\n# clustering : [[10k_1, dense], [10k_2, dense], ...]\nconfig_bundle[current_family] = config\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/predict.py",
"new_path": "scripts/perftest/python/predict.py",
"diff": "#-------------------------------------------------------------\nimport sys\n-import os\nfrom os.path import join\n-import glob\n-from utils import create_dir, config_writer\n+from utils import config_writer, relevant_folders, mat_type_check\n# Contains configuration setting for predicting\nDATA_FORMAT = 'csv'\n@@ -221,7 +219,7 @@ def glm_gamma_predict(save_file_name, datagen_dir, train_dir, predict_dir):\nreturn full_path_predict\n-def config_packets_predict(algo_payload, datagen_dir, train_dir, predict_dir):\n+def config_packets_predict(algo_payload, matrix_type, matrix_shape, datagen_dir, train_dir, predict_dir, dense_algos):\n\"\"\"\nThis function has two responsibilities. Generate the configuration files for\nprediction algorithms and return a dictionary that will be used for execution.\n@@ -230,6 +228,12 @@ def config_packets_predict(algo_payload, datagen_dir, train_dir, predict_dir):\nThe first tuple index contains algorithm name and the second index contains\nfamily type.\n+ matrix_type: String\n+ Type of matrix to generate e.g dense, sparse, all\n+\n+ matrix_shape: String\n+ Shape of matrix to generate e.g 100k_10\n+\ndatagen_dir: String\nPath of the data generation directory\n@@ -239,45 +243,39 @@ def config_packets_predict(algo_payload, datagen_dir, train_dir, predict_dir):\npredict_dir: String\nPath of the prediction directory\n+ dense_algos: List\n+ Algorithms that support only dense matrix type\n+\nreturn: Dictionary {string: list}\nThis dictionary contains algorithms to be executed as keys and the path of configuration\njson files to be executed list of values.\n\"\"\"\n-\n- algo_payload_distinct = set(map(lambda x: x[0], algo_payload))\n-\nconfig_bundle = {}\n- for k, v in algo_payload:\n+ for k, _ in algo_payload:\nconfig_bundle[k] = []\n- for current_algo in algo_payload_distinct:\n- # Get all train folders related to the algorithm\n- train_path = join(train_dir, current_algo)\n- train_subdir = glob.glob(train_path + \"*\")\n- train_folders = list(filter(lambda x: os.path.isdir(x), train_subdir))\n+ for current_algo, current_family in algo_payload:\n+ current_matrix_type = mat_type_check(current_family, matrix_type, dense_algos)\n+ train_folders = relevant_folders(train_dir, current_algo, current_family,\n+ current_matrix_type, matrix_shape, 'train')\nif len(train_folders) == 0:\nprint('training folders not present for {}'.format(current_algo))\nsys.exit()\nfor current_train_folder in train_folders:\n- save_name = current_train_folder.split('/')[-1]\n- # Get all datagen folders\n- data_gen_folder_name = '.'.join(save_name.split('.')[1:-1])\n- data_gen_path = join(datagen_dir, data_gen_folder_name)\n- data_gen_subdir = glob.glob(data_gen_path + \"*\")\n- data_gen_folder = list(filter(lambda x: os.path.isdir(x), data_gen_subdir))\n-\n- if len(data_gen_folder) == 0:\n+ current_data_gen_dir = relevant_folders(datagen_dir, current_algo, current_family,\n+ current_matrix_type, matrix_shape, 'data-gen')\n+ if len(current_data_gen_dir) == 0:\nprint('data-gen folders not present for {}'.format(current_family))\nsys.exit()\n- # Ideally we will have more than one datagen directory to be found\n- current_data_gen_dir = list(data_gen_folder)[0]\n-\n+ save_name = current_train_folder.split('/')[-1]\nalgo_func = '_'.join([current_algo.lower().replace('-', '_')] + ['predict'])\n- conf_path = globals()[algo_func](save_name, current_data_gen_dir,\n+\n+ # current_data_gen_dir has index 0 as we would expect one datagen for each algorithm\n+ conf_path = globals()[algo_func](save_name, 
current_data_gen_dir[0],\ncurrent_train_folder, predict_dir)\nconfig_bundle[current_algo].append(conf_path)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/run_perftest.py",
"new_path": "scripts/perftest/python/run_perftest.py",
"diff": "@@ -26,13 +26,14 @@ import argparse\nfrom functools import reduce\nimport os\nfrom os.path import join\n-from utils import get_families, config_reader, create_dir, get_existence, \\\n- exec_dml_and_parse_time, exec_test_data, check_predict, get_folder_metrics\nimport logging\nfrom datetime import datetime\nfrom datagen import config_packets_datagen\nfrom train import config_packets_train\nfrom predict import config_packets_predict\n+from utils import get_families, config_reader, create_dir, get_existence, \\\n+ exec_dml_and_parse_time, exec_test_data, check_predict, get_folder_metrics\n+\n# A packet is a dictionary\n# with key as the algorithm\n@@ -80,6 +81,8 @@ ML_PREDICT = {'Kmeans': 'Kmeans-predict',\n'GLM_gamma': 'GLM-predict',\n'GLM_binomial': 'GLM-predict'}\n+DENSE_TYPE_ALGOS = ['clustering', 'stats1', 'stats2']\n+\n# Responsible for execution and metric logging\ndef algorithm_workflow(algo, exec_type, config_path, dml_file_name, action_mode):\n@@ -152,7 +155,7 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, temp_dir, mode\nContains the execution type singlenode / hybrid_spark\nmat_type: List\n- Type of matrix to generate dense or sparse\n+ Type of matrix to generate dense, sparse, all\nmat_shape: List\nDimensions of the input matrix with rows and columns\n@@ -201,12 +204,12 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, temp_dir, mode\nif 'data-gen' in mode:\ndata_gen_dir = join(temp_dir, 'data-gen')\ncreate_dir(data_gen_dir)\n- conf_packet = config_packets_datagen(algos_to_run, mat_type, mat_shape, data_gen_dir)\n+ conf_packet = config_packets_datagen(algos_to_run, mat_type, mat_shape, data_gen_dir,\n+ DENSE_TYPE_ALGOS)\nfor family_name, config_folders in conf_packet.items():\nfor config in config_folders:\nfile_name = ML_GENDATA[family_name]\nalgorithm_workflow(family_name, exec_type, config, file_name, 'data-gen')\n-\n# Statistic family do not require to be split\nif family_name not in ['stats1', 'stats2']:\nexec_test_data(exec_type, config)\n@@ -215,7 +218,8 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, temp_dir, mode\ndata_gen_dir = join(temp_dir, 'data-gen')\ntrain_dir = join(temp_dir, 'train')\ncreate_dir(train_dir)\n- conf_packet = config_packets_train(algos_to_run, data_gen_dir, train_dir)\n+ conf_packet = config_packets_train(algos_to_run, mat_type, mat_shape, data_gen_dir,\n+ train_dir, DENSE_TYPE_ALGOS)\nfor algo_name, config_files in conf_packet.items():\nfor config in config_files:\nfile_name = ML_TRAIN[algo_name]\n@@ -227,9 +231,12 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, temp_dir, mode\npredict_dir = join(temp_dir, 'predict')\ncreate_dir(predict_dir)\nalgos_to_run_perdict = list(filter(lambda algo: check_predict(algo[0], ML_PREDICT), algos_to_run))\n- if len(algos_to_run_perdict) < 0:\n+ if len(algos_to_run_perdict) < 1:\n+ # No algorithms with predict found\npass\n- conf_packet = config_packets_predict(algos_to_run_perdict, data_gen_dir, train_dir, predict_dir)\n+ conf_packet = config_packets_predict(algos_to_run_perdict, mat_type, mat_shape, data_gen_dir,\n+ train_dir, predict_dir, DENSE_TYPE_ALGOS)\n+\nfor algo_name, config_files in conf_packet.items():\nfor config in config_files:\nfile_name = ML_PREDICT[algo_name]\n@@ -243,11 +250,12 @@ if __name__ == '__main__':\nprint('SYSTEMML_HOME not found')\nsys.exit()\n+ # Supported Arguments\n+ mat_type = ['dense', 'sparse', 'all']\n+ workload = ['data-gen', 'train', 'predict']\n+ execution_mode = ['hybrid_spark', 
'singlenode']\n# Default Arguments\n- default_mat_type = ['dense', 'sparse']\n- default_workload = ['data-gen', 'train', 'predict']\ndefault_mat_shape = ['10k_100']\n- default_execution_mode = ['hybrid_spark', 'singlenode']\n# Default temp directory, contains everything generated in perftest\ndefault_temp_dir = join(systemml_home, 'scripts', 'perftest', 'temp')\n@@ -274,11 +282,11 @@ if __name__ == '__main__':\n'(Overrides --family, available : ' + ', '.join(sorted(all_algos)) + ')', metavar='',\nchoices=all_algos, nargs='+')\n- cparser.add_argument('--exec-type', default='singlenode', help='System-ML backend '\n- '(available : singlenode, spark-hybrid)', metavar='',\n- choices=default_execution_mode)\n- cparser.add_argument('--mat-type', default=default_mat_type, help='space separated list of types of matrix to generate '\n- '(available : dense, sparse)', metavar='', choices=default_mat_type,\n+ cparser.add_argument('--exec-type', default='hybrid_spark', help='System-ML backend '\n+ 'available : ' + ','.join(execution_mode), metavar='',\n+ choices=execution_mode)\n+ cparser.add_argument('--mat-type', default=['all'], help='space separated list of types of matrix to generate '\n+ 'available : ' + ','.join(mat_type), metavar='', choices=mat_type,\nnargs='+')\ncparser.add_argument('--mat-shape', default=default_mat_shape, help='space separated list of shapes of matrices '\n'to generate (e.g 10k_1k, 20M_4k)', metavar='', nargs='+')\n@@ -286,9 +294,9 @@ if __name__ == '__main__':\n'where generated, training and prediction data is put', metavar='')\ncparser.add_argument('--filename', default='perf_test', help='name of the output file for the perf'\n' metrics', metavar='')\n- cparser.add_argument('--mode', default=default_workload,\n+ cparser.add_argument('--mode', default=workload,\nhelp='space separated list of types of workloads to run (available: data-gen, train, predict)',\n- metavar='', choices=default_workload, nargs='+')\n+ metavar='', choices=workload, nargs='+')\n# Args is a namespace\nargs = cparser.parse_args()\n@@ -297,6 +305,11 @@ if __name__ == '__main__':\n# Debug arguments\n# print(arg_dict)\n+ # default_mat_type validity\n+ if len(args.mat_type) > 2:\n+ print('length of --mat-type argument cannot be greater than two')\n+ sys.exit()\n+\n# Check for validity of input arguments\nif args.family is not None:\nfor fam in args.family:\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/train.py",
"new_path": "scripts/perftest/python/train.py",
"diff": "#-------------------------------------------------------------\nimport sys\n-import glob\n-import os\nfrom os.path import join\n-from utils import config_writer\n+from utils import config_writer, relevant_folders, mat_type_check\nfrom functools import reduce\n# Contains configuration setting for training\n@@ -48,8 +46,8 @@ def binomial_m_svm_train(save_folder_name, datagen_dir, train_dir):\nmodel = join(full_path_train, 'model.data')\nLog = join(full_path_train, 'Log.data')\n- config = dict(X=X, Y=Y, icpt=icpt, classes=2, reg=reg, tol=tol, maxiter=maxiter, model=model,\n- Log=Log, fmt=DATA_FORMAT)\n+ config = dict(X=X, Y=Y, icpt=icpt, classes=2, reg=reg, tol=tol, maxiter=maxiter,\n+ model=model, Log=Log, fmt=DATA_FORMAT)\nconfig_writer(full_path_train + '.json', config)\nreturn data_folders\n@@ -117,8 +115,8 @@ def multinomial_m_svm_train(save_folder_name, datagen_dir, train_dir):\nmodel = join(full_path_train, 'model.data')\nLog = join(full_path_train, 'Log.data')\n- config = dict(X=X, Y=Y, icpt=icpt, classes=150, reg=reg, tol=tol, maxiter=maxiter, model=model,\n- Log=Log, fmt=DATA_FORMAT)\n+ config = dict(X=X, Y=Y, icpt=icpt, classes=150, reg=reg, tol=tol, maxiter=maxiter,\n+ model=model, Log=Log, fmt=DATA_FORMAT)\nconfig_writer(full_path_train + '.json', config)\ndata_folders.append(full_path_train)\n@@ -358,7 +356,7 @@ def regression2_glm_poisson_train(save_folder_name, datagen_dir, train_dir):\nreturn data_folders\n-def config_packets_train(algo_payload, datagen_dir, train_dir):\n+def config_packets_train(algo_payload, matrix_type, matrix_shape, datagen_dir, train_dir, dense_algos):\n\"\"\"\nThis function has two responsibilities. Generate the configuration files for\ninput training algorithms and return a dictionary that will be used for execution.\n@@ -367,39 +365,45 @@ def config_packets_train(algo_payload, datagen_dir, train_dir):\nThe first tuple index contains algorithm name and the second index contains\nfamily type.\n+ matrix_type: String\n+ Type of matrix to generate e.g dense, sparse, all\n+\n+ matrix_shape: String\n+ Shape of matrix to generate e.g 100k_10\n+\ndatagen_dir: String\nPath of the data generation directory\ntrain_dir: String\nPath of the training directory\n+ dense_algos: List\n+ Algorithms that support only dense matrix type\n+\nreturn: {string: list}\nThis dictionary contains algorithms to be executed as keys and the path of configuration\njson files to be executed list of values.\n-\n\"\"\"\nconfig_bundle = {}\n- for k, v in algo_payload:\n+ for k, _ in algo_payload:\nconfig_bundle[k] = []\nfor current_algo, current_family in algo_payload:\n- data_gen_path = join(datagen_dir, current_family)\n- data_gen_subdir = glob.glob(data_gen_path + \"*\")\n-\n- # Filter for specific data gen\n- data_gen_folders = list(filter(lambda x: os.path.isdir(x), data_gen_subdir))\n+ current_matrix_type = mat_type_check(current_family, matrix_type, dense_algos)\n+ data_gen_folders = relevant_folders(datagen_dir, current_algo, current_family,\n+ current_matrix_type, matrix_shape, 'data-gen')\nif len(data_gen_folders) == 0:\nprint('datagen folders not present for {}'.format(current_family))\nsys.exit()\n- for current_folder in data_gen_folders:\n- file_path_last = current_folder.split('/')[-1]\n+ for current_datagen_dir in data_gen_folders:\n+ file_path_last = current_datagen_dir.split('/')[-1]\nsave_name = '.'.join([current_algo] + [file_path_last])\nalgo_func = '_'.join([current_family] + [current_algo.lower().replace('-', '_')]\n+ ['train'])\n- conf_path = 
globals()[algo_func](save_name, current_folder, train_dir)\n+ conf_path = globals()[algo_func](save_name, current_datagen_dir, train_dir)\nconfig_bundle[current_algo].append(conf_path)\nconfig_packets = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/utils.py",
"new_path": "scripts/perftest/python/utils.py",
"diff": "@@ -27,11 +27,14 @@ import subprocess\nimport shlex\nimport re\nimport logging\n+import sys\n+import glob\n+from functools import reduce\n# This file contains all the utility functions required for performance test module\n-def get_families(current_algo, ML_ALGO):\n+def get_families(current_algo, ml_algo):\n\"\"\"\nGiven current algorithm we get its families.\n@@ -46,7 +49,7 @@ def get_families(current_algo, ML_ALGO):\n\"\"\"\nfamily_list = []\n- for family, algos in ML_ALGO.items():\n+ for family, algos in ml_algo.items():\nif current_algo in algos:\nfamily_list.append(family)\nreturn family_list\n@@ -138,7 +141,7 @@ def get_existence(path, action_mode):\nreturn exist\n-def exec_dml_and_parse_time(exec_type, dml_file_name, execution_output_file, args, Time=True):\n+def exec_dml_and_parse_time(exec_type, dml_file_name, execution_output_file, args, time=True):\n\"\"\"\nThis function is responsible of execution of input arguments via python sub process,\nWe also extract time obtained from the output of this subprocess\n@@ -181,7 +184,7 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, execution_output_file, arg\nproc1 = subprocess.Popen(shlex.split(cmd_string), stdout=subprocess.PIPE,\nstderr=subprocess.PIPE)\n- if Time:\n+ if time:\nproc1_log = []\nwhile proc1.poll() is None:\nraw_std_out = proc1.stdout.readline()\n@@ -189,7 +192,7 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, execution_output_file, arg\nproc1_log.append(decode_raw)\nlogging.log(10, decode_raw)\n- out1, err1 = proc1.communicate()\n+ _, err1 = proc1.communicate()\nif \"Error\" in str(err1):\nprint('Error Found in {}'.format(dml_file_name))\n@@ -197,9 +200,9 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, execution_output_file, arg\nelse:\ntotal_time = parse_time(proc1_log)\n- with open(execution_output_file, 'w') as f:\n+ with open(execution_output_file, 'w') as file:\nfor row in proc1_log:\n- f.write(\"%s\\n\" % str(row))\n+ file.write(\"%s\\n\" % str(row))\nelse:\ntotal_time = 'not_specified'\n@@ -253,20 +256,18 @@ def exec_test_data(exec_type, path):\nexec_dml_and_parse_time(exec_type, test_split_script, config_file_name, args, False)\n-def check_predict(current_algo, ML_PREDICT):\n+def check_predict(current_algo, ml_predict):\n\"\"\"\nTo check if the current algorithm requires to run the predict\ncurrent_algo: String\nAlgorithm being processed\n- ML_PREDICT: Dictionary\n+ ml_predict: Dictionary\nKey value pairs of algorithm and predict file to process\n\"\"\"\n- if current_algo in ML_PREDICT.keys():\n+ if current_algo in ml_predict.keys():\nreturn True\n- else:\n- return False\ndef get_folder_metrics(folder_name, action_mode):\n@@ -302,3 +303,88 @@ def get_folder_metrics(folder_name, action_mode):\nintercept = 'none'\nreturn mat_type, mat_shape, intercept\n+\n+\n+def mat_type_check(current_family, matrix_types, dense_algos):\n+ \"\"\"\n+ Some Algorithms support different matrix_type. 
This function gives us the right matrix_type given\n+ an algorithm\n+\n+ current_family: String\n+ Current family being processed in this function\n+\n+ matrix_type: List\n+ Type of matrix to generate dense, sparse, all\n+\n+ dense_algos: List\n+ Algorithms that support only dense matrix type\n+\n+ return: List\n+ Return the list of right matrix types supported by the family\n+ \"\"\"\n+ current_type = []\n+ for current_matrix_type in matrix_types:\n+ if current_matrix_type == 'all':\n+ if current_family in dense_algos:\n+ current_type.append('dense')\n+ else:\n+ current_type.append('dense')\n+ current_type.append('sparse')\n+\n+ if current_matrix_type == 'sparse':\n+ if current_family in dense_algos:\n+ sys.exit('{} does not support {} matrix type'.format(current_family,\n+ current_matrix_type))\n+ else:\n+ current_type.append(current_matrix_type)\n+\n+ if current_matrix_type == 'dense':\n+ current_type.append(current_matrix_type)\n+\n+ return current_type\n+\n+\n+def relevant_folders(path, algo, family, matrix_type, matrix_shape, mode):\n+ \"\"\"\n+ Finds the right folder to read the data based on given parameters\n+\n+ path: String\n+ Location of data-gen and training folders\n+\n+ algo: String\n+ Current algorithm being processed by this function\n+\n+ family: String\n+ Current family being processed by this function\n+\n+ matrix_type: List\n+ Type of matrix to generate dense, sparse, all\n+\n+ matrix_shape: List\n+ Dimensions of the input matrix with rows and columns\n+\n+ mode: String\n+ Based on mode and arguments we read the specific folders e.g. data-gen folder or train folder\n+\n+ return: List\n+ List of folder locations to read data from\n+ \"\"\"\n+ folders = []\n+ for current_matrix_type in matrix_type:\n+ for current_matrix_shape in matrix_shape:\n+ if mode == 'data-gen':\n+ data_gen_path = join(path, family)\n+ sub_folder_name = '.'.join([current_matrix_type, current_matrix_shape])\n+ path_subdir = glob.glob(data_gen_path + '.' + sub_folder_name + \"*\")\n+\n+ if mode == 'train':\n+ train_path = join(path, algo)\n+ sub_folder_name = '.'.join([family, current_matrix_type, current_matrix_shape])\n+ path_subdir = glob.glob(train_path + '.' + sub_folder_name + \"*\")\n+\n+ path_folders = list(filter(lambda x: os.path.isdir(x), path_subdir))\n+ folders.append(path_folders)\n+\n+ folders_flat = reduce(lambda x, y: x + y, folders)\n+\n+ return folders_flat\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Performance test bug fixes
Closes #565 |
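The relevant_folders helper added in this commit matches folders purely by a dotted name prefix. A Java mirror of that convention (values are made up for illustration; the real helper is the Python code in the diff):

```java
// Java mirror of the perf-test folder-name convention:
// train folders match the prefix "<algo>.<family>.<matrix_type>.<matrix_shape>".
public class FolderNameSketch {
    public static void main(String[] args) {
        String prefix = String.join(".", "m-svm", "binomial", "dense", "10k_100");
        String candidate = "m-svm.binomial.dense.10k_100.0";
        System.out.println(candidate.startsWith(prefix)); // true -> folder is picked up
    }
}
```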
49,717 | 13.07.2017 15:01:11 | 25,200 | 32ba9cf9fdff2aba7432c7a4e51317b6e5bf1a18 | Added mem estimates for various GPU ops
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java",
"diff": "@@ -21,19 +21,19 @@ package org.apache.sysml.hops;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.Hop.MultiThreadedHop;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.lops.Aggregate;\nimport org.apache.sysml.lops.Binary;\nimport org.apache.sysml.lops.DataPartition;\nimport org.apache.sysml.lops.Group;\n-import org.apache.sysml.hops.Hop.MultiThreadedHop;\nimport org.apache.sysml.lops.Lop;\nimport org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.lops.MMCJ;\n+import org.apache.sysml.lops.MMCJ.MMCJType;\nimport org.apache.sysml.lops.MMRJ;\nimport org.apache.sysml.lops.MMTSJ;\n-import org.apache.sysml.lops.MMCJ.MMCJType;\nimport org.apache.sysml.lops.MMTSJ.MMTSJType;\nimport org.apache.sysml.lops.MMZip;\nimport org.apache.sysml.lops.MapMult;\n@@ -344,9 +344,46 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\n{\ndouble ret = 0;\n+ if (DMLScript.USE_ACCELERATOR) {\n+ // In GPU Mode, intermediate memory is only needed in case of one of the matrix blocks is sparse\n+ // When sparse block is converted to dense and a dense MM takes place, we need (dim1 * dim2)\n+ // When dense block is converted to sparse and a sparse MM takes place, we need (dim1 * dim2 * 2)\n+\n+ Hop in1 = _input.get(0);\n+ Hop in2 = _input.get(1);\n+ double in1Sparsity = OptimizerUtils.getSparsity(in1.getDim1(), in1.getDim2(), in1.getNnz());\n+ double in2Sparsity = OptimizerUtils.getSparsity(in2.getDim1(), in2.getDim2(), in2.getNnz());\n+\n+ boolean in1Sparse = in1Sparsity < MatrixBlock.SPARSITY_TURN_POINT;\n+ boolean in2Sparse = in2Sparsity < MatrixBlock.SPARSITY_TURN_POINT;\n+\n+ boolean in1UltraSparse = in1Sparsity < MatrixBlock.ULTRA_SPARSITY_TURN_POINT;\n+ boolean in2UltraSparse = in2Sparsity < MatrixBlock.ULTRA_SPARSITY_TURN_POINT;\n+\n+ // For Matmult X * Y, if X is sparse, Y is dense, X is converted to dense\n+ // If X is ultrasparse, Y is converted to sparse\n+ if (in1Sparse ^ in2Sparse) { // one sparse, one dense\n+ if (in1Sparse) {\n+ if (in1UltraSparse) {\n+ ret += 2 * OptimizerUtils.estimateSizeExactSparsity(in2.getDim1(), in2.getDim2(), in2.getNnz());\n+ } else {\n+ ret += OptimizerUtils.estimateSizeExactSparsity(in1.getDim1(), in1.getDim2(), in1.getNnz());\n+ }\n+ } else if (in2Sparse) {\n+ if (in2UltraSparse) {\n+ ret += 2 * OptimizerUtils.estimateSizeExactSparsity(in1.getDim1(), in1.getDim2(), in1.getNnz());\n+ } else {\n+ ret += OptimizerUtils.estimateSizeExactSparsity(in2.getDim1(), in2.getDim2(), in2.getNnz());\n+ }\n+ }\n+\n+ }\n+\n+ }\n+\n//account for potential final dense-sparse transformation (worst-case sparse representation)\nif( dim2 >= 2 ) //vectors always dense\n- ret = OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, MatrixBlock.SPARSITY_TURN_POINT);\n+ ret += OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, MatrixBlock.SPARSITY_TURN_POINT);\nreturn ret;\n}\n@@ -544,8 +581,8 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\nExecType et = ExecType.CP;\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())) {\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))) {\net = ExecType.GPU;\n}\n@@ -624,8 +661,8 @@ public 
class AggBinaryOp extends Hop implements MultiThreadedHop\n{\nLop matmultCP = null;\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())) {\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))) {\nHop h1 = getInput().get(0);\nHop h2 = getInput().get(1);\nLop left; Lop right;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java",
"diff": "@@ -149,8 +149,8 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\n}\nelse { //general case\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())) {\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))) {\n// Only implemented methods for GPU\nif ((_op == AggOp.SUM && (_direction == Direction.RowCol || _direction == Direction.Row || _direction == Direction.Col))\n|| (_op == AggOp.SUM_SQ && (_direction == Direction.RowCol || _direction == Direction.Row || _direction == Direction.Col))\n@@ -329,7 +329,14 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\n@Override\nprotected double computeOutputMemEstimate( long dim1, long dim2, long nnz )\n{\n- double sparsity = OptimizerUtils.getSparsity(dim1, dim2, nnz);\n+ double sparsity = -1;\n+ if (DMLScript.USE_ACCELERATOR) {\n+ // The GPU version (for the time being) only does dense outputs\n+ sparsity = 1.0;\n+ } else {\n+ sparsity = OptimizerUtils.getSparsity(dim1, dim2, nnz);\n+ }\n+\nreturn OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, sparsity);\n}\n@@ -366,10 +373,31 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\nbreak;\ncase VAR:\n//worst-case correction LASTFOURROWS / LASTFOURCOLUMNS\n- if( _direction == Direction.Col ) //(potentially sparse)\n+ if (DMLScript.USE_ACCELERATOR) {\n+ // The GPU implementation only operates on dense data\n+ // It allocates 2 dense blocks to help with these ops:\n+ // Assume Y = var(X) Or colVars(X), Or rowVars(X)\n+ // 1. Y = mean/rowMeans/colMeans(X) <-- Y is a scalar or row-vector or col-vector\n+ // 2. temp1 = X - Y <-- temp1 is a matrix of size(X)\n+ // 3. temp2 = temp1 ^ 2 <-- temp2 is a matrix of size(X)\n+ // 4. temp3 = sum/rowSums/colSums(temp2) <-- temp3 is a scalar or a row-vector or col-vector\n+ // 5. Y = temp3 / (size(X) or nrow(X) or ncol(X)) <-- Y is a scalar or a row-vector or col-vector\n+\n+ long in1dim1 = getInput().get(0).getDim1();\n+ long in1dim2 = getInput().get(0).getDim2();\n+\n+ val = 2 * OptimizerUtils.estimateSize(in1dim1, in1dim2); // For temp1 & temp2\n+ if (_direction == Direction.Col){\n+ val += OptimizerUtils.estimateSize(in1dim1, 1); // For temp3\n+ } else if (_direction == Direction.Row){\n+ val += OptimizerUtils.estimateSize(1, in1dim2); // For temp3\n+ }\n+\n+ } else if( _direction == Direction.Col ) { //(potentially sparse)\nval = OptimizerUtils.estimateSizeExactSparsity(4, dim2, sparsity);\n- else if( _direction == Direction.Row ) //(always dense)\n+ } else if( _direction == Direction.Row ) { //(always dense)\nval = OptimizerUtils.estimateSizeExactSparsity(dim1, 4, 1.0);\n+ }\nbreak;\ncase MAXINDEX:\ncase MININDEX:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"diff": "@@ -584,8 +584,8 @@ public class BinaryOp extends Hop\nelse //general case\not = HopsOpOp2LopsU.get(op);\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))\n&& (op == OpOp2.MULT || op == OpOp2.PLUS || op == OpOp2.MINUS || op == OpOp2.DIV || op == OpOp2.POW\n|| op == OpOp2.MINUS_NZ || op == OpOp2.MINUS1_MULT || op == OpOp2.MODULUS || op == OpOp2.INTDIV\n|| op == OpOp2.LESS || op == OpOp2.LESSEQUAL || op == OpOp2.EQUAL || op == OpOp2.NOTEQUAL\n@@ -606,8 +606,8 @@ public class BinaryOp extends Hop\nExecType et = optFindExecType();\nif ( et == ExecType.CP )\n{\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))\n&& (op == OpOp2.MULT || op == OpOp2.PLUS || op == OpOp2.MINUS || op == OpOp2.DIV || op == OpOp2.POW\n|| op == OpOp2.SOLVE || op == OpOp2.MINUS1_MULT || op == OpOp2.MODULUS || op == OpOp2.INTDIV\n|| op == OpOp2.LESS || op == OpOp2.LESSEQUAL || op == OpOp2.EQUAL || op == OpOp2.NOTEQUAL\n@@ -829,10 +829,24 @@ public class BinaryOp extends Hop\nret = getInput().get(0).getMemEstimate() * 3;\n}\nelse if ( op == OpOp2.SOLVE ) {\n+ if (DMLScript.USE_ACCELERATOR) {\n+ // Solve on the GPU takes an awful lot of intermediate space\n+ // First the inputs are converted from row-major to column major\n+ // Then a workspace and a temporary output (workSize, tauSize) are needed\n+ long m = getInput().get(0).getDim1();\n+ long n = getInput().get(0).getDim2();\n+ long tauSize = OptimizerUtils.estimateSize(m, 1);\n+ long workSize = OptimizerUtils.estimateSize(m, n);\n+ long AtmpSize = OptimizerUtils.estimateSize(m, n);\n+ long BtmpSize = OptimizerUtils.estimateSize(n, 1);\n+ return (tauSize + workSize + AtmpSize + BtmpSize);\n+ } else {\n// x=solve(A,b) relies on QR decomposition of A, which is done using Apache commons-math\n// matrix of size same as the first input\n- double interOutput = OptimizerUtils.estimateSizeExactSparsity(getInput().get(0).getDim1(), getInput().get(0).getDim2(), 1.0);\n+ double interOutput = OptimizerUtils\n+ .estimateSizeExactSparsity(getInput().get(0).getDim1(), getInput().get(0).getDim2(), 1.0);\nreturn interOutput;\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/Hop.java",
"new_path": "src/main/java/org/apache/sysml/hops/Hop.java",
"diff": "@@ -790,8 +790,8 @@ public abstract class Hop\n}\nprotected ExecType findGPUExecTypeByMemEstimate(ExecType et) {\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())) {\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))) {\nreturn ExecType.GPU;\n}\nreturn et;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java",
"diff": "@@ -151,8 +151,8 @@ public class ReorgOp extends Hop implements MultiThreadedHop\nsetLops(lin); //if input of size 1x1, avoid unnecessary transpose\nelse { //general case\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget())) {\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))) {\net = ExecType.GPU;\n}\nTransform transform1 = new Transform( lin,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/TernaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/TernaryOp.java",
"diff": "@@ -650,11 +650,12 @@ public class TernaryOp extends Hop\nthrow new HopsException(\"Unexpected operation: \" + _op + \", expecting \" + OpOp3.PLUS_MULT + \" or\" + OpOp3.MINUS_MULT);\nExecType et = null;\n- if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < GPUContextPool\n- .initialGPUMemBudget()) )\n+ if (DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR\n+ || getMemEstimate() < Math.min(GPUContextPool.initialGPUMemBudget(), OptimizerUtils.getLocalMemBudget()))) {\net = ExecType.GPU;\n- else\n+ } else {\net = optFindExecType();\n+ }\nPlusMult plusmult = null;\nif( et == ExecType.CP || et == ExecType.SPARK || et == ExecType.GPU ) {\n@@ -727,9 +728,15 @@ public class TernaryOp extends Hop\n// Output is a vector of length = #of quantiles to be computed, and it is likely to be dense.\nreturn OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, 1.0);\ncase PLUS_MULT:\n- case MINUS_MULT:\n+ case MINUS_MULT: {\n+ if (DMLScript.USE_ACCELERATOR) {\n+ // For the GPU, the input is converted to dense\n+ sparsity = 1.0;\n+ } else {\nsparsity = OptimizerUtils.getSparsity(dim1, dim2, nnz);\n+ }\nreturn OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, sparsity);\n+ }\ndefault:\nthrow new RuntimeException(\"Memory for operation (\" + _op + \") can not be estimated.\");\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java",
"diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.hops;\nimport java.util.ArrayList;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.hops.Hop.MultiThreadedHop;\nimport org.apache.sysml.lops.Aggregate;\nimport org.apache.sysml.lops.Aggregate.OperationTypes;\n@@ -548,7 +549,12 @@ public class UnaryOp extends Hop implements MultiThreadedHop\n@Override\nprotected double computeOutputMemEstimate( long dim1, long dim2, long nnz )\n{\n- double sparsity = OptimizerUtils.getSparsity(dim1, dim2, nnz);\n+ double sparsity = -1;\n+ if (DMLScript.USE_ACCELERATOR) {\n+ sparsity = 1.0; // Output is always dense (for now) on the GPU\n+ } else {\n+ sparsity = OptimizerUtils.getSparsity(dim1, dim2, nnz);\n+ }\nreturn OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, sparsity);\n}\n@@ -563,6 +569,10 @@ public class UnaryOp extends Hop implements MultiThreadedHop\nret = getInput().get(0).getMemEstimate() * 3;\n}\n+ if (DMLScript.USE_ACCELERATOR) {\n+ OptimizerUtils.estimateSize(dim1, dim2); // Intermediate memory required to convert sparse to dense\n+ }\n+\nreturn ret;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java",
"diff": "@@ -54,7 +54,7 @@ public class CSRPointer {\nprivate static final Log LOG = LogFactory.getLog(CSRPointer.class.getName());\n- private static final double ULTRA_SPARSITY_TURN_POINT = 0.0004;\n+ private static final double ULTRA_SPARSITY_TURN_POINT = 0.00004;\npublic static cusparseMatDescr matrixDescriptor;\n/**\n* {@link GPUContext} instance to track the GPU to do work on\n@@ -242,7 +242,7 @@ public class CSRPointer {\n* Estimates the number of non-zero elements from the result of a sparse matrix multiplication C = A * B\n* and returns the {@link CSRPointer} to C with the appropriate GPU memory.\n*\n- * @param gCtx ?\n+ * @param gCtx a valid {@link GPUContext}\n* @param handle a valid {@link cusparseHandle}\n* @param A Sparse Matrix A on GPU\n* @param transA 'T' if A is to be transposed, 'N' otherwise\n@@ -268,7 +268,7 @@ public class CSRPointer {\n/**\n* Factory method to allocate an empty CSR Sparse matrix on the GPU\n*\n- * @param gCtx ?\n+ * @param gCtx a valid {@link GPUContext}\n* @param nnz2 number of non-zeroes\n* @param rows number of rows\n* @return a {@link CSRPointer} instance that encapsulates the CSR matrix on GPU\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -355,17 +355,41 @@ public class LibMatrixCUDA {\nthrow new DMLRuntimeException(\"Error status returned by CuDNN:\" + jcuda.jcudnn.cudnnStatus.stringFor(status));\n}\n- public static void conv2dBiasAdd(GPUContext gCtx, String instName, MatrixObject image, MatrixObject bias, MatrixObject filter, MatrixObject outputBlock, int N, int C, int H, int W,\n+ /**\n+ * Does a 2D convolution followed by a bias_add\n+ *\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param image input image matrix object\n+ * @param bias bias matrix object\n+ * @param filter filter matrix object\n+ * @param output output matrix object\n+ * @param N number of input images\n+ * @param C number of channels\n+ * @param H height of each image\n+ * @param W width of each image\n+ * @param K number of output \"channels\"\n+ * @param R height of filter\n+ * @param S width of filter\n+ * @param pad_h padding height\n+ * @param pad_w padding width\n+ * @param stride_h stride height\n+ * @param stride_w string width\n+ * @param P output height\n+ * @param Q output width\n+ * @throws DMLRuntimeException if error\n+ */\n+ public static void conv2dBiasAdd(GPUContext gCtx, String instName, MatrixObject image, MatrixObject bias, MatrixObject filter, MatrixObject output, int N, int C, int H, int W,\nint K, int R, int S, int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q)\nthrows DMLRuntimeException {\n/*\n- int rows = (int) outputBlock.getNumRows();\n- int cols = (int) outputBlock.getNumColumns();\n+ int rows = (int) output.getNumRows();\n+ int cols = (int) output.getNumColumns();\nlong size = rows * cols * Sizeof.DOUBLE;\nPointer imagePointer = getDensePointer(image, instName);\nPointer biasPointer = getDensePointer(bias, instName);\n- Pointer outputPointer = getDensePointer(outputBlock, instName);\n+ Pointer outputPointer = getDensePointer(output, instName);\nPointer filterPointer = getDensePointer(filter, instName);\nPointer tmp = allocate(size);\n@@ -377,15 +401,15 @@ public class LibMatrixCUDA {\nif(k1 != bias.getNumColumns() || bias.getNumColumns() != 1 || cols % k1 != 0) {\nthrow new DMLRuntimeException(\"Incorrect inputs for bias_add: input[\" + rows + \" X \" + cols + \"] and bias[\" + K + \" X \" + bias.getNumColumns() + \"]\");\n}\n- // biasAdd(instName, outputBlock, bias, outputBlock);\n+ // biasAdd(instName, output, bias, output);\nbiasAdd(instName, tmp, biasPointer, outputPointer, rows, cols, (int)k1);\ncudaFreeHelper(tmp);\n*/\nLOG.trace(\"GPU : conv2dBiasAdd\" + \", GPUContext=\" + gCtx);\n- conv2d(gCtx, instName, image, filter, outputBlock, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ conv2d(gCtx, instName, image, filter, output, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n//cudaDeviceSynchronize;\n- biasAdd(gCtx, instName, outputBlock, bias, outputBlock);\n+ biasAdd(gCtx, instName, output, bias, output);\n}\npublic static void conv2d(GPUContext gCtx, String instName, MatrixObject image, MatrixObject filter, MatrixObject outputBlock, int N, int C, int H, int W,\n@@ -398,6 +422,31 @@ public class LibMatrixCUDA {\nconv2d(gCtx, instName, imagePointer, filterPointer, dstPointer, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\n+ /**\n+ * Performs 2D convolution\n+ * Takes up an insignificant amount of intermediate space when CONVOLUTION_PREFERENCE is set to CUDNN_CONVOLUTION_FWD_NO_WORKSPACE\n+ * Intermediate space is required by the filter descriptor and 
convolution descriptor which are metadata structures and don't scale with the size of the input\n+ *\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param image the input matrix (or image) allocated on the GPU\n+ * @param filter the filter allocated on the GPU\n+ * @param output the output matrix allocated on the GPU\n+ * @param N number of input images\n+ * @param C number of channels\n+ * @param H height of each image\n+ * @param W width of each image\n+ * @param K number of output \"channels\"\n+ * @param R height of filter\n+ * @param S width of filter\n+ * @param pad_h padding height\n+ * @param pad_w padding width\n+ * @param stride_h stride height\n+ * @param stride_w string width\n+ * @param P output height\n+ * @param Q output width\n+ * @throws DMLRuntimeException if error\n+ */\npublic static void conv2d(GPUContext gCtx, String instName, Pointer image, Pointer filter, Pointer output, int N,\nint C, int H, int W, int K, int R, int S, int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q)\nthrows DMLRuntimeException {\n@@ -1225,6 +1274,9 @@ public class LibMatrixCUDA {\n/**\n* Performs tsmm, A %*% A' or A' %*% A, on GPU by exploiting cublasDsyrk(...)\n+ * <p>\n+ * Memory Usage - If dense, input space - rows * cols, no intermediate memory, output - Max(rows*rows, cols*cols)\n+ * If sparse, calls matmult\n*\n* @param ec execution context\n* @param gCtx a valid {@link GPUContext}\n@@ -1285,6 +1337,7 @@ public class LibMatrixCUDA {\n* Used for all version of TSMM where the result is known to be symmetric.\n* Hence, we compute only the upper triangular matrix and copy this partial\n* result down to lower triangular matrix once.\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName instruction name\n* @param ret upper triangular matrix\n@@ -1319,6 +1372,12 @@ public class LibMatrixCUDA {\n* Examines sparsity and shapes and routes call to appropriate method\n* from cuBLAS or cuSparse\n* C = op(A) x op(B)\n+ * <p>\n+ * Memory Requirements -\n+ * Both dense - inputs, output, no intermediate\n+ * Both sparse - inputs, output, no intermediate\n+ * One sparse, one dense - inputs, output, intermediates - (input_dim1 * input_dim2) OR (input_dim1 * input_dim2 + input in sparse format)\n+ *\n* @param ec Current {@link ExecutionContext} instance\n* @param gCtx a valid {@link GPUContext}\n* @param instName name of the invoking instruction to record{@link Statistics}.\n@@ -1327,8 +1386,8 @@ public class LibMatrixCUDA {\n* @param outputName Name of the output matrix C (in code generated after LOP layer)\n* @param isLeftTransposed op for A, transposed or not\n* @param isRightTransposed op for B, tranposed or not\n- * @return output of matrix multiply\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n+ * @return output of matrix multiply\n*/\npublic static MatrixObject matmult(ExecutionContext ec, GPUContext gCtx, String instName, MatrixObject left, MatrixObject right, String outputName,\nboolean isLeftTransposed, boolean isRightTransposed) throws DMLRuntimeException {\n@@ -1364,6 +1423,7 @@ public class LibMatrixCUDA {\n/**\n* One of the matrices is sparse, the other dense\n* C = op(A) x op(B)\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param output allocated output object for C on host to which GPU output will be attached\n@@ -1400,6 +1460,7 @@ public class LibMatrixCUDA {\n* C = op(A) * op(B) 
where A is dense and B is sparse\n* If B is ultrasparse, A is converted to a sparse matrix and {@code sparseSparseMatmult(MatrixObject, int, int, int, int, int, CSRPointer, CSRPointer)} is invoked\n* otherwise B is converted to a dense matrix and {@code denseDenseMatmult(Pointer, int, int, int, int, boolean, boolean, Pointer, Pointer)} is invoked.\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param left {@link MatrixObject} of A\n@@ -1473,6 +1534,7 @@ public class LibMatrixCUDA {\n* * C = op(A) * op(B) where A is sparse and B is dense\n* If A is ultrasparse, B is converted to a sparse matrix and {@code sparseSparseMatmult(MatrixObject, int, int, int, int, int, CSRPointer, CSRPointer)} is invoked\n* otherwise A is converted to a dense matrix and {@code denseDenseMatmult(Pointer, int, int, int, int, boolean, boolean, Pointer, Pointer)} is invoked.\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param output the output matrix object\n@@ -1553,6 +1615,7 @@ public class LibMatrixCUDA {\n/**\n* C = op(A) x B\n* A is a sparse matrix, B is a dense vector\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param output allocated output on the host, to which the GPU output C will be attached\n@@ -1585,6 +1648,7 @@ public class LibMatrixCUDA {\n/**\n* Sparse C = Sparse op(A) * Sparse op(B)\n* Reroutes call to sparse matrix-vector mult if needed\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param output ?\n@@ -1622,6 +1686,7 @@ public class LibMatrixCUDA {\n/**\n* Does a sparse matrix-vector multiply.\n* C = op(A) x B, A is a sparse matrix, B is a sparse vector with numCols = 1.\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param output allocated output object C to which the GPU output matrix will be attached\n@@ -1645,6 +1710,7 @@ public class LibMatrixCUDA {\n/**\n* Does a sparse-sparse Matrix multiply\n* C = op(A) x op(B), A, B are sparse matrices\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param A left sparse matrix on GPU\n@@ -1683,6 +1749,7 @@ public class LibMatrixCUDA {\n/**\n* Dense dense matrix multiply\n* C = op(A) * op(B), A and B are dense matrices\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName name of the invoking instruction to record{@link Statistics}.\n* @param output output object C on host with GPU data allocated\n@@ -1715,6 +1782,7 @@ public class LibMatrixCUDA {\n* We do t(B) %*% t(A) to get t(C);\n* If we were to calculate t(t(C), we would get the resultant matrix C, but this would be in column-major format.\n* What we really want is t(C). 
This we already have as the result of t(B) %*% t(A).\n+ *\n* @param gCtx a valid {@link GPUContext}\n* @param instName name of the invoking instruction to record{@link Statistics}.\n* @param output output allocated on GPU in column major format\n@@ -1809,10 +1877,10 @@ public class LibMatrixCUDA {\n//**************** UNARY AGGREGATE Functions ************************/\n//********************************************************************/\n-\n/**\n* Entry point to perform Unary aggregate operations on the GPU.\n* The execution context object is used to allocate memory for the GPU.\n+ *\n* @param ec Instance of {@link ExecutionContext}, from which the output variable will be allocated\n* @param gCtx a valid {@link GPUContext}\n* @param instName name of the invoking instruction to record{@link Statistics}.\n@@ -1852,7 +1920,7 @@ public class LibMatrixCUDA {\nIndexFunction indexFn = op.indexFn;\nAggregateOperator aggOp = op.aggOp;\n- // Convert Reduction direction to a number to pass to CUDA kernel\n+ // Convert Reduction direction to a number\nint reductionDirection = -1;\nif (indexFn instanceof ReduceAll){\nreductionDirection = REDUCTION_ALL;\n@@ -1867,7 +1935,7 @@ public class LibMatrixCUDA {\n}\nassert reductionDirection !=-1 : \"Internal Error - Incorrect type of reduction direction set for aggregate unary GPU instruction\";\n- // Convert function type to a number to pass to the CUDA Kernel\n+ // Convert function type to a number\nint opIndex = -1;\nif (aggOp.increOp.fn instanceof KahanPlus) {\nopIndex = OP_PLUS;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1713] Added mem estimates for various GPU ops
Closes #553 |
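The diff above applies two recurring patterns: GPU placement is chosen only when the memory estimate fits both the GPU and the driver budget, and output estimates assume dense results under the accelerator. A hedged sketch of how they compose inside a Hop subclass follows; the method decomposition is illustrative, not the actual Hop code, while `DMLScript`, `GPUContextPool`, `OptimizerUtils`, `getMemEstimate()`, and `optFindExecType()` are the identifiers the diff actually uses.

```java
// Sketch only: condenses the TernaryOp/UnaryOp changes from this record.
protected ExecType chooseExecType() throws HopsException {
    double budget = Math.min(GPUContextPool.initialGPUMemBudget(),
                             OptimizerUtils.getLocalMemBudget());
    if (DMLScript.USE_ACCELERATOR
        && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < budget))
        return ExecType.GPU;   // fits both the GPU and the driver budget
    return optFindExecType();  // regular CP/Spark decision otherwise
}

protected double computeOutputMemEstimate(long dim1, long dim2, long nnz) {
    // for now, the GPU backend densifies inputs/outputs, hence sparsity 1.0
    double sparsity = DMLScript.USE_ACCELERATOR ? 1.0
        : OptimizerUtils.getSparsity(dim1, dim2, nnz);
    return OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, sparsity);
}
```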
49,737 | 13.07.2017 15:04:28 | 25,200 | 61467dab86fad98e15d0cf529aaea7ba0cd6083f | [MINOR][DOC] Performance Test Documentation
Closes | [
{
"change_type": "ADD",
"old_path": "docs/img/performance-test/perf_test_arch.png",
"new_path": "docs/img/performance-test/perf_test_arch.png",
"diff": "Binary files /dev/null and b/docs/img/performance-test/perf_test_arch.png differ\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/python-performance-test.md",
"diff": "+# Performance Testing Algorithms User Manual\n+\n+This user manual contains details on how to conduct automated performance tests. Work was mostly done in this [PR](https://github.com/apache/systemml/pull/537) and part of [SYSTEMML-1451](https://issues.apache.org/jira/browse/SYSTEMML-1451). Our aim was to move from existing `bash` based performance tests to automatic `python` based automatic performance tests.\n+\n+### Architecture\n+Our performance tests suit contains `7` families namely `binomial`, `multinomial`, `stats1`, `stats2`, `regression1`, `regression2`, `clustering`. Within these families we have algorithms grouped under it. Typically a family is a set of algorithms that require the same data generation script.\n+\n+- Exceptions: `regression1`, `regression2` and `binomial`. We decide to include these algorithms in separate families to keep the architecture simple.\n+\n+\n+\n+On a very high level use construct a string with arguments required to run each operation. Once this string is constructed we use the subprocess module to execute this string and extract time from the standard out.\n+\n+We also use `json` module write our configurations to a json file. This ensure that our current operation is easy to debug.\n+\n+\n+We have `5` files in performance test suit `run_perftest.py`, `datagen.py`, `train.py`, `predict.py` and `utils.py`.\n+\n+`datagen.py`, `train.py` and `predict.py` generate a dictionary. Our key is the name of algorithm being processed and values is a list with path(s) where all the data required is present. We define this dictionary as a configuration packet.\n+\n+We will describe each of them in detail the following sections below.\n+\n+`run_perftest.py` at a high level creates `algos_to_run` list. This list is tuple with key as algorithm and value as family to be executed in our performance test.\n+\n+In `datagen.py` script we have all functions required to generate data. We return the required configuration packet as a result of this script, that contains key as the `data-gen` script to run and values with location to read data-gen json files from.\n+\n+In `train.py` script we have functions required to generate training output. We return the required configuration packet as a result of this script, that contains key as the algorithm to run and values with location to read training json files from.\n+\n+The file `predict.py` contains all functions for all algorithms in the performance test that contain predict script. We return the required configuration packet as a result of this script, that contains key as the algorithm to run and values with location to read predict json files from.\n+\n+In the file `utils.py` we have all the helper functions required in our performance test. These functions do operations like write `json` files, extract time from std out etc.\n+\n+### Adding New Algorithms\n+While adding a new algorithm we need know if it has to be part of the any pre existing family. If this algorithm depends on a new data generation script we would need to create a new family. 
Steps below to take below to add a new algorithm.\n+\n+Following changes to `run_perftest.py`:\n+\n+- Add the algorithm to `ML_ALGO` dictionary with its respective family.\n+- Add the name of the data generation script in `ML_GENDATA` dictionary if it does not exist already.\n+- Add the name of the training script in `ML_TRAIN` dictionary.\n+- Add the name of the prediction script in `ML_PREDICT` incase the prediction script exists.\n+\n+Following changes to `datagen.py`:\n+\n+- Check if the data generation algorithm has the ability to generate dense and sparse data. If it had the ability to generate only dense data add the corresponding family to `FAMILY_NO_MATRIX_TYPE` list.\n+- Create a function with `familyname + _ + datagen` with same input arguments namely `matrix_dim`, `matrix_type`, `datagen_dir`.\n+- Constants and arguments for the data generation script should be defined in function.\n+- Test the perf test with the algorithm with `mode` as `data-gen`.\n+- Check output folders, json files, output log.\n+- Check for possible errors if these folders/files do not exist. (See the troubleshooting section).\n+\n+Following changes to `train.py`:\n+\n+- Create the function with `familyname + _ + algoname + _ + train`.\n+- This function needs to have the following arguments `save_folder_name`, `datagen_dir`, `train_dir`.\n+- Constants and arguments for the training script should be defined in function.\n+- Make sure that the return type is a list.\n+- Test the perf test with the algorithm with `mode` as `train`.\n+- Check output folders, json files, output log.\n+- Check for possible errors if these folders/files do not exist. (See the troubleshooting section).\n+\n+Following changes to `predict.py`:\n+\n+- Create the function with `algoname + _ + predict`.\n+- This function needs to have the following arguments `save_file_name`, `datagen_dir`, `train_dir`, `predict_dir`.\n+- Constants and arguments for the training script should be defined in function.\n+- Test the perf test with the algorithm with `mode` as `predict`.\n+- Check output folders, json files, output log.\n+- Check for possible errors if these folders/files do not exist. (Please see the troubleshooting section).\n+- Note: `predict.py` will not be executed if the current algorithm being executed does not have predict script.\n+\n+### Current Default Settings\n+Default setting for our performance test below:\n+\n+- Matrix size to 10,000 rows and 100 columns.\n+- Execution mode `singlenode`.\n+- Operation modes `data-gen`, `train` and `predict` in sequence.\n+- Matrix type set to `all`. 
Which will generate `dense` or / and `sparse` matrices for all relevant algorithms.\n+\n+### Examples\n+Some examples of SystemML performance test with arguments shown below:\n+\n+`./scripts/perftest/python/run_perftest.py --family binomial clustering multinomial regression1 regression2 stats1 stats2\n+`\n+Test all algorithms with default parameters.\n+\n+`./scripts/perftest/python/run_perftest.py --exec-type hybrid_spark --family binomial clustering multinomial regression1 regression2 stats1 stats2\n+`\n+Test all algorithms in hybrid spark execution mode.\n+\n+`./scripts/perftest/python/run_perftest.py --exec-type hybrid_spark --family clustering --mat-shape 10k_5 10k_10 10k_50\n+`\n+Test all algorithms in `clustering` family in hybrid spark execution mode, on different matrix size `10k_10` (10,000 rows and 5 columns), `10k_10` and `10k_50`.\n+\n+`./scripts/perftest/python/run_perftest.py --algo Univar-Stats bivar-stats\n+`\n+Run performance test for following algorithms `Univar-Stats` and `bivar-stats`.\n+\n+`./scripts/perftest/python/run_perftest.py --algo m-svm --family multinomial binomial --mode data-gen train\n+`\n+Run performance test for the algorithms `m-svm` with `multinomial` family. Run only data generation and training operations.\n+\n+`./scripts/perftest/python/run_perftest.py --family regression2 --filename new_log\n+`\n+Run performance test for all algorithms under the family `regression2` and log with filename `new_log`.\n+\n+### Operational Notes\n+All performance test depend mainly on two scripts for execution `systemml-standalone.py` and `systemml-spark-submit.py`. Incase we need to change standalone or spark parameters we need to manually change these parameters in their respective scripts.\n+\n+Constants like `DATA_FORMAT` currently set to `csv` and `MATRIX_TYPE_DICT` with `density` set to `0.9` and `sparsity` set to `0.01` are hardcoded in the performance test scripts. They can be changed easily as they are defined at the top of their respective operational scripts.\n+\n+The logs contain the following information below comma separated.\n+\n+algorithm | run_type | intercept | matrix_type | data_shape | time_sec\n+--- | --- | --- | --- | --- | --- |\n+multinomial|data-gen|0|dense|10k_100| 0.33\n+MultiLogReg|train|0|10k_100|dense|6.956\n+MultiLogReg|predict|0|10k_100|dense|4.780\n+\n+These logs can be found in `temp` folder (`$SYSTEMML_HOME/scripts/perftest/temp`) in-case not overridden by `--temp-dir`. This `temp` folders also contain the data generated during our performance test.\n+\n+Every time a script executes in `data-gen` mode successfully, we write a `_SUCCESS` file. If this file exists we ensures that re-run of the same script is not possible as data already exists.\n+\n+### Troubleshooting\n+We can debug the performance test by making changes in the following locations based on\n+\n+- Please see `utils.py` function `exec_dml_and_parse_time`. In uncommenting the debug print statement in the function `exec_dml_and_parse_time`. This allows us to inspect the subprocess string being executed.\n+- Please see `run_perftest.py`. Changing the verbosity level to `0` allows us to log more information while the script runs.\n+- Eyeballing the json files generated and making sure the arguments are correct.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Performance Test Documentation
Closes #563 |
49,703 | 13.07.2017 17:16:58 | 25,200 | f315480074b8e9168a0b04941b86ead4c8b0a65f | [MINOR] Remove unnecessary imports | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/debug/DMLDebugger.java",
"new_path": "src/main/java/org/apache/sysml/debug/DMLDebugger.java",
"diff": "@@ -23,8 +23,6 @@ import java.io.PrintStream;\nimport org.apache.commons.cli.CommandLine;\nimport org.apache.commons.lang.math.IntRange;\n-\n-import org.apache.sysml.debug.DMLDebuggerFunctions;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysml.runtime.instructions.cp.BreakPointInstruction.BPINSTRUCTION_STATUS;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/SpoofCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/SpoofCPInstruction.java",
"diff": "@@ -27,9 +27,6 @@ import org.apache.sysml.runtime.codegen.CodegenUtils;\nimport org.apache.sysml.runtime.codegen.SpoofOperator;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\n-import org.apache.sysml.runtime.instructions.cp.CPOperand;\n-import org.apache.sysml.runtime.instructions.cp.ComputationCPInstruction;\n-import org.apache.sysml.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\npublic class SpoofCPInstruction extends ComputationCPInstruction\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixAppendGPUInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixAppendGPUInstruction.java",
"diff": "@@ -27,10 +27,6 @@ import org.apache.sysml.runtime.functionobjects.OffsetColumnIndex;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.AppendCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\n-import org.apache.sysml.runtime.instructions.cp.FrameAppendCPInstruction;\n-import org.apache.sysml.runtime.instructions.cp.MatrixAppendCPInstruction;\n-import org.apache.sysml.runtime.instructions.cp.ScalarAppendCPInstruction;\n-import org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\nimport org.apache.sysml.runtime.matrix.operators.ReorgOperator;\n@@ -62,6 +58,7 @@ public class MatrixAppendGPUInstruction extends GPUInstruction {\nString opcode = parts[0];\nCPOperand in1 = new CPOperand(parts[1]);\nCPOperand in2 = new CPOperand(parts[2]);\n+ @SuppressWarnings(\"unused\")\nCPOperand in3 = new CPOperand(parts[3]);\nCPOperand out = new CPOperand(parts[4]);\nboolean cbind = Boolean.parseBoolean(parts[5]);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SpoofSPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SpoofSPInstruction.java",
"diff": "@@ -33,9 +33,9 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.codegen.CodegenUtils;\nimport org.apache.sysml.runtime.codegen.LibSpoofPrimitives;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise;\n-import org.apache.sysml.runtime.codegen.SpoofMultiAggregate;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise.AggOp;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;\n+import org.apache.sysml.runtime.codegen.SpoofMultiAggregate;\nimport org.apache.sysml.runtime.codegen.SpoofOperator;\nimport org.apache.sysml.runtime.codegen.SpoofOuterProduct;\nimport org.apache.sysml.runtime.codegen.SpoofOuterProduct.OutProdType;\n@@ -50,7 +50,6 @@ import org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n-import org.apache.sysml.runtime.instructions.spark.SPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.data.PartitionedBroadcast;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDAggregateUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java",
"diff": "@@ -42,7 +42,6 @@ import org.apache.spark.mllib.util.NumericParser;\nimport org.apache.spark.sql.Dataset;\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.RowFactory;\n-import org.apache.spark.sql.SQLContext;\nimport org.apache.spark.sql.SparkSession;\nimport org.apache.spark.sql.types.DataTypes;\nimport org.apache.spark.sql.types.StructField;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Remove unnecessary imports |
49,738 | 13.07.2017 17:20:20 | 25,200 | 586c67b6a47950305f1bb57ba809aa295a83861b | Support for reading dml scripts from object stores
This patch generalizes the various methods for reading dml script files
to support (apart from local fs and hdfs) also the read from object
stores such as swift and s3. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysml/api/DMLScript.java",
"diff": "@@ -567,8 +567,8 @@ public class DMLScript\ntry\n{\n//read from hdfs or gpfs file system\n- if( fileName.startsWith(\"hdfs:\")\n- || fileName.startsWith(\"gpfs:\") )\n+ if( fileName.startsWith(\"hdfs:\") || fileName.startsWith(\"gpfs:\")\n+ || IOUtilFunctions.isObjectStoreFileScheme(new Path(fileName)) )\n{\nPath scriptPath = new Path(fileName);\nFileSystem fs = IOUtilFunctions.getFileSystem(scriptPath);\n@@ -588,8 +588,7 @@ public class DMLScript\nsb.append( \"\\n\" );\n}\n}\n- catch (IOException ex)\n- {\n+ catch (IOException ex) {\nLOG.error(\"Failed to read the script from the file system\", ex);\nthrow ex;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java",
"new_path": "src/main/java/org/apache/sysml/api/jmlc/Connection.java",
"diff": "@@ -250,8 +250,8 @@ public class Connection implements Closeable\ntry\n{\n//read from hdfs or gpfs file system\n- if( fname.startsWith(\"hdfs:\")\n- || fname.startsWith(\"gpfs:\") )\n+ if( fname.startsWith(\"hdfs:\") || fname.startsWith(\"gpfs:\")\n+ || IOUtilFunctions.isObjectStoreFileScheme(new Path(fname)) )\n{\nPath scriptPath = new Path(fname);\nFileSystem fs = IOUtilFunctions.getFileSystem(scriptPath);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptFactory.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptFactory.java",
"diff": "@@ -351,18 +351,18 @@ public class ScriptFactory {\nthrow new MLContextException(\"Script file path is null\");\n}\ntry {\n- if (scriptFilePath.startsWith(\"hdfs:\") || scriptFilePath.startsWith(\"gpfs:\")) {\n+ if ( scriptFilePath.startsWith(\"hdfs:\") || scriptFilePath.startsWith(\"gpfs:\")\n+ || IOUtilFunctions.isObjectStoreFileScheme(new Path(scriptFilePath))) {\nPath path = new Path(scriptFilePath);\nFileSystem fs = IOUtilFunctions.getFileSystem(path);\n- FSDataInputStream fsdis = fs.open(path);\n+ try( FSDataInputStream fsdis = fs.open(path) ) {\nreturn IOUtils.toString(fsdis);\n+ }\n} else {// from local file system\nFile scriptFile = new File(scriptFilePath);\nreturn FileUtils.readFileToString(scriptFile);\n}\n- } catch (IllegalArgumentException e) {\n- throw new MLContextException(\"Error trying to read script string from file: \" + scriptFilePath, e);\n- } catch (IOException e) {\n+ } catch (IllegalArgumentException | IOException e) {\nthrow new MLContextException(\"Error trying to read script string from file: \" + scriptFilePath, e);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -172,8 +172,8 @@ public class DMLConfig\nfactory.setIgnoringComments(true); //ignore XML comments\nDocumentBuilder builder = factory.newDocumentBuilder();\nDocument domTree = null;\n- if (_fileName.startsWith(\"hdfs:\") ||\n- _fileName.startsWith(\"gpfs:\") ) // config file from DFS\n+ if( _fileName.startsWith(\"hdfs:\") || _fileName.startsWith(\"gpfs:\")\n+ || IOUtilFunctions.isObjectStoreFileScheme(new Path(_fileName)) )\n{\nPath configFilePath = new Path(_fileName);\nFileSystem DFS = IOUtilFunctions.getFileSystem(configFilePath);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/ParserWrapper.java",
"new_path": "src/main/java/org/apache/sysml/parser/ParserWrapper.java",
"diff": "@@ -94,11 +94,12 @@ public abstract class ParserWrapper {\ntry\n{\n//read from hdfs or gpfs file system\n- if( script.startsWith(\"hdfs:\")\n- || script.startsWith(\"gpfs:\") )\n+ if( script.startsWith(\"hdfs:\") || script.startsWith(\"gpfs:\")\n+ || IOUtilFunctions.isObjectStoreFileScheme(new Path(script)) )\n{\n- LOG.debug(\"Looking for the following file in HDFS or GPFS: \" + script);\nPath scriptPath = new Path(script);\n+ String scheme = (scriptPath.toUri()!=null) ? scriptPath.toUri().getScheme() : null;\n+ LOG.debug(\"Looking for the following file in \"+scheme+\": \" + script);\nFileSystem fs = IOUtilFunctions.getFileSystem(scriptPath);\nin = new BufferedReader(new InputStreamReader(fs.open(scriptPath)));\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1765] Support for reading dml scripts from object stores
This patch generalizes the various methods for reading dml script files
to support (apart from local fs and hdfs) also the read from object
stores such as swift and s3. |
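All four modified files of this record apply the same scheme dispatch; a condensed, hedged sketch follows (simplified signature and error handling, Hadoop `FileSystem` and commons-io imports assumed; the real methods differ in return types and logging).

```java
// Sketch of the dispatch added in DMLScript/Connection/ScriptFactory/
// ParserWrapper: Hadoop FileSystem for hdfs/gpfs/object stores, java.io
// for the local file system.
String readScript(String fname) throws IOException {
    if (fname.startsWith("hdfs:") || fname.startsWith("gpfs:")
        || IOUtilFunctions.isObjectStoreFileScheme(new Path(fname))) {
        Path path = new Path(fname);
        FileSystem fs = IOUtilFunctions.getFileSystem(path); // also swift/s3
        try (FSDataInputStream in = fs.open(path)) {
            return IOUtils.toString(in); // commons-io
        }
    }
    return FileUtils.readFileToString(new File(fname)); // local file system
}
```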
49,738 | 13.07.2017 19:46:08 | 25,200 | a4ce06461deedd9c4f9d0293195ce81ae42ccfd6 | Cleanup properties of systemml-config file
This patch cleans up the following two properties of the
SystemML-config.xml file in order to better convey their meaning:
1) cp.parallel.matrixmult -> cp.parallel.ops
2) cp.parallel.textio -> cp.parallel.io | [
{
"change_type": "MODIFY",
"old_path": "conf/SystemML-config.xml.template",
"new_path": "conf/SystemML-config.xml.template",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n<!-- enables compressed linear algebra, experimental feature -->\n<compressed.linalg>false</compressed.linalg>\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/standalone-guide.md",
"new_path": "docs/standalone-guide.md",
"diff": "@@ -334,8 +334,8 @@ The console output should show the accuracy of the trained model in percent, i.e\n15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.appmaster.mem with value 2048\n15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.mapreduce.mem with value 2048\n15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.app.queue with value default\n- 15/09/01 01:32:51 INFO conf.DMLConfig: Updating cp.parallel.matrixmult with value true\n- 15/09/01 01:32:51 INFO conf.DMLConfig: Updating cp.parallel.textio with value true\n+ 15/09/01 01:32:51 INFO conf.DMLConfig: Updating cp.parallel.ops with value true\n+ 15/09/01 01:32:51 INFO conf.DMLConfig: Updating cp.parallel.io with value true\nAccuracy (%): 74.14965986394557\n15/09/01 01:32:52 INFO api.DMLScript: SystemML Statistics:\nTotal execution time: 0.130 sec.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -68,8 +68,8 @@ public class DMLConfig\npublic static final String YARN_APPMASTERMEM = \"dml.yarn.appmaster.mem\";\npublic static final String YARN_MAPREDUCEMEM = \"dml.yarn.mapreduce.mem\";\npublic static final String YARN_APPQUEUE = \"dml.yarn.app.queue\";\n- public static final String CP_PARALLEL_MATRIXMULT = \"cp.parallel.matrixmult\";\n- public static final String CP_PARALLEL_TEXTIO = \"cp.parallel.textio\";\n+ public static final String CP_PARALLEL_OPS = \"cp.parallel.ops\";\n+ public static final String CP_PARALLEL_IO = \"cp.parallel.io\";\npublic static final String COMPRESSED_LINALG = \"compressed.linalg\";\npublic static final String NATIVE_BLAS = \"native.blas\";\npublic static final String CODEGEN = \"codegen.enabled\"; //boolean\n@@ -110,14 +110,13 @@ public class DMLConfig\n_defaultVals.put(YARN_APPMASTERMEM, \"2048\" );\n_defaultVals.put(YARN_MAPREDUCEMEM, \"-1\" );\n_defaultVals.put(YARN_APPQUEUE, \"default\" );\n- _defaultVals.put(CP_PARALLEL_MATRIXMULT, \"true\" );\n- _defaultVals.put(CP_PARALLEL_TEXTIO, \"true\" );\n+ _defaultVals.put(CP_PARALLEL_OPS, \"true\" );\n+ _defaultVals.put(CP_PARALLEL_IO, \"true\" );\n_defaultVals.put(COMPRESSED_LINALG, \"false\" );\n_defaultVals.put(CODEGEN, \"false\" );\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n_defaultVals.put(NATIVE_BLAS, \"none\" );\n-\n_defaultVals.put(EXTRA_GPU_STATS, \"false\" );\n_defaultVals.put(EXTRA_DNN_STATS, \"false\" );\n@@ -402,7 +401,7 @@ public class DMLConfig\nLOCAL_TMP_DIR,SCRATCH_SPACE,OPTIMIZATION_LEVEL,\nNUM_REDUCERS, DEFAULT_BLOCK_SIZE,\nYARN_APPMASTER, YARN_APPMASTERMEM, YARN_MAPREDUCEMEM,\n- CP_PARALLEL_MATRIXMULT, CP_PARALLEL_TEXTIO, NATIVE_BLAS,\n+ CP_PARALLEL_OPS, CP_PARALLEL_IO, NATIVE_BLAS,\nCOMPRESSED_LINALG, CODEGEN, CODEGEN_LITERALS, CODEGEN_PLANCACHE,\nEXTRA_GPU_STATS, EXTRA_DNN_STATS\n};\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java",
"diff": "@@ -356,7 +356,7 @@ public class OptimizerUtils\n}\n//handle parallel text io (incl awareness of thread contention in <jdk8)\n- if (!dmlconf.getBooleanValue(DMLConfig.CP_PARALLEL_TEXTIO)) {\n+ if (!dmlconf.getBooleanValue(DMLConfig.CP_PARALLEL_IO)) {\ncconf.set(ConfigType.PARALLEL_CP_READ_TEXTFORMATS, false);\ncconf.set(ConfigType.PARALLEL_CP_WRITE_TEXTFORMATS, false);\ncconf.set(ConfigType.PARALLEL_CP_READ_BINARYFORMATS, false);\n@@ -371,7 +371,7 @@ public class OptimizerUtils\n}\n//handle parallel matrix mult / rand configuration\n- if (!dmlconf.getBooleanValue(DMLConfig.CP_PARALLEL_MATRIXMULT)) {\n+ if (!dmlconf.getBooleanValue(DMLConfig.CP_PARALLEL_OPS)) {\ncconf.set(ConfigType.PARALLEL_CP_MATRIX_OPERATIONS, false);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/standalone/SystemML-config.xml",
"new_path": "src/main/standalone/SystemML-config.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded matrix operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n</root>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/config/SystemML-config.xml",
"new_path": "src/test/config/SystemML-config.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded matrix operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n</root>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java",
"diff": "@@ -200,14 +200,14 @@ public class GNMFTest extends AutomatedTestBase\nif(numRegisteredOutputs >= 2) {\nscript.out(\"W\");\n- ml.setConfigProperty(\"cp.parallel.matrixmult\", \"false\");\n+ ml.setConfigProperty(\"cp.parallel.ops\", \"false\");\n}\nMLResults results = ml.execute(script);\nif(numRegisteredOutputs >= 2) {\nString configStr = ConfigurationManager.getDMLConfig().getConfigInfo();\n- if(configStr.contains(\"cp.parallel.matrixmult: true\"))\n+ if(configStr.contains(\"cp.parallel.ops: true\"))\nAssert.fail(\"Configuration not updated via setConfig\");\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml",
"new_path": "src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n<!-- enables automatic code generation -->\n<compressed.linalg>true</compressed.linalg>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/codegen/SystemML-config-codegen.xml",
"new_path": "src/test/scripts/functions/codegen/SystemML-config-codegen.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n<!-- enables automatic code generation -->\n<codegen.enabled>true</codegen.enabled>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/codegen/SystemML-config-codegen6.xml",
"new_path": "src/test/scripts/functions/codegen/SystemML-config-codegen6.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n<!-- enables automatic code generation -->\n<codegen.enabled>true</codegen.enabled>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/compress/SystemML-config-compress.xml",
"new_path": "src/test/scripts/functions/compress/SystemML-config-compress.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded matrix operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n<!-- enables compressed linear algebra for cp/spark -->\n<compressed.linalg>true</compressed.linalg>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/dmlscript/SystemML-config.xml",
"new_path": "src/test/scripts/functions/dmlscript/SystemML-config.xml",
"diff": "<numreducers>10</numreducers>\n<scratch>scratch_space</scratch>\n<defaultblocksize>1000</defaultblocksize>\n-<cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n-<cp.parallel.textio>false</cp.parallel.textio>\n+<cp.parallel.ops>true</cp.parallel.ops>\n+<cp.parallel.io>false</cp.parallel.io>\n</root>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml",
"new_path": "src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml",
"diff": "<!-- yarn application submission queue, relevant for default capacity scheduler -->\n<dml.yarn.app.queue>default</dml.yarn.app.queue>\n- <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n- <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+ <!-- enables multi-threaded operations in singlenode control program -->\n+ <cp.parallel.ops>true</cp.parallel.ops>\n- <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n- <cp.parallel.textio>true</cp.parallel.textio>\n+ <!-- enables multi-threaded read/write in singlenode control program -->\n+ <cp.parallel.io>true</cp.parallel.io>\n<!-- piggybacked test for custom mapred/mapreduce configurations -->\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1768] Cleanup properties of systemml-config file
This patch cleans up the following two properties of the
SystemML-config.xml file in order to better convey their meaning:
1) cp.parallel.matrixmult -> cp.parallel.ops
2) cp.parallel.textio -> cp.parallel.io |
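A hedged usage sketch of the renamed properties, mirroring the `OptimizerUtils` hunk in this record; `cconf` stands for the compiler configuration object used there, and the surrounding control flow is illustrative.

```java
// The constants and accessors are from the diff; only the framing is new.
DMLConfig dmlconf = ConfigurationManager.getDMLConfig();
if (!dmlconf.getBooleanValue(DMLConfig.CP_PARALLEL_OPS)) {  // was cp.parallel.matrixmult
    cconf.set(ConfigType.PARALLEL_CP_MATRIX_OPERATIONS, false);
}
if (!dmlconf.getBooleanValue(DMLConfig.CP_PARALLEL_IO)) {   // was cp.parallel.textio
    cconf.set(ConfigType.PARALLEL_CP_READ_TEXTFORMATS, false);
    cconf.set(ConfigType.PARALLEL_CP_WRITE_TEXTFORMATS, false);
}
```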
49,736 | 14.07.2017 13:59:41 | 28,800 | 6778a63b02fc1c644501bae67cd24e639ed3a623 | [HOTFIX] Fix for recently updated validation code for convolution operation
Tested NNTest in local environment. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"diff": "@@ -1124,6 +1124,11 @@ public class BuiltinFunctionExpression extends DataIdentifier\noutput.setDataType(DataType.MATRIX);\noutput.setValueType(ValueType.DOUBLE);\noutput.setBlockDimensions(input.getOutput().getRowsInBlock(), input.getOutput().getColumnsInBlock());\n+\n+ if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD) {\n+ output.setDimensions(input.getOutput().getDim1(), input.getOutput().getDim2());\n+ }\n+ else {\n// stride1, stride2, padding1, padding2, numImg, numChannels, imgSize, imgSize,\n// filter_shape1=1, filter_shape2=1, filterSize/poolSize1, filterSize/poolSize1\ntry {\n@@ -1146,23 +1151,38 @@ public class BuiltinFunctionExpression extends DataIdentifier\nstart++; start++; // Increment index for K and C\nlong R = (long) getDoubleValue(_args[start++]);\nlong S = (long) getDoubleValue(_args[start++]);\n+\n+ if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_FILTER) {\n+ output.setDimensions(K, C*R*S);\n+ }\n+ else if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA) {\n+ output.setDimensions(N, C*H*W);\n+ }\n+ else if(H > 0 && W > 0 && stride_h > 0 && stride_w > 0 && pad_h >= 0 && pad_w >= 0 && R > 0 && S > 0) {\nlong P = ConvolutionUtils.getP(H, R, stride_h, pad_h);\n- long Q = ConvolutionUtils.getP(W, S, stride_w, pad_w);\n+ long Q = ConvolutionUtils.getQ(W, S, stride_w, pad_w);\n+\n+ // Try to set both rows and columns\nif(this.getOpCode() == BuiltinFunctionOp.CONV2D)\noutput.setDimensions(N, K*P*Q);\n- else if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_FILTER)\n- output.setDimensions(K, C*R*S);\n- else if(this.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA)\n- output.setDimensions(N, C*H*W);\n- else if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL)\n+ else if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL || this.getOpCode() == BuiltinFunctionOp.AVG_POOL)\noutput.setDimensions(N, C*P*Q);\n- else if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD)\n- output.setDimensions(N, C*H*W);\nelse\nthrow new LanguageException(\"\");\n}\n+ else {\n+ // Since columns cannot be computed, set only rows\n+ if(this.getOpCode() == BuiltinFunctionOp.CONV2D)\n+ output.setDimensions(input.getOutput().getDim1(), -1);\n+ else if(this.getOpCode() == BuiltinFunctionOp.MAX_POOL || this.getOpCode() == BuiltinFunctionOp.AVG_POOL)\n+ output.setDimensions(input.getOutput().getDim1(), -1);\n+ else\n+ throw new LanguageException(\"\");\n+ }\n+ }\ncatch(Exception e) {\n- output.setDimensions(input.getOutput().getDim1(), -1); // To make sure that output dimensions are not incorrect\n+ output.setDimensions(-1, -1); // To make sure that output dimensions are not incorrect even if getDoubleValue doesnot return value\n+ }\n}\ncheckMatrixParam(input);\nif(input2 != null)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Fix for recently updated validation code for convolution operation
- Tested NNTest in local environment. |
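For context, the output-shape arithmetic behind this one-character fix (the bug computed P for both dimensions). The standard convolution output-size formula is assumed here; `ConvolutionUtils`' exact integer handling may differ.

```java
// P/Q as used in the validation above; the fix routes the width through getQ.
long P = (H + 2 * pad_h - R) / stride_h + 1; // ConvolutionUtils.getP(H, R, stride_h, pad_h)
long Q = (W + 2 * pad_w - S) / stride_w + 1; // ConvolutionUtils.getQ(W, S, stride_w, pad_w)
// e.g. H=W=28, R=S=5, stride_h=stride_w=1, pad_h=pad_w=0 gives P=Q=24,
// so a conv2d output is validated as N x (K*P*Q) = N x (K*576).
```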
49,703 | 15.07.2017 03:26:12 | 25,200 | ed6b74bfe6e357399532084877066ebf65db0691 | [MINOR] Remove EncoderRecode fields and TfUtils.TXMETHOD_MVRCD
Remove _mvrcdList and _fullrcdList fields and related code from EncoderRecode.
Remove TfUtils.TXMETHOD_MVRCD since it was only referenced from the removed code.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/transform/TfUtils.java",
"new_path": "src/main/java/org/apache/sysml/runtime/transform/TfUtils.java",
"diff": "@@ -54,7 +54,6 @@ public class TfUtils implements Serializable\npublic static final String TXMETHOD_DUMMYCODE = \"dummycode\";\npublic static final String TXMETHOD_SCALE = \"scale\";\npublic static final String TXMETHOD_OMIT = \"omit\";\n- public static final String TXMETHOD_MVRCD = \"mvrcd\";\n//transform meta data constants (frame-based transform)\npublic static final String TXMTD_MVPREFIX = \"#Meta\"+Lop.DATATYPE_PREFIX+\"MV\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java",
"new_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java",
"diff": "@@ -37,9 +37,6 @@ public class EncoderRecode extends Encoder\n{\nprivate static final long serialVersionUID = 8213163881283341874L;\n- private int[] _mvrcdList = null;\n- private int[] _fullrcdList = null;\n-\n//recode maps and custom map for partial recode maps\nprivate HashMap<Integer, HashMap<String, Long>> _rcdMaps = new HashMap<Integer, HashMap<String, Long>>();\nprivate HashMap<Integer, HashMap<String,String>> _finalMaps = null;\n@@ -49,28 +46,9 @@ public class EncoderRecode extends Encoder\nthrows JSONException\n{\nsuper(null, clen);\n- int rcdCount = 0;\nif( parsedSpec.containsKey(TfUtils.TXMETHOD_RECODE) ) {\n- int[] collist = TfMetaUtils.parseJsonIDList(parsedSpec, colnames, TfUtils.TXMETHOD_RECODE);\n- rcdCount = initColList(collist);\n- }\n-\n- if ( parsedSpec.containsKey(TfUtils.TXMETHOD_MVRCD)) {\n- _mvrcdList = TfMetaUtils.parseJsonIDList(parsedSpec, colnames, TfUtils.TXMETHOD_MVRCD);\n- rcdCount += _mvrcdList.length;\n- }\n-\n- if ( rcdCount > 0 ) {\n- _fullrcdList = new int[rcdCount];\n- int idx = -1;\n- if(_colList != null)\n- for(int i=0; i < _colList.length; i++)\n- _fullrcdList[++idx] = _colList[i];\n-\n- if(_mvrcdList != null)\n- for(int i=0; i < _mvrcdList.length; i++)\n- _fullrcdList[++idx] = _mvrcdList[i];\n+ _colList = TfMetaUtils.parseJsonIDList(parsedSpec, colnames, TfUtils.TXMETHOD_RECODE);\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Remove EncoderRecode fields and TfUtils.TXMETHOD_MVRCD
Remove _mvrcdList and _fullrcdList fields and related code from EncoderRecode.
Remove TfUtils.TXMETHOD_MVRCD since it was only referenced from the removed code.
Closes #576. |
49,703 | 17.07.2017 14:37:07 | 25,200 | 2625cd9033414090372952054a62f2f26aaf68f3 | [MINOR] Update slf4j-api and slf4j-log4j12 copyright dates
Update copyright dates for slf4j-api and slf4j-log4j12 dependencies
for bin and standalone-jar artifacts to match: | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/bin/LICENSE",
"new_path": "src/assembly/bin/LICENSE",
"diff": "@@ -326,7 +326,7 @@ The following SLF4J dependencies are distributed under the MIT license.\nSLF4J API Module (http://www.slf4j.org) org.slf4j:slf4j-api:1.7.5 (slf4j-api-1.7.5.jar)\nSLF4J LOG4J-12 Binding (http://www.slf4j.org) org.slf4j:slf4j-log4j12:1.7.5 (slf4j-log4j12-1.7.5.jar)\n-Copyright (c) 2004-2008 QOS.ch\n+Copyright (c) 2004-2007 QOS.ch\nAll rights reserved.\nMIT license:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/assembly/standalone-jar/LICENSE",
"new_path": "src/assembly/standalone-jar/LICENSE",
"diff": "@@ -317,7 +317,7 @@ The following SLF4J dependencies are distributed under the MIT license.\nSLF4J API Module (http://www.slf4j.org) org.slf4j:slf4j-api:1.7.5\nSLF4J LOG4J-12 Binding (http://www.slf4j.org) org.slf4j:slf4j-log4j12:1.7.5\n-Copyright (c) 2004-2008 QOS.ch\n+Copyright (c) 2004-2007 QOS.ch\nAll rights reserved.\nMIT license:\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update slf4j-api and slf4j-log4j12 copyright dates
Update copyright dates for slf4j-api and slf4j-log4j12 dependencies
for bin and standalone-jar artifacts to match:
https://github.com/qos-ch/slf4j/blob/v_1.7.5/slf4j-api/LICENSE.txt
https://github.com/qos-ch/slf4j/blob/v_1.7.5/slf4j-log4j12/LICENSE.txt |
49,703 | 17.07.2017 15:31:29 | 25,200 | 62b64b32d200817fa320a91b5d5a35751a747856 | Set runtime platform via MLContext
Update MLContext and ScriptExecutor to allow for convenient setting
of the runtime platform. This is useful when debugging and developing.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java",
"diff": "@@ -28,7 +28,6 @@ import org.apache.spark.SparkContext;\nimport org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.sql.SparkSession;\nimport org.apache.sysml.api.DMLScript;\n-import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n@@ -113,6 +112,12 @@ public class MLContext {\n*/\nprivate ExplainLevel explainLevel = null;\n+ /**\n+ * The runtime platform on which to execute. By default, MLContext runs on\n+ * {@code ExecutionType.DRIVER_AND_SPARK}.\n+ */\n+ private ExecutionType executionType = ExecutionType.DRIVER_AND_SPARK;\n+\n/**\n* Whether or not all values should be maintained in the symbol table after\n* execution.\n@@ -159,6 +164,38 @@ public class MLContext {\n}\n};\n+ /**\n+ * The different types of execution environments supported by SystemML. The\n+ * default execution type is {@code DRIVER_AND_SPARK}. {@code DRIVER} refers\n+ * to all operations occurring in the local driver JVM. {@code SPARK} refers\n+ * to all operations occurring on Spark. {@code HADOOP} refers to all\n+ * operations occurring on Hadoop. {@code DRIVER_AND_SPARK} refers to\n+ * operations occurring in the local driver JVM and on Spark when\n+ * appropriate. {@code DRIVER_AND_HADOOP} refers to operations occurring in\n+ * the local driver JVM and on Hadoop when appropriate.\n+ *\n+ */\n+ public enum ExecutionType {\n+ DRIVER, SPARK, HADOOP, DRIVER_AND_SPARK, DRIVER_AND_HADOOP;\n+\n+ public DMLScript.RUNTIME_PLATFORM getRuntimePlatform() {\n+ switch (this) {\n+ case DRIVER:\n+ return DMLScript.RUNTIME_PLATFORM.SINGLE_NODE;\n+ case SPARK:\n+ return DMLScript.RUNTIME_PLATFORM.SPARK;\n+ case HADOOP:\n+ return DMLScript.RUNTIME_PLATFORM.HADOOP;\n+ case DRIVER_AND_SPARK:\n+ return DMLScript.RUNTIME_PLATFORM.HYBRID_SPARK;\n+ case DRIVER_AND_HADOOP:\n+ return DMLScript.RUNTIME_PLATFORM.HYBRID;\n+ default:\n+ return DMLScript.RUNTIME_PLATFORM.HYBRID_SPARK;\n+ }\n+ }\n+ }\n+\n/**\n* Retrieve the currently active MLContext. This is used internally by\n* SystemML via MLContextProxy.\n@@ -235,8 +272,7 @@ public class MLContext {\n}\nthis.spark = spark;\n- // by default, run in hybrid Spark mode for optimal performance\n- DMLScript.rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK;\n+ DMLScript.rtplatform = executionType.getRuntimePlatform();\nactiveMLContext = this;\nMLContextProxy.setActive(true);\n@@ -279,6 +315,7 @@ public class MLContext {\n*/\npublic MLResults execute(Script script) {\nScriptExecutor scriptExecutor = new ScriptExecutor();\n+ scriptExecutor.setExecutionType(executionType);\nscriptExecutor.setExplain(explain);\nscriptExecutor.setExplainLevel(explainLevel);\nscriptExecutor.setGPU(gpu);\n@@ -697,4 +734,24 @@ public class MLContext {\nthis.initBeforeExecution = initBeforeExecution;\n}\n+ /**\n+ * Obtain the current execution environment.\n+ *\n+ * @return the execution environment\n+ */\n+ public ExecutionType getExecutionType() {\n+ return executionType;\n+ }\n+\n+ /**\n+ * Set the execution environment.\n+ *\n+ * @param executionType\n+ * the execution environment\n+ */\n+ public void setExecutionType(ExecutionType executionType) {\n+ DMLScript.rtplatform = executionType.getRuntimePlatform();\n+ this.executionType = executionType;\n+ }\n+\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -25,9 +25,10 @@ import java.util.Set;\nimport org.apache.commons.lang3.StringUtils;\nimport org.apache.sysml.api.DMLScript;\n-import org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.DMLScript.DMLOptions;\n+import org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\n+import org.apache.sysml.api.mlcontext.MLContext.ExecutionType;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n@@ -125,6 +126,7 @@ public class ScriptExecutor {\nprotected boolean statistics = false;\nprotected boolean oldStatistics = false;\nprotected ExplainLevel explainLevel;\n+ protected ExecutionType executionType;\nprotected int statisticsMaxHeavyHitters = 10;\nprotected boolean maintainSymbolTable = false;\n@@ -693,4 +695,24 @@ public class ScriptExecutor {\npublic DMLConfig getConfig() {\nreturn config;\n}\n+\n+ /**\n+ * Obtain the current execution environment.\n+ *\n+ * @return the execution environment\n+ */\n+ public ExecutionType getExecutionType() {\n+ return executionType;\n+ }\n+\n+ /**\n+ * Set the execution environment.\n+ *\n+ * @param executionType\n+ * the execution environment\n+ */\n+ public void setExecutionType(ExecutionType executionType) {\n+ DMLScript.rtplatform = executionType.getRuntimePlatform();\n+ this.executionType = executionType;\n+ }\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1596] Set runtime platform via MLContext
Update MLContext and ScriptExecutor to allow for convenient setting
of the runtime platform. This is useful when debugging and developing.
Closes #497. |
49,738 | 17.07.2017 22:10:53 | 25,200 | 4ad8f7742e0829998414180d49a4aa5cabc1e669 | Collapse subsequent assignvar and rmvar instructions
This patch adds a minor cleanup step to the instruction generation,
which collapses subsequent assignvar and rmvar instructions, in order to
simplify debugging and reduce unnecessary interpretation overhead:
Example: assignvar s1 s2, rmvar s1 -> mvvar s1 s2 | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java",
"new_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java",
"diff": "@@ -24,6 +24,7 @@ import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.HashSet;\n+import java.util.Iterator;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -3816,8 +3817,58 @@ public class Dag<N extends Lop>\nreturn false;\n}\n+ /**\n+ * Performs various cleanups on the list of instructions in order to reduce the\n+ * number of instructions to simply debugging and reduce interpretation overhead.\n+ *\n+ * @param insts list of instructions\n+ * @return new list of potentially modified instructions\n+ * @throws DMLRuntimeException in case of instruction parsing errors\n+ */\nprivate static ArrayList<Instruction> cleanupInstructions(ArrayList<Instruction> insts)\nthrows DMLRuntimeException\n+ {\n+ //step 1: create mvvar instructions: assignvar s1 s2, rmvar s1 -> mvvar s1 s2\n+ ArrayList<Instruction> tmp1 = collapseAssignvarAndRmvarInstructions(insts);\n+\n+ //step 2: create packed rmvar instructions: rmvar m1, rmvar m2 -> rmvar m1 m2\n+ ArrayList<Instruction> tmp2 = createPackedRmvarInstructions(tmp1);\n+\n+ return tmp2;\n+ }\n+\n+ private static ArrayList<Instruction> collapseAssignvarAndRmvarInstructions(ArrayList<Instruction> insts)\n+ throws DMLRuntimeException\n+ {\n+ ArrayList<Instruction> ret = new ArrayList<Instruction>();\n+ Iterator<Instruction> iter = insts.iterator();\n+ while( iter.hasNext() ) {\n+ Instruction inst = iter.next();\n+ if( iter.hasNext() && inst instanceof VariableCPInstruction\n+ && ((VariableCPInstruction)inst).isAssignVariable() ) {\n+ VariableCPInstruction inst1 = (VariableCPInstruction) inst;\n+ Instruction inst2 = iter.next();\n+ if( inst2 instanceof VariableCPInstruction\n+ && ((VariableCPInstruction)inst2).isRemoveVariableNoFile()\n+ && inst1.getInput1().getName().equals(\n+ ((VariableCPInstruction)inst2).getInput1().getName()) ) {\n+ ret.add(VariableCPInstruction.prepareMoveInstruction(\n+ inst1.getInput1().getName(), inst1.getInput2().getName()));\n+ }\n+ else {\n+ ret.add(inst1);\n+ ret.add(inst2);\n+ }\n+ }\n+ else {\n+ ret.add(inst);\n+ }\n+ }\n+ return ret;\n+ }\n+\n+ private static ArrayList<Instruction> createPackedRmvarInstructions(ArrayList<Instruction> insts)\n+ throws DMLRuntimeException\n{\nArrayList<Instruction> ret = new ArrayList<Instruction>();\nArrayList<String> currRmVar = new ArrayList<String>();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java",
"diff": "@@ -228,6 +228,10 @@ public class VariableCPInstruction extends CPInstruction\n|| opcode == VariableOperationCode.RemoveVariableAndFile);\n}\n+ public boolean isAssignVariable() {\n+ return (opcode == VariableOperationCode.AssignVariable);\n+ }\n+\npublic FileFormatProperties getFormatProperties() {\nreturn _formatProperties;\n}\n@@ -707,9 +711,12 @@ public class VariableCPInstruction extends CPInstruction\n// get source variable\nData srcData = ec.getVariable(getInput1().getName());\n- if ( srcData == null )\n- throw new DMLRuntimeException(\"Unexpected error: could not find a data object for variable name:\" + getInput1().getName() + \", while processing instruction \" +this.toString());\n+ if ( srcData == null ) {\n+ throw new DMLRuntimeException(\"Unexpected error: could not find a data object \"\n+ + \"for variable name:\" + getInput1().getName() + \", while processing instruction \");\n+ }\n+ if( getInput2().getDataType().isMatrix() ) {\n// remove existing variable bound to target name\nData tgt = ec.removeVariable(getInput2().getName());\n@@ -717,6 +724,7 @@ public class VariableCPInstruction extends CPInstruction\nif ( tgt != null && tgt instanceof MatrixObject ) {\nec.cleanupMatrixObject((MatrixObject) tgt);\n}\n+ }\n// do the actual move\nec.setVariable(getInput2().getName(), srcData);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1776] Collapse subsequent assignvar and rmvar instructions
This patch adds a minor cleanup step to the instruction generation,
which collapses subsequent assignvar and rmvar instructions, in order to
simplify debugging and reduce unnecessary interpretation overhead:
Example: assignvar s1 s2, rmvar s1 -> mvvar s1 s2 |
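To make the rewrite concrete, here is a small self-contained sketch of the same peephole idea. It deliberately models instructions as plain strings rather than SystemML's actual Instruction classes, so the string encoding is purely illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class PeepholeSketch {
    // Collapse "assignvar s1 s2" immediately followed by "rmvar s1" into "mvvar s1 s2".
    static List<String> collapse(List<String> insts) {
        List<String> ret = new ArrayList<String>();
        Iterator<String> iter = insts.iterator();
        while (iter.hasNext()) {
            String first = iter.next();
            if (first.startsWith("assignvar ") && iter.hasNext()) {
                String[] p = first.split(" "); // p[1] = source var, p[2] = target var
                String second = iter.next();
                if (second.equals("rmvar " + p[1]))
                    ret.add("mvvar " + p[1] + " " + p[2]); // collapse the pair
                else {
                    ret.add(first);
                    ret.add(second);
                }
            } else
                ret.add(first);
        }
        return ret;
    }

    public static void main(String[] args) {
        // prints [mvvar s1 s2, rmvar s3]
        System.out.println(collapse(Arrays.asList("assignvar s1 s2", "rmvar s1", "rmvar s3")));
    }
}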
49,703 | 19.07.2017 11:13:51 | 25,200 | ec38b3790f11792d3337c35439954d422d6eb60b | MLContextTestBase class for MLContext testing
Create abstract MLContextTestBase class that contains setup and shutdown
code for MLContext tests. This removes boilerplate code from MLContext
test classes that extend MLContextTestBase.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/DataFrameVectorScriptTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/DataFrameVectorScriptTest.java",
"diff": "@@ -38,7 +38,6 @@ import org.apache.spark.sql.types.StructField;\nimport org.apache.spark.sql.types.StructType;\nimport org.apache.sysml.api.mlcontext.FrameFormat;\nimport org.apache.sysml.api.mlcontext.FrameMetadata;\n-import org.apache.sysml.api.mlcontext.MLContext;\nimport org.apache.sysml.api.mlcontext.Matrix;\nimport org.apache.sysml.api.mlcontext.Script;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -49,15 +48,13 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.DataConverter;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n-import org.junit.AfterClass;\n-import org.junit.BeforeClass;\nimport org.junit.Test;\n-public class DataFrameVectorScriptTest extends AutomatedTestBase\n+public class DataFrameVectorScriptTest extends MLContextTestBase\n{\nprivate final static String TEST_DIR = \"functions/mlcontext/\";\nprivate final static String TEST_NAME = \"DataFrameConversion\";\n@@ -75,16 +72,6 @@ public class DataFrameVectorScriptTest extends AutomatedTestBase\nprivate final static double sparsity2 = 0.1;\nprivate final static double eps=0.0000000001;\n- private static SparkSession spark;\n- private static MLContext ml;\n-\n- @BeforeClass\n- public static void setUpClass() {\n- spark = createSystemMLSparkSession(\"DataFrameVectorScriptTest\", \"local\");\n- ml = new MLContext(spark);\n- ml.setExplain(true);\n- }\n-\n@Override\npublic void setUp() {\naddTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"A\", \"B\"}));\n@@ -343,16 +330,4 @@ public class DataFrameVectorScriptTest extends AutomatedTestBase\nJavaRDD<Row> rowRDD = sc.parallelize(list);\nreturn sparkSession.createDataFrame(rowRDD, dfSchema);\n}\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop underlying spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- spark.stop();\n- spark = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/FrameTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/FrameTest.java",
"diff": "@@ -29,10 +29,8 @@ import java.util.List;\nimport org.apache.hadoop.io.LongWritable;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\n-import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.sql.Dataset;\nimport org.apache.spark.sql.Row;\n-import org.apache.spark.sql.SparkSession;\nimport org.apache.spark.sql.types.StructType;\nimport org.apache.sysml.api.DMLException;\nimport org.apache.sysml.api.DMLScript;\n@@ -40,8 +38,6 @@ import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.api.mlcontext.FrameFormat;\nimport org.apache.sysml.api.mlcontext.FrameMetadata;\nimport org.apache.sysml.api.mlcontext.FrameSchema;\n-import org.apache.sysml.api.mlcontext.MLContext;\n-import org.apache.sysml.api.mlcontext.MLContextUtil;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.Script;\nimport org.apache.sysml.api.mlcontext.ScriptFactory;\n@@ -57,17 +53,14 @@ import org.apache.sysml.runtime.matrix.data.InputInfo;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.runtime.util.MapReduceTool;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n-import org.junit.After;\n-import org.junit.AfterClass;\nimport org.junit.Assert;\n-import org.junit.BeforeClass;\nimport org.junit.Test;\n-public class FrameTest extends AutomatedTestBase\n+public class FrameTest extends MLContextTestBase\n{\nprivate final static String TEST_DIR = \"functions/frame/\";\nprivate final static String TEST_NAME = \"FrameGeneral\";\n@@ -98,17 +91,6 @@ public class FrameTest extends AutomatedTestBase\nschemaMixedLarge = (ValueType[]) schemaMixedLargeList.toArray(schemaMixedLarge);\n}\n- private static SparkSession spark;\n- private static JavaSparkContext sc;\n- private static MLContext ml;\n-\n- @BeforeClass\n- public static void setUpClass() {\n- spark = createSystemMLSparkSession(\"FrameTest\", \"local\");\n- ml = new MLContext(spark);\n- sc = MLContextUtil.getJavaSparkContext(ml);\n- }\n-\n@Override\npublic void setUp() {\naddTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,\n@@ -373,22 +355,4 @@ public class FrameTest extends AutomatedTestBase\n\", not same as the R value \" + val2);\n}\n}\n-\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop underlying spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- spark.stop();\n- sc = null;\n- spark = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java",
"diff": "@@ -28,17 +28,13 @@ import java.util.List;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\n-import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.api.java.function.Function;\nimport org.apache.spark.mllib.linalg.distributed.CoordinateMatrix;\nimport org.apache.spark.mllib.linalg.distributed.MatrixEntry;\nimport org.apache.spark.rdd.RDD;\n-import org.apache.spark.sql.SparkSession;\nimport org.apache.sysml.api.DMLException;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n-import org.apache.sysml.api.mlcontext.MLContext;\n-import org.apache.sysml.api.mlcontext.MLContextUtil;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.Matrix;\nimport org.apache.sysml.api.mlcontext.MatrixFormat;\n@@ -55,19 +51,16 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.runtime.util.MapReduceTool;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n-import org.junit.After;\n-import org.junit.AfterClass;\nimport org.junit.Assert;\n-import org.junit.BeforeClass;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.junit.runners.Parameterized;\nimport org.junit.runners.Parameterized.Parameters;\n@RunWith(value = Parameterized.class)\n-public class GNMFTest extends AutomatedTestBase\n+public class GNMFTest extends MLContextTestBase\n{\nprivate final static String TEST_DIR = \"applications/gnmf/\";\nprivate final static String TEST_NAME = \"GNMF\";\n@@ -76,22 +69,11 @@ public class GNMFTest extends AutomatedTestBase\nint numRegisteredInputs;\nint numRegisteredOutputs;\n- private static SparkSession spark;\n- private static JavaSparkContext sc;\n- private static MLContext ml;\n-\npublic GNMFTest(int in, int out) {\nnumRegisteredInputs = in;\nnumRegisteredOutputs = out;\n}\n- @BeforeClass\n- public static void setUpClass() {\n- spark = createSystemMLSparkSession(\"GNMFTest\", \"local\");\n- ml = new MLContext(spark);\n- sc = MLContextUtil.getJavaSparkContext(ml);\n- }\n-\n@Parameters\npublic static Collection<Object[]> data() {\nObject[][] data = new Object[][] { { 0, 0 }, { 3, 2 }, { 2, 2 }, { 2, 1 }, { 2, 0 }, { 3, 0 }};\n@@ -257,24 +239,6 @@ public class GNMFTest extends AutomatedTestBase\n}\n}\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop underlying spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- spark.stop();\n- sc = null;\n- spark = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n-\npublic static class StringToMatrixEntry implements Function<String, MatrixEntry> {\nprivate static final long serialVersionUID = 7456391906436606324L;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextFrameTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextFrameTest.java",
"diff": "@@ -28,21 +28,17 @@ import java.util.Arrays;\nimport java.util.List;\nimport org.apache.spark.api.java.JavaRDD;\n-import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.rdd.RDD;\nimport org.apache.spark.sql.Dataset;\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.RowFactory;\n-import org.apache.spark.sql.SparkSession;\nimport org.apache.spark.sql.types.DataTypes;\nimport org.apache.spark.sql.types.StructField;\nimport org.apache.spark.sql.types.StructType;\nimport org.apache.sysml.api.mlcontext.FrameFormat;\nimport org.apache.sysml.api.mlcontext.FrameMetadata;\nimport org.apache.sysml.api.mlcontext.FrameSchema;\n-import org.apache.sysml.api.mlcontext.MLContext;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\n-import org.apache.sysml.api.mlcontext.MLContextUtil;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.MatrixFormat;\nimport org.apache.sysml.api.mlcontext.MatrixMetadata;\n@@ -50,19 +46,14 @@ import org.apache.sysml.api.mlcontext.Script;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.instructions.spark.utils.FrameRDDConverterUtils;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.mlcontext.MLContextTest.CommaSeparatedValueStringToDoubleArrayRow;\n-import org.junit.After;\n-import org.junit.AfterClass;\nimport org.junit.Assert;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\nimport scala.collection.Iterator;\n-public class MLContextFrameTest extends AutomatedTestBase {\n- protected final static String TEST_DIR = \"org/apache/sysml/api/mlcontext\";\n- protected final static String TEST_NAME = \"MLContextFrame\";\n+public class MLContextFrameTest extends MLContextTestBase {\npublic static enum SCRIPT_TYPE {\nDML, PYDML\n@@ -72,25 +63,14 @@ public class MLContextFrameTest extends AutomatedTestBase {\nANY, FILE, JAVA_RDD_STR_CSV, JAVA_RDD_STR_IJV, RDD_STR_CSV, RDD_STR_IJV, DATAFRAME\n};\n- private static SparkSession spark;\n- private static JavaSparkContext sc;\n- private static MLContext ml;\nprivate static String CSV_DELIM = \",\";\n@BeforeClass\npublic static void setUpClass() {\n- spark = createSystemMLSparkSession(\"MLContextFrameTest\", \"local\");\n- ml = new MLContext(spark);\n- sc = MLContextUtil.getJavaSparkContext(ml);\n+ MLContextTestBase.setUpClass();\nml.setExplainLevel(ExplainLevel.RECOMPILE_HOPS);\n}\n- @Override\n- public void setUp() {\n- addTestConfiguration(TEST_DIR, TEST_NAME);\n- getAndLoadTestConfiguration(TEST_NAME);\n- }\n-\n@Test\npublic void testFrameJavaRDD_CSV_DML() {\ntestFrame(FrameFormat.CSV, SCRIPT_TYPE.DML, IO_TYPE.JAVA_RDD_STR_CSV, IO_TYPE.ANY);\n@@ -644,21 +624,4 @@ public class MLContextFrameTest extends AutomatedTestBase {\n// }\n// }\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop underlying spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- spark.stop();\n- sc = null;\n- spark = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextMultipleScriptsTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextMultipleScriptsTest.java",
"diff": "@@ -80,10 +80,6 @@ public class MLContextMultipleScriptsTest extends AutomatedTestBase\nrunMLContextTestMultipleScript(RUNTIME_PLATFORM.SPARK, true);\n}\n- /**\n- *\n- * @param platform\n- */\nprivate void runMLContextTestMultipleScript(RUNTIME_PLATFORM platform, boolean wRead)\n{\nRUNTIME_PLATFORM oldplatform = DMLScript.rtplatform;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextOutputBlocksizeTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextOutputBlocksizeTest.java",
"diff": "@@ -21,10 +21,7 @@ package org.apache.sysml.test.integration.mlcontext;\nimport static org.apache.sysml.api.mlcontext.ScriptFactory.dml;\n-import org.apache.spark.SparkConf;\nimport org.apache.spark.api.java.JavaPairRDD;\n-import org.apache.spark.api.java.JavaSparkContext;\n-import org.apache.sysml.api.mlcontext.MLContext;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.Matrix;\n@@ -36,44 +33,15 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.util.DataConverter;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\n-import org.junit.After;\n-import org.junit.AfterClass;\nimport org.junit.Assert;\n-import org.junit.BeforeClass;\nimport org.junit.Test;\n-\n-public class MLContextOutputBlocksizeTest extends AutomatedTestBase\n+public class MLContextOutputBlocksizeTest extends MLContextTestBase\n{\n- protected final static String TEST_DIR = \"org/apache/sysml/api/mlcontext\";\n- protected final static String TEST_NAME = \"MLContext\";\n-\nprivate final static int rows = 100;\nprivate final static int cols = 63;\nprivate final static double sparsity = 0.7;\n- private static SparkConf conf;\n- private static JavaSparkContext sc;\n- private static MLContext ml;\n-\n- @BeforeClass\n- public static void setUpClass() {\n- if (conf == null)\n- conf = SparkExecutionContext.createSystemMLSparkConf()\n- .setAppName(\"MLContextTest\").setMaster(\"local\");\n- if (sc == null)\n- sc = new JavaSparkContext(conf);\n- ml = new MLContext(sc);\n- }\n-\n- @Override\n- public void setUp() {\n- addTestConfiguration(TEST_DIR, TEST_NAME);\n- getAndLoadTestConfiguration(TEST_NAME);\n- }\n-\n-\n@Test\npublic void testOutputBlocksizeTextcell() {\nrunMLContextOutputBlocksizeTest(\"text\");\n@@ -131,21 +99,4 @@ public class MLContextOutputBlocksizeTest extends AutomatedTestBase\n}\n}\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- sc.stop();\n- sc = null;\n- conf = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextParforDatasetTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextParforDatasetTest.java",
"diff": "@@ -21,18 +21,15 @@ package org.apache.sysml.test.integration.mlcontext;\nimport static org.apache.sysml.api.mlcontext.ScriptFactory.dml;\n-import org.apache.spark.SparkConf;\nimport org.apache.spark.api.java.JavaPairRDD;\n-import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.sql.Dataset;\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.SparkSession;\n-import org.apache.sysml.api.mlcontext.MLContext;\n+import org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.MatrixFormat;\nimport org.apache.sysml.api.mlcontext.MatrixMetadata;\nimport org.apache.sysml.api.mlcontext.Script;\n-import org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\n@@ -41,44 +38,17 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.util.DataConverter;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n-import org.junit.After;\n-import org.junit.AfterClass;\n-import org.junit.BeforeClass;\nimport org.junit.Test;\n-public class MLContextParforDatasetTest extends AutomatedTestBase\n+public class MLContextParforDatasetTest extends MLContextTestBase\n{\n- protected final static String TEST_DIR = \"org/apache/sysml/api/mlcontext\";\n- protected final static String TEST_NAME = \"MLContext\";\nprivate final static int rows = 100;\nprivate final static int cols = 1600;\nprivate final static double sparsity = 0.7;\n- private static SparkConf conf;\n- private static JavaSparkContext sc;\n- private static MLContext ml;\n-\n- @BeforeClass\n- public static void setUpClass() {\n- if (conf == null)\n- conf = SparkExecutionContext.createSystemMLSparkConf()\n- .setAppName(\"MLContextTest\").setMaster(\"local\");\n- if (sc == null)\n- sc = new JavaSparkContext(conf);\n- ml = new MLContext(sc);\n- }\n-\n- @Override\n- public void setUp() {\n- addTestConfiguration(TEST_DIR, TEST_NAME);\n- getAndLoadTestConfiguration(TEST_NAME);\n- }\n-\n-\n@Test\npublic void testParforDatasetVector() {\nrunMLContextParforDatasetTest(true, false, false);\n@@ -174,22 +144,4 @@ public class MLContextParforDatasetTest extends AutomatedTestBase\nInfrastructureAnalyzer.setLocalMaxMemory(oldmem);\n}\n}\n-\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- sc.stop();\n- sc = null;\n- conf = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextScratchCleanupTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextScratchCleanupTest.java",
"diff": "@@ -80,10 +80,6 @@ public class MLContextScratchCleanupTest extends AutomatedTestBase\nrunMLContextTestMultipleScript(RUNTIME_PLATFORM.SPARK, true);\n}\n- /**\n- *\n- * @param platform\n- */\nprivate void runMLContextTestMultipleScript(RUNTIME_PLATFORM platform, boolean wRead)\n{\nRUNTIME_PLATFORM oldplatform = DMLScript.rtplatform;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java",
"diff": "@@ -45,7 +45,6 @@ import java.util.Map;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\n-import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.api.java.function.Function;\nimport org.apache.spark.ml.linalg.Vector;\nimport org.apache.spark.ml.linalg.VectorUDT;\n@@ -54,14 +53,11 @@ import org.apache.spark.rdd.RDD;\nimport org.apache.spark.sql.Dataset;\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.RowFactory;\n-import org.apache.spark.sql.SparkSession;\nimport org.apache.spark.sql.types.DataTypes;\nimport org.apache.spark.sql.types.StructField;\nimport org.apache.spark.sql.types.StructType;\n-import org.apache.sysml.api.mlcontext.MLContext;\nimport org.apache.sysml.api.mlcontext.MLContextConversionUtil;\nimport org.apache.sysml.api.mlcontext.MLContextException;\n-import org.apache.sysml.api.mlcontext.MLContextUtil;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.Matrix;\nimport org.apache.sysml.api.mlcontext.MatrixFormat;\n@@ -73,11 +69,7 @@ import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\n-import org.junit.After;\n-import org.junit.AfterClass;\nimport org.junit.Assert;\n-import org.junit.BeforeClass;\nimport org.junit.Test;\nimport scala.Tuple2;\n@@ -86,26 +78,7 @@ import scala.collection.Iterator;\nimport scala.collection.JavaConversions;\nimport scala.collection.Seq;\n-public class MLContextTest extends AutomatedTestBase {\n- protected final static String TEST_DIR = \"org/apache/sysml/api/mlcontext\";\n- protected final static String TEST_NAME = \"MLContext\";\n-\n- private static SparkSession spark;\n- private static JavaSparkContext sc;\n- private static MLContext ml;\n-\n- @BeforeClass\n- public static void setUpClass() {\n- spark = createSystemMLSparkSession(\"MLContextTest\", \"local\");\n- ml = new MLContext(spark);\n- sc = MLContextUtil.getJavaSparkContext(ml);\n- }\n-\n- @Override\n- public void setUp() {\n- addTestConfiguration(TEST_DIR, TEST_NAME);\n- getAndLoadTestConfiguration(TEST_NAME);\n- }\n+public class MLContextTest extends MLContextTestBase {\n@Test\npublic void testCreateDMLScriptBasedOnStringAndExecute() {\n@@ -710,9 +683,12 @@ public class MLContextTest extends AutomatedTestBase {\nSystem.out.println(\"MLContextTest - DataFrame sum DML, mllib vector with ID column\");\nList<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> list = new ArrayList<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>>();\n- list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(1.0, org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0)));\n- list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(2.0, org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0)));\n- list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(3.0, org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0)));\n+ list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(1.0,\n+ org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0)));\n+ list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(2.0,\n+ org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0)));\n+ list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(3.0,\n+ 
org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0)));\nJavaRDD<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> javaRddTuple = sc.parallelize(list);\nJavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleMllibVectorRow());\n@@ -734,9 +710,12 @@ public class MLContextTest extends AutomatedTestBase {\nSystem.out.println(\"MLContextTest - DataFrame sum PYDML, mllib vector with ID column\");\nList<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> list = new ArrayList<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>>();\n- list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(1.0, org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0)));\n- list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(2.0, org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0)));\n- list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(3.0, org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0)));\n+ list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(1.0,\n+ org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0)));\n+ list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(2.0,\n+ org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0)));\n+ list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(3.0,\n+ org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0)));\nJavaRDD<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> javaRddTuple = sc.parallelize(list);\nJavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleMllibVectorRow());\n@@ -2576,7 +2555,8 @@ public class MLContextTest extends AutomatedTestBase {\n@Test\npublic void testPrintFormattingMultipleExpressions() {\nSystem.out.println(\"MLContextTest - print formatting multiple expressions\");\n- Script script = dml(\"a='hello'; b='goodbye'; c=4; d=3; e=3.0; f=5.0; g=FALSE; print('%s %d %f %b', (a+b), (c-d), (e*f), !g);\");\n+ Script script = dml(\n+ \"a='hello'; b='goodbye'; c=4; d=3; e=3.0; f=5.0; g=FALSE; print('%s %d %f %b', (a+b), (c-d), (e*f), !g);\");\nsetExpectedStdOut(\"hellogoodbye 1 15.000000 true\");\nml.execute(script);\n}\n@@ -2776,89 +2756,4 @@ public class MLContextTest extends AutomatedTestBase {\nAssert.assertEquals(3, results.getLong(\"y\"));\n}\n- // NOTE: Uncomment these tests once they work\n-\n- // @SuppressWarnings({ \"rawtypes\", \"unchecked\" })\n- // @Test\n- // public void testInputTupleSeqWithAndWithoutMetadataDML() {\n- // System.out.println(\"MLContextTest - Tuple sequence with and without\n- // metadata DML\");\n- //\n- // List<String> list1 = new ArrayList<String>();\n- // list1.add(\"1,2\");\n- // list1.add(\"3,4\");\n- // JavaRDD<String> javaRDD1 = sc.parallelize(list1);\n- // RDD<String> rdd1 = JavaRDD.toRDD(javaRDD1);\n- //\n- // List<String> list2 = new ArrayList<String>();\n- // list2.add(\"5,6\");\n- // list2.add(\"7,8\");\n- // JavaRDD<String> javaRDD2 = sc.parallelize(list2);\n- // RDD<String> rdd2 = JavaRDD.toRDD(javaRDD2);\n- //\n- // MatrixMetadata mm1 = new MatrixMetadata(2, 2);\n- //\n- // Tuple3 tuple1 = new Tuple3(\"m1\", rdd1, mm1);\n- // Tuple2 tuple2 = new Tuple2(\"m2\", rdd2);\n- // List tupleList = new ArrayList();\n- // tupleList.add(tuple1);\n- // tupleList.add(tuple2);\n- // Seq seq = JavaConversions.asScalaBuffer(tupleList).toSeq();\n- //\n- // Script script =\n- // dml(\"print('sums: ' + sum(m1) + ' ' + sum(m2));\").in(seq);\n- // setExpectedStdOut(\"sums: 10.0 26.0\");\n- // ml.execute(script);\n- // }\n- //\n- // @SuppressWarnings({ \"rawtypes\", \"unchecked\" })\n- // @Test\n- // public void 
testInputTupleSeqWithAndWithoutMetadataPYDML() {\n- // System.out.println(\"MLContextTest - Tuple sequence with and without\n- // metadata PYDML\");\n- //\n- // List<String> list1 = new ArrayList<String>();\n- // list1.add(\"1,2\");\n- // list1.add(\"3,4\");\n- // JavaRDD<String> javaRDD1 = sc.parallelize(list1);\n- // RDD<String> rdd1 = JavaRDD.toRDD(javaRDD1);\n- //\n- // List<String> list2 = new ArrayList<String>();\n- // list2.add(\"5,6\");\n- // list2.add(\"7,8\");\n- // JavaRDD<String> javaRDD2 = sc.parallelize(list2);\n- // RDD<String> rdd2 = JavaRDD.toRDD(javaRDD2);\n- //\n- // MatrixMetadata mm1 = new MatrixMetadata(2, 2);\n- //\n- // Tuple3 tuple1 = new Tuple3(\"m1\", rdd1, mm1);\n- // Tuple2 tuple2 = new Tuple2(\"m2\", rdd2);\n- // List tupleList = new ArrayList();\n- // tupleList.add(tuple1);\n- // tupleList.add(tuple2);\n- // Seq seq = JavaConversions.asScalaBuffer(tupleList).toSeq();\n- //\n- // Script script =\n- // pydml(\"print('sums: ' + sum(m1) + ' ' + sum(m2))\").in(seq);\n- // setExpectedStdOut(\"sums: 10.0 26.0\");\n- // ml.execute(script);\n- // }\n-\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop underlying spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- spark.stop();\n- sc = null;\n- spark = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTestBase.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.mlcontext;\n+\n+import org.apache.spark.api.java.JavaSparkContext;\n+import org.apache.spark.sql.SparkSession;\n+import org.apache.sysml.api.mlcontext.MLContext;\n+import org.apache.sysml.api.mlcontext.MLContextUtil;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.junit.After;\n+import org.junit.AfterClass;\n+import org.junit.BeforeClass;\n+\n+/**\n+ * Abstract class that can be used for MLContext tests.\n+ * <p>\n+ * Note that if using the setUp() method of MLContextTestBase, the test directory\n+ * and test name can be specified if needed in the subclass.\n+ * <p>\n+ *\n+ * Example:\n+ *\n+ * <pre>\n+ * public MLContextTestExample() {\n+ * testDir = this.getClass().getPackage().getName().replace(\".\", File.separator);\n+ * testName = this.getClass().getSimpleName();\n+ * }\n+ * </pre>\n+ *\n+ */\n+public abstract class MLContextTestBase extends AutomatedTestBase {\n+\n+ protected static SparkSession spark;\n+ protected static JavaSparkContext sc;\n+ protected static MLContext ml;\n+\n+ protected String testDir = null;\n+ protected String testName = null;\n+\n+ @Override\n+ public void setUp() {\n+ Class<? extends MLContextTestBase> clazz = this.getClass();\n+ String dir = (testDir == null) ? \"org/apache/sysml/api/mlcontext\" : testDir;\n+ String name = (testName == null) ? clazz.getSimpleName() : testName;\n+\n+ addTestConfiguration(dir, name);\n+ getAndLoadTestConfiguration(name);\n+ }\n+\n+ @BeforeClass\n+ public static void setUpClass() {\n+ spark = createSystemMLSparkSession(\"SystemML MLContext Test\", \"local\");\n+ ml = new MLContext(spark);\n+ sc = MLContextUtil.getJavaSparkContext(ml);\n+ }\n+\n+ @After\n+ public void tearDown() {\n+ super.tearDown();\n+ }\n+\n+ @AfterClass\n+ public static void tearDownClass() {\n+ // stop underlying spark context to allow single jvm tests (otherwise\n+ // the next test that tries to create a SparkContext would fail)\n+ spark.stop();\n+ sc = null;\n+ spark = null;\n+ ml.close();\n+ ml = null;\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/scripts/nn/NNTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/scripts/nn/NNTest.java",
"diff": "package org.apache.sysml.test.integration.scripts.nn;\n-import org.apache.spark.sql.SparkSession;\n-import org.apache.sysml.api.mlcontext.MLContext;\n+import static org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile;\n+\nimport org.apache.sysml.api.mlcontext.Script;\n-import org.apache.sysml.test.integration.AutomatedTestBase;\n-import org.junit.After;\n-import org.junit.AfterClass;\n-import org.junit.BeforeClass;\n+import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;\nimport org.junit.Test;\n-import static org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile;\n-\n/**\n* Test the SystemML deep learning library, `nn`.\n*/\n-public class NNTest extends AutomatedTestBase {\n+public class NNTest extends MLContextTestBase {\n- private static final String TEST_NAME = \"NNTest\";\n- private static final String TEST_DIR = \"scripts/\";\nprivate static final String TEST_SCRIPT = \"scripts/nn/test/run_tests.dml\";\nprivate static final String ERROR_STRING = \"ERROR:\";\n- private static SparkSession spark;\n- private static MLContext ml;\n-\n- @BeforeClass\n- public static void setUpClass() {\n- spark = createSystemMLSparkSession(\"MLContextTest\", \"local\");\n- ml = new MLContext(spark);\n- }\n-\n- @Override\n- public void setUp() {\n- addTestConfiguration(TEST_DIR, TEST_NAME);\n- getAndLoadTestConfiguration(TEST_NAME);\n- }\n-\n@Test\npublic void testNNLibrary() {\nScript script = dmlFromFile(TEST_SCRIPT);\n@@ -62,20 +40,4 @@ public class NNTest extends AutomatedTestBase {\nml.execute(script);\n}\n- @After\n- public void tearDown() {\n- super.tearDown();\n- }\n-\n- @AfterClass\n- public static void tearDownClass() {\n- // stop underlying spark context to allow single jvm tests (otherwise the\n- // next test that tries to create a SparkContext would fail)\n- spark.stop();\n- spark = null;\n-\n- // clear status mlcontext and spark exec context\n- ml.close();\n- ml = null;\n- }\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1777] MLContextTestBase class for MLContext testing
Create abstract MLContextTestBase class that contains setup and shutdown
code for MLContext tests. This removes boilerplate code from MLContext
test classes that extend MLContextTestBase.
Closes #580. |
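A hypothetical subclass showing how a test uses the shared spark/sc/ml fields inherited from the base class (the class name and test body are illustrative only; setExpectedStdOut comes from AutomatedTestBase as used in the diffs above):

import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;

import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;
import org.junit.Test;

public class MLContextExampleTest extends MLContextTestBase {
    @Test
    public void testHelloWorld() {
        // ml is created once per class by setUpClass() and closed by tearDownClass()
        setExpectedStdOut("hello world");
        ml.execute(dml("print('hello world');"));
    }
}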
49,703 | 19.07.2017 23:04:57 | 25,200 | 6618a851690b18f53423c6607c517acb1ba892df | [MINOR] Move GPU test suite to org.apache.sysml.test.gpu
Move GPU test suite from org.apache.sysml.test.integration.gpu to
org.apache.sysml.test.gpu to match location of test classes. | [
{
"change_type": "RENAME",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/gpu/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/gpu/ZPackageSuite.java",
"diff": "* under the License.\n*/\n-package org.apache.sysml.test.integration.gpu;\n+package org.apache.sysml.test.gpu;\nimport org.apache.sysml.test.gpu.AggregateUnaryOpTests;\nimport org.apache.sysml.test.gpu.AppendTest;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Move GPU test suite to org.apache.sysml.test.gpu
Move GPU test suite from org.apache.sysml.test.integration.gpu to
org.apache.sysml.test.gpu to match location of test classes. |
49,703 | 20.07.2017 12:37:37 | 25,200 | 3d4c21b01a891cdae5b2dfc712b2ad7eac82afe6 | MLContext Univariate Statistics test
Create MLContext test class for Univar-Stats.dml top-level algorithm.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java",
"diff": "@@ -55,6 +55,7 @@ import org.apache.sysml.runtime.matrix.data.FrameBlock;\nimport org.apache.sysml.runtime.matrix.data.InputInfo;\nimport org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.runtime.util.DataConverter;\nimport org.apache.sysml.runtime.util.MapReduceTool;\nimport org.apache.sysml.test.utils.TestUtils;\nimport org.apache.sysml.utils.ParameterBuilder;\n@@ -1852,4 +1853,12 @@ public abstract class AutomatedTestBase\nSparkSession spark = builder.getOrCreate();\nreturn spark;\n}\n+\n+ public static String getMatrixAsString(double[][] matrix) {\n+ try {\n+ return DataConverter.toString(DataConverter.convertToMatrixBlock(matrix));\n+ } catch (DMLRuntimeException e) {\n+ return \"N/A\";\n+ }\n+ }\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/algorithms/MLContextUnivariateStatisticsTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.mlcontext.algorithms;\n+\n+import static org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile;\n+\n+import java.util.concurrent.ThreadLocalRandom;\n+\n+import org.apache.log4j.Logger;\n+import org.apache.sysml.api.mlcontext.Script;\n+import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+public class MLContextUnivariateStatisticsTest extends MLContextTestBase {\n+ protected static Logger log = Logger.getLogger(MLContextUnivariateStatisticsTest.class);\n+\n+ protected final static String TEST_SCRIPT = \"scripts/algorithms/Univar-Stats.dml\";\n+\n+ @Test\n+ public void testRandomMatrix() {\n+ double[][] random10x3 = getRandomMatrix(10, 3, 0.0, 10.0, 0.9, -1);\n+ double[][] types = new double[][] { { 1.0, 1.0, 1.0 } };\n+ Script univarStats = dmlFromFile(TEST_SCRIPT);\n+ univarStats.in(\"A\", random10x3).in(\"K\", types).in(\"$CONSOLE_OUTPUT\", true).out(\"baseStats\");\n+ ml.execute(univarStats);\n+ }\n+\n+ @Test\n+ public void testRandomMatrixWithRandomCategoricalColumn() {\n+ double[][] random10x3 = getRandomMatrix(10, 3, 0.0, 10.0, 0.9, -1);\n+ log.debug(\"Matrix before random int column replace:\\n\" + getMatrixAsString(random10x3));\n+ replaceColumnWithRandomInts(random10x3, 2, 1, 2);\n+ log.debug(\"Matrix after random int column replace:\\n\" + getMatrixAsString(random10x3));\n+ double[][] types = new double[][] { { 1.0, 1.0, 2.0 } };\n+ Script univarStats = dmlFromFile(TEST_SCRIPT);\n+ univarStats.in(\"A\", random10x3).in(\"K\", types).out(\"baseStats\");\n+ ml.execute(univarStats);\n+ }\n+\n+ @Test\n+ public void testScaleColumn() {\n+ double[][] matrix = new double[][] { { 1.0 }, { 2.0 }, { 2.0 }, { 3.0 }, { 4.0 } };\n+ double[][] types = new double[][] { { 1.0 } };\n+ Script univarStats = dmlFromFile(TEST_SCRIPT);\n+ univarStats.in(\"A\", matrix).in(\"K\", types).out(\"baseStats\");\n+ double[][] stats = ml.execute(univarStats).getMatrix(\"baseStats\").to2DDoubleArray();\n+ log.debug(\"Stats for scale column:\\n\" + getMatrixAsString(stats));\n+ Assert.assertEquals(1.0, stats[0][0], 0); // minimum\n+ Assert.assertEquals(4.0, stats[1][0], 0); // maximum\n+ Assert.assertEquals(2.4, stats[3][0], 0); // average\n+ Assert.assertEquals(2.0, stats[12][0], 0); // mean\n+ }\n+\n+ @Test\n+ public void testCategoricalColumn() {\n+ double[][] matrix = new double[][] { { 1.0 }, { 2.0 }, { 2.0 }, { 3.0 }, { 4.0 } };\n+ double[][] types = new double[][] { { 2.0 } };\n+ Script univarStats = dmlFromFile(TEST_SCRIPT);\n+ univarStats.in(\"A\", matrix).in(\"K\", types).out(\"baseStats\");\n+ double[][] stats = 
ml.execute(univarStats).getMatrix(\"baseStats\").to2DDoubleArray();\n+ log.debug(\"Stats for categorical column:\\n\" + getMatrixAsString(stats));\n+ Assert.assertEquals(4.0, stats[14][0], 0); // number of categories\n+ Assert.assertEquals(2.0, stats[15][0], 0); // mode\n+ Assert.assertEquals(1.0, stats[16][0], 0); // number of modes\n+ }\n+\n+ private void replaceColumnWithRandomInts(double[][] matrix, int whichColumn, int lowValue, int highValue) {\n+ for (int i = 0; i < matrix.length; i++) {\n+ double[] row = matrix[i];\n+ row[whichColumn] = ThreadLocalRandom.current().nextInt(lowValue, highValue + 1);\n+ }\n+ }\n+\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/mlcontext/algorithms/ZPackageSuite.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.mlcontext.algorithms;\n+\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Suite;\n+\n+/**\n+ * Group together the tests in this package.\n+ */\n+@RunWith(Suite.class)\[email protected]({ MLContextUnivariateStatisticsTest.class })\n+\n+/** This class is just a holder for the above JUnit annotations. */\n+public class ZPackageSuite {\n+\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1789] MLContext Univariate Statistics test
Create MLContext test class for Univar-Stats.dml top-level algorithm.
Closes #585. |
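For reference, a minimal sketch of driving Univar-Stats.dml through MLContext, distilled from the testScaleColumn case above. It assumes an already-constructed MLContext named ml and the repository's scripts directory on the working path; per the test, kind code 1.0 marks a scale column and 2.0 a categorical one:

import static org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile;
import org.apache.sysml.api.mlcontext.Script;

double[][] data = { { 1.0 }, { 2.0 }, { 2.0 }, { 3.0 }, { 4.0 } };
double[][] kinds = { { 1.0 } }; // one scale column
Script univarStats = dmlFromFile("scripts/algorithms/Univar-Stats.dml");
univarStats.in("A", data).in("K", kinds).out("baseStats");
double[][] stats = ml.execute(univarStats).getMatrix("baseStats").to2DDoubleArray();
// per the assertions above: stats[0][0] = minimum, stats[1][0] = maximum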
49,703 | 21.07.2017 01:42:15 | 25,200 | 6133be23ee8caefed84b1042c4335f19520db64e | [MINOR] Exclude uberjar from python artifact when using both profiles
Exclude standalone uberjar from python jar if both profiles are used to
build the artifacts:
mvn clean package -P distribution,standalone-jar | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/pre_setup.py",
"new_path": "src/main/python/pre_setup.py",
"diff": "@@ -32,7 +32,8 @@ root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\nfor file in os.listdir(os.path.join(root_dir, 'target')):\nif (fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT.jar') or fnmatch.fnmatch(file, 'systemml-*.jar')\nand not (fnmatch.fnmatch(file, 'systemml-*javadoc.jar')\n- or fnmatch.fnmatch(file, 'systemml-*sources.jar'))):\n+ or fnmatch.fnmatch(file, 'systemml-*sources.jar')\n+ or fnmatch.fnmatch(file, 'systemml-*standalone.jar'))):\nshutil.copyfile(os.path.join(root_dir, 'target', file),\nos.path.join(java_dir_full_path, file))\nif fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT-extra.jar') or fnmatch.fnmatch(file, 'systemml-*-extra.jar'):\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Exclude uberjar from python artifact when using both profiles
Exclude standalone uberjar from python jar if both profiles are used to
build the artifacts:
mvn clean package -P distribution,standalone-jar |
49,736 | 21.07.2017 09:18:20 | 28,800 | eee35e984aa1a810da1fadf294174e45dcca15bd | Bugfix for NPE in relu backward instruction
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"diff": "@@ -222,12 +222,15 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\n// (X > 0) * dout\nMatrixBlock input = ec.getMatrixInput(input1.getName(), getExtendedOpcode());\nMatrixBlock dout = ec.getMatrixInput(_in2.getName(), getExtendedOpcode());\n- MatrixBlock outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), (input.isInSparseFormat() || dout.isInSparseFormat()));\n+ MatrixBlock outputBlock;\nif( !input.isEmpty() && !dout.isEmpty() ) {\n- outputBlock.allocateDenseOrSparseBlock();\n+ outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), false);\n+ outputBlock.allocateDenseBlock();\nLibMatrixDNN.reluBackward(input, dout, outputBlock, _numThreads);\n}\n+ else\n+ outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), true);\n// release inputs/outputs\nec.releaseMatrixInput(input1.getName(), getExtendedOpcode());\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1774] Bugfix for NPE in relu backward instruction
Closes #579. |
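The semantics being computed, per the "(X > 0) * dout" comment in the diff, can be sketched in plain Java as follows (dense arrays only; the real kernel in LibMatrixDNN additionally handles sparse inputs, empty blocks, and multi-threading):

public class ReluBackwardSketch {
    // dX[i][j] = dout[i][j] where X[i][j] > 0, else 0
    static double[][] reluBackward(double[][] X, double[][] dout) {
        int m = X.length, n = X[0].length;
        double[][] dX = new double[m][n];
        for (int i = 0; i < m; i++)
            for (int j = 0; j < n; j++)
                dX[i][j] = (X[i][j] > 0) ? dout[i][j] : 0;
        return dX;
    }

    public static void main(String[] args) {
        double[][] X = { { -1, 2 }, { 3, 0 } };
        double[][] dout = { { 5, 6 }, { 7, 8 } };
        // prints 0.0 6.0 then 7.0 0.0
        for (double[] row : reluBackward(X, dout))
            System.out.println(row[0] + " " + row[1]);
    }
}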
49,736 | 21.07.2017 10:22:04 | 25,200 | 1f5b14dda5ee231a37bd1935b92ba8212132355b | Allow mllearn models to load the model eagerly.
This simplifies performance debugging of training and scoring.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mllearn/estimators.py",
"new_path": "src/main/python/systemml/mllearn/estimators.py",
"diff": "@@ -366,7 +366,7 @@ class BaseSystemMLClassifier(BaseSystemMLEstimator):\nself.labelMap[int(keys[i])] = values[i]\n# self.encode(classes) # Giving incorrect results\n- def load(self, weights, sep='/'):\n+ def load(self, weights, sep='/', eager=False):\n\"\"\"\nLoad a pretrained model.\n@@ -374,9 +374,10 @@ class BaseSystemMLClassifier(BaseSystemMLEstimator):\n----------\nweights: directory whether learned weights are stored\nsep: seperator to use (default: '/')\n+ eager: load the model eagerly. This flag should be only used for debugging purposes. (default: False)\n\"\"\"\nself.weights = weights\n- self.model.load(self.sc._jsc, weights, sep)\n+ self.model.load(self.sc._jsc, weights, sep, eager)\nself.loadLabels(weights + '/labels.txt')\ndef save(self, outputDir, format='binary', sep='/'):\n@@ -421,7 +422,7 @@ class BaseSystemMLRegressor(BaseSystemMLEstimator):\n\"\"\"\nreturn r2_score(y, self.predict(X), multioutput='variance_weighted')\n- def load(self, weights=None, sep='/'):\n+ def load(self, weights=None, sep='/', eager=False):\n\"\"\"\nLoad a pretrained model.\n@@ -429,9 +430,10 @@ class BaseSystemMLRegressor(BaseSystemMLEstimator):\n----------\nweights: directory whether learned weights are stored (default: None)\nsep: seperator to use (default: '/')\n+ eager: load the model eagerly (default: False)\n\"\"\"\nself.weights = weights\n- self.model.load(self.sc._jsc, weights, sep)\n+ self.model.load(self.sc._jsc, weights, sep, eager)\ndef save(self, outputDir, format='binary', sep='/'):\n\"\"\"\n@@ -764,7 +766,7 @@ class Caffe2DML(BaseSystemMLClassifier):\nif tensorboard_log_dir is not None:\nself.estimator.setTensorBoardLogDir(tensorboard_log_dir)\n- def load(self, weights=None, sep='/', ignore_weights=None):\n+ def load(self, weights=None, sep='/', ignore_weights=None, eager=False):\n\"\"\"\nLoad a pretrained model.\n@@ -773,11 +775,12 @@ class Caffe2DML(BaseSystemMLClassifier):\nweights: directory whether learned weights are stored (default: None)\nsep: seperator to use (default: '/')\nignore_weights: names of layers to not read from the weights directory (list of string, default:None)\n+ eager: load the model eagerly (default: False)\n\"\"\"\nself.weights = weights\nself.estimator.setInput(\"$weights\", str(weights))\nself.model = self.sc._jvm.org.apache.sysml.api.dl.Caffe2DMLModel(self.estimator)\n- self.model.load(self.sc._jsc, weights, sep)\n+ self.model.load(self.sc._jsc, weights, sep, eager)\nself.loadLabels(weights + '/labels.txt')\nif ignore_weights is not None:\nself.estimator.setWeightsToIgnore(ignore_weights)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"diff": "@@ -36,6 +36,7 @@ import org.apache.spark.sql._\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel\nimport java.util.HashMap\nimport scala.collection.JavaConversions._\n+import java.util.Random\n/****************************************************\n@@ -162,11 +163,19 @@ trait BaseSystemMLEstimatorModel extends BaseSystemMLEstimatorOrModel {\ndef baseEstimator():BaseSystemMLEstimator\ndef modelVariables():List[String]\n// self.model.load(self.sc._jsc, weights, format, sep)\n- def load(sc:JavaSparkContext, outputDir:String, sep:String):Unit = {\n+ def load(sc:JavaSparkContext, outputDir:String, sep:String, eager:Boolean=false):Unit = {\nval dmlScript = new StringBuilder\ndmlScript.append(\"print(\\\"Loading the model from \" + outputDir + \"...\\\")\\n\")\n+ val tmpSum = \"tmp_sum_var\" + Math.abs((new Random()).nextInt())\n+ if(eager)\n+ dmlScript.append(tmpSum + \" = 0\\n\")\nfor(varName <- modelVariables) {\ndmlScript.append(varName + \" = read(\\\"\" + outputDir + sep + varName + \".mtx\\\")\\n\")\n+ if(eager)\n+ dmlScript.append(tmpSum + \" = \" + tmpSum + \" + 0.001*mean(\" + varName + \")\\n\")\n+ }\n+ if(eager) {\n+ dmlScript.append(\"if(\" + tmpSum + \" > 0) { print(\\\"Loaded the model\\\"); } else { print(\\\"Loaded the model.\\\"); }\")\n}\nval script = dml(dmlScript.toString)\nfor(varName <- modelVariables) {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] Allow mllearn models to load the model eagerly.
- This simplifies performance debugging of training and scoring.
Closes #574. |
49,717 | 21.07.2017 13:55:03 | 25,200 | fec209306d3c7e55673872f431d43ceda53b7a6c | Specify a set of GPUs to use for a given machine
Can specify:
a) -1 for all GPUs
b) a specific GPU number
c) a comma separated list of GPUs
d) a range of GPUs
Closes | [
{
"change_type": "MODIFY",
"old_path": "conf/SystemML-config.xml.template",
"new_path": "conf/SystemML-config.xml.template",
"diff": "<!-- prints extra statistics information for Deep Neural Networks done in CP mode -->\n<systemml.stats.extraDNN>false</systemml.stats.extraDNN>\n- <!-- sets the maximum number of GPUs per process, -1 for all GPUs -->\n- <systemml.gpu.perProcessMax>-1</systemml.gpu.perProcessMax>\n+ <!-- sets the GPUs to use per process, -1 for all GPUs, a specific GPU number (5), a range (eg: 0-2) or a comma separated list (eg: 0,2,4)-->\n+ <systemml.gpu.availableGPUs>-1</systemml.gpu.availableGPUs>\n</root>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java",
"new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java",
"diff": "@@ -78,9 +78,8 @@ public class ScriptExecutorUtils {\nLibMatrixDNN.DISPLAY_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_DNN_STATS);\nDMLScript.FINEGRAINED_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_FINEGRAINED_STATS);\n- // Sets the maximum number of GPUs per process, -1 for all available\n- // GPUs\n- GPUContextPool.PER_PROCESS_MAX_GPUS = dmlconf.getIntValue(DMLConfig.MAX_GPUS_PER_PROCESS);\n+ // Sets the GPUs to use for this process (a range, all GPUs, comma separated list or a specific GPU)\n+ GPUContextPool.AVAILABLE_GPUS = dmlconf.getTextValue(DMLConfig.AVAILABLE_GPUS);\nStatistics.startRunTimer();\ntry {\n// run execute (w/ exception handling to ensure proper shutdown)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -78,7 +78,7 @@ public class DMLConfig\npublic static final String EXTRA_FINEGRAINED_STATS = \"systemml.stats.finegrained\"; //boolean\npublic static final String EXTRA_GPU_STATS = \"systemml.stats.extraGPU\"; //boolean\npublic static final String EXTRA_DNN_STATS = \"systemml.stats.extraDNN\"; //boolean\n- public static final String MAX_GPUS_PER_PROCESS = \"systemml.gpu.perProcessMax\"; // boolean, maximum number of gpus to use, -1 for all\n+ public static final String AVAILABLE_GPUS = \"systemml.gpu.availableGPUs\"; // String to specify which GPUs to use (a range, all GPUs, comma separated list or a specific GPU)\n// Fraction of available memory to use. The available memory is computer when the GPUContext is created\n// to handle the tradeoff on calling cudaMemGetInfo too often.\n@@ -123,7 +123,7 @@ public class DMLConfig\n_defaultVals.put(EXTRA_DNN_STATS, \"false\" );\n_defaultVals.put(GPU_MEMORY_UTILIZATION_FACTOR, \"0.9\" );\n- _defaultVals.put(MAX_GPUS_PER_PROCESS, \"-1\");\n+ _defaultVals.put(AVAILABLE_GPUS, \"-1\");\n}\npublic DMLConfig()\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContextPool.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContextPool.java",
"diff": "@@ -22,6 +22,7 @@ import static jcuda.driver.JCudaDriver.cuDeviceGetCount;\nimport static jcuda.driver.JCudaDriver.cuInit;\nimport static jcuda.runtime.JCuda.cudaGetDeviceProperties;\n+import java.util.ArrayList;\nimport java.util.LinkedList;\nimport java.util.List;\n@@ -42,9 +43,9 @@ public class GPUContextPool {\nprotected static final Log LOG = LogFactory.getLog(GPUContextPool.class.getName());\n/**\n- * Maximum number of gpus to use, -1 for all\n+ * GPUs to use, can specify -1 to use all, comma separated list of GPU numbers, a specific GPU or a range\n*/\n- public static int PER_PROCESS_MAX_GPUS = -1;\n+ public static String AVAILABLE_GPUS;\nprivate static long INITIAL_GPU_MEMORY_BUDGET = -1;\n@@ -98,21 +99,31 @@ public class GPUContextPool {\ndeviceCount = deviceCountArray[0];\ndeviceProperties = new cudaDeviceProp[deviceCount];\n- if (PER_PROCESS_MAX_GPUS > 0)\n- deviceCount = Math.min(PER_PROCESS_MAX_GPUS, deviceCount);\n+ try {\n+ ArrayList<Integer> listOfGPUs = parseListString(AVAILABLE_GPUS, deviceCount);\n- // Initialize the list of devices\n- for (int i = 0; i < deviceCount; i++) {\n+ // Initialize the list of devices & the pool of GPUContexts\n+ for (int i : listOfGPUs) {\ncudaDeviceProp properties = new cudaDeviceProp();\ncudaGetDeviceProperties(properties, i);\ndeviceProperties[i] = properties;\n+ GPUContext gCtx = new GPUContext(i);\n+ pool.add(gCtx);\n}\n- // Initialize the pool of GPUContexts\n+ } catch (IllegalArgumentException e) {\n+ LOG.warn(\"Invalid setting for setting systemml.gpu.availableGPUs, defaulting to use ALL GPUs\");\n+\n+ // Initialize the list of devices & the pool of GPUContexts\nfor (int i = 0; i < deviceCount; i++) {\n+ cudaDeviceProp properties = new cudaDeviceProp();\n+ cudaGetDeviceProperties(properties, i);\n+ deviceProperties[i] = properties;\nGPUContext gCtx = new GPUContext(i);\npool.add(gCtx);\n}\n+ }\n+\n// Initialize the initial memory budget\n// If there are heterogeneous GPUs on the machine (different memory sizes)\n@@ -128,6 +139,7 @@ public class GPUContextPool {\nGPUContext.LOG.info(\"Total number of GPUs on the machine: \" + deviceCount);\n+ GPUContext.LOG.info(\"GPUs being used: \" + AVAILABLE_GPUS);\nGPUContext.LOG.info(\"Initial GPU memory: \" + initialGPUMemBudget());\n//int[] device = {-1};\n@@ -141,6 +153,56 @@ public class GPUContextPool {\nGPUStatistics.cudaInitTime = System.nanoTime() - start;\n}\n+ /**\n+ * Parses a string into a list. The string can be of these forms:\n+ * 1. \"-1\" : all integers from range 0 to max - [0,1,2,3....max]\n+ * 2. \"2,3,0\" : comma separated list of integers - [0,2,3]\n+ * 3. \"4\" : a specific integer - [4]\n+ * 4. \"0-4\" : a range of integers - [0,1,2,3,4]\n+ * In ranges and comma separated lists, all values must be positive. 
Anything else is invalid.\n+ * @param str input string\n+ * @param max maximum range of integers\n+ * @return the list of integers in the parsed string\n+ */\n+ public static ArrayList<Integer> parseListString(String str, int max) {\n+ ArrayList<Integer> result = new ArrayList<>();\n+ str = str.trim();\n+ if (str == \"-1\") { // all\n+ for (int i=0; i<max; i++){\n+ result.add(i);\n+ }\n+ } else if (str.contains(\"-\")){ // range\n+ String[] numbersStr = str.split(\"-\");\n+ if (numbersStr.length != 2) {\n+ throw new IllegalArgumentException(\"Invalid string to parse to a list of numbers : \" + str);\n+ }\n+ String beginStr = numbersStr[0];\n+ String endStr = numbersStr[1];\n+ int begin = Integer.parseInt(beginStr);\n+ int end = Integer.parseInt(endStr);\n+\n+ for (int i=begin; i<=end; i++){\n+ result.add(i);\n+ }\n+ } else if (str.contains(\",\")) { // comma separated list\n+ String[] numbers = str.split(\",\");\n+ for (int i = 0; i < numbers.length; i++) {\n+ int n = Integer.parseInt(numbers[i].trim());\n+ result.add(n);\n+ }\n+ } else { // single number\n+ int number = Integer.parseInt(str);\n+ result.add(number);\n+ }\n+ // Check if all numbers between 0 and max\n+ for (int n : result){\n+ if (n < 0 || n >= max) {\n+ throw new IllegalArgumentException(\"Invalid string (\" + str + \") parsed to a list of numbers (\" + result + \") which exceeds the maximum range : \");\n+ }\n+ }\n+ return result;\n+ }\n+\n/**\n* Reserves and gets an initialized list of GPUContexts\n*\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/unit/UtilsTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.unit;\n+\n+\n+import java.util.Arrays;\n+\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+/**\n+ * To test utility functions scattered throughout the codebase\n+ */\n+public class UtilsTest {\n+\n+ @Test\n+ public void testParseListString0() {\n+ Assert.assertEquals(Arrays.asList(0), GPUContextPool.parseListString(\"0\", 10));\n+ }\n+\n+ @Test\n+ public void testParseListString1() {\n+ Assert.assertEquals(Arrays.asList(7), GPUContextPool.parseListString(\"7\", 10));\n+ }\n+\n+ @Test\n+ public void testParseListString2() {\n+ Assert.assertEquals(Arrays.asList(0,1,2,3), GPUContextPool.parseListString(\"-1\", 4));\n+ }\n+\n+ @Test\n+ public void testParseListString3() {\n+ Assert.assertEquals(Arrays.asList(0,1,2,3), GPUContextPool.parseListString(\"0,1,2,3\", 6));\n+ }\n+\n+ @Test\n+ public void testParseListString4() {\n+ Assert.assertEquals(Arrays.asList(0,1,2,3), GPUContextPool.parseListString(\"0-3\", 6));\n+ }\n+\n+ @Test(expected=IllegalArgumentException.class)\n+ public void testParseListStringFail0() {\n+ GPUContextPool.parseListString(\"7\", 4);\n+ }\n+\n+ @Test(expected=IllegalArgumentException.class)\n+ public void testParseListStringFail1() {\n+ GPUContextPool.parseListString(\"0,1,2,3\", 2);\n+ }\n+\n+ @Test(expected=IllegalArgumentException.class)\n+ public void testParseListStringFail2() {\n+ GPUContextPool.parseListString(\"0,1,2,3-4\", 2);\n+ }\n+\n+ @Test(expected=IllegalArgumentException.class)\n+ public void testParseListStringFail4() {\n+ GPUContextPool.parseListString(\"-1-4\", 6);\n+ }\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1795] Specify a set of GPUs to use for a given machine
Can specify:
a) -1 for all GPUs
b) a specific number of GPU
c) a comma separated list of GPUs
d) a range of GPUs
Closes #587 |
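For reference, the grammar accepted by systemml.gpu.availableGPUs condenses into a small standalone parser. This sketch restates the logic of parseListString from the diff above (names are illustrative); note it compares "-1" by content, a detail a later commit in this log fixes in the original:

import java.util.ArrayList;
import java.util.List;

public class GpuListParser {
    // "-1" -> all of [0,max); "a-b" -> inclusive range; "x,y,z" -> explicit
    // list; "n" -> single device. Every value must lie in [0, max).
    public static List<Integer> parse(String str, int max) {
        List<Integer> out = new ArrayList<>();
        str = str.trim();
        if (str.equals("-1")) {
            for (int i = 0; i < max; i++)
                out.add(i);
        } else if (str.contains("-")) {
            String[] p = str.split("-");
            if (p.length != 2)
                throw new IllegalArgumentException("Invalid range: " + str);
            int begin = Integer.parseInt(p[0].trim());
            int end = Integer.parseInt(p[1].trim());
            for (int i = begin; i <= end; i++)
                out.add(i);
        } else if (str.contains(",")) {
            for (String n : str.split(","))
                out.add(Integer.parseInt(n.trim()));
        } else {
            out.add(Integer.parseInt(str));
        }
        for (int n : out)
            if (n < 0 || n >= max)
                throw new IllegalArgumentException("Device id out of range: " + n);
        return out;
    }

    public static void main(String[] args) {
        System.out.println(parse("0-3", 6));   // [0, 1, 2, 3]
        System.out.println(parse("0,2,4", 6)); // [0, 2, 4]
    }
}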
49,738 | 20.07.2017 21:36:22 | 25,200 | c6679b7b890f2a4e4553988c9a043ef06cd8e9f4 | [MINOR] Performance and cleanup ctable result extraction
This patch cleans up the ctable result extraction by avoiding the
unnecessary materialization of result cells as a list, in order to improve
memory-efficiency and performance. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/TernarySPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/TernarySPInstruction.java",
"diff": "@@ -459,18 +459,16 @@ public class TernarySPInstruction extends ComputationSPInstruction\nprivate static final long serialVersionUID = -5933677686766674444L;\n- @SuppressWarnings(\"deprecation\")\n@Override\npublic Iterator<Tuple2<MatrixIndexes, Double>> call(CTableMap ctableMap)\nthrows Exception {\nArrayList<Tuple2<MatrixIndexes, Double>> retVal = new ArrayList<Tuple2<MatrixIndexes, Double>>();\n-\n- for(LLDoubleEntry ijv : ctableMap.entrySet()) {\n+ Iterator<LLDoubleEntry> iter = ctableMap.getIterator();\n+ while( iter.hasNext() ) {\n+ LLDoubleEntry ijv = iter.next();\nlong i = ijv.key1;\nlong j = ijv.key2;\ndouble v = ijv.value;\n-\n- // retVal.add(new Tuple2<MatrixIndexes, MatrixCell>(blockIndexes, cell));\nretVal.add(new Tuple2<MatrixIndexes, Double>(new MatrixIndexes(i, j), v));\n}\nreturn retVal.iterator();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/CTableMap.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/CTableMap.java",
"diff": "package org.apache.sysml.runtime.matrix.data;\n-import java.util.ArrayList;\n+import java.util.Iterator;\nimport org.apache.sysml.runtime.util.LongLongDoubleHashMap;\nimport org.apache.sysml.runtime.util.LongLongDoubleHashMap.LLDoubleEntry;\n@@ -43,15 +43,12 @@ public class CTableMap\n_maxCol = -1;\n}\n- public int size()\n- {\n+ public int size() {\nreturn _map.size();\n}\n- @Deprecated\n- public ArrayList<LLDoubleEntry> entrySet()\n- {\n- return _map.extractValues();\n+ public Iterator<LLDoubleEntry> getIterator() {\n+ return _map.getIterator();\n}\npublic long getMaxRow() {\n@@ -83,8 +80,9 @@ public class CTableMap\nif( sparse ) //SPARSE <- cells\n{\n//append cells to sparse target (prevent shifting)\n- for( LLDoubleEntry e : _map.extractValues() )\n- {\n+ Iterator<LLDoubleEntry> iter2 = _map.getIterator();\n+ while( iter2.hasNext() ) {\n+ LLDoubleEntry e = iter2.next();\ndouble value = e.value;\nint rix = (int)e.key1;\nint cix = (int)e.key2;\n@@ -98,8 +96,9 @@ public class CTableMap\nelse //DENSE <- cells\n{\n//directly insert cells into dense target\n- for( LLDoubleEntry e : _map.extractValues() )\n- {\n+ Iterator<LLDoubleEntry> iter = _map.getIterator();\n+ while( iter.hasNext() ) {\n+ LLDoubleEntry e = iter.next();\ndouble value = e.value;\nint rix = (int)e.key1;\nint cix = (int)e.key2;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/GMRCtableBuffer.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/GMRCtableBuffer.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.runtime.matrix.mapred;\nimport java.util.ArrayList;\nimport java.util.HashMap;\n+import java.util.Iterator;\nimport java.util.Map.Entry;\nimport org.apache.hadoop.mapred.Reporter;\n@@ -105,7 +106,6 @@ public class GMRCtableBuffer\nreturn _blockBuffer;\n}\n- @SuppressWarnings(\"deprecation\")\npublic void flushBuffer( Reporter reporter )\nthrows RuntimeException\n{\n@@ -129,15 +129,16 @@ public class GMRCtableBuffer\n}\n//output result data\n- for(LLDoubleEntry e: resultMap.entrySet()) {\n+ Iterator<LLDoubleEntry> iter = resultMap.getIterator();\n+ while( iter.hasNext() ) {\n+ LLDoubleEntry e = iter.next();\nkey = new MatrixIndexes(e.key1, e.key2);\nvalue.setValue(e.value);\n- for(Integer i: resultIDs) {\n+ for(Integer i: resultIDs)\n_collector.collectOutput(key, value, i, reporter);\n}\n}\n}\n- }\nelse if ( _blockBuffer != null ) {\nMatrixIndexes key=new MatrixIndexes(1,1);\n//DataConverter.writeBinaryBlockMatrixToHDFS(path, job, mat, mc.get_rows(), mc.get_cols(), mc.get_rows_per_block(), mc.get_cols_per_block(), replication);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/util/LongLongDoubleHashMap.java",
"new_path": "src/main/java/org/apache/sysml/runtime/util/LongLongDoubleHashMap.java",
"diff": "package org.apache.sysml.runtime.util;\n-import java.util.ArrayList;\n+import java.util.Iterator;\n/**\n* This native long long - double hashmap is specifically designed for\n@@ -73,20 +73,8 @@ public class LongLongDoubleHashMap\nresize();\n}\n- public ArrayList<LLDoubleEntry> extractValues()\n- {\n- ArrayList<LLDoubleEntry> ret = new ArrayList<LLDoubleEntry>();\n- for( LLDoubleEntry e : data ) {\n- if( e != null ) {\n- while( e.next!=null ) {\n- ret.add(e);\n- e = e.next;\n- }\n- ret.add(e);\n- }\n- }\n-\n- return ret;\n+ public Iterator<LLDoubleEntry> getIterator() {\n+ return new LLDoubleEntryIterator();\n}\nprivate void resize() {\n@@ -138,4 +126,42 @@ public class LongLongDoubleHashMap\nnext = null;\n}\n}\n+\n+ private class LLDoubleEntryIterator implements Iterator<LLDoubleEntry> {\n+ private LLDoubleEntry _curr;\n+ private int _currPos;\n+\n+ public LLDoubleEntryIterator() {\n+ _curr = null;\n+ _currPos = -1;\n+ findNext();\n+ }\n+\n+ @Override\n+ public boolean hasNext() {\n+ return (_curr != null);\n+ }\n+\n+ @Override\n+ public LLDoubleEntry next() {\n+ LLDoubleEntry ret = _curr;\n+ findNext();\n+ return ret;\n+ }\n+\n+ private void findNext() {\n+ if( _curr != null && _curr.next != null ) {\n+ _curr = _curr.next;\n+ return;\n+ }\n+ _currPos++;\n+ while( _currPos < data.length ) {\n+ _curr = data[_currPos];\n+ if( _curr != null )\n+ return;\n+ _currPos++;\n+ }\n+ _curr = null;\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockAppendSort.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockAppendSort.java",
"diff": "@@ -21,6 +21,9 @@ package org.apache.sysml.test.integration.functions.sparse;\nimport org.junit.Assert;\nimport org.junit.Test;\n+\n+import java.util.Iterator;\n+\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlockCOO;\nimport org.apache.sysml.runtime.matrix.data.SparseBlockCSR;\n@@ -175,9 +178,12 @@ public class SparseBlockAppendSort extends AutomatedTestBase\nfor( int i=0; i<rows; i++ )\nfor( int j=0; j<cols; j++ )\nmap.addValue(i, j, A[i][j]);\n- for( LLDoubleEntry e : map.extractValues() ) //random hash order\n+ Iterator<LLDoubleEntry> iter = map.getIterator();\n+ while( iter.hasNext() ) { //random hash order\n+ LLDoubleEntry e = iter.next();\nsblock.append((int)e.key1, (int)e.key2, e.value);\n}\n+ }\n//sort appended values\nsblock.sort();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockGetSet.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockGetSet.java",
"diff": "@@ -21,6 +21,9 @@ package org.apache.sysml.test.integration.functions.sparse;\nimport org.junit.Assert;\nimport org.junit.Test;\n+\n+import java.util.Iterator;\n+\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlockCOO;\n@@ -233,10 +236,13 @@ public class SparseBlockGetSet extends AutomatedTestBase\nfor( int i=0; i<rows; i++ )\nfor( int j=0; j<cols; j++ )\nmap.addValue(i, j, A[i][j]);\n- for( LLDoubleEntry e : map.extractValues() ) //random hash order\n+ Iterator<LLDoubleEntry> iter = map.getIterator();\n+ while( iter.hasNext() ) { //random hash order\n+ LLDoubleEntry e = iter.next();\nsblock.set((int)e.key1, (int)e.key2, e.value);\n}\n}\n+ }\n//check basic meta data\nif( sblock.numRows() != rows )\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Performance and cleanup ctable result extraction
This patch cleans up the ctable result extraction by avoiding the
unnecessary materialization of result cells as a list, in order to improve
memory-efficiency and performance. |
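The new LLDoubleEntryIterator walks the chained-bucket hash map lazily instead of copying all entries into an ArrayList first. A self-contained sketch of that iteration pattern with simplified stand-in types (not the actual SystemML classes):

import java.util.Iterator;

public class ChainedMap {
    static class Entry {
        final long key1, key2; final double value; Entry next;
        Entry(long k1, long k2, double v) { key1 = k1; key2 = k2; value = v; }
    }

    private final Entry[] buckets = new Entry[16];

    public void add(long k1, long k2, double v) {
        int ix = (int) ((k1 ^ k2) & (buckets.length - 1)); // toy hash, no resize
        Entry e = new Entry(k1, k2, v);
        e.next = buckets[ix];
        buckets[ix] = e;
    }

    // Advance within the current collision chain first, then scan forward
    // for the next non-empty bucket (the findNext pattern from the patch).
    public Iterator<Entry> iterator() {
        return new Iterator<Entry>() {
            private Entry curr = null;
            private int pos = -1;
            { findNext(); } // position on the first entry, if any
            public boolean hasNext() { return curr != null; }
            public Entry next() { Entry ret = curr; findNext(); return ret; }
            private void findNext() {
                if (curr != null && curr.next != null) { curr = curr.next; return; }
                while (++pos < buckets.length)
                    if (buckets[pos] != null) { curr = buckets[pos]; return; }
                curr = null;
            }
        };
    }

    public static void main(String[] args) {
        ChainedMap m = new ChainedMap();
        m.add(1, 2, 3.0);
        m.add(4, 5, 6.0);
        for (Iterator<Entry> it = m.iterator(); it.hasNext(); )
            System.out.println(it.next().value);
    }
}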
49,736 | 24.07.2017 15:02:55 | 25,200 | 11b689d49b78a73f44b5944cfe0d14bb7c05c3a7 | Fix build failure for distribution profile on Windows
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/post_setup.py",
"new_path": "src/main/python/post_setup.py",
"diff": "@@ -34,11 +34,8 @@ ARTIFACT_VERSION = __project_version__\nARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split(\"-\")[0]\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\n-if platform.system() == \"Windows\":\n+src_path_prefix = os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT)\n+src_path = src_path_prefix + '.zip' if platform.system() == \"Windows\" and os.path.exists(src_path_prefix + '.zip') else src_path_prefix + '.tar.gz'\nos.rename(\n- os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT + '.zip'),\n- os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.zip'))\n-else:\n- os.rename(\n- os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT + '.tar.gz'),\n+ src_path,\nos.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.tgz'))\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1802] Fix build failure for distribution profile on Windows
Closes #594. |
49,768 | 26.07.2017 13:03:36 | 25,200 | 5fa84ccfab89b6e2e43b1f4ad7a571c4d0e46cf1 | Fix input data processing for Caffe model
Closes 588. | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/converters.py",
"new_path": "src/main/python/systemml/converters.py",
"diff": "#\n#-------------------------------------------------------------\n-__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr']\n+__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr', 'getDatasetMean']\nimport numpy as np\nimport pandas as pd\nimport os\nimport math\n+\nfrom pyspark.context import SparkContext\nfrom scipy.sparse import coo_matrix, spmatrix, csr_matrix\nfrom .classloader import *\nSUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)\n+DATASET_MEAN = {'VGG_ILSVRC_19_2014':[103.939, 116.779, 123.68]}\n+\ndef getNumCols(numPyArr):\nif numPyArr.ndim == 1:\nreturn 1\n@@ -212,10 +215,38 @@ def convertToNumPyArr(sc, mb):\nelse:\nraise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway ourselves\n+# Returns the mean of a model if defined otherwise None\n+def getDatasetMean(dataset_name):\n+ \"\"\"\n+ Input Parameters\n+ ----------------\n+ dataset_name: Name of the dataset used to train model. This name is artificial name based on dataset used to train the model.\n+\n+ Returns\n+ -------\n+ mean: Mean value of model if its defined in the list DATASET_MEAN else None.\n+\n+ \"\"\"\n+\n+ try:\n+ mean = DATASET_MEAN[dataset_name.upper()]\n+ except:\n+ mean = None\n+ return mean\n+\n+\n# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)\n# The above call returns a numpy array of shape (6, 50176) in NCHW format\n-def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False):\n- from PIL import Image\n+def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,\n+ color_mode = 'RGB', mean=None):\n+\n+ ## Input Parameters\n+\n+ # color_mode: In case of VGG models which expect image data in BGR format instead of RGB for other most models,\n+ # color_mode parameter is used to process image data in BGR format.\n+\n+ # mean: mean value is used to subtract from input data from every pixel value. 
By default value is None, so mean value not subtracted.\n+\nif img_shape is not None:\nnum_channels = img_shape[0]\nsize = (img_shape[1], img_shape[2])\n@@ -224,24 +255,45 @@ def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mir\nsize = None\nif num_channels != 1 and num_channels != 3:\nraise ValueError('Expected the number of channels to be either 1 or 3')\n+\n+ from PIL import Image\n+\nif size is not None:\nim = im.resize(size, Image.LANCZOS)\nexpected_mode = 'L' if num_channels == 1 else 'RGB'\nif expected_mode is not im.mode:\nim = im.convert(expected_mode)\n+\ndef _im2NumPy(im):\nif expected_mode == 'L':\nreturn np.asarray(im.getdata()).reshape((1, -1))\nelse:\n- # (H,W,C) --> (C,H,W) --> (1, C*H*W)\n- return np.asarray(im).transpose(2, 0, 1).reshape((1, -1))\n+ im = (np.array(im).astype(np.float))\n+\n+ # (H,W,C) -> (C,H,W)\n+ im = im.transpose(2, 0, 1)\n+\n+ # RGB -> BGR\n+ if color_mode == 'BGR':\n+ im = im[...,::-1]\n+\n+ # Subtract Mean\n+ if mean is not None:\n+ for c in range(3):\n+ im[:, :, c] = im[:, :, c] - mean[c]\n+\n+ # (C,H,W) --> (1, C*H*W)\n+ return im.reshape((1, -1))\n+\nret = _im2NumPy(im)\n+\nif add_rotated_images:\nret = np.vstack((ret, _im2NumPy(im.rotate(90)), _im2NumPy(im.rotate(180)), _im2NumPy(im.rotate(270)) ))\nif add_mirrored_images:\nret = np.vstack((ret, _im2NumPy(im.transpose(Image.FLIP_LEFT_RIGHT)), _im2NumPy(im.transpose(Image.FLIP_TOP_BOTTOM))))\nreturn ret\n+\ndef convertToPandasDF(X):\nif not isinstance(X, pd.DataFrame):\nreturn pd.DataFrame(X, columns=['C' + str(i) for i in range(getNumCols(X))])\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1703] Fix input data processing for Caffe VGG-19 model
Closes 588. |
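The Python converter above resizes the image, reorders (H,W,C) pixel data into NCHW, optionally flips RGB to BGR, and subtracts a per-channel mean such as the VGG_ILSVRC_19_2014 values. A hedged Java sketch of the layout steps; the array representation and the assumption that the mean is given in output-channel order are illustrative:

public class ImagePrep {
    // hwc[i][j][ch] holds pixel (i,j) of channel ch; the result is one flat
    // row of length C*H*W in channel-major (NCHW) order.
    public static double[] toNchwRow(int[][][] hwc, boolean bgr, double[] mean) {
        int h = hwc.length, w = hwc[0].length, c = hwc[0][0].length;
        double[] out = new double[c * h * w];
        for (int ch = 0; ch < c; ch++) {
            int src = bgr ? (c - 1 - ch) : ch; // BGR: reverse the channel order
            for (int i = 0; i < h; i++)
                for (int j = 0; j < w; j++) {
                    double v = hwc[i][j][src];
                    if (mean != null)
                        v -= mean[ch]; // mean assumed given in output order
                    out[ch * h * w + i * w + j] = v;
                }
        }
        return out;
    }
}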
49,738 | 26.07.2017 14:24:10 | 25,200 | 7ae1b1c4c46ab8741b06a5a77d4fff1aa2f06f87 | [MINOR] Modified codegen rowagg test (smaller value range of outputs)
This particular test case created outputs in the range of 10^19. Due to
our absolute epsilon of 10^{-10}, even tiny differences from R led to test
failures (this test failed on jenkins after an update to R 3.4). | [
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/codegen/rowAggPattern30.R",
"new_path": "src/test/scripts/functions/codegen/rowAggPattern30.R",
"diff": "@@ -24,9 +24,9 @@ options(digits=22)\nlibrary(\"Matrix\")\nlibrary(\"matrixStats\")\n-X = matrix(seq(1,6000), 600, 10, byrow=TRUE);\n-ssX_V = matrix(seq(1,40), 10, 4, byrow=TRUE);\n-P = matrix(seq(1,3000), 600, 5, byrow=TRUE);\n+X = matrix(seq(1,6000), 600, 10, byrow=TRUE)/6000;\n+ssX_V = matrix(seq(1,40), 10, 4, byrow=TRUE)/40;\n+P = matrix(seq(1,3000), 600, 5, byrow=TRUE)/3000;\nK = 4;\nQ = P[,1:K] * (X %*% ssX_V);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/codegen/rowAggPattern30.dml",
"new_path": "src/test/scripts/functions/codegen/rowAggPattern30.dml",
"diff": "#-------------------------------------------------------------\n-X = matrix(seq(1,6000), 600, 10);\n-ssX_V = matrix(seq(1,40), 10, 4);\n-P = matrix(seq(1,3000), 600, 5);\n+X = matrix(seq(1,6000), 600, 10)/6000;\n+ssX_V = matrix(seq(1,40), 10, 4)/40;\n+P = matrix(seq(1,3000), 600, 5)/3000;\nK = 4;\nif(1==1){}\nQ = P[,1:K] * (X %*% ssX_V);\nR = t(X) %*% (Q - P[,1:K] * rowSums(Q));\n+print(max(R));\n+\nwrite(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Modified codegen rowagg test (smaller value range of outputs)
This particular test case created outputs in the range of 10^19. Due to
our absolute epsilon of 10^{-10}, even tiny differences from R led to test
failures (this test failed on jenkins after an update to R 3.4). |
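The underlying numerical issue: near magnitude 10^19 a double carries only about 16 significant digits, so even bit-level discrepancies between two correct results exceed a fixed absolute epsilon of 10^{-10}. A small illustration of absolute versus relative tolerance (hypothetical helpers, not the project's test harness):

public class ToleranceCheck {
    static boolean closeAbs(double a, double b, double eps) {
        return Math.abs(a - b) <= eps;
    }
    static boolean closeRel(double a, double b, double eps) {
        return Math.abs(a - b) <= eps * Math.max(Math.abs(a), Math.abs(b));
    }
    public static void main(String[] args) {
        double a = 1e19;
        double b = 1e19 * (1 + 1e-15); // discrepancy at machine precision
        System.out.println(closeAbs(a, b, 1e-10)); // false: |a-b| is about 1e4
        System.out.println(closeRel(a, b, 1e-10)); // true: relatively identical
    }
}

Shrinking the test's value range, as the patch does, keeps the absolute check meaningful without changing the tolerance.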
49,717 | 26.07.2017 14:24:53 | 25,200 | 3fd8e495e26ba70eed22bf16c51a7bf69474c1c3 | fix for DMLConfig#setText | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.conf;\nimport java.io.ByteArrayInputStream;\nimport java.io.FileNotFoundException;\nimport java.io.IOException;\n+import java.io.InputStream;\nimport java.io.StringWriter;\nimport java.util.HashMap;\nimport java.util.Map;\n@@ -45,6 +46,7 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\nimport org.w3c.dom.Document;\nimport org.w3c.dom.Element;\n+import org.w3c.dom.Node;\nimport org.w3c.dom.NodeList;\nimport org.xml.sax.SAXException;\n@@ -97,6 +99,8 @@ public class DMLConfig\nprivate String _fileName = null;\nprivate Element _xmlRoot = null;\n+ private DocumentBuilder _documentBuilder = null;\n+ private Document _document = null;\nstatic\n{\n@@ -169,23 +173,30 @@ public class DMLConfig\n*/\nprivate void parseConfig () throws ParserConfigurationException, SAXException, IOException\n{\n- DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();\n- factory.setIgnoringComments(true); //ignore XML comments\n- DocumentBuilder builder = factory.newDocumentBuilder();\n- Document domTree = null;\n+ DocumentBuilder builder = getDocumentBuilder();\n+ _document = null;\nif( _fileName.startsWith(\"hdfs:\") || _fileName.startsWith(\"gpfs:\")\n|| IOUtilFunctions.isObjectStoreFileScheme(new Path(_fileName)) )\n{\nPath configFilePath = new Path(_fileName);\nFileSystem DFS = IOUtilFunctions.getFileSystem(configFilePath);\n- domTree = builder.parse(DFS.open(configFilePath));\n+ _document = builder.parse(DFS.open(configFilePath));\n}\nelse // config from local file system\n{\n- domTree = builder.parse(_fileName);\n+ _document = builder.parse(_fileName);\n}\n- _xmlRoot = domTree.getDocumentElement();\n+ _xmlRoot = _document.getDocumentElement();\n+ }\n+\n+ private DocumentBuilder getDocumentBuilder() throws ParserConfigurationException {\n+ if (_documentBuilder == null) {\n+ DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();\n+ factory.setIgnoringComments(true); //ignore XML comments\n+ _documentBuilder = factory.newDocumentBuilder();\n+ }\n+ return _documentBuilder;\n}\n/**\n@@ -242,20 +253,6 @@ public class DMLConfig\nreturn textVal;\n}\n- /**\n- * Method to update the string value of an element identified by a tag name\n- * @param element the DOM element\n- * @param tagName the tag name\n- * @param newTextValue the new string value\n- */\n- private static void setTextValue(Element element, String tagName, String newTextValue) {\n-\n- NodeList list = element.getElementsByTagName(tagName);\n- if (list != null && list.getLength() > 0) {\n- Element elem = (Element) list.item(0);\n- elem.getFirstChild().setNodeValue(newTextValue);\n- }\n- }\n/**\n* Method to update the key value\n@@ -264,17 +261,23 @@ public class DMLConfig\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic void setTextValue(String paramName, String paramValue) throws DMLRuntimeException {\n- if(_xmlRoot != null)\n- DMLConfig.setTextValue(_xmlRoot, paramName, paramValue);\n- else {\n- DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();\n- factory.setIgnoringComments(true); //ignore XML comments\n- DocumentBuilder builder;\n+ if(_xmlRoot != null) {\n+ NodeList list = _xmlRoot.getElementsByTagName(paramName);\n+ if (list != null && list.getLength() > 0) {\n+ Element elem = (Element) list.item(0);\n+ elem.getFirstChild().setNodeValue(paramValue);\n+ } else {\n+ Node value = _document.createTextNode(paramValue);\n+ Node element = _document.createElement(paramName);\n+ 
element.appendChild(value);\n+ _xmlRoot.appendChild(element);\n+ }\n+ } else {\ntry {\n- builder = factory.newDocumentBuilder();\n+ DocumentBuilder builder = getDocumentBuilder();\nString configString = \"<root><\" + paramName + \">\"+paramValue+\"</\" + paramName + \"></root>\";\n- Document domTree = builder.parse(new ByteArrayInputStream(configString.getBytes(\"UTF-8\")));\n- _xmlRoot = domTree.getDocumentElement();\n+ _document = builder.parse(new ByteArrayInputStream(configString.getBytes(\"UTF-8\")));\n+ _xmlRoot = _document.getDocumentElement();\n} catch (Exception e) {\nthrow new DMLRuntimeException(\"Unable to set config value\", e);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/unit/UtilsTest.java",
"new_path": "src/test/java/org/apache/sysml/test/unit/UtilsTest.java",
"diff": "package org.apache.sysml.test.unit;\n+import java.io.BufferedWriter;\n+import java.io.File;\n+import java.io.FileWriter;\n+import java.io.IOException;\nimport java.util.Arrays;\n+import org.apache.sysml.conf.DMLConfig;\n+import org.apache.sysml.parser.ParseException;\n+import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.junit.Assert;\nimport org.junit.Test;\n@@ -75,4 +82,57 @@ public class UtilsTest {\npublic void testParseListStringFail4() {\nGPUContextPool.parseListString(\"-1-4\", 6);\n}\n+\n+\n+ @Test\n+ public void testDMLConfig1() throws DMLRuntimeException{\n+ DMLConfig dmlConfig = new DMLConfig();\n+ dmlConfig.setTextValue(\"A\", \"a\");\n+ dmlConfig.setTextValue(\"B\", \"b\");\n+ dmlConfig.setTextValue(\"C\", \"2\");\n+ dmlConfig.setTextValue(\"D\", \"5\");\n+ dmlConfig.setTextValue(\"E\", \"5.01\");\n+\n+ Assert.assertEquals(\"a\", dmlConfig.getTextValue(\"A\"));\n+ Assert.assertEquals(\"b\", dmlConfig.getTextValue(\"B\"));\n+ Assert.assertEquals(2, dmlConfig.getIntValue(\"C\"));\n+ Assert.assertEquals(5, dmlConfig.getIntValue(\"D\"));\n+ Assert.assertEquals(5.01, dmlConfig.getDoubleValue(\"E\"), 1e-15);\n+\n+ dmlConfig.setTextValue(\"E\", \"a\");\n+ Assert.assertEquals(\"a\", dmlConfig.getTextValue(\"E\"));\n+ }\n+\n+\n+\n+ @Test\n+ public void testDMLConfig2() throws DMLRuntimeException, IOException, ParseException {\n+\n+ String testStr = \"<root>\"\n+ + \"<A>a</A>\"\n+ + \"<B>b</B>\"\n+ + \"<C>2</C>\"\n+ + \"<D>5</D>\"\n+ + \"<E>5.01</E>\"\n+ + \"</root>\";\n+ File temp = File.createTempFile(\"tempfile\", null);\n+ BufferedWriter bw = new BufferedWriter(new FileWriter(temp));\n+ bw.write(testStr);\n+ bw.close();\n+\n+ DMLConfig dmlConfig = new DMLConfig(temp.getAbsolutePath());\n+\n+ Assert.assertEquals(\"a\", dmlConfig.getTextValue(\"A\"));\n+ Assert.assertEquals(\"b\", dmlConfig.getTextValue(\"B\"));\n+ Assert.assertEquals(2, dmlConfig.getIntValue(\"C\"));\n+ Assert.assertEquals(5, dmlConfig.getIntValue(\"D\"));\n+ Assert.assertEquals(5.01, dmlConfig.getDoubleValue(\"E\"), 1e-15);\n+\n+ dmlConfig.setTextValue(\"E\", \"a\");\n+ Assert.assertEquals(\"a\", dmlConfig.getTextValue(\"E\"));\n+ }\n+\n+\n+\n+\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1806] fix for DMLConfig#setText |
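The fix makes setTextValue append a new element when the tag is missing, instead of silently dropping the update as the old helper did. A minimal standalone sketch of that set-or-create pattern (class name illustrative; the DOM calls are the standard org.w3c.dom API used in the diff):

import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

public class DomSetOrCreate {
    public static void setTextValue(Document doc, String tag, String value) {
        Element root = doc.getDocumentElement();
        NodeList list = root.getElementsByTagName(tag);
        if (list != null && list.getLength() > 0) {
            list.item(0).getFirstChild().setNodeValue(value); // update in place
        } else {
            Node elem = doc.createElement(tag); // tag absent: create and append
            elem.appendChild(doc.createTextNode(value));
            root.appendChild(elem);
        }
    }

    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
            .parse(new java.io.ByteArrayInputStream("<root><A>a</A></root>".getBytes("UTF-8")));
        setTextValue(doc, "A", "x"); // updates the existing element
        setTextValue(doc, "B", "y"); // creates the missing element
    }
}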
49,717 | 26.07.2017 15:37:06 | 25,200 | 2663ccd417e59908c3a461adfd217b667b58ea2d | [MINOR] fix for SYSTEMML_1795
The GPUContextPool.AVAILABLE_GPUS is read after the lops are
constructed, but the value needs to be read before. This patch is a fix
for that problem.
Closes | [
{
"change_type": "MODIFY",
"old_path": "bin/systemml-standalone.py",
"new_path": "bin/systemml-standalone.py",
"diff": "@@ -152,6 +152,7 @@ systemml_default_java_opts = \\\n'-cp ' + classpath + ' ' + \\\n'-Dlog4j.configuration=file:' + log4j_properties_path + ' ' \\\n'-Duser.dir=' + user_dir\n+# '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8111'\n# Reads in key-value pairs from the conf/systemml-env.sh file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysml/api/DMLScript.java",
"diff": "@@ -85,6 +85,7 @@ import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDHandler;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\nimport org.apache.sysml.runtime.matrix.CleanupMR;\nimport org.apache.sysml.runtime.matrix.mapred.MRConfigurationNames;\n@@ -659,13 +660,16 @@ public class DMLScript\n//print basic time and environment info\nprintStartExecInfo( dmlScriptStr );\n- //Step 1: parse configuration files\n+ //Step 1: parse configuration files & write any configuration specific global variables\nDMLConfig dmlconf = DMLConfig.readConfigurationFile(fnameOptConfig);\nConfigurationManager.setGlobalConfig(dmlconf);\nCompilerConfig cconf = OptimizerUtils.constructCompilerConfig(dmlconf);\nConfigurationManager.setGlobalConfig(cconf);\nLOG.debug(\"\\nDML config: \\n\" + dmlconf.getConfigInfo());\n+ // Sets the GPUs to use for this process (a range, all GPUs, comma separated list or a specific GPU)\n+ GPUContextPool.AVAILABLE_GPUS = dmlconf.getTextValue(DMLConfig.AVAILABLE_GPUS);\n+\n//Step 2: set local/remote memory if requested (for compile in AM context)\nif( dmlconf.getBooleanValue(DMLConfig.YARN_APPMASTER) ){\nDMLAppMasterUtils.setupConfigRemoteMaxMemory(dmlconf);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java",
"new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java",
"diff": "@@ -79,8 +79,6 @@ public class ScriptExecutorUtils {\nLibMatrixDNN.DISPLAY_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_DNN_STATS);\nDMLScript.FINEGRAINED_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_FINEGRAINED_STATS);\n- // Sets the GPUs to use for this process (a range, all GPUs, comma separated list or a specific GPU)\n- GPUContextPool.AVAILABLE_GPUS = dmlconf.getTextValue(DMLConfig.AVAILABLE_GPUS);\nStatistics.startRunTimer();\ntry {\n// run execute (w/ exception handling to ensure proper shutdown)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -51,6 +51,7 @@ import org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.utils.Explain;\nimport org.apache.sysml.utils.Explain.ExplainCounts;\nimport org.apache.sysml.utils.Explain.ExplainType;\n@@ -249,6 +250,9 @@ public class ScriptExecutor {\noldGPU = DMLScript.USE_ACCELERATOR;\nDMLScript.USE_ACCELERATOR = gpu;\nDMLScript.STATISTICS_COUNT = statisticsMaxHeavyHitters;\n+\n+ // Sets the GPUs to use for this process (a range, all GPUs, comma separated list or a specific GPU)\n+ GPUContextPool.AVAILABLE_GPUS = ConfigurationManager.getDMLConfig().getTextValue(DMLConfig.AVAILABLE_GPUS);\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -408,7 +408,8 @@ public class DMLConfig\nYARN_APPMASTER, YARN_APPMASTERMEM, YARN_MAPREDUCEMEM,\nCP_PARALLEL_OPS, CP_PARALLEL_IO, NATIVE_BLAS,\nCOMPRESSED_LINALG, CODEGEN, CODEGEN_LITERALS, CODEGEN_PLANCACHE,\n- EXTRA_GPU_STATS, EXTRA_DNN_STATS, EXTRA_FINEGRAINED_STATS\n+ EXTRA_GPU_STATS, EXTRA_DNN_STATS, EXTRA_FINEGRAINED_STATS,\n+ AVAILABLE_GPUS\n};\nStringBuilder sb = new StringBuilder();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContextPool.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContextPool.java",
"diff": "@@ -167,7 +167,7 @@ public class GPUContextPool {\npublic static ArrayList<Integer> parseListString(String str, int max) {\nArrayList<Integer> result = new ArrayList<>();\nstr = str.trim();\n- if (str == \"-1\") { // all\n+ if (str.equalsIgnoreCase(\"-1\")) { // all\nfor (int i=0; i<max; i++){\nresult.add(i);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] fix for SYSTEMML_1795
The GPUContextPool.AVAILABLE_GPUS is read after the lops are
constructed, but the value needs to be read before. This patch is a fix
for that problem.
Closes #592 |
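Besides moving the config read before lop construction, the patch replaces str == "-1" with an equalsIgnoreCase comparison. Reference equality only holds for interned string literals, so a value parsed from an XML file never matches; a two-line demonstration:

public class StringEquality {
    public static void main(String[] args) {
        String fromConfig = new String("-1"); // e.g. text parsed from a file
        System.out.println(fromConfig == "-1");                // false: identity
        System.out.println(fromConfig.equalsIgnoreCase("-1")); // true: content
    }
}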
49,738 | 26.07.2017 19:24:07 | 25,200 | 418497019b461b3f3de4c0b453cee76fc9b73d80 | Improved codegen candidate exploration algorithm
This patch makes two minor improvements to the codegen candidate
exploration algorithm for simplification and slightly better
performance. The performance improvements are due to iterating over
distinct templates and avoiding unnecessary object creation. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"diff": "@@ -506,23 +506,14 @@ public class SpoofCompiler\n//open initial operator plans, if possible\nfor( TemplateBase tpl : TemplateUtils.TEMPLATES )\n- if( tpl.open(hop) ) {\n- MemoTableEntrySet P = new MemoTableEntrySet(hop, tpl.getType(), false);\n- memo.addAll(hop, enumPlans(hop, -1, P, tpl, memo));\n- }\n+ if( tpl.open(hop) )\n+ memo.addAll(hop, enumPlans(hop, null, tpl, memo));\n//fuse and merge operator plans\n- for( Hop c : hop.getInput() ) {\n- if( memo.contains(c.getHopID()) )\n- for( MemoTableEntry me : memo.getDistinct(c.getHopID()) ) {\n- TemplateBase tpl = TemplateUtils.createTemplate(me.type, me.closed);\n- if( tpl.fuse(hop, c) ) {\n- int pos = hop.getInput().indexOf(c);\n- MemoTableEntrySet P = new MemoTableEntrySet(hop, tpl.getType(), pos, c.getHopID(), tpl.isClosed());\n- memo.addAll(hop, enumPlans(hop, pos, P, tpl, memo));\n- }\n- }\n- }\n+ for( Hop c : hop.getInput() )\n+ for( TemplateBase tpl : memo.getDistinctTemplates(c.getHopID()) )\n+ if( tpl.fuse(hop, c) )\n+ memo.addAll(hop, enumPlans(hop, c, tpl, memo));\n//close operator plans, if required\nif( memo.contains(hop.getHopID()) ) {\n@@ -546,15 +537,13 @@ public class SpoofCompiler\nmemo.addHop(hop);\n}\n- private static MemoTableEntrySet enumPlans(Hop hop, int pos, MemoTableEntrySet P, TemplateBase tpl, CPlanMemoTable memo) {\n- for(int k=0; k<hop.getInput().size(); k++)\n- if( k != pos ) {\n+ private static MemoTableEntrySet enumPlans(Hop hop, Hop c, TemplateBase tpl, CPlanMemoTable memo) {\n+ MemoTableEntrySet P = new MemoTableEntrySet(hop, c, tpl);\n+ for(int k=0; k<hop.getInput().size(); k++) {\nHop input2 = hop.getInput().get(k);\n- if( memo.contains(input2.getHopID(), true, tpl.getType(), TemplateType.CellTpl)\n- && tpl.merge(hop, input2) )\n+ if( input2 != c && tpl.merge(hop, input2)\n+ && memo.contains(input2.getHopID(), true, tpl.getType(), TemplateType.CellTpl))\nP.crossProduct(k, -1L, input2.getHopID());\n- else\n- P.crossProduct(k, -1L);\n}\nreturn P;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java",
"diff": "@@ -219,9 +219,16 @@ public class CPlanMemoTable\n}\npublic List<MemoTableEntry> getDistinct(long hopID) {\n+ return _plans.get(hopID).stream()\n+ .distinct().collect(Collectors.toList());\n+ }\n+\n+ public List<TemplateBase> getDistinctTemplates(long hopID) {\n+ if(!contains(hopID))\n+ return Collections.emptyList();\n//return distinct entries wrt type and closed attributes\nreturn _plans.get(hopID).stream()\n- .map(p -> new MemoTableEntry(p.type,-1,-1,-1,p.size,p.closed))\n+ .map(p -> TemplateUtils.createTemplate(p.type, p.closed))\n.distinct().collect(Collectors.toList());\n}\n@@ -327,11 +334,14 @@ public class CPlanMemoTable\n&& !(!isPlanRef(1) && that.isPlanRef(1))\n&& !(!isPlanRef(2) && that.isPlanRef(2)));\n}\n-\n@Override\npublic int hashCode() {\n- return Arrays.hashCode(\n- new long[]{(long)type.ordinal(), input1, input2, input3});\n+ int h = UtilFunctions.intHashCode(type.ordinal(), Long.hashCode(input1));\n+ h = UtilFunctions.intHashCode(h, Long.hashCode(input2));\n+ h = UtilFunctions.intHashCode(h, Long.hashCode(input3));\n+ h = UtilFunctions.intHashCode(h, size);\n+ h = UtilFunctions.intHashCode(h, Boolean.hashCode(closed));\n+ return h;\n}\n@Override\npublic boolean equals(Object obj) {\n@@ -339,7 +349,8 @@ public class CPlanMemoTable\nreturn false;\nMemoTableEntry that = (MemoTableEntry)obj;\nreturn type == that.type && input1 == that.input1\n- && input2 == that.input2 && input3 == that.input3;\n+ && input2 == that.input2 && input3 == that.input3\n+ && size == that.size && closed == that.closed;\n}\n@Override\npublic String toString() {\n@@ -360,18 +371,16 @@ public class CPlanMemoTable\n{\npublic ArrayList<MemoTableEntry> plans = new ArrayList<MemoTableEntry>();\n- public MemoTableEntrySet(Hop hop, TemplateType type, boolean close) {\n- int size = (hop instanceof IndexingOp) ? 1 : hop.getInput().size();\n- plans.add(new MemoTableEntry(type, -1, -1, -1, size, close));\n- }\n-\n- public MemoTableEntrySet(Hop hop, TemplateType type, int pos, long hopID, boolean close) {\n+ public MemoTableEntrySet(Hop hop, Hop c, TemplateBase tpl) {\n+ int pos = (c != null) ? hop.getInput().indexOf(c) : -1;\nint size = (hop instanceof IndexingOp) ? 1 : hop.getInput().size();\n- plans.add(new MemoTableEntry(type, (pos==0)?hopID:-1,\n- (pos==1)?hopID:-1, (pos==2)?hopID:-1, size));\n+ plans.add(new MemoTableEntry(tpl.getType(), (pos==0)?c.getHopID():-1,\n+ (pos==1)?c.getHopID():-1, (pos==2)?c.getHopID():-1, size, tpl.isClosed()));\n}\npublic void crossProduct(int pos, Long... refs) {\n+ if( refs.length==1 && refs[0] == -1 )\n+ return; //unmodified plan set\nArrayList<MemoTableEntry> tmp = new ArrayList<MemoTableEntry>();\nfor( MemoTableEntry me : plans )\nfor( Long ref : refs )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateBase.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateBase.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.hops.codegen.template;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\nimport org.apache.sysml.runtime.matrix.data.Pair;\n+import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class TemplateBase\n{\n@@ -43,7 +44,7 @@ public abstract class TemplateBase\n}\nprotected final TemplateType _type;\n- protected boolean _closed = false;\n+ protected final boolean _closed;\nprotected TemplateBase(TemplateType type) {\nthis(type, false);\n@@ -62,6 +63,21 @@ public abstract class TemplateBase\nreturn _closed;\n}\n+ @Override\n+ public int hashCode() {\n+ return UtilFunctions.intHashCode(\n+ _type.ordinal(), Boolean.hashCode(_closed));\n+ }\n+\n+ @Override\n+ public boolean equals(Object obj) {\n+ if( !(obj instanceof TemplateBase) )\n+ return false;\n+ TemplateBase that = (TemplateBase)obj;\n+ return _type == that._type\n+ && _closed == that._closed;\n+ }\n+\n/////////////////////////////////////////////\n// Open-Fuse-Merge-Close interface\n// (for candidate generation and exploration)\n@@ -106,13 +122,6 @@ public abstract class TemplateBase\n*/\npublic abstract CloseType close(Hop hop);\n- /**\n- * Mark the template as closed either invalid or valid.\n- */\n- public void close() {\n- _closed = true;\n- }\n-\n/////////////////////////////////////////////\n// CPlan construction interface\n// (for plan creation of selected candidates)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -54,7 +54,9 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic class TemplateUtils\n{\n- public static final TemplateBase[] TEMPLATES = new TemplateBase[]{new TemplateRow(), new TemplateCell(), new TemplateOuterProduct()};\n+ public static final TemplateBase[] TEMPLATES = new TemplateBase[]{\n+ new TemplateRow(), new TemplateCell(), new TemplateOuterProduct()};\n+ //note: multiagg not included because it's a composite template\npublic static boolean isVector(Hop hop) {\nreturn (hop.getDataType() == DataType.MATRIX\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/codegen/rowAggPattern30.dml",
"new_path": "src/test/scripts/functions/codegen/rowAggPattern30.dml",
"diff": "@@ -29,6 +29,4 @@ if(1==1){}\nQ = P[,1:K] * (X %*% ssX_V);\nR = t(X) %*% (Q - P[,1:K] * rowSums(Q));\n-print(max(R));\n-\nwrite(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1812] Improved codegen candidate exploration algorithm
This patch makes two minor improvements to the codegen candidate
exploration algorithm for simplification and slightly better
performance. The performance improvements are due to iterating over
distinct templates and avoiding unnecessary object creation. |
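getDistinctTemplates relies on stream().distinct(), which in turn relies on equals and hashCode agreeing on the compared fields; the patch widens both to include size and closed. A compact illustration of the contract with a simplified entry type (not the real MemoTableEntry):

import java.util.Arrays;
import java.util.List;

public class MemoEntryDemo {
    static final class Entry {
        final int type; final long in1; final boolean closed;
        Entry(int type, long in1, boolean closed) {
            this.type = type; this.in1 = in1; this.closed = closed;
        }
        @Override public int hashCode() {
            int h = Integer.hashCode(type);
            h = 31 * h + Long.hashCode(in1);
            return 31 * h + Boolean.hashCode(closed); // closed participates
        }
        @Override public boolean equals(Object o) {
            if (!(o instanceof Entry)) return false;
            Entry e = (Entry) o;
            return type == e.type && in1 == e.in1 && closed == e.closed;
        }
    }
    public static void main(String[] args) {
        List<Entry> plans = Arrays.asList(
            new Entry(1, 7, false), new Entry(1, 7, false), new Entry(1, 7, true));
        // prints 2: duplicates collapse, but the closed variant stays distinct
        System.out.println(plans.stream().distinct().count());
    }
}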
49,738 | 26.07.2017 21:48:33 | 25,200 | 8dcb487e4e4bb3b153690be351111f2ada1b58b1 | [HOTFIX][SYSTEMML-1761] Fix wsloss special cases w/ sparse factors
This patch fixes null pointer exceptions of wsloss with sparse factors,
whose implementations mistakenly access the non-existing weight matrix. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"diff": "@@ -2252,7 +2252,7 @@ public class LibMatrixMult\nelse if( wt==WeightsType.POST_NZ )\n{\n// approach: iterate over W, point-wise in order to exploit sparsity\n- if( mW.sparse ) //SPARSE\n+ if( mX.sparse ) //SPARSE\n{\nSparseBlock x = mX.sparseBlock;\n@@ -2303,7 +2303,7 @@ public class LibMatrixMult\n//parallel task computes sum(X^2)-sum(2*X*(U%*%t(V)))) and the last term\n//sum((t(U)%*%U)*(t(V)%*%V)) is computed once via two tsmm operations.\n- if( mW.sparse ) { //SPARSE\n+ if( mX.sparse ) { //SPARSE\nSparseBlock x = mX.sparseBlock;\nfor( int i=rl; i<ru; i++ ) {\nif( x.isEmpty(i) ) continue;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX][SYSTEMML-1761] Fix wsloss special cases w/ sparse factors
This patch fixes null pointer exceptions of wsloss with sparse factors,
whose implementations mistakenly access the non-existing weight matrix. |
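The essence of the hotfix: the branch must test the representation of the matrix it actually traverses (X), not a companion weight matrix that is absent in the POST_NZ and BASIC cases. A sketch with stand-in types (not the real MatrixBlock):

public class SparsityGuard {
    static class Matrix { double[] dense; double[][] sparseRows; boolean sparse; }

    static double sumNonZeros(Matrix X) {
        double s = 0;
        if (X.sparse) { // was effectively W.sparse: NPE when W does not exist
            for (double[] row : X.sparseRows)
                if (row != null)
                    for (double v : row) s += v;
        } else {
            for (double v : X.dense) s += v;
        }
        return s;
    }
}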
49,703 | 01.08.2017 11:07:45 | 25,200 | a2db1ad895f09ccc7e56b43d8b6baf21fc887fe4 | Compile time for statistics via MLContext
If statistics is on via the MLContext API, compute and display the
program compile-time.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -183,10 +183,8 @@ public class ScriptExecutor {\nreturn;\ntry {\n- ExplainType explainType = (explainLevel != null) ?\n- explainLevel.getExplainType() : ExplainType.RUNTIME;\n- System.out.println(Explain.display(\n- dmlProgram, runtimeProgram, explainType, null));\n+ ExplainType explainType = (explainLevel != null) ? explainLevel.getExplainType() : ExplainType.RUNTIME;\n+ System.out.println(Explain.display(dmlProgram, runtimeProgram, explainType, null));\n} catch (Exception e) {\nthrow new MLContextException(\"Exception occurred while explaining dml program\", e);\n}\n@@ -298,6 +296,9 @@ public class ScriptExecutor {\n// main steps in script execution\nsetup(script);\n+ if (statistics) {\n+ Statistics.startCompileTimer();\n+ }\nparseScript();\nliveVariableAnalysis();\nvalidateScript();\n@@ -311,6 +312,9 @@ public class ScriptExecutor {\ncountCompiledMRJobsAndSparkInstructions();\ninitializeCachingAndScratchSpace();\ncleanupRuntimeProgram();\n+ if (statistics) {\n+ Statistics.stopCompileTimer();\n+ }\ntry {\ncreateAndInitializeExecutionContext();\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1804] Compile time for statistics via MLContext
If statistics is on via the MLContext API, compute and display the
program compile-time.
Closes #595. |
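A start/stop timer in the spirit of Statistics.startCompileTimer/stopCompileTimer, bracketing the parse-through-codegen steps listed in the diff (names and print format are illustrative):

public class CompileTimer {
    private static long start, elapsed;

    public static void startCompileTimer() { start = System.nanoTime(); }
    public static void stopCompileTimer() { elapsed += System.nanoTime() - start; }
    public static double compileSeconds() { return elapsed / 1e9; }

    public static void main(String[] args) {
        startCompileTimer();
        // ... parse, validate, construct HOPs/LOPs, generate runtime program ...
        stopCompileTimer();
        System.out.printf("Total compilation time: %.3f sec.%n", compileSeconds());
    }
}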
49,703 | 01.08.2017 14:58:22 | 25,200 | 8c11e8c075d5fcf8e2b190dd34c3cd1be8d11186 | [MINOR] Refactoring - boolean constructors, returns, semicolons
1) Boolean constructor creates new Boolean instances, so follow best
practice of using Boolean.TRUE, Boolean.FALSE, and Boolean.valueOf()
rather than the Boolean constructor. See Boolean constructor javadocs
for more information.
2) Remove unnecessary return statements
3) Remove extraneous semicolons
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLResults.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLResults.java",
"diff": "@@ -1980,7 +1980,7 @@ public class MLResults {\nprivate <T> T outputValue(String outputName) {\nData data = getData(outputName);\nif (data instanceof BooleanObject) {\n- return (T) new Boolean(((BooleanObject) data).getBooleanValue());\n+ return (T) Boolean.valueOf(((BooleanObject) data).getBooleanValue());\n} else if (data instanceof DoubleObject) {\nreturn (T) new Double(((DoubleObject) data).getDoubleValue());\n} else if (data instanceof IntObject) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"diff": "@@ -1217,7 +1217,6 @@ public class BuiltinFunctionExpression extends DataIdentifier\nraiseValidateError(\"Unsupported function \"+op, false, LanguageErrorCodes.INVALID_PARAMETERS);\n}\n}\n- return;\n}\nprivate void setBinaryOutputProperties(DataIdentifier output)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java",
"new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java",
"diff": "@@ -175,8 +175,6 @@ public class DMLTranslator\nconstVars = sb.getConstOut();\n}\n}\n-\n- return;\n}\npublic void liveVariableAnalysis(DMLProgram dmlp) throws LanguageException {\n@@ -240,8 +238,6 @@ public class DMLTranslator\ncurrentLiveOut = sb.analyze(currentLiveOut);\n}\n}\n- return;\n-\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/DataExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/DataExpression.java",
"diff": "@@ -1671,7 +1671,6 @@ public class DataExpression extends DataIdentifier\ndefault:\nraiseValidateError(\"Unsupported Data expression\"+ this.getOpCode(), false, LanguageErrorCodes.INVALID_PARAMETERS); //always unconditional\n}\n- return;\n}\nprivate void performConstantPropagationRand( HashMap<String, ConstIdentifier> currConstVars )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/FunctionCallIdentifier.java",
"new_path": "src/main/java/org/apache/sysml/parser/FunctionCallIdentifier.java",
"diff": "@@ -163,8 +163,6 @@ public class FunctionCallIdentifier extends DataIdentifier\nfor(int i=0; i < fstmt.getOutputParams().size(); i++) {\n_outputs[i] = new DataIdentifier(fstmt.getOutputParams().get(i));\n}\n-\n- return;\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/ParameterizedBuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/ParameterizedBuiltinFunctionExpression.java",
"diff": "@@ -244,7 +244,6 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\nraiseValidateError(\"Unsupported parameterized function \"+ getOpCode(),\nfalse, LanguageErrorCodes.UNSUPPORTED_EXPRESSION);\n}\n- return;\n}\n@Override\n@@ -277,8 +276,6 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\ndefault: //always unconditional (because unsupported operation)\nraiseValidateError(\"Unsupported parameterized function \"+ getOpCode(), false, LanguageErrorCodes.INVALID_PARAMETERS);\n}\n-\n- return;\n}\n// example: A = transformapply(target=X, meta=M, spec=s)\n@@ -645,7 +642,6 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\noutput.setDataType(DataType.SCALAR);\noutput.setValueType(ValueType.DOUBLE);\noutput.setDimensions(0, 0);\n- return;\n}\nprivate void validateCastAsString(DataIdentifier output, boolean conditional)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ProgramConverter.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ProgramConverter.java",
"diff": "@@ -1427,7 +1427,7 @@ public class ProgramConverter\nthrows DMLRuntimeException\n{\nArrayList<ProgramBlock> pbs = new ArrayList<ProgramBlock>();\n- String tmpdata = in.substring(PARFOR_PBS_BEGIN.length(),in.length()-PARFOR_PBS_END.length()); ;\n+ String tmpdata = in.substring(PARFOR_PBS_BEGIN.length(),in.length()-PARFOR_PBS_END.length());\nHierarchyAwareStringTokenizer st = new HierarchyAwareStringTokenizer(tmpdata, ELEMENT_DELIM);\nwhile( st.hasMoreTokens() )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/VariableCPInstruction.java",
"diff": "@@ -492,7 +492,7 @@ public class VariableCPInstruction extends CPInstruction\nString fname = getInput2().getName();\n// check if unique filename needs to be generated\n- boolean overrideFileName = ((BooleanObject) ec.getScalarInput(getInput3().getName(), getInput3().getValueType(), true)).getBooleanValue();; //!(input1.getName().startsWith(\"p\")); //\n+ boolean overrideFileName = ((BooleanObject) ec.getScalarInput(getInput3().getName(), getInput3().getValueType(), true)).getBooleanValue();\nif ( overrideFileName ) {\nfname = fname + \"_\" + _uniqueVarID.getNextID();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"diff": "@@ -2986,7 +2986,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nfor(int c=0; c<clen; c++)\n{\nbuffer._sum=this.quickGetValue(r, c);\n- buffer._correction=cor.quickGetValue(r, 0);;\n+ buffer._correction=cor.quickGetValue(r, 0);\nbuffer=(KahanObject) aggOp.increOp.fn.execute(buffer, newWithCor.quickGetValue(r, c), newWithCor.quickGetValue(r, c+1));\nquickSetValue(r, c, buffer._sum);\ncor.quickSetValue(r, 0, buffer._correction);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/frame/FrameCopyTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/frame/FrameCopyTest.java",
"diff": "@@ -137,7 +137,7 @@ public class FrameCopyTest extends AutomatedTestBase\nfor( int j=0; j<lschema.length; j++ ) {\nswitch( lschema[j] ) {\ncase STRING: frame.set(updateRow, j, \"String:\"+ frame.get(updateRow, j)); break;\n- case BOOLEAN: frame.set(updateRow, j, ((Boolean)frame.get(updateRow, j))?(new Boolean(false)):(new Boolean(true))); break;\n+ case BOOLEAN: frame.set(updateRow, j, ((Boolean)frame.get(updateRow, j))?Boolean.FALSE:Boolean.TRUE); break;\ncase INT: frame.set(updateRow, j, (Long)frame.get(updateRow, j) * 2 + 5); break;\ncase DOUBLE: frame.set(updateRow, j, (Double)frame.get(updateRow, j) * 2 + 7); break;\ndefault: throw new RuntimeException(\"Unsupported value type: \"+lschema[j]);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/indexing/IndexRangeBlockAlignmentTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/indexing/IndexRangeBlockAlignmentTest.java",
"diff": "@@ -38,49 +38,49 @@ public class IndexRangeBlockAlignmentTest extends AutomatedTestBase\n@Test\npublic void testRowBlockFirstColumn() {\n- Assert.assertEquals(new Boolean(true),\n+ Assert.assertEquals(Boolean.TRUE,\nOptimizerUtils.isIndexingRangeBlockAligned(2001, 4000, 1, 1736, BRLEN, BCLEN));\n}\n@Test\npublic void testRowBlockColBlock() {\n- Assert.assertEquals(new Boolean(true),\n+ Assert.assertEquals(Boolean.TRUE,\nOptimizerUtils.isIndexingRangeBlockAligned(2001, 4000, 7001, 9000, BRLEN, BCLEN));\n}\n@Test\npublic void testSingleRowBlockFirstColumn() {\n- Assert.assertEquals(new Boolean(true),\n+ Assert.assertEquals(Boolean.TRUE,\nOptimizerUtils.isIndexingRangeBlockAligned(2500, 2600, 1, 1736, BRLEN, BCLEN));\n}\n@Test\npublic void testSingleRowBlockColBlock() {\n- Assert.assertEquals(new Boolean(true),\n+ Assert.assertEquals(Boolean.TRUE,\nOptimizerUtils.isIndexingRangeBlockAligned(2500, 2600, 7001, 9000, BRLEN, BCLEN));\n}\n@Test\npublic void testRowBlockFirstColumnNeg() {\n- Assert.assertEquals(new Boolean(false),\n+ Assert.assertEquals(Boolean.FALSE,\nOptimizerUtils.isIndexingRangeBlockAligned(2501, 4500, 1, 1736, BRLEN, BCLEN));\n}\n@Test\npublic void testRowBlockColBlockNeg() {\n- Assert.assertEquals(new Boolean(false),\n+ Assert.assertEquals(Boolean.FALSE,\nOptimizerUtils.isIndexingRangeBlockAligned(2501, 4500, 7001, 9000, BRLEN, BCLEN));\n}\n@Test\npublic void testSingleRowBlockFirstColumnNeg() {\n- Assert.assertEquals(new Boolean(false),\n+ Assert.assertEquals(Boolean.FALSE,\nOptimizerUtils.isIndexingRangeBlockAligned(2500, 3001, 1, 1736, BRLEN, BCLEN));\n}\n@Test\npublic void testSingleRowBlockColBlockNeg() {\n- Assert.assertEquals(new Boolean(false),\n+ Assert.assertEquals(Boolean.FALSE,\nOptimizerUtils.isIndexingRangeBlockAligned(2500, 3001, 7001, 9000, BRLEN, BCLEN));\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteCTableToRExpandTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteCTableToRExpandTest.java",
"diff": "@@ -124,7 +124,7 @@ public class RewriteCTableToRExpandTest extends AutomatedTestBase\ncheckDMLMetaDataFile(\"R\", new MatrixCharacteristics(rrows, rcols, 1, 1));\n//check for applied rewrite\n- Assert.assertEquals(new Boolean(testname.equals(TEST_NAME1)||testname.equals(TEST_NAME2)),\n- new Boolean(heavyHittersContainsSubString(\"rexpand\")));\n+ Assert.assertEquals(Boolean.valueOf(testname.equals(TEST_NAME1) || testname.equals(TEST_NAME2)),\n+ Boolean.valueOf(heavyHittersContainsSubString(\"rexpand\")));\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/ternary/TernaryAggregateTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/ternary/TernaryAggregateTest.java",
"diff": "@@ -254,8 +254,8 @@ public class TernaryAggregateTest extends AutomatedTestBase\nif( rewrites && et != ExecType.MR ) {\nString opcode = ((et == ExecType.SPARK) ? Instruction.SP_INST_PREFIX : \"\") +\n(((testname.equals(TEST_NAME1) || vectors ) ? \"tak+*\" : \"tack+*\"));\n- Assert.assertEquals(new Boolean(true), new Boolean(\n- Statistics.getCPHeavyHitterOpCodes().contains(opcode)));\n+ Assert.assertEquals(Boolean.TRUE,\n+ Boolean.valueOf(Statistics.getCPHeavyHitterOpCodes().contains(opcode)));\n}\n}\nfinally {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Refactoring - boolean constructors, returns, semicolons
1) The Boolean constructor creates new Boolean instances, so follow the
best practice of using Boolean.TRUE, Boolean.FALSE, and Boolean.valueOf()
rather than the Boolean constructor. See the Boolean constructor javadocs
for more information.
2) Remove unnecessary return statements
3) Remove extraneous semicolons
Closes #583. |
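
A minimal sketch of the pattern this refactoring applies (the class name and values below are illustrative, not taken from the patch): Boolean.valueOf() and the Boolean.TRUE/FALSE constants reuse cached instances, whereas the constructor allocates a new object on every call.

public class BooleanStyle {
    public static void main(String[] args) {
        boolean flag = args.length > 0;                      // arbitrary primitive flag
        Boolean bad  = new Boolean(flag);                    // allocates a fresh object on every call
        Boolean good = Boolean.valueOf(flag);                // reuses the cached Boolean.TRUE/Boolean.FALSE
        Boolean lit  = flag ? Boolean.TRUE : Boolean.FALSE;  // equivalent, fully explicit
        System.out.println(good == lit);                     // true: both are the cached instance
        System.out.println(bad == good);                     // false: 'bad' is a distinct object
        System.out.println(bad.equals(good));                // true: value equality is unaffected
    }
}

Note that reference comparison (==) works for the cached constants but not for freshly constructed instances, which is why the equals-based assertions in the tests above are unaffected by the change.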
49,738 | 01.08.2017 14:10:53 | 25,200 | 61a0931d8ea5174ba19e3ed107925c5d42e07fb5 | Fix parallel csv frame reader w/ unknowns (shutdown)
This patch fixes an issue of missing process exit after successful
script execution. The root cause was a missing thread pool shutdown in
the parallel csv frame reader w/ unknown input sizes. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -22,7 +22,6 @@ package org.apache.sysml.conf;\nimport java.io.ByteArrayInputStream;\nimport java.io.FileNotFoundException;\nimport java.io.IOException;\n-import java.io.InputStream;\nimport java.io.StringWriter;\nimport java.util.HashMap;\nimport java.util.Map;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/FrameReaderTextCSVParallel.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/FrameReaderTextCSVParallel.java",
"diff": "@@ -67,7 +67,8 @@ public class FrameReaderTextCSVParallel extends FrameReaderTextCSV\ntry\n{\n- ExecutorService pool = Executors.newFixedThreadPool(numThreads);\n+ ExecutorService pool = Executors.newFixedThreadPool(\n+ Math.min(numThreads, splits.length));\n//compute num rows per split\nArrayList<CountRowsTask> tasks = new ArrayList<CountRowsTask>();\n@@ -126,6 +127,9 @@ public class FrameReaderTextCSVParallel extends FrameReaderTextCSV\ncatch (Exception e) {\nthrow new IOException(\"Failed parallel read of text csv input.\", e);\n}\n+ finally {\n+ pool.shutdown();\n+ }\nreturn new Pair<Integer,Integer>(nrow, ncol);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1823] Fix parallel csv frame reader w/ unknowns (shutdown)
This patch fixes an issue of missing process exit after successful
script execution. The root cause was a missing thread pool shutdown in
the parallel csv frame reader w/ unknown input sizes. |
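
The root cause generalizes beyond this reader: worker threads of a fixed thread pool are non-daemon, so a forgotten shutdown() keeps the JVM alive even after main() returns. A minimal standalone sketch of the fixed pattern (names are illustrative, not from the reader code):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PoolShutdown {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            pool.submit(() -> System.out.println("work")).get(); // some parallel work
        }
        finally {
            pool.shutdown(); // without this, idle non-daemon workers keep the JVM alive
        }
    }
}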
49,736 | 03.08.2017 09:06:24 | 28,800 | ac1cf093ad0b47cb6a0f0d48c4deb276b4ae1fa6 | [SYSTEMML-1658] Visualize Hop DAG for explaining the optimizer
Also added a utility to print Java output in a notebook.
Fixed a bug in dmlFromResource.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java",
"diff": "@@ -46,7 +46,6 @@ import org.apache.sysml.runtime.matrix.MatrixFormatMetaData;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.utils.Explain.ExplainType;\nimport org.apache.sysml.utils.MLContextProxy;\n-\n/**\n* The MLContext API offers programmatic access to SystemML on Spark from\n* languages such as Scala, Java, and Python.\n@@ -288,6 +287,8 @@ public class MLContext {\nMLContextUtil.setDefaultConfig();\n}\n+\n+\n/**\n* Set configuration property, such as\n* {@code setConfigProperty(\"localtmpdir\", \"/tmp/systemml\")}.\n@@ -306,6 +307,7 @@ public class MLContext {\n}\n}\n+\n/**\n* Execute a DML or PYDML Script.\n*\n@@ -358,6 +360,16 @@ public class MLContext {\n}\n}\n+ /**\n+ * Sets the script that is being executed\n+ *\n+ * @param executionScript\n+ * script that is being executed\n+ */\n+ public void setExecutionScript(Script executionScript) {\n+ this.executionScript = executionScript;\n+ }\n+\n/**\n* Set SystemML configuration based on a configuration file.\n*\n@@ -488,6 +500,15 @@ public class MLContext {\nreturn this.gpu;\n}\n+ /**\n+ * Whether or not the \"force\" GPU mode is enabled.\n+ *\n+ * @return true if enabled, false otherwise\n+ */\n+ public boolean isForceGPU() {\n+ return this.forceGPU;\n+ }\n+\n/**\n* Used internally by MLContextProxy.\n*\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java",
"diff": "@@ -23,6 +23,7 @@ import java.io.File;\nimport java.io.FileNotFoundException;\nimport java.net.URL;\nimport java.util.ArrayList;\n+import java.util.Date;\nimport java.util.HashMap;\nimport java.util.Map;\nimport java.util.Map.Entry;\n@@ -35,6 +36,7 @@ import javax.xml.parsers.DocumentBuilderFactory;\nimport org.apache.commons.lang3.ArrayUtils;\nimport org.apache.commons.lang3.StringUtils;\nimport org.apache.commons.lang3.text.WordUtils;\n+import org.apache.spark.SparkConf;\nimport org.apache.spark.SparkContext;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\n@@ -52,8 +54,11 @@ import org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.CompilerConfig.ConfigType;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n+import org.apache.sysml.hops.HopsException;\n+import org.apache.sysml.parser.LanguageException;\nimport org.apache.sysml.parser.ParseException;\nimport org.apache.sysml.parser.Statement;\n+import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.ForProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.IfProgramBlock;\n@@ -63,6 +68,8 @@ import org.apache.sysml.runtime.controlprogram.ProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.WhileProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.instructions.Instruction;\nimport org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.Data;\n@@ -73,6 +80,7 @@ import org.apache.sysml.runtime.instructions.cp.VariableCPInstruction;\nimport org.apache.sysml.runtime.matrix.data.FrameBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n+import org.apache.sysml.utils.Explain;\nimport org.apache.sysml.utils.MLContextProxy;\nimport org.w3c.dom.Document;\nimport org.w3c.dom.Node;\n@@ -84,6 +92,106 @@ import org.w3c.dom.NodeList;\n*/\npublic final class MLContextUtil {\n+ /**\n+ * Get HOP DAG in dot format for a DML or PYDML Script.\n+ *\n+ * @param mlCtx\n+ * MLContext object.\n+ * @param script\n+ * The DML or PYDML Script object to execute.\n+ * @param lines\n+ * Only display the hops that have begin and end line number\n+ * equals to the given integers.\n+ * @param performHOPRewrites\n+ * should perform static rewrites, perform\n+ * intra-/inter-procedural analysis to propagate size information\n+ * into functions and apply dynamic rewrites\n+ * @param withSubgraph\n+ * If false, the dot graph will be created without subgraphs for\n+ * statement blocks.\n+ * @return hop DAG in dot format\n+ * @throws LanguageException\n+ * if error occurs\n+ * @throws DMLRuntimeException\n+ * if error occurs\n+ * @throws HopsException\n+ * if error occurs\n+ */\n+ public static String getHopDAG(MLContext mlCtx, Script script, ArrayList<Integer> lines,\n+ boolean performHOPRewrites, boolean withSubgraph) throws HopsException, DMLRuntimeException,\n+ LanguageException {\n+ return getHopDAG(mlCtx, script, lines, null, performHOPRewrites, withSubgraph);\n+ }\n+\n+ /**\n+ * Get HOP DAG in dot format for a DML or 
PYDML Script.\n+ *\n+ * @param mlCtx\n+ * MLContext object.\n+ * @param script\n+ * The DML or PYDML Script object to execute.\n+ * @param lines\n+ * Only display the hops that have begin and end line number\n+ * equals to the given integers.\n+ * @param newConf\n+ * Spark Configuration.\n+ * @param performHOPRewrites\n+ * should perform static rewrites, perform\n+ * intra-/inter-procedural analysis to propagate size information\n+ * into functions and apply dynamic rewrites\n+ * @param withSubgraph\n+ * If false, the dot graph will be created without subgraphs for\n+ * statement blocks.\n+ * @return hop DAG in dot format\n+ * @throws LanguageException\n+ * if error occurs\n+ * @throws DMLRuntimeException\n+ * if error occurs\n+ * @throws HopsException\n+ * if error occurs\n+ */\n+ public static String getHopDAG(MLContext mlCtx, Script script, ArrayList<Integer> lines, SparkConf newConf,\n+ boolean performHOPRewrites, boolean withSubgraph) throws HopsException, DMLRuntimeException,\n+ LanguageException {\n+ SparkConf oldConf = mlCtx.getSparkSession().sparkContext().getConf();\n+ SparkExecutionContext.SparkClusterConfig systemmlConf = SparkExecutionContext.getSparkClusterConfig();\n+ long oldMaxMemory = InfrastructureAnalyzer.getLocalMaxMemory();\n+ try {\n+ if (newConf != null) {\n+ systemmlConf.analyzeSparkConfiguation(newConf);\n+ InfrastructureAnalyzer.setLocalMaxMemory(newConf.getSizeAsBytes(\"spark.driver.memory\"));\n+ }\n+ ScriptExecutor scriptExecutor = new ScriptExecutor();\n+ scriptExecutor.setExecutionType(mlCtx.getExecutionType());\n+ scriptExecutor.setGPU(mlCtx.isGPU());\n+ scriptExecutor.setForceGPU(mlCtx.isForceGPU());\n+ scriptExecutor.setInit(mlCtx.isInitBeforeExecution());\n+ if (mlCtx.isInitBeforeExecution()) {\n+ mlCtx.setInitBeforeExecution(false);\n+ }\n+ scriptExecutor.setMaintainSymbolTable(mlCtx.isMaintainSymbolTable());\n+\n+ Long time = new Long((new Date()).getTime());\n+ if ((script.getName() == null) || (script.getName().equals(\"\"))) {\n+ script.setName(time.toString());\n+ }\n+\n+ mlCtx.setExecutionScript(script);\n+ scriptExecutor.compile(script, performHOPRewrites);\n+ Explain.reset();\n+ // To deal with potential Py4J issues\n+ lines = lines.size() == 1 && lines.get(0) == -1 ? new ArrayList<Integer>() : lines;\n+ return Explain.getHopDAG(scriptExecutor.dmlProgram, lines, withSubgraph);\n+ } catch (RuntimeException e) {\n+ throw new MLContextException(\"Exception when compiling script\", e);\n+ } finally {\n+ if (newConf != null) {\n+ systemmlConf.analyzeSparkConfiguation(oldConf);\n+ InfrastructureAnalyzer.setLocalMaxMemory(oldMaxMemory);\n+ }\n+ }\n+ }\n+\n/**\n* Basic data types supported by the MLContext API\n*/\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -264,8 +264,15 @@ public class ScriptExecutor {\nDMLScript.STATISTICS_COUNT = DMLOptions.defaultOptions.statsCount;\n}\n+ public void compile(Script script) {\n+ compile(script, true);\n+ }\n+\n/**\n- * Execute a DML or PYDML script. This is broken down into the following\n+ * Compile a DML or PYDML script. This will help analysis of DML programs\n+ * that have dynamic recompilation flag set to false without actually executing it.\n+ *\n+ * This is broken down into the following\n* primary methods:\n*\n* <ol>\n@@ -283,16 +290,14 @@ public class ScriptExecutor {\n* <li>{@link #countCompiledMRJobsAndSparkInstructions()}</li>\n* <li>{@link #initializeCachingAndScratchSpace()}</li>\n* <li>{@link #cleanupRuntimeProgram()}</li>\n- * <li>{@link #createAndInitializeExecutionContext()}</li>\n- * <li>{@link #executeRuntimeProgram()}</li>\n- * <li>{@link #cleanupAfterExecution()}</li>\n* </ol>\n*\n* @param script\n- * the DML or PYDML script to execute\n- * @return the results as a MLResults object\n+ * the DML or PYDML script to compile\n+ * @param performHOPRewrites\n+ * should perform static rewrites, perform intra-/inter-procedural analysis to propagate size information into functions and apply dynamic rewrites\n*/\n- public MLResults execute(Script script) {\n+ public void compile(Script script, boolean performHOPRewrites) {\n// main steps in script execution\nsetup(script);\n@@ -303,6 +308,7 @@ public class ScriptExecutor {\nliveVariableAnalysis();\nvalidateScript();\nconstructHops();\n+ if(performHOPRewrites)\nrewriteHops();\nrewritePersistentReadsAndWrites();\nconstructLops();\n@@ -315,6 +321,28 @@ public class ScriptExecutor {\nif (statistics) {\nStatistics.stopCompileTimer();\n}\n+ }\n+\n+\n+ /**\n+ * Execute a DML or PYDML script. This is broken down into the following\n+ * primary methods:\n+ *\n+ * <ol>\n+ * <li>{@link #compile(Script)}</li>\n+ * <li>{@link #createAndInitializeExecutionContext()}</li>\n+ * <li>{@link #executeRuntimeProgram()}</li>\n+ * <li>{@link #cleanupAfterExecution()}</li>\n+ * </ol>\n+ *\n+ * @param script\n+ * the DML or PYDML script to execute\n+ * @return the results as a MLResults object\n+ */\n+ public MLResults execute(Script script) {\n+\n+ // main steps in script execution\n+ compile(script);\ntry {\ncreateAndInitializeExecutionContext();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java",
"diff": "@@ -1378,7 +1378,7 @@ public class SparkExecutionContext extends ExecutionContext\n* degree of parallelism. This configuration abstracts legacy (< Spark 1.6) and current\n* configurations and provides a unified view.\n*/\n- private static class SparkClusterConfig\n+ public static class SparkClusterConfig\n{\n//broadcasts are stored in mem-and-disk in data space, this config\n//defines the fraction of data space to be used as broadcast budget\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/Instruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/Instruction.java",
"diff": "@@ -63,6 +63,26 @@ public abstract class Instruction\nprotected int beginCol = -1;\nprotected int endCol = -1;\n+ public String getFilename() {\n+ return filename;\n+ }\n+\n+ public int getBeginLine() {\n+ return beginLine;\n+ }\n+\n+ public int getEndLine() {\n+ return endLine;\n+ }\n+\n+ public int getBeginColumn() {\n+ return beginCol;\n+ }\n+\n+ public int getEndColumn() {\n+ return endCol;\n+ }\n+\npublic void setType (INSTRUCTION_TYPE tp ) {\ntype = tp;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/utils/Explain.java",
"new_path": "src/main/java/org/apache/sysml/utils/Explain.java",
"diff": "@@ -26,10 +26,16 @@ import java.util.HashSet;\nimport java.util.Map;\nimport java.util.Map.Entry;\n+import org.apache.sysml.hops.AggBinaryOp;\n+import org.apache.sysml.hops.BinaryOp;\n+import org.apache.sysml.hops.DataOp;\nimport org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.Hop.DataOpTypes;\nimport org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.hops.ReorgOp;\n+import org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeMultiAgg;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\n@@ -267,6 +273,50 @@ public class Explain\nreturn sb.toString();\n}\n+ public static String getHopDAG(DMLProgram prog, ArrayList<Integer> lines, boolean withSubgraph)\n+ throws HopsException, DMLRuntimeException, LanguageException {\n+ StringBuilder sb = new StringBuilder();\n+ StringBuilder nodes = new StringBuilder();\n+\n+ // create header\n+ sb.append(\"digraph {\");\n+\n+ // Explain functions (if exists)\n+ if (prog.hasFunctionStatementBlocks()) {\n+\n+ // show function call graph\n+ // FunctionCallGraph fgraph = new FunctionCallGraph(prog);\n+ // sb.append(explainFunctionCallGraph(fgraph, new HashSet<String>(),\n+ // null, 3));\n+\n+ // show individual functions\n+ for (String namespace : prog.getNamespaces().keySet()) {\n+ for (String fname : prog.getFunctionStatementBlocks(namespace).keySet()) {\n+ FunctionStatementBlock fsb = prog.getFunctionStatementBlock(namespace, fname);\n+ FunctionStatement fstmt = (FunctionStatement) fsb.getStatement(0);\n+ String fkey = DMLProgram.constructFunctionKey(namespace, fname);\n+\n+ if (!(fstmt instanceof ExternalFunctionStatement)) {\n+ addSubGraphHeader(sb, withSubgraph);\n+ for (StatementBlock current : fstmt.getBody())\n+ sb.append(getHopDAG(current, nodes, lines, withSubgraph));\n+ String label = \"FUNCTION \" + fkey + \" recompile=\" + fsb.isRecompileOnce() + \"\\n\";\n+ addSubGraphFooter(sb, withSubgraph, label);\n+ }\n+ }\n+ }\n+ }\n+\n+ // Explain main program\n+ for (StatementBlock sblk : prog.getStatementBlocks())\n+ sb.append(getHopDAG(sblk, nodes, lines, withSubgraph));\n+\n+ sb.append(nodes);\n+ sb.append(\"rankdir = \\\"BT\\\"\\n\");\n+ sb.append(\"}\\n\");\n+ return sb.toString();\n+ }\n+\npublic static String explain( Program rtprog ) throws HopsException {\nreturn explain(rtprog, null);\n}\n@@ -466,6 +516,128 @@ public class Explain\n//////////////\n// internal explain HOPS\n+ private static int clusterID = 0;\n+\n+ public static void reset() {\n+ clusterID = 0;\n+ }\n+\n+ private static void addSubGraphHeader(StringBuilder builder, boolean withSubgraph) {\n+ if (withSubgraph) {\n+ builder.append(\"subgraph cluster_\" + (clusterID++) + \" {\\n\");\n+ }\n+ }\n+\n+ private static void addSubGraphFooter(StringBuilder builder, boolean withSubgraph, String label) {\n+ if (withSubgraph) {\n+ builder.append(\"label = \\\"\" + label + \"\\\";\\n\");\n+ builder.append(\"}\\n\");\n+ }\n+ }\n+\n+ private static StringBuilder getHopDAG(StatementBlock sb, StringBuilder nodes, ArrayList<Integer> lines,\n+ boolean withSubgraph) throws HopsException, DMLRuntimeException {\n+ StringBuilder builder = new StringBuilder();\n+\n+ if (sb instanceof WhileStatementBlock) {\n+ addSubGraphHeader(builder, withSubgraph);\n+\n+ WhileStatementBlock wsb = (WhileStatementBlock) sb;\n+ String label = null;\n+ if (!wsb.getUpdateInPlaceVars().isEmpty())\n+ label = \"WHILE (lines \" + 
wsb.getBeginLine() + \"-\" + wsb.getEndLine() + \") in-place=\"\n+ + wsb.getUpdateInPlaceVars().toString() + \"\";\n+ else\n+ label = \"WHILE (lines \" + wsb.getBeginLine() + \"-\" + wsb.getEndLine() + \")\";\n+ // TODO: Don't show predicate hops for now\n+ // builder.append(explainHop(wsb.getPredicateHops()));\n+\n+ WhileStatement ws = (WhileStatement) sb.getStatement(0);\n+ for (StatementBlock current : ws.getBody())\n+ builder.append(getHopDAG(current, nodes, lines, withSubgraph));\n+\n+ addSubGraphFooter(builder, withSubgraph, label);\n+ } else if (sb instanceof IfStatementBlock) {\n+ addSubGraphHeader(builder, withSubgraph);\n+ IfStatementBlock ifsb = (IfStatementBlock) sb;\n+ String label = \"IF (lines \" + ifsb.getBeginLine() + \"-\" + ifsb.getEndLine() + \")\";\n+ // TODO: Don't show predicate hops for now\n+ // builder.append(explainHop(ifsb.getPredicateHops(), level+1));\n+\n+ IfStatement ifs = (IfStatement) sb.getStatement(0);\n+ for (StatementBlock current : ifs.getIfBody()) {\n+ builder.append(getHopDAG(current, nodes, lines, withSubgraph));\n+ addSubGraphFooter(builder, withSubgraph, label);\n+ }\n+ if (!ifs.getElseBody().isEmpty()) {\n+ addSubGraphHeader(builder, withSubgraph);\n+ label = \"ELSE (lines \" + ifsb.getBeginLine() + \"-\" + ifsb.getEndLine() + \")\";\n+\n+ for (StatementBlock current : ifs.getElseBody())\n+ builder.append(getHopDAG(current, nodes, lines, withSubgraph));\n+ addSubGraphFooter(builder, withSubgraph, label);\n+ }\n+ } else if (sb instanceof ForStatementBlock) {\n+ ForStatementBlock fsb = (ForStatementBlock) sb;\n+ addSubGraphHeader(builder, withSubgraph);\n+ String label = \"\";\n+ if (sb instanceof ParForStatementBlock) {\n+ if (!fsb.getUpdateInPlaceVars().isEmpty())\n+ label = \"PARFOR (lines \" + fsb.getBeginLine() + \"-\" + fsb.getEndLine() + \") in-place=\"\n+ + fsb.getUpdateInPlaceVars().toString() + \"\";\n+ else\n+ label = \"PARFOR (lines \" + fsb.getBeginLine() + \"-\" + fsb.getEndLine() + \")\";\n+ } else {\n+ if (!fsb.getUpdateInPlaceVars().isEmpty())\n+ label = \"FOR (lines \" + fsb.getBeginLine() + \"-\" + fsb.getEndLine() + \") in-place=\"\n+ + fsb.getUpdateInPlaceVars().toString() + \"\";\n+ else\n+ label = \"FOR (lines \" + fsb.getBeginLine() + \"-\" + fsb.getEndLine() + \")\";\n+ }\n+ // TODO: Don't show predicate hops for now\n+ // if (fsb.getFromHops() != null)\n+ // builder.append(explainHop(fsb.getFromHops(), level+1));\n+ // if (fsb.getToHops() != null)\n+ // builder.append(explainHop(fsb.getToHops(), level+1));\n+ // if (fsb.getIncrementHops() != null)\n+ // builder.append(explainHop(fsb.getIncrementHops(), level+1));\n+\n+ ForStatement fs = (ForStatement) sb.getStatement(0);\n+ for (StatementBlock current : fs.getBody())\n+ builder.append(getHopDAG(current, nodes, lines, withSubgraph));\n+ addSubGraphFooter(builder, withSubgraph, label);\n+\n+ } else if (sb instanceof FunctionStatementBlock) {\n+ FunctionStatement fsb = (FunctionStatement) sb.getStatement(0);\n+ addSubGraphHeader(builder, withSubgraph);\n+ String label = \"Function (lines \" + fsb.getBeginLine() + \"-\" + fsb.getEndLine() + \")\";\n+ for (StatementBlock current : fsb.getBody())\n+ builder.append(getHopDAG(current, nodes, lines, withSubgraph));\n+ addSubGraphFooter(builder, withSubgraph, label);\n+ } else {\n+ // For generic StatementBlock\n+ if (sb.requiresRecompilation()) {\n+ addSubGraphHeader(builder, withSubgraph);\n+ }\n+ ArrayList<Hop> hopsDAG = sb.get_hops();\n+ if (hopsDAG != null && !hopsDAG.isEmpty()) {\n+ Hop.resetVisitStatus(hopsDAG);\n+ for (Hop 
hop : hopsDAG)\n+ builder.append(getHopDAG(hop, nodes, lines, withSubgraph));\n+ Hop.resetVisitStatus(hopsDAG);\n+ }\n+\n+ if (sb.requiresRecompilation()) {\n+ builder.append(\"style=filled;\\n\");\n+ builder.append(\"color=lightgrey;\\n\");\n+ String label = \"(lines \" + sb.getBeginLine() + \"-\" + sb.getEndLine() + \") [recompile=\"\n+ + sb.requiresRecompilation() + \"]\";\n+ addSubGraphFooter(builder, withSubgraph, label);\n+ }\n+ }\n+ return builder;\n+ }\n+\nprivate static String explainStatementBlock(StatementBlock sb, int level)\nthrows HopsException, DMLRuntimeException\n{\n@@ -637,6 +809,134 @@ public class Explain\nreturn sb.toString();\n}\n+ private static boolean isInRange(Hop hop, ArrayList<Integer> lines) {\n+ boolean isInRange = lines.size() == 0 ? true : false;\n+ for (int lineNum : lines) {\n+ if (hop.getBeginLine() == lineNum && lineNum == hop.getEndLine()) {\n+ return true;\n+ }\n+ }\n+ return isInRange;\n+ }\n+\n+ private static StringBuilder getHopDAG(Hop hop, StringBuilder nodes, ArrayList<Integer> lines, boolean withSubgraph)\n+ throws DMLRuntimeException {\n+ StringBuilder sb = new StringBuilder();\n+ if (hop.isVisited() || (!SHOW_LITERAL_HOPS && hop instanceof LiteralOp))\n+ return sb;\n+\n+ for (Hop input : hop.getInput()) {\n+ if ((SHOW_LITERAL_HOPS || !(input instanceof LiteralOp)) && isInRange(hop, lines)) {\n+ String edgeLabel = showMem(input.getOutputMemEstimate(), true);\n+ sb.append(\"h\" + input.getHopID() + \" -> h\" + hop.getHopID() + \" [label=\\\"\" + edgeLabel + \"\\\"];\\n\");\n+ }\n+ }\n+ for (Hop input : hop.getInput())\n+ sb.append(getHopDAG(input, nodes, lines, withSubgraph));\n+\n+ if (isInRange(hop, lines)) {\n+ nodes.append(\"h\" + hop.getHopID() + \"[label=\\\"\" + getNodeLabel(hop) + \"\\\", \" + \"shape=\\\"\"\n+ + getNodeShape(hop) + \"\\\", color=\\\"\" + getNodeColor(hop) + \"\\\", tooltip=\\\"\" + getNodeToolTip(hop)\n+ + \"\\\"];\\n\");\n+ }\n+ hop.setVisited();\n+\n+ return sb;\n+ }\n+\n+ private static String getNodeLabel(Hop hop) {\n+ StringBuilder sb = new StringBuilder();\n+ sb.append(hop.getOpString());\n+ if (hop instanceof AggBinaryOp) {\n+ AggBinaryOp aggBinOp = (AggBinaryOp) hop;\n+ if (aggBinOp.getMMultMethod() != null)\n+ sb.append(\" \" + aggBinOp.getMMultMethod().name() + \" \");\n+ }\n+ // data flow properties\n+ if (SHOW_DATA_FLOW_PROPERTIES) {\n+ if (hop.requiresReblock() && hop.requiresCheckpoint())\n+ sb.append(\", rblk,chkpt\");\n+ else if (hop.requiresReblock())\n+ sb.append(\", rblk\");\n+ else if (hop.requiresCheckpoint())\n+ sb.append(\", chkpt\");\n+ }\n+ if (hop.getFilename() == null) {\n+ sb.append(\"[\" + hop.getBeginLine() + \":\" + hop.getBeginColumn() + \"-\" + hop.getEndLine() + \":\"\n+ + hop.getEndColumn() + \"]\");\n+ } else {\n+ sb.append(\"[\" + hop.getFilename() + \" \" + hop.getBeginLine() + \":\" + hop.getBeginColumn() + \"-\"\n+ + hop.getEndLine() + \":\" + hop.getEndColumn() + \"]\");\n+ }\n+\n+ if (hop.getUpdateType().isInPlace())\n+ sb.append(\",\" + hop.getUpdateType().toString().toLowerCase());\n+ return sb.toString();\n+ }\n+\n+ private static String getNodeToolTip(Hop hop) {\n+ StringBuilder sb = new StringBuilder();\n+ if (hop.getExecType() != null) {\n+ sb.append(hop.getExecType().name());\n+ }\n+ sb.append(\"[\" + hop.getDim1() + \" X \" + hop.getDim2() + \"], nnz=\" + hop.getNnz());\n+ sb.append(\", mem= [in=\");\n+ sb.append(showMem(hop.getInputMemEstimate(), false));\n+ sb.append(\", inter=\");\n+ sb.append(showMem(hop.getIntermediateMemEstimate(), false));\n+ sb.append(\", 
out=\");\n+ sb.append(showMem(hop.getOutputMemEstimate(), false));\n+ sb.append(\" -> \");\n+ sb.append(showMem(hop.getMemEstimate(), true));\n+ sb.append(\"]\");\n+ return sb.toString();\n+ }\n+\n+ private static String getNodeShape(Hop hop) {\n+ String shape = \"octagon\";\n+ if (hop.getExecType() != null) {\n+ switch (hop.getExecType()) {\n+ case CP:\n+ shape = \"ellipse\";\n+ break;\n+ case SPARK:\n+ shape = \"box\";\n+ break;\n+ case GPU:\n+ shape = \"trapezium\";\n+ break;\n+ case MR:\n+ shape = \"parallelogram\";\n+ break;\n+ default:\n+ shape = \"octagon\";\n+ break;\n+ }\n+ }\n+ return shape;\n+ }\n+\n+ private static String getNodeColor(Hop hop) {\n+ if (hop instanceof DataOp) {\n+ DataOp dOp = (DataOp) hop;\n+ if (dOp.getDataOpType() == DataOpTypes.PERSISTENTREAD || dOp.getDataOpType() == DataOpTypes.TRANSIENTREAD) {\n+ return \"wheat2\";\n+ } else if (dOp.getDataOpType() == DataOpTypes.PERSISTENTWRITE\n+ || dOp.getDataOpType() == DataOpTypes.TRANSIENTWRITE) {\n+ return \"wheat4\";\n+ }\n+ } else if (hop instanceof AggBinaryOp) {\n+ return \"orangered2\";\n+ } else if (hop instanceof BinaryOp) {\n+ return \"royalblue2\";\n+ } else if (hop instanceof ReorgOp) {\n+ return \"green\";\n+ } else if (hop instanceof UnaryOp) {\n+ return \"yellow\";\n+ }\n+ return \"black\";\n+ }\n+\n//////////////\n// internal explain CNODE\n@@ -867,6 +1167,7 @@ public class Explain\nsb.append( offsetInst );\nsb.append( tmp );\n+\nsb.append( '\\n' );\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mlcontext.py",
"new_path": "src/main/python/systemml/mlcontext.py",
"diff": "#\n#-------------------------------------------------------------\n-__all__ = ['MLResults', 'MLContext', 'Script', 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl', '_java2py', 'Matrix']\n+# Methods to create Script object\n+script_factory_methods = [ 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl' ]\n+# Utility methods\n+util_methods = [ 'jvm_stdout', '_java2py', 'getHopDAG' ]\n+__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods\nimport os\n@@ -27,13 +31,16 @@ try:\nimport py4j.java_gateway\nfrom py4j.java_gateway import JavaObject\nfrom pyspark import SparkContext\n+ from pyspark.conf import SparkConf\nimport pyspark.mllib.common\nexcept ImportError:\nraise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')\nfrom .converters import *\nfrom .classloader import *\n+import threading, time\n+_loadedSystemML = False\ndef _get_spark_context():\n\"\"\"\nInternal method to get already initialized SparkContext.\n@@ -44,10 +51,93 @@ def _get_spark_context():\nSparkContext\n\"\"\"\nif SparkContext._active_spark_context is not None:\n- return SparkContext._active_spark_context\n+ sc = SparkContext._active_spark_context\n+ if not _loadedSystemML:\n+ createJavaObject(sc, 'dummy')\n+ _loadedSystemML = True\n+ return sc\nelse:\nraise Exception('Expected spark context to be created.')\n+# This is useful utility class to get the output of the driver JVM from within a Jupyter notebook\n+# Example usage:\n+# with jvm_stdout():\n+# ml.execute(script)\n+class jvm_stdout(object):\n+ \"\"\"\n+ This is useful utility class to get the output of the driver JVM from within a Jupyter notebook\n+\n+ Parameters\n+ ----------\n+ parallel_flush: boolean\n+ Should flush the stdout in parallel\n+ \"\"\"\n+ def __init__(self, parallel_flush=False):\n+ self.util = SparkContext._active_spark_context._jvm.org.apache.sysml.api.ml.Utils()\n+ self.parallel_flush = parallel_flush\n+ self.t = threading.Thread(target=self.flush_stdout)\n+ self.stop = False\n+\n+ def flush_stdout(self):\n+ while not self.stop:\n+ time.sleep(1) # flush stdout every 1 second\n+ str = self.util.flushStdOut()\n+ if str != '':\n+ str = str[:-1] if str.endswith('\\n') else str\n+ print(str)\n+\n+ def __enter__(self):\n+ self.util.startRedirectStdOut()\n+ if self.parallel_flush:\n+ self.t.start()\n+\n+ def __exit__(self, *args):\n+ if self.parallel_flush:\n+ self.stop = True\n+ self.t.join()\n+ print(self.util.stopRedirectStdOut())\n+\n+\n+def getHopDAG(ml, script, lines=None, conf=None, apply_rewrites=True, with_subgraph=False):\n+ \"\"\"\n+ Compile a DML / PyDML script.\n+\n+ Parameters\n+ ----------\n+ ml: MLContext instance\n+ MLContext instance.\n+\n+ script: Script instance\n+ Script instance defined with the appropriate input and output variables.\n+\n+ lines: list of integers\n+ Optional: only display the hops that have begin and end line number equals to the given integers.\n+\n+ conf: SparkConf instance\n+ Optional spark configuration\n+\n+ apply_rewrites: boolean\n+ If True, perform static rewrites, perform intra-/inter-procedural analysis to propagate size information into functions and apply dynamic rewrites\n+\n+ with_subgraph: boolean\n+ If False, the dot graph will be created without subgraphs for statement blocks.\n+\n+ Returns\n+ -------\n+ hopDAG: string\n+ hop DAG in dot format\n+ \"\"\"\n+ if not 
isinstance(script, Script):\n+ raise ValueError(\"Expected script to be an instance of Script\")\n+ scriptString = script.scriptString\n+ script_java = script.script_java\n+ lines = [ int(x) for x in lines ] if lines is not None else [int(-1)]\n+ sc = _get_spark_context()\n+ if conf is not None:\n+ hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, conf._jconf, apply_rewrites, with_subgraph)\n+ else:\n+ hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, apply_rewrites, with_subgraph)\n+ return hopDAG\ndef dml(scriptString):\n\"\"\"\n@@ -330,9 +420,9 @@ class Script(object):\nself.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile(scriptString)\nelif scriptFormat == \"file\" and self.scriptType == \"pydml\":\nself.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromFile(scriptString)\n- elif scriptFormat == \"file\" and self.scriptType == \"dml\":\n+ elif isResource and self.scriptType == \"dml\":\nself.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromResource(scriptString)\n- elif scriptFormat == \"file\" and self.scriptType == \"pydml\":\n+ elif isResource and self.scriptType == \"pydml\":\nself.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromResource(scriptString)\nelif scriptFormat == \"string\" and self.scriptType == \"dml\":\nself.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dml(scriptString)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/ml/Utils.scala",
"new_path": "src/main/scala/org/apache/sysml/api/ml/Utils.scala",
"diff": "*/\npackage org.apache.sysml.api.ml\n+import org.apache.spark.api.java.JavaPairRDD\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n+\n+object Utils {\n+ val originalOut = System.out\n+ val originalErr = System.err\n+}\nclass Utils {\ndef checkIfFileExists(filePath:String):Boolean = {\nreturn org.apache.sysml.runtime.util.MapReduceTool.existsFileOnHDFS(filePath)\n}\n+\n+ // --------------------------------------------------------------------------------\n+ // Simple utility function to print the information about our binary blocked format\n+ def getBinaryBlockInfo(binaryBlocks:JavaPairRDD[MatrixIndexes, MatrixBlock]):String = {\n+ val sb = new StringBuilder\n+ var partitionIndex = 0\n+ for(str <- binaryBlocks.rdd.mapPartitions(binaryBlockIteratorToString(_), true).collect) {\n+ sb.append(\"-------------------------------------\\n\")\n+ sb.append(\"Partition \" + partitionIndex + \":\\n\")\n+ sb.append(str)\n+ partitionIndex = partitionIndex + 1\n+ }\n+ sb.append(\"-------------------------------------\\n\")\n+ return sb.toString()\n+ }\n+ def binaryBlockIteratorToString(it: Iterator[(MatrixIndexes, MatrixBlock)]): Iterator[String] = {\n+ val sb = new StringBuilder\n+ for(entry <- it) {\n+ val mi = entry._1\n+ val mb = entry._2\n+ sb.append(mi.toString);\n+ sb.append(\" sparse? = \");\n+ sb.append(mb.isInSparseFormat());\n+ if(mb.isUltraSparse)\n+ sb.append(\" (ultra-sparse)\")\n+ sb.append(\", nonzeros = \");\n+ sb.append(mb.getNonZeros);\n+ sb.append(\", dimensions = \");\n+ sb.append(mb.getNumRows);\n+ sb.append(\" X \");\n+ sb.append(mb.getNumColumns);\n+ sb.append(\"\\n\");\n+ }\n+ List[String](sb.toString).iterator\n+ }\n+ val baos = new java.io.ByteArrayOutputStream()\n+ val baes = new java.io.ByteArrayOutputStream()\n+ def startRedirectStdOut():Unit = {\n+ System.setOut(new java.io.PrintStream(baos));\n+ System.setErr(new java.io.PrintStream(baes));\n+ }\n+ def flushStdOut():String = {\n+ val ret = baos.toString() + baes.toString()\n+ baos.reset(); baes.reset()\n+ return ret\n+ }\n+ def stopRedirectStdOut():String = {\n+ val ret = baos.toString() + baes.toString()\n+ System.setOut(Utils.originalOut)\n+ System.setErr(Utils.originalErr)\n+ return ret\n+ }\n+ // --------------------------------------------------------------------------------\n}\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1808] [SYSTEMML-1658] Visualize Hop DAG for explaining the optimizer
- Also added a utility to print Java output in a notebook.
- Fixed a bug in dmlFromResource.
Closes #596. |
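
A sketch of how the new entry point can be driven from Java, based on the MLContextUtil.getHopDAG signature added above; the SparkSession setup is assumed and simplified, and rendering the returned dot string (e.g., via graphviz) is out of scope:

import java.util.ArrayList;
import org.apache.spark.sql.SparkSession;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.MLContextUtil;
import org.apache.sysml.api.mlcontext.Script;
import org.apache.sysml.api.mlcontext.ScriptFactory;

public class HopDagDemo {
    public static void main(String[] args) throws Exception {
        SparkSession spark = SparkSession.builder()
            .master("local[*]").appName("hopdag").getOrCreate();
        MLContext ml = new MLContext(spark);
        Script s = ScriptFactory.dml("X = rand(rows=10, cols=10); print(sum(X %*% X));");
        // an empty 'lines' list means: do not filter hops by line number
        String dot = MLContextUtil.getHopDAG(ml, s, new ArrayList<Integer>(), true, false);
        System.out.println(dot); // dot-format graph; render e.g. with: dot -Tpng
        spark.stop();
    }
}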
49,703 | 03.08.2017 14:04:30 | 25,200 | 4ec6f08654246852141b9dacd8070bd65ab7fd4a | [MINOR] Link to Perf Testing from Release Process
Add a link in the Release Process doc that points to the Perf Testing doc.
Add a title and license header to the Perf Testing doc. | [
{
"change_type": "MODIFY",
"old_path": "docs/python-performance-test.md",
"new_path": "docs/python-performance-test.md",
"diff": "+---\n+layout: global\n+title: SystemML Performance Testing\n+description: Description of SystemML performance testing.\n+displayTitle: SystemML Performance Testing\n+---\n+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+\n+* This will become a table of contents (this text will be scraped).\n+{:toc}\n+\n+\n# Performance Testing Algorithms User Manual\nThis user manual contains details on how to conduct automated performance tests. Work was mostly done in this [PR](https://github.com/apache/systemml/pull/537) and part of [SYSTEMML-1451](https://issues.apache.org/jira/browse/SYSTEMML-1451). Our aim was to move from existing `bash` based performance tests to automatic `python` based automatic performance tests.\n-### Architecture\n+\n+## Architecture\n+\nOur performance tests suit contains `7` families namely `binomial`, `multinomial`, `stats1`, `stats2`, `regression1`, `regression2`, `clustering`. Within these families we have algorithms grouped under it. Typically a family is a set of algorithms that require the same data generation script.\n- Exceptions: `regression1`, `regression2` and `binomial`. We decide to include these algorithms in separate families to keep the architecture simple.\n@@ -33,7 +64,9 @@ The file `predict.py` contains all functions for all algorithms in the performan\nIn the file(s) `utils_*.py` we have all the helper functions required in our performance test. These functions do operations like write `json` files, extract time from std out etc.\n-### Adding New Algorithms\n+\n+## Adding New Algorithms\n+\nWhile adding a new algorithm we need know if it has to be part of the any pre existing family. If this algorithm depends on a new data generation script we would need to create a new family. Steps below to take below to add a new algorithm.\nFollowing changes to `run_perftest.py`:\n@@ -72,7 +105,9 @@ Following changes to `predict.py`:\n- Check for possible errors if these folders/files do not exist. (Please see the troubleshooting section).\n- Note: `predict.py` will not be executed if the current algorithm being executed does not have predict script.\n-### Current Default Settings\n+\n+## Current Default Settings\n+\nDefault setting for our performance test below:\n- Matrix size to 10,000 rows and 100 columns.\n@@ -80,7 +115,9 @@ Default setting for our performance test below:\n- Operation modes `data-gen`, `train` and `predict` in sequence.\n- Matrix type set to `all`. 
Which will generate `dense`, `sparse` matrices for all relevant algorithms.\n-### Examples\n+\n+## Examples\n+\nSome examples of SystemML performance test with arguments shown below:\n`./scripts/perftest/python/run_perftest.py --family binomial clustering multinomial regression1 regression2 stats1 stats2\n@@ -110,7 +147,9 @@ Run performance test for all algorithms under the family `regression2` and log w\n`./scripts/perftest/python/run_perftest.py --family binomial clustering multinomial regression1 regression2 stats1 stats2 --config-dir /Users/krishna/open-source/systemml/scripts/perftest/temp3 --temp-dir hdfs://localhost:9000/temp3`\nRun performance test for all algorithms using HDFS.\n-### Operational Notes\n+\n+## Operational Notes\n+\nAll performance test depend mainly on two scripts for execution `systemml-standalone.py` and `systemml-spark-submit.py`. Incase we need to change standalone or spark parameters we need to manually change these parameters in their respective scripts.\nConstants like `DATA_FORMAT` currently set to `csv` and `MATRIX_TYPE_DICT` with `density` set to `0.9` and `sparsity` set to `0.01` are hardcoded in the performance test scripts. They can be changed easily as they are defined at the top of their respective operational scripts.\n@@ -140,7 +179,9 @@ Currently we only support time difference between algorithms in different versio\nNote: Please pip install `https://github.com/burnash/gspread` to use google docs client.\n-### Troubleshooting\n+\n+## Troubleshooting\n+\nWe can debug the performance test by making changes in the following locations based on\n- Please see `utils_exec.py` function `subprocess_exec`.\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/release-process.md",
"new_path": "docs/release-process.md",
"diff": "@@ -366,9 +366,12 @@ For examples, see the [Spark MLContext Programming Guide](http://apache.github.i\n<a href=\"#release-candidate-checklist\">Up to Checklist</a>\n-Verify that the performance suite located at scripts/perftest/ executes on Spark and Hadoop. Testing should\n+Verify that the performance suite executes on Spark and Hadoop. Testing should\ninclude 80MB, 800MB, 8GB, and 80GB data sizes.\n+For more information, please see [SystemML Performance Testing](python-performance-test.html).\n+\n+\n# Run NN Unit Tests for GPU\n<a href=\"#release-candidate-checklist\">Up to Checklist</a>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Link to Perf Testing from Release Process
Add a link in the Release Process doc that points to the Perf Testing doc.
Add a title and license header to the Perf Testing doc. |
49,701 | 05.08.2017 23:23:37 | 25,200 | 257bf3ed006022b133bf7a2359d3b0cb2a68950b | Enable elementwise multiplication tests
These were meant to be enabled with the completion of
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationTest.java",
"diff": "@@ -61,7 +61,6 @@ public class RewriteElementwiseMultChainOptimizationTest extends AutomatedTestBa\ntestRewriteMatrixMultChainOp(TEST_NAME1, false, ExecType.SPARK);\n}\n- /* TODO enable together with RewriteElementwiseMultChainOptimization\n@Test\npublic void testMatrixMultChainOptRewritesCP() {\ntestRewriteMatrixMultChainOp(TEST_NAME1, true, ExecType.CP);\n@@ -71,7 +70,6 @@ public class RewriteElementwiseMultChainOptimizationTest extends AutomatedTestBa\npublic void testMatrixMultChainOptRewritesSP() {\ntestRewriteMatrixMultChainOp(TEST_NAME1, true, ExecType.SPARK);\n}\n- */\nprivate void testRewriteMatrixMultChainOp(String testname, boolean rewrites, ExecType et)\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1663] Enable elementwise multiplication tests
These were meant to be enabled with the completion of SYSTEMML-1663.
Closes #602. |
49,736 | 06.08.2017 13:24:55 | 21,600 | 9da5eab00efa86d1f135664dc7458e5f04c0097e | [MINOR] Bugfix to resolve UnboundLocalError in mlcontext | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mlcontext.py",
"new_path": "src/main/python/systemml/mlcontext.py",
"diff": "@@ -52,6 +52,7 @@ def _get_spark_context():\n\"\"\"\nif SparkContext._active_spark_context is not None:\nsc = SparkContext._active_spark_context\n+ global _loadedSystemML\nif not _loadedSystemML:\ncreateJavaObject(sc, 'dummy')\n_loadedSystemML = True\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Bugfix to resolve UnboundLocalError in mlcontext |
49,738 | 06.08.2017 20:45:51 | 25,200 | 70e5f29e8fff0e698d7abc647efe3f79be38abba | Fix scalar-frame casts and frame rbind operations
This patch fixes the missing compiler support for scalar-frame casting
as well as missing metadata handling on frame rbind operations. This
also includes a suite of related test cases and some minor cleanups in
AutomatedTestBase (base class of all tests). | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java",
"diff": "@@ -618,8 +618,7 @@ public class UnaryOp extends Hop implements MultiThreadedHop\n|| _op == OpOp1.CUMMAX );\n}\n- public boolean isCastUnaryOperation()\n- {\n+ public boolean isCastUnaryOperation() {\nreturn ( _op == OpOp1.CAST_AS_MATRIX\n|| _op == OpOp1.CAST_AS_SCALAR\n|| _op == OpOp1.CAST_AS_FRAME\n@@ -695,7 +694,8 @@ public class UnaryOp extends Hop implements MultiThreadedHop\n{\n//do nothing always known\n}\n- else if( _op == OpOp1.CAST_AS_MATRIX && getInput().get(0).getDataType()==DataType.SCALAR )\n+ else if( (_op == OpOp1.CAST_AS_MATRIX || _op == OpOp1.CAST_AS_FRAME)\n+ && getInput().get(0).getDataType()==DataType.SCALAR )\n{\n//prevent propagating 0 from scalar (which would be interpreted as unknown)\nsetDim1( 1 );\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -967,7 +967,7 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n//concatenate column data (w/ deep copy to prevent side effects)\nret._coldata = (Array[]) ArrayUtils.addAll(_coldata, that._coldata);\n- for( int i=0; i<ret._coldata.length; i++ )\n+ for( int i=0; i<ret.getNumColumns(); i++ )\nret._coldata[i] = ret._coldata[i].clone();\n}\nelse //ROW APPEND\n@@ -984,10 +984,13 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nret._numRows = _numRows;\nret._schema = _schema.clone();\nret._colnames = (_colnames!=null) ? _colnames.clone() : null;\n+ ret._colmeta = new ColumnMetadata[getNumColumns()];\n+ for( int j=0; j<_schema.length; j++ )\n+ ret._colmeta[j] = new ColumnMetadata(0);\n//concatenate data (deep copy first, append second)\n- ret._coldata = new Array[_coldata.length];\n- for( int j=0; j<_coldata.length; j++ )\n+ ret._coldata = new Array[getNumColumns()];\n+ for( int j=0; j<getNumColumns(); j++ )\nret._coldata[j] = _coldata[j].clone();\nIterator<Object[]> iter = that.getObjectRowIterator();\nwhile( iter.hasNext() )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java",
"diff": "@@ -195,8 +195,6 @@ public abstract class AutomatedTestBase\nprotected static RUNTIME_PLATFORM rtplatform = RUNTIME_PLATFORM.HYBRID;\nprotected static final boolean DEBUG = false;\n- protected static final boolean VISUALIZE = false;\n- protected static final boolean RUNNETEZZA = false;\nprotected String fullDMLScriptName; // utilize for both DML and PyDML, should probably be renamed.\n// protected String fullPYDMLScriptName;\n@@ -1174,8 +1172,6 @@ public abstract class AutomatedTestBase\n}\n}\n// program-independent parameters\n- if(VISUALIZE)\n- args.add(\"-v\");\nargs.add(\"-exec\");\nif(rtplatform == RUNTIME_PLATFORM.HADOOP)\nargs.add(\"hadoop\");\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/frame/FrameScalarCastingIntegratedTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.frame;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.runtime.util.MapReduceTool;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.apache.sysml.utils.Statistics;\n+\n+public class FrameScalarCastingIntegratedTest extends AutomatedTestBase\n+{\n+ private final static String TEST_DIR = \"functions/frame/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + FrameScalarCastingIntegratedTest.class.getSimpleName() + \"/\";\n+ private final static String TEST_NAME = \"FrameScalarCast\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"R\"}));\n+ }\n+\n+ @Test\n+ public void testFrameStringCP0() {\n+ runFrameScalarCastingTest(ValueType.STRING, RUNTIME_PLATFORM.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void testFrameLongCP0() {\n+ runFrameScalarCastingTest(ValueType.INT, RUNTIME_PLATFORM.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void testFrameBooleanCP0() {\n+ runFrameScalarCastingTest(ValueType.BOOLEAN, RUNTIME_PLATFORM.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void testFrameDoubleCP0() {\n+ runFrameScalarCastingTest(ValueType.DOUBLE, RUNTIME_PLATFORM.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void testFrameStringCP1() {\n+ runFrameScalarCastingTest(ValueType.STRING, RUNTIME_PLATFORM.HYBRID);\n+ }\n+\n+ @Test\n+ public void testFrameLongCP1() {\n+ runFrameScalarCastingTest(ValueType.INT, RUNTIME_PLATFORM.HYBRID);\n+ }\n+\n+ @Test\n+ public void testFrameBooleanCP1() {\n+ runFrameScalarCastingTest(ValueType.BOOLEAN, RUNTIME_PLATFORM.HYBRID);\n+ }\n+\n+ @Test\n+ public void testFrameDoubleCP1() {\n+ runFrameScalarCastingTest(ValueType.DOUBLE, RUNTIME_PLATFORM.HYBRID);\n+ }\n+\n+ @Test\n+ public void testFrameStringCP2() {\n+ runFrameScalarCastingTest(ValueType.STRING, RUNTIME_PLATFORM.HYBRID_SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameLongCP2() {\n+ runFrameScalarCastingTest(ValueType.INT, RUNTIME_PLATFORM.HYBRID_SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameBooleanCP2() {\n+ runFrameScalarCastingTest(ValueType.BOOLEAN, RUNTIME_PLATFORM.HYBRID_SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameDoubleCP2() {\n+ runFrameScalarCastingTest(ValueType.DOUBLE, 
RUNTIME_PLATFORM.HYBRID_SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameStringSP() {\n+ runFrameScalarCastingTest(ValueType.STRING, RUNTIME_PLATFORM.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameLongSP() {\n+ runFrameScalarCastingTest(ValueType.INT, RUNTIME_PLATFORM.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameBooleanSP() {\n+ runFrameScalarCastingTest(ValueType.BOOLEAN, RUNTIME_PLATFORM.SPARK);\n+ }\n+\n+ @Test\n+ public void testFrameDoubleSP() {\n+ runFrameScalarCastingTest(ValueType.DOUBLE, RUNTIME_PLATFORM.SPARK);\n+ }\n+\n+ private void runFrameScalarCastingTest(ValueType vtIn, RUNTIME_PLATFORM et)\n+ {\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ rtplatform = et;\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ getAndLoadTestConfiguration(TEST_NAME);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-stats\", \"-args\", input(\"V\"), output(\"R\") };\n+\n+ //generate input data\n+ switch( vtIn ) {\n+ case STRING: MapReduceTool.writeStringToHDFS(\"foo\", input(\"V\")); break;\n+ case INT: MapReduceTool.writeIntToHDFS(7, input(\"V\")); break;\n+ case BOOLEAN: MapReduceTool.writeBooleanToHDFS(true, input(\"V\")); break;\n+ case DOUBLE: MapReduceTool.writeDoubleToHDFS(7.3, input(\"V\")); break;\n+ default: throw new RuntimeException(\"Unsupported type: \"+vtIn);\n+ }\n+ MapReduceTool.writeScalarMetaDataFile(input(\"V\")+\".mtd\", vtIn);\n+\n+ //run tests\n+ runTest(true, false, null, -1);\n+\n+ //compare output\n+ Assert.assertEquals(readDMLMatrixFromHDFS(\"R\").get(new CellIndex(1,1)), Double.valueOf(1));\n+ if( et != RUNTIME_PLATFORM.SPARK ) {\n+ Assert.assertTrue(Statistics.getNoOfCompiledSPInst()==0);\n+ Assert.assertTrue(Statistics.getNoOfExecutedSPInst()==0);\n+ }\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/frame/FrameScalarCast.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+s1 = read($1);\n+\n+F1 = as.frame(s1);\n+F2 = rbind(F1, F1);\n+s2 = as.scalar(F2[1,1])\n+R = as.matrix(s1==s2);\n+\n+write(R, $2);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/frame/ZPackageSuite.java",
"new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/frame/ZPackageSuite.java",
"diff": "@@ -41,6 +41,7 @@ import org.junit.runners.Suite;\nFrameMatrixWriteTest.class,\nFrameMetaReadWriteTest.class,\nFrameReadWriteTest.class,\n+ FrameScalarCastingIntegratedTest.class,\nFrameScalarCastingTest.class,\nFrameSchemaReadTest.class,\nFrameSerializationTest.class,\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1826] Fix scalar-frame casts and frame rbind operations
This patch fixes the missing compiler support for scalar-frame casting
as well as missing metadata handling on frame rbind operations. This
also includes a suite of related test cases and some minor cleanups in
AutomatedTestBase (base class of all tests). |
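
A minimal sketch exercising the fixed rbind path, assuming the FrameBlock constructors and the append(that, ret, cbind) signature visible in the diff context; before the fix, the result's column metadata was left uninitialized on the row-append branch:

import org.apache.sysml.parser.Expression.ValueType;
import org.apache.sysml.runtime.matrix.data.FrameBlock;

public class FrameRbindDemo {
    public static void main(String[] args) throws Exception {
        ValueType[] schema = new ValueType[]{ValueType.STRING, ValueType.INT};
        FrameBlock a = new FrameBlock(schema);
        a.appendRow(new Object[]{"foo", 1L});
        FrameBlock b = new FrameBlock(schema);
        b.appendRow(new Object[]{"bar", 2L});
        // row append (rbind): 'false' selects the row-append branch fixed above
        FrameBlock c = a.append(b, new FrameBlock(), false);
        System.out.println(c.getNumRows() + " x " + c.getNumColumns()); // 2 x 2
    }
}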
49,717 | 28.07.2017 17:07:46 | 25,200 | 5906682b0f328a8179c66f960cedb6e68fb8a0e1 | toString does not print negative 0s anymore
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/util/DataConverter.java",
"new_path": "src/main/java/org/apache/sysml/runtime/util/DataConverter.java",
"diff": "@@ -862,11 +862,15 @@ public class DataConverter\nelse { // Dense Print Format\nfor (int i=0; i<rowLength; i++){\nfor (int j=0; j<colLength-1; j++){\n- double value = mb.quickGetValue(i, j);\n+ Double value = mb.quickGetValue(i, j);\n+ if (value.equals(-0.0d))\n+ value = 0.0;\nsb.append(dfFormat(df, value));\nsb.append(separator);\n}\n- double value = mb.quickGetValue(i, colLength-1);\n+ Double value = mb.quickGetValue(i, colLength-1);\n+ if (value.equals(-0.0d))\n+ value = 0.0;\nsb.append(dfFormat(df, value)); // Do not put separator after last element\nsb.append(lineseparator);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1816] toString does not print negative 0s anymore
Closes #599 |
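The fix works because Java's `-0.0d == 0.0d` is true while `Double.valueOf(-0.0d).equals(0.0d)` is false, so boxing the value and comparing with `equals(-0.0d)` catches exactly the negative zero. The same canonicalization, shown standalone as a Python sketch (not the SystemML code itself):

```python
# Standalone illustration of the -0.0 canonicalization; the actual fix is the
# Java change in DataConverter shown above.
def normalize_neg_zero(value: float) -> float:
    # IEEE-754: -0.0 == 0.0 is True, so both map to plain 0.0 here
    return 0.0 if value == 0.0 else value

assert str(normalize_neg_zero(-0.0)) == "0.0"   # no '-0' in printed output
assert normalize_neg_zero(-3.5) == -3.5         # other values pass through
```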
49,768 | 09.08.2017 13:50:02 | 25,200 | c2124544d2ddf8afc081670ea120ac148ef1bf12 | Image Classification using Caffe model sample notebook | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "samples/jupyter-notebooks/Image_Classify_Using_VGG_19.ipynb",
"diff": "+{\n+ \"cells\": [\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Image Classification using Caffe VGG-19 model\\n\",\n+ \"\\n\",\n+ \"This notebook demonstrates importing VGG-19 model from Caffe to SystemML and use that model to do an image classification. VGG-19 model has been trained using ImageNet dataset (1000 classes with ~ 14M images). If an image to be predicted is in one of the class VGG-19 has trained on then accuracy will be higher.\\n\",\n+ \"We expect prediction of any image through SystemML using VGG-19 model will be similar to that of image predicted through Caffe using VGG-19 model directly.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Prerequisite:\\n\",\n+ \"1. SystemML Python Package\\n\",\n+ \"To run this notebook you need to install systeml 1.0 (Master Branch code as of 07/26/2017 or later) python package.\\n\",\n+ \"2. Caffe \\n\",\n+ \"If you want to verify results through Caffe, then you need to have Caffe python package or Caffe installed.\\n\",\n+ \"For this verification I have installed Caffe on local system instead of Caffe python package.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"##### SystemML Python Package information\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!pip show systemml\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### SystemML Build information\\n\",\n+ \"Following code will show SystemML information which is installed in the environment.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from systemml import MLContext\\n\",\n+ \"ml = MLContext(sc)\\n\",\n+ \"print (\\\"SystemML Built-Time:\\\"+ ml.buildTime())\\n\",\n+ \"print(ml.info())\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Workaround for Python 2.7.13 to avoid certificate validation issue while downloading any file.\\n\",\n+ \"\\n\",\n+ \"import ssl\\n\",\n+ \"\\n\",\n+ \"try:\\n\",\n+ \" _create_unverified_https_context = ssl._create_unverified_context\\n\",\n+ \"except AttributeError:\\n\",\n+ \" # Legacy Python that doesn't verify HTTPS certificates by default\\n\",\n+ \" pass\\n\",\n+ \"else:\\n\",\n+ \" # Handle target environment that doesn't support HTTPS verification\\n\",\n+ \" ssl._create_default_https_context = _create_unverified_https_context\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Download model, proto files and convert them to SystemML format.\\n\",\n+ \"\\n\",\n+ \"1. Download Caffe Model (VGG-19), proto files (deployer, network and solver) and label file.\\n\",\n+ \"2. 
Convert the Caffe model into SystemML input format.\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Download caffemodel and proto files \\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"def downloadAndConvertModel(downloadDir='.', trained_vgg_weights='trained_vgg_weights'):\\n\",\n+ \" \\n\",\n+ \" # Step 1: Download the VGG-19 model and other files.\\n\",\n+ \" import errno\\n\",\n+ \" import os\\n\",\n+ \" import urllib\\n\",\n+ \"\\n\",\n+ \" # Create directory, if exists don't error out\\n\",\n+ \" try:\\n\",\n+ \" os.makedirs(os.path.join(downloadDir,trained_vgg_weights))\\n\",\n+ \" except OSError as exc: # Python >2.5\\n\",\n+ \" if exc.errno == errno.EEXIST and os.path.isdir(trained_vgg_weights):\\n\",\n+ \" pass\\n\",\n+ \" else:\\n\",\n+ \" raise\\n\",\n+ \" \\n\",\n+ \" # Download deployer, network, solver proto and label files.\\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_deploy.proto', os.path.join(downloadDir,'VGG_ILSVRC_19_layers_deploy.proto'))\\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_network.proto',os.path.join(downloadDir,'VGG_ILSVRC_19_layers_network.proto'))\\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_solver.proto',os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'))\\n\",\n+ \"\\n\",\n+ \" # Get labels for data\\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/labels.txt', os.path.join(downloadDir, trained_vgg_weights, 'labels.txt'))\\n\",\n+ \"\\n\",\n+ \" # Following instruction download model of size 500MG file, so based on your network it may take time to download file.\\n\",\n+ \" urllib.urlretrieve('http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel', os.path.join(downloadDir,'VGG_ILSVRC_19_layers.caffemodel'))\\n\",\n+ \"\\n\",\n+ \" # Step 2: Convert the caffemodel to trained_vgg_weights directory\\n\",\n+ \" import systemml as sml\\n\",\n+ \" sml.convert_caffemodel(sc, os.path.join(downloadDir,'VGG_ILSVRC_19_layers_deploy.proto'), os.path.join(downloadDir,'VGG_ILSVRC_19_layers.caffemodel'), os.path.join(downloadDir,trained_vgg_weights))\\n\",\n+ \" \\n\",\n+ \" return\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"##### PrintTopK\\n\",\n+ \"This function will print top K probabilities and indices from the result.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Print top K indices and probability\\n\",\n+ \"\\n\",\n+ \"def printTopK(prob, label, k):\\n\",\n+ \" print(label, 'Top ', k, ' Index : ', np.argsort(-prob)[0, :k])\\n\",\n+ \" print(label, 'Top ', k, ' Probability : ', prob[0,np.argsort(-prob)[0, :k]])\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Classify image using Caffe\\n\",\n+ \"Prerequisite: You need to have Caffe installed on a system to run this code. 
(or have Caffe Python package installed)\\n\",\n+ \"\\n\",\n+ \"This will classify image using Caffe code directly. \\n\",\n+ \"This can be used to verify classification through SystemML if matches with that through Caffe directly.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import os\\n\",\n+ \"\\n\",\n+ \"def getCaffeLabel(url, printTopKData, topK, size=(224,224), modelDir='trained_vgg_weights'):\\n\",\n+ \" import caffe\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \" urllib.urlretrieve(url, 'test.jpg')\\n\",\n+ \" image = caffe.io.resize_image(caffe.io.load_image('test.jpg'), size)\\n\",\n+ \"\\n\",\n+ \" image = [(image * 255).astype(np.float)]\\n\",\n+ \"\\n\",\n+ \" deploy_file = 'VGG_ILSVRC_19_layers_deploy.proto'\\n\",\n+ \" caffemodel_file = 'VGG_ILSVRC_19_layers.caffemodel'\\n\",\n+ \"\\n\",\n+ \" net = caffe.Classifier(deploy_file, caffemodel_file)\\n\",\n+ \" caffe_prob = net.predict(image)\\n\",\n+ \" caffe_prediction = caffe_prob.argmax(axis=1)\\n\",\n+ \" \\n\",\n+ \" if(printTopKData):\\n\",\n+ \" printTopK(caffe_prob, 'Caffe', topK)\\n\",\n+ \"\\n\",\n+ \" import pandas as pd\\n\",\n+ \" labels = pd.read_csv(os.path.join(modelDir,'labels.txt'), names=['index', 'label'])\\n\",\n+ \" caffe_prediction_labels = [ labels[labels.index == x][['label']].values[0][0] for x in caffe_prediction ]\\n\",\n+ \" \\n\",\n+ \" return net, caffe_prediction_labels\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Classify images\\n\",\n+ \"\\n\",\n+ \"This function classify images from images specified through urls.\\n\",\n+ \"\\n\",\n+ \"###### Input Parameters: \\n\",\n+ \" urls: List of urls\\n\",\n+ \" printTokKData (default False): Whether to print top K indices and probabilities\\n\",\n+ \" topK: Top K elements to be displayed.\\n\",\n+ \" caffeInstalled (default False): If Caffe has been installed. If installed, then it will classify image (with top K probability and indices) based on printTopKData. \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import numpy as np\\n\",\n+ \"import urllib\\n\",\n+ \"from systemml.mllearn import Caffe2DML\\n\",\n+ \"import systemml as sml\\n\",\n+ \"\\n\",\n+ \"# Setting other than current directory causes \\\"network file not found\\\" issue, as network file\\n\",\n+ \"# location is defined in solver file which does not have a path, so it searches in current dir.\\n\",\n+ \"downloadDir = '.' 
# /home/asurve/caffe_models' \\n\",\n+ \"trained_vgg_weights = 'trained_vgg_weights'\\n\",\n+ \"\\n\",\n+ \"img_shape = (3, 224, 224)\\n\",\n+ \"size = (img_shape[1], img_shape[2])\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"def classifyImages(urls,printTokKData=False, topK=5, caffeInstalled=False):\\n\",\n+ \"\\n\",\n+ \" downloadAndConvertModel(downloadDir, trained_vgg_weights)\\n\",\n+ \" \\n\",\n+ \" vgg = Caffe2DML(sqlCtx, solver=os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'), input_shape=img_shape)\\n\",\n+ \" vgg.load(trained_vgg_weights)\\n\",\n+ \"\\n\",\n+ \" for url in urls:\\n\",\n+ \" outFile = 'inputTest.jpg'\\n\",\n+ \" urllib.urlretrieve(url, outFile)\\n\",\n+ \" \\n\",\n+ \" from IPython.display import Image, display\\n\",\n+ \" display(Image(filename=outFile))\\n\",\n+ \" \\n\",\n+ \" print (\\\"Prediction of above image to ImageNet Class using\\\");\\n\",\n+ \"\\n\",\n+ \" ## Do image classification through SystemML processing\\n\",\n+ \" from PIL import Image\\n\",\n+ \" input_image = sml.convertImageToNumPyArr(Image.open(outFile), img_shape=img_shape\\n\",\n+ \" , color_mode='BGR', mean=sml.getDatasetMean('VGG_ILSVRC_19_2014'))\\n\",\n+ \" print (\\\"Image preprocessed through SystemML :: \\\", vgg.predict(input_image)[0])\\n\",\n+ \" if(printTopKData == True):\\n\",\n+ \" sysml_proba = vgg.predict_proba(input_image)\\n\",\n+ \" printTopK(sysml_proba, 'SystemML BGR', topK)\\n\",\n+ \" \\n\",\n+ \" if(caffeInstalled == True):\\n\",\n+ \" net, caffeLabel = getCaffeLabel(url, printTopKData, topK, size, os.path.join(downloadDir, trained_vgg_weights))\\n\",\n+ \" print (\\\"Image classification through Caffe :: \\\", caffeLabel[0])\\n\",\n+ \"\\n\",\n+ \" print (\\\"Caffe input data through SystemML :: \\\", vgg.predict(np.matrix(net.blobs['data'].data.flatten()))[0])\\n\",\n+ \" \\n\",\n+ \" if(printTopKData == True):\\n\",\n+ \" sysml_proba = vgg.predict_proba(np.matrix(net.blobs['data'].data.flatten()))\\n\",\n+ \" printTopK(sysml_proba, 'With Caffe input data', topK)\\n\",\n+ \" \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Sample API call to classify image\\n\",\n+ \"\\n\",\n+ \"There are couple of parameters to set based on what you are looking for.\\n\",\n+ \"1. printTopKData (default False): If this parameter gets set to True, then top K results (probabilities and indices) will be displayed. \\n\",\n+ \"2. topK (default 5): How many entities (K) to be displayed.\\n\",\n+ \"3. caffeInstalled (default False): If Caffe has installed. 
If not installed then verification through Caffe won't be done.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"printTopKData=False\\n\",\n+ \"topK=5\\n\",\n+ \"caffeInstalled=False\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"urls = ['https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/MountainLion.jpg/312px-MountainLion.jpg', 'https://s-media-cache-ak0.pinimg.com/originals/f2/56/59/f2565989f455984f206411089d6b1b82.jpg', 'http://i2.cdn.cnn.com/cnnnext/dam/assets/161207140243-vanishing-elephant-closeup-exlarge-169.jpg', 'http://wallpaper-gallery.net/images/pictures-of-lilies/pictures-of-lilies-7.jpg', 'https://cdn.pixabay.com/photo/2012/01/07/21/56/sunflower-11574_960_720.jpg', 'https://image.shutterstock.com/z/stock-photo-bird-nest-on-tree-branch-with-five-blue-eggs-inside-108094613.jpg', 'https://i.ytimg.com/vi/6jQDbIv0tDI/maxresdefault.jpg','https://cdn.pixabay.com/photo/2016/11/01/23/53/cat-1790093_1280.jpg']\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"classifyImages(urls,printTopKData, topK, caffeInstalled)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": []\n+ }\n+ ],\n+ \"metadata\": {\n+ \"kernelspec\": {\n+ \"display_name\": \"Python 2\",\n+ \"language\": \"python\",\n+ \"name\": \"python2\"\n+ },\n+ \"language_info\": {\n+ \"codemirror_mode\": {\n+ \"name\": \"ipython\",\n+ \"version\": 2\n+ },\n+ \"file_extension\": \".py\",\n+ \"mimetype\": \"text/x-python\",\n+ \"name\": \"python\",\n+ \"nbconvert_exporter\": \"python\",\n+ \"pygments_lexer\": \"ipython2\",\n+ \"version\": \"2.7.13\"\n+ }\n+ },\n+ \"nbformat\": 4,\n+ \"nbformat_minor\": 2\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1703] Image Classification using Caffe VGG-19 model sample notebook |
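Condensed from the notebook, the core predict path is only a few lines; a sketch assuming the solver proto and converted weights produced by downloadAndConvertModel(), a live `sqlCtx`, and a placeholder image file 'test.jpg'.

```python
# Condensed sketch of the notebook's predict path (assumes the solver proto and
# converted weights from downloadAndConvertModel(), a live `sqlCtx`, and a
# placeholder input image 'test.jpg').
import systemml as sml
from systemml.mllearn import Caffe2DML
from PIL import Image

img_shape = (3, 224, 224)
vgg = Caffe2DML(sqlCtx, solver='VGG_ILSVRC_19_layers_solver.proto', input_shape=img_shape)
vgg.load('trained_vgg_weights')

x = sml.convertImageToNumPyArr(Image.open('test.jpg'), img_shape=img_shape,
                               color_mode='BGR', mean=sml.getDatasetMean('VGG_ILSVRC_19_2014'))
print(vgg.predict(x)[0])  # predicted ImageNet class label
```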
49,736 | 10.08.2017 12:39:01 | 28,800 | b2700839b77293cc32e88aaec9cdfcb3223619af | [MINOR] Bugfix for SoftMaxWithLoss layer in Caffe2DML
This commit allows users to specify the bottom of SoftMaxWithLoss in
any order they prefer, thereby avoiding subtle bugs | [
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala",
"diff": "@@ -552,6 +552,14 @@ class SoftmaxWithLoss(val param:LayerParameter, val id:Int, val net:CaffeNetwork\noverride def weightShape():Array[Int] = null\noverride def biasShape():Array[Int] = null\n// -------------------------------------------------\n+ override def bottomLayerOutputShape:(String, String, String) = {\n+ if(computedBottomLayerOutputShape == null) {\n+ val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).filter(l => !l.isInstanceOf[Data]).toList\n+ if(ret.size != 1) throw new LanguageException(\"Expected exactly 1 bottom non-Data layer for \" + param.getName)\n+ computedBottomLayerOutputShape = ret(0).outputShape\n+ }\n+ computedBottomLayerOutputShape\n+ }\n}\nclass ReLU(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] [MINOR] Bugfix for SoftMaxWithLoss layer in Caffe2DML
- This commit allows users to specify the bottom of SoftMaxWithLoss in
any order they prefer, thereby avoiding subtle bugs |
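The override filters Data layers out of the bottom list before taking the output shape, so the fix is order-independent by construction. A hypothetical Python rendering of the same logic (the real implementation is the Scala override in the diff; all helper names below are illustrative):

```python
# Hypothetical Python rendering of the Scala override above; helper names
# (get_bottom_layers, get_caffe_layer, is_data_layer) are illustrative only.
def bottom_layer_output_shape(layer_name, net):
    bottoms = [net.get_caffe_layer(b) for b in net.get_bottom_layers(layer_name)]
    non_data = [l for l in bottoms if not l.is_data_layer]
    if len(non_data) != 1:
        raise ValueError("Expected exactly 1 bottom non-Data layer for " + layer_name)
    return non_data[0].output_shape  # order of declared bottoms no longer matters
```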
49,737 | 12.08.2017 12:31:26 | 25,200 | dc4bfd95e893f924d80ca3af25c101495134fa77 | [HOTFIX] write stdout and stderr for perftests
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/run_perftest.py",
"new_path": "scripts/perftest/python/run_perftest.py",
"diff": "@@ -134,7 +134,7 @@ def algorithm_workflow(algo, exec_type, config_path, dml_file_name, action_mode,\nif exit_flag_success:\ntime = 'data_exists'\nelse:\n- time = exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup_args_dict)\n+ time = exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup_args_dict, config_path)\nwrite_success(time, temp_cwd)\nprint('{},{},{},{},{},{}'.format(algo, action_mode, intercept, mat_type, mat_shape, time))\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/utils_exec.py",
"new_path": "scripts/perftest/python/utils_exec.py",
"diff": "@@ -27,7 +27,7 @@ import re\n# Subprocess and log parsing related functions\n-def subprocess_exec(cmd_string, extract=None):\n+def subprocess_exec(cmd_string, log_file_path=None, extract=None):\n\"\"\"\nExecute the input string as subprocess\n@@ -38,18 +38,25 @@ def subprocess_exec(cmd_string, extract=None):\nBased on extract as time/dir we extract this information from\nthe logs accordingly\n+ log_file_path: String\n+ Path to write the log file\n+\nreturn: String\nBased on extract we return the relevant string\n\"\"\"\n# Debug\n# print(cmd_string)\n- proc1 = subprocess.Popen(shlex.split(cmd_string), stdout=subprocess.PIPE,\n+ exec_command = shlex.split(cmd_string)\n+ proc1 = subprocess.Popen(exec_command, stdout=subprocess.PIPE,\nstderr=subprocess.PIPE)\nerror_arr, out_arr = get_all_logs(proc1)\nstd_outs = out_arr + error_arr\nreturn_code = proc1.returncode\n+ if log_file_path is not None:\n+ write_logs(std_outs, log_file_path + '.log')\n+\nif return_code == 0:\nif extract == 'time':\nreturn_data = parse_time(std_outs)\n@@ -65,6 +72,14 @@ def subprocess_exec(cmd_string, extract=None):\nreturn return_data\n+def write_logs(std_outs, log_file_path):\n+ \"\"\"\n+ Write all logs to the specified location\n+ \"\"\"\n+ with open(log_file_path, 'w')as log:\n+ log.write(\"\\n\".join(std_outs))\n+\n+\ndef get_all_logs(process):\n\"\"\"\nBased on the subprocess capture logs\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/perftest/python/utils_misc.py",
"new_path": "scripts/perftest/python/utils_misc.py",
"diff": "@@ -166,7 +166,7 @@ def config_reader(read_path):\nreturn conf_file\n-def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup_args_dict):\n+def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup_args_dict, log_file_name=None):\n\"\"\"\nThis function is responsible of execution of input arguments via python sub process,\nWe also extract time obtained from the output of this subprocess\n@@ -186,6 +186,9 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup\nsup_args_dict: Dictionary\nSupplementary arguments required by the script\n+ log_file_name: String\n+ Path to write the logfile\n+\nreturn: String\nThe value of time parsed from the logs / error\n\"\"\"\n@@ -207,10 +210,7 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup\ncmd = [exec_script, spark_pre_args, '-f', algorithm, args, sup_args]\ncmd_string = ' '.join(cmd)\n- # Debug\n- # print(cmd_string)\n-\n- time = subprocess_exec(cmd_string, 'time')\n+ time = subprocess_exec(cmd_string, log_file_name, 'time')\nreturn time\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] write stdout and stderr for perftests
Closes #615 |
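The change threads the config path into exec_dml_and_parse_time as a log-file prefix; the capture itself reduces to a small pattern. A simplified runnable sketch (using communicate() instead of the script's streaming get_all_logs helper; names are placeholders):

```python
# Simplified sketch of the capture-and-persist pattern added above; uses
# communicate() instead of the script's streaming get_all_logs helper.
import shlex
import subprocess

def run_and_log(cmd_string, log_file_path=None):
    proc = subprocess.Popen(shlex.split(cmd_string),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    std_outs = out.decode().splitlines() + err.decode().splitlines()
    if log_file_path is not None:  # mirror the patch: write only when a path is given
        with open(log_file_path + '.log', 'w') as log:
            log.write("\n".join(std_outs))
    return proc.returncode, std_outs
```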
49,736 | 13.08.2017 13:42:30 | 25,200 | 81b9248fc700303c1542d709e18479bc57147b3a | [MINOR] Resolve race condition between locking of metastore_db of Scala
SparkSession and PySpark SparkSession when using SystemML MLContext API | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mlcontext.py",
"new_path": "src/main/python/systemml/mlcontext.py",
"diff": "@@ -26,6 +26,9 @@ util_methods = [ 'jvm_stdout', '_java2py', 'getHopDAG' ]\n__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods\nimport os\n+import numpy as np\n+import pandas as pd\n+import threading, time\ntry:\nimport py4j.java_gateway\n@@ -33,12 +36,16 @@ try:\nfrom pyspark import SparkContext\nfrom pyspark.conf import SparkConf\nimport pyspark.mllib.common\n+ # -----------------------------------------------------------------------------------\n+ # Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession\n+ from pyspark.sql import SparkSession\n+ SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1,2],[3,4]])))\n+ # -----------------------------------------------------------------------------------\nexcept ImportError:\nraise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')\nfrom .converters import *\nfrom .classloader import *\n-import threading, time\n_loadedSystemML = False\ndef _get_spark_context():\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Resolve race condition between locking of metastore_db of Scala
SparkSession and PySpark SparkSession when using SystemML MLContext API |
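The workaround amounts to touching a DataFrame at import time so the PySpark SparkSession initializes (and claims metastore_db) before any Scala-side session created later through the MLContext API. Shown standalone, these are the same lines the patch adds to mlcontext.py:

```python
# The import-time workaround from the patch, standalone: creating a DataFrame
# forces the PySpark SparkSession to initialize and lock metastore_db first.
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession

SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1, 2], [3, 4]])))
```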
49,768 | 14.08.2017 13:13:18 | 25,200 | 54e80989897a9cd85fbad48e0d57a42c10e09cbf | Fix label map while training from Caffe2DML | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mllearn/estimators.py",
"new_path": "src/main/python/systemml/mllearn/estimators.py",
"diff": "@@ -392,10 +392,14 @@ class BaseSystemMLClassifier(BaseSystemMLEstimator):\n\"\"\"\nif self.model != None:\nself.model.save(self.sc._jsc, outputDir, format, sep)\n- if self.le is not None:\n+\n+ labelMapping = None\n+ if hasattr(self, 'le') and self.le is not None:\nlabelMapping = dict(enumerate(list(self.le.classes_), 1))\n- else:\n+ elif hasattr(self, 'labelMap') and self.labelMap is not None:\nlabelMapping = self.labelMap\n+\n+ if labelMapping is not None:\nlStr = [ [ int(k), str(labelMapping[k]) ] for k in labelMapping ]\ndf = self.sparkSession.createDataFrame(lStr)\ndf.write.csv(outputDir + sep + 'labels.txt', mode='overwrite', header=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1742] Fix label map while training from Caffe2DML |
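The hasattr guards let save() handle both estimator flavors: ones fitted with a scikit-learn LabelEncoder (`le`) and ones carrying an explicit `labelMap` (as loaded Caffe2DML models do). A standalone sketch of the same fallback order; getattr is used here for brevity:

```python
# Standalone sketch of the fallback order used in save(): a fitted
# LabelEncoder wins, an explicit labelMap comes second, otherwise no
# labels.txt is written. Attribute names are taken from the diff above.
def resolve_label_mapping(model):
    if getattr(model, 'le', None) is not None:
        return dict(enumerate(list(model.le.classes_), 1))
    if getattr(model, 'labelMap', None) is not None:
        return model.labelMap
    return None
```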