Dataset columns:
author: int64 (658 to 755k)
date: stringlengths (19 to 19)
timezone: int64 (-46,800 to 43.2k)
hash: stringlengths (40 to 40)
message: stringlengths (5 to 490)
mods: list
language: stringclasses (20 values)
license: stringclasses (3 values)
repo: stringlengths (5 to 68)
original_message: stringlengths (12 to 491)
author: 49,717
date: 06.01.2017 10:32:29
timezone: 28,800
hash: 7a30925e6b366caaf5d8bbe0d2457c2e71858485
message: Implemented uack+/uac+ Closes
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.cu", "new_path": "src/main/cpp/kernels/SystemML.cu", "diff": "@@ -254,8 +254,12 @@ __global__ void reduce(double *g_idata, double *g_odata, unsigned int n)\n/**\n* Does a reduce (sum) over each row of the array.\n+ * This kernel must be launched with as many blocks as there are rows.\n* The intuition for this kernel is that each block does a reduction over a single row.\n- * The maximum numver\n+ * The maximum number of blocks that can launched (as of compute capability 3.0) is 2^31 - 1\n+ * This works out fine for SystemML, since the maximum elements in a Java array can be 2^31 - c (some small constant)\n+ * If the matrix is \"fat\" and \"short\", i.e. there are small number of rows and a large number of columns,\n+ * there could be under-utilization of the hardware.\n* @param g_idata input matrix stored in device memory\n* @param g_odata output vector of size [rows * 1] in device memory\n* @param rows number of rows in input matrix\n@@ -312,3 +316,32 @@ __global__ void reduce_row(double *g_idata, double *g_odata, unsigned int rows,\n}\n+/**\n+ * Does a column wise reduction.\n+ * The intuition is that there are as many global threads as there are columns\n+ * Each global thread is responsible for a single element in the output vector\n+ * This of course leads to a under-utilization of the GPU resources.\n+ * For cases, where the number of columns is small, there can be unused SMs\n+ * @param g_idata input matrix stored in device memory\n+ * @param g_odata output vector of size [1 * cols] in device memory\n+ * @param rows number of rows in input matrix\n+ * @param cols number of columns in input matrix\n+ */\n+extern \"C\"\n+__global__ void reduce_col(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols)\n+{\n+ unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;\n+ if (global_tid >= cols) {\n+ return;\n+ }\n+\n+ unsigned int i = global_tid;\n+ unsigned int grid_size = cols;\n+ double val = 0;\n+\n+ while (i < rows * cols) {\n+ val += g_idata[i];\n+ i += grid_size;\n+ }\n+ g_odata[global_tid] = val;\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/cpp/kernels/SystemML.ptx", "new_path": "src/main/cpp/kernels/SystemML.ptx", "diff": "@@ -1901,6 +1901,62 @@ BB11_31:\nret;\n}\n+ // .globl reduce_col\n+.visible .entry reduce_col(\n+ .param .u64 reduce_col_param_0,\n+ .param .u64 reduce_col_param_1,\n+ .param .u32 reduce_col_param_2,\n+ .param .u32 reduce_col_param_3\n+)\n+{\n+ .reg .pred %p<4>;\n+ .reg .b32 %r<11>;\n+ .reg .f64 %fd<10>;\n+ .reg .b64 %rd<9>;\n+\n+\n+ ld.param.u64 %rd2, [reduce_col_param_0];\n+ ld.param.u64 %rd3, [reduce_col_param_1];\n+ ld.param.u32 %r5, [reduce_col_param_2];\n+ ld.param.u32 %r6, [reduce_col_param_3];\n+ mov.u32 %r7, %ntid.x;\n+ mov.u32 %r8, %ctaid.x;\n+ mov.u32 %r9, %tid.x;\n+ mad.lo.s32 %r1, %r7, %r8, %r9;\n+ setp.ge.u32 %p1, %r1, %r6;\n+ @%p1 bra BB12_5;\n+\n+ cvta.to.global.u64 %rd1, %rd2;\n+ mul.lo.s32 %r2, %r6, %r5;\n+ mov.f64 %fd8, 0d0000000000000000;\n+ mov.f64 %fd9, %fd8;\n+ setp.ge.u32 %p2, %r1, %r2;\n+ @%p2 bra BB12_4;\n+\n+ mov.u32 %r10, %r1;\n+\n+BB12_3:\n+ mov.u32 %r3, %r10;\n+ mul.wide.u32 %rd4, %r3, 8;\n+ add.s64 %rd5, %rd1, %rd4;\n+ ld.global.f64 %fd6, [%rd5];\n+ add.f64 %fd9, %fd9, %fd6;\n+ add.s32 %r4, %r3, %r6;\n+ setp.lt.u32 %p3, %r4, %r2;\n+ mov.u32 %r10, %r4;\n+ mov.f64 %fd8, %fd9;\n+ @%p3 bra BB12_3;\n+\n+BB12_4:\n+ cvta.to.global.u64 %rd6, %rd3;\n+ mul.wide.u32 %rd7, %r1, 8;\n+ add.s64 %rd8, %rd6, %rd7;\n+ st.global.f64 [%rd8], 
%fd8;\n+\n+BB12_5:\n+ ret;\n+}\n+\n.func (.param .b64 func_retval0) __internal_accurate_pow(\n.param .b64 __internal_accurate_pow_param_0,\n.param .b64 __internal_accurate_pow_param_1\n@@ -1924,7 +1980,7 @@ BB11_31:\n}\nshr.u32 %r50, %r49, 20;\nsetp.ne.s32 %p1, %r50, 0;\n- @%p1 bra BB12_2;\n+ @%p1 bra BB13_2;\nmul.f64 %fd14, %fd12, 0d4350000000000000;\n{\n@@ -1938,13 +1994,13 @@ BB11_31:\nshr.u32 %r16, %r49, 20;\nadd.s32 %r50, %r16, -54;\n-BB12_2:\n+BB13_2:\nadd.s32 %r51, %r50, -1023;\nand.b32 %r17, %r49, -2146435073;\nor.b32 %r18, %r17, 1072693248;\nmov.b64 %fd133, {%r48, %r18};\nsetp.lt.u32 %p2, %r18, 1073127583;\n- @%p2 bra BB12_4;\n+ @%p2 bra BB13_4;\n{\n.reg .b32 %temp;\n@@ -1958,7 +2014,7 @@ BB12_2:\nmov.b64 %fd133, {%r19, %r21};\nadd.s32 %r51, %r50, -1022;\n-BB12_4:\n+BB13_4:\nadd.f64 %fd16, %fd133, 0d3FF0000000000000;\n// inline asm\nrcp.approx.ftz.f64 %fd15,%fd16;\n@@ -2124,13 +2180,13 @@ BB12_4:\nmov.b32 %f2, %r35;\nabs.f32 %f1, %f2;\nsetp.lt.f32 %p4, %f1, 0f4086232B;\n- @%p4 bra BB12_7;\n+ @%p4 bra BB13_7;\nsetp.lt.f64 %p5, %fd4, 0d0000000000000000;\nadd.f64 %fd130, %fd4, 0d7FF0000000000000;\nselp.f64 %fd134, 0d0000000000000000, %fd130, %p5;\nsetp.geu.f32 %p6, %f1, 0f40874800;\n- @%p6 bra BB12_7;\n+ @%p6 bra BB13_7;\nshr.u32 %r36, %r13, 31;\nadd.s32 %r37, %r13, %r36;\n@@ -2145,26 +2201,26 @@ BB12_4:\nmov.b64 %fd132, {%r44, %r43};\nmul.f64 %fd134, %fd131, %fd132;\n-BB12_7:\n+BB13_7:\n{\n.reg .b32 %temp;\nmov.b64 {%temp, %r45}, %fd134;\n}\nand.b32 %r46, %r45, 2147483647;\nsetp.ne.s32 %p7, %r46, 2146435072;\n- @%p7 bra BB12_9;\n+ @%p7 bra BB13_9;\n{\n.reg .b32 %temp;\nmov.b64 {%r47, %temp}, %fd134;\n}\nsetp.eq.s32 %p8, %r47, 0;\n- @%p8 bra BB12_10;\n+ @%p8 bra BB13_10;\n-BB12_9:\n+BB13_9:\nfma.rn.f64 %fd134, %fd134, %fd5, %fd134;\n-BB12_10:\n+BB13_10:\nst.param.f64 [func_retval0+0], %fd134;\nret;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java", "diff": "@@ -146,7 +146,7 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\nif(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET) && (_op == AggOp.SUM)) {\n// Only implemented methods for GPU\n- if (_op == AggOp.SUM && (_direction == Direction.RowCol || _direction == Direction.Row)){\n+ if (_op == AggOp.SUM && (_direction == Direction.RowCol || _direction == Direction.Row || _direction == Direction.Col)){\net = ExecType.GPU;\nk = 1;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -76,6 +76,8 @@ public class GPUInstructionParser extends InstructionParser\nString2GPUInstructionType.put( \"uak+\" , GPUINSTRUCTION_TYPE.AggregateUnary);\nString2GPUInstructionType.put( \"uar+\" , GPUINSTRUCTION_TYPE.AggregateUnary);\nString2GPUInstructionType.put( \"uark+\" , GPUINSTRUCTION_TYPE.AggregateUnary);\n+ String2GPUInstructionType.put( \"uac+\" , GPUINSTRUCTION_TYPE.AggregateUnary);\n+ String2GPUInstructionType.put( \"uack+\" , GPUINSTRUCTION_TYPE.AggregateUnary);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -1029,12 +1029,16 @@ public 
class LibMatrixCUDA {\nec.setScalarOutput(output, new DoubleObject(result));\nbreak;\n}\n- case REDUCTION_COL : {\n+ case REDUCTION_COL : { // The names are a bit misleading, REDUCTION_COL refers to the direction (reduce all elements in a column)\nreduceRow(in, out, rlen, clen);\nbreak;\n}\n+ case REDUCTION_ROW : {\n+ reduceCol(in, out, rlen, clen);\n+ break;\n+ }\n+\ncase REDUCTION_DIAG :\n- case REDUCTION_ROW :\nthrow new DMLRuntimeException(\"Internal Error - Row, Column and Diag summation not implemented yet\");\n}\nbreak;\n@@ -1172,6 +1176,14 @@ public class LibMatrixCUDA {\ncudaDeviceSynchronize();\n}\n+ private static void reduceCol(Pointer in, Pointer out, int rows, int cols) throws DMLRuntimeException {\n+ int[] tmp = getKernelParamsForReduceByCol(rows, cols);\n+ int blocks = tmp[0], threads = tmp[1], sharedMem = tmp[2];\n+ kernels.launchKernel(\"reduce_col\", new ExecutionConfig(blocks, threads, sharedMem),\n+ in, out, rows, cols);\n+ cudaDeviceSynchronize();\n+ }\n+\n/**\n* Get threads, blocks and shared memory for a reduce all operation\n* @param n size of input array\n@@ -1207,6 +1219,18 @@ public class LibMatrixCUDA {\nreturn new int[] {blocks, threads, sharedMemSize};\n}\n+ private static int[] getKernelParamsForReduceByCol(int rows, int cols) {\n+ int threads = Math.min(cols, MAX_THREADS);\n+ int blocks = cols/1024;\n+ if (cols % 1024 != 0) blocks++;\n+ int sharedMemSize = threads * Sizeof.DOUBLE;\n+ if (threads <= 32){\n+ sharedMemSize *=2;\n+ }\n+ return new int[] {blocks, threads, sharedMemSize};\n+ }\n+\n+\nprivate static int nextPow2(int x)\n{\n--x;\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-1039] Implemented uack+/uac+ Closes #331.
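The reduce_col kernel added in this commit assigns one global thread per output column; because the matrix is stored row-major, each thread sums its column by striding `cols` elements at a time, and the Java side launches roughly `ceil(cols / 1024)` blocks of up to 1024 threads. The standalone CUDA sketch below reproduces that pattern with a plain host harness for illustration; the `main` driver and the 1024-thread cap are assumptions here, since SystemML itself launches the kernel through JCuda (`kernels.launchKernel("reduce_col", ...)`).

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Column-wise sum: one global thread per output column, walking down its
// column of a row-major matrix by striding `cols` elements at a time.
// Mirrors the reduce_col kernel added in the diff above.
extern "C" __global__ void reduce_col(double *g_idata, double *g_odata,
                                      unsigned int rows, unsigned int cols) {
    unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (global_tid >= cols) return;

    double val = 0;
    for (unsigned int i = global_tid; i < rows * cols; i += cols)
        val += g_idata[i];
    g_odata[global_tid] = val;
}

int main() {
    const unsigned int rows = 3, cols = 4;
    double h_in[rows * cols], h_out[cols];
    for (unsigned int i = 0; i < rows * cols; ++i) h_in[i] = 1.0;  // every column sums to 3.0

    double *d_in, *d_out;
    cudaMalloc(&d_in,  rows * cols * sizeof(double));
    cudaMalloc(&d_out, cols * sizeof(double));
    cudaMemcpy(d_in, h_in, rows * cols * sizeof(double), cudaMemcpyHostToDevice);

    // Launch configuration analogous to getKernelParamsForReduceByCol:
    // threads per block capped at 1024, enough blocks to cover all columns.
    unsigned int threads = cols < 1024 ? cols : 1024;
    unsigned int blocks  = (cols + threads - 1) / threads;
    reduce_col<<<blocks, threads>>>(d_in, d_out, rows, cols);
    cudaDeviceSynchronize();

    cudaMemcpy(h_out, d_out, cols * sizeof(double), cudaMemcpyDeviceToHost);
    for (unsigned int c = 0; c < cols; ++c)
        printf("col %u sum = %f\n", c, h_out[c]);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
```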
author: 49,736
date: 08.01.2017 13:29:58
timezone: 28,800
hash: 13e1bd9301be1083caa8039adf43af4a8bf326c6
message: Avoid unnecessary sel+ operation in case of fused_maxpooling
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "diff": "@@ -140,7 +140,15 @@ public class UnaryOp extends Hop implements MultiThreadedHop\nif( optype == null )\nthrow new HopsException(\"Unknown UnaryCP lop type for UnaryOp operation type '\"+_op+\"'\");\n- UnaryCP unary1 = new UnaryCP(input.constructLops(), optype, getDataType(), getValueType());\n+ UnaryCP unary1 = null;\n+ if((_op == Hop.OpOp1.NROW || _op == Hop.OpOp1.NCOL || _op == Hop.OpOp1.LENGTH) &&\n+ input instanceof UnaryOp && ((UnaryOp) input).getOp() == OpOp1.SELP) {\n+ // Dimensions does not change during sel+ operation.\n+ // This case is helpful to avoid unnecessary sel+ operation for fused maxpooling.\n+ unary1 = new UnaryCP(input.getInput().get(0).constructLops(), optype, getDataType(), getValueType());\n+ }\n+ else\n+ unary1 = new UnaryCP(input.constructLops(), optype, getDataType(), getValueType());\nsetOutputDimensions(unary1);\nsetLineNumbers(unary1);\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-540] Avoid unnecessary sel+ operation in case of fused_maxpooling
author: 49,772
date: 09.01.2017 14:02:08
timezone: 28,800
hash: 55912a7dab5daa822c7dd5ffb39db2b2d939650f
message: Clean Up Python Documentation For Next Release Cleanup of Python documentation. Closes
mods:
[ { "change_type": "MODIFY", "old_path": "docs/README.md", "new_path": "docs/README.md", "diff": "@@ -27,6 +27,7 @@ Jekyll (and optionally Pygments) can be installed on the Mac OS in the following\n$ brew install ruby\n$ gem install jekyll\n$ gem install jekyll-redirect-from\n+ $ gem install bundler\n$ brew install python\n$ pip install Pygments\n$ gem install pygments.rb\n" }, { "change_type": "MODIFY", "old_path": "docs/beginners-guide-python.md", "new_path": "docs/beginners-guide-python.md", "diff": "@@ -54,7 +54,8 @@ If you already have an Apache Spark installation, you can skip this step.\n/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\nbrew tap caskroom/cask\nbrew install Caskroom/cask/java\n-brew install apache-spark\n+brew tap homebrew/versions\n+brew install apache-spark16\n```\n</div>\n<div data-lang=\"Linux\" markdown=\"1\">\n@@ -70,37 +71,60 @@ brew install apache-spark16\n### Install SystemML\n-We are working towards uploading the python package on pypi. Until then, please use following commands:\n+We are working towards uploading the python package on PyPi. Until then, please use following\n+commands:\n+<div class=\"codetabs\">\n+<div data-lang=\"Python 2\" markdown=\"1\">\n```bash\ngit checkout https://github.com/apache/incubator-systemml.git\ncd incubator-systemml\nmvn clean package -P distribution\npip install target/systemml-0.12.0-incubating-SNAPSHOT-python.tgz\n```\n-\n-The above commands will install Python package and place the corresponding Java binaries (along with algorithms) into the installed location.\n-To find the location of the downloaded Java binaries, use the following command:\n-\n+</div>\n+<div data-lang=\"Python 3\" markdown=\"1\">\n```bash\n-python -c 'import imp; import os; print os.path.join(imp.find_module(\"systemml\")[1], \"systemml-java\")'\n+git checkout https://github.com/apache/incubator-systemml.git\n+cd incubator-systemml\n+mvn clean package -P distribution\n+pip3 install target/systemml-0.12.0-incubating-SNAPSHOT-python.tgz\n```\n+</div>\n+</div>\n-Note: the user is free to either use the prepackaged Java binaries\n-or download them from [SystemML website](http://systemml.apache.org/download.html)\n-or build them from the [source](https://github.com/apache/incubator-systemml).\n-\n+### Uninstall SystemML\nTo uninstall SystemML, please use following command:\n+<div class=\"codetabs\">\n+<div data-lang=\"Python 2\" markdown=\"1\">\n```bash\n-pip uninstall systemml-incubating\n+pip uninstall systemml\n```\n+</div>\n+<div data-lang=\"Python 3\" markdown=\"1\">\n+```bash\n+pip3 uninstall systemml\n+```\n+</div>\n+</div>\n### Start Pyspark shell\n+<div class=\"codetabs\">\n+<div data-lang=\"Python 2\" markdown=\"1\">\n```bash\n-pyspark --master local[*]\n+pyspark\n```\n+</div>\n+<div data-lang=\"Python 3\" markdown=\"1\">\n+```bash\n+PYSPARK_PYTHON=python3 pyspark\n+```\n+</div>\n+</div>\n+\n+---\n## Matrix operations\n@@ -118,20 +142,20 @@ m4.sum(axis=1).toNumPy()\nOutput:\n-```bash\n+```python\narray([[-60.],\n[-60.],\n[-60.]])\n```\nLet us now write a simple script to train [linear regression](https://apache.github.io/incubator-systemml/algorithms-regression.html#linear-regression)\n-model: $ \\beta = solve(X^T X, X^T y) $. For simplicity, we will use direct-solve method and ignore regularization parameter as well as intercept.\n+model: $ \\beta = solve(X^T X, X^T y) $. 
For simplicity, we will use direct-solve method and ignore\n+regularization parameter as well as intercept.\n```python\nimport numpy as np\nfrom sklearn import datasets\nimport systemml as sml\n-from pyspark.sql import SQLContext\n# Load the diabetes dataset\ndiabetes = datasets.load_diabetes()\n# Use only one feature\n@@ -158,7 +182,10 @@ Output:\nResidual sum of squares: 25282.12\n```\n-We can improve the residual error by adding an intercept and regularization parameter. To do so, we will use `mllearn` API described in the next section.\n+We can improve the residual error by adding an intercept and regularization parameter. To do so, we\n+will use `mllearn` API described in the next section.\n+\n+---\n## Invoke SystemML's algorithms\n@@ -206,7 +233,7 @@ algorithm on digits datasets.\n```python\n# Scikit-learn way\n-from sklearn import datasets, neighbors\n+from sklearn import datasets\nfrom systemml.mllearn import LogisticRegression\nfrom pyspark.sql import SQLContext\nsqlCtx = SQLContext(sc)\n@@ -233,7 +260,7 @@ LogisticRegression score: 0.922222\nTo train the above algorithm on larger dataset, we can load the dataset into DataFrame and pass it to the `fit` method:\n```python\n-from sklearn import datasets, neighbors\n+from sklearn import datasets\nfrom systemml.mllearn import LogisticRegression\nfrom pyspark.sql import SQLContext\nimport pandas as pd\n@@ -245,7 +272,7 @@ X_digits = digits.data\ny_digits = digits.target\nn_samples = len(X_digits)\n# Split the data into training/testing sets and convert to PySpark DataFrame\n-df_train = sml.convertToLabeledDF(sqlContext, X_digits[:int(.9 * n_samples)], y_digits[:int(.9 * n_samples)])\n+df_train = sml.convertToLabeledDF(sqlCtx, X_digits[:int(.9 * n_samples)], y_digits[:int(.9 * n_samples)])\nX_test = sqlCtx.createDataFrame(pd.DataFrame(X_digits[int(.9 * n_samples):]))\nlogistic = LogisticRegression(sqlCtx)\nlogistic.fit(df_train)\n@@ -274,18 +301,18 @@ from pyspark.ml.feature import HashingTF, Tokenizer\nfrom pyspark.sql import SQLContext\nsqlCtx = SQLContext(sc)\ntraining = sqlCtx.createDataFrame([\n- (0L, \"a b c d e spark\", 1.0),\n- (1L, \"b d\", 2.0),\n- (2L, \"spark f g h\", 1.0),\n- (3L, \"hadoop mapreduce\", 2.0),\n- (4L, \"b spark who\", 1.0),\n- (5L, \"g d a y\", 2.0),\n- (6L, \"spark fly\", 1.0),\n- (7L, \"was mapreduce\", 2.0),\n- (8L, \"e spark program\", 1.0),\n- (9L, \"a e c l\", 2.0),\n- (10L, \"spark compile\", 1.0),\n- (11L, \"hadoop software\", 2.0)\n+ (0, \"a b c d e spark\", 1.0),\n+ (1, \"b d\", 2.0),\n+ (2, \"spark f g h\", 1.0),\n+ (3, \"hadoop mapreduce\", 2.0),\n+ (4, \"b spark who\", 1.0),\n+ (5, \"g d a y\", 2.0),\n+ (6, \"spark fly\", 1.0),\n+ (7, \"was mapreduce\", 2.0),\n+ (8, \"e spark program\", 1.0),\n+ (9, \"a e c l\", 2.0),\n+ (10, \"spark compile\", 1.0),\n+ (11, \"hadoop software\", 2.0)\n], [\"id\", \"text\", \"label\"])\ntokenizer = Tokenizer(inputCol=\"text\", outputCol=\"words\")\nhashingTF = HashingTF(inputCol=\"words\", outputCol=\"features\", numFeatures=20)\n@@ -293,10 +320,10 @@ lr = LogisticRegression(sqlCtx)\npipeline = Pipeline(stages=[tokenizer, hashingTF, lr])\nmodel = pipeline.fit(training)\ntest = sqlCtx.createDataFrame([\n- (12L, \"spark i j k\"),\n- (13L, \"l m n\"),\n- (14L, \"mapreduce spark\"),\n- (15L, \"apache hadoop\")], [\"id\", \"text\"])\n+ (12, \"spark i j k\"),\n+ (13, \"l m n\"),\n+ (14, \"mapreduce spark\"),\n+ (15, \"apache hadoop\")], [\"id\", \"text\"])\nprediction = model.transform(test)\nprediction.show()\n```\n@@ -304,27 +331,28 @@ 
prediction.show()\nOutput:\n```bash\n-+--+---------------+--------------------+--------------------+--------------------+---+----------+\n-|id| text| words| features| probability| ID|prediction|\n-+--+---------------+--------------------+--------------------+--------------------+---+----------+\n-|12| spark i j k|ArrayBuffer(spark...|(20,[5,6,7],[2.0,...|[0.99999999999975...|1.0| 1.0|\n-|13| l m n|ArrayBuffer(l, m, n)|(20,[8,9,10],[1.0...|[1.37552128844736...|2.0| 2.0|\n-|14|mapreduce spark|ArrayBuffer(mapre...|(20,[5,10],[1.0,1...|[0.99860290938153...|3.0| 1.0|\n-|15| apache hadoop|ArrayBuffer(apach...|(20,[9,14],[1.0,1...|[5.41688748236143...|4.0| 2.0|\n-+--+---------------+--------------------+--------------------+--------------------+---+----------+\n++-------+---+---------------+------------------+--------------------+--------------------+----------+\n+|__INDEX| id| text| words| features| probability|prediction|\n++-------+---+---------------+------------------+--------------------+--------------------+----------+\n+| 1.0| 12| spark i j k| [spark, i, j, k]|(20,[5,6,7],[2.0,...|[0.99999999999975...| 1.0|\n+| 2.0| 13| l m n| [l, m, n]|(20,[8,9,10],[1.0...|[1.37552128844736...| 2.0|\n+| 3.0| 14|mapreduce spark|[mapreduce, spark]|(20,[5,10],[1.0,1...|[0.99860290938153...| 1.0|\n+| 4.0| 15| apache hadoop| [apache, hadoop]|(20,[9,14],[1.0,1...|[5.41688748236143...| 2.0|\n++-------+---+---------------+------------------+--------------------+--------------------+----------+\n```\n+---\n+\n## Invoking DML/PyDML scripts using MLContext\nThe below example demonstrates how to invoke the algorithm [scripts/algorithms/MultiLogReg.dml](https://github.com/apache/incubator-systemml/blob/master/scripts/algorithms/MultiLogReg.dml)\nusing Python [MLContext API](https://apache.github.io/incubator-systemml/spark-mlcontext-programming-guide).\n```python\n-from sklearn import datasets, neighbors\n-from pyspark.sql import DataFrame, SQLContext\n+from sklearn import datasets\n+from pyspark.sql import SQLContext\nimport systemml as sml\nimport pandas as pd\n-import os, imp\nsqlCtx = SQLContext(sc)\ndigits = datasets.load_digits()\nX_digits = digits.data\n@@ -334,8 +362,8 @@ n_samples = len(X_digits)\nX_df = sqlCtx.createDataFrame(pd.DataFrame(X_digits[:int(.9 * n_samples)]))\ny_df = sqlCtx.createDataFrame(pd.DataFrame(y_digits[:int(.9 * n_samples)]))\nml = sml.MLContext(sc)\n-# Get the path of MultiLogReg.dml\n-scriptPath = os.path.join(imp.find_module(\"systemml\")[1], 'systemml-java', 'scripts', 'algorithms', 'MultiLogReg.dml')\n-script = sml.dml(scriptPath).input(X=X_df, Y_vec=y_df).output(\"B_out\")\n+# Run the MultiLogReg.dml script at the given URL\n+scriptUrl = \"https://raw.githubusercontent.com/apache/incubator-systemml/master/scripts/algorithms/MultiLogReg.dml\"\n+script = sml.dml(scriptUrl).input(X=X_df, Y_vec=y_df).output(\"B_out\")\nbeta = ml.execute(script).get('B_out').toNumPy()\n```\n" }, { "change_type": "MODIFY", "old_path": "docs/index.md", "new_path": "docs/index.md", "diff": "@@ -42,13 +42,11 @@ To download SystemML, visit the [downloads](http://systemml.apache.org/download)\n## Running SystemML\n+* **[Beginner's Guide For Python Users](beginners-guide-python)** - Beginner's Guide for Python users.\n* **[Spark MLContext](spark-mlcontext-programming-guide)** - Spark MLContext is a programmatic API\nfor running SystemML from Spark via Scala, Python, or Java.\n- * See the [Spark MLContext Programming Guide](spark-mlcontext-programming-guide) with the\n- following examples:\n- * [**Spark Shell 
(Scala)**](spark-mlcontext-programming-guide#spark-shell-example---new-api)\n- * [**Zeppelin Notebook (Scala)**](spark-mlcontext-programming-guide#zeppelin-notebook-example---linear-regression-algorithm---old-api)\n- * [**Jupyter Notebook (PySpark)**](spark-mlcontext-programming-guide#jupyter-pyspark-notebook-example---poisson-nonnegative-matrix-factorization---old-api)\n+ * [**Spark Shell Example (Scala)**](spark-mlcontext-programming-guide#spark-shell-example)\n+ * [**Jupyter Notebook Example (PySpark)**](spark-mlcontext-programming-guide#jupyter-pyspark-notebook-example---poisson-nonnegative-matrix-factorization)\n* **[Spark Batch](spark-batch-mode)** - Algorithms are automatically optimized to run across Spark clusters.\n* See [Invoking SystemML in Spark Batch Mode](spark-batch-mode) for detailed information.\n* **[Hadoop Batch](hadoop-batch-mode)** - Algorithms are automatically optimized when distributed across Hadoop clusters.\n@@ -62,16 +60,13 @@ machine in R-like and Python-like declarative languages.\n## Language Guides\n+* [Python API Reference](python-reference) - API Reference Guide for Python users.\n* [DML Language Reference](dml-language-reference) -\nDML is a high-level R-like declarative language for machine learning.\n* **PyDML Language Reference** **(Coming Soon)** -\nPyDML is a high-level Python-like declarative language for machine learning.\n* [Beginner's Guide to DML and PyDML](beginners-guide-to-dml-and-pydml) -\nAn introduction to the basics of DML and PyDML.\n-* [Beginner's Guide for Python users](beginners-guide-python) -\n-Beginner's Guide for Python users.\n-* [Reference Guide for Python users](python-reference) -\n-Reference Guide for Python users.\n## ML Algorithms\n" }, { "change_type": "MODIFY", "old_path": "docs/spark-mlcontext-programming-guide.md", "new_path": "docs/spark-mlcontext-programming-guide.md", "diff": "@@ -35,14 +35,10 @@ such as Scala, Java, and Python. As a result, it offers a convenient way to inte\nShell and from Notebooks such as Jupyter and Zeppelin.\n**NOTE: A new MLContext API has been redesigned for future SystemML releases. The old API is available\n-in all versions of SystemML but will be deprecated and removed, so please migrate to the new API.**\n+in previous versions of SystemML but is deprecated and will be removed soon, so please migrate to the new API.**\n-# Spark Shell Example - NEW API\n-\n-**NOTE: The new MLContext API will be available in future SystemML releases. It can be used\n-by building the project using Maven ('mvn clean package', or 'mvn clean package -P distribution').\n-For SystemML version 0.10.0 and earlier, please see the documentation regarding the old API.**\n+# Spark Shell Example\n## Start Spark Shell with SystemML\n@@ -1644,25 +1640,8 @@ scala> for (i <- 1 to 5) {\n# Jupyter (PySpark) Notebook Example - Poisson Nonnegative Matrix Factorization\n-Similar to the Scala API, SystemML also provides a Python MLContext API. 
In addition to the\n-regular `SystemML.jar` file, you'll need to install the Python API as follows:\n-\n- * Latest release:\n- * Python 2:\n-\n- ```\n- pip install systemml\n- # Bleeding edge: pip install git+git://github.com/apache/incubator-systemml.git#subdirectory=src/main/python\n- ```\n-\n- * Python 3:\n-\n- ```\n- pip3 install systemml\n- # Bleeding edge: pip3 install git+git://github.com/apache/incubator-systemml.git#subdirectory=src/main/python\n- ```\n- * Don't forget to download the `SystemML.jar` file, which can be found in the latest release, or\n- in a nightly build.\n+Similar to the Scala API, SystemML also provides a Python MLContext API. Before usage, you'll need\n+**[to install it first](beginners-guide-python#download--setup)**.\nHere, we'll explore the use of SystemML via PySpark in a [Jupyter notebook](http://jupyter.org/).\nThis Jupyter notebook example can be nicely viewed in a rendered state\n@@ -1671,17 +1650,18 @@ and can be [downloaded here](https://raw.githubusercontent.com/apache/incubator-\nFrom the directory with the downloaded notebook, start Jupyter with PySpark:\n- * Python 2:\n-\n- ```\n- PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS=\"notebook\" pyspark --master local[*] --driver-class-path SystemML.jar --jars SystemML.jar\n- ```\n-\n- * Python 3:\n-\n+<div class=\"codetabs\">\n+<div data-lang=\"Python 2\" markdown=\"1\">\n+```bash\n+PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS=\"notebook\" pyspark\n```\n- PYSPARK_PYTHON=python3 PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS=\"notebook\" pyspark --master local[*] --driver-class-path SystemML.jar --jars SystemML.jar\n+</div>\n+<div data-lang=\"Python 3\" markdown=\"1\">\n+```bash\n+PYSPARK_PYTHON=python3 PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS=\"notebook\" pyspark\n```\n+</div>\n+</div>\nThis will open Jupyter in a browser:\n@@ -1797,6 +1777,9 @@ plt.title('PNMF Training Loss')\n# Spark Shell Example - OLD API\n+### ** **NOTE: This API is old and has been deprecated.** **\n+**Please use the [new MLContext API](spark-mlcontext-programming-guide#spark-shell-example) instead.**\n+\n## Start Spark Shell with SystemML\nTo use SystemML with the Spark Shell, the SystemML jar can be referenced using the Spark Shell's `--jars` option.\n@@ -2216,11 +2199,13 @@ val (min, max, mean) = minMaxMean(sysMlMatrix, numRows, numCols, ml)\n</div>\n-\n-* * *\n+---\n# Zeppelin Notebook Example - Linear Regression Algorithm - OLD API\n+### ** **NOTE: This API is old and has been deprecated.** **\n+**Please use the [new MLContext API](spark-mlcontext-programming-guide#spark-shell-example) instead.**\n+\nNext, we'll consider an example of a SystemML linear regression algorithm run from Spark through an Apache Zeppelin notebook.\nInstructions to clone and build Zeppelin can be found at the [GitHub Apache Zeppelin](https://github.com/apache/incubator-zeppelin)\nsite. 
This example also will look at the Spark ML linear regression algorithm.\n@@ -2701,10 +2686,13 @@ Training time per iter: 0.2334166666666667 seconds\n{% endhighlight %}\n-* * *\n+---\n# Jupyter (PySpark) Notebook Example - Poisson Nonnegative Matrix Factorization - OLD API\n+### ** **NOTE: This API is old and has been deprecated.** **\n+**Please use the [new MLContext API](spark-mlcontext-programming-guide#jupyter-pyspark-notebook-example---poisson-nonnegative-matrix-factorization) instead.**\n+\nHere, we'll explore the use of SystemML via PySpark in a [Jupyter notebook](http://jupyter.org/).\nThis Jupyter notebook example can be nicely viewed in a rendered state\n[on GitHub](https://github.com/apache/incubator-systemml/blob/master/samples/jupyter-notebooks/SystemML-PySpark-Recommendation-Demo.ipynb),\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-1170] Clean Up Python Documentation For Next Release Cleanup of Python documentation. Closes #335.
author: 49,772
date: 09.01.2017 14:13:27
timezone: 28,800
hash: 2fa6141f339f3a24ae2537a90c7b3136c509ef45
message: Clean Up Python Distribution This cleans up the Python distribution package by removing the `scripts` folder that is included, as it is not used by internal code, will be confusing to users, and will cause confusion during release reviews. Closes
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/python/MANIFEST.in", "new_path": "src/main/python/MANIFEST.in", "diff": "include LICENSE\ninclude NOTICE\ninclude DISCLAIMER\n-include systemml/systemml-java/scripts/sparkDML.sh\nrecursive-include systemml/systemml-java *\n-recursive-include systemml/systemml-java/scripts/algorithms *\n-recursive-include systemml/systemml-java/scripts/datagen *\n-recursive-include systemml/systemml-java/scripts/utils *\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/post_setup.py", "new_path": "src/main/python/post_setup.py", "diff": "@@ -35,6 +35,10 @@ ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split(\"-\")[0]\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\nif platform.system() == \"Windows\":\n- os.rename(os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT + '.zip'), os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.zip'))\n+ os.rename(\n+ os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT + '.zip'),\n+ os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.zip'))\nelse:\n- os.rename(os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT + '.tar.gz'), os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.tgz'))\n+ os.rename(\n+ os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT + '.tar.gz'),\n+ os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.tgz'))\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/pre_setup.py", "new_path": "src/main/python/pre_setup.py", "diff": "#\n#-------------------------------------------------------------\n-import os, shutil\n+import os\n+import shutil\nimport fnmatch\npython_dir = 'systemml'\njava_dir='systemml-java'\n@@ -30,5 +31,5 @@ os.mkdir(java_dir_full_path)\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))\nfor file in os.listdir(os.path.join(root_dir, 'target')):\nif fnmatch.fnmatch(file, 'systemml-*-incubating-SNAPSHOT.jar'):\n- shutil.copyfile(os.path.join(root_dir, 'target', file), os.path.join(java_dir_full_path, file))\n-shutil.copytree(os.path.join(root_dir, 'scripts'), os.path.join(java_dir_full_path, 'scripts'))\n+ shutil.copyfile(os.path.join(root_dir, 'target', file),\n+ os.path.join(java_dir_full_path, file))\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/setup.py", "new_path": "src/main/python/setup.py", "diff": "@@ -51,7 +51,9 @@ PACKAGE_DATA = []\nfor path, subdirs, files in os.walk(java_dir_full_path):\nfor name in files:\nPACKAGE_DATA = PACKAGE_DATA + [ os.path.join(path, name).replace('./', '') ]\n-PACKAGE_DATA = PACKAGE_DATA + [os.path.join(python_dir, 'LICENSE'), os.path.join(python_dir, 'DISCLAIMER'), os.path.join(python_dir, 'NOTICE')]\n+PACKAGE_DATA = PACKAGE_DATA + [os.path.join(python_dir, 'LICENSE'),\n+ os.path.join(python_dir, 'DISCLAIMER'),\n+ os.path.join(python_dir, 'NOTICE')]\nsetup(\nname=ARTIFACT_NAME,\n@@ -59,14 +61,16 @@ setup(\ndescription='Apache SystemML is a distributed and declarative machine learning platform.',\nlong_description='''\n- Apache SystemML is an effort undergoing incubation at the Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.\n+ Apache SystemML is an effort undergoing incubation at the Apache Software Foundation (ASF),\n+ sponsored by the Apache Incubator PMC.\nWhile incubation status is not 
necessarily a reflection of the completeness\nor stability of the code, it does indicate that the project has yet to be\nfully endorsed by the ASF.\nApache SystemML provides declarative large-scale machine learning (ML) that aims at\nflexible specification of ML algorithms and automatic generation of hybrid runtime\n- plans ranging from single-node, in-memory computations, to distributed computations on Apache Hadoop and Apache Spark.\n+ plans ranging from single-node, in-memory computations, to distributed computations on Apache\n+ Hadoop and Apache Spark.\n''',\nurl='http://systemml.apache.org/',\nauthor='Apache SystemML',\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-1171] Clean Up Python Distribution This cleans up the Python distribution package by removing the `scripts` folder that is included, as it is not used by internal code, will be confusing to users, and will cause confusion during release reviews. Closes #336.
author: 49,717
date: 09.01.2017 17:06:15
timezone: 28,800
hash: b0fb707d2b515da89b9c0577a279b5cd2ae3e47a
message: [HOTFIX] Fixed gpu bias_add function for GPU. Metadata was being set incorrectly Closes
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "diff": "@@ -146,8 +146,8 @@ public class ConvolutionGPUInstruction extends GPUInstruction\nMatrixObject input = ec.getMatrixInputForGPUInstruction(_input1.getName());\nMatrixObject bias = ec.getMatrixInputForGPUInstruction(_input2.getName());\n- MatrixObject out = ec.getDenseMatrixOutputForGPUInstruction(_output.getName());\nec.setMetaData(_output.getName(), input.getNumRows(), input.getNumColumns());\n+ MatrixObject out = ec.getDenseMatrixOutputForGPUInstruction(_output.getName());\nLibMatrixCUDA.bias_add(input, bias, out);\n// release inputs/outputs\nec.releaseMatrixInputForGPUInstruction(_input1.getName());\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [HOTFIX] Fixed gpu bias_add function for GPU. Metadata was being set incorrectly Closes #338.
author: 49,736
date: 10.01.2017 19:04:58
timezone: 28,800
hash: 7af36f80b7b2726d3411eb308592dcb3ea00ccc3
message: [HOTFIX] Fixed javadoc errors and added fused CP conv2d + bias_add
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "@@ -137,14 +137,27 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nthrow new HopsException(\"Incorrect number of inputs for \" + op.name());\n}\n- Lop in = null;\n+ Lop in = null; Lop in2 = null;\nOperationTypes lopOp = HopsConv2Lops.get(op);\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n+ ArrayList<Hop> inputs1 = inputs;\nif(op == ConvOp.MAX_POOLING && et == ExecType.CP && inputs.get(0) instanceof UnaryOp\n&& ((UnaryOp) inputs.get(0)).getOp() == OpOp1.SELP) {\nin = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\n+ else if(op == ConvOp.BIAS_ADD && et == ExecType.CP && inputs.get(0) instanceof ConvolutionOp\n+ && ((ConvolutionOp) inputs.get(0)).getOp() == ConvOp.DIRECT_CONV2D) {\n+ lopOp = OperationTypes.DIRECT_CONV2D_BIAS_ADD;\n+\n+ // the first lop is image\n+ in = inputs.get(0).getInput().get(0).constructLops();\n+ // the second lop is bias\n+ in2 = inputs.get(1).constructLops();\n+\n+ // Use the inputs from conv2d rather than bias_add\n+ inputs1 = inputs.get(0).getInput();\n+ }\nelse {\nin = inputs.get(0).constructLops();\n}\n@@ -153,14 +166,18 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nsetLineNumbers(transform1);\nin.addOutput(transform1);\n+ if(in2 != null) {\n+ transform1.addInput(in2);\n+ in2.addOutput(transform1);\n+ }\n+\n// stride1, stride2, padding1, padding2\n// input_shape1, input_shape2, input_shape3, input_shape4,\n// filter_shape1, filter_shape2, filter_shape3, filter_shape4\n- for( int i=1; i < inputs.size(); i++ )\n+ for( int i=1; i < inputs1.size(); i++ )\n{\n- Lop ltmp = inputs.get(i).constructLops();\n+ Lop ltmp = inputs1.get(i).constructLops();\ntransform1.addInput(ltmp);\n- //if(i == 1 && expectedNumInputs == 14)\nltmp.addOutput(transform1);\n}\ntransform1.setLevel(); //force order of added lops\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/ConvolutionTransform.java", "new_path": "src/main/java/org/apache/sysml/lops/ConvolutionTransform.java", "diff": "@@ -32,7 +32,7 @@ public class ConvolutionTransform extends Lop\npublic enum OperationTypes {\nMAX_POOLING, MAX_POOLING_BACKWARD, RELU_MAX_POOLING, RELU_BACKWARD,\nDIRECT_CONV2D, DIRECT_CONV2D_BACKWARD_FILTER, DIRECT_CONV2D_BACKWARD_DATA,\n- BIAS_ADD\n+ BIAS_ADD, DIRECT_CONV2D_BIAS_ADD\n};\nprivate OperationTypes operation = null;\n@@ -121,6 +121,9 @@ public class ConvolutionTransform extends Lop\ncase DIRECT_CONV2D:\nreturn \"conv2d\";\n+ case DIRECT_CONV2D_BIAS_ADD:\n+ return \"conv2d_bias_add\";\n+\ncase BIAS_ADD:\nreturn \"bias_add\";\n@@ -163,66 +166,57 @@ public class ConvolutionTransform extends Lop\n}\n}\n- //CP instructions\n- // stride1, stride2, padding1, padding2\n- // input_shape1, input_shape2, input_shape3, input_shape4,\n- // filter_shape1, filter_shape2, filter_shape3, filter_shape4,\n+ // Used by maxpool\npublic String getInstructions(String input, String stride1, String stride2, String padding1, String padding2,\nString input_shape1, String input_shape2, String input_shape3, String input_shape4,\nString filter_shape1, String filter_shape2, String filter_shape3, String filter_shape4,\nString output) throws LopsException {\n- //only used for im2col and col2im\nStringBuilder sb = new StringBuilder();\n- sb.append( getExecType() );\n-\n- sb.append( OPERAND_DELIMITOR );\n- 
sb.append( getOpcode() );\n- sb.append( OPERAND_DELIMITOR );\n+ appendOpcode(sb);\nsb.append( getInputs().get(0).prepInputOperand(input));\n-\n- //rows, cols, byrow\n- String[] inputX = new String[]{stride1, stride2, padding1, padding2,\n- input_shape1, input_shape2, input_shape3, input_shape4,\n- filter_shape1, filter_shape2, filter_shape3, filter_shape4};\n- for( int i=1; i<=(inputX.length); i++ ) {\n- Lop ltmp = getInputs().get(i);\n- sb.append( OPERAND_DELIMITOR );\n- sb.append( ltmp.prepScalarInputOperand(getExecType()));\n+ appendOperands(1, 13, output, sb);\n+ return sb.toString();\n}\n- //output\n- sb.append( OPERAND_DELIMITOR );\n- sb.append( this.prepOutputOperand(output));\n-\n- //append degree of parallelism\n- if( getExecType()==ExecType.CP ) {\n+ // Used by conv2d*, maxpool_bwd\n+ public String getInstructions(String input, String dout, String stride1, String stride2, String padding1, String padding2,\n+ String input_shape1, String input_shape2, String input_shape3, String input_shape4,\n+ String filter_shape1, String filter_shape2, String filter_shape3, String filter_shape4,\n+ String output) throws LopsException {\n+ StringBuilder sb = new StringBuilder();\n+ appendOpcode(sb);\n+ sb.append( getInputs().get(0).prepInputOperand(input));\nsb.append( OPERAND_DELIMITOR );\n- sb.append( numThreads );\n- }\n-\n+ sb.append( getInputs().get(1).prepInputOperand(dout));\n+ appendOperands(2, 14, output, sb);\nreturn sb.toString();\n}\n- public String getInstructions(String input, String dout, String stride1, String stride2, String padding1, String padding2,\n+ // Used by fused conv2d+bias_add\n+ public String getInstructions(String input, String bias, String filter, String stride1, String stride2, String padding1, String padding2,\nString input_shape1, String input_shape2, String input_shape3, String input_shape4,\nString filter_shape1, String filter_shape2, String filter_shape3, String filter_shape4,\nString output) throws LopsException {\n- //only used for im2col and col2im\nStringBuilder sb = new StringBuilder();\n- sb.append( getExecType() );\n-\n+ appendOpcode(sb);\n+ sb.append( getInputs().get(0).prepInputOperand(input));\nsb.append( OPERAND_DELIMITOR );\n- sb.append( getOpcode() );\n+ sb.append( getInputs().get(1).prepInputOperand(bias));\nsb.append( OPERAND_DELIMITOR );\n- sb.append( getInputs().get(0).prepInputOperand(input));\n+ sb.append( getInputs().get(2).prepInputOperand(filter));\n+ appendOperands(3, 15, output, sb);\n+ return sb.toString();\n+ }\n+ public void appendOpcode(StringBuilder sb) {\n+ sb.append( getExecType() );\nsb.append( OPERAND_DELIMITOR );\n- sb.append( getInputs().get(1).prepInputOperand(dout));\n+ sb.append( getOpcode() );\n+ sb.append( OPERAND_DELIMITOR );\n+ }\n- String[] inputX = new String[]{input, dout, stride1, stride2, padding1, padding2,\n- input_shape1, input_shape2, input_shape3, input_shape4,\n- filter_shape1, filter_shape2, filter_shape3, filter_shape4};\n- for( int i=2; i < inputX.length; i++ ) {\n+ public void appendOperands(int startInputIndex, int endInputIndex, String output, StringBuilder sb) {\n+ for( int i=startInputIndex; i < endInputIndex; i++ ) {\nLop ltmp = getInputs().get(i);\nsb.append( OPERAND_DELIMITOR );\nsb.append( ltmp.prepScalarInputOperand(getExecType()));\n@@ -237,8 +231,6 @@ public class ConvolutionTransform extends Lop\nsb.append( OPERAND_DELIMITOR );\nsb.append( numThreads );\n}\n-\n- return sb.toString();\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/Lop.java", "new_path": 
"src/main/java/org/apache/sysml/lops/Lop.java", "diff": "@@ -647,7 +647,6 @@ public abstract class Lop\nthrow new LopsException(this.printErrorLocation() + \"Should never be invoked in Baseclass\");\n}\n- // For pooling backward\npublic String getInstructions(String input, String dout, String stride1, String stride2, String padding1, String padding2,\nString input_shape1, String input_shape2, String input_shape3, String input_shape4,\nString filter_shape1, String filter_shape2, String filter_shape3, String filter_shape4,\n@@ -655,6 +654,13 @@ public abstract class Lop\nthrow new LopsException(this.printErrorLocation() + \"Should never be invoked in Baseclass\");\n}\n+ public String getInstructions(String input, String bias, String dout, String stride1, String stride2, String padding1, String padding2,\n+ String input_shape1, String input_shape2, String input_shape3, String input_shape4,\n+ String filter_shape1, String filter_shape2, String filter_shape3, String filter_shape4,\n+ String output) throws LopsException {\n+ throw new LopsException(this.printErrorLocation() + \"Should never be invoked in Baseclass\");\n+ }\n+\npublic String getInstructions(int input, int rowl, int rowu,\nint coll, int colu, int leftRowDim,\nint leftColDim, int output) throws LopsException {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "new_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "diff": "@@ -1528,6 +1528,26 @@ public class Dag<N extends Lop>\nnode.getInputs().get(13).getOutputParameters().getLabel(),\nnode.getOutputParameters().getLabel());\n}\n+ else if (node.getInputs().size() == 15) {\n+ // Used for fused conv2d_bias_add\n+ inst_string = node.getInstructions(\n+ node.getInputs().get(0).getOutputParameters().getLabel(),\n+ node.getInputs().get(1).getOutputParameters().getLabel(),\n+ node.getInputs().get(2).getOutputParameters().getLabel(),\n+ node.getInputs().get(3).getOutputParameters().getLabel(),\n+ node.getInputs().get(4).getOutputParameters().getLabel(),\n+ node.getInputs().get(5).getOutputParameters().getLabel(),\n+ node.getInputs().get(6).getOutputParameters().getLabel(),\n+ node.getInputs().get(7).getOutputParameters().getLabel(),\n+ node.getInputs().get(8).getOutputParameters().getLabel(),\n+ node.getInputs().get(9).getOutputParameters().getLabel(),\n+ node.getInputs().get(10).getOutputParameters().getLabel(),\n+ node.getInputs().get(11).getOutputParameters().getLabel(),\n+ node.getInputs().get(12).getOutputParameters().getLabel(),\n+ node.getInputs().get(13).getOutputParameters().getLabel(),\n+ node.getInputs().get(14).getOutputParameters().getLabel(),\n+ node.getOutputParameters().getLabel());\n+ }\nelse {\nthrow new LopsException(node.printErrorLocation() + \"Node with \" + node.getInputs().size() + \" inputs is not supported in CP yet! 
\\n\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "diff": "@@ -225,6 +225,7 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"maxpooling\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"maxpooling_backward\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"conv2d\" , CPINSTRUCTION_TYPE.Convolution);\n+ String2CPInstructionType.put( \"conv2d_bias_add\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"conv2d_backward_filter\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"conv2d_backward_data\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"bias_add\" , CPINSTRUCTION_TYPE.Convolution);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "diff": "@@ -21,7 +21,6 @@ package org.apache.sysml.runtime.instructions.cp;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n-\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -36,7 +35,8 @@ import org.apache.sysml.runtime.util.ConvolutionUtils;\npublic class ConvolutionCPInstruction extends UnaryCPInstruction {\n- private CPOperand _in2; // used for pooling backward\n+ private CPOperand _in2;\n+ private CPOperand _in3;\nprivate ArrayList<CPOperand> _input_shape;\nprivate ArrayList<CPOperand> _filter_shape;\nprivate ArrayList<CPOperand> _stride = new ArrayList<CPOperand>();\n@@ -83,6 +83,22 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n_numThreads = numThreads;\n}\n+ public ConvolutionCPInstruction(CPOperand in, CPOperand in2, CPOperand in3, CPOperand out, String opcode,\n+ String istr, ArrayList<CPOperand> stride,\n+ ArrayList<CPOperand> padding, ArrayList<CPOperand> input_shape,\n+ ArrayList<CPOperand> filter_shape, int numThreads) {\n+ super(new ReorgOperator(SwapIndex.getSwapIndexFnObject()), in, out,\n+ opcode, istr);\n+ _in2 = in2;\n+ _in3 = in3;\n+ _cptype = CPINSTRUCTION_TYPE.Convolution;\n+ _stride = stride;\n+ _padding = padding;\n+ _input_shape = input_shape;\n+ _filter_shape = filter_shape;\n+ _numThreads = numThreads;\n+ }\n+\npublic static ConvolutionCPInstruction parseInstruction(String str)\nthrows DMLRuntimeException {\nCPOperand in = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n@@ -153,6 +169,39 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nreturn new ConvolutionCPInstruction(in, in2, out, opcode, str, stride,\npadding, input_shape, filter_shape, k);\n}\n+ else if (opcode.equalsIgnoreCase(\"conv2d_bias_add\")) {\n+ InstructionUtils.checkNumFields(parts, 17);\n+ // dout, stride1, stride2, padding1, padding2\n+ // input_shape1, input_shape2, input_shape3, input_shape4,\n+ // filter_shape1, filter_shape2, filter_shape3, filter_shape4, k\n+ in.split(parts[1]);\n+ CPOperand in2 = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n+ in2.split(parts[2]);\n+ CPOperand in3 = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n+ in3.split(parts[3]);\n+ out.split(parts[16]);\n+\n+ ArrayList<CPOperand> stride = new ArrayList<CPOperand>();\n+ 
ArrayList<CPOperand> padding = new ArrayList<CPOperand>();\n+ ArrayList<CPOperand> input_shape = new ArrayList<CPOperand>();\n+ ArrayList<CPOperand> filter_shape = new ArrayList<CPOperand>();\n+ stride.add(new CPOperand(parts[4]));\n+ stride.add(new CPOperand(parts[5]));\n+ padding.add(new CPOperand(parts[6]));\n+ padding.add(new CPOperand(parts[7]));\n+ input_shape.add(new CPOperand(parts[8]));\n+ input_shape.add(new CPOperand(parts[9]));\n+ input_shape.add(new CPOperand(parts[10]));\n+ input_shape.add(new CPOperand(parts[11]));\n+ filter_shape.add(new CPOperand(parts[12]));\n+ filter_shape.add(new CPOperand(parts[13]));\n+ filter_shape.add(new CPOperand(parts[14]));\n+ filter_shape.add(new CPOperand(parts[15]));\n+ int k = Integer.parseInt(parts[17]);\n+\n+ return new ConvolutionCPInstruction(in, in2, in3, out, opcode, str, stride,\n+ padding, input_shape, filter_shape, k);\n+ }\nelse if (opcode.equalsIgnoreCase(\"bias_add\") || opcode.equals(\"relu_backward\")) {\nInstructionUtils.checkNumFields(parts, 4);\nin.split(parts[1]);\n@@ -194,7 +243,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nec.setMatrixOutput(getOutputVariableName(), outputBlock);\n}\n- public void processBiasInstruction(ExecutionContext ec) throws DMLRuntimeException {\n+ public void processBiasAddInstruction(ExecutionContext ec) throws DMLRuntimeException {\nMatrixBlock outputBlock = null;\nMatrixBlock input = ec.getMatrixInput(input1.getName());\nMatrixBlock bias = ec.getMatrixInput(_in2.getName());\n@@ -227,7 +276,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\npublic void processInstruction(ExecutionContext ec)\nthrows DMLRuntimeException {\nif (instOpcode.equalsIgnoreCase(\"bias_add\")) {\n- processBiasInstruction(ec);\n+ processBiasAddInstruction(ec);\nreturn;\n}\nelse if (instOpcode.equalsIgnoreCase(\"relu_backward\")) {\n@@ -289,6 +338,21 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\nec.releaseMatrixInput(_in2.getName());\n}\n+ else if (instOpcode.equalsIgnoreCase(\"conv2d_bias_add\")) {\n+ MatrixBlock filter = ec.getMatrixInput(_in3.getName());\n+ MatrixBlock bias = ec.getMatrixInput(_in2.getName());\n+ if((filter.isEmptyBlock() || matBlock.isEmptyBlock()) && bias.isEmptyBlock()) {\n+ outputBlock = new MatrixBlock(N, K*P*Q, true, 0);\n+ }\n+ else {\n+ outputBlock = getDenseOutputBlock(ec, N, K*P*Q);\n+ if(!bias.isEmptyBlock())\n+ params.bias = bias;\n+ LibMatrixDNN.conv2d(matBlock, filter, outputBlock, params);\n+ }\n+ ec.releaseMatrixInput(_in3.getName());\n+ ec.releaseMatrixInput(_in2.getName());\n+ }\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_backward_filter\")) {\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\nif(dout.isEmptyBlock() || matBlock.isEmptyBlock()) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java", "diff": "@@ -34,6 +34,7 @@ public class ConvolutionParameters {\nMatrixBlock input1; MatrixBlock input2; MatrixBlock output;\n+ public MatrixBlock bias;\npublic int [] start_indexes_h, end_indexes_h, start_indexes_w, end_indexes_w;\nprivate int convertToInt(long val) throws DMLRuntimeException {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -126,10 +126,6 @@ public class 
LibMatrixCUDA {\ndstTensorDesc = allocateTensorDescriptor(N, K, P, Q);\nfilterDesc = allocateFilterDescriptor(K, C, R, S);\n- // Allocate data\n- // (Pointer) gpuCtx.prepare(image, true, true);\n- // (Pointer) gpuCtx.prepare(filter, true, true);\n-\nPointer imagePointer = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\nPointer filterPointer = ((JCudaObject)filter.getGPUObject()).jcudaDenseMatrixPtr;\nPointer dstPointer = ((JCudaObject)outputBlock.getGPUObject()).jcudaDenseMatrixPtr;\n@@ -245,10 +241,10 @@ public class LibMatrixCUDA {\n/**\n* This method computes the backpropagation errors for previous layer of relu operation\n*\n- * @param input\n- * @param dout\n- * @param outputBlock\n- * @throws DMLRuntimeException\n+ * @param input input image\n+ * @param dout next layer error propogation\n+ * @param outputBlock output\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void reluBackward(MatrixObject input, MatrixObject dout, MatrixObject outputBlock) throws DMLRuntimeException {\nif(isInSparseFormat(input)) {\n@@ -273,10 +269,10 @@ public class LibMatrixCUDA {\n* output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout)\n* This operation is often followed by conv2d and hence we have introduced bias_add(input, bias) built-in function\n*\n- * @param input\n- * @param bias\n- * @param outputBlock\n- * @throws DMLRuntimeException\n+ * @param input input image\n+ * @param bias bias\n+ * @param outputBlock output\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void biasAdd(MatrixObject input, MatrixObject bias, MatrixObject outputBlock) throws DMLRuntimeException {\nif(isInSparseFormat(input)) {\n@@ -320,7 +316,7 @@ public class LibMatrixCUDA {\n* @param stride_w stride width\n* @param P output activation height\n* @param Q output activation width\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void conv2dBackwardFilter(MatrixObject image, MatrixObject dout,\nMatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n@@ -501,8 +497,8 @@ public class LibMatrixCUDA {\n* Hence, we compute only the upper triangular matrix and copy this partial\n* result down to lower triangular matrix once.\n*\n- * @param ret\n- * @throws DMLRuntimeException\n+ * @param ret upper triangular matrix\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void copyUpperToLowerTriangle(MatrixObject ret) throws DMLRuntimeException {\nif(isInSparseFormat(ret)) {\n@@ -1185,7 +1181,7 @@ public class LibMatrixCUDA {\n* @param in {@link Pointer} to matrix in device memory\n* @param n size of array\n* @return the reduced value\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static double reduceAll(String kernelFunction, Pointer in, int n) throws DMLRuntimeException {\nint[] tmp = getKernelParamsForReduceAll(n);\n@@ -1218,7 +1214,7 @@ public class LibMatrixCUDA {\n* @param out {@link Pointer} to output matrix in device memory (size - rows * 1)\n* @param rows number of rows in input matrix\n* @param cols number of columns in input matrix\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void reduceRow(String kernelFunction, Pointer in, Pointer out, int rows, int cols) throws DMLRuntimeException {\nint[] tmp = getKernelParamsForReduceByRow(rows, cols);\n@@ -1236,7 +1232,7 @@ public class LibMatrixCUDA {\n* @param out 
{@link Pointer} to output matrix in device memory (size - 1 * cols)\n* @param rows number of rows in input matrix\n* @param cols number of columns in input matrix\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void reduceCol(String kernelFunction, Pointer in, Pointer out, int rows, int cols) throws DMLRuntimeException {\nint[] tmp = getKernelParamsForReduceByCol(rows, cols);\n@@ -1328,7 +1324,7 @@ public class LibMatrixCUDA {\n* @param stride_w stride width\n* @param P output activation height\n* @param Q output activation width\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void conv2dBackwardData(MatrixObject filter, MatrixObject dout,\nMatrixObject output, int N, int C, int H, int W, int K, int R,\n@@ -1632,12 +1628,12 @@ public class LibMatrixCUDA {\n/**\n* Utility to launch binCellScalarOp kernel\n*\n- * @param ec\n- * @param in\n- * @param outputName\n- * @param isInputTransposed\n- * @param op\n- * @throws DMLRuntimeException\n+ * @param ec execution context\n+ * @param in input matrix\n+ * @param outputName output variable name\n+ * @param isInputTransposed true if input is transposed\n+ * @param op operator\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void launchBinCellOpKernel(ExecutionContext ec, MatrixObject in, String outputName, boolean isInputTransposed,\nScalarOperator op) throws DMLRuntimeException {\n@@ -1665,14 +1661,14 @@ public class LibMatrixCUDA {\n/**\n* Utility to launch binCellOp kernel\n*\n- * @param ec\n- * @param in1\n- * @param in2\n- * @param outputName\n- * @param isLeftTransposed\n- * @param isRightTransposed\n- * @param op\n- * @throws DMLRuntimeException\n+ * @param ec execution context\n+ * @param in1 left input matrix\n+ * @param in2 right input matrix\n+ * @param outputName output variable name\n+ * @param isLeftTransposed true if left matrix is transposed\n+ * @param isRightTransposed true if right matrix is transposed\n+ * @param op operator\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void launchBinCellOpKernel(ExecutionContext ec, MatrixObject in1, MatrixObject in2,\nString outputName, boolean isLeftTransposed, boolean isRightTransposed, BinaryOperator op) throws DMLRuntimeException {\n@@ -1759,10 +1755,10 @@ public class LibMatrixCUDA {\n/**\n* Performs a deep device copy of input matrix\n*\n- * @param ec\n- * @param src\n- * @param outputName\n- * @throws DMLRuntimeException\n+ * @param ec execution context\n+ * @param src source matrix\n+ * @param outputName destination variable name\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void deviceCopy(ExecutionContext ec, MatrixObject src, String outputName) throws DMLRuntimeException {\nif(isInSparseFormat(src)) {\n@@ -1821,11 +1817,11 @@ public class LibMatrixCUDA {\n/**\n* Performs a deep copy of input device double pointer corresponding to matrix\n*\n- * @param src\n- * @param dest\n- * @param rlen\n- * @param clen\n- * @throws DMLRuntimeException\n+ * @param src source matrix\n+ * @param dest destination matrix\n+ * @param rlen number of rows\n+ * @param clen number of columns\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void deviceCopy(Pointer src, Pointer dest, int rlen, int clen) throws DMLRuntimeException {\nkernels.launchKernel(\"dense_matrix_copy\",\n@@ -1933,15 +1929,15 @@ public class LibMatrixCUDA {\n* C = 
alpha* op( A ) + beta* op ( B )\n* where op = transpose or not (specified by isLeftTransposed and isRightTransposed).\n*\n- * @param ec\n- * @param in1\n- * @param in2\n- * @param outputName\n- * @param isLeftTransposed\n- * @param isRightTransposed\n- * @param alpha\n- * @param beta\n- * @throws DMLRuntimeException\n+ * @param ec execution context\n+ * @param in1 left input matrix\n+ * @param in2 right input matrix\n+ * @param outputName output variable name\n+ * @param isLeftTransposed true if left matrix is transposed\n+ * @param isRightTransposed true if right matrix is transposed\n+ * @param alpha alpha\n+ * @param beta beta\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void dgeam(ExecutionContext ec, MatrixObject in1, MatrixObject in2, String outputName,\nboolean isLeftTransposed, boolean isRightTransposed, double alpha, double beta) throws DMLRuntimeException {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -132,8 +132,8 @@ public class LibMatrixDNN {\n* @param filter filter used in conv2d\n* @param dout errors from next layer\n* @param outputBlock output errors\n- * @param params\n- * @throws DMLRuntimeException\n+ * @param params convolution parameters\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void conv2dBackwardData(MatrixBlock filter, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = filter;\n@@ -162,11 +162,11 @@ public class LibMatrixDNN {\n/**\n* This method computes the backpropogation errors for filter of convolution operation\n*\n- * @param image input image\n+ * @param input input image\n* @param dout errors from next layer\n* @param outputBlock output errors\n- * @param params\n- * @throws DMLRuntimeException\n+ * @param params convolution parameters\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void conv2dBackwardFilter(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\n@@ -194,9 +194,9 @@ public class LibMatrixDNN {\n/**\n* Performs the operation: ret += elem\n- * @param ret\n- * @param elem\n- * @throws DMLRuntimeException\n+ * @param ret left and output matrix\n+ * @param elem right matrix\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void elementWiseInPlaceAddition(MatrixBlock ret, MatrixBlock elem) throws DMLRuntimeException {\nif(ret.getNumRows() != elem.getNumRows() || ret.getNumColumns() != elem.getNumColumns()) {\n@@ -225,9 +225,10 @@ public class LibMatrixDNN {\n/**\n* Performs the operation: ret += t(elem)\n- * @param ret\n- * @param elem\n- * @throws DMLRuntimeException\n+ * @param ret left and output matrix\n+ * @param elem right untransposed matrix\n+ * @param params convolution parameters\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate static void elementWiseInPlaceTransposedAddition(MatrixBlock ret, MatrixBlock elem) throws DMLRuntimeException {\nif(ret.getNumRows() != elem.getNumColumns() || ret.getNumColumns() != elem.getNumRows()) {\n@@ -376,11 +377,11 @@ public class LibMatrixDNN {\n/**\n* This method computes the backpropogation errors for previous layer of maxpooling operation\n*\n- * @param input\n- * @param dout\n- * @param outputBlock\n- * @param 
params\n- * @throws DMLRuntimeException\n+ * @param input input matrix\n+ * @param dout dout matrix\n+ * @param outputBlock output matrix\n+ * @param params convolution parameters\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void maxpoolingBackward(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\n@@ -594,11 +595,11 @@ public class LibMatrixDNN {\n/**\n* This method computes the backpropagation errors for previous layer of relu operation\n*\n- * @param input\n- * @param dout\n- * @param outputBlock\n- * @param numThreads\n- * @throws DMLRuntimeException\n+ * @param input input matrix\n+ * @param dout errors from next layer\n+ * @param outputBlock output matrix\n+ * @param numThreads number of threads\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void reluBackward(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, int numThreads) throws DMLRuntimeException {\nint N = input.getNumRows();\n@@ -668,11 +669,11 @@ public class LibMatrixDNN {\n* output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout)\n* This operation is often followed by conv2d and hence we have introduced bias_add(input, bias) built-in function\n*\n- * @param input\n- * @param bias\n- * @param outputBlock\n- * @param numThreads\n- * @throws DMLRuntimeException\n+ * @param input input matrix\n+ * @param bias bias matrix\n+ * @param outputBlock output matrix\n+ * @param numThreads number of threads\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void biasAdd(MatrixBlock input, MatrixBlock bias, MatrixBlock outputBlock, int numThreads) throws DMLRuntimeException {\nint N = input.getNumRows();\n@@ -1004,6 +1005,8 @@ public class LibMatrixDNN {\nfor(int n = n1; n < n2; n++)\ndoLoopedIm2ColConv2d(n, im2ColOutBlock, params);\nim2ColOutBlocks.add(im2ColOutBlock);\n+ if(params.bias != null)\n+ addBias(n1, n2, params);\nbreak;\n}\ncase LoopedIm2ColConv2dBwdFilter:\n@@ -1033,6 +1036,37 @@ public class LibMatrixDNN {\n}\n}\n+ private static void addBias(int n1, int n2, ConvolutionParameters params) {\n+ int PQ = params.P*params.Q;\n+ int K = params.K;\n+ double [] outputArr = params.output.getDenseBlock();\n+ if(!params.bias.isInSparseFormat()) {\n+ double [] biasArr = params.bias.getDenseBlock();\n+ int index = n1*K*PQ;\n+ for(int n = n1; n < n2; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ outputArr[index] += biasArr[k];\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ Iterator<IJV> iter = params.bias.getSparseBlockIterator();\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ int k = ijv.getI();\n+ double val = ijv.getV();\n+ for(int n = n1; n < n2; n++) {\n+ int index = n*K*PQ + k*PQ;\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ outputArr[index] += val;\n+ }\n+ }\n+ }\n+ }\n+ }\n+\n// Converts input: PQ X CRS matrix and writes to 1 X CHW\nprivate static void doCol2imOverSingleImage(int outputN, MatrixBlock input, ConvolutionParameters params) throws DMLRuntimeException {\nif(input.rlen != params.P*params.Q || input.clen != params.C*params.R*params.S) {\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] [SYSTEMML-540] Fixed javadoc errors and added fused CP conv2d + bias_add
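The fused bias_add in the commit above adds bias[k] to every spatial position (P*Q of them) of channel k in each image n. A minimal NumPy sketch of that broadcast, with placeholder names and shapes (out, bias, N, K, P, Q are chosen here for illustration, not taken from the commit):

import numpy as np

# Layout follows the commit: output is N x (K*P*Q), bias is a K x 1 column.
N, K, P, Q = 2, 3, 4, 4
out = np.zeros((N, K * P * Q))
bias = np.arange(K, dtype=float).reshape(K, 1)

# Equivalent of addBias: out[n, k*P*Q + pq] += bias[k] for all n, pq.
out_view = out.reshape(N, K, P * Q)          # view on the same buffer
out_view += bias.reshape(1, K, 1)            # broadcast over images and positions

print(out.reshape(N, K, P * Q)[0, :, 0])     # -> [0. 1. 2.]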
49,768
11.01.2017 15:36:42
28,800
a83ae1f7fbe9551da674450eedce08aaf7d19d07
[maven-release-plugin] prepare release v0.12.0-incubating-rc1
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>0.12.0-incubating-SNAPSHOT</version>\n+ <version>0.12.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:[email protected]:apache/incubator-systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=incubator-systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.12.0-incubating-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n" } ]
Java
Apache License 2.0
apache/systemds
[maven-release-plugin] prepare release v0.12.0-incubating-rc1
49,736
12.01.2017 16:14:40
28,800
c197f20cc5cce945db70daecff45a8ce19d1bda3
Reduced the number of transposes from batchsize to the number of cores in conv2d_backward_filter
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -296,7 +296,7 @@ public class LibMatrixDNN {\nloopedConvBwdFilterIm2ColTime.addAndGet(t2-t1);\n}\nif(!temp.isEmptyBlock())\n- elementWiseInPlaceTransposedAddition(partialRetBlock, temp);\n+ elementWiseInPlaceAddition(partialRetBlock, temp);\nreturn partialRetBlock;\n}\n@@ -877,7 +877,7 @@ public class LibMatrixDNN {\n}\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\n- MatrixBlock partialRetBlock = new MatrixBlock(params.K, params.C*params.R*params.S, false);\n+ MatrixBlock partialRetBlock = new MatrixBlock(params.C*params.R*params.S, params.K, false);\npartialRetBlock.allocateDenseBlock(true);\npartialRetBlocks.add(partialRetBlock);\n}\n@@ -923,7 +923,7 @@ public class LibMatrixDNN {\n}\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\nfor(MatrixBlock partialRetBlock : partialRetBlocks) {\n- elementWiseInPlaceAddition(params.output, partialRetBlock);\n+ elementWiseInPlaceTransposedAddition(params.output, partialRetBlock);\n}\n}\n} catch (InterruptedException e) {\n@@ -943,7 +943,7 @@ public class LibMatrixDNN {\n}\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\nfor(MatrixBlock partialRetBlock : partialRetBlocks) {\n- elementWiseInPlaceAddition(params.output, partialRetBlock);\n+ elementWiseInPlaceTransposedAddition(params.output, partialRetBlock);\n}\n}\n} catch (Exception e) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Reduced the number of transposes from batchsize to the number of cores in conv2d_backward_filter
49,766
13.01.2017 13:07:18
28,800
03ae3afcb8a79c0d6b3d487d815b4f1ce039dd3a
[MINOR] Add .tgz to the list of files to sign
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -256,9 +256,9 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\ncd svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\nrm -f *.asc\n- for i in *.jar *.zip *.gz; do gpg --output $i.asc --detach-sig --armor $i; done\n+ for i in *.jar *.zip *.gz *.tgz; do gpg --output $i.asc --detach-sig --armor $i; done\nrm -f *.md5\n- for i in *.jar *.zip *.gz; do openssl md5 -hex $i | sed 's/MD5(\\([^)]*\\))= \\([0-9a-f]*\\)/\\2 *\\1/' > $i.md5; done\n+ for i in *.jar *.zip *.gz *.tgz; do openssl md5 -hex $i | sed 's/MD5(\\([^)]*\\))= \\([0-9a-f]*\\)/\\2 *\\1/' > $i.md5; done\ncd .. #exit $RELEASE_VERSION-$RELEASE_RC/\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add .tgz to the list of files to sign
49,766
13.01.2017 13:13:26
28,800
c528b769c0235186c44c96affeb1dfa54de94aba
[MINOR] Generate sha signature during release process
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -259,6 +259,8 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\nfor i in *.jar *.zip *.gz *.tgz; do gpg --output $i.asc --detach-sig --armor $i; done\nrm -f *.md5\nfor i in *.jar *.zip *.gz *.tgz; do openssl md5 -hex $i | sed 's/MD5(\\([^)]*\\))= \\([0-9a-f]*\\)/\\2 *\\1/' > $i.md5; done\n+ rm -f *.sha\n+ for i in *.jar *.zip *.gz *.tgz; do shasum $i > $i.sha; done\ncd .. #exit $RELEASE_VERSION-$RELEASE_RC/\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Generate sha signature during release process
49,728
18.01.2017 10:48:50
28,800
31d2cda55a5dd48b279d2e8f187339822c3a4647
Adding Python3-style Division of Scalar By Matrix This adds the right-hand side Python3-style division to the `matrix` class by overriding `__rtruediv__`. I.e. `3 / X` will now work. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/defmatrix.py", "new_path": "src/main/python/systemml/defmatrix.py", "diff": "@@ -915,6 +915,12 @@ class matrix(object):\ndef __rdiv__(self, other):\nreturn binary_op(other, self, ' / ')\n+ def __rtruediv__(self, other):\n+ \"\"\"\n+ Performs division (Python 3 way).\n+ \"\"\"\n+ return binary_op(other, self, ' / ')\n+\ndef __rmod__(self, other):\nreturn binary_op(other, self, ' % ')\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1186] Adding Python3-style Division of Scalar By Matrix This adds the right-hand side Python3-style division to the `matrix` class by overriding `__rtruediv__`. I.e. `3 / X` will now work. Closes #348.
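Under Python 3 the expression 3 / X first tries (3).__truediv__(X) and, when that returns NotImplemented, falls back to X.__rtruediv__(3), which is the hook the commit above overrides on the matrix class. A minimal self-contained sketch of the same pattern (the Wrapped class and its field are invented for illustration and are not SystemML code):

class Wrapped(object):
    """Toy stand-in for a matrix-like wrapper, not SystemML's matrix class."""
    def __init__(self, value):
        self.value = value

    def __truediv__(self, other):       # handles  X / 3
        return Wrapped(self.value / other)

    def __rtruediv__(self, other):      # handles  3 / X  under Python 3
        return Wrapped(other / self.value)

X = Wrapped(4.0)
print((X / 2).value)   # 2.0
print((3 / X).value)   # 0.75, works because __rtruediv__ is defined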
49,736
19.01.2017 12:07:22
28,800
42ebc9620e62fc7458c2d7177439e61569b89b9e
[SYSTEMML-515] Allow an expression for sparsity This PR also improves the performance of dropout. Closes
[ { "change_type": "MODIFY", "old_path": "scripts/staging/SystemML-NN/nn/layers/dropout.dml", "new_path": "scripts/staging/SystemML-NN/nn/layers/dropout.dml", "diff": "@@ -42,10 +42,16 @@ forward = function(matrix[double] X, double p, int seed)\n* - out: Ouptuts, of same shape as X.\n* - mask: Dropout mask used to compute the output.\n*/\n+ # Normally, we might use something like\n+ # `mask = rand(rows=nrow(X), cols=ncol(X), min=0, max=1, seed=seed) <= p`\n+ # to create a dropout mask. Fortunately, SystemML has a `sparsity` parameter on\n+ # the `rand` function that allows use to create a mask directly.\nif (seed == -1) {\n- seed = as.integer(floor(as.scalar(rand(rows=1, cols=1, min=1, max=100000))))\n+ mask = rand(rows=nrow(X), cols=ncol(X), min=1, max=1, sparsity=p)\n+ }\n+ else {\n+ mask = rand(rows=nrow(X), cols=ncol(X), min=1, max=1, sparsity=p, seed=seed)\n}\n- mask = rand(rows=nrow(X), cols=ncol(X), min=0, max=1, seed=seed) <= p\nout = X * mask / p\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/DataGenOp.java", "new_path": "src/main/java/org/apache/sysml/hops/DataGenOp.java", "diff": "@@ -100,8 +100,10 @@ public class DataGenOp extends Hop implements MultiThreadedHop\n_paramIndexMap.put(s, index);\nindex++;\n}\n- if ( mthd == DataGenMethod.RAND )\n- _sparsity = Double.valueOf(((LiteralOp)inputParameters.get(DataExpression.RAND_SPARSITY)).getName());\n+\n+ Hop sparsityOp = inputParameters.get(DataExpression.RAND_SPARSITY);\n+ if ( mthd == DataGenMethod.RAND && sparsityOp instanceof LiteralOp)\n+ _sparsity = Double.valueOf(((LiteralOp)sparsityOp).getName());\n//generate base dir\nString scratch = ConfigurationManager.getScratchSpace();\n@@ -199,7 +201,7 @@ public class DataGenOp extends Hop implements MultiThreadedHop\n{\ndouble ret = 0;\n- if ( _op == DataGenMethod.RAND ) {\n+ if ( _op == DataGenMethod.RAND && _sparsity != -1 ) {\nif( hasConstantValue(0.0) ) { //if empty block\nret = OptimizerUtils.estimateSizeEmptyBlock(dim1, dim2);\n}\n@@ -237,7 +239,7 @@ public class DataGenOp extends Hop implements MultiThreadedHop\n{\nlong dim1 = computeDimParameterInformation(getInput().get(_paramIndexMap.get(DataExpression.RAND_ROWS)), memo);\nlong dim2 = computeDimParameterInformation(getInput().get(_paramIndexMap.get(DataExpression.RAND_COLS)), memo);\n- long nnz = (long)(_sparsity * dim1 * dim2);\n+ long nnz = _sparsity >= 0 ? 
(long)(_sparsity * dim1 * dim2) : -1;\nif( dim1>0 && dim2>0 )\nreturn new long[]{ dim1, dim2, nnz };\n}\n@@ -355,6 +357,8 @@ public class DataGenOp extends Hop implements MultiThreadedHop\n_nnz = 0;\nelse if ( dimsKnown() && _sparsity>=0 ) //general case\n_nnz = (long) (_sparsity * _dim1 * _dim2);\n+ else\n+ _nnz = -1;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/DataGen.java", "new_path": "src/main/java/org/apache/sysml/lops/DataGen.java", "diff": "@@ -199,11 +199,10 @@ public class DataGen extends Lop\nsb.append(iLop.prepScalarLabel());\nsb.append(OPERAND_DELIMITOR);\n- iLop = _inputParams.get(DataExpression.RAND_SPARSITY.toString()); //no variable support\n+ iLop = _inputParams.get(DataExpression.RAND_SPARSITY.toString());\nif (iLop.isVariable())\n- throw new LopsException(printErrorLocation()\n- + \"Parameter \" + DataExpression.RAND_SPARSITY\n- + \" must be a literal for a Rand operation.\");\n+ sb.append(iLop.prepScalarLabel());\n+ else\nsb.append(iLop.getOutputParameters().getLabel());\nsb.append(OPERAND_DELIMITOR);\n@@ -442,8 +441,8 @@ public class DataGen extends Lop\niLop = _inputParams.get(DataExpression.RAND_SPARSITY.toString()); //no variable support\nif (iLop.isVariable())\n- throw new LopsException(this.printErrorLocation() + \"Parameter \"\n- + DataExpression.RAND_SPARSITY + \" must be a literal for a Rand operation.\");\n+ sb.append(iLop.prepScalarLabel());\n+ else\nsb.append( iLop.getOutputParameters().getLabel() );\nsb.append( OPERAND_DELIMITOR );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DataExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/DataExpression.java", "diff": "@@ -1162,10 +1162,7 @@ public class DataExpression extends DataIdentifier\nraiseValidateError(\"for Rand statement \" + RAND_MIN + \" has incorrect value type\", conditional);\n}\n- //parameters w/o support for variable inputs (requires double/int or string constants)\n- if (!(getVarParam(RAND_SPARSITY) instanceof DoubleIdentifier || getVarParam(RAND_SPARSITY) instanceof IntIdentifier)) {\n- raiseValidateError(\"for Rand statement \" + RAND_SPARSITY + \" has incorrect value type\", conditional);\n- }\n+ // Since sparsity can be arbitrary expression (SYSTEMML-515), no validation check for DoubleIdentifier/IntIdentifier required.\nif (!(getVarParam(RAND_PDF) instanceof StringIdentifier)) {\nraiseValidateError(\"for Rand statement \" + RAND_PDF + \" has incorrect value type\", conditional);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/DataGenCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/DataGenCPInstruction.java", "diff": "@@ -212,7 +212,10 @@ public class DataGenCPInstruction extends UnaryCPInstruction\nmaxValue = Double.valueOf(s[6]).doubleValue();\n}\n- double sparsity = Double.parseDouble(s[7]);\n+ double sparsity = -1;\n+ if (!s[7].contains( Lop.VARIABLE_NAME_PLACEHOLDER)) {\n+ sparsity = Double.valueOf(s[7]);\n+ }\nlong seed = DataGenOp.UNSPECIFIED_SEED;\nif( !s[8].contains( Lop.VARIABLE_NAME_PLACEHOLDER)){\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/RandSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/RandSPInstruction.java", "diff": "@@ -245,7 +245,10 @@ public class RandSPInstruction extends UnarySPInstruction\nmaxValue = Double.valueOf(s[6]).doubleValue();\n}\n- double sparsity = 
Double.parseDouble(s[7]);\n+ double sparsity = -1;\n+ if (!s[7].contains( Lop.VARIABLE_NAME_PLACEHOLDER)) {\n+ sparsity = Double.valueOf(s[7]);\n+ }\nlong seed = DataGenOp.UNSPECIFIED_SEED;\nif (!s[8].contains( Lop.VARIABLE_NAME_PLACEHOLDER)) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] [SYSTEMML-515] Allow an expression for sparsity - This PR also improves the performance of dropout. Closes #351.
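The dropout.dml change in the commit above builds the keep-mask directly with rand(..., min=1, max=1, sparsity=p) instead of thresholding a uniform sample; both constructions give (approximately) a Bernoulli(p) 0/1 mask. A small NumPy sketch of the equivalence and of the usual inverted-dropout scaling (X, p and the variable names are placeholders, not taken from the script):

import numpy as np

rng = np.random.default_rng(7)
X = rng.standard_normal((4, 5))
p = 0.8                                   # probability of keeping a unit

# Old formulation: threshold a uniform sample.
mask_threshold = (rng.random(X.shape) <= p).astype(float)

# New formulation: draw the 0/1 mask directly, which is roughly what
# sparsity=p with min=max=1 produces in DML.
mask_direct = rng.binomial(1, p, size=X.shape).astype(float)

# Inverted dropout: divide by p so the expected activation is unchanged.
out = X * mask_direct / p
print(out.shape, mask_direct.mean())      # the mean is close to p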
49,738
22.01.2017 00:14:46
-3,600
998b0b1a5f511f5306a849e092a9cef6d9de4e1e
Performance tsmm right (cache blocking over K for L2)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -426,8 +426,10 @@ public class LibMatrixMult\nint blklen = (int)(Math.ceil((double)ret.rlen/(2*k)));\nfor( int i=0; i<2*k & i*blklen<ret.rlen; i++ )\ntasks.add(new MatrixMultTransposeTask(m1, ret, leftTranspose, i*blklen, Math.min((i+1)*blklen, ret.rlen)));\n- pool.invokeAll(tasks);\n+ List<Future<Object>> rtasks = pool.invokeAll(tasks);\npool.shutdown();\n+ for( Future<Object> rtask : rtasks )\n+ rtask.get(); //error handling\n}\ncatch(Exception ex) {\nthrow new DMLRuntimeException(ex);\n@@ -1753,22 +1755,21 @@ public class LibMatrixMult\n//1) Unrolled inner loop, for better ILP\n//2) Blocked execution, for less cache trashing in parallel exec\n- // (smaller block sizes would be slightly better, but consistent as is)\n- //3) Single write in inner loop (transient intermediates)\n- int blocksize = 64;\n- for( int bi = rl; bi<ru; bi+=blocksize )\n- for( int bj = bi; bj<m; bj+=blocksize )\n- {\n- final int bimin = Math.min(ru, bi+blocksize);\n- final int bjmin = Math.min(m, bj+blocksize);\n+ // (we block such that lhs, rhs, and output roughly fit into L2, output in L1)\n+ //3) Asymmetric block sizes and exploitation of result symmetry\n+ int blocksizeK = 1024; //two memory pages for sufficiently long scans\n+ int blocksizeIJ = L2_CACHESIZE / 8 / blocksizeK / 2 - 1; //15\n- for(int i = bi, ix1 = bi*n, ix3 = bi*m; i < bimin; i++, ix1+=n, ix3+=m)\n- {\n- final int bjmax = Math.max(i,bj);\n- for(int j = bjmax, ix2 = bjmax*n; j <bjmin; j++, ix2+=n) //from i due to symmetry\n- {\n- c[ ix3+j ] = dotProduct(a, a, ix1, ix2, n);\n- }\n+ //blocked execution over IKJ (lhs/rhs in L2, output in L1)\n+ for( int bi = rl; bi<ru; bi+=blocksizeIJ )\n+ for( int bk = 0, bimin = Math.min(ru, bi+blocksizeIJ); bk<n; bk+=blocksizeK )\n+ for( int bj = bi, bklen = Math.min(blocksizeK, n-bk); bj<m; bj+=blocksizeIJ ) {\n+ //core tsmm block operation (15x15 vectors of length 1K elements)\n+ int bjmin = Math.min(m, bj+blocksizeIJ);\n+ for(int i=bi, ix1=bi*n+bk, ix3=bi*m; i<bimin; i++, ix1+=n, ix3+=m) {\n+ final int bjmax = Math.max(i,bj); //from i due to symmetry\n+ for(int j=bjmax, ix2=bjmax*n+bk; j <bjmin; j++, ix2+=n)\n+ c[ ix3+j ] += dotProduct(a, a, ix1, ix2, bklen);\n}\n}\n}\n@@ -3653,11 +3654,11 @@ public class LibMatrixMult\nprivate static class MatrixMultTransposeTask implements Callable<Object>\n{\n- private MatrixBlock _m1 = null;\n- private MatrixBlock _ret = null;\n- private boolean _left = true;\n- private int _rl = -1;\n- private int _ru = -1;\n+ private final MatrixBlock _m1;\n+ private final MatrixBlock _ret;\n+ private final boolean _left;\n+ private final int _rl;\n+ private final int _ru;\nprotected MatrixMultTransposeTask( MatrixBlock m1, MatrixBlock ret, boolean left, int rl, int ru )\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1166] Performance tsmm right (cache blocking over K for L2)
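The tsmm rewrite in the commit above blocks the k (dot-product) dimension so the row fragments being multiplied stay cache resident, and it computes only cells with j >= i because a transpose-self product is symmetric. A small NumPy sketch of that accumulation order (block sizes and names are illustrative, not the constants used in LibMatrixMult):

import numpy as np

def tsmm_blocked(A, bk=256, bij=16):
    """Blocked accumulation of C = A @ A.T, filling only the upper triangle."""
    m, n = A.shape
    C = np.zeros((m, m))
    for i0 in range(0, m, bij):
        i1 = min(m, i0 + bij)
        for k0 in range(0, n, bk):           # k-blocking: partial dot products
            k1 = min(n, k0 + bk)
            Ak = A[:, k0:k1]
            for j0 in range(i0, m, bij):     # j block starts at the i block (symmetry)
                j1 = min(m, j0 + bij)
                C[i0:i1, j0:j1] += Ak[i0:i1] @ Ak[j0:j1].T
    return np.triu(C) + np.triu(C, 1).T      # mirror the strict upper triangle

A = np.random.rand(50, 300)
assert np.allclose(tsmm_blocked(A), A @ A.T)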
49,736
22.01.2017 19:12:13
28,800
a23396df60f485c8a6627afbf4e322145922707b
Updated the documentation for removeEmpty with select and bugfix for relu_backward Also, added a multi-input cbind external function.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "diff": "@@ -603,7 +603,8 @@ public class BinaryOp extends Hop\n&& potentialZero instanceof LiteralOp && ((LiteralOp) potentialZero).getDoubleValue() == 0;\nif(op == OpOp2.MULT && isLeftXGt0 &&\n- !getInput().get(0).isVector() && !getInput().get(1).isVector()) {\n+ !getInput().get(0).isVector() && !getInput().get(1).isVector()\n+ && getInput().get(0).dimsKnown() && getInput().get(1).dimsKnown()) {\nbinary = new ConvolutionTransform(getInput().get(0).getInput().get(0).constructLops(),\ngetInput().get(1).constructLops(),\nConvolutionTransform.OperationTypes.RELU_BACKWARD, getDataType(), getValueType(), et, -1);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/udf/lib/MultiInputCbind.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.udf.lib;\n+\n+import java.io.IOException;\n+import java.util.Iterator;\n+\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.CacheException;\n+import org.apache.sysml.runtime.matrix.data.IJV;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.udf.FunctionParameter;\n+import org.apache.sysml.udf.Matrix;\n+import org.apache.sysml.udf.PackageFunction;\n+import org.apache.sysml.udf.Scalar;\n+import org.apache.sysml.udf.Matrix.ValueType;\n+\n+/**\n+ * This external built-in function addresses following two common scenarios:\n+ * 1. cbind (cbind (cbind ( X1, X2 ), X3 ), X4)\n+ * 2. 
With spagetization: cbind (cbind (cbind ( matrix(X1, rows=length(X1), cols=1), matrix(X2, rows=length(X2), cols=1) ), matrix(X3, rows=length(X3), cols=1) ), matrix(X4, rows=length(X4), cols=1))\n+ *\n+ * The API of this external built-in function is as follows:\n+ *\n+ * func = externalFunction(int numInputs, boolean spagetize, matrix[double] X1, matrix[double] X2, matrix[double] X3, matrix[double] X4) return (matrix[double] out)\n+ * implemented in (classname=\"org.apache.sysml.udf.lib.MultiInputCbind\",exectype=\"mem\");\n+ *\n+ */\n+public class MultiInputCbind extends PackageFunction {\n+ private static final long serialVersionUID = -4266180315672563097L;\n+\n+ private Matrix ret;\n+ private MatrixBlock retMB;\n+ long numRetRows; long numRetCols;\n+ boolean spagetize;\n+\n+ @Override\n+ public int getNumFunctionOutputs() {\n+ return 1;\n+ }\n+\n+ @Override\n+ public FunctionParameter getFunctionOutput(int pos) {\n+ if(pos == 0)\n+ return ret;\n+ else\n+ throw new RuntimeException(\"MultiInputCbind produces only one output\");\n+ }\n+\n+ @Override\n+ public void execute() {\n+ int numInputs = Integer.parseInt(((Scalar)getFunctionInput(0)).getValue());\n+ spagetize = Boolean.parseBoolean(((Scalar)getFunctionInput(1)).getValue());\n+\n+ // Compute output dimensions\n+ try {\n+ numRetCols = 0;\n+ if(spagetize) {\n+ // Assumption the inputs are of same shape\n+ MatrixBlock in = ((Matrix) getFunctionInput(2)).getMatrixObject().acquireRead();\n+ numRetRows = in.getNumRows()*in.getNumColumns();\n+ numRetCols = numInputs;\n+ ((Matrix) getFunctionInput(2)).getMatrixObject().release();\n+ }\n+ else {\n+ for(int inputID = 2; inputID < numInputs + 2; inputID++) {\n+ MatrixBlock in = ((Matrix) getFunctionInput(inputID)).getMatrixObject().acquireRead();\n+ numRetRows = in.getNumRows();\n+ numRetCols += in.getNumColumns();\n+ ((Matrix) getFunctionInput(inputID)).getMatrixObject().release();\n+ }\n+ }\n+ } catch (CacheException e) {\n+ throw new RuntimeException(\"Error while executing MultiInputCbind\", e);\n+ }\n+\n+ allocateOutput();\n+\n+ // Performs cbind (cbind (cbind ( X1, X2 ), X3 ), X4)\n+ double [] retData = retMB.getDenseBlock();\n+ try {\n+ int startColumn = 0;\n+ for(int inputID = 2; inputID < numInputs + 2; inputID++) {\n+ MatrixBlock in = ((Matrix) getFunctionInput(inputID)).getMatrixObject().acquireRead();\n+ if(spagetize && in.getNumRows()*in.getNumColumns() != numRetRows) {\n+ throw new RuntimeException(\"Expected the inputs to be of same size when spagetization is turned on.\");\n+ }\n+ int inputNumCols = in.getNumColumns();\n+ if(in.isInSparseFormat()) {\n+ Iterator<IJV> iter = in.getSparseBlockIterator();\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ if(spagetize) {\n+ // Perform matrix(X1, rows=length(X1), cols=1) operation before cbind\n+ // Output Column ID = inputID-2 for all elements of inputs\n+ int outputRowIndex = ijv.getI()*inputNumCols + ijv.getJ();\n+ int outputColIndex = inputID-2;\n+ retData[(int) (outputRowIndex*retMB.getNumColumns() + outputColIndex)] = ijv.getV();\n+ }\n+ else {\n+ // Traditional cbind\n+ // Row ID remains the same as that of input\n+ int outputRowIndex = ijv.getI();\n+ int outputColIndex = ijv.getJ() + startColumn;\n+ retData[(int) (outputRowIndex*retMB.getNumColumns() + outputColIndex)] = ijv.getV();\n+ }\n+ }\n+ }\n+ else {\n+ double [] denseBlock = in.getDenseBlock();\n+ if(denseBlock != null) {\n+ if(spagetize) {\n+ // Perform matrix(X1, rows=length(X1), cols=1) operation before cbind\n+ // Output Column ID = inputID-2 for all 
elements of inputs\n+ int j = inputID-2;\n+ for(int i = 0; i < numRetRows; i++) {\n+ retData[(int) (i*numRetCols + j)] = denseBlock[i];\n+ }\n+ }\n+ else {\n+ // Traditional cbind\n+ // Row ID remains the same as that of input\n+ for(int i = 0; i < retMB.getNumRows(); i++) {\n+ for(int j = 0; j < inputNumCols; j++) {\n+ int outputColIndex = j + startColumn;\n+ retData[(int) (i*numRetCols + outputColIndex)] = denseBlock[i*inputNumCols + j];\n+ }\n+ }\n+ }\n+ }\n+ }\n+ ((Matrix) getFunctionInput(inputID)).getMatrixObject().release();\n+ startColumn += inputNumCols;\n+ }\n+ } catch (CacheException e) {\n+ throw new RuntimeException(\"Error while executing MultiInputCbind\", e);\n+ }\n+\n+ retMB.recomputeNonZeros();\n+ try {\n+ retMB.examSparsity();\n+ ret.setMatrixDoubleArray(retMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+ } catch (DMLRuntimeException e) {\n+ throw new RuntimeException(\"Error while executing MultiInputCbind\", e);\n+ } catch (IOException e) {\n+ throw new RuntimeException(\"Error while executing MultiInputCbind\", e);\n+ }\n+ }\n+\n+ private void allocateOutput() {\n+ String dir = createOutputFilePathAndName( \"TMP\" );\n+ ret = new Matrix( dir, numRetRows, numRetCols, ValueType.Double );\n+ retMB = new MatrixBlock((int) numRetRows, (int) numRetCols, false);\n+ retMB.allocateDenseBlock();\n+ }\n+\n+\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1187] Updated the documentation for removeEmpty with select and bugfix for relu_backward Also, added a multi-input cbind external function.
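The MultiInputCbind UDF added in the commit above fuses a chain of cbind calls and can optionally reshape each input into a single column first (the 'spagetize' flag). In NumPy terms the two modes correspond roughly to the following (X1, X2, X3 are placeholder inputs):

import numpy as np

X1, X2, X3 = (np.arange(6.0).reshape(2, 3) for _ in range(3))

# spagetize=false: plain column binding, all inputs must have the same row count.
plain = np.hstack([X1, X2, X3])                                  # shape (2, 9)

# spagetize=true: flatten each input row-major into one column, then bind.
spag = np.column_stack([X.reshape(-1) for X in (X1, X2, X3)])    # shape (6, 3)

print(plain.shape, spag.shape)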
49,740
18.01.2017 02:48:53
28,800
b4a4dee762141a9c81353b47841debb3f3ccdd41
[MINOR] prompt for PGP passphrase if not exported Closes
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -141,12 +141,12 @@ while [ \"${1+defined}\" ]; do\ndone\n-for env in GPG_PASSPHRASE; do\n- if [ -z \"${!env}\" ]; then\n- echo \"ERROR: $env must be set to run this script\"\n- exit_with_usage\n+if [[ -z \"$GPG_PASSPHRASE\" ]]; then\n+ echo 'The environment variable GPG_PASSPHRASE is not set. Enter the passphrase to'\n+ echo 'unlock the GPG signing key that will be used to sign the release!'\n+ echo\n+ stty -echo && printf \"GPG passphrase: \" && read GPG_PASSPHRASE && printf '\\n' && stty echo\nfi\n-done\nif [[ \"$RELEASE_PREPARE\" == \"true\" && -z \"$RELEASE_VERSION\" ]]; then\necho \"ERROR: --releaseVersion must be passed as an argument to run this script\"\n@@ -245,7 +245,7 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\ncd $RELEASE_WORK_DIR/incubator-systemml\n# Build and prepare the release\n- $MVN $PUBLISH_PROFILES release:clean release:prepare $DRY_RUN -Dgpg.passphrase=\"$GPG_PASSPHRASE\" -DskipTests -Darguments=\"-DskipTests\" -DreleaseVersion=\"$RELEASE_VERSION\" -DdevelopmentVersion=\"$DEVELOPMENT_VERSION\" -Dtag=\"$RELEASE_TAG\"\n+ $MVN $PUBLISH_PROFILES release:clean release:prepare $DRY_RUN -Darguments=\"-Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\" -DskipTests\" -DreleaseVersion=\"$RELEASE_VERSION\" -DdevelopmentVersion=\"$DEVELOPMENT_VERSION\" -Dtag=\"$RELEASE_TAG\"\ncd $RELEASE_WORK_DIR\n@@ -282,7 +282,7 @@ if [[ \"$RELEASE_PUBLISH\" == \"true\" ]]; then\ncd $RELEASE_WORK_DIR/incubator-systemml\n#Deploy scala 2.10\n- mvn -DaltDeploymentRepository=apache.releases.https::default::https://repository.apache.org/service/local/staging/deploy/maven2 clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests\" -Dgpg.passphrase=$GPG_PASSPHRASE $PUBLISH_PROFILES\n+ mvn -DaltDeploymentRepository=apache.releases.https::default::https://repository.apache.org/service/local/staging/deploy/maven2 clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests -Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\"\" -Dgpg.passphrase=\"$GPG_PASSPHRASE\" $PUBLISH_PROFILES\ncd \"$BASE_DIR\" #exit target\n@@ -308,7 +308,7 @@ if [[ \"$RELEASE_SNAPSHOT\" == \"true\" ]]; then\nfi\n#Deploy scala 2.10\n- $MVN -DaltDeploymentRepository=apache.snapshots.https::default::https://repository.apache.org/content/repositories/snapshots clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests\" -Dgpg.passphrase=$GPG_PASSPHRASE $PUBLISH_PROFILES\n+ $MVN -DaltDeploymentRepository=apache.snapshots.https::default::https://repository.apache.org/content/repositories/snapshots clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests -Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\"\" -Dgpg.passphrase=\"$GPG_PASSPHRASE\" $PUBLISH_PROFILES\ncd \"$BASE_DIR\" #exit target\nexit 0\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] prompt for PGP passphrase if not exported Closes #350
49,736
24.01.2017 17:02:38
28,800
115d906399a9bf9dea22e674bf17952c23c297ae
Cleanup Scala UDF invocation Closes
[ { "change_type": "MODIFY", "old_path": "docs/spark-mlcontext-programming-guide.md", "new_path": "docs/spark-mlcontext-programming-guide.md", "diff": "@@ -1636,45 +1636,6 @@ scala> for (i <- 1 to 5) {\n</div>\n-## Passing Scala UDF to SystemML\n-\n-SystemML allows the users to pass a Scala UDF (with input/output types supported by SystemML)\n-to the DML script via MLContext. The restrictions for the supported Scala UDFs are as follows:\n-\n-1. Only types specified by DML language is supported for parameters and return types (i.e. Int, Double, Boolean, String, double[][]).\n-2. At minimum, the function should have 1 argument and 1 return value.\n-3. At max, the function can have 10 arguments and 10 return values.\n-\n-{% highlight scala %}\n-import org.apache.sysml.api.mlcontext._\n-import org.apache.sysml.api.mlcontext.ScriptFactory._\n-val ml = new MLContext(sc)\n-\n-// Demonstrates how to pass a simple scala UDF to SystemML\n-def addOne(x:Double):Double = x + 1\n-ml.udf.register(\"addOne\", addOne _)\n-val script1 = dml(\"v = addOne(2.0); print(v)\")\n-ml.execute(script1)\n-\n-// Demonstrates operation on local matrices (double[][])\n-def addOneToDiagonal(x:Array[Array[Double]]):Array[Array[Double]] = { for(i <- 0 to x.length-1) x(i)(i) = x(i)(i) + 1; x }\n-ml.udf.register(\"addOneToDiagonal\", addOneToDiagonal _)\n-val script2 = dml(\"m1 = matrix(0, rows=3, cols=3); m2 = addOneToDiagonal(m1); print(toString(m2));\")\n-ml.execute(script2)\n-\n-// Demonstrates multi-return function\n-def multiReturnFn(x:Double):(Double, Int) = (x + 1, (x * 2).toInt)\n-ml.udf.register(\"multiReturnFn\", multiReturnFn _)\n-val script3 = dml(\"[v1, v2] = multiReturnFn(2.0); print(v1)\")\n-ml.execute(script3)\n-\n-// Demonstrates multi-argument multi-return function\n-def multiArgReturnFn(x:Double, y:Int):(Double, Int) = (x + 1, (x * y).toInt)\n-ml.udf.register(\"multiArgReturnFn\", multiArgReturnFn _)\n-val script4 = dml(\"[v1, v2] = multiArgReturnFn(2.0, 1); print(v2)\")\n-ml.execute(script4)\n-{% endhighlight %}\n-\n---\n# Jupyter (PySpark) Notebook Example - Poisson Nonnegative Matrix Factorization\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "diff": "@@ -31,7 +31,6 @@ import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.api.MLContextProxy;\n-import org.apache.sysml.api.ExternalUDFRegistration;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.api.monitoring.SparkMonitoringUtil;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -120,12 +119,6 @@ public class MLContext {\nprivate List<String> scriptHistoryStrings = new ArrayList<String>();\nprivate Map<String, Script> scripts = new LinkedHashMap<String, Script>();\n- /**\n- * Allows users to register external scala UDFs.\n- * The design is explained in ExternalUDFRegistration.scala.\n- */\n- public ExternalUDFRegistration udf = null;\n-\n/**\n* The different explain levels supported by SystemML.\n*\n@@ -224,8 +217,6 @@ public class MLContext {\n}\nthis.sc = sc;\n- this.udf = new ExternalUDFRegistration();\n- this.udf.setMLContext(this);\nMLContextUtil.verifySparkVersionSupported(sc);\n// by default, run in hybrid Spark mode for optimal performance\nDMLScript.rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK;\n@@ -305,7 +296,6 @@ public class MLContext 
{\nscript.setName(time.toString());\n}\n- scriptExecutor.udf = udf;\nMLResults results = scriptExecutor.execute(script);\nString history = MLContextUtil.createHistoryForScript(script, time);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "@@ -25,7 +25,6 @@ import java.util.Set;\nimport org.apache.commons.lang3.StringUtils;\nimport org.apache.sysml.api.DMLScript;\n-import org.apache.sysml.api.ExternalUDFRegistration;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.api.monitoring.SparkMonitoringUtil;\n@@ -120,7 +119,6 @@ public class ScriptExecutor {\nprotected boolean statistics = false;\nprotected ExplainLevel explainLevel;\nprotected int statisticsMaxHeavyHitters = 10;\n- public ExternalUDFRegistration udf;\n/**\n* ScriptExecutor constructor.\n@@ -452,12 +450,6 @@ public class ScriptExecutor {\ninputParameters, script.getScriptType());\nString scriptExecutionString = script.getScriptExecutionString();\n- if(udf != null) {\n- // Append the headers from Scala UDF.\n- String externalHeaders = udf.getExternalHeaders();\n- if(!externalHeaders.equals(\"\"))\n- scriptExecutionString = externalHeaders + scriptExecutionString;\n- }\ndmlProgram = parser.parse(null, scriptExecutionString, inputParametersStringMaps);\n} catch (ParseException e) {\nthrow new MLContextException(\"Exception occurred while parsing script\", e);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ExternalFunctionProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ExternalFunctionProgramBlock.java", "diff": "@@ -601,11 +601,6 @@ public class ExternalFunctionProgramBlock extends FunctionProgramBlock\nfunc.setConfiguration(configFile);\nfunc.setBaseDir(_baseDir);\n- if(className.equals(\"org.apache.sysml.udf.lib.GenericFunction\")) {\n- ((org.apache.sysml.udf.lib.GenericFunction)func)._functionName = this._functionName;\n- ((org.apache.sysml.udf.lib.GenericFunction)func)._namespace = this._namespace;\n- }\n-\n//executes function\nfunc.execute();\n" }, { "change_type": "DELETE", "old_path": "src/main/java/org/apache/sysml/udf/lib/GenericFunction.java", "new_path": null, "diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. 
See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.udf.lib;\n-\n-import java.io.IOException;\n-\n-import org.apache.commons.lang.StringUtils;\n-import org.apache.sysml.api.ExternalUDFRegistration;\n-import org.apache.sysml.parser.DMLProgram;\n-import org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.udf.FunctionParameter;\n-import org.apache.sysml.udf.Matrix;\n-import org.apache.sysml.udf.PackageFunction;\n-import org.apache.sysml.udf.Scalar;\n-\n-import scala.Function0;\n-\n-public class GenericFunction extends PackageFunction {\n- private static final long serialVersionUID = -195996547505886575L;\n- String [] fnSignature;\n- FunctionParameter [] returnVals;\n- Function0<FunctionParameter []> scalaUDF;\n- public String _functionName;\n- public String _namespace;\n-\n- public void initialize() {\n- if(_namespace != null && !_namespace.equals(DMLProgram.DEFAULT_NAMESPACE)) {\n- throw new RuntimeException(\"Expected the function in default namespace\");\n- }\n- if(_functionName == null) {\n- throw new RuntimeException(\"Expected the function name to be set\");\n- }\n- if(fnSignature == null) {\n- fnSignature = ExternalUDFRegistration.fnSignatureMapping().get(_functionName);\n- scalaUDF = ExternalUDFRegistration.fnMapping().get(_functionName);\n- ExternalUDFRegistration.udfMapping().put(_functionName, this);\n- }\n- }\n-\n- @Override\n- public int getNumFunctionOutputs() {\n- initialize();\n- String retSignature = fnSignature[fnSignature.length -1];\n- if(!retSignature.startsWith(\"(\"))\n- return 1;\n- else {\n- return StringUtils.countMatches(retSignature, \",\") + 1;\n- }\n- }\n-\n- @Override\n- public FunctionParameter getFunctionOutput(int pos) {\n- initialize();\n- if(returnVals == null || returnVals.length <= pos)\n- throw new RuntimeException(\"Incorrect number of outputs or function not executed\");\n- return returnVals[pos];\n- }\n-\n- @Override\n- public void execute() {\n- initialize();\n- returnVals = scalaUDF.apply();\n- }\n-\n- public Object getInput(String type, int pos) throws DMLRuntimeException, IOException {\n- if(type.equals(\"Int\") || type.equals(\"java.lang.Integer\")) {\n- return Integer.parseInt(((Scalar)getFunctionInput(pos)).getValue());\n- }\n- else if(type.equals(\"Double\") || type.equals(\"java.lang.Double\")) {\n- return Double.parseDouble(((Scalar)getFunctionInput(pos)).getValue());\n- }\n- else if(type.equals(\"java.lang.String\")) {\n- return ((Scalar)getFunctionInput(pos)).getValue();\n- }\n- else if(type.equals(\"boolean\") || type.equals(\"java.lang.Boolean\")) {\n- return Boolean.parseBoolean(((Scalar)getFunctionInput(pos)).getValue());\n- }\n- else if(type.equals(\"scala.Array[scala.Array[Double]]\")) {\n- return ((Matrix) getFunctionInput(pos)).getMatrixAsDoubleArray();\n- }\n-\n- throw new RuntimeException(\"Unsupported type: \" + type);\n- }\n-\n-}\n" }, { "change_type": "DELETE", "old_path": "src/main/scala/org/apache/sysml/api/ExternalUDFRegistration.scala", "new_path": null, "diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. 
You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.api;\n-\n-import scala.reflect.runtime.universe._\n-import java.util.ArrayList\n-import org.apache.sysml.udf.FunctionParameter\n-import org.apache.sysml.udf.Scalar\n-import org.apache.sysml.udf.Matrix\n-import org.apache.sysml.udf.Matrix.ValueType\n-import org.apache.sysml.api.mlcontext.Script\n-import org.apache.sysml.udf.PackageFunction\n-import org.apache.sysml.udf.FunctionParameter\n-import org.apache.sysml.udf.lib.GenericFunction\n-import org.apache.sysml.udf.Scalar.ScalarValueType\n-import java.util.HashMap\n-\n-/*\n- * Design of Scala external UDF functionality:\n- * Two main classes in that enable this functionality are as follows:\n- * 1. ExternalUDFRegistration: We have overloaded the register method to allow for registration\n- * of scala UDFs with 10 arguments. Each of these functions examine the input types to check\n- * if they are supported (see getType). If input types are supported, then it creates a header of format:\n- *\n- * fnName = externalFunction(input arguments) return (output arguments) implemented in (classname=\"org.apache.sysml.udf.lib.GenericFunction\",exectype=\"mem\")\n- *\n- * This header is appended in MLContext before execution of the script.\n- *\n- * In addition, it populates two global data structures: fnMapping (which stores a zero-argument anonymous\n- * function) and fnSignatureMapping (useful for computing the number of return values).\n- * These data structures are used by GenericFunction.\n- *\n- * The secret sauce of this approach is conversion of arbitrary Scala UDF into a zero-argument anonymous UDF\n- * stored in ExternalUDFRegistration's fnMapping data structure (similar to execute) :)\n- *\n- * 2. GenericFunction\n- * This generic class is called by SystemML for any registered Scala UDF. This class first inserts itself into\n- * ExternalUDFRegistration's udfMapping data structure and then invokes the zero-argument anonymous\n- * function corresponding to the user specified Scala UDF.\n- *\n- *\n- * The current implementation allows the functions registered with one MLContext\n- * to be visible to other MLContext as well as ExternalUDFRegistration's fnMapping, fnSignatureMapping and udfMapping\n- * fields are static. 
This is necessary to simplify the integration with existing external UDF function framework.\n- *\n- * Usage:\n- * scala> import org.apache.sysml.api.mlcontext._\n- * scala> import org.apache.sysml.api.mlcontext.ScriptFactory._\n- * scala> val ml = new MLContext(sc)\n- * scala>\n- * scala> // Demonstrates how to pass a simple scala UDF to SystemML\n- * scala> def addOne(x:Double):Double = x + 1\n- * scala> ml.udf.register(\"addOne\", addOne)\n- * scala> val script1 = dml(\"v = addOne(2.0); print(v)\")\n- * scala> ml.execute(script1)\n- * scala>\n- * scala> // Demonstrates operation on local matrices (double[][])\n- * scala> def addOneToDiagonal(x:Array[Array[Double]]):Array[Array[Double]] = { for(i <- 0 to x.length-1) x(i)(i) = x(i)(i) + 1; x }\n- * scala> ml.udf.register(\"addOneToDiagonal\", addOneToDiagonal)\n- * scala> val script2 = dml(\"m1 = matrix(0, rows=3, cols=3); m2 = addOneToDiagonal(m1); print(toString(m2));\")\n- * scala> ml.execute(script2)\n- * scala>\n- * scala> // Demonstrates multi-return function\n- * scala> def multiReturnFn(x:Double):(Double, Int) = (x + 1, (x * 2).toInt)\n- * scala> ml.udf.register(\"multiReturnFn\", multiReturnFn)\n- * scala> val script3 = dml(\"[v1, v2] = multiReturnFn(2.0); print(v1)\")\n- * scala> ml.execute(script3)\n- * scala>\n- * scala> // Demonstrates multi-argument multi-return function\n- * scala> def multiArgReturnFn(x:Double, y:Int):(Double, Int) = (x + 1, (x * y).toInt)\n- * scala> ml.udf.register(\"multiArgReturnFn\", multiArgReturnFn _)\n- * scala> val script4 = dml(\"[v1, v2] = multiArgReturnFn(2.0, 1); print(v2)\")\n- * scala> ml.execute(script4)\n- */\n-\n-object ExternalUDFRegistration {\n- val fnMapping: HashMap[String, Function0[Array[FunctionParameter]]] = new HashMap[String, Function0[Array[FunctionParameter]]]()\n- val fnSignatureMapping: HashMap[String, Array[String]] = new HashMap[String, Array[String]]()\n- val udfMapping:HashMap[String, GenericFunction] = new HashMap[String, GenericFunction]();\n-}\n-\n-/**\n- * This class handles the registration of external Scala UDFs via MLContext.\n- */\n-class ExternalUDFRegistration {\n- var ml:MLContext = null\n- def setMLContext(ml1:org.apache.sysml.api.mlcontext.MLContext) = { this.ml = ml }\n-\n- val scriptHeaders:HashMap[String,StringBuilder] = new HashMap[String,StringBuilder]()\n- def getExternalHeaders(): String = {\n- val it = scriptHeaders.entrySet().iterator();\n- val ret = new StringBuilder\n- while (it.hasNext()) {\n- val header = it.next().getValue.toString()\n- if(!header.equals(\"\")) {\n- ret.append(header + \"\\n\")\n- }\n- }\n- // Useful for debugging:\n- // System.out.println(ret.toString)\n- ret.toString()\n- }\n-\n- def getType(t: String):String = {\n- t match {\n- case \"java.lang.String\" => \"string\"\n- case \"Double\" => \"double\"\n- case \"Int\" => \"integer\"\n- case \"Boolean\" => \"boolean\"\n- // Support only pass by value for now.\n- // case \"org.apache.sysml.runtime.matrix.data.MatrixBlock\" => \"matrix[double]\"\n- // case \"scala.Array[Double]\" => \"matrix[double]\"\n- case \"scala.Array[scala.Array[Double]]\" => \"matrix[double]\"\n- case _ => throw new RuntimeException(\"Unsupported type of parameter: \" + t)\n- }\n- }\n-\n- def getReturnType(t: String):String = {\n- if(t.startsWith(\"(\")) {\n- val t1 = t.substring(1, t.length()-1).split(\",\").map(_.trim)\n- val ret = new StringBuilder\n- for(i <- 0 until t1.length) {\n- if(i != 0) ret.append(\", \")\n- ret.append(getType(t1(i)) + \" output\" + i)\n- }\n- ret.toString\n- }\n- else\n- 
getType(t) + \" output0\"\n- }\n-\n- def createExternalFunctionHeader(name:String, typeInput:Array[String]): Unit = {\n- if(scriptHeaders.containsKey(name)) scriptHeaders.remove(name)\n- val header:StringBuilder = new StringBuilder()\n- header.append(name + \" = externalFunction(\")\n- header.append(getType(typeInput(0)) + \" input0\")\n- for(i <- 1 until typeInput.length -1) {\n- header.append(\", \" + getType(typeInput(i)) + \" input\" + i)\n- }\n- header.append(\") return (\")\n- header.append(getReturnType( typeInput(typeInput.length -1) ))\n- header.append(\") implemented in (classname=\\\"org.apache.sysml.udf.lib.GenericFunction\\\", exectype=\\\"mem\\\");\\n\")\n- scriptHeaders.put(name, header)\n- ExternalUDFRegistration.fnSignatureMapping.put(name, typeInput)\n- }\n-\n- // ------------------------------------------------------------------------------------------\n- // Overloaded register function for 1 to 10 inputs:\n-\n- // zero-input function unsupported by SystemML\n-// def register[RT: TypeTag](name: String, func: Function0[RT]): Unit = {\n-// println(getType(typeOf[RT].toString()))\n-// }\n-\n- def unregister(name: String): Unit = {\n- ExternalUDFRegistration.fnSignatureMapping.remove(name)\n- ExternalUDFRegistration.fnMapping.remove(name)\n- ExternalUDFRegistration.udfMapping.remove(name)\n- scriptHeaders.remove(name)\n- }\n-\n- def register[A1: TypeTag, RT: TypeTag](name: String, func: Function1[A1, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(typeOf[A1].toString(), typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, RT: TypeTag](name: String, func: Function2[A1, A2, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(typeOf[A1].toString(), typeOf[A2].toString(), typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, RT: TypeTag](name: String, func: Function3[A1, A2, A3, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, RT: TypeTag](name: String, func: Function4[A1, A2, A3, A4, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return 
convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, RT: TypeTag](name: String,\n- func: Function5[A1, A2, A3, A4, A5, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4],\n- udf.getInput(typeOf[A5].toString(), 4).asInstanceOf[A5]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[A5].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, RT: TypeTag](name: String,\n- func: Function6[A1, A2, A3, A4, A5, A6, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4],\n- udf.getInput(typeOf[A5].toString(), 4).asInstanceOf[A5],\n- udf.getInput(typeOf[A6].toString(), 5).asInstanceOf[A6]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[A5].toString(), typeOf[A6].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, RT: TypeTag](name: String,\n- func: Function7[A1, A2, A3, A4, A5, A6, A7, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4],\n- udf.getInput(typeOf[A5].toString(), 4).asInstanceOf[A5],\n- udf.getInput(typeOf[A6].toString(), 5).asInstanceOf[A6],\n- udf.getInput(typeOf[A7].toString(), 6).asInstanceOf[A7]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[A5].toString(), typeOf[A6].toString(), typeOf[A7].toString(),\n- 
typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag,\n- A8: TypeTag, RT: TypeTag](name: String,\n- func: Function8[A1, A2, A3, A4, A5, A6, A7, A8, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4],\n- udf.getInput(typeOf[A5].toString(), 4).asInstanceOf[A5],\n- udf.getInput(typeOf[A6].toString(), 5).asInstanceOf[A6],\n- udf.getInput(typeOf[A7].toString(), 6).asInstanceOf[A7],\n- udf.getInput(typeOf[A8].toString(), 7).asInstanceOf[A8]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[A5].toString(), typeOf[A6].toString(), typeOf[A7].toString(), typeOf[A8].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag,\n- A8: TypeTag, A9: TypeTag, RT: TypeTag](name: String,\n- func: Function9[A1, A2, A3, A4, A5, A6, A7, A8, A9, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4],\n- udf.getInput(typeOf[A5].toString(), 4).asInstanceOf[A5],\n- udf.getInput(typeOf[A6].toString(), 5).asInstanceOf[A6],\n- udf.getInput(typeOf[A7].toString(), 6).asInstanceOf[A7],\n- udf.getInput(typeOf[A8].toString(), 7).asInstanceOf[A8],\n- udf.getInput(typeOf[A9].toString(), 8).asInstanceOf[A9]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[A5].toString(), typeOf[A6].toString(), typeOf[A7].toString(), typeOf[A8].toString(),\n- typeOf[A9].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- def register[A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag,\n- A8: TypeTag, A9: TypeTag, A10: TypeTag, RT: TypeTag](name: String,\n- func: Function10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, RT]): Unit = {\n- val anonfun0 = new Function0[Array[FunctionParameter]] {\n- def apply(): Array[FunctionParameter] = {\n- val udf = ExternalUDFRegistration.udfMapping.get(name);\n- return convertReturnToOutput(func.apply(udf.getInput(typeOf[A1].toString(), 0).asInstanceOf[A1],\n- udf.getInput(typeOf[A2].toString(), 1).asInstanceOf[A2],\n- udf.getInput(typeOf[A3].toString(), 2).asInstanceOf[A3],\n- udf.getInput(typeOf[A4].toString(), 3).asInstanceOf[A4],\n- udf.getInput(typeOf[A5].toString(), 4).asInstanceOf[A5],\n- udf.getInput(typeOf[A6].toString(), 5).asInstanceOf[A6],\n- udf.getInput(typeOf[A7].toString(), 6).asInstanceOf[A7],\n- 
udf.getInput(typeOf[A8].toString(), 7).asInstanceOf[A8],\n- udf.getInput(typeOf[A9].toString(), 8).asInstanceOf[A9],\n- udf.getInput(typeOf[A10].toString(), 9).asInstanceOf[A10]))\n- }\n- }\n- createExternalFunctionHeader(name, Array(\n- typeOf[A1].toString(), typeOf[A2].toString(), typeOf[A3].toString(), typeOf[A4].toString(),\n- typeOf[A5].toString(), typeOf[A6].toString(), typeOf[A7].toString(), typeOf[A8].toString(),\n- typeOf[A9].toString(), typeOf[A10].toString(),\n- typeOf[RT].toString()))\n- ExternalUDFRegistration.fnMapping.put(name, anonfun0)\n- }\n-\n- // ------------------------------------------------------------------------------------------\n-\n- def convertReturnToOutput(ret:Any): Array[FunctionParameter] = {\n- ret match {\n- case x:Tuple1[Any] => Array(convertToOutput(x._1))\n- case x:Tuple2[Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2))\n- case x:Tuple3[Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3))\n- case x:Tuple4[Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4))\n- case x:Tuple5[Any, Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4), convertToOutput(x._5))\n- case x:Tuple6[Any, Any, Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4), convertToOutput(x._5), convertToOutput(x._6))\n- case x:Tuple7[Any, Any, Any, Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4), convertToOutput(x._5), convertToOutput(x._6), convertToOutput(x._7))\n- case x:Tuple8[Any, Any, Any, Any, Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4), convertToOutput(x._5), convertToOutput(x._6), convertToOutput(x._7), convertToOutput(x._8))\n- case x:Tuple9[Any, Any, Any, Any, Any, Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4), convertToOutput(x._5), convertToOutput(x._6), convertToOutput(x._7),\n- convertToOutput(x._8), convertToOutput(x._9))\n- case x:Tuple10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any] => Array(convertToOutput(x._1), convertToOutput(x._2), convertToOutput(x._3), convertToOutput(x._4), convertToOutput(x._5), convertToOutput(x._6), convertToOutput(x._7),\n- convertToOutput(x._8), convertToOutput(x._9), convertToOutput(x._10))\n- case _ => Array(convertToOutput(ret))\n- }\n- }\n- val rand = new java.util.Random()\n- def convertToOutput(x:Any): FunctionParameter = {\n- x match {\n- case x1:Int => return new Scalar(ScalarValueType.Integer, String.valueOf(x))\n- case x1:java.lang.Integer => return new Scalar(ScalarValueType.Integer, String.valueOf(x))\n- case x1:Double => return new Scalar(ScalarValueType.Double, String.valueOf(x))\n- case x1:java.lang.Double => return new Scalar(ScalarValueType.Double, String.valueOf(x))\n- case x1:java.lang.String => return new Scalar(ScalarValueType.Text, String.valueOf(x))\n- case x1:java.lang.Boolean => return new Scalar(ScalarValueType.Boolean, String.valueOf(x))\n- case x1:Boolean => return new Scalar(ScalarValueType.Boolean, String.valueOf(x))\n- case x1:scala.Array[scala.Array[Double]] => {\n- val mat = new Matrix( \"temp\" + rand.nextLong, x1.length, x1(0).length, ValueType.Double );\n- mat.setMatrixDoubleArray(x1)\n- return mat\n- }\n- case _ => throw new 
RuntimeException(\"Unsupported output type:\" + x.getClass().getName)\n- }\n- }\n-}\n\\ No newline at end of file\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1190] Cleanup Scala UDF invocation Closes #357.
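The deleted register helpers above all follow one pattern: look up the registered inputs, apply the user function, and convert the returned value (a scalar, a matrix, or a tuple of them) into output parameters. The Scala sketch below is not SystemML code; it uses a toy Output type to illustrate that tuple-to-outputs conversion idea in a handful of lines.

```scala
object ReturnConversionDemo {
  sealed trait Output
  final case class ScalarOut(value: String) extends Output
  final case class MatrixOut(rows: Int, cols: Int) extends Output

  // Convert a single return value into one toy output parameter.
  def convertToOutput(x: Any): Output = x match {
    case d: Double               => ScalarOut(d.toString)
    case i: Int                  => ScalarOut(i.toString)
    case s: String               => ScalarOut(s)
    case b: Boolean              => ScalarOut(b.toString)
    case m: Array[Array[Double]] => MatrixOut(m.length, m(0).length)
    case other => throw new IllegalArgumentException(
      "Unsupported output type: " + other.getClass.getName)
  }

  // Tuples fan out into one output per component; anything else is a single output.
  def convertReturnToOutput(ret: Any): Array[Output] = ret match {
    case p: Product if p.productPrefix.startsWith("Tuple") =>
      p.productIterator.map(convertToOutput).toArray
    case _ => Array(convertToOutput(ret))
  }

  def main(args: Array[String]): Unit = {
    println(convertReturnToOutput((1.0, "ok")).mkString(", "))
    println(convertReturnToOutput(42).mkString(", "))
  }
}
```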
49,738
25.01.2017 16:18:22
-3,600
61b36582002e44ddc2c6555e1c393eabb942cc24
Fix bufferpool robustness (flag visibility) Our bufferpool serializes matrices and frames outside a synchronized region in order to keep the serial fraction as small as possible. Since one thread blocks until another thread updates a certain flag, we now ensure - via volatile - that these updates are always visible.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/ByteBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/ByteBuffer.java", "diff": "@@ -36,10 +36,10 @@ import org.apache.sysml.runtime.util.LocalFileUtils;\n*/\npublic class ByteBuffer\n{\n- private boolean _serialized;\n- private boolean _shallow;\n- private boolean _matrix;\n- private long _size;\n+ private volatile boolean _serialized;\n+ private volatile boolean _shallow;\n+ private volatile boolean _matrix;\n+ private final long _size;\nprotected byte[] _bdata = null; //sparse matrix\nprotected CacheBlock _cdata = null; //dense matrix/frame\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/LazyWriteBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/LazyWriteBuffer.java", "diff": "@@ -38,7 +38,7 @@ public class LazyWriteBuffer\n}\n//global size limit in bytes\n- private static long _limit;\n+ private static final long _limit;\n//current size in bytes\nprivate static long _size;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1196] Fix bufferpool robustness (flag visibility) Our bufferpool serializes matrices and frames outside a synchronized region in order to keep the serial fraction as small as possible. Since one thread blocks until another thread updates a certain flag, we now ensure - via volatile - that these updates are always visible.
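The fix relies on a standard Java memory model guarantee: a write to a volatile field happens-before any subsequent read of that field, so a waiting thread is certain to observe the update. The following self-contained Scala sketch (toy names, not the bufferpool classes) shows the handshake this commit protects; without @volatile the waiting loop could in principle spin forever on a stale cached value.

```scala
object VolatileFlagDemo {
  // Mirrors the ByteBuffer._serialized idea: one thread sets the flag,
  // another blocks until it observes the update.
  @volatile private var serialized: Boolean = false

  def main(args: Array[String]): Unit = {
    val writer = new Thread(new Runnable {
      override def run(): Unit = {
        Thread.sleep(50)   // pretend to serialize a matrix outside the lock
        serialized = true  // volatile write: visible to all reader threads
      }
    })
    writer.start()

    // Volatile read in the loop condition guarantees we eventually see `true`.
    while (!serialized) Thread.`yield`()
    println("serialization flag observed")
    writer.join()
  }
}
```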
49,773
26.01.2017 01:09:56
28,800
14ddd3e90785b2d372e21968dc208c7aee6f5455
Eliminate collision of regularization and tolerance parameters Closes
[ { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala", "new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala", "diff": "@@ -60,7 +60,7 @@ trait HasTol extends Params {\nfinal def getTol: Double = $(tol)\n}\ntrait HasRegParam extends Params {\n- final val regParam: DoubleParam = new DoubleParam(this, \"tol\", \"the convergence tolerance for iterative algorithms\")\n+ final val regParam: DoubleParam = new DoubleParam(this, \"regParam\", \"regularization parameter\")\nsetDefault(regParam, 0.000001)\nfinal def getRegParam: Double = $(regParam)\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1197] Eliminate collision of regularization and tolerance parameters Closes #359.
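The root cause here is that ML parameters are identified by their declared name: before the fix, regParam was constructed with the name "tol", so it clashed with the actual tolerance parameter. A minimal Scala sketch of a name-keyed parameter map (a stand-in, not the Spark ML Params API) makes the collision visible:

```scala
object ParamNameCollisionDemo {
  def main(args: Array[String]): Unit = {
    // Buggy declaration: both parameters registered under the key "tol".
    val buggy = Map("tol" -> 1e-6) + ("tol" -> 1e-3)
    println(buggy) // Map(tol -> 0.001) - the tolerance value was silently overwritten

    // Fixed declaration: distinct names, both values survive.
    val fixed = Map("tol" -> 1e-6, "regParam" -> 1e-3)
    println(fixed) // Map(tol -> 1.0E-6, regParam -> 0.001)
  }
}
```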
49,736
01.02.2017 14:48:33
28,800
6fad65d1d5ae4f1e65bdf99a68faf8396f280331
[MINOR] Added external builtin functions for performing cumsumprod and rowclassmeet
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/udf/lib/CumSumProd.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.udf.lib;\n+\n+import java.io.IOException;\n+import java.util.Iterator;\n+\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.CacheException;\n+import org.apache.sysml.runtime.matrix.data.IJV;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.udf.FunctionParameter;\n+import org.apache.sysml.udf.Matrix;\n+import org.apache.sysml.udf.PackageFunction;\n+import org.apache.sysml.udf.Scalar;\n+import org.apache.sysml.udf.Matrix.ValueType;\n+\n+/**\n+ * Variant of cumsum:\n+ * Computes following two functions:\n+ *\n+ * cumsum_prod = function (Matrix[double] X, Matrix[double] C, double start) return (Matrix[double] Y)\n+ * # Computes the following recurrence in log-number of steps:\n+ * # Y [1, ] = X [1, ] + C [1, ] * start;\n+ * # Y [i+1, ] = X [i+1, ] + C [i+1, ] * Y [i, ]\n+ * {\n+ * Y = X; P = C; m = nrow(X); k = 1;\n+ * Y [1, ] = Y [1, ] + C [1, ] * start;\n+ * while (k < m) {\n+ * Y [k+1 : m, ] = Y [k+1 : m, ] + Y [1 : m-k, ] * P [k+1 : m, ];\n+ * P [k+1 : m, ] = P [1 : m-k, ] * P [k+1 : m, ];\n+ * k = 2 * k;\n+ * }\n+ * }\n+ *\n+ * cumsum_prod_reverse = function (Matrix[double] X, Matrix[double] C, double start) return (Matrix[double] Y)\n+ * # Computes the reverse recurrence in log-number of steps:\n+ * # Y [m, ] = X [m, ] + C [m, ] * start;\n+ * # Y [i-1, ] = X [i-1, ] + C [i-1, ] * Y [i, ]\n+ * {\n+ * Y = X; P = C; m = nrow(X); k = 1;\n+ * Y [m, ] = Y [m, ] + C [m, ] * start;\n+ * while (k < m) {\n+ * Y [1 : m-k, ] = Y [1 : m-k, ] + Y [k+1 : m, ] * P [1 : m-k, ];\n+ * P [1 : m-k, ] = P [k+1 : m, ] * P [1 : m-k, ];\n+ * k = 2 * k;\n+ * }\n+ * }\n+ *\n+ * The API of this external built-in function is as follows:\n+ *\n+ * func = externalFunction(matrix[double] X, matrix[double] C, double start, boolean isReverse) return (matrix[double] Y)\n+ * implemented in (classname=\"org.apache.sysml.udf.lib.CumSumProd\",exectype=\"mem\");\n+ */\n+public class CumSumProd extends PackageFunction {\n+\n+ private static final long serialVersionUID = -7883258699548686065L;\n+ private Matrix ret;\n+ private MatrixBlock retMB, X, C;\n+ private double start;\n+ private boolean isReverse;\n+\n+ @Override\n+ public int getNumFunctionOutputs() {\n+ return 1;\n+ }\n+\n+ @Override\n+ public FunctionParameter getFunctionOutput(int pos) {\n+ if(pos == 0)\n+ return ret;\n+ else\n+ throw new RuntimeException(\"CumSumProd produces only one 
output\");\n+ }\n+\n+ @Override\n+ public void execute() {\n+ try {\n+ X = ((Matrix) getFunctionInput(0)).getMatrixObject().acquireRead();\n+ C = ((Matrix) getFunctionInput(1)).getMatrixObject().acquireRead();\n+ if(X.getNumRows() != C.getNumRows())\n+ throw new RuntimeException(\"Number of rows of X and C should match\");\n+ if( X.getNumColumns() != C.getNumColumns() && C.getNumColumns() != 1 )\n+ throw new RuntimeException(\"Incorrect Number of columns of X and C (Expected C to be of same dimension or a vector)\");\n+ start = Double.parseDouble(((Scalar)getFunctionInput(2)).getValue());\n+ isReverse = Boolean.parseBoolean(((Scalar)getFunctionInput(3)).getValue());\n+\n+ numRetRows = X.getNumRows();\n+ numRetCols = X.getNumColumns();\n+ allocateOutput();\n+\n+ // Copy X to Y\n+ denseBlock = retMB.getDenseBlock();\n+ if(X.isInSparseFormat()) {\n+ Iterator<IJV> iter = X.getSparseBlockIterator();\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ denseBlock[ijv.getI()*numRetCols + ijv.getJ()] = ijv.getV();\n+ }\n+ }\n+ else {\n+ if(X.getDenseBlock() != null)\n+ System.arraycopy(X.getDenseBlock(), 0, denseBlock, 0, denseBlock.length);\n+ }\n+\n+ if(!isReverse) {\n+ // Y [1, ] = X [1, ] + C [1, ] * start;\n+ // Y [i+1, ] = X [i+1, ] + C [i+1, ] * Y [i, ]\n+ addCNConstant(0, start);\n+ for(int i = 1; i < numRetRows; i++) {\n+ addC(i, true);\n+ }\n+ }\n+ else {\n+ // Y [m, ] = X [m, ] + C [m, ] * start;\n+ // Y [i-1, ] = X [i-1, ] + C [i-1, ] * Y [i, ]\n+ addCNConstant(numRetRows-1, start);\n+ for(int i = numRetRows - 2; i >= 0; i--) {\n+ addC(i, false);\n+ }\n+ }\n+\n+ ((Matrix) getFunctionInput(1)).getMatrixObject().release();\n+ ((Matrix) getFunctionInput(0)).getMatrixObject().release();\n+ } catch (CacheException e) {\n+ throw new RuntimeException(\"Error while executing CumSumProd\", e);\n+ }\n+\n+ retMB.recomputeNonZeros();\n+ try {\n+ retMB.examSparsity();\n+ ret.setMatrixDoubleArray(retMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+ } catch (DMLRuntimeException e) {\n+ throw new RuntimeException(\"Error while executing CumSumProd\", e);\n+ } catch (IOException e) {\n+ throw new RuntimeException(\"Error while executing CumSumProd\", e);\n+ }\n+ }\n+\n+ int numRetRows; int numRetCols;\n+ double [] denseBlock;\n+\n+ private void addCNConstant(int i, double constant) {\n+ boolean isCVector = C.getNumColumns() != ret.getNumCols();\n+ if(C.isInSparseFormat()) {\n+ Iterator<IJV> iter = C.getSparseBlockIterator(i, i+1);\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ if(!isCVector)\n+ denseBlock[ijv.getI()*numRetCols + ijv.getJ()] += ijv.getV() * constant;\n+ else {\n+ double val = ijv.getV();\n+ for(int j = ijv.getI()*numRetCols; j < (ijv.getI()+1)*numRetCols; j++) {\n+ denseBlock[j] += val*constant;\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ double [] CBlk = C.getDenseBlock();\n+ if(CBlk != null) {\n+ if(!isCVector) {\n+ for(int j = i*numRetCols; j < (i+1)*numRetCols; j++) {\n+ denseBlock[j] += CBlk[j]*constant;\n+ }\n+ }\n+ else {\n+ for(int j = i*numRetCols; j < (i+1)*numRetCols; j++) {\n+ denseBlock[j] += CBlk[i]*constant;\n+ }\n+ }\n+ }\n+ }\n+ }\n+\n+ private void addC(int i, boolean addPrevRow) {\n+ boolean isCVector = C.getNumColumns() != ret.getNumCols();\n+ if(C.isInSparseFormat()) {\n+ Iterator<IJV> iter = C.getSparseBlockIterator(i, i+1);\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ if(!isCVector) {\n+ if(addPrevRow)\n+ denseBlock[ijv.getI()*numRetCols + ijv.getJ()] += ijv.getV() * denseBlock[(ijv.getI()-1)*numRetCols + ijv.getJ()];\n+ else\n+ 
denseBlock[ijv.getI()*numRetCols + ijv.getJ()] += ijv.getV() * denseBlock[(ijv.getI()+1)*numRetCols + ijv.getJ()];\n+ }\n+ else {\n+ double val = ijv.getV();\n+ for(int j = ijv.getI()*numRetCols; j < (ijv.getI()+1)*numRetCols; j++) {\n+ double val1 = addPrevRow ? denseBlock[(ijv.getI()-1)*numRetCols + ijv.getJ()] : denseBlock[(ijv.getI()+1)*numRetCols + ijv.getJ()];\n+ denseBlock[j] += val*val1;\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ double [] CBlk = C.getDenseBlock();\n+ if(CBlk != null) {\n+ if(!isCVector) {\n+ for(int j = i*numRetCols; j < (i+1)*numRetCols; j++) {\n+ double val1 = addPrevRow ? denseBlock[j-numRetCols] : denseBlock[j+numRetCols];\n+ denseBlock[j] += CBlk[j]*val1;\n+ }\n+ }\n+ else {\n+ for(int j = i*numRetCols; j < (i+1)*numRetCols; j++) {\n+ double val1 = addPrevRow ? denseBlock[j-numRetCols] : denseBlock[j+numRetCols];\n+ denseBlock[j] += CBlk[i]*val1;\n+ }\n+ }\n+ }\n+ }\n+ }\n+\n+ private void allocateOutput() {\n+ String dir = createOutputFilePathAndName( \"TMP\" );\n+ ret = new Matrix( dir, numRetRows, numRetCols, ValueType.Double );\n+ retMB = new MatrixBlock((int) numRetRows, (int) numRetCols, false);\n+ retMB.allocateDenseBlock();\n+ }\n+\n+\n+\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/udf/lib/RowClassMeet.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.udf.lib;\n+\n+import java.io.IOException;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.Comparator;\n+import java.util.Iterator;\n+import java.util.Map.Entry;\n+import java.util.TreeMap;\n+\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.CacheException;\n+import org.apache.sysml.runtime.matrix.data.IJV;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.udf.FunctionParameter;\n+import org.apache.sysml.udf.Matrix;\n+import org.apache.sysml.udf.PackageFunction;\n+import org.apache.sysml.udf.Matrix.ValueType;\n+\n+/**\n+ * Performs following operation:\n+ * # Computes the intersection (\"meet\") of equivalence classes for\n+ * # each row of A and B, excluding 0-valued cells.\n+ * # INPUT:\n+ * # A, B = matrices whose rows contain that row's class labels;\n+ * # for each i, rows A [i, ] and B [i, ] define two\n+ * # equivalence relations on some of the columns, which\n+ * # we want to intersect\n+ * # A [i, j] == A [i, k] != 0 if and only if (j ~ k) as defined\n+ * # by row A [i, ];\n+ * # A [i, j] == 0 means that j is excluded by A [i, ]\n+ * # B [i, j] is analogous\n+ * # NOTE 1: Either nrow(A) == nrow(B), or exactly one of A or B\n+ * # has one row that \"applies\" to each row of the other matrix.\n+ * # NOTE 2: If ncol(A) != ncol(B), we pad extra 0-columns up to\n+ * # max (ncol(A), ncol(B)).\n+ * # OUTPUT:\n+ * # Both C and N have the same size as (the max of) A and B.\n+ * # C = matrix whose rows contain class labels that represent\n+ * # the intersection (coarsest common refinement) of the\n+ * # corresponding rows of A and B.\n+ * # C [i, j] == C [i, k] != 0 if and only if (j ~ k) as defined\n+ * # by both A [i, ] and B [j, ]\n+ * # C [i, j] == 0 if and only if A [i, j] == 0 or B [i, j] == 0\n+ * # Additionally, we guarantee that non-0 labels in C [i, ]\n+ * # will be integers from 1 to max (C [i, ]) without gaps.\n+ * # For A and B the labels can be arbitrary.\n+ * # N = matrix with class-size information for C-cells\n+ * # N [i, j] = count of {C [i, k] | C [i, j] == C [i, k] != 0}\n+ *\n+ */\n+public class RowClassMeet extends PackageFunction {\n+\n+ private static final long serialVersionUID = 1L;\n+ private Matrix CMat, NMat;\n+ private MatrixBlock A, B, C, N;\n+ private int nr, nc;\n+\n+ @Override\n+ public int getNumFunctionOutputs() {\n+ return 2;\n+ }\n+\n+ @Override\n+ public FunctionParameter getFunctionOutput(int pos) {\n+ if(pos == 0)\n+ return CMat;\n+ else if(pos == 1)\n+ return NMat;\n+ else\n+ throw new RuntimeException(\"RowClassMeet produces only one output\");\n+ }\n+\n+\n+ public class ClassLabels {\n+ public double aVal;\n+ public double bVal;\n+ public ClassLabels(double aVal, double bVal) {\n+ this.aVal = aVal;\n+ this.bVal = bVal;\n+ }\n+ }\n+\n+ public class ClassLabelComparator implements Comparator<ClassLabels> {\n+ Integer tmp1, tmp2;\n+ @Override\n+ public int compare(ClassLabels o1, ClassLabels o2) {\n+ if(o1.aVal != o2.aVal) {\n+ tmp1 = (int) o1.aVal;\n+ tmp2 = (int) o2.aVal;\n+ }\n+ else {\n+ tmp1 = (int) o1.bVal;\n+ tmp2 = (int) o2.bVal;\n+ }\n+ return tmp1.compareTo(tmp2);\n+ }\n+ }\n+\n+ double [] getRow(MatrixBlock B, double [] bRow, int i) {\n+ if(B.getNumRows() == 1)\n+ 
i = 0;\n+ Arrays.fill(bRow, 0);\n+ if(B.isInSparseFormat()) {\n+ Iterator<IJV> iter = B.getSparseBlockIterator(i, i+1);\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ bRow[ijv.getJ()] = ijv.getV();\n+ }\n+ }\n+ else {\n+ double [] denseBlk = B.getDenseBlock();\n+ if(denseBlk != null)\n+ System.arraycopy(denseBlk, i*B.getNumColumns(), bRow, 0, B.getNumColumns());\n+ }\n+ return bRow;\n+ }\n+\n+ @Override\n+ public void execute() {\n+ try {\n+ A = ((Matrix) getFunctionInput(0)).getMatrixObject().acquireRead();\n+ B = ((Matrix) getFunctionInput(1)).getMatrixObject().acquireRead();\n+ nr = Math.max(A.getNumRows(), B.getNumRows());\n+ nc = Math.max(A.getNumColumns(), B.getNumColumns());\n+\n+ double [] bRow = new double[B.getNumColumns()];\n+ CMat = new Matrix( createOutputFilePathAndName( \"TMP\" ), nr, nc, ValueType.Double );\n+ C = new MatrixBlock(nr, nc, false);\n+ C.allocateDenseBlock();\n+ NMat = new Matrix( createOutputFilePathAndName( \"TMP\" ), nr, nc, ValueType.Double );\n+ N = new MatrixBlock(nr, nc, false);\n+ N.allocateDenseBlock();\n+\n+ double [] cBlk = C.getDenseBlock();\n+ double [] nBlk = N.getDenseBlock();\n+\n+ if(B.getNumRows() == 1)\n+ getRow(B, bRow, 0);\n+\n+ for(int i = 0; i < A.getNumRows(); i++) {\n+ if(B.getNumRows() != 1)\n+ getRow(B, bRow, i);\n+\n+ // Create class labels\n+ TreeMap<ClassLabels, ArrayList<Integer>> classLabelMapping = new TreeMap<ClassLabels, ArrayList<Integer>>(new ClassLabelComparator());\n+ if(A.isInSparseFormat()) {\n+ Iterator<IJV> iter = A.getSparseBlockIterator(i, i+1);\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ int j = ijv.getJ();\n+ double aVal = ijv.getV();\n+ if(aVal != 0 && bRow[j] != 0) {\n+ ClassLabels key = new ClassLabels(aVal, bRow[j]);\n+ if(!classLabelMapping.containsKey(key))\n+ classLabelMapping.put(key, new ArrayList<Integer>());\n+ classLabelMapping.get(key).add(j);\n+ }\n+ }\n+ }\n+ else {\n+ double [] denseBlk = A.getDenseBlock();\n+ if(denseBlk != null) {\n+ int offset = i*A.getNumColumns();\n+ for(int j = 0; j < A.getNumColumns(); j++) {\n+ double aVal = denseBlk[offset + j];\n+ if(aVal != 0 && bRow[j] != 0) {\n+ ClassLabels key = new ClassLabels(aVal, bRow[j]);\n+ if(!classLabelMapping.containsKey(key))\n+ classLabelMapping.put(key, new ArrayList<Integer>());\n+ classLabelMapping.get(key).add(j);\n+ }\n+ }\n+ }\n+ }\n+\n+\n+ int labelID = 1;\n+ for(Entry<ClassLabels, ArrayList<Integer>> entry : classLabelMapping.entrySet()) {\n+ double nVal = entry.getValue().size();\n+ for(Integer j : entry.getValue()) {\n+ nBlk[i*nc + j] = nVal;\n+ cBlk[i*nc + j] = labelID;\n+ }\n+ labelID++;\n+ }\n+ }\n+\n+ ((Matrix) getFunctionInput(0)).getMatrixObject().release();\n+ ((Matrix) getFunctionInput(1)).getMatrixObject().release();\n+ } catch (CacheException e) {\n+ throw new RuntimeException(\"Error while executing RowClassMeet\", e);\n+ }\n+\n+ try {\n+ C.recomputeNonZeros();\n+ C.examSparsity();\n+ CMat.setMatrixDoubleArray(C, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+ N.recomputeNonZeros();\n+ N.examSparsity();\n+ NMat.setMatrixDoubleArray(N, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+ } catch (DMLRuntimeException e) {\n+ throw new RuntimeException(\"Error while executing RowClassMeet\", e);\n+ } catch (IOException e) {\n+ throw new RuntimeException(\"Error while executing RowClassMeet\", e);\n+ }\n+ }\n+\n+\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Added external builtin functions for performing cumsumprod and rowclassmeet
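CumSumProd implements the recurrence Y[1,] = X[1,] + C[1,] * start and Y[i+1,] = X[i+1,] + C[i+1,] * Y[i,] (or its reverse). As a reference for what the external function computes, here is a naive, column-vector Scala version of the forward recurrence; it is an illustration only and does not reflect the sparse/dense handling or blocking in the Java UDF.

```scala
object CumSumProdDemo {
  // Naive forward recurrence over column vectors:
  //   y(0) = x(0) + c(0) * start
  //   y(i) = x(i) + c(i) * y(i-1)
  def cumSumProd(x: Array[Double], c: Array[Double], start: Double): Array[Double] = {
    require(x.length == c.length, "X and C must have the same number of rows")
    val y = new Array[Double](x.length)
    var prev = start
    for (i <- x.indices) {
      y(i) = x(i) + c(i) * prev
      prev = y(i)
    }
    y
  }

  def main(args: Array[String]): Unit = {
    val y = cumSumProd(Array(1.0, 2.0, 3.0), Array(0.5, 0.5, 0.5), 0.0)
    println(y.mkString(", ")) // 1.0, 2.5, 4.25
  }
}
```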
49,762
02.02.2017 16:08:08
28,800
e2cb2328b09ae97bc94f309873a692d87f3598d4
Change sqlContext to spark in MLContext docs The variable sqlContext is no longer available by default in the spark shell; instead, spark should be used to create DataFrames. Where methods expect an instance of SQLContext, arguments are replaced with spark.sqlContext. Closes
[ { "change_type": "MODIFY", "old_path": "docs/spark-mlcontext-programming-guide.md", "new_path": "docs/spark-mlcontext-programming-guide.md", "diff": "@@ -141,7 +141,7 @@ val numRows = 10000\nval numCols = 1000\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\n-val df = sqlContext.createDataFrame(data, schema)\n+val df = spark.createDataFrame(data, schema)\n{% endhighlight %}\n</div>\n@@ -167,7 +167,7 @@ data: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = MapPartitionsRDD[1] a\nscala> val schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\nschema: org.apache.spark.sql.types.StructType = StructType(StructField(C0,DoubleType,true), StructField(C1,DoubleType,true), StructField(C2,DoubleType,true), StructField(C3,DoubleType,true), StructField(C4,DoubleType,true), StructField(C5,DoubleType,true), StructField(C6,DoubleType,true), StructField(C7,DoubleType,true), StructField(C8,DoubleType,true), StructField(C9,DoubleType,true), StructField(C10,DoubleType,true), StructField(C11,DoubleType,true), StructField(C12,DoubleType,true), StructField(C13,DoubleType,true), StructField(C14,DoubleType,true), StructField(C15,DoubleType,true), StructField(C16,DoubleType,true), StructField(C17,DoubleType,true), StructField(C18,DoubleType,true), StructField(C19,DoubleType,true), StructField(C20,DoubleType,true), StructField(C21,DoubleType,true), ...\n-scala> val df = sqlContext.createDataFrame(data, schema)\n+scala> val df = spark.createDataFrame(data, schema)\ndf: org.apache.spark.sql.DataFrame = [C0: double, C1: double, C2: double, C3: double, C4: double, C5: double, C6: double, C7: double, C8: double, C9: double, C10: double, C11: double, C12: double, C13: double, C14: double, C15: double, C16: double, C17: double, C18: double, C19: double, C20: double, C21: double, C22: double, C23: double, C24: double, C25: double, C26: double, C27: double, C28: double, C29: double, C30: double, C31: double, C32: double, C33: double, C34: double, C35: double, C36: double, C37: double, C38: double, C39: double, C40: double, C41: double, C42: double, C43: double, C44: double, C45: double, C46: double, C47: double, C48: double, C49: double, C50: double, C51: double, C52: double, C53: double, C54: double, C55: double, C56: double, C57: double, C58: double, C5...\n{% endhighlight %}\n@@ -1540,7 +1540,7 @@ val numRows = 10000\nval numCols = 1000\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\n-val df = sqlContext.createDataFrame(data, schema)\n+val df = spark.createDataFrame(data, schema)\nval mm = new MatrixMetadata(numRows, numCols)\nval minMaxMeanScript = dml(minMaxMean).in(\"Xin\", df, mm).out(\"minOut\", \"maxOut\", \"meanOut\")\nval minMaxMeanScript = dml(minMaxMean).in(\"Xin\", df, mm).out(\"minOut\", \"maxOut\", \"meanOut\")\n@@ -1561,7 +1561,7 @@ val numRows = 10000\nval numCols = 1000\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\n-val df = sqlContext.createDataFrame(data, schema)\n+val df = spark.createDataFrame(data, schema)\nval mm = new MatrixMetadata(numRows, numCols)\nval bbm = new BinaryBlockMatrix(df, 
mm)\nval minMaxMeanScript = dml(minMaxMean).in(\"Xin\", bbm).out(\"minOut\", \"maxOut\", \"meanOut\")\n@@ -1852,7 +1852,7 @@ data: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = MapPartitionsRDD[1] a\nscala> val schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\nschema: org.apache.spark.sql.types.StructType = StructType(StructField(C0,DoubleType,true), StructField(C1,DoubleType,true), StructField(C2,DoubleType,true), StructField(C3,DoubleType,true), StructField(C4,DoubleType,true), StructField(C5,DoubleType,true), StructField(C6,DoubleType,true), StructField(C7,DoubleType,true), StructField(C8,DoubleType,true), StructField(C9,DoubleType,true), StructField(C10,DoubleType,true), StructField(C11,DoubleType,true), StructField(C12,DoubleType,true), StructField(C13,DoubleType,true), StructField(C14,DoubleType,true), StructField(C15,DoubleType,true), StructField(C16,DoubleType,true), StructField(C17,DoubleType,true), StructField(C18,DoubleType,true), StructField(C19,DoubleType,true), StructField(C20,DoubleType,true), StructField(C21,DoubleType,true), ...\n-scala> val df = sqlContext.createDataFrame(data, schema)\n+scala> val df = spark.createDataFrame(data, schema)\ndf: org.apache.spark.sql.DataFrame = [C0: double, C1: double, C2: double, C3: double, C4: double, C5: double, C6: double, C7: double, C8: double, C9: double, C10: double, C11: double, C12: double, C13: double, C14: double, C15: double, C16: double, C17: double, C18: double, C19: double, C20: double, C21: double, C22: double, C23: double, C24: double, C25: double, C26: double, C27: double, C28: double, C29: double, C30: double, C31: double, C32: double, C33: double, C34: double, C35: double, C36: double, C37: double, C38: double, C39: double, C40: double, C41: double, C42: double, C43: double, C44: double, C45: double, C46: double, C47: double, C48: double, C49: double, C50: double, C51: double, C52: double, C53: double, C54: double, C55: double, C56: double, C57: double, C58: double, C5...\n{% endhighlight %}\n@@ -1867,7 +1867,7 @@ val numRows = 100000\nval numCols = 1000\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\n-val df = sqlContext.createDataFrame(data, schema)\n+val df = spark.createDataFrame(data, schema)\n{% endhighlight %}\n</div>\n@@ -1889,7 +1889,7 @@ scala> import org.apache.sysml.api.MLOutput\nimport org.apache.sysml.api.MLOutput\nscala> def getScalar(outputs: MLOutput, symbol: String): Any =\n- | outputs.getDF(sqlContext, symbol).first()(1)\n+ | outputs.getDF(spark.sqlContext, symbol).first()(1)\ngetScalar: (outputs: org.apache.sysml.api.MLOutput, symbol: String)Any\nscala> def getScalarDouble(outputs: MLOutput, symbol: String): Double =\n@@ -1907,7 +1907,7 @@ getScalarInt: (outputs: org.apache.sysml.api.MLOutput, symbol: String)Int\n{% highlight scala %}\nimport org.apache.sysml.api.MLOutput\ndef getScalar(outputs: MLOutput, symbol: String): Any =\n-outputs.getDF(sqlContext, symbol).first()(1)\n+outputs.getDF(spark.sqlContext, symbol).first()(1)\ndef getScalarDouble(outputs: MLOutput, symbol: String): Double =\ngetScalar(outputs, symbol).asInstanceOf[Double]\ndef getScalarInt(outputs: MLOutput, symbol: String): Int =\n@@ -2264,7 +2264,7 @@ The Spark `LinearDataGenerator` is used to generate test data for the Spark ML a\n{% highlight scala %}\n// Generate data\nimport 
org.apache.spark.mllib.util.LinearDataGenerator\n-import sqlContext.implicits._\n+import spark.implicits._\nval numRows = 10000\nval numCols = 1000\n@@ -2549,7 +2549,7 @@ This cell contains helper methods to return `Double` and `Int` values from outpu\nimport org.apache.sysml.api.MLOutput\ndef getScalar(outputs: MLOutput, symbol: String): Any =\n- outputs.getDF(sqlContext, symbol).first()(1)\n+ outputs.getDF(spark.sqlContext, symbol).first()(1)\ndef getScalarDouble(outputs: MLOutput, symbol: String): Double =\ngetScalar(outputs, symbol).asInstanceOf[Double]\n@@ -2638,7 +2638,7 @@ val outputs = ml.executeScript(linearReg)\nval trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0\n// Get outputs\n-val B = outputs.getDF(sqlContext, \"beta_out\").sort(\"ID\").drop(\"ID\")\n+val B = outputs.getDF(spark.sqlContext, \"beta_out\").sort(\"ID\").drop(\"ID\")\nval r2 = getScalarDouble(outputs, \"R2\")\nval iters = getScalarInt(outputs, \"totalIters\")\nval trainingTimePerIter = trainingTime / iters\n@@ -2815,7 +2815,7 @@ outputs = ml.executeScript(pnmf, {\"X\": X_train, \"maxiter\": 100, \"rank\": 10}, [\"W\n{% highlight python %}\n# Plot training loss over time\n-losses = outputs.getDF(sqlContext, \"losses\")\n+losses = outputs.getDF(spark.sqlContext, \"losses\")\nxy = losses.sort(losses.ID).map(lambda r: (r[0], r[1])).collect()\nx, y = zip(*xy)\nplt.plot(x, y)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1181] Change sqlContext to spark in MLContext docs The variable sqlContext is not available by default in the spark shell anymore, instead spark should be used to create DataFrames. Where methods expect an instance of SqlContext, arguments are replaced with spark.sqlContext. Closes #371.
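For readers updating their own snippets along with these docs, the change boils down to building DataFrames from the SparkSession rather than the removed sqlContext value. A minimal, self-contained sketch (assuming Spark 2.x on the classpath; the random data mirrors the guide's example):

```scala
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import scala.util.Random

object SparkSessionDataFrameDemo {
  def main(args: Array[String]): Unit = {
    // In spark-shell this session already exists as `spark`.
    val spark = SparkSession.builder().appName("df-demo").master("local[*]").getOrCreate()

    val numRows = 100
    val numCols = 10
    val data = spark.sparkContext.parallelize(0 until numRows)
      .map(_ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)))
    val schema = StructType((0 until numCols).map(i => StructField("C" + i, DoubleType, true)))

    // spark.createDataFrame replaces the old sqlContext.createDataFrame call.
    val df = spark.createDataFrame(data, schema)
    println(df.count())
    spark.stop()
  }
}
```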
49,772
06.02.2017 14:48:12
28,800
a11374b890052dd9041eebaabac7dfff3f7d37c7
Minor updates to the breast cancer project.
[ { "change_type": "MODIFY", "old_path": "projects/breast_cancer/Preprocessing.ipynb", "new_path": "projects/breast_cancer/Preprocessing.ipynb", "diff": "},\n\"outputs\": [],\n\"source\": [\n- \"def create_tile_generator(slide, tile_size=1024, overlap=0):\\n\",\n+ \"def create_tile_generator(slide, tile_size, overlap):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" Create a tile generator for the given slide.\\n\",\n\" \\n\",\n},\n\"outputs\": [],\n\"source\": [\n- \"def process_slide(slide_num, folder, training, tile_size=1024, overlap=0):\\n\",\n+ \"def process_slide(slide_num, folder, training, tile_size, overlap):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" Generate all possible tile indices for a whole-slide image.\\n\",\n\" \\n\",\n},\n\"outputs\": [],\n\"source\": [\n- \"def keep_tile(tile_tuple, tile_size=1024, tissue_threshold=0.9):\\n\",\n+ \"def keep_tile(tile_tuple, tile_size, tissue_threshold):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" Determine if a tile should be kept.\\n\",\n\" \\n\",\n},\n\"outputs\": [],\n\"source\": [\n- \"def process_tile(tile_tuple, sample_size=256, grayscale=False):\\n\",\n+ \"def process_tile(tile_tuple, sample_size, grayscale):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" Process a tile into a group of smaller samples.\\n\",\n\" \\n\",\n\"\\n\",\n\"# Settings\\n\",\n\"training = True\\n\",\n- \"sample_size=64\\n\",\n- \"grayscale = True\\n\",\n+ \"tile_size = 1024\\n\",\n+ \"sample_size = 256\\n\",\n+ \"grayscale = False\\n\",\n\"num_partitions = 20000\\n\",\n\"folder = \\\"/home/MDM/breast_cancer/data\\\"\"\n]\n\"outputs\": [],\n\"source\": [\n\"# Process all slides\\n\",\n- \"df = preprocess(slide_nums, sample_size=sample_size, grayscale=grayscale,\\n\",\n+ \"df = preprocess(slide_nums, tile_size=tile_size, sample_size=sample_size, grayscale=grayscale,\\n\",\n\" training=training, num_partitions=num_partitions, folder=folder)\\n\",\n\"df\"\n]\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1185] Minor updates to the breast cancer project.
49,738
08.02.2017 16:36:53
-3,600
f8d707788b106108dadcb9f7796cfd3c591bb294
Fix javadoc issues of compressed linear algebra v2
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/BitmapEncoder.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/BitmapEncoder.java", "diff": "@@ -94,9 +94,9 @@ public class BitmapEncoder\n* <b>NOTE: This method must be kept in sync with {@link BitmapDecoderRLE}\n* !</b>\n*\n- * @param offsets\n- * uncompressed contents of the bitmap, expressed as a list of\n- * the offsets of different bits\n+ * @param offsets uncompressed offset list\n+ * @param len logical length of the given offset list\n+ *\n* @return compressed version of said bitmap\n*/\npublic static char[] genRLEBitmap(int[] offsets, int len) {\n@@ -187,9 +187,9 @@ public class BitmapEncoder\n* Encodes the bitmap in blocks of offsets. Within each block, the bits are\n* stored as absolute offsets from the start of the block.\n*\n- * @param offsets\n- * uncompressed contents of the bitmap, expressed as a list of\n- * the offsets of different bits\n+ * @param offsets uncompressed offset list\n+ * @param len logical length of the given offset list\n+ *\n* @return compressed version of said bitmap\n*/\npublic static char[] genOffsetBitmap(int[] offsets, int len)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupOffset.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupOffset.java", "diff": "@@ -88,12 +88,12 @@ public abstract class ColGroupOffset extends ColGroupValue\n/**\n* Constructor for subclass methods that need to create shallow copies\n*\n- * @param type compression type\n* @param colIndices\n* raw column index information\n* @param numRows\n* number of rows in the block\n- * @param zeros ?\n+ * @param zeros\n+ * indicator if column group contains zero values\n* @param values\n* set of distinct values for the block (associated bitmaps are\n* kept in the subclass)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupValue.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupValue.java", "diff": "@@ -56,7 +56,7 @@ public abstract class ColGroupValue extends ColGroup\n}\n/**\n- * Main constructor. 
Stores the headers for the individual bitmaps.\n+ * Stores the headers for the individual bitmaps.\n*\n* @param colIndices\n* indices (within the block) of the columns included in this\n@@ -296,7 +296,7 @@ public abstract class ColGroupValue extends ColGroup\n* @param result output matrix block\n* @param rl row lower index, inclusive\n* @param ru row upper index, exclusive\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException on invalid inputs\n*/\npublic abstract void unaryAggregateOperations(AggregateUnaryOperator op, MatrixBlock result, int rl, int ru)\nthrows DMLRuntimeException;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/PlanningCoCodingGroup.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/PlanningCoCodingGroup.java", "diff": "@@ -54,10 +54,10 @@ public class PlanningCoCodingGroup\n/**\n* Constructor for merging two disjoint groups of columns\n*\n- * @param grp1 first group of columns to merge\n- * @param grp2 second group to merge\n- * @param bitmapSizeEstimator bitmap size estimator\n- * @param numRowsWeight numRows x sparsity\n+ * @param grp1 first column group to merge\n+ * @param grp2 second column group to merge\n+ * @param estim bitmap size estimator\n+ * @param numRows number of rows\n*/\npublic PlanningCoCodingGroup(PlanningCoCodingGroup grp1, PlanningCoCodingGroup grp2,\nCompressedSizeEstimator estim, int numRows)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/utils/IntArrayList.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/utils/IntArrayList.java", "diff": "@@ -71,7 +71,8 @@ public class IntArrayList\n* physically larger than the actual length of the offset lists. Use size()\n* to obtain the actual length.\n*\n- * @return\n+ * @return integer array of offsets, the physical array length\n+ * may be larger than the length of the offset list\n*/\npublic int[] extractValues() {\nif( _size == 1 )\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-449] Fix javadoc issues of compressed linear algebra v2
49,738
09.02.2017 04:56:07
-3,600
d3cfcafcf38ab2b11f47621c475da44614e4f582
Fix size propagation right indexing (all rows/columns) This patch fixes specific issues of inferring the size of X[1:n,1:m], where n=nrow(Y) and m=ncol(Z). Until now, we always used n=nrow(X) and m=ncol(X) even if Y!=X or Z!=X.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/IndexingOp.java", "new_path": "src/main/java/org/apache/sysml/hops/IndexingOp.java", "diff": "@@ -402,7 +402,6 @@ public class IndexingOp extends Hop\n@Override\npublic void refreshSizeInformation()\n{\n- Hop input1 = getInput().get(0); //original matrix\nHop input2 = getInput().get(1); //inpRowL\nHop input3 = getInput().get(2); //inpRowU\nHop input4 = getInput().get(3); //inpColL\n@@ -421,8 +420,10 @@ public class IndexingOp extends Hop\n//set dimension information\nif( _rowLowerEqualsUpper ) //ROWS\nsetDim1(1);\n- else if( allRows )\n- setDim1(input1.getDim1());\n+ else if( allRows ) {\n+ //input3 guaranteed to be a unaryop-nrow\n+ setDim1(input3.getInput().get(0).getDim1());\n+ }\nelse if( constRowRange ) {\nsetDim1( HopRewriteUtils.getIntValueSafe((LiteralOp)input3)\n-HopRewriteUtils.getIntValueSafe((LiteralOp)input2)+1 );\n@@ -433,8 +434,10 @@ public class IndexingOp extends Hop\nif( _colLowerEqualsUpper ) //COLS\nsetDim2(1);\n- else if( allCols )\n- setDim2(input1.getDim2());\n+ else if( allCols ) {\n+ //input5 guaranteed to be a unaryop-ncol\n+ setDim2(input5.getInput().get(0).getDim2());\n+ }\nelse if( constColRange ) {\nsetDim2( HopRewriteUtils.getIntValueSafe((LiteralOp)input5)\n-HopRewriteUtils.getIntValueSafe((LiteralOp)input4)+1 );\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1167] Fix size propagation right indexing (all rows/columns) This patch fixes specific issues of inferring the size of X[1:n,1:m], where n=nrow(Y) and m=ncol(Z). Until now, we always used n=nrow(X) and m=ncol(X) even if Y!=X or Z!=X.
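The essence of the fix: when the upper bounds of an indexing expression X[1:n, 1:m] are nrow(Y) and ncol(Z), the output dimensions must be taken from Y and Z (the inputs feeding the bounds), not from X. The toy Scala sketch below (hypothetical Shape record, not the Hop classes) spells out that inference rule:

```scala
object IndexingSizeDemo {
  // Toy shape record standing in for hop dimension metadata.
  final case class Shape(rows: Long, cols: Long)

  // Size inference for X[1:n, 1:m] with n = nrow(rowSrc) and m = ncol(colSrc):
  // the result dimensions come from the sources of n and m, not from X itself.
  def indexedShape(rowSrc: Shape, colSrc: Shape): Shape =
    Shape(rowSrc.rows, colSrc.cols)

  def main(args: Array[String]): Unit = {
    val x = Shape(1000, 1000)
    val y = Shape(10, 1000)  // n = nrow(Y) = 10
    val z = Shape(1000, 20)  // m = ncol(Z) = 20
    // Before the fix the inferred size was nrow(X) x ncol(X) = 1000 x 1000;
    // the correct size of X[1:nrow(Y), 1:ncol(Z)] is 10 x 20.
    println(indexedShape(y, z)) // Shape(10,20)
  }
}
```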
49,736
09.02.2017 15:33:01
28,800
0f2085498c89882e09c379f0fc6c3c2c179fab58
Update the documentation for Python users
[ { "change_type": "MODIFY", "old_path": "docs/beginners-guide-python.md", "new_path": "docs/beginners-guide-python.md", "diff": "@@ -71,8 +71,23 @@ brew install apache-spark16\n### Install SystemML\n-We are working towards uploading the python package on PyPi. Until then, please use following\n-commands:\n+To install released SystemML, please use following commands:\n+\n+<div class=\"codetabs\">\n+<div data-lang=\"Python 2\" markdown=\"1\">\n+```bash\n+pip install systemml\n+```\n+</div>\n+<div data-lang=\"Python 3\" markdown=\"1\">\n+```bash\n+pip3 install systemml\n+```\n+</div>\n+</div>\n+\n+\n+If you want to try out the bleeding edge version, please use following commands:\n<div class=\"codetabs\">\n<div data-lang=\"Python 2\" markdown=\"1\">\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-855] Update the documentation for Python users
49,738
09.02.2017 20:32:47
-3,600
827cdba935adebef02eed710f0e66d5b22d0cba8
Fix aggregate loop vectorization rewrite, incl tests
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteForLoopVectorization.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteForLoopVectorization.java", "diff": "@@ -179,11 +179,14 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\nHop ix = cast.getInput().get(0);\nint aggOpPos = HopRewriteUtils.getValidOpPos(bop.getOp(), MAP_SCALAR_AGGREGATE_SOURCE_OPS);\nAggOp aggOp = MAP_SCALAR_AGGREGATE_TARGET_OPS[aggOpPos];\n+\n//replace cast with sum\n- AggUnaryOp newSum = new AggUnaryOp(cast.getName(), DataType.SCALAR, ValueType.DOUBLE, aggOp, Direction.RowCol, ix);\n+ AggUnaryOp newSum = new AggUnaryOp(cast.getName(), DataType.SCALAR, ValueType.DOUBLE,\n+ aggOp, Direction.RowCol, ix);\nHopRewriteUtils.removeChildReference(cast, ix);\nHopRewriteUtils.removeChildReference(bop, cast);\nHopRewriteUtils.addChildReference(bop, newSum, leftScalar?1:0 );\n+\n//modify indexing expression according to loop predicate from-to\n//NOTE: any redundant index operations are removed via dynamic algebraic simplification rewrites\nint index1 = rowIx ? 1 : 3;\n@@ -193,8 +196,14 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\nHopRewriteUtils.removeChildReferenceByPos(ix, ix.getInput().get(index2), index2);\nHopRewriteUtils.addChildReference(ix, to, index2);\n+ //update indexing size information\n+ if( rowIx )\n+ ((IndexingOp)ix).setRowLowerEqualsUpper(false);\n+ else\n+ ((IndexingOp)ix).setColLowerEqualsUpper(false);\n+ ix.refreshSizeInformation();\n+\nret = csb;\n- //ret.liveIn().removeVariable(itervar);\nLOG.debug(\"Applied vectorizeScalarSumForLoop.\");\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteLoopVectorization.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+import java.util.HashMap;\n+\n+import org.junit.Test;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+/**\n+ * Regression test for loop vectorization rewrite\n+ * for(i in 1:n) s = s + as.scalar(A[i,1]) -> s = s + sum(A[1:n,1])\n+ *\n+ */\n+public class RewriteLoopVectorization extends AutomatedTestBase\n+{\n+ private static final String TEST_NAME1 = \"RewriteLoopVectorizationSum\"; //amendable\n+ private static final String TEST_NAME2 = \"RewriteLoopVectorizationSum2\"; //not amendable\n+\n+ private static final String TEST_DIR = \"functions/misc/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + RewriteLoopVectorization.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testLoopVectorizationSumNoRewrite() {\n+ testRewriteLoopVectorizationSum( TEST_NAME1, false );\n+ }\n+\n+ @Test\n+ public void testLoopVectorizationSumRewrite() {\n+ testRewriteLoopVectorizationSum( TEST_NAME1, true );\n+ }\n+\n+ @Test\n+ public void testLoopVectorizationSum2NoRewrite() {\n+ testRewriteLoopVectorizationSum( TEST_NAME2, false );\n+ }\n+\n+ @Test\n+ public void testLoopVectorizationSum2Rewrite() {\n+ testRewriteLoopVectorizationSum( TEST_NAME2, true );\n+ }\n+\n+ /**\n+ *\n+ * @param testname\n+ * @param rewrites\n+ */\n+ private void testRewriteLoopVectorizationSum( String testname, boolean rewrites )\n+ {\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+\n+ try\n+ {\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{ \"-stats\",\"-args\", output(\"Scalar\") };\n+\n+ fullRScriptName = HOME + testname + \".R\";\n+ rCmd = getRCmd(inputDir(), expectedDir());\n+\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare scalars\n+ HashMap<CellIndex, Double> dmlfile = readDMLScalarFromHDFS(\"Scalar\");\n+ HashMap<CellIndex, Double> rfile = readRScalarFromFS(\"Scalar\");\n+ TestUtils.compareScalars(dmlfile.toString(), rfile.toString());\n+ }\n+ finally {\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ }\n+ }\n+}\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteLoopVectorizationSum.R", "diff": "+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+\n+A = matrix(7.0, 10, 10)\n+n = nrow(A)\n+s = 0.0\n+\n+for( i in 1:n ) {\n+ s = s + A[i,1]\n+}\n+\n+write(s, paste(args[2], \"Scalar\",sep=\"\"))\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteLoopVectorizationSum.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = matrix(7.0, 10, 10)\n+n = nrow(A)\n+s = 0.0\n+\n+for( i in 1:n ) {\n+ s = s + as.scalar(A[i,1])\n+}\n+\n+write(s, $1)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteLoopVectorizationSum2.R", "diff": "+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+\n+A = matrix(7.0, 10, 10)\n+n = nrow(A)\n+s = 0.0\n+\n+for( i in 1:n ) {\n+ s = s + A[i,1]\n+}\n+\n+write(s, paste(args[2], \"Scalar\",sep=\"\"))\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteLoopVectorizationSum2.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = matrix(7.0, 10, 10)\n+n = nrow(A)\n+s = 0.0\n+\n+for( i in 1:n ) {\n+ print(\"[i,1]: \" + i)\n+ s = s + as.scalar(A[i,1])\n+}\n+\n+write(s, $1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1150] Fix aggregate loop vectorization rewrite, incl tests
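The rewrite these tests cover turns a scalar accumulation loop, for(i in 1:n) s = s + as.scalar(A[i,1]), into the single aggregate s = sum(A[1:n,1]). The small Scala check below (plain arrays, not DML or the rewrite code) simply illustrates that the two forms compute the same value, which is what the new R-vs-DML tests verify on real data:

```scala
object LoopVectorizationDemo {
  def main(args: Array[String]): Unit = {
    val n = 10
    val col = Array.fill(n)(7.0) // stands in for column A[ ,1] of the test matrix

    // Loop form: s = s + A[i,1] for i in 1..n
    var sLoop = 0.0
    for (i <- 0 until n) sLoop += col(i)

    // Vectorized form produced by the rewrite: s = sum(A[1:n,1])
    val sVec = col.sum

    assert(sLoop == sVec)
    println(s"loop=$sLoop vectorized=$sVec") // both 70.0
  }
}
```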
49,738
10.02.2017 05:50:52
-3,600
d0b23d607998e0bebd3d9d051faf748dd5530ce8
Fix missing nnz maintenance conv2d ops, incl cleanups This patch extends all conv2d operations by (so far unoptimized) nnz maintenance in order to prevent side effects with update-in-place and other operations that incrementally maintain the number of non-zeros.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "diff": "@@ -401,9 +401,9 @@ public class ProgramBlock\nif( nnz1 != nnz2 )\nthrow new DMLRuntimeException(\"Matrix nnz meta data was incorrect: (\"+varname+\", actual=\"+nnz1+\", expected=\"+nnz2+\", inst=\"+lastInst+\")\");\n-\nif( sparse1 != sparse2 )\n- throw new DMLRuntimeException(\"Matrix was in wrong data representation: (\"+varname+\", actual=\"+sparse1+\", expected=\"+sparse2+\", nnz=\"+nnz1+\", inst=\"+lastInst+\")\");\n+ throw new DMLRuntimeException(\"Matrix was in wrong data representation: (\"+varname+\", actual=\"+sparse1+\", expected=\"+sparse2 +\n+ \", nrow=\"+mb.getNumRows()+\", ncol=\"+mb.getNumColumns()+\", nnz=\"+nnz1+\", inst=\"+lastInst+\")\");\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "diff": "@@ -21,8 +21,6 @@ package org.apache.sysml.runtime.instructions.cp;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n-import org.apache.sysml.parser.Expression.DataType;\n-import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.functionobjects.SwapIndex;\n@@ -33,8 +31,8 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.operators.ReorgOperator;\nimport org.apache.sysml.runtime.util.ConvolutionUtils;\n-public class ConvolutionCPInstruction extends UnaryCPInstruction {\n-\n+public class ConvolutionCPInstruction extends UnaryCPInstruction\n+{\nprivate CPOperand _in2;\nprivate CPOperand _in3;\nprivate ArrayList<CPOperand> _input_shape;\n@@ -101,8 +99,6 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\npublic static ConvolutionCPInstruction parseInstruction(String str)\nthrows DMLRuntimeException {\n- CPOperand in = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n- CPOperand out = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\nString[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\nString opcode = parts[0];\n@@ -111,8 +107,8 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n// stride1, stride2, padding1, padding2\n// input_shape1, input_shape2, input_shape3, input_shape4,\n// filter_shape1, filter_shape2, filter_shape3, filter_shape4, k\n- in.split(parts[1]);\n- out.split(parts[14]);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand out = new CPOperand(parts[14]);\nArrayList<CPOperand> stride = new ArrayList<CPOperand>();\nArrayList<CPOperand> padding = new ArrayList<CPOperand>();\n@@ -143,10 +139,9 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n// dout, stride1, stride2, padding1, padding2\n// input_shape1, input_shape2, input_shape3, input_shape4,\n// filter_shape1, filter_shape2, filter_shape3, filter_shape4, k\n- in.split(parts[1]);\n- CPOperand in2 = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n- in2.split(parts[2]);\n- out.split(parts[15]);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand out = new CPOperand(parts[15]);\nArrayList<CPOperand> stride = new 
ArrayList<CPOperand>();\nArrayList<CPOperand> padding = new ArrayList<CPOperand>();\n@@ -174,12 +169,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n// dout, stride1, stride2, padding1, padding2\n// input_shape1, input_shape2, input_shape3, input_shape4,\n// filter_shape1, filter_shape2, filter_shape3, filter_shape4, k\n- in.split(parts[1]);\n- CPOperand in2 = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n- in2.split(parts[2]);\n- CPOperand in3 = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n- in3.split(parts[3]);\n- out.split(parts[16]);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand in3 = new CPOperand(parts[3]);\n+ CPOperand out = new CPOperand(parts[16]);\nArrayList<CPOperand> stride = new ArrayList<CPOperand>();\nArrayList<CPOperand> padding = new ArrayList<CPOperand>();\n@@ -204,10 +197,9 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\nelse if (opcode.equalsIgnoreCase(\"bias_add\") || opcode.equals(\"relu_backward\")) {\nInstructionUtils.checkNumFields(parts, 4);\n- in.split(parts[1]);\n- CPOperand in2 = new CPOperand(\"\", ValueType.UNKNOWN, DataType.UNKNOWN);\n- in2.split(parts[2]);\n- out.split(parts[3]);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand out = new CPOperand(parts[3]);\nint k = Integer.parseInt(parts[4]);\nreturn new ConvolutionCPInstruction(in, in2, out, opcode, str, k);\n}\n@@ -216,24 +208,23 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\n}\n- private int getScalarInput(ExecutionContext ec, ArrayList<CPOperand> aL,\n- int index) throws DMLRuntimeException {\n+ private int getScalarInput(ExecutionContext ec, ArrayList<CPOperand> aL, int index)\n+ throws DMLRuntimeException {\nreturn (int) ec.getScalarInput(aL.get(index).getName(),\naL.get(index).getValueType(), aL.get(index).isLiteral())\n.getLongValue();\n}\n+ @SuppressWarnings(\"unused\")\npublic void processReluBackwardInstruction(ExecutionContext ec) throws DMLRuntimeException {\n// (X > 0) * dout\n- MatrixBlock outputBlock = null;\nMatrixBlock input = ec.getMatrixInput(input1.getName());\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\n+ MatrixBlock outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(),\n+ LibMatrixDNN.SUPPORTS_SPARSE_OUTPUTS && (input.isInSparseFormat() || dout.isInSparseFormat()));\n- if(input.isEmptyBlock() || dout.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), true, 0);\n- }\n- else {\n- outputBlock = getDenseOutputBlock(ec, input.getNumRows(), input.getNumColumns());\n+ if( !input.isEmptyBlock() && !dout.isEmptyBlock() ) {\n+ outputBlock.allocateDenseOrSparseBlock();\nLibMatrixDNN.reluBackward(input, dout, outputBlock, _numThreads);\n}\n@@ -244,24 +235,24 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\npublic void processBiasAddInstruction(ExecutionContext ec) throws DMLRuntimeException {\n- MatrixBlock outputBlock = null;\nMatrixBlock input = ec.getMatrixInput(input1.getName());\nMatrixBlock bias = ec.getMatrixInput(_in2.getName());\n+ MatrixBlock outputBlock = null;\nif(bias.getNumColumns() != 1) {\nthrow new DMLRuntimeException(\"Expected the number of columns of bias matrix to be 1, but found \" + bias.getNumColumns());\n}\nif(input.isEmptyBlock() && bias.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), true, 0);\n+ outputBlock = new 
MatrixBlock(input.getNumRows(), input.getNumColumns(), true);\n}\nelse if(bias.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), input.isInSparseFormat());\n- outputBlock.copy(input);\n+ outputBlock = new MatrixBlock(input);\n}\nelse {\n// As we always fill the output first with bias\n- outputBlock = getDenseOutputBlock(ec, input.getNumRows(), input.getNumColumns());\n+ outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), false);\n+ outputBlock.allocateDenseBlock();\nLibMatrixDNN.biasAdd(input, bias, outputBlock, _numThreads);\n}\n@@ -307,10 +298,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nConvolutionParameters params = new ConvolutionParameters(N, C, H, W, K, R, S, stride_h, stride_w, pad_h, pad_w, _numThreads);\nif (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\nif(matBlock.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(N, C*P*Q, true, 0);\n+ outputBlock = new MatrixBlock(N, C*P*Q, true);\n}\nelse {\n- outputBlock = getDenseOutputBlock(ec, N, C*P*Q);\n+ outputBlock = getDenseOutputBlock(N, C*P*Q);\nif(instOpcode.equalsIgnoreCase(\"maxpooling\"))\nArrays.fill(outputBlock.getDenseBlock(), -Double.MAX_VALUE);\nLibMatrixDNN.maxpooling(matBlock, outputBlock, params);\n@@ -319,10 +310,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nelse if (instOpcode.equalsIgnoreCase(\"maxpooling_backward\")) {\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\nif(matBlock.isEmptyBlock() || dout.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(N, C*H*W, true, 0);\n+ outputBlock = new MatrixBlock(N, C*H*W, true);\n}\nelse {\n- outputBlock = getDenseOutputBlock(ec, N, C*H*W);\n+ outputBlock = getDenseOutputBlock(N, C*H*W);\nLibMatrixDNN.maxpoolingBackward(matBlock, dout, outputBlock, params);\n}\nec.releaseMatrixInput(_in2.getName());\n@@ -330,10 +321,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nelse if (instOpcode.equalsIgnoreCase(\"conv2d\")) {\nMatrixBlock filter = ec.getMatrixInput(_in2.getName());\nif(filter.isEmptyBlock() || matBlock.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(N, K*P*Q, true, 0);\n+ outputBlock = new MatrixBlock(N, K*P*Q, true);\n}\nelse {\n- outputBlock = getDenseOutputBlock(ec, N, K*P*Q);\n+ outputBlock = getDenseOutputBlock(N, K*P*Q);\nLibMatrixDNN.conv2d(matBlock, filter, outputBlock, params);\n}\nec.releaseMatrixInput(_in2.getName());\n@@ -342,10 +333,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nMatrixBlock filter = ec.getMatrixInput(_in3.getName());\nMatrixBlock bias = ec.getMatrixInput(_in2.getName());\nif((filter.isEmptyBlock() || matBlock.isEmptyBlock()) && bias.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(N, K*P*Q, true, 0);\n+ outputBlock = new MatrixBlock(N, K*P*Q, true);\n}\nelse {\n- outputBlock = getDenseOutputBlock(ec, N, K*P*Q);\n+ outputBlock = getDenseOutputBlock(N, K*P*Q);\nif(!bias.isEmptyBlock())\nparams.bias = bias;\nLibMatrixDNN.conv2d(matBlock, filter, outputBlock, params);\n@@ -356,10 +347,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_backward_filter\")) {\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\nif(dout.isEmptyBlock() || matBlock.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(K, C*R*S, true, 0);\n+ outputBlock = new MatrixBlock(K, C*R*S, true);\n}\nelse {\n- outputBlock = getDenseOutputBlock(ec, K, C*R*S);\n+ outputBlock 
= getDenseOutputBlock(K, C*R*S);\nLibMatrixDNN.conv2dBackwardFilter(matBlock, dout, outputBlock, params);\n}\nec.releaseMatrixInput(_in2.getName());\n@@ -367,10 +358,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_backward_data\")) {\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\nif(dout.isEmptyBlock() || matBlock.isEmptyBlock()) {\n- outputBlock = new MatrixBlock(N, C * H * W, true, 0);\n+ outputBlock = new MatrixBlock(N, C * H * W, true);\n}\nelse {\n- outputBlock = getDenseOutputBlock(ec, N, C * H * W);\n+ outputBlock = getDenseOutputBlock(N, C * H * W);\nLibMatrixDNN.conv2dBackwardData(matBlock, dout, outputBlock, params);\n}\nec.releaseMatrixInput(_in2.getName());\n@@ -384,10 +375,9 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nec.setMatrixOutput(getOutputVariableName(), outputBlock);\n}\n- private MatrixBlock getDenseOutputBlock(ExecutionContext ec, int numRows, int numCols) throws DMLRuntimeException {\n- MatrixBlock outputBlock = new MatrixBlock(numRows, numCols, false, numRows * numCols);\n+ private MatrixBlock getDenseOutputBlock(int numRows, int numCols) throws DMLRuntimeException {\n+ MatrixBlock outputBlock = new MatrixBlock(numRows, numCols, false);\noutputBlock.allocateDenseBlock();\n- outputBlock.setNonZeros(-1);\nreturn outputBlock;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -24,7 +24,6 @@ import java.util.Iterator;\nimport java.util.List;\nimport java.util.concurrent.Callable;\nimport java.util.concurrent.ConcurrentLinkedQueue;\n-import java.util.concurrent.ExecutionException;\nimport java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\n@@ -48,13 +47,14 @@ import org.apache.sysml.runtime.DMLRuntimeException;\npublic class LibMatrixDNN {\nprotected static final Log LOG = LogFactory.getLog(LibMatrixDNN.class.getName());\n- // ------------------------------------------------------------------------------------------------\n- // Useful flags for performance testing:\n- private static boolean DISPLAY_STATISTICS = false;\n- private static final boolean ALLOW_MULTI_THREADED_OPS = true;\n- // ------------------------------------------------------------------------------------------------\n- enum TaskType {\n+ //library configurations and external contracts\n+ public static final boolean SUPPORTS_SPARSE_OUTPUTS = false; //operations able to handle sparse outputs\n+ private static final boolean DISPLAY_STATISTICS = false; //conv2d summaries in stats output\n+ private static final boolean ALLOW_MULTI_THREADED_OPS = true; //enable multi-threading in cp\n+ private static final int NUM_TASK_FACTOR = 2; //number of tasks is vcores scaled by this factor\n+\n+ private enum TaskType {\nMaxPooling_Forward, MaxPooling_Backward,\n// Alternate approaches that we tried but the performance was unsatisfactory be included: direct, non-looped im2col\nLoopedIm2ColConv2d, LoopedIm2ColConv2dBwdFilter, LoopedIm2ColConv2dBwdData,\n@@ -79,6 +79,7 @@ public class LibMatrixDNN {\nprivate static AtomicLong loopedConvBwdDataMatMultTime = new AtomicLong(0);\nprivate static AtomicLong loopedConvBwdDataCol2ImTime = new AtomicLong(0);\n+ @SuppressWarnings(\"unused\")\npublic static void appendStatistics(StringBuilder sb) {\nif(DMLScript.STATISTICS && DISPLAY_STATISTICS && 
(conv2dDenseCount.get() != 0 || conv2dSparseCount.get() != 0)) {\nsb.append(\"LibMatrixDNN dense count (conv/bwdF/bwdD/im2col/maxBwd):\\t\"\n@@ -135,6 +136,7 @@ public class LibMatrixDNN {\n* @param params convolution parameters\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n+ @SuppressWarnings(\"unused\")\npublic static void conv2dBackwardData(MatrixBlock filter, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = filter;\nparams.input2 = dout;\n@@ -157,6 +159,9 @@ public class LibMatrixDNN {\n}\nrunConvTask(TaskType.LoopedIm2ColConv2dBwdData, params);\n+\n+ //post-processing: maintain nnz\n+ outputBlock.recomputeNonZeros();\n}\n/**\n@@ -168,6 +173,7 @@ public class LibMatrixDNN {\n* @param params convolution parameters\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n+ @SuppressWarnings(\"unused\")\npublic static void conv2dBackwardFilter(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\nparams.input2 = dout;\n@@ -190,6 +196,9 @@ public class LibMatrixDNN {\n}\nrunConvTask(TaskType.LoopedIm2ColConv2dBwdFilter, params);\n+\n+ //post-processing: maintain nnz\n+ outputBlock.recomputeNonZeros();\n}\n/**\n@@ -259,6 +268,7 @@ public class LibMatrixDNN {\n}\n}\n+ @SuppressWarnings(\"unused\")\nprivate static void doLoopedIm2ColConv2dBwdData(int n, MatrixBlock dout_reshaped, ConvolutionParameters params) throws DMLRuntimeException {\nMatrixBlock filter = params.input1;\nMatrixBlock dout = params.input2;\n@@ -277,6 +287,7 @@ public class LibMatrixDNN {\n}\n}\n+ @SuppressWarnings(\"unused\")\nprivate static MatrixBlock doLoopedIm2ColConv2dBwdFilter(int n,\nMatrixBlock im2ColOutBlock, MatrixBlock dout_reshaped, MatrixBlock partialRetBlock, ConvolutionParameters params) throws DMLRuntimeException {\nlong t1 = DMLScript.STATISTICS && DISPLAY_STATISTICS ? System.nanoTime() : 0;\n@@ -306,6 +317,7 @@ public class LibMatrixDNN {\nret[2] = j % W;\n}\n+ @SuppressWarnings(\"unused\")\npublic static void conv2d(MatrixBlock input, MatrixBlock filter, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\nparams.input2 = filter;\n@@ -333,8 +345,12 @@ public class LibMatrixDNN {\n}\nrunConvTask(TaskType.LoopedIm2ColConv2d, params);\n+\n+ //post-processing: maintain nnz\n+ outputBlock.recomputeNonZeros();\n}\n+ @SuppressWarnings(\"unused\")\nprivate static void doLoopedIm2ColConv2d(int n, MatrixBlock im2ColOutBlock, ConvolutionParameters params) throws DMLRuntimeException {\nlong t1 = DMLScript.STATISTICS && DISPLAY_STATISTICS ? 
System.nanoTime() : 0;\ndoIm2col(n, im2ColOutBlock, params);\n@@ -372,6 +388,9 @@ public class LibMatrixDNN {\nSystem.arraycopy(matMultOutBlock.denseBlock, 0, params.output.denseBlock, destPos, length);\n}\n// -----------------------------------------------------------------------------\n+\n+ //post-processing: maintain nnz\n+ params.output.recomputeNonZeros();\n}\n/**\n@@ -383,6 +402,7 @@ public class LibMatrixDNN {\n* @param params convolution parameters\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n+ @SuppressWarnings(\"unused\")\npublic static void maxpoolingBackward(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\nparams.input2 = dout;\n@@ -409,6 +429,9 @@ public class LibMatrixDNN {\nfillIndexesArray(params);\nrunConvTask(TaskType.MaxPooling_Backward, params);\n+\n+ //post-processing: maintain nnz\n+ outputBlock.recomputeNonZeros();\n}\nprivate static void fillIndexesArray(ConvolutionParameters params) {\n@@ -611,10 +634,13 @@ public class LibMatrixDNN {\nthrow new DMLRuntimeException(\"Incorrect dimensions for relu_backward:\" +\ninput.getNumRows() + \" != \" + dout.getNumRows() + \" || \" + input.getNumColumns() + \" != \" + dout.getNumColumns());\n}\n+\nrunConvTask(TaskType.ReluBackward, params);\n+\n+ //note: no post-processing as nnz maintained per task\n}\n- private static void doReluBackward(int n, ConvolutionParameters params) throws DMLRuntimeException {\n+ private static long doReluBackward(ConvolutionParameters params, int rl, int ru) throws DMLRuntimeException {\n// (X > 0) * dout\ndouble [] outputArray = params.output.getDenseBlock();\nint numOutCols = params.input1.getNumColumns();\n@@ -622,14 +648,14 @@ public class LibMatrixDNN {\nif(!params.input1.isInSparseFormat() && !params.input2.isInSparseFormat()) {\ndouble [] inputArr = params.input1.getDenseBlock();\ndouble [] doutArr = params.input2.getDenseBlock();\n- for(int i = n*numOutCols; i < (n+1)*numOutCols; i++) {\n+ for(int i = rl*numOutCols; i < ru*numOutCols; i++) {\noutputArray[i] = inputArr[i] > 0 ? doutArr[i] : 0;\n}\n}\nelse {\n// Perform (X > 0)\nif(params.input1.isInSparseFormat()) {\n- Iterator<IJV> iter = params.input1.sparseBlock.getIterator(n, n+1);\n+ Iterator<IJV> iter = params.input1.sparseBlock.getIterator(rl, ru);\nwhile(iter.hasNext()) {\nIJV ijv = iter.next();\nint i = ijv.getI();\n@@ -639,13 +665,13 @@ public class LibMatrixDNN {\n}\nelse {\ndouble [] inputArr = params.input1.getDenseBlock();\n- for(int i = n*numOutCols; i < (n+1)*numOutCols; i++) {\n+ for(int i = rl*numOutCols; i < ru*numOutCols; i++) {\noutputArray[i] = inputArr[i] > 0 ? 
1 : 0;\n}\n}\n// Then perform (X > 0) * dout\nif(params.input2.isInSparseFormat()) {\n- Iterator<IJV> iter = params.input2.sparseBlock.getIterator(n, n+1);\n+ Iterator<IJV> iter = params.input2.sparseBlock.getIterator(rl, ru);\nwhile(iter.hasNext()) {\nIJV ijv = iter.next();\nint i = ijv.getI();\n@@ -655,11 +681,14 @@ public class LibMatrixDNN {\n}\nelse {\ndouble [] doutArr = params.input2.getDenseBlock();\n- for(int i = n*numOutCols; i < (n+1)*numOutCols; i++) {\n+ for(int i = rl*numOutCols; i < ru*numOutCols; i++) {\noutputArray[i] *= doutArr[i];\n}\n}\n}\n+\n+ //post-processing: maintain nnz\n+ return params.output.recomputeNonZeros(rl, ru-1, 0, numOutCols-1);\n}\n@@ -704,9 +733,12 @@ public class LibMatrixDNN {\nelse {\nrunConvTask(TaskType.BiasAdd, params);\n}\n+\n+ //post-processing: maintain nnz\n+ params.output.recomputeNonZeros();\n}\n- private static void doBiasAdd(int n1, int n2, ConvolutionParameters params) throws DMLRuntimeException {\n+ private static void doBiasAdd(ConvolutionParameters params, int rl, int ru) throws DMLRuntimeException {\ndouble [] outputArray = params.output.getDenseBlock();\nint PQ = params.C;\nint numOutCols = params.input1.getNumColumns();\n@@ -715,8 +747,8 @@ public class LibMatrixDNN {\ndouble [] inputArr = params.input1.getDenseBlock();\ndouble [] biasArr = params.input2.getDenseBlock();\nint K = params.K;\n- int index = n1*K*PQ;\n- for(int n = n1; n < n2; n++) {\n+ int index = rl*K*PQ;\n+ for(int n = rl; n < ru; n++) {\nfor(int k = 0; k < K; k++) {\nfor(int pq = 0; pq < PQ; pq++, index++) {\noutputArray[index] = inputArr[index] + biasArr[k];\n@@ -725,9 +757,9 @@ public class LibMatrixDNN {\n}\n}\nelse {\n- fillBias(params.input2, outputArray, n1, n2, params.N, params.K, PQ);\n+ fillBias(params.input2, outputArray, rl, ru, params.N, params.K, PQ);\nif(params.input1.isInSparseFormat()) {\n- Iterator<IJV> iter = params.input1.sparseBlock.getIterator(n1, n2);\n+ Iterator<IJV> iter = params.input1.sparseBlock.getIterator(rl, ru);\nwhile(iter.hasNext()) {\nIJV ijv = iter.next();\nint i = ijv.getI();\n@@ -737,7 +769,7 @@ public class LibMatrixDNN {\n}\nelse {\ndouble [] inputArr = params.input1.getDenseBlock();\n- for(int i = n1*numOutCols; i < n2*numOutCols; i++) {\n+ for(int i = rl*numOutCols; i < ru*numOutCols; i++) {\noutputArray[i] += inputArr[i];\n}\n}\n@@ -780,6 +812,9 @@ public class LibMatrixDNN {\nfillIndexesArray(params);\nrunConvTask(TaskType.MaxPooling_Forward, params);\n+\n+ //post-processing: maintain nnz\n+ outputBlock.recomputeNonZeros();\n}\nprivate static void doPooling(int n, ConvolutionParameters params) throws DMLRuntimeException {\n@@ -872,75 +907,63 @@ public class LibMatrixDNN {\nfor(int i = 0; i < poolSize; i++) {\nif(type == TaskType.LoopedIm2ColConv2d || type == TaskType.LoopedIm2ColConv2dBwdFilter) {\nMatrixBlock im2ColOutBlock = new MatrixBlock(params.C*params.R*params.S, params.P*params.Q, false);\n- im2ColOutBlock.allocateDenseBlock(true);\n+ im2ColOutBlock.allocateDenseBlock();\nim2ColOutBlocks.add(im2ColOutBlock);\n}\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\nMatrixBlock partialRetBlock = new MatrixBlock(params.C*params.R*params.S, params.K, false);\n- partialRetBlock.allocateDenseBlock(true);\n+ partialRetBlock.allocateDenseBlock();\npartialRetBlocks.add(partialRetBlock);\n}\nif(type == TaskType.LoopedIm2ColConv2dBwdData || type == TaskType.LoopedIm2ColConv2dBwdFilter) {\nMatrixBlock doutReshapedBlock = new MatrixBlock(params.P*params.Q, params.K, false);\n- doutReshapedBlock.allocateDenseBlock(true);\n+ 
doutReshapedBlock.allocateDenseBlock();\ndoutReshapedBlocks.add(doutReshapedBlock);\n}\n}\n}\n// Methods to execute convolution-related tasks using multiple threads.\nprivate static void runConvTask(TaskType type, ConvolutionParameters params) throws DMLRuntimeException {\n- int constrainedNumThreads = OptimizerUtils.getConstrainedNumThreads(params.numThreads);\n+ int k = OptimizerUtils.getConstrainedNumThreads(params.numThreads);\nConcurrentLinkedQueue<MatrixBlock> im2ColOutBlocks = new ConcurrentLinkedQueue<MatrixBlock>();\nConcurrentLinkedQueue<MatrixBlock> doutReshapedBlocks = new ConcurrentLinkedQueue<MatrixBlock>();\nConcurrentLinkedQueue<MatrixBlock> partialRetBlocks = new ConcurrentLinkedQueue<MatrixBlock>();\n- if (ALLOW_MULTI_THREADED_OPS && params.isOutputThreadSafe() && constrainedNumThreads > 1) {\n- int poolSize = Math.min(constrainedNumThreads, params.N);\n+\n+ if (ALLOW_MULTI_THREADED_OPS && params.isOutputThreadSafe() && k > 1) {\n+ int poolSize = Math.min(k, params.N);\naddMatrixBlocks(poolSize, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks);\n+\nArrayList<ConvTask> tasks = new ArrayList<ConvTask>();\n- int NSize = params.N - poolSize;\n- if(NSize >= constrainedNumThreads) {\n- for(int n = 0; n < params.N; n++)\n- tasks.add(new ConvTask(n, n+1, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks));\n- }\n- else {\n- int numNTasks = (int) Math.ceil(((double) NSize) / constrainedNumThreads);\n- for (int n = 0; n < NSize; n += numNTasks) {\n- tasks.add(new ConvTask(n, Math.min(NSize, n+numNTasks), type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks));\n- }\n- for (int n = NSize; n < params.N; n++)\n- tasks.add(new ConvTask(n, n+1, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks));\n- }\n+ int blklen = (int)(Math.ceil((double)params.N/poolSize/NUM_TASK_FACTOR));\n+ for( int i=0; i<poolSize*NUM_TASK_FACTOR && i*blklen<params.N; i++ )\n+ tasks.add(new ConvTask(i*blklen, Math.min((i+1)*blklen, params.N),\n+ type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks));\n- ExecutorService pool = Executors.newFixedThreadPool( poolSize );\n- List<Future<Object>> taskret;\ntry {\n- taskret = pool.invokeAll(tasks);\n+ ExecutorService pool = Executors.newFixedThreadPool( poolSize );\n+ List<Future<Long>> taskret = pool.invokeAll(tasks);\npool.shutdown();\n- for( Future<Object> task : taskret ) {\n- task.get();\n- }\n+ for( Future<Long> task : taskret )\n+ params.output.nonZeros += task.get();\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\nfor(MatrixBlock partialRetBlock : partialRetBlocks) {\nelementWiseInPlaceTransposedAddition(params.output, partialRetBlock);\n}\n}\n- } catch (InterruptedException e) {\n- throw new DMLRuntimeException(\"Error while executing multi-threaded \" + type.name(), e);\n- } catch (ExecutionException e) {\n+ }\n+ catch (Exception e) {\nthrow new DMLRuntimeException(\"Error while executing multi-threaded \" + type.name(), e);\n}\n}\nelse {\naddMatrixBlocks(1, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks);\n- ConvTask task = new ConvTask(0, 0, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks);\ntry {\n- for(int n = 0; n < params.N; n++) {\n- task.n1 = n;\n- task.n2 = n+1;\n- task.call();\n- }\n+ //execute single task and maintain nnz if supported\n+ params.output.setNonZeros(new ConvTask(0, params.N, type, params, im2ColOutBlocks,\n+ doutReshapedBlocks, partialRetBlocks).call());\n+\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) 
{\nfor(MatrixBlock partialRetBlock : partialRetBlocks) {\nelementWiseInPlaceTransposedAddition(params.output, partialRetBlock);\n@@ -958,92 +981,94 @@ public class LibMatrixDNN {\n* to be executed in multi-thread manner.\n*\n*/\n- private static class ConvTask implements Callable<Object> {\n- public int n1; public int n2;\n- ConvolutionParameters params;\n- TaskType type;\n- ConcurrentLinkedQueue<MatrixBlock> im2ColOutBlocks;\n- ConcurrentLinkedQueue<MatrixBlock> partialRetBlocks;\n- ConcurrentLinkedQueue<MatrixBlock> doutReshapedBlocks;\n- public ConvTask(int n1, int n2, TaskType type, ConvolutionParameters params,\n+ private static class ConvTask implements Callable<Long>\n+ {\n+ public int _rl;\n+ public int _ru;\n+ private final ConvolutionParameters _params;\n+ private final TaskType _type;\n+ private final ConcurrentLinkedQueue<MatrixBlock> _im2ColOutBlocks;\n+ private final ConcurrentLinkedQueue<MatrixBlock> _partialRetBlocks;\n+ private final ConcurrentLinkedQueue<MatrixBlock> _doutReshapedBlocks;\n+\n+ public ConvTask(int rl, int ru, TaskType type, ConvolutionParameters params,\nConcurrentLinkedQueue<MatrixBlock> im2ColOutBlocks,\nConcurrentLinkedQueue<MatrixBlock> doutReshapedBlocks,\nConcurrentLinkedQueue<MatrixBlock> partialRetBlocks) {\n- this.n1 = n1;\n- this.n2 = n2;\n- this.type = type;\n- this.params = params;\n- this.im2ColOutBlocks = im2ColOutBlocks;\n- this.partialRetBlocks = partialRetBlocks;\n- this.doutReshapedBlocks = doutReshapedBlocks;\n+ _rl = rl;\n+ _ru = ru;\n+ _type = type;\n+ _params = params;\n+ _im2ColOutBlocks = im2ColOutBlocks;\n+ _partialRetBlocks = partialRetBlocks;\n+ _doutReshapedBlocks = doutReshapedBlocks;\n}\n@Override\n- public Object call() throws DMLRuntimeException {\n- switch(type) {\n+ public Long call() throws DMLRuntimeException {\n+ long lnnz = 0; //nnz per partition\n+\n+ switch(_type) {\ncase MaxPooling_Forward:\n- {\n- for(int n = n1; n < n2; n++) {\n- doPooling(n, params);\n- }\n+ for(int n = _rl; n < _ru; n++)\n+ doPooling(n, _params);\nbreak;\n- }\ncase MaxPooling_Backward:\n- for(int n = n1; n < n2; n++)\n- doPoolingBackward(n, params);\n+ for(int n = _rl; n < _ru; n++)\n+ doPoolingBackward(n, _params);\nbreak;\ncase BiasAdd:\n- doBiasAdd(n1, n2, params);\n+ doBiasAdd(_params, _rl, _ru);\nbreak;\ncase ReluBackward:\n- for(int n = n1; n < n2; n++)\n- doReluBackward(n, params);\n+ lnnz = doReluBackward(_params, _rl, _ru);\nbreak;\ncase LoopedIm2ColConv2d:\n{\n- MatrixBlock im2ColOutBlock = im2ColOutBlocks.remove();\n- for(int n = n1; n < n2; n++)\n- doLoopedIm2ColConv2d(n, im2ColOutBlock, params);\n- im2ColOutBlocks.add(im2ColOutBlock);\n- if(params.bias != null)\n- addBias(n1, n2, params);\n+ MatrixBlock im2ColOutBlock = _im2ColOutBlocks.remove();\n+ for(int n = _rl; n < _ru; n++)\n+ doLoopedIm2ColConv2d(n, im2ColOutBlock, _params);\n+ _im2ColOutBlocks.add(im2ColOutBlock);\n+ if(_params.bias != null)\n+ addBias(_params, _rl, _ru);\nbreak;\n}\ncase LoopedIm2ColConv2dBwdFilter:\n{\n- MatrixBlock im2ColOutBlock = im2ColOutBlocks.remove();\n- MatrixBlock partialRetBlock = partialRetBlocks.remove();\n- MatrixBlock doutReshapedBlock = doutReshapedBlocks.remove();\n- for(int n = n1; n < n2; n++)\n- partialRetBlock = doLoopedIm2ColConv2dBwdFilter(n, im2ColOutBlock, doutReshapedBlock, partialRetBlock, params);\n- im2ColOutBlocks.add(im2ColOutBlock);\n- partialRetBlocks.add(partialRetBlock);\n- doutReshapedBlocks.add(doutReshapedBlock);\n+ MatrixBlock im2ColOutBlock = _im2ColOutBlocks.remove();\n+ MatrixBlock partialRetBlock = 
_partialRetBlocks.remove();\n+ MatrixBlock doutReshapedBlock = _doutReshapedBlocks.remove();\n+ for(int n = _rl; n < _ru; n++)\n+ partialRetBlock = doLoopedIm2ColConv2dBwdFilter(n, im2ColOutBlock, doutReshapedBlock, partialRetBlock, _params);\n+ _im2ColOutBlocks.add(im2ColOutBlock);\n+ _partialRetBlocks.add(partialRetBlock);\n+ _doutReshapedBlocks.add(doutReshapedBlock);\nbreak;\n}\ncase LoopedIm2ColConv2dBwdData:\n{\n- MatrixBlock doutReshapedBlock = doutReshapedBlocks.remove();\n- for(int n = n1; n < n2; n++)\n- doLoopedIm2ColConv2dBwdData(n, doutReshapedBlock, params);\n- doutReshapedBlocks.add(doutReshapedBlock);\n+ MatrixBlock doutReshapedBlock = _doutReshapedBlocks.remove();\n+ for(int n = _rl; n < _ru; n++)\n+ doLoopedIm2ColConv2dBwdData(n, doutReshapedBlock, _params);\n+ _doutReshapedBlocks.add(doutReshapedBlock);\nbreak;\n}\ndefault:\n- throw new DMLRuntimeException(\"Unsupported ConvTask:\" + type.name());\n+ throw new DMLRuntimeException(\"Unsupported ConvTask:\" + _type.name());\n}\n- return null;\n+\n+ return lnnz;\n}\n}\n- private static void addBias(int n1, int n2, ConvolutionParameters params) {\n+ private static void addBias(ConvolutionParameters params, int rl, int ru) {\nint PQ = params.P*params.Q;\nint K = params.K;\ndouble [] outputArr = params.output.getDenseBlock();\nif(!params.bias.isInSparseFormat()) {\ndouble [] biasArr = params.bias.getDenseBlock();\n- int index = n1*K*PQ;\n- for(int n = n1; n < n2; n++) {\n+ int index = rl*K*PQ;\n+ for(int n = rl; n < ru; n++) {\nfor(int k = 0; k < K; k++) {\nfor(int pq = 0; pq < PQ; pq++, index++) {\noutputArr[index] += biasArr[k];\n@@ -1057,7 +1082,7 @@ public class LibMatrixDNN {\nIJV ijv = iter.next();\nint k = ijv.getI();\ndouble val = ijv.getV();\n- for(int n = n1; n < n2; n++) {\n+ for(int n = rl; n < ru; n++) {\nint index = n*K*PQ + k*PQ;\nfor(int pq = 0; pq < PQ; pq++, index++) {\noutputArr[index] += val;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1078] Fix missing nnz maintenance conv2d ops, incl cleanups. This patch extends all conv2d operations by (so far unoptimized) nnz maintenance in order to prevent side effects with update-in-place and other operations that incrementally maintain the number of non-zeros.
49,738
10.02.2017 06:58:34
-3,600
de1e119de0b2fc2a6c6a2c57bf64c4172a26890d
Performance conv2d_bias_add (cache-conscious transpose)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -233,39 +233,39 @@ public class LibMatrixDNN {\n}\n/**\n- * Performs the operation: ret += t(elem)\n+ * Performs the operation for(e : elem) ret += t(e) in a cache-conscious manner\n+ * by sequentially aggregating for(e : elem) tmp += e and finally transposing\n+ * ret = t(tmp).\n+ *\n* @param ret left and output matrix\n- * @param elem right untransposed matrix\n+ * @param elem array of right untransposed matrices (expected in dense format)\n* @param params convolution parameters\n- * @throws DMLRuntimeException if DMLRuntimeException occurs\n+ * @throws DMLRuntimeException in case of unsupported inputs or output\n*/\n- private static void elementWiseInPlaceTransposedAddition(MatrixBlock ret, MatrixBlock elem) throws DMLRuntimeException {\n- if(ret.getNumRows() != elem.getNumColumns() || ret.getNumColumns() != elem.getNumRows()) {\n- throw new DMLRuntimeException(\"Incorrect dimensions\");\n- }\n- int numRow = ret.getNumColumns();\n- if(!ret.isInSparseFormat() && !elem.isInSparseFormat()) {\n- int iter = 0;\n- for(int i = 0; i < elem.getNumRows(); i++) {\n- for(int j = 0; j < elem.getNumColumns(); j++, iter++) {\n- int index = j*numRow+i;\n- ret.denseBlock[index] += elem.denseBlock[iter];\n- }\n- }\n- }\n- else if(!ret.isInSparseFormat() && elem.isInSparseFormat()) {\n- if(!elem.isEmptyBlock()) {\n- Iterator<IJV> iter = elem.sparseBlock.getIterator();\n- while(iter.hasNext()) {\n- IJV ijv = iter.next();\n- int index = ijv.getJ()*numRow + ijv.getI();\n- ret.denseBlock[index] += ijv.getV();\n- }\n- }\n- }\n- else {\n- throw new DMLRuntimeException(\"Sparse return format not supported\");\n+ private static void elementWiseInPlaceTransposedAddition(MatrixBlock ret, MatrixBlock[] elem)\n+ throws DMLRuntimeException\n+ {\n+ //sanity checks non-empty and dense inputs / dense output\n+ if( elem == null || elem.length==0 )\n+ throw new DMLRuntimeException(\"Empty input not supported.\");\n+ for( MatrixBlock e : elem )\n+ if( e.isInSparseFormat() )\n+ throw new DMLRuntimeException(\"Sparse input format not supported.\");\n+ if( ret.isInSparseFormat() )\n+ throw new DMLRuntimeException(\"Sparse output format not supported.\");\n+\n+ //Step 1: aggregate partial blocks without transpose\n+ MatrixBlock tmpAgg = elem[0];\n+ double[] tmp = tmpAgg.denseBlock;\n+ for( int k=1; k<elem.length; k++ ) {\n+ double[] tmp2 = elem[k].denseBlock;\n+ for( int i=0; i<tmp.length; i++ )\n+ tmp[i] += tmp2[i];\n}\n+\n+ //Step 2: cache-conscious transpose to output\n+ tmpAgg.setNonZeros(-1); //avoid early abort\n+ LibMatrixReorg.transpose(tmpAgg, ret);\n}\n@SuppressWarnings(\"unused\")\n@@ -948,9 +948,7 @@ public class LibMatrixDNN {\nfor( Future<Long> task : taskret )\nparams.output.nonZeros += task.get();\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\n- for(MatrixBlock partialRetBlock : partialRetBlocks) {\n- elementWiseInPlaceTransposedAddition(params.output, partialRetBlock);\n- }\n+ elementWiseInPlaceTransposedAddition(params.output, partialRetBlocks.toArray(new MatrixBlock[0]));\n}\n}\ncatch (Exception e) {\n@@ -965,9 +963,7 @@ public class LibMatrixDNN {\ndoutReshapedBlocks, partialRetBlocks).call());\nif(type == TaskType.LoopedIm2ColConv2dBwdFilter) {\n- for(MatrixBlock partialRetBlock : partialRetBlocks) {\n- elementWiseInPlaceTransposedAddition(params.output, partialRetBlock);\n- }\n+ 
elementWiseInPlaceTransposedAddition(params.output, partialRetBlocks.toArray(new MatrixBlock[0]));\n}\n} catch (Exception e) {\nthrow new DMLRuntimeException(\"Error while executing single-threaded \" + type.name(), e);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1140] Performance conv2d_bias_add (cache-conscious transpose)
49,736
10.02.2017 13:43:43
28,800
10b7b8669bee522af49448db6bd37ae2ced24846
[MINOR] Bugfix in Spark pmapmm operator
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/PMapmmSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/PMapmmSPInstruction.java", "diff": "@@ -97,9 +97,13 @@ public class PMapmmSPInstruction extends BinarySPInstruction\nJavaPairRDD<MatrixIndexes,MatrixBlock> in2 = sec.getBinaryBlockRDDHandleForVariable( input2.getName() );\nMatrixCharacteristics mc1 = sec.getMatrixCharacteristics(input1.getName());\n+ // This avoids errors such as java.lang.UnsupportedOperationException: Cannot change storage level of an RDD after it was already assigned a level\n+ // Ideally, we should ensure that we donot redundantly call persist on the same RDD.\n+ StorageLevel pmapmmStorageLevel = StorageLevel.MEMORY_AND_DISK();\n+\n//cache right hand side because accessed many times\nin2 = in2.repartition(sec.getSparkContext().defaultParallelism())\n- .persist(StorageLevel.MEMORY_AND_DISK());\n+ .persist(pmapmmStorageLevel);\nJavaPairRDD<MatrixIndexes,MatrixBlock> out = null;\nfor( int i=0; i<mc1.getRows(); i+=NUM_ROWBLOCKS*mc1.getRowsPerBlock() )\n@@ -117,7 +121,7 @@ public class PMapmmSPInstruction extends BinarySPInstruction\nJavaPairRDD<MatrixIndexes,MatrixBlock> rdd2 = in2\n.flatMapToPair(new PMapMMFunction(bpmb, i/mc1.getRowsPerBlock()));\nrdd2 = RDDAggregateUtils.sumByKeyStable(rdd2);\n- rdd2.persist(StorageLevel.MEMORY_ONLY())\n+ rdd2.persist(pmapmmStorageLevel)\n.count();\nbpmb.unpersist(false);\n@@ -128,7 +132,7 @@ public class PMapmmSPInstruction extends BinarySPInstruction\n}\n//cache final result\n- out = out.persist(StorageLevel.MEMORY_AND_DISK());\n+ out = out.persist(pmapmmStorageLevel);\nout.count();\n//put output RDD handle into symbol table\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Bugfix in Spark pmapmm operator
49,738
11.02.2017 00:51:00
-3,600
c87da2ce8ffe4ab6a03fce4cd548703147f12fca
Fix robustness csv text read (quoted recoded maps)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java", "diff": "@@ -117,13 +117,12 @@ public class IOUtilFunctions\n/**\n* Splits a string by a specified delimiter into all tokens, including empty\n- * while respecting the rules for quotes and escapes defined in RFC4180.\n- *\n- * NOTE: use StringEscapeUtils.unescapeCsv(tmp) if needed afterwards.\n+ * while respecting the rules for quotes and escapes defined in RFC4180,\n+ * with robustness for various special cases.\n*\n* @param str string to split\n* @param delim delimiter\n- * @return string array\n+ * @return string array of tokens\n*/\npublic static String[] splitCSV(String str, String delim)\n{\n@@ -135,6 +134,7 @@ public class IOUtilFunctions\nArrayList<String> tokens = new ArrayList<String>();\nint from = 0, to = 0;\nint len = str.length();\n+ int dlen = delim.length();\nwhile( from < len ) { // for all tokens\nif( str.charAt(from) == CSV_QUOTE_CHAR\n&& str.indexOf(CSV_QUOTE_CHAR, from+1) > 0 ) {\n@@ -143,8 +143,11 @@ public class IOUtilFunctions\nwhile( to+1 < len && str.charAt(to+1)==CSV_QUOTE_CHAR )\nto = str.indexOf(CSV_QUOTE_CHAR, to+2); // to + \"\"\nto += 1; // last \"\n+ // handle remaining non-quoted characters \"aa\"a\n+ if( to<len-1 && !str.regionMatches(to, delim, 0, dlen) )\n+ to = str.indexOf(delim, to+1);\n}\n- else if(str.regionMatches(from, delim, 0, delim.length())) {\n+ else if( str.regionMatches(from, delim, 0, dlen) ) {\nto = from; // empty string\n}\nelse { // default: unquoted non-empty\n@@ -165,6 +168,16 @@ public class IOUtilFunctions\nreturn tokens.toArray(new String[0]);\n}\n+ /**\n+ * Splits a string by a specified delimiter into all tokens, including empty\n+ * while respecting the rules for quotes and escapes defined in RFC4180,\n+ * with robustness for various special cases.\n+ *\n+ * @param str string to split\n+ * @param delim delimiter\n+ * @param string array for tokens, length needs to match the number of tokens\n+ * @return string array of tokens\n+ */\npublic static String[] splitCSV(String str, String delim, String[] tokens)\n{\n// check for empty input\n@@ -174,6 +187,7 @@ public class IOUtilFunctions\n// scan string and create individual tokens\nint from = 0, to = 0;\nint len = str.length();\n+ int dlen = delim.length();\nint pos = 0;\nwhile( from < len ) { // for all tokens\nif( str.charAt(from) == CSV_QUOTE_CHAR\n@@ -183,8 +197,11 @@ public class IOUtilFunctions\nwhile( to+1 < len && str.charAt(to+1)==CSV_QUOTE_CHAR )\nto = str.indexOf(CSV_QUOTE_CHAR, to+2); // to + \"\"\nto += 1; // last \"\n+ // handle remaining non-quoted characters \"aa\"a\n+ if( to<len-1 && !str.regionMatches(to, delim, 0, dlen) )\n+ to = str.indexOf(delim, to+1);\n}\n- else if(str.regionMatches(from, delim, 0, delim.length())) {\n+ else if( str.regionMatches(from, delim, 0, dlen) ) {\nto = from; // empty string\n}\nelse { // default: unquoted non-empty\n@@ -207,9 +224,10 @@ public class IOUtilFunctions\n/**\n* Counts the number of tokens defined by the given delimiter, respecting\n- * the rules for quotes and escapes defined in RFC4180.\n+ * the rules for quotes and escapes defined in RFC4180,\n+ * with robustness for various special cases.\n*\n- * @param str string\n+ * @param str string to split\n* @param delim delimiter\n* @return number of tokens split by the given delimiter\n*/\n@@ -223,6 +241,7 @@ public class IOUtilFunctions\nint numTokens = 0;\nint from = 0, to = 0;\nint len = 
str.length();\n+ int dlen = delim.length();\nwhile( from < len ) { // for all tokens\nif( str.charAt(from) == CSV_QUOTE_CHAR\n&& str.indexOf(CSV_QUOTE_CHAR, from+1) > 0 ) {\n@@ -231,8 +250,11 @@ public class IOUtilFunctions\nwhile( to+1 < len && str.charAt(to+1)==CSV_QUOTE_CHAR )\nto = str.indexOf(CSV_QUOTE_CHAR, to+2); // to + \"\"\nto += 1; // last \"\n+ // handle remaining non-quoted characters \"aa\"a\n+ if( to<len-1 && !str.regionMatches(to, delim, 0, dlen) )\n+ to = str.indexOf(delim, to+1);\n}\n- else if(str.regionMatches(from, delim, 0, delim.length())) {\n+ else if( str.regionMatches(from, delim, 0, dlen) ) {\nto = from; // empty string\n}\nelse { // default: unquoted non-empty\n@@ -366,11 +388,11 @@ public class IOUtilFunctions\ninformat.getRecordReader(splits[i], job, Reporter.NULL);\ntry {\nif( reader.next(key, value) ) {\n- String row = value.toString().trim();\n- if( row.startsWith(TfUtils.TXMTD_MVPREFIX) )\n+ if( value.toString().startsWith(TfUtils.TXMTD_MVPREFIX) )\nreader.next(key, value);\n- if( row.startsWith(TfUtils.TXMTD_NDPREFIX) )\n+ if( value.toString().startsWith(TfUtils.TXMTD_NDPREFIX) )\nreader.next(key, value);\n+ String row = value.toString().trim();\nif( !row.isEmpty() )\nncol = IOUtilFunctions.countTokensCSV(row, delim);\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/transform/FrameCSVReadWriteTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.transform;\n+\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.io.FrameReader;\n+import org.apache.sysml.runtime.io.FrameReaderFactory;\n+import org.apache.sysml.runtime.matrix.data.CSVFileFormatProperties;\n+import org.apache.sysml.runtime.matrix.data.FrameBlock;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class FrameCSVReadWriteTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"FrameCSVReadWrite\";\n+ private final static String TEST_DIR = \"functions/transform/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FrameCSVReadWriteTest.class.getSimpleName() + \"/\";\n+\n+ //dataset and transform tasks without missing values\n+ private final static String DATASET = \"csv_mix/quotes1.csv\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testCSVReadWriteSinglenode() {\n+ runCSVQuotesReadWriteTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\");\n+ }\n+\n+ @Test\n+ public void testCSVReadWriteHybrid() {\n+ runCSVQuotesReadWriteTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\");\n+ }\n+\n+ @Test\n+ public void testCSVReadWriteSpark() {\n+ runCSVQuotesReadWriteTest(RUNTIME_PLATFORM.SPARK, \"csv\");\n+ }\n+\n+\n+ /**\n+ *\n+ * @param rt\n+ * @param ofmt\n+ * @param dataset\n+ */\n+ private void runCSVQuotesReadWriteTest( RUNTIME_PLATFORM rt, String ofmt )\n+ {\n+ //set runtime platform\n+ RUNTIME_PLATFORM rtold = rtplatform;\n+ boolean csvReblockOld = OptimizerUtils.ALLOW_FRAME_CSV_REBLOCK;\n+ rtplatform = rt;\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK)\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ if( !ofmt.equals(\"csv\") )\n+ throw new RuntimeException(\"Unsupported test output format\");\n+\n+ try\n+ {\n+ getAndLoadTestConfiguration(TEST_NAME1);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\n+ programArgs = new String[]{\"-explain\",\"-args\",\n+ HOME + \"input/\" + DATASET, output(\"R\") };\n+\n+ OptimizerUtils.ALLOW_FRAME_CSV_REBLOCK = true;\n+ runTest(true, false, null, -1);\n+\n+ //read input/output and compare\n+ FrameReader reader1 = FrameReaderFactory.createFrameReader(InputInfo.CSVInputInfo,\n+ new CSVFileFormatProperties(false, \",\", false));\n+ FrameBlock fb1 = reader1.readFrameFromHDFS(HOME + \"input/\" + DATASET, -1L, -1L);\n+ FrameReader reader2 = FrameReaderFactory.createFrameReader(InputInfo.CSVInputInfo);\n+ FrameBlock fb2 = reader2.readFrameFromHDFS(output(\"R\"), -1L, -1L);\n+ String[][] R1 = DataConverter.convertToStringFrame(fb1);\n+ String[][] R2 = DataConverter.convertToStringFrame(fb2);\n+ TestUtils.compareFrames(R1, R2, R1.length, R1[0].length);\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ 
rtplatform = rtold;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_FRAME_CSV_REBLOCK = csvReblockOld;\n+ }\n+ }\n+}\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/transform/TransformCSVFrameEncodeReadTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.transform;\n+\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.io.FrameReader;\n+import org.apache.sysml.runtime.io.FrameReaderTextCSV;\n+import org.apache.sysml.runtime.io.FrameReaderTextCSVParallel;\n+import org.apache.sysml.runtime.matrix.data.CSVFileFormatProperties;\n+import org.apache.sysml.runtime.matrix.data.FrameBlock;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class TransformCSVFrameEncodeReadTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"TransformCSVFrameEncodeRead\";\n+ private final static String TEST_DIR = \"functions/transform/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + TransformCSVFrameEncodeReadTest.class.getSimpleName() + \"/\";\n+\n+ //dataset and transform tasks without missing values\n+ private final static String DATASET = \"csv_mix/quotes1.csv\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testFrameReadMetaSingleNodeCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", false);\n+ }\n+\n+ @Test\n+ public void testFrameReadMetaSparkCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", false);\n+ }\n+\n+ @Test\n+ public void testFrameReadMetaHybridCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", false);\n+ }\n+\n+ @Test\n+ public void testFrameParReadMetaSingleNodeCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", true);\n+ }\n+\n+ @Test\n+ public void testFrameParReadMetaSparkCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", true);\n+ }\n+\n+ @Test\n+ public void testFrameParReadMetaHybridCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", true);\n+ }\n+\n+ /**\n+ *\n+ * @param rt\n+ * @param ofmt\n+ * @param dataset\n+ */\n+ private void runTransformTest( RUNTIME_PLATFORM rt, 
String ofmt, boolean parRead )\n+ {\n+ //set runtime platform\n+ RUNTIME_PLATFORM rtold = rtplatform;\n+ boolean csvReblockOld = OptimizerUtils.ALLOW_FRAME_CSV_REBLOCK;\n+ rtplatform = rt;\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK)\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ if( !ofmt.equals(\"csv\") )\n+ throw new RuntimeException(\"Unsupported test output format\");\n+\n+ try\n+ {\n+ getAndLoadTestConfiguration(TEST_NAME1);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-stats\",\"-args\",\n+ HOME + \"input/\" + DATASET, output(\"R\") };\n+\n+ OptimizerUtils.ALLOW_FRAME_CSV_REBLOCK = true;\n+ runTest(true, false, null, -1);\n+\n+ //read input/output and compare\n+ FrameReader reader2 = parRead ?\n+ new FrameReaderTextCSVParallel( new CSVFileFormatProperties() ) :\n+ new FrameReaderTextCSV( new CSVFileFormatProperties() );\n+ FrameBlock fb2 = reader2.readFrameFromHDFS(output(\"R\"), -1L, -1L);\n+ System.out.println(DataConverter.toString(fb2));\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ rtplatform = rtold;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_FRAME_CSV_REBLOCK = csvReblockOld;\n+ }\n+ }\n+}\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/transform/FrameCSVReadWrite.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1, data_type=\"frame\", format=\"csv\");\n+if(1==1){}\n+\n+print(toString(X));\n+write(X, $2, format=\"csv\");\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/transform/TransformCSVFrameEncodeRead.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+F1 = read($1, data_type=\"frame\", format=\"csv\");\n+jspec = \"{\\\"ids\\\": true, \\\"recode\\\": [1,2,3]}\";\n+\n+[X, M] = transformencode(target=F1, spec=jspec);\n+\n+print(toString(M))\n+write(M, $2, format=\"csv\");\n+\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "diff": "@@ -47,9 +47,10 @@ import org.junit.runners.Suite;\nPrintMatrixTest.class,\nReadAfterWriteTest.class,\nRewriteFusedRandTest.class,\n+ RewriteLoopVectorization.class,\n+ RewritePushdownSumBinaryMult.class,\nRewritePushdownSumOnBinaryTest.class,\nRewritePushdownUaggTest.class,\n- RewritePushdownSumBinaryMult.class,\nRewriteSimplifyRowColSumMVMultTest.class,\nRewriteSlicedMatrixMultTest.class,\nRewriteFuseBinaryOpChainTest.class,\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/transform/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/transform/ZPackageSuite.java", "diff": "@@ -26,10 +26,12 @@ import org.junit.runners.Suite;\n* won't run two of them at once. */\n@RunWith(Suite.class)\[email protected]({\n+ FrameCSVReadWriteTest.class,\nRunTest.class,\nScalingTest.class,\nTransformAndApplyTest.class,\nTransformCSVFrameEncodeDecodeTest.class,\n+ TransformCSVFrameEncodeReadTest.class,\nTransformEncodeDecodeTest.class,\nTransformFrameApplyTest.class,\nTransformFrameEncodeApplyTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1244] Fix robustness csv text read (quoted recoded maps)
49,738
11.02.2017 03:23:04
-3,600
8c49730d3a2eaebcaea2ff162eeb5e5dcc9839c9
Fix size update wdivmm/wsigmoid/wumm on rewrite
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "diff": "@@ -1570,6 +1570,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WSIGMOID, W, Y, tX, false, false);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedSigmoid1 (line \"+hi.getBeginLine()+\")\");\n@@ -1599,6 +1600,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WSIGMOID, W, Y, tX, false, true);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedSigmoid2 (line \"+hi.getBeginLine()+\")\");\n@@ -1625,6 +1627,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WSIGMOID, W, Y, tX, true, false);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedSigmoid3 (line \"+hi.getBeginLine()+\")\");\n@@ -1658,6 +1661,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WSIGMOID, W, Y, tX, true, true);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedSigmoid4 (line \"+hi.getBeginLine()+\")\");\n@@ -1715,6 +1719,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, new LiteralOp(-1), 1, mult, false);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\n//add output transpose for efficient target indexing (redundant t() removed by other rewrites)\nhnew = HopRewriteUtils.createTranspose(hnew);\n@@ -1749,6 +1754,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, X, 3, false, false); // 3=>DIV_LEFT_EPS\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\n//add output transpose for efficient target indexing (redundant t() removed by other rewrites)\nhnew = HopRewriteUtils.createTranspose(hnew);\n@@ -1781,6 +1787,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, new LiteralOp(-1), 2, mult, false);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedDivMM2 (line \"+hi.getBeginLine()+\")\");\n@@ -1812,6 +1819,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new 
QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, X, 4, false, false); // 4=>DIV_RIGHT_EPS\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedDivMM2e (line \"+hi.getBeginLine()+\")\");\n@@ -1842,6 +1850,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, X, U, V, new LiteralOp(-1), 1, true, true);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\n//add output transpose for efficient target indexing (redundant t() removed by other rewrites)\nhnew = HopRewriteUtils.createTranspose(hnew);\n@@ -1875,6 +1884,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, X, U, V, new LiteralOp(-1), 2, true, true);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedDivMM4 (line \"+hi.getBeginLine()+\")\");\n@@ -1905,6 +1915,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, X, 1, true, true);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\n//add output transpose for efficient target indexing (redundant t() removed by other rewrites)\nhnew = HopRewriteUtils.createTranspose(hnew);\n@@ -1938,6 +1949,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, X, 2, true, true);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedDivMM6 (line \"+hi.getBeginLine()+\")\");\n@@ -1968,6 +1980,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WDIVMM, W, U, V, new LiteralOp(-1), 0, true, false);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedDivMM7 (line \"+hi.getBeginLine()+\")\");\n@@ -2093,6 +2106,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WUMM, W, U, V, mult, op, null);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedUnaryMM1 (line \"+hi.getBeginLine()+\")\");\n@@ -2145,6 +2159,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nhnew = new QuaternaryOp(hi.getName(), DataType.MATRIX, ValueType.DOUBLE,\nOpOp4.WUMM, W, U, V, mult, null, op);\nHopRewriteUtils.setOutputBlocksizes(hnew, W.getRowsInBlock(), W.getColsInBlock());\n+ hnew.refreshSizeInformation();\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedUnaryMM2 (line \"+hi.getBeginLine()+\")\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1243] Fix size update wdivmm/wsigmoid/wumm on rewrite
49,738
11.02.2017 04:45:05
-3,600
0ae01230d617cf5ae322d00f44b5fb520b3ee850
Fix loop rewrite update-in-place (exclude local vars)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMarkLoopVariablesUpdateInPlace.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMarkLoopVariablesUpdateInPlace.java", "diff": "@@ -63,9 +63,12 @@ public class RewriteMarkLoopVariablesUpdateInPlace extends StatementBlockRewrite\n{\nArrayList<String> candidates = new ArrayList<String>();\nVariableSet updated = sb.variablesUpdated();\n+ VariableSet liveout = sb.liveOut();\nfor( String varname : updated.getVariableNames() ) {\n- if( updated.getVariable(varname).getDataType()==DataType.MATRIX) {\n+ if( updated.getVariable(varname).getDataType()==DataType.MATRIX\n+ && liveout.containsVariable(varname) ) //exclude local vars\n+ {\nif( sb instanceof WhileStatementBlock ) {\nWhileStatement wstmt = (WhileStatement) sb.getStatement(0);\nif( rIsApplicableForUpdateInPlace(wstmt.getBody(), varname) )\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1248] Fix loop rewrite update-in-place (exclude local vars)
49,738
12.02.2017 01:14:16
-3,600
696d10b5ae05704918c4fca76ef6d3fb635ec228
[MINOR] Cleanups (missing imports, unnecessary tags, fix max blocks)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "diff": "@@ -1292,7 +1292,7 @@ public class SparkExecutionContext extends ExecutionContext\n// The most expensive operation here is rdd.toDebugString() which can be a major hit because\n// of unrolling lazy evaluation of Spark. Hence, it is guarded against it along with flag 'PRINT_EXPLAIN_WITH_LINEAGE' which is\n// enabled only through MLContext. This way, it doesnot affect our performance evaluation through non-MLContext path\n- @SuppressWarnings(\"deprecation\")\n+ @SuppressWarnings(\"unused\")\nprivate void setLineageInfoForExplain(SPInstruction inst,\nJavaPairRDD<?, ?> out,\nJavaPairRDD<?, ?> in1, String in1Name,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SPInstruction.java", "diff": "@@ -74,7 +74,6 @@ public abstract class SPInstruction extends Instruction\nreturn getOpcode();\n}\n- @SuppressWarnings(\"deprecation\")\n@Override\npublic Instruction preprocessInstruction(ExecutionContext ec)\nthrows DMLRuntimeException\n@@ -97,7 +96,6 @@ public abstract class SPInstruction extends Instruction\npublic abstract void processInstruction(ExecutionContext ec)\nthrows DMLRuntimeException;\n- @SuppressWarnings(\"deprecation\")\n@Override\npublic void postprocessInstruction(ExecutionContext ec)\nthrows DMLRuntimeException\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -84,8 +84,6 @@ import jcuda.jcudnn.cudnnTensorDescriptor;\nimport jcuda.jcusparse.JCusparse;\nimport jcuda.jcusparse.cusparseHandle;\n-import java.util.Vector;\n-\n//FIXME move could to respective instructions, this is not a block library\npublic class LibMatrixCUDA {\n@@ -992,12 +990,6 @@ public class LibMatrixCUDA {\n//**************** UNARY AGGREGATE Functions ************************/\n//********************************************************************/\n- /**\n- * Direction of reduction for aggregate binary operations\n- */\n- private enum ReductionDirection{\n- ALL, ROW, COL, DIAG;\n- };\n/**\n* Entry point to perform Unary aggregate operations on the GPU.\n@@ -1436,7 +1428,7 @@ public class LibMatrixCUDA {\nfinal int MAX_BLOCKS = getMaxBlocks();\nfinal int WARP_SIZE = getWarpSize();\nint threads = Math.min(cols, MAX_THREADS);\n- int blocks = cols/MAX_THREADS;\n+ int blocks = Math.min(cols/MAX_THREADS, MAX_BLOCKS);\nif (cols % MAX_THREADS != 0) blocks++;\nint sharedMemSize = threads * Sizeof.DOUBLE;\nif (threads <= WARP_SIZE){\n@@ -2232,6 +2224,7 @@ public class LibMatrixCUDA {\n* @param rlen row length\n* @param clen column length\n*/\n+ @SuppressWarnings(\"unused\")\nprivate static void debugPrintMatrix(Pointer in, int rlen, int clen){\ndouble[] data = new double[rlen * clen];\ncudaMemcpy(Pointer.to(data), in, rlen*clen*Sizeof.DOUBLE, cudaMemcpyDeviceToHost);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java", "diff": "@@ -64,7 +64,6 @@ import org.junit.runner.RunWith;\nimport 
org.junit.runners.Parameterized;\nimport org.junit.runners.Parameterized.Parameters;\n-@SuppressWarnings(\"deprecation\")\n@RunWith(value = Parameterized.class)\npublic class GNMFTest extends AutomatedTestBase\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanups (missing imports, unnecessary tags, fix max blocks)
49,738
12.02.2017 07:44:03
-3,600
d3e617b0c9f65cc1b80e884e8df6c7d3668332b0
Selective wdivmm rewrite (dense factors), stratstats
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -60,6 +60,7 @@ import org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.IntObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysml.runtime.instructions.cp.StringObject;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\npublic class HopRewriteUtils\n@@ -672,6 +673,11 @@ public class HopRewriteUtils\n&& hop.getInput().get(1).getDim1() < hop.getInput().get(1).getDim2();\n}\n+ public static boolean isSparse( Hop hop ) {\n+ return hop.dimsKnown(true) //dims and nnz known\n+ && MatrixBlock.evalSparseFormatInMemory(hop.getDim1(), hop.getDim2(), hop.getNnz());\n+ }\n+\npublic static boolean isEqualValue( LiteralOp hop1, LiteralOp hop2 )\nthrows HopsException\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "diff": "@@ -1972,6 +1972,9 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nHop U = hi.getInput().get(1).getInput().get(0);\nHop V = hi.getInput().get(1).getInput().get(1);\n+ //for this basic pattern, we're more conservative and only apply wdivmm if\n+ //the factors are not known to be sparse\n+ if( !HopRewriteUtils.isSparse(U) && !HopRewriteUtils.isSparse(V) ) {\nif( !HopRewriteUtils.isTransposeOperation(V) )\nV = HopRewriteUtils.createTranspose(V);\nelse\n@@ -1985,6 +1988,7 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nappliedPattern = true;\nLOG.debug(\"Applied simplifyWeightedDivMM7 (line \"+hi.getBeginLine()+\")\");\n}\n+ }\n//relink new hop into original position\nif( hnew != null ) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1253] Selective wdivmm rewrite (dense factors), stratstats
49,738
12.02.2017 08:29:36
-3,600
b3ba991604cf79c5e3e2c0992fe2439ae47ce023
New sum-product rewrites (agg pushdown), stratstats In the spirit of our SPOOF compiler framework and the existing sum(X%*%Y) rewrite, this patch adds the following two sum-product rewrites (where the first applies multiple times in stratstats): * colSums(X %*% Y) -> colSums(X) %*% Y * rowSums(X %*% Y) -> X %*% rowSums(Y)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "diff": "@@ -2547,11 +2547,12 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nprivate Hop simplifySumMatrixMult(Hop parent, Hop hi, int pos)\n{\n- //sum(A%*%B) -> sum(t(colSums(A))*rowSums(B))\n- //if not dot product, not applied since aggregate removed\n- //if sum not the only consumer, not applied to prevent redundancy\n+ //sum(A%*%B) -> sum(t(colSums(A))*rowSums(B)), later rewritten to dot-product\n+ //colSums(A%*%B) -> colSums(A)%*%B\n+ //rowSums(A%*%B) -> A%*%rowSums(B)\n+ //-- if not dot product, not applied since aggregate removed\n+ //-- if sum not the only consumer, not applied to prevent redundancy\nif( hi instanceof AggUnaryOp && ((AggUnaryOp)hi).getOp()==AggOp.SUM //sum\n- && ((AggUnaryOp)hi).getDirection() == Direction.RowCol //full aggregate\n&& hi.getInput().get(0) instanceof AggBinaryOp //A%*%B\n&& (hi.getInput().get(0).getDim1()>1 || hi.getInput().get(0).getDim2()>1) //not dot product\n&& hi.getInput().get(0).getParent().size()==1 ) //not multiple consumers of matrix mult\n@@ -2560,34 +2561,39 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nHop left = hi2.getInput().get(0);\nHop right = hi2.getInput().get(1);\n- //remove link from parent to diag\n+ //remove link from parent to matrix mult\nHopRewriteUtils.removeChildReference(hi, hi2);\n//create new operators\n- AggUnaryOp colSum = new AggUnaryOp(left.getName(), left.getDataType(), left.getValueType(), AggOp.SUM, Direction.Col, left);\n- colSum.setRowsInBlock(left.getRowsInBlock());\n- colSum.setColsInBlock(left.getColsInBlock());\n- colSum.refreshSizeInformation();\n+ Hop root = null;\n+ //pattern: sum(A%*%B) -> sum(t(colSums(A))*rowSums(B)), later rewritten to dot-product\n+ if( ((AggUnaryOp)hi).getDirection() == Direction.RowCol ) {\n+ AggUnaryOp colSum = HopRewriteUtils.createAggUnaryOp(left, AggOp.SUM, Direction.Col);\nReorgOp trans = HopRewriteUtils.createTranspose(colSum);\n- AggUnaryOp rowSum = new AggUnaryOp(right.getName(), right.getDataType(), right.getValueType(), AggOp.SUM, Direction.Row, right);\n- rowSum.setRowsInBlock(right.getRowsInBlock());\n- rowSum.setColsInBlock(right.getColsInBlock());\n- rowSum.refreshSizeInformation();\n- BinaryOp mult = new BinaryOp(right.getName(), right.getDataType(), right.getValueType(), OpOp2.MULT, trans, rowSum);\n- mult.setRowsInBlock(right.getRowsInBlock());\n- mult.setColsInBlock(right.getColsInBlock());\n- mult.refreshSizeInformation();\n-\n+ AggUnaryOp rowSum = HopRewriteUtils.createAggUnaryOp(right, AggOp.SUM, Direction.Row);\n+ root = HopRewriteUtils.createBinary(trans, rowSum, OpOp2.MULT);\n+ LOG.debug(\"Applied simplifySumMatrixMult RC.\");\n+ }\n+ //colSums(A%*%B) -> colSums(A)%*%B\n+ else if( ((AggUnaryOp)hi).getDirection() == Direction.Col ) {\n+ AggUnaryOp colSum = HopRewriteUtils.createAggUnaryOp(left, AggOp.SUM, Direction.Col);\n+ root = HopRewriteUtils.createMatrixMultiply(colSum, right);\n+ LOG.debug(\"Applied simplifySumMatrixMult C.\");\n+ }\n+ //rowSums(A%*%B) -> A%*%rowSums(B)\n+ else if( ((AggUnaryOp)hi).getDirection() == Direction.Row ) {\n+ AggUnaryOp rowSum = HopRewriteUtils.createAggUnaryOp(right, AggOp.SUM, Direction.Row);\n+ root = HopRewriteUtils.createMatrixMultiply(left, rowSum);\n+ LOG.debug(\"Applied simplifySumMatrixMult R.\");\n+ }\n//rehang new subdag 
under current node (keep hi intact)\n- HopRewriteUtils.addChildReference(hi, mult, 0);\n+ HopRewriteUtils.addChildReference(hi, root, 0);\nhi.refreshSizeInformation();\n//cleanup if only consumer of intermediate\nif( hi2.getParent().isEmpty() )\nHopRewriteUtils.removeAllChildReferences( hi2 );\n-\n- LOG.debug(\"Applied simplifySumMatrixMult.\");\n}\nreturn hi;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1254] New sum-product rewrites (agg pushdown), stratstats In the spirit of our SPOOF compiler framework and the existing sum(X%*%Y) rewrite, this patch adds the following two sum-product rewrites (where the first applies multiple times in stratstats): * colSums(X %*% Y) -> colSums(X) %*% Y * rowSums(X %*% Y) -> X %*% rowSums(Y)
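A brief DML sketch of the effect of these two rewrites (matrix sizes are arbitrary, chosen only for illustration); the rewritten forms aggregate before the matrix multiply and so avoid materializing the full product:
X = rand(rows=10000, cols=10);
Y = rand(rows=10, cols=5000);
# without the rewrite, colSums(X %*% Y) conceptually materializes a 10000 x 5000 intermediate
R1 = colSums(X %*% Y);
# rewritten form: aggregate X first, then multiply a 1 x 10 row vector by Y
R2 = colSums(X) %*% Y;
print(sum(abs(R1 - R2)));   # ~0, up to floating-point round-off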
49,738
12.02.2017 20:15:00
-3,600
1000750464d470a7b56db71d896c6ca25e11e0bd
[HOTFIX] Fix csv frame readers (robust size computation), tests
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java", "diff": "@@ -388,15 +388,17 @@ public class IOUtilFunctions\ninformat.getRecordReader(splits[i], job, Reporter.NULL);\ntry {\nif( reader.next(key, value) ) {\n+ boolean hasValue = true;\nif( value.toString().startsWith(TfUtils.TXMTD_MVPREFIX) )\n- reader.next(key, value);\n+ hasValue = reader.next(key, value);\nif( value.toString().startsWith(TfUtils.TXMTD_NDPREFIX) )\n- reader.next(key, value);\n+ hasValue = reader.next(key, value);\nString row = value.toString().trim();\n- if( !row.isEmpty() )\n+ if( hasValue && !row.isEmpty() ) {\nncol = IOUtilFunctions.countTokensCSV(row, delim);\n}\n}\n+ }\nfinally {\ncloseSilently(reader);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/transform/TransformCSVFrameEncodeReadTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/transform/TransformCSVFrameEncodeReadTest.java", "diff": "@@ -51,41 +51,72 @@ public class TransformCSVFrameEncodeReadTest extends AutomatedTestBase\n@Test\npublic void testFrameReadMetaSingleNodeCSV() {\n- runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", false);\n+ runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", false, false);\n}\n@Test\npublic void testFrameReadMetaSparkCSV() {\n- runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", false);\n+ runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", false, false);\n}\n@Test\npublic void testFrameReadMetaHybridCSV() {\n- runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", false);\n+ runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", false, false);\n}\n@Test\npublic void testFrameParReadMetaSingleNodeCSV() {\n- runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", true);\n+ runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", false, true);\n}\n@Test\npublic void testFrameParReadMetaSparkCSV() {\n- runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", true);\n+ runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", false, true);\n}\n@Test\npublic void testFrameParReadMetaHybridCSV() {\n- runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", true);\n+ runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", false, true);\n}\n+ @Test\n+ public void testFrameReadSubMetaSingleNodeCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", true, false);\n+ }\n+\n+ @Test\n+ public void testFrameReadSubMetaSparkCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", true, false);\n+ }\n+\n+ @Test\n+ public void testFrameReadSubMetaHybridCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", true, false);\n+ }\n+\n+ @Test\n+ public void testFrameParReadSubMetaSingleNodeCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SINGLE_NODE, \"csv\", true, true);\n+ }\n+\n+ @Test\n+ public void testFrameParReadSubMetaSparkCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.SPARK, \"csv\", true, true);\n+ }\n+\n+ @Test\n+ public void testFrameParReadSubMetaHybridCSV() {\n+ runTransformTest(RUNTIME_PLATFORM.HYBRID_SPARK, \"csv\", true, true);\n+ }\n+\n+\n/**\n*\n* @param rt\n* @param ofmt\n* @param dataset\n*/\n- private void runTransformTest( RUNTIME_PLATFORM rt, String ofmt, boolean parRead )\n+ private void runTransformTest( RUNTIME_PLATFORM rt, String ofmt, boolean subset, boolean parRead )\n{\n//set runtime platform\nRUNTIME_PLATFORM rtold = rtplatform;\n@@ -104,9 +135,10 @@ public class 
TransformCSVFrameEncodeReadTest extends AutomatedTestBase\ngetAndLoadTestConfiguration(TEST_NAME1);\nString HOME = SCRIPT_DIR + TEST_DIR;\n+ int nrows = subset ? 4 : 13;\nfullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\nprogramArgs = new String[]{\"-explain\", \"-stats\",\"-args\",\n- HOME + \"input/\" + DATASET, output(\"R\") };\n+ HOME + \"input/\" + DATASET, String.valueOf(nrows), output(\"R\") };\nOptimizerUtils.ALLOW_FRAME_CSV_REBLOCK = true;\nrunTest(true, false, null, -1);\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/transform/TransformCSVFrameEncodeRead.dml", "new_path": "src/test/scripts/functions/transform/TransformCSVFrameEncodeRead.dml", "diff": "@@ -24,6 +24,7 @@ jspec = \"{\\\"ids\\\": true, \\\"recode\\\": [1,2,3]}\";\n[X, M] = transformencode(target=F1, spec=jspec);\n+M = M[1:$2,]\nprint(toString(M))\n-write(M, $2, format=\"csv\");\n+write(M, $3, format=\"csv\");\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix csv frame readers (robust size computation), tests
49,738
14.02.2017 15:19:02
28,800
d58c7875081f49f8cb74a5bb94a083adad83a1bb
Fix gnmf mlcontext test (missing spark context close)
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java", "diff": "@@ -57,6 +57,8 @@ import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.runtime.util.MapReduceTool;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.utils.TestUtils;\n+import org.junit.After;\n+import org.junit.AfterClass;\nimport org.junit.Assert;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\n@@ -258,6 +260,24 @@ public class GNMFTest extends AutomatedTestBase\n}\n}\n+ @After\n+ public void tearDown() {\n+ super.tearDown();\n+ }\n+\n+ @AfterClass\n+ public static void tearDownClass() {\n+ // stop spark context to allow single jvm tests (otherwise the\n+ // next test that tries to create a SparkContext would fail)\n+ sc.stop();\n+ sc = null;\n+ conf = null;\n+\n+ // clear status mlcontext and spark exec context\n+ ml.close();\n+ ml = null;\n+ }\n+\npublic static class StringToMatrixEntry implements Function<String, MatrixEntry> {\nprivate static final long serialVersionUID = 7456391906436606324L;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1235] Fix gnmf mlcontext test (missing spark context close)
49,738
14.02.2017 17:11:59
28,800
0e6411dada77870ae29049288b1789313a35a9f6
New rewrite 'pushdown CSE transpose-scalar', incl tests This new rewrite allows pushing down a transpose below a matrix-scalar binary operation (except quantile and centralMoment) in order to reuse an existing transpose common subexpression.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -711,17 +711,28 @@ public class HopRewriteUtils\nreturn ret;\n}\n- public static boolean isTransposeOperation(Hop hop)\n- {\n+ public static boolean isTransposeOperation(Hop hop) {\nreturn (hop instanceof ReorgOp && ((ReorgOp)hop).getOp()==ReOrgOp.TRANSPOSE);\n}\n- public static boolean isTransposeOfItself(Hop hop1, Hop hop2)\n- {\n+ public static boolean containsTransposeOperation(ArrayList<Hop> hops) {\n+ boolean ret = false;\n+ for( Hop hop : hops )\n+ ret |= isTransposeOperation(hop);\n+ return ret;\n+ }\n+\n+ public static boolean isTransposeOfItself(Hop hop1, Hop hop2) {\nreturn hop1 instanceof ReorgOp && ((ReorgOp)hop1).getOp()==ReOrgOp.TRANSPOSE && hop1.getInput().get(0) == hop2\n|| hop2 instanceof ReorgOp && ((ReorgOp)hop2).getOp()==ReOrgOp.TRANSPOSE && hop2.getInput().get(0) == hop1;\n}\n+ public static boolean isBinaryMatrixScalarOperation(Hop hop) {\n+ return hop instanceof BinaryOp &&\n+ ((hop.getInput().get(0).getDataType().isMatrix() && hop.getInput().get(1).getDataType().isScalar())\n+ ||(hop.getInput().get(1).getDataType().isMatrix() && hop.getInput().get(0).getDataType().isScalar()));\n+ }\n+\npublic static boolean isNonZeroIndicator(Hop pred, Hop hop )\n{\nif( pred instanceof BinaryOp && ((BinaryOp)pred).getOp()==OpOp2.NOTEQUAL\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -148,6 +148,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nhi = simplifyUnaryAggReorgOperation(hop, hi, i); //e.g., sum(t(X)) -> sum(X)\nhi = simplifyBinaryMatrixScalarOperation(hop, hi, i);//e.g., as.scalar(X*s) -> as.scalar(X)*s;\nhi = pushdownUnaryAggTransposeOperation(hop, hi, i); //e.g., colSums(t(X)) -> t(rowSums(X))\n+ hi = pushdownCSETransposeScalarOperation(hop, hi, i);//e.g., a=t(X), b=t(X^2) -> a=t(X), b=t(X)^2 for CSE t(X)\nhi = pushdownSumBinaryMult(hop, hi, i); //e.g., sum(lamda*X) -> lamda*sum(X)\nhi = simplifyUnaryPPredOperation(hop, hi, i); //e.g., abs(ppred()) -> ppred(), others: round, ceil, floor\nhi = simplifyTransposedAppend(hop, hi, i); //e.g., t(cbind(t(A),t(B))) -> rbind(A,B);\n@@ -943,6 +944,41 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nreturn hi;\n}\n+ private Hop pushdownCSETransposeScalarOperation( Hop parent, Hop hi, int pos )\n+ {\n+ // a=t(X), b=t(X^2) -> a=t(X), b=t(X)^2 for CSE t(X)\n+ // probed at root node of b in above example\n+ // (with support for left or right scalar operations)\n+ if( HopRewriteUtils.isTransposeOperation(hi) && hi.getParent().size()==1\n+ && HopRewriteUtils.isBinaryMatrixScalarOperation(hi.getInput().get(0))\n+ && hi.getInput().get(0).getParent().size()==1)\n+ {\n+ int Xpos = hi.getInput().get(0).getInput().get(0).getDataType().isMatrix() ? 
0 : 1;\n+ Hop X = hi.getInput().get(0).getInput().get(Xpos);\n+ BinaryOp binary = (BinaryOp) hi.getInput().get(0);\n+\n+ if( HopRewriteUtils.containsTransposeOperation(X.getParent())\n+ && !HopRewriteUtils.isValidOp(binary.getOp(), new OpOp2[]{OpOp2.CENTRALMOMENT, OpOp2.QUANTILE}))\n+ {\n+ //clear existing wiring\n+ HopRewriteUtils.removeChildReferenceByPos(parent, hi, pos);\n+ HopRewriteUtils.removeChildReference(hi, binary);\n+ HopRewriteUtils.removeChildReference(binary, X);\n+\n+ //re-wire operators\n+ HopRewriteUtils.addChildReference(parent, binary, pos);\n+ HopRewriteUtils.addChildReference(binary, hi, Xpos);\n+ HopRewriteUtils.addChildReference(hi, X);\n+ //note: common subexpression later eliminated by dedicated rewrite\n+\n+ hi = binary;\n+ LOG.debug(\"Applied pushdownCSETransposeScalarOperation (line \"+hi.getBeginLine()+\").\");\n+ }\n+ }\n+\n+ return hi;\n+ }\n+\nprivate Hop pushdownSumBinaryMult(Hop parent, Hop hi, int pos ) throws HopsException {\n//pattern: sum(lamda*X) -> lamda*sum(X)\nif( hi instanceof AggUnaryOp && ((AggUnaryOp)hi).getDirection()==Direction.RowCol\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -464,11 +464,14 @@ public class Statistics\n_cpInstCounts.put(key, newCnt);\n}\n- public static Set<String> getCPHeavyHitterOpCodes()\n- {\n+ public static Set<String> getCPHeavyHitterOpCodes() {\nreturn _cpInstTime.keySet();\n}\n+ public static long getCPHeavyHitterCount(String opcode) {\n+ return _cpInstCounts.get(opcode);\n+ }\n+\n@SuppressWarnings(\"unchecked\")\npublic static String getHeavyHitters( int num )\n{\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteCSETransposeScalarTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.apache.sysml.utils.Statistics;\n+\n+/**\n+ *\n+ *\n+ */\n+public class RewriteCSETransposeScalarTest extends AutomatedTestBase\n+{\n+ private static final String TEST_NAME1 = \"RewriteCSETransposeScalarPow\"; //right scalar\n+ private static final String TEST_NAME2 = \"RewriteCSETransposeScalarMult\"; //left scalar\n+\n+ private static final String TEST_DIR = \"functions/misc/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + RewriteCSETransposeScalarTest.class.getSimpleName() + \"/\";\n+\n+ private static final int rows = 1932;\n+ private static final int cols = 14;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testRewriteCSETransposePow() {\n+ testRewriteCSETransposeScalar( TEST_NAME1, true );\n+ }\n+\n+ @Test\n+ public void testRewriteCSETransposePowNoRewrite() {\n+ testRewriteCSETransposeScalar( TEST_NAME1, false );\n+ }\n+\n+ @Test\n+ public void testRewriteCSETransposeMult() {\n+ testRewriteCSETransposeScalar( TEST_NAME2, true );\n+ }\n+\n+ @Test\n+ public void testRewriteCSETransposeMultNoRewrite() {\n+ testRewriteCSETransposeScalar( TEST_NAME2, false );\n+ }\n+\n+ /**\n+ *\n+ * @param testname\n+ * @param et\n+ */\n+ private void testRewriteCSETransposeScalar( String testname, boolean rewrites )\n+ {\n+ boolean rewritesOld = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ try {\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{ \"-stats\", \"-args\", String.valueOf(rows),\n+ String.valueOf(cols), output(\"R\") };\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ //run performance tests\n+ runTest(true, false, null, -1);\n+\n+ //compare output\n+ double ret = TestUtils.readDMLScalar(output(\"R\"));\n+ Assert.assertEquals(\"Wrong result, expected: \"+(rows*cols), new Double(rows*cols), new Double(ret));\n+ Assert.assertEquals(new Long(rewrites?1:2), new Long(Statistics.getCPHeavyHitterCount(\"r'\")));\n+ }\n+ finally {\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewritesOld;\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteCSETransposeScalarMult.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=$1, cols=$2, min=1, max=10);\n+if(1==1){}\n+\n+a = t(X);\n+b = t(2*X);\n+\n+if(1==1){}\n+\n+R = sum(2*a == b);\n+write(R, $3);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteCSETransposeScalarPow.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=$1, cols=$2, min=1, max=10);\n+if(1==1){}\n+\n+a = t(X);\n+b = t(X^2);\n+\n+if(1==1){}\n+\n+R = sum(a^2 == b);\n+write(R, $3);\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "diff": "@@ -46,6 +46,7 @@ import org.junit.runners.Suite;\nPrintExpressionTest.class,\nPrintMatrixTest.class,\nReadAfterWriteTest.class,\n+ RewriteCSETransposeScalarTest.class,\nRewriteFusedRandTest.class,\nRewriteLoopVectorization.class,\nRewritePushdownSumBinaryMult.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1254] New rewrite 'pushdown CSE transpose-scalar', incl tests This new rewrite allows pushing down a transpose below a matrix-scalar binary operation (except quantile and centralMoment) in order to reuse an existing transpose common subexpression.
49,738
14.02.2017 19:26:27
28,800
12d79c5481838fbf31d2ec3a86c3b9c4c5af1265
Fix robustness quantile/iqm check for integer weights
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -4843,11 +4843,18 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nthrows DMLRuntimeException\n{\ndouble sum_wt = 0;\n- for (int i=0; i < getNumRows(); i++ )\n- sum_wt += quickGetValue(i, 1);\n- if ( Math.floor(sum_wt) < sum_wt ) {\n- throw new DMLRuntimeException(\"Unexpected error while computing quantile -- weights must be integers.\");\n+ for (int i=0; i < getNumRows(); i++ ) {\n+ double tmp = quickGetValue(i, 1);\n+ sum_wt += tmp;\n+\n+ // test all values not just final sum_wt to ensure that non-integer weights\n+ // don't cancel each other out; integer weights are required by all quantiles, etc\n+ if( Math.floor(tmp) < tmp ) {\n+ throw new DMLRuntimeException(\"Wrong input data, quantile weights \"\n+ + \"are expected to be integers but found '\"+tmp+\"'.\");\n+ }\n}\n+\nreturn sum_wt;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1265] Fix robustness quantile/iqm check for integer weights
49,738
14.02.2017 21:56:14
28,800
732e6da4f924a99ba5fbddf656436fc1bd62668f
Replace accumulators with new accumulatorV2 framework This patch globally replaces all uses of deprecated accumulators with the new accumulatorV2 framework. For custom accumulators, this entailed a reimplementation. Furthermore, we now avoid expensive double-long casting and use named accumulators for easier debugging in the webui.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "diff": "@@ -23,9 +23,9 @@ import java.util.List;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaSparkContext;\n+import org.apache.spark.util.LongAccumulator;\nimport scala.Tuple2;\n@@ -73,8 +73,8 @@ public class RemoteDPParForSpark\nInputInfo ii = InputInfo.BinaryBlockInputInfo;\n//initialize accumulators for tasks/iterations\n- Accumulator<Integer> aTasks = sc.accumulator(0);\n- Accumulator<Integer> aIters = sc.accumulator(0);\n+ LongAccumulator aTasks = sc.sc().longAccumulator(\"tasks\");\n+ LongAccumulator aIters = sc.sc().longAccumulator(\"iterations\");\nJavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable(matrixvar);\nDataPartitionerRemoteSparkMapper dpfun = new DataPartitionerRemoteSparkMapper(mc, ii, oi, dpf);\n@@ -88,8 +88,8 @@ public class RemoteDPParForSpark\n//de-serialize results\nLocalVariableMap[] results = RemoteParForUtils.getResults(out, LOG);\n- int numTasks = aTasks.value(); //get accumulator value\n- int numIters = aIters.value(); //get accumulator value\n+ int numTasks = aTasks.value().intValue(); //get accumulator value\n+ int numIters = aIters.value().intValue(); //get accumulator value\n//create output symbol table entries\nRemoteParForJobReturn ret = new RemoteParForJobReturn(true, numTasks, numIters, results);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "diff": "@@ -24,10 +24,9 @@ import java.util.ArrayList;\nimport java.util.Iterator;\nimport org.apache.hadoop.io.Writable;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.TaskContext;\nimport org.apache.spark.api.java.function.PairFlatMapFunction;\n-\n+import org.apache.spark.util.LongAccumulator;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitionFormat;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\n@@ -61,10 +60,10 @@ public class RemoteDPParForSparkWorker extends ParWorker implements PairFlatMapF\nprivate boolean _tSparseCol = false;\nprivate PDataPartitionFormat _dpf = null;\n- private Accumulator<Integer> _aTasks = null;\n- private Accumulator<Integer> _aIters = null;\n+ private LongAccumulator _aTasks = null;\n+ private LongAccumulator _aIters = null;\n- public RemoteDPParForSparkWorker(String program, String inputVar, String iterVar, boolean cpCaching, MatrixCharacteristics mc, boolean tSparseCol, PDataPartitionFormat dpf, OutputInfo oinfo, Accumulator<Integer> atasks, Accumulator<Integer> aiters)\n+ public RemoteDPParForSparkWorker(String program, String inputVar, String iterVar, boolean cpCaching, MatrixCharacteristics mc, boolean tSparseCol, PDataPartitionFormat dpf, OutputInfo oinfo, LongAccumulator atasks, LongAccumulator aiters)\nthrows DMLRuntimeException\n{\n//keep inputs (unfortunately, spark does not expose task ids and it would be implementation-dependent\n" }, { "change_type": "MODIFY", "old_path": 
"src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSpark.java", "diff": "@@ -23,8 +23,8 @@ import java.util.List;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.api.java.JavaSparkContext;\n+import org.apache.spark.util.LongAccumulator;\nimport scala.Tuple2;\n@@ -64,8 +64,8 @@ public class RemoteParForSpark\nJavaSparkContext sc = sec.getSparkContext();\n//initialize accumulators for tasks/iterations\n- Accumulator<Integer> aTasks = sc.accumulator(0);\n- Accumulator<Integer> aIters = sc.accumulator(0);\n+ LongAccumulator aTasks = sc.sc().longAccumulator(\"tasks\");\n+ LongAccumulator aIters = sc.sc().longAccumulator(\"iterations\");\n//run remote_spark parfor job\n//(w/o lazy evaluation to fit existing parfor framework, e.g., result merge)\n@@ -77,8 +77,8 @@ public class RemoteParForSpark\n//de-serialize results\nLocalVariableMap[] results = RemoteParForUtils.getResults(out, LOG);\n- int numTasks = aTasks.value(); //get accumulator value\n- int numIters = aIters.value(); //get accumulator value\n+ int numTasks = aTasks.value().intValue(); //get accumulator value\n+ int numIters = aIters.value().intValue(); //get accumulator value\n//create output symbol table entries\nRemoteParForJobReturn ret = new RemoteParForJobReturn(true, numTasks, numIters, results);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSparkWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSparkWorker.java", "diff": "@@ -23,9 +23,9 @@ import java.io.IOException;\nimport java.util.ArrayList;\nimport java.util.Iterator;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.TaskContext;\nimport org.apache.spark.api.java.function.PairFlatMapFunction;\n+import org.apache.spark.util.LongAccumulator;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDHandler;\n@@ -42,10 +42,10 @@ public class RemoteParForSparkWorker extends ParWorker implements PairFlatMapFun\nprivate String _prog = null;\nprivate boolean _caching = true;\n- private Accumulator<Integer> _aTasks = null;\n- private Accumulator<Integer> _aIters = null;\n+ private LongAccumulator _aTasks = null;\n+ private LongAccumulator _aIters = null;\n- public RemoteParForSparkWorker(String program, boolean cpCaching, Accumulator<Integer> atasks, Accumulator<Integer> aiters)\n+ public RemoteParForSparkWorker(String program, boolean cpCaching, LongAccumulator atasks, LongAccumulator aiters)\nthrows DMLRuntimeException\n{\n//keep inputs (unfortunately, spark does not expose task ids and it would be implementation-dependent\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/MultiReturnParameterizedBuiltinSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/MultiReturnParameterizedBuiltinSPInstruction.java", "diff": "package org.apache.sysml.runtime.instructions.spark;\nimport java.io.IOException;\n-import java.io.Serializable;\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.Iterator;\nimport java.util.Map.Entry;\n-import 
org.apache.spark.Accumulator;\n-import org.apache.spark.AccumulatorParam;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\n+import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.api.java.function.FlatMapFunction;\nimport org.apache.spark.api.java.function.PairFlatMapFunction;\nimport org.apache.spark.broadcast.Broadcast;\n+import org.apache.spark.util.AccumulatorV2;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -124,7 +123,7 @@ public class MultiReturnParameterizedBuiltinSPInstruction extends ComputationSPI\nEncoder encoderBuild = EncoderFactory.createEncoder(spec, colnames,\nfo.getSchema(), (int)fo.getNumColumns(), null);\n- Accumulator<Long> accMax = sec.getSparkContext().accumulator(0L, new MaxAcc());\n+ MaxLongAccumulator accMax = registerMaxLongAccumulator(sec.getSparkContext());\nJavaRDD<String> rcMaps = in\n.mapPartitionsToPair(new TransformEncodeBuildFunction(encoderBuild))\n.distinct().groupByKey()\n@@ -190,6 +189,54 @@ public class MultiReturnParameterizedBuiltinSPInstruction extends ComputationSPI\nreturn null;\n}\n+ private static MaxLongAccumulator registerMaxLongAccumulator(JavaSparkContext sc) {\n+ MaxLongAccumulator acc = new MaxLongAccumulator(Long.MIN_VALUE);\n+ sc.sc().register(acc, \"max\");\n+ return acc;\n+ }\n+\n+\n+ private static class MaxLongAccumulator extends AccumulatorV2<Long,Long>\n+ {\n+ private static final long serialVersionUID = -3739727823287550826L;\n+\n+ private long _value = Long.MIN_VALUE;\n+\n+ public MaxLongAccumulator(long value) {\n+ _value = value;\n+ }\n+\n+ @Override\n+ public void add(Long arg0) {\n+ _value = Math.max(_value, arg0);\n+ }\n+\n+ @Override\n+ public AccumulatorV2<Long, Long> copy() {\n+ return new MaxLongAccumulator(_value);\n+ }\n+\n+ @Override\n+ public boolean isZero() {\n+ return _value == Long.MIN_VALUE;\n+ }\n+\n+ @Override\n+ public void merge(AccumulatorV2<Long, Long> arg0) {\n+ _value = Math.max(_value, arg0.value());\n+ }\n+\n+ @Override\n+ public void reset() {\n+ _value = Long.MIN_VALUE;\n+ }\n+\n+ @Override\n+ public Long value() {\n+ return _value;\n+ }\n+ }\n+\n/**\n* This function pre-aggregates distinct values of recoded columns per partition\n* (part of distributed recode map construction, used for recoding, binning and\n@@ -242,9 +289,9 @@ public class MultiReturnParameterizedBuiltinSPInstruction extends ComputationSPI\n{\nprivate static final long serialVersionUID = -1034187226023517119L;\n- private Accumulator<Long> _accMax = null;\n+ private MaxLongAccumulator _accMax = null;\n- public TransformEncodeGroupFunction( Accumulator<Long> accMax ) {\n+ public TransformEncodeGroupFunction( MaxLongAccumulator accMax ) {\n_accMax = accMax;\n}\n@@ -275,26 +322,6 @@ public class MultiReturnParameterizedBuiltinSPInstruction extends ComputationSPI\n}\n}\n- private static class MaxAcc implements AccumulatorParam<Long>, Serializable\n- {\n- private static final long serialVersionUID = -3739727823287550826L;\n-\n- @Override\n- public Long addInPlace(Long arg0, Long arg1) {\n- return Math.max(arg0, arg1);\n- }\n-\n- @Override\n- public Long zero(Long arg0) {\n- return arg0;\n- }\n-\n- @Override\n- public Long addAccumulator(Long arg0, Long arg1) {\n- return Math.max(arg0, arg1);\n- }\n- }\n-\npublic static class TransformEncodeBuild2Function implements PairFlatMapFunction<Iterator<Tuple2<Long, FrameBlock>>, Integer, ColumnMetadata>\n{\nprivate 
static final long serialVersionUID = 6336375833412029279L;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/WriteSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/WriteSPInstruction.java", "diff": "@@ -25,9 +25,9 @@ import java.util.Random;\nimport org.apache.hadoop.io.LongWritable;\nimport org.apache.hadoop.mapred.SequenceFileOutputFormat;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\n+import org.apache.spark.util.LongAccumulator;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -203,12 +203,12 @@ public class WriteSPInstruction extends SPInstruction\nelse if( oi == OutputInfo.CSVOutputInfo )\n{\nJavaRDD<String> out = null;\n- Accumulator<Double> aNnz = null;\n+ LongAccumulator aNnz = null;\nif ( isInputMatrixBlock ) {\n//piggyback nnz computation on actual write\nif( !mc.nnzKnown() ) {\n- aNnz = sec.getSparkContext().accumulator(0L);\n+ aNnz = sec.getSparkContext().sc().longAccumulator(\"nnz\");\nin1 = in1.mapValues(new ComputeBinaryBlockNnzFunction(aNnz));\n}\n@@ -252,9 +252,9 @@ public class WriteSPInstruction extends SPInstruction\n}\nelse if( oi == OutputInfo.BinaryBlockOutputInfo ) {\n//piggyback nnz computation on actual write\n- Accumulator<Double> aNnz = null;\n+ LongAccumulator aNnz = null;\nif( !mc.nnzKnown() ) {\n- aNnz = sec.getSparkContext().accumulator(0L);\n+ aNnz = sec.getSparkContext().sc().longAccumulator(\"nnz\");\nin1 = in1.mapValues(new ComputeBinaryBlockNnzFunction(aNnz));\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/functions/ComputeBinaryBlockNnzFunction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/functions/ComputeBinaryBlockNnzFunction.java", "diff": "package org.apache.sysml.runtime.instructions.spark.functions;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.api.java.function.Function;\n-\n+import org.apache.spark.util.LongAccumulator;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\npublic class ComputeBinaryBlockNnzFunction implements Function<MatrixBlock,MatrixBlock>\n{\nprivate static final long serialVersionUID = -8396410450821999936L;\n- private Accumulator<Double> _aNnz = null;\n+ private LongAccumulator _aNnz = null;\n- public ComputeBinaryBlockNnzFunction( Accumulator<Double> aNnz )\n+ public ComputeBinaryBlockNnzFunction( LongAccumulator aNnz )\n{\n_aNnz = aNnz;\n}\n@@ -38,7 +37,7 @@ public class ComputeBinaryBlockNnzFunction implements Function<MatrixBlock,Matri\n@Override\npublic MatrixBlock call(MatrixBlock arg0) throws Exception\n{\n- _aNnz.add( (double)arg0.getNonZeros() );\n+ _aNnz.add( arg0.getNonZeros() );\nreturn arg0;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java", "diff": "@@ -27,7 +27,6 @@ import java.util.List;\nimport org.apache.hadoop.io.LongWritable;\nimport org.apache.hadoop.io.Text;\n-import org.apache.spark.Accumulator;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\nimport org.apache.spark.api.java.JavaSparkContext;\n@@ -46,6 +45,7 @@ import org.apache.spark.sql.RowFactory;\nimport 
org.apache.spark.sql.SQLContext;\nimport org.apache.spark.sql.types.DataTypes;\nimport org.apache.spark.sql.types.StructField;\n+import org.apache.spark.util.LongAccumulator;\nimport scala.Tuple2;\n@@ -166,7 +166,7 @@ public class RDDConverterUtils\n{\n//determine unknown dimensions and sparsity if required\nif( !mc.dimsKnown(true) ) {\n- Accumulator<Double> aNnz = sc.accumulator(0L);\n+ LongAccumulator aNnz = sc.sc().longAccumulator(\"nnz\");\nJavaRDD<String> tmp = input.values()\n.map(new CSVAnalysisFunction(aNnz, delim));\nlong rlen = tmp.count() - (hasHeader ? 1 : 0);\n@@ -230,7 +230,7 @@ public class RDDConverterUtils\n{\n//determine unknown dimensions and sparsity if required\nif( !mc.dimsKnown(true) ) {\n- Accumulator<Double> aNnz = sc.accumulator(0L);\n+ LongAccumulator aNnz = sc.sc().longAccumulator(\"nnz\");\nJavaRDD<Row> tmp = df.javaRDD().map(new DataFrameAnalysisFunction(aNnz, containsID, isVector));\nlong rlen = tmp.count();\nlong clen = !isVector ? df.columns().length - (containsID?1:0) :\n@@ -531,10 +531,10 @@ public class RDDConverterUtils\n{\nprivate static final long serialVersionUID = 2310303223289674477L;\n- private Accumulator<Double> _aNnz = null;\n+ private LongAccumulator _aNnz = null;\nprivate String _delim = null;\n- public CSVAnalysisFunction( Accumulator<Double> aNnz, String delim )\n+ public CSVAnalysisFunction( LongAccumulator aNnz, String delim )\n{\n_aNnz = aNnz;\n_delim = delim;\n@@ -552,7 +552,7 @@ public class RDDConverterUtils\nint lnnz = IOUtilFunctions.countNnz(cols);\n//update counters\n- _aNnz.add( (double)lnnz );\n+ _aNnz.add( lnnz );\nreturn line;\n}\n@@ -922,11 +922,11 @@ public class RDDConverterUtils\n{\nprivate static final long serialVersionUID = 5705371332119770215L;\n- private Accumulator<Double> _aNnz = null;\n+ private LongAccumulator _aNnz = null;\nprivate boolean _containsID;\nprivate boolean _isVector;\n- public DataFrameAnalysisFunction( Accumulator<Double> aNnz, boolean containsID, boolean isVector) {\n+ public DataFrameAnalysisFunction( LongAccumulator aNnz, boolean containsID, boolean isVector) {\n_aNnz = aNnz;\n_containsID = containsID;\n_isVector = isVector;\n@@ -940,7 +940,7 @@ public class RDDConverterUtils\nint lnnz = countNnz(vect, _isVector, off);\n//update counters\n- _aNnz.add( (double)lnnz );\n+ _aNnz.add( lnnz );\nreturn arg0;\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1268] Replace accumulators with new accumulatorV2 framework This patch globally replaces all uses of deprecated accumulators with the new accumulatorV2 framework. For custom accumulators, this entailed a reimplementation. Furthermore, we now avoid expensive double-long casting and use named accumulators for easier debugging in the webui.
49,738
16.02.2017 12:12:57
28,800
73afc2c19fe34caf08ec2c63bdbfb0b42aab881f
Improved nnz maintenance on spark rdd write We now consistently piggyback any nnz maintenance on write operations in order to avoid unnecessary RDD computation. Furthermore, this change also removes the utils primitive to compute the nnz in isolation in order to prevent reintroducing such inefficiencies.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "diff": "@@ -35,6 +35,7 @@ import org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.broadcast.Broadcast;\nimport org.apache.spark.storage.RDDInfo;\nimport org.apache.spark.storage.StorageLevel;\n+import org.apache.spark.util.LongAccumulator;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.MLContextProxy;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -55,6 +56,7 @@ import org.apache.sysml.runtime.instructions.spark.data.LineageObject;\nimport org.apache.sysml.runtime.instructions.spark.data.PartitionedBlock;\nimport org.apache.sysml.runtime.instructions.spark.data.PartitionedBroadcast;\nimport org.apache.sysml.runtime.instructions.spark.data.RDDObject;\n+import org.apache.sysml.runtime.instructions.spark.functions.ComputeBinaryBlockNnzFunction;\nimport org.apache.sysml.runtime.instructions.spark.functions.CopyBinaryCellFunction;\nimport org.apache.sysml.runtime.instructions.spark.functions.CopyFrameBlockPairFunction;\nimport org.apache.sysml.runtime.instructions.spark.functions.CopyTextInputFunction;\n@@ -966,8 +968,9 @@ public class SparkExecutionContext extends ExecutionContext\n{\nJavaPairRDD<MatrixIndexes,MatrixBlock> lrdd = (JavaPairRDD<MatrixIndexes, MatrixBlock>) rdd.getRDD();\n- //recompute nnz\n- long nnz = SparkUtils.computeNNZFromBlocks(lrdd);\n+ //piggyback nnz maintenance on write\n+ LongAccumulator aNnz = getSparkContextStatic().sc().longAccumulator(\"nnz\");\n+ lrdd = lrdd.mapValues(new ComputeBinaryBlockNnzFunction(aNnz));\n//save file is an action which also triggers nnz maintenance\nlrdd.saveAsHadoopFile(path,\n@@ -976,7 +979,7 @@ public class SparkExecutionContext extends ExecutionContext\noinfo.outputFormatClass);\n//return nnz aggregate of all blocks\n- return nnz;\n+ return aNnz.value();\n}\n@SuppressWarnings(\"unchecked\")\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/WriteSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/WriteSPInstruction.java", "diff": "@@ -39,7 +39,6 @@ import org.apache.sysml.runtime.instructions.spark.functions.ComputeBinaryBlockN\nimport org.apache.sysml.runtime.instructions.spark.utils.FrameRDDConverterUtils;\nimport org.apache.sysml.runtime.instructions.spark.utils.FrameRDDConverterUtils.LongFrameToLongWritableFrameFunction;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\n-import org.apache.sysml.runtime.instructions.spark.utils.SparkUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.CSVFileFormatProperties;\nimport org.apache.sysml.runtime.matrix.data.FileFormatProperties;\n@@ -179,9 +178,12 @@ public class WriteSPInstruction extends SPInstruction\nif( oi == OutputInfo.MatrixMarketOutputInfo\n|| oi == OutputInfo.TextCellOutputInfo )\n{\n- //recompute nnz if necessary (required for header if matrix market)\n- if ( isInputMatrixBlock && !mc.nnzKnown() )\n- mc.setNonZeros( SparkUtils.computeNNZFromBlocks(in1) );\n+ //piggyback nnz maintenance on write\n+ LongAccumulator aNnz = null;\n+ if ( isInputMatrixBlock && !mc.nnzKnown() ) {\n+ aNnz = sec.getSparkContext().sc().longAccumulator(\"nnz\");\n+ in1 = in1.mapValues(new ComputeBinaryBlockNnzFunction(aNnz));\n+ 
}\nJavaRDD<String> header = null;\nif( oi == OutputInfo.MatrixMarketOutputInfo ) {\n@@ -199,6 +201,9 @@ public class WriteSPInstruction extends SPInstruction\ncustomSaveTextFile(header.union(ijv), fname, true);\nelse\ncustomSaveTextFile(ijv, fname, false);\n+\n+ if ( isInputMatrixBlock && !mc.nnzKnown() )\n+ mc.setNonZeros( aNnz.value() );\n}\nelse if( oi == OutputInfo.CSVOutputInfo )\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java", "diff": "@@ -249,29 +249,4 @@ public class SparkUtils\narg0.getNonZeros() + arg1.getNonZeros() ); //sum\n}\n}\n-\n- /**\n- * Utility to compute number of non-zeros from the given RDD of MatrixBlocks\n- *\n- * @param rdd matrix as {@code JavaPairRDD<MatrixIndexes, MatrixBlock>}\n- * @return number of non-zeros\n- */\n- public static long computeNNZFromBlocks(JavaPairRDD<MatrixIndexes, MatrixBlock> rdd) {\n- long nnz = rdd.values().aggregate( 0L,\n- new Function2<Long,MatrixBlock,Long>() {\n- private static final long serialVersionUID = 4907645080949985267L;\n- @Override\n- public Long call(Long v1, MatrixBlock v2) throws Exception {\n- return (v1 + v2.getNonZeros());\n- }\n- },\n- new Function2<Long,Long,Long>() {\n- private static final long serialVersionUID = 333028431986883739L;\n- @Override\n- public Long call(Long v1, Long v2) throws Exception {\n- return v1+v2;\n- }\n- } );\n- return nnz;\n- }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1274] Improved nnz maintenance on spark rdd write We now consistently piggyback any nnz maintenance on write operations in order to avoid unnecessary RDD computation. Furthermore, this change also removes the utils primitive to compute the nnz in isolation in order to prevent reintroducing such inefficiencies.
49,738
16.02.2017 12:13:10
28,800
066a8213ebe6a67aee69dbe6e7e039f3efc21e67
Fix hops construction relational expressions w/o target
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -1795,8 +1795,9 @@ public class DMLTranslator\ntarget = createTarget(source);\nif(left.getDataType() == DataType.MATRIX || right.getDataType() == DataType.MATRIX) {\n// Added to support matrix relational comparison\n+ // (we support only matrices of value type double)\ntarget.setDataType(DataType.MATRIX);\n- target.setValueType(ValueType.BOOLEAN);\n+ target.setValueType(ValueType.DOUBLE);\n}\nelse {\n// Added to support scalar relational comparison\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1257] Fix hops construction relational expressions w/o target
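For illustration, one DML scenario that plausibly exercises this path is a matrix relational expression consumed inline, without a named assignment target (values here are hypothetical); per the fix above, the resulting 0/1 mask is a double-valued matrix:
X = matrix("1 2 3 4", rows=2, cols=2);
M = (X > 2) * X;   # the relational sub-expression (X > 2) has no named target
print(sum(M));     # 7, since only the entries 3 and 4 pass the predicate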
49,736
16.02.2017 14:26:42
28,800
bbc77e71eb9b5aa464f0130380bc30d3f42107b6
[MINOR] Code refactoring of MatrixIndexingSPInstruction to enable parallel improvements in both indexing and prefetching
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/MatrixIndexingSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/MatrixIndexingSPInstruction.java", "diff": "@@ -85,33 +85,43 @@ public class MatrixIndexingSPInstruction extends IndexingSPInstruction\nsuper(op, lhsInput, rhsInput, rl, ru, cl, cu, out, opcode, istr);\n}\n- @Override\n- public void processInstruction(ExecutionContext ec)\n- throws DMLRuntimeException\n- {\n- SparkExecutionContext sec = (SparkExecutionContext)ec;\n- String opcode = getOpcode();\n+ public static MatrixBlock inmemoryIndexing(JavaPairRDD<MatrixIndexes,MatrixBlock> in1,\n+ MatrixCharacteristics mcIn, MatrixCharacteristics mcOut, IndexRange ixrange) throws DMLRuntimeException {\n+ if( isSingleBlockLookup(mcIn, ixrange) ) {\n+ return singleBlockIndexing(in1, mcIn, mcOut, ixrange);\n+ }\n+ else if( isMultiBlockLookup(in1, mcIn, mcOut, ixrange) ) {\n+ return multiBlockIndexing(in1, mcIn, mcOut, ixrange);\n+ }\n+ else\n+ throw new DMLRuntimeException(\"Incorrect usage of inmemoryIndexing\");\n+ }\n- //get indexing range\n- long rl = ec.getScalarInput(rowLower.getName(), rowLower.getValueType(), rowLower.isLiteral()).getLongValue();\n- long ru = ec.getScalarInput(rowUpper.getName(), rowUpper.getValueType(), rowUpper.isLiteral()).getLongValue();\n- long cl = ec.getScalarInput(colLower.getName(), colLower.getValueType(), colLower.isLiteral()).getLongValue();\n- long cu = ec.getScalarInput(colUpper.getName(), colUpper.getValueType(), colUpper.isLiteral()).getLongValue();\n- IndexRange ixrange = new IndexRange(rl, ru, cl, cu);\n+ private static MatrixBlock multiBlockIndexing(JavaPairRDD<MatrixIndexes,MatrixBlock> in1,\n+ MatrixCharacteristics mcIn, MatrixCharacteristics mcOut, IndexRange ixrange) throws DMLRuntimeException {\n+ //create list of all required matrix indexes\n+ List<MatrixIndexes> filter = new ArrayList<MatrixIndexes>();\n+ long rlix = UtilFunctions.computeBlockIndex(ixrange.rowStart, mcIn.getRowsPerBlock());\n+ long ruix = UtilFunctions.computeBlockIndex(ixrange.rowEnd, mcIn.getRowsPerBlock());\n+ long clix = UtilFunctions.computeBlockIndex(ixrange.colStart, mcIn.getColsPerBlock());\n+ long cuix = UtilFunctions.computeBlockIndex(ixrange.colEnd, mcIn.getColsPerBlock());\n+ for( long r=rlix; r<=ruix; r++ )\n+ for( long c=clix; c<=cuix; c++ )\n+ filter.add( new MatrixIndexes(r,c) );\n- //right indexing\n- if( opcode.equalsIgnoreCase(\"rangeReIndex\") )\n- {\n- //update and check output dimensions\n- MatrixCharacteristics mcIn = sec.getMatrixCharacteristics(input1.getName());\n- MatrixCharacteristics mcOut = sec.getMatrixCharacteristics(output.getName());\n- mcOut.set(ru-rl+1, cu-cl+1, mcIn.getRowsPerBlock(), mcIn.getColsPerBlock());\n- checkValidOutputDimensions(mcOut);\n+ //wrap PartitionPruningRDD around input to exploit pruning for out-of-core datasets\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> out = createPartitionPruningRDD(in1, filter);\n+ out = out.filter(new IsBlockInRange(ixrange.rowStart, ixrange.rowEnd, ixrange.colStart, ixrange.colEnd, mcOut)) //filter unnecessary blocks\n+ .mapToPair(new SliceBlock2(ixrange, mcOut)); //slice relevant blocks\n- //execute right indexing operation (partitioning-preserving if possible)\n- JavaPairRDD<MatrixIndexes,MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable( input1.getName() );\n+ //collect output without shuffle to avoid side-effects with custom PartitionPruningRDD\n+ MatrixBlock mbout = 
SparkExecutionContext.toMatrixBlock(out, (int)mcOut.getRows(),\n+ (int)mcOut.getCols(), mcOut.getRowsPerBlock(), mcOut.getColsPerBlock(), -1);\n+ return mbout;\n+ }\n- if( isSingleBlockLookup(mcIn, ixrange) ) {\n+ private static MatrixBlock singleBlockIndexing(JavaPairRDD<MatrixIndexes,MatrixBlock> in1,\n+ MatrixCharacteristics mcIn, MatrixCharacteristics mcOut, IndexRange ixrange) throws DMLRuntimeException {\n//single block output via lookup (on partitioned inputs, this allows for single partition\n//access to avoid a full scan of the input; note that this is especially important for\n//out-of-core datasets as entire partitions are read, not just keys as in the in-memory setting.\n@@ -128,47 +138,64 @@ public class MatrixIndexingSPInstruction extends IndexingSPInstruction\nUtilFunctions.computeCellInBlock(ixrange.rowEnd, mcIn.getRowsPerBlock()),\nUtilFunctions.computeCellInBlock(ixrange.colStart, mcIn.getColsPerBlock()),\nUtilFunctions.computeCellInBlock(ixrange.colEnd, mcIn.getColsPerBlock()), new MatrixBlock());\n-\n- sec.setMatrixOutput(output.getName(), mbout);\n+ return mbout;\n}\n- else if( isMultiBlockLookup(in1, mcIn, mcOut, ixrange) ) {\n- //create list of all required matrix indexes\n- List<MatrixIndexes> filter = new ArrayList<MatrixIndexes>();\n- long rlix = UtilFunctions.computeBlockIndex(ixrange.rowStart, mcIn.getRowsPerBlock());\n- long ruix = UtilFunctions.computeBlockIndex(ixrange.rowEnd, mcIn.getRowsPerBlock());\n- long clix = UtilFunctions.computeBlockIndex(ixrange.colStart, mcIn.getColsPerBlock());\n- long cuix = UtilFunctions.computeBlockIndex(ixrange.colEnd, mcIn.getColsPerBlock());\n- for( long r=rlix; r<=ruix; r++ )\n- for( long c=clix; c<=cuix; c++ )\n- filter.add( new MatrixIndexes(r,c) );\n- //wrap PartitionPruningRDD around input to exploit pruning for out-of-core datasets\n- JavaPairRDD<MatrixIndexes,MatrixBlock> out = createPartitionPruningRDD(in1, filter);\n- out = out.filter(new IsBlockInRange(rl, ru, cl, cu, mcOut)) //filter unnecessary blocks\n- .mapToPair(new SliceBlock2(ixrange, mcOut)); //slice relevant blocks\n-\n- //collect output without shuffle to avoid side-effects with custom PartitionPruningRDD\n- MatrixBlock mbout = SparkExecutionContext.toMatrixBlock(out, (int)mcOut.getRows(),\n- (int)mcOut.getCols(), mcOut.getRowsPerBlock(), mcOut.getColsPerBlock(), -1);\n- sec.setMatrixOutput(output.getName(), mbout);\n- }\n- else { //rdd output for general case\n+ public static JavaPairRDD<MatrixIndexes,MatrixBlock> generalCaseRightIndexing(JavaPairRDD<MatrixIndexes,MatrixBlock> in1,\n+ MatrixCharacteristics mcIn, MatrixCharacteristics mcOut, IndexRange ixrange, SparkAggType aggType) {\nJavaPairRDD<MatrixIndexes,MatrixBlock> out = null;\nif( isPartitioningPreservingRightIndexing(mcIn, ixrange) ) {\nout = in1.mapPartitionsToPair(\nnew SliceBlockPartitionFunction(ixrange, mcOut), true);\n}\n- else if( _aggType == SparkAggType.NONE\n+ else if( aggType == SparkAggType.NONE\n|| OptimizerUtils.isIndexingRangeBlockAligned(ixrange, mcIn) ) {\n- out = in1.filter(new IsBlockInRange(rl, ru, cl, cu, mcOut))\n+ out = in1.filter(new IsBlockInRange(ixrange.rowStart, ixrange.rowEnd, ixrange.colStart, ixrange.colEnd, mcOut))\n.mapToPair(new SliceSingleBlock(ixrange, mcOut));\n}\nelse {\n- out = in1.filter(new IsBlockInRange(rl, ru, cl, cu, mcOut))\n+ out = in1.filter(new IsBlockInRange(ixrange.rowStart, ixrange.rowEnd, ixrange.colStart, ixrange.colEnd, mcOut))\n.flatMapToPair(new SliceMultipleBlocks(ixrange, mcOut));\nout = RDDAggregateUtils.mergeByKey(out);\n}\n+ return 
out;\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec)\n+ throws DMLRuntimeException\n+ {\n+ SparkExecutionContext sec = (SparkExecutionContext)ec;\n+ String opcode = getOpcode();\n+\n+ //get indexing range\n+ long rl = ec.getScalarInput(rowLower.getName(), rowLower.getValueType(), rowLower.isLiteral()).getLongValue();\n+ long ru = ec.getScalarInput(rowUpper.getName(), rowUpper.getValueType(), rowUpper.isLiteral()).getLongValue();\n+ long cl = ec.getScalarInput(colLower.getName(), colLower.getValueType(), colLower.isLiteral()).getLongValue();\n+ long cu = ec.getScalarInput(colUpper.getName(), colUpper.getValueType(), colUpper.isLiteral()).getLongValue();\n+ IndexRange ixrange = new IndexRange(rl, ru, cl, cu);\n+\n+ //right indexing\n+ if( opcode.equalsIgnoreCase(\"rangeReIndex\") )\n+ {\n+ //update and check output dimensions\n+ MatrixCharacteristics mcIn = sec.getMatrixCharacteristics(input1.getName());\n+ MatrixCharacteristics mcOut = sec.getMatrixCharacteristics(output.getName());\n+ mcOut.set(ru-rl+1, cu-cl+1, mcIn.getRowsPerBlock(), mcIn.getColsPerBlock());\n+ checkValidOutputDimensions(mcOut);\n+\n+ //execute right indexing operation (partitioning-preserving if possible)\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable( input1.getName() );\n+\n+ if( isSingleBlockLookup(mcIn, ixrange) ) {\n+ sec.setMatrixOutput(output.getName(), singleBlockIndexing(in1, mcIn, mcOut, ixrange));\n+ }\n+ else if( isMultiBlockLookup(in1, mcIn, mcOut, ixrange) ) {\n+ sec.setMatrixOutput(output.getName(), multiBlockIndexing(in1, mcIn, mcOut, ixrange));\n+ }\n+ else { //rdd output for general case\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> out = generalCaseRightIndexing(in1, mcIn, mcOut, ixrange, _aggType);\n//put output RDD handle into symbol table\nsec.setRDDHandleForVariable(output.getName(), out);\n@@ -252,7 +279,7 @@ public class MatrixIndexingSPInstruction extends IndexingSPInstruction\n* @param ixrange index range\n* @return true if index range covers a single block of the input matrix\n*/\n- private static boolean isSingleBlockLookup(MatrixCharacteristics mcIn, IndexRange ixrange) {\n+ public static boolean isSingleBlockLookup(MatrixCharacteristics mcIn, IndexRange ixrange) {\nreturn UtilFunctions.computeBlockIndex(ixrange.rowStart, mcIn.getRowsPerBlock())\n== UtilFunctions.computeBlockIndex(ixrange.rowEnd, mcIn.getRowsPerBlock())\n&& UtilFunctions.computeBlockIndex(ixrange.colStart, mcIn.getColsPerBlock())\n@@ -271,7 +298,7 @@ public class MatrixIndexingSPInstruction extends IndexingSPInstruction\n* @param ixrange index range\n* @return true if index range requires a multi-block lookup\n*/\n- private static boolean isMultiBlockLookup(JavaPairRDD<?,?> in, MatrixCharacteristics mcIn, MatrixCharacteristics mcOut, IndexRange ixrange) {\n+ public static boolean isMultiBlockLookup(JavaPairRDD<?,?> in, MatrixCharacteristics mcIn, MatrixCharacteristics mcOut, IndexRange ixrange) {\nreturn SparkUtils.isHashPartitioned(in) //existing partitioner\n&& OptimizerUtils.estimatePartitionedSizeExactSparsity(mcIn) //out-of-core dataset\n> SparkExecutionContext.getDataMemoryBudget(true, true)\n@@ -557,7 +584,7 @@ public class MatrixIndexingSPInstruction extends IndexingSPInstruction\n* @param filter partition filter\n* @return matrix as {@code JavaPairRDD<MatrixIndexes,MatrixBlock>}\n*/\n- private JavaPairRDD<MatrixIndexes,MatrixBlock> createPartitionPruningRDD(\n+ private static JavaPairRDD<MatrixIndexes,MatrixBlock> 
createPartitionPruningRDD(\nJavaPairRDD<MatrixIndexes,MatrixBlock> in, List<MatrixIndexes> filter )\n{\n//build hashset of required partition ids\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Code refactoring of MatrixIndexingSPInstruction to enable parallel improvements in both indexing and prefetching
49,762
16.02.2017 16:13:14
28,800
77ffd02ee1991e1090782e0c5f03f579d6f40128
Decrease numCols to prevent Spark codegen issue Closes
[ { "change_type": "MODIFY", "old_path": "docs/spark-mlcontext-programming-guide.md", "new_path": "docs/spark-mlcontext-programming-guide.md", "diff": "@@ -124,7 +124,7 @@ None\n## DataFrame Example\n-For demonstration purposes, we'll use Spark to create a `DataFrame` called `df` of random `double`s from 0 to 1 consisting of 10,000 rows and 1,000 columns.\n+For demonstration purposes, we'll use Spark to create a `DataFrame` called `df` of random `double`s from 0 to 1 consisting of 10,000 rows and 100 columns.\n<div class=\"codetabs\">\n@@ -134,7 +134,7 @@ import org.apache.spark.sql._\nimport org.apache.spark.sql.types.{StructType,StructField,DoubleType}\nimport scala.util.Random\nval numRows = 10000\n-val numCols = 1000\n+val numCols = 100\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\nval df = spark.createDataFrame(data, schema)\n@@ -155,8 +155,8 @@ import scala.util.Random\nscala> val numRows = 10000\nnumRows: Int = 10000\n-scala> val numCols = 1000\n-numCols: Int = 1000\n+scala> val numCols = 100\n+numCols: Int = 100\nscala> val data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\ndata: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = MapPartitionsRDD[1] at map at <console>:42\n@@ -175,7 +175,7 @@ df: org.apache.spark.sql.DataFrame = [C0: double, C1: double, C2: double, C3: do\nWe'll create a DML script to find the minimum, maximum, and mean values in a matrix. This\nscript has one input variable, matrix `Xin`, and three output variables, `minOut`, `maxOut`, and `meanOut`.\n-For performance, we'll specify metadata indicating that the matrix has 10,000 rows and 1,000 columns.\n+For performance, we'll specify metadata indicating that the matrix has 10,000 rows and 100 columns.\nWe'll create a DML script using the ScriptFactory `dml` method with the `minMaxMean` script String. The\ninput variable is specified to be our `DataFrame` `df` with `MatrixMetadata` `mm`. 
The output\n@@ -218,7 +218,7 @@ meanOut = mean(Xin)\n\"\nscala> val mm = new MatrixMetadata(numRows, numCols)\n-mm: org.apache.sysml.api.mlcontext.MatrixMetadata = rows: 10000, columns: 1000, non-zeros: None, rows per block: None, columns per block: None\n+mm: org.apache.sysml.api.mlcontext.MatrixMetadata = rows: 10000, columns: 100, non-zeros: None, rows per block: None, columns per block: None\nscala> val minMaxMeanScript = dml(minMaxMean).in(\"Xin\", df, mm).out(\"minOut\", \"maxOut\", \"meanOut\")\nminMaxMeanScript: org.apache.sysml.api.mlcontext.Script =\n@@ -929,7 +929,7 @@ Symbol Table:\n[1] (Double) meanOut: 0.5000954668004209\n[2] (Double) maxOut: 0.9999999956646207\n[3] (Double) minOut: 1.4149740823476975E-7\n- [4] (Matrix) Xin: Matrix: scratch_space/temp_1166464711339222, [10000 x 1000, nnz=10000000, blocks (1000 x 1000)], binaryblock, not-dirty\n+ [4] (Matrix) Xin: Matrix: scratch_space/temp_1166464711339222, [10000 x 100, nnz=1000000, blocks (1000 x 1000)], binaryblock, not-dirty\nScript String:\n@@ -980,7 +980,7 @@ Symbol Table:\n[1] (Double) meanOut: 0.5000954668004209\n[2] (Double) maxOut: 0.9999999956646207\n[3] (Double) minOut: 1.4149740823476975E-7\n- [4] (Matrix) Xin: Matrix: scratch_space/temp_1166464711339222, [10000 x 1000, nnz=10000000, blocks (1000 x 1000)], binaryblock, not-dirty\n+ [4] (Matrix) Xin: Matrix: scratch_space/temp_1166464711339222, [10000 x 100, nnz=1000000, blocks (1000 x 1000)], binaryblock, not-dirty\nscala> minMaxMeanScript.clearAll\n@@ -1129,7 +1129,7 @@ meanOut = mean(Xin)\n\"\nscala> val mm = new MatrixMetadata(numRows, numCols)\n-mm: org.apache.sysml.api.mlcontext.MatrixMetadata = rows: 10000, columns: 1000, non-zeros: None, rows per block: None, columns per block: None\n+mm: org.apache.sysml.api.mlcontext.MatrixMetadata = rows: 10000, columns: 100, non-zeros: None, rows per block: None, columns per block: None\nscala> val minMaxMeanScript = dml(minMaxMean).in(\"Xin\", df, mm).out(\"minOut\", \"maxOut\", \"meanOut\")\nminMaxMeanScript: org.apache.sysml.api.mlcontext.Script =\n@@ -1147,7 +1147,7 @@ scala> val (min, max, mean) = ml.execute(minMaxMeanScript).getTuple[Double, Doub\nPROGRAM\n--MAIN PROGRAM\n----GENERIC (lines 1-8) [recompile=false]\n-------(12) TRead Xin [10000,1000,1000,1000,10000000] [0,0,76 -> 76MB] [chkpt], CP\n+------(12) TRead Xin [10000,100,1000,1000,1000000] [0,0,76 -> 76MB] [chkpt], CP\n------(13) ua(minRC) (12) [0,0,-1,-1,-1] [76,0,0 -> 76MB], CP\n------(21) TWrite minOut (13) [0,0,-1,-1,-1] [0,0,0 -> 0MB], CP\n------(14) ua(maxRC) (12) [0,0,-1,-1,-1] [76,0,0 -> 76MB], CP\n@@ -1523,7 +1523,7 @@ There are currently two mechanisms for this in SystemML: **(1) BinaryBlockMatrix\nIf you have an input DataFrame, it can be converted to a BinaryBlockMatrix, and this BinaryBlockMatrix\ncan be passed as an input rather than passing in the DataFrame as an input.\n-For example, suppose we had a 10000x1000 matrix represented as a DataFrame, as we saw in an earlier example.\n+For example, suppose we had a 10000x100 matrix represented as a DataFrame, as we saw in an earlier example.\nNow suppose we create two Script objects with the DataFrame as an input, as shown below. 
In the Spark Shell,\nwhen executing this code, you can see that each of the two Script object creations requires the\ntime-consuming data conversion step.\n@@ -1533,7 +1533,7 @@ import org.apache.spark.sql._\nimport org.apache.spark.sql.types.{StructType,StructField,DoubleType}\nimport scala.util.Random\nval numRows = 10000\n-val numCols = 1000\n+val numCols = 100\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\nval df = spark.createDataFrame(data, schema)\n@@ -1554,7 +1554,7 @@ import org.apache.spark.sql._\nimport org.apache.spark.sql.types.{StructType,StructField,DoubleType}\nimport scala.util.Random\nval numRows = 10000\n-val numCols = 1000\n+val numCols = 100\nval data = sc.parallelize(0 to numRows-1).map { _ => Row.fromSeq(Seq.fill(numCols)(Random.nextDouble)) }\nval schema = StructType((0 to numCols-1).map { i => StructField(\"C\" + i, DoubleType, true) } )\nval df = spark.createDataFrame(data, schema)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1279] Decrease numCols to prevent Spark codegen issue Closes #395.
49,736
16.02.2017 21:41:07
28,800
7e2383bfa51fc51cfe637680f94758eb1ae193cf
[MINOR] Addressed corner cases (i.e. empty blocks and extremely large blocks) as well as added recomputing of nnz in the converters
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java", "diff": "@@ -154,8 +154,10 @@ public class RDDConverterUtilsExt\ndouble val = buf1.getDouble();\nint rowIndex = buf2.getInt();\nint colIndex = buf3.getInt();\n- mb.setValue(rowIndex, colIndex, val); // TODO: Improve the performance\n+ mb.setValue(rowIndex, colIndex, val);\n}\n+ mb.recomputeNonZeros();\n+ mb.examSparsity();\nreturn mb;\n}\n@@ -169,7 +171,10 @@ public class RDDConverterUtilsExt\nthrow new DMLRuntimeException(\"Convertion to sparse format not supported\");\n}\nelse {\n- double [] denseBlock = new double[rlen*clen];\n+ long limit = rlen*clen;\n+ if( limit > Integer.MAX_VALUE )\n+ throw new DMLRuntimeException(\"Dense NumPy array of size \" + limit + \" cannot be converted to MatrixBlock\");\n+ double [] denseBlock = new double[(int) limit];\nByteBuffer buf = ByteBuffer.wrap(data);\nbuf.order(ByteOrder.nativeOrder());\nfor(int i = 0; i < rlen*clen; i++) {\n@@ -177,6 +182,7 @@ public class RDDConverterUtilsExt\n}\nmb.init( denseBlock, rlen, clen );\n}\n+ mb.recomputeNonZeros();\nmb.examSparsity();\nreturn mb;\n}\n@@ -185,18 +191,28 @@ public class RDDConverterUtilsExt\nbyte [] ret = null;\nif(mb.isInSparseFormat()) {\nmb.sparseToDense();\n-// throw new DMLRuntimeException(\"Sparse to dense conversion is not yet implemented\");\n}\n+ long limit = mb.getNumRows()*mb.getNumColumns();\n+ int times = Double.SIZE / Byte.SIZE;\n+ if( limit * times > Integer.MAX_VALUE )\n+ throw new DMLRuntimeException(\"MatrixBlock of size \" + limit + \" cannot be converted to dense numpy array\");\n+ ret = new byte[(int) (limit * times)];\n+\ndouble [] denseBlock = mb.getDenseBlock();\n- if(denseBlock == null) {\n- throw new DMLRuntimeException(\"Sparse to dense conversion is not yet implemented\");\n+ if(mb.isEmptyBlock()) {\n+ for(int i=0;i < limit;i++){\n+ ByteBuffer.wrap(ret, i*times, times).order(ByteOrder.nativeOrder()).putDouble(0);\n}\n- int times = Double.SIZE / Byte.SIZE;\n- ret = new byte[denseBlock.length * times];\n+ }\n+ else if(denseBlock == null) {\n+ throw new DMLRuntimeException(\"Error while dealing with empty blocks.\");\n+ }\n+ else {\nfor(int i=0;i < denseBlock.length;i++){\nByteBuffer.wrap(ret, i*times, times).order(ByteOrder.nativeOrder()).putDouble(denseBlock[i]);\n}\n+ }\nreturn ret;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Addressed corner cases (i.e. empty blocks and extremely large blocks) as well as added recomputing of nnz in the converters
49,736
17.02.2017 14:54:23
28,800
9d0087cbbd250c9b486923555b450602f816cf19
Updated the default parameters of mllearn to match those of scikit-learn. Also updated the test to compare our algorithm to scikit-learn. Closes
[ { "change_type": "MODIFY", "old_path": "docs/algorithms-regression.md", "new_path": "docs/algorithms-regression.md", "diff": "@@ -83,8 +83,8 @@ efficient when the number of features $m$ is relatively small\n<div data-lang=\"Python\" markdown=\"1\">\n{% highlight python %}\nfrom systemml.mllearn import LinearRegression\n-# C = 1/reg\n-lr = LinearRegression(sqlCtx, fit_intercept=True, C=1.0, solver='direct-solve')\n+# C = 1/reg (to disable regularization, use float(\"inf\"))\n+lr = LinearRegression(sqlCtx, fit_intercept=True, normalize=False, C=float(\"inf\"), solver='direct-solve')\n# X_train, y_train and X_test can be NumPy matrices or Pandas DataFrame or SciPy Sparse Matrix\ny_test = lr.fit(X_train, y_train)\n# df_train is DataFrame that contains two columns: \"features\" (of type Vector) and \"label\". df_test is a DataFrame that contains the column \"features\"\n@@ -125,8 +125,8 @@ y_test = lr.fit(df_train)\n<div data-lang=\"Python\" markdown=\"1\">\n{% highlight python %}\nfrom systemml.mllearn import LinearRegression\n-# C = 1/reg\n-lr = LinearRegression(sqlCtx, fit_intercept=True, max_iter=100, tol=0.000001, C=1.0, solver='newton-cg')\n+# C = 1/reg (to disable regularization, use float(\"inf\"))\n+lr = LinearRegression(sqlCtx, fit_intercept=True, normalize=False, max_iter=100, tol=0.000001, C=float(\"inf\"), solver='newton-cg')\n# X_train, y_train and X_test can be NumPy matrices or Pandas DataFrames or SciPy Sparse matrices\ny_test = lr.fit(X_train, y_train)\n# df_train is DataFrame that contains two columns: \"features\" (of type Vector) and \"label\". df_test is a DataFrame that contains the column \"features\"\n" }, { "change_type": "MODIFY", "old_path": "docs/beginners-guide-python.md", "new_path": "docs/beginners-guide-python.md", "diff": "@@ -228,7 +228,7 @@ X_test = diabetes_X[-20:]\ny_train = diabetes.target[:-20]\ny_test = diabetes.target[-20:]\n# Create linear regression object\n-regr = LinearRegression(sqlCtx, fit_intercept=True, C=1, solver='direct-solve')\n+regr = LinearRegression(sqlCtx, fit_intercept=True, C=float(\"inf\"), solver='direct-solve')\n# Train the model using the training sets\nregr.fit(X_train, y_train)\ny_predicted = regr.predict(X_test)\n" }, { "change_type": "MODIFY", "old_path": "docs/python-reference.md", "new_path": "docs/python-reference.md", "diff": "@@ -731,7 +731,7 @@ LogisticRegression score: 0.922222\n### Reference documentation\n- *class*`systemml.mllearn.estimators.LinearRegression`(*sqlCtx*, *fit\\_intercept=True*, *max\\_iter=100*, *tol=1e-06*, *C=1.0*, *solver='newton-cg'*, *transferUsingDF=False*)(#systemml.mllearn.estimators.LinearRegression \"Permalink to this definition\")\n+ *class*`systemml.mllearn.estimators.LinearRegression`(*sqlCtx*, *fit\\_intercept=True*, *normalize=False*, *max\\_iter=100*, *tol=1e-06*, *C=float(\"inf\")*, *solver='newton-cg'*, *transferUsingDF=False*)(#systemml.mllearn.estimators.LinearRegression \"Permalink to this definition\")\n: Bases: `systemml.mllearn.estimators.BaseSystemMLRegressor`{.xref .py\n.py-class .docutils .literal}\n@@ -760,7 +760,7 @@ LogisticRegression score: 0.922222\n>>> # The mean square error\n>>> print(\"Residual sum of squares: %.2f\" % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))\n- *class*`systemml.mllearn.estimators.LogisticRegression`(*sqlCtx*, *penalty='l2'*, *fit\\_intercept=True*, *max\\_iter=100*, *max\\_inner\\_iter=0*, *tol=1e-06*, *C=1.0*, *solver='newton-cg'*, *transferUsingDF=False*)(#systemml.mllearn.estimators.LogisticRegression \"Permalink to this 
definition\")\n+ *class*`systemml.mllearn.estimators.LogisticRegression`(*sqlCtx*, *penalty='l2'*, *fit\\_intercept=True*, *normalize=False*, *max\\_iter=100*, *max\\_inner\\_iter=0*, *tol=1e-06*, *C=1.0*, *solver='newton-cg'*, *transferUsingDF=False*)(#systemml.mllearn.estimators.LogisticRegression \"Permalink to this definition\")\n: Bases: `systemml.mllearn.estimators.BaseSystemMLClassifier`{.xref\n.py .py-class .docutils .literal}\n@@ -817,7 +817,7 @@ LogisticRegression score: 0.922222\n>>> prediction = model.transform(test)\n>>> prediction.show()\n- *class*`systemml.mllearn.estimators.SVM`(*sqlCtx*, *fit\\_intercept=True*, *max\\_iter=100*, *tol=1e-06*, *C=1.0*, *is\\_multi\\_class=False*, *transferUsingDF=False*)(#systemml.mllearn.estimators.SVM \"Permalink to this definition\")\n+ *class*`systemml.mllearn.estimators.SVM`(*sqlCtx*, *fit\\_intercept=True*, *normalize=False*, *max\\_iter=100*, *tol=1e-06*, *C=1.0*, *is\\_multi\\_class=False*, *transferUsingDF=False*)(#systemml.mllearn.estimators.SVM \"Permalink to this definition\")\n: Bases: `systemml.mllearn.estimators.BaseSystemMLClassifier`{.xref\n.py .py-class .docutils .literal}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java", "diff": "@@ -195,7 +195,7 @@ public class RDDConverterUtilsExt\nlong limit = mb.getNumRows()*mb.getNumColumns();\nint times = Double.SIZE / Byte.SIZE;\n- if( limit * times > Integer.MAX_VALUE )\n+ if( limit > Integer.MAX_VALUE / times )\nthrow new DMLRuntimeException(\"MatrixBlock of size \" + limit + \" cannot be converted to dense numpy array\");\nret = new byte[(int) (limit * times)];\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/estimators.py", "new_path": "src/main/python/systemml/mllearn/estimators.py", "diff": "@@ -294,7 +294,7 @@ class LogisticRegression(BaseSystemMLClassifier):\n\"\"\"\n- def __init__(self, sparkSession, penalty='l2', fit_intercept=True, max_iter=100, max_inner_iter=0, tol=0.000001, C=1.0, solver='newton-cg', transferUsingDF=False):\n+ def __init__(self, sparkSession, penalty='l2', fit_intercept=True, normalize=False, max_iter=100, max_inner_iter=0, tol=0.000001, C=1.0, solver='newton-cg', transferUsingDF=False):\n\"\"\"\nPerforms both binomial and multinomial logistic regression.\n@@ -303,10 +303,11 @@ class LogisticRegression(BaseSystemMLClassifier):\nsparkSession: PySpark SparkSession\npenalty: Only 'l2' supported\nfit_intercept: Specifies whether to add intercept or not (default: True)\n+ normalize: This parameter is ignored when fit_intercept is set to False. (default: False)\nmax_iter: Maximum number of outer (Fisher scoring) iterations (default: 100)\nmax_inner_iter: Maximum number of inner (conjugate gradient) iterations, or 0 if no maximum limit provided (default: 0)\ntol: Tolerance used in the convergence criterion (default: 0.000001)\n- C: 1/regularization parameter (default: 1.0)\n+ C: 1/regularization parameter (default: 1.0 similar to scikit-learn. 
To disable regularization, please use float(\"inf\"))\nsolver: Only 'newton-cg' solver supported\n\"\"\"\nself.sparkSession = sparkSession\n@@ -316,12 +317,11 @@ class LogisticRegression(BaseSystemMLClassifier):\nself.estimator = self.sc._jvm.org.apache.sysml.api.ml.LogisticRegression(self.uid, self.sc._jsc.sc())\nself.estimator.setMaxOuterIter(max_iter)\nself.estimator.setMaxInnerIter(max_inner_iter)\n- if C <= 0:\n- raise Exception('C has to be positive')\n- reg = 1.0 / C\n+ reg = 0.0 if C == float(\"inf\") else 1.0 / C\n+ icpt = 2 if fit_intercept == True and normalize == True else int(fit_intercept)\nself.estimator.setRegParam(reg)\nself.estimator.setTol(tol)\n- self.estimator.setIcpt(int(fit_intercept))\n+ self.estimator.setIcpt(icpt)\nself.transferUsingDF = transferUsingDF\nself.setOutputRawPredictionsToFalse = True\nif penalty != 'l2':\n@@ -361,7 +361,7 @@ class LinearRegression(BaseSystemMLRegressor):\n\"\"\"\n- def __init__(self, sparkSession, fit_intercept=True, max_iter=100, tol=0.000001, C=1.0, solver='newton-cg', transferUsingDF=False):\n+ def __init__(self, sparkSession, fit_intercept=True, normalize=False, max_iter=100, tol=0.000001, C=float(\"inf\"), solver='newton-cg', transferUsingDF=False):\n\"\"\"\nPerforms linear regression to model the relationship between one numerical response variable and one or more explanatory (feature) variables.\n@@ -369,9 +369,10 @@ class LinearRegression(BaseSystemMLRegressor):\n----------\nsparkSession: PySpark SparkSession\nfit_intercept: Specifies whether to add intercept or not (default: True)\n+ normalize: If True, the regressors X will be normalized before regression. This parameter is ignored when fit_intercept is set to False. (default: False)\nmax_iter: Maximum number of conjugate gradient iterations, or 0 if no maximum limit provided (default: 100)\ntol: Tolerance used in the convergence criterion (default: 0.000001)\n- C: 1/regularization parameter (default: 1.0)\n+ C: 1/regularization parameter (default: float(\"inf\") as scikit learn doesnot support regularization by default)\nsolver: Supports either 'newton-cg' or 'direct-solve' (default: 'newton-cg').\nDepending on the size and the sparsity of the feature matrix, one or the other solver may be more efficient.\n'direct-solve' solver is more efficient when the number of features is relatively small (m < 1000) and\n@@ -386,12 +387,11 @@ class LinearRegression(BaseSystemMLRegressor):\nelse:\nraise Exception('Only newton-cg solver supported')\nself.estimator.setMaxIter(max_iter)\n- if C <= 0:\n- raise Exception('C has to be positive')\n- reg = 1.0 / C\n+ reg = 0.0 if C == float(\"inf\") else 1.0 / C\n+ icpt = 2 if fit_intercept == True and normalize == True else int(fit_intercept)\nself.estimator.setRegParam(reg)\nself.estimator.setTol(tol)\n- self.estimator.setIcpt(int(fit_intercept))\n+ self.estimator.setIcpt(icpt)\nself.transferUsingDF = transferUsingDF\nself.setOutputRawPredictionsToFalse = False\n@@ -421,7 +421,7 @@ class SVM(BaseSystemMLClassifier):\n\"\"\"\n- def __init__(self, sparkSession, fit_intercept=True, max_iter=100, tol=0.000001, C=1.0, is_multi_class=False, transferUsingDF=False):\n+ def __init__(self, sparkSession, fit_intercept=True, normalize=False, max_iter=100, tol=0.000001, C=1.0, is_multi_class=False, transferUsingDF=False):\n\"\"\"\nPerforms both binary-class and multiclass SVM (Support Vector Machines).\n@@ -429,9 +429,10 @@ class SVM(BaseSystemMLClassifier):\n----------\nsparkSession: PySpark SparkSession\nfit_intercept: Specifies whether to add intercept 
or not (default: True)\n+ normalize: This parameter is ignored when fit_intercept is set to False. (default: False)\nmax_iter: Maximum number iterations (default: 100)\ntol: Tolerance used in the convergence criterion (default: 0.000001)\n- C: 1/regularization parameter (default: 1.0)\n+ C: 1/regularization parameter (default: 1.0 similar to scikit-learn. To disable regularization, please use float(\"inf\"))\nis_multi_class: Specifies whether to use binary-class SVM or multi-class SVM algorithm (default: False)\n\"\"\"\nself.sparkSession = sparkSession\n@@ -442,10 +443,11 @@ class SVM(BaseSystemMLClassifier):\nself.estimator.setMaxIter(max_iter)\nif C <= 0:\nraise Exception('C has to be positive')\n- reg = 1.0 / C\n+ reg = 0.0 if C == float(\"inf\") else 1.0 / C\n+ icpt = 2 if fit_intercept == True and normalize == True else int(fit_intercept)\nself.estimator.setRegParam(reg)\nself.estimator.setTol(tol)\n- self.estimator.setIcpt(int(fit_intercept))\n+ self.estimator.setIcpt(icpt)\nself.transferUsingDF = transferUsingDF\nself.setOutputRawPredictionsToFalse = False\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/tests/test_mllearn_df.py", "new_path": "src/main/python/tests/test_mllearn_df.py", "diff": "@@ -40,7 +40,8 @@ from pyspark.sql import SparkSession\nfrom sklearn import datasets, metrics, neighbors\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n-\n+from sklearn import linear_model\n+from sklearn.metrics import accuracy_score, r2_score\nfrom systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM\nsc = SparkContext()\n@@ -61,20 +62,40 @@ class TestMLLearn(unittest.TestCase):\ny_test = y_digits[int(.9 * n_samples):]\n# Convert to DataFrame for i/o: current way to transfer data\nlogistic = LogisticRegression(sparkSession, transferUsingDF=True)\n- score = logistic.fit(X_train, y_train).score(X_test, y_test)\n- self.failUnless(score > 0.9)\n+ logistic.fit(X_train, y_train)\n+ mllearn_predicted = logistic.predict(X_test)\n+ sklearn_logistic = linear_model.LogisticRegression()\n+ sklearn_logistic.fit(X_train, y_train)\n+ self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn\n- def test_linear_regression_sk2(self):\n+ def test_linear_regression(self):\ndiabetes = datasets.load_diabetes()\ndiabetes_X = diabetes.data[:, np.newaxis, 2]\ndiabetes_X_train = diabetes_X[:-20]\ndiabetes_X_test = diabetes_X[-20:]\ndiabetes_y_train = diabetes.target[:-20]\ndiabetes_y_test = diabetes.target[-20:]\n- regr = LinearRegression(sparkSession, transferUsingDF=True)\n+ regr = LinearRegression(sparkSession, solver='direct-solve', transferUsingDF=True)\nregr.fit(diabetes_X_train, diabetes_y_train)\n- score = regr.score(diabetes_X_test, diabetes_y_test)\n- self.failUnless(score > 0.4) # TODO: Improve r2-score (may be I am using it incorrectly)\n+ mllearn_predicted = regr.predict(diabetes_X_test)\n+ sklearn_regr = linear_model.LinearRegression()\n+ sklearn_regr.fit(diabetes_X_train, diabetes_y_train)\n+ self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn\n+\n+ def test_linear_regression_cg(self):\n+ diabetes = datasets.load_diabetes()\n+ diabetes_X = diabetes.data[:, np.newaxis, 2]\n+ diabetes_X_train = diabetes_X[:-20]\n+ diabetes_X_test = diabetes_X[-20:]\n+ diabetes_y_train = diabetes.target[:-20]\n+ diabetes_y_test = 
diabetes.target[-20:]\n+ regr = LinearRegression(sparkSession, solver='newton-cg', transferUsingDF=True)\n+ regr.fit(diabetes_X_train, diabetes_y_train)\n+ mllearn_predicted = regr.predict(diabetes_X_test)\n+ sklearn_regr = linear_model.LinearRegression()\n+ sklearn_regr.fit(diabetes_X_train, diabetes_y_train)\n+ self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn\n+\ndef test_svm_sk2(self):\ndigits = datasets.load_digits()\n@@ -86,22 +107,11 @@ class TestMLLearn(unittest.TestCase):\nX_test = X_digits[int(.9 * n_samples):]\ny_test = y_digits[int(.9 * n_samples):]\nsvm = SVM(sparkSession, is_multi_class=True, transferUsingDF=True)\n- score = svm.fit(X_train, y_train).score(X_test, y_test)\n- self.failUnless(score > 0.9)\n-\n- #def test_naive_bayes_sk2(self):\n- # categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']\n- # newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)\n- # newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)\n- # vectorizer = TfidfVectorizer()\n- # # Both vectors and vectors_test are SciPy CSR matrix\n- # vectors = vectorizer.fit_transform(newsgroups_train.data)\n- # vectors_test = vectorizer.transform(newsgroups_test.data)\n- # nb = NaiveBayes(sparkSession)\n- # nb.fit(vectors, newsgroups_train.target)\n- # pred = nb.predict(vectors_test)\n- # score = metrics.f1_score(newsgroups_test.target, pred, average='weighted')\n- # self.failUnless(score > 0.8)\n+ mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)\n+ from sklearn import linear_model, svm\n+ clf = svm.LinearSVC()\n+ sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)\n+ self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )\nif __name__ == '__main__':\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/tests/test_mllearn_numpy.py", "new_path": "src/main/python/tests/test_mllearn_numpy.py", "diff": "@@ -40,11 +40,26 @@ from pyspark.sql import SparkSession\nfrom sklearn import datasets, metrics, neighbors\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n-\n+from sklearn.metrics import accuracy_score, r2_score\nfrom systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM\n+from sklearn import linear_model\nsc = SparkContext()\nsparkSession = SparkSession.builder.getOrCreate()\n+import os\n+\n+def writeColVector(X, fileName):\n+ fileName = os.path.join(os.getcwd(), fileName)\n+ X.tofile(fileName, sep='\\n')\n+ metaDataFileContent = '{ \"data_type\": \"matrix\", \"value_type\": \"double\", \"rows\":' + str(len(X)) + ', \"cols\": 1, \"nnz\": -1, \"format\": \"csv\", \"author\": \"systemml-tests\", \"created\": \"0000-00-00 00:00:00 PST\" }'\n+ with open(fileName+'.mtd', 'w') as text_file:\n+ text_file.write(metaDataFileContent)\n+\n+def deleteIfExists(fileName):\n+ try:\n+ os.remove(fileName)\n+ except OSError:\n+ pass\n# Currently not integrated with JUnit test\n# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py\n@@ -59,8 +74,11 @@ class TestMLLearn(unittest.TestCase):\nX_test = X_digits[int(.9 * n_samples):]\ny_test = y_digits[int(.9 * n_samples):]\nlogistic = LogisticRegression(sparkSession)\n- score = logistic.fit(X_train, y_train).score(X_test, y_test)\n- self.failUnless(score > 0.9)\n+ logistic.fit(X_train, y_train)\n+ mllearn_predicted = 
logistic.predict(X_test)\n+ sklearn_logistic = linear_model.LogisticRegression()\n+ sklearn_logistic.fit(X_train, y_train)\n+ self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn\ndef test_logistic_mlpipeline(self):\ntraining = sparkSession.createDataFrame([\n@@ -101,10 +119,26 @@ class TestMLLearn(unittest.TestCase):\ndiabetes_X_test = diabetes_X[-20:]\ndiabetes_y_train = diabetes.target[:-20]\ndiabetes_y_test = diabetes.target[-20:]\n- regr = LinearRegression(sparkSession)\n+ regr = LinearRegression(sparkSession, solver='direct-solve')\n+ regr.fit(diabetes_X_train, diabetes_y_train)\n+ mllearn_predicted = regr.predict(diabetes_X_test)\n+ sklearn_regr = linear_model.LinearRegression()\n+ sklearn_regr.fit(diabetes_X_train, diabetes_y_train)\n+ self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn\n+\n+ def test_linear_regression_cg(self):\n+ diabetes = datasets.load_diabetes()\n+ diabetes_X = diabetes.data[:, np.newaxis, 2]\n+ diabetes_X_train = diabetes_X[:-20]\n+ diabetes_X_test = diabetes_X[-20:]\n+ diabetes_y_train = diabetes.target[:-20]\n+ diabetes_y_test = diabetes.target[-20:]\n+ regr = LinearRegression(sparkSession, solver='newton-cg')\nregr.fit(diabetes_X_train, diabetes_y_train)\n- score = regr.score(diabetes_X_test, diabetes_y_test)\n- self.failUnless(score > 0.4) # TODO: Improve r2-score (may be I am using it incorrectly)\n+ mllearn_predicted = regr.predict(diabetes_X_test)\n+ sklearn_regr = linear_model.LinearRegression()\n+ sklearn_regr.fit(diabetes_X_train, diabetes_y_train)\n+ self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn\ndef test_svm(self):\ndigits = datasets.load_digits()\n@@ -116,8 +150,11 @@ class TestMLLearn(unittest.TestCase):\nX_test = X_digits[int(.9 * n_samples):]\ny_test = y_digits[int(.9 * n_samples):]\nsvm = SVM(sparkSession, is_multi_class=True)\n- score = svm.fit(X_train, y_train).score(X_test, y_test)\n- self.failUnless(score > 0.9)\n+ mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)\n+ from sklearn import linear_model, svm\n+ clf = svm.LinearSVC()\n+ sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)\n+ self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )\ndef test_naive_bayes(self):\ndigits = datasets.load_digits()\n@@ -129,22 +166,26 @@ class TestMLLearn(unittest.TestCase):\nX_test = X_digits[int(.9 * n_samples):]\ny_test = y_digits[int(.9 * n_samples):]\nnb = NaiveBayes(sparkSession)\n- score = nb.fit(X_train, y_train).score(X_test, y_test)\n- self.failUnless(score > 0.8)\n-\n- #def test_naive_bayes1(self):\n- # categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']\n- # newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)\n- # newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)\n- # vectorizer = TfidfVectorizer()\n- # # Both vectors and vectors_test are SciPy CSR matrix\n- # vectors = vectorizer.fit_transform(newsgroups_train.data)\n- # vectors_test = vectorizer.transform(newsgroups_test.data)\n- # nb = NaiveBayes(sparkSession)\n- # nb.fit(vectors, newsgroups_train.target)\n- # pred = nb.predict(vectors_test)\n- # score = metrics.f1_score(newsgroups_test.target, pred, average='weighted')\n- # self.failUnless(score > 0.8)\n+ 
mllearn_predicted = nb.fit(X_train, y_train).predict(X_test)\n+ from sklearn.naive_bayes import MultinomialNB\n+ clf = MultinomialNB()\n+ sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)\n+ self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )\n+\n+ def test_naive_bayes1(self):\n+ categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']\n+ newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)\n+ newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)\n+ vectorizer = TfidfVectorizer()\n+ # Both vectors and vectors_test are SciPy CSR matrix\n+ vectors = vectorizer.fit_transform(newsgroups_train.data)\n+ vectors_test = vectorizer.transform(newsgroups_test.data)\n+ nb = NaiveBayes(sparkSession)\n+ mllearn_predicted = nb.fit(vectors, newsgroups_train.target).predict(vectors_test)\n+ from sklearn.naive_bayes import MultinomialNB\n+ clf = MultinomialNB()\n+ sklearn_predicted = clf.fit(vectors, newsgroups_train.target).predict(vectors_test)\n+ self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )\nif __name__ == '__main__':\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1238] Updated the default parameters of mllearn to match those of scikit-learn. - Also updated the test to compare our algorithm to scikit-learn. Closes #398.
49,738
19.02.2017 23:40:53
28,800
e1cad8a8b76d2600f6771928df2af25aadfcbcf9
Fix frame left indexing w/ heterogeneous column schemas
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java", "diff": "@@ -756,8 +756,17 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n//copy data to output and partial overwrite w/ rhs\nfor( int j=0; j<getNumColumns(); j++ ) {\nArray tmp = _coldata[j].clone();\n- if( j>=cl && j<=cu )\n+ if( j>=cl && j<=cu ) {\n+ //fast-path for homogeneous column schemas\n+ if( _schema[j]==rhsFrame._schema[j-cl] )\ntmp.set(rl, ru, rhsFrame._coldata[j-cl]);\n+ //general-path for heterogeneous column schemas\n+ else {\n+ for( int i=rl; i<=ru; i++ )\n+ tmp.set(i, UtilFunctions.objectToObject(\n+ _schema[j], rhsFrame._coldata[j-cl].get(i-rl)));\n+ }\n+ }\nret._coldata[j] = tmp;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1337] Fix frame left indexing w/ heterogeneous column schemas
49,768
20.02.2017 03:01:34
28,800
cba7d2f7b3595d2c6d6c2248fedfed157107965f
Create RC builds from branch
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -48,7 +48,7 @@ OPTIONS\n--developmentVersion - Release identifier used for next development cyce\n--releaseRc - Release RC identifier used when publishing, default 'rc1'\n--tag - Release Tag identifier used when taging the release, default 'v$releaseVersion'\n---gitCommitHash - Release tag or commit to build from, default master HEAD\n+--gitCommitHash - Release tag, branch name or commit to build from, default master HEAD\n--dryRun - Dry run only, mostly used for testing.\nA GPG passphrase is expected as an environment variable\n@@ -61,6 +61,9 @@ release-build.sh --release-prepare --releaseVersion=\"0.11.0-incubating\" --develo\nrelease-build.sh --release-prepare --releaseVersion=\"0.11.0-incubating\" --developmentVersion=\"0.12.0-SNAPSHOT\" --releaseRc=\"rc1\" --tag=\"v0.11.0-incubating-rc1\"\nrelease-build.sh --release-prepare --releaseVersion=\"0.11.0-incubating\" --developmentVersion=\"0.12.0-SNAPSHOT\" --releaseRc=\"rc1\" --tag=\"v0.11.0-incubating-rc1\" --gitCommitHash=\"a874b73\" --dryRun\n+# Create 0.12 RC2 builds from branch-0.12\n+./release-build.sh --release-prepare --releaseVersion=\"0.12.0-incubating\" --developmentVersion=\"0.12.1-incubating-SNAPSHOT\" --releaseRc=\"rc2\" --tag=\"v0.12.0-incubating-rc2\" --gitCommitHash=\"branch-0.12\"\n+\nrelease-build.sh --release-publish --gitCommitHash=\"a874b73\"\nrelease-build.sh --release-publish --gitTag=\"v0.11.0-incubating-rc1\"\n@@ -223,7 +226,7 @@ echo \" \"\nfunction checkout_code {\n# Checkout code\nrm -rf $RELEASE_WORK_DIR\n- mkdir $RELEASE_WORK_DIR\n+ mkdir -p $RELEASE_WORK_DIR\ncd $RELEASE_WORK_DIR\ngit clone https://git-wip-us.apache.org/repos/asf/incubator-systemml.git\ncd incubator-systemml\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1191] Create RC builds from branch
49,768
20.02.2017 12:08:38
28,800
07f26ca4ebf9471789c4535053c679f55a2291dc
[maven-release-plugin] prepare release v0.13.0-incubating-rc1
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>0.13.0-incubating-SNAPSHOT</version>\n+ <version>0.13.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:[email protected]:apache/incubator-systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=incubator-systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.13.0-incubating-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n" } ]
Java
Apache License 2.0
apache/systemds
[maven-release-plugin] prepare release v0.13.0-incubating-rc1
49,767
20.02.2017 16:22:24
28,800
8eed1ec94b8070710d532358906a050cd4f727fc
Autoencoder script for acoustic signal modeling Closes
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/autoencoder-2layer.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Trains a 2-layer autoencoder with minibatch SGD, momentum and step-size decay.\n+# If invoked with H1 > H2 then it becomes a 'bowtie' structured autoencoder\n+# Weights are initialized using Glorot & Bengio (2010) AISTATS initialization.\n+# The script standardizes the input before training (can be turned off).\n+# Also, it randomly reshuffles rows before training.\n+# Currently, tanh is set to be the activation function.\n+# By re-implementing 'func' DML-bodied function, one can change the activation.\n+\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Filename where the input is stored\n+# H1 Int --- Number of neurons in the 1st hidden layer\n+# H2 Int --- Number of neurons in the 2nd hidden layer\n+# EPOCH Int --- Number of epochs to train for\n+# fmt String 'text' Output format (\"text\", \"csv\", \"binary\" etc.)\n+# OBJ Boolean FALSE If TRUE, Computes objective function value (squared-loss)\n+# at the end of each epoch. 
Note that, computing the full\n+# objective can take a lot of time.\n+# BATCH Int 256 Mini-batch size (training parameter)\n+# STEP Double 1e-5 Initial step size (training parameter)\n+# DECAY Double 0.95 Decays step size after each epoch (training parameter)\n+# MOMENTUM Double 0.9 Momentum parameter (training parameter)\n+# ---------------------------------------------------------------------------------------------\n+#\n+# OUTPUT PARAMETERS (all filenames):\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# W1_out String --- File to store weights between input layer and 1st hidden layer\n+# b1_out String --- File to store bias between input layer and 1st hidden layer\n+# W2_out String --- File to store weights between 1st hidden layer and 2nd hidden layer\n+# b2_out String --- File to store bias between 1st hidden layer and 2nd hidden layer\n+# W3_out String --- File to store weights between 2nd hidden layer and 3rd hidden layer\n+# b3_out String --- File to store bias between 2nd hidden layer and 3rd hidden layer\n+# W4_out String --- File to store weights between 3rd hidden layer and output layer\n+# b4_out String --- File to store bias between 3rd hidden layer and output layer\n+# HIDDEN String \" \" File to store the hidden (2nd) layer representation if needed\n+# ---------------------------------------------------------------------------------------------\n+#\n+# INVOCATION:\n+# -f autoencoder_2layer.dml --nvargs X=<input file> H1=500 H2=2 EPOCH=1 fmt=\"csv\"\n+# W1_out=<weights from input to 1st hidden layer> b1_out=<bias from input to 1st hidden layer>\n+# W2_out=<weights from 1st hidden layer to 2nd hidden layer> b2_out=<bias from 1st hidden layer to 2nd hidden layer>\n+# W3_out=<weights from 2nd hidden layer to 3rd hidden layer> b3_out=<bias from 2nd hidden layer to 3rd hidden layer>\n+# W4_out=<weights from 3rd hidden layer to output> b4_out=<bias from 3rd hidden layer to output>\n+#\n+\n+#implements tanh\n+#to use another activation fn, implement a DML-bodied function with\n+#function name 'func' and comment out this one\n+func = function(Matrix[Double] X) return(Matrix[Double] Y, Matrix[Double] Y_prime){\n+ Y = (exp(2*X) - 1)/(exp(2*X) + 1)\n+ Y_prime = 1 - Y^2\n+}\n+\n+feedForward = function(Matrix[Double] X,\n+ Matrix[Double] W1, Matrix[Double] b1,\n+ Matrix[Double] W2, Matrix[Double] b2,\n+ Matrix[Double] W3, Matrix[Double] b3,\n+ Matrix[Double] W4, Matrix[Double] b4,\n+ Matrix[Double] Y)\n+ return(Matrix[Double] H1, Matrix[Double] H1_prime,\n+ Matrix[Double] H2, Matrix[Double] H2_prime,\n+ Matrix[Double] H3, Matrix[Double] H3_prime,\n+ Matrix[Double] Yhat, Matrix[Double] Yhat_prime,\n+ Matrix[Double] E){\n+ H1_in = t(W1 %*% t(X) + b1)\n+ [H1, H1_prime] = func(H1_in)\n+\n+ H2_in = t(W2 %*% t(H1) + b2)\n+ [H2, H2_prime] = func(H2_in)\n+\n+ H3_in = t(W3 %*% t(H2) + b3)\n+ [H3, H3_prime] = func(H3_in)\n+\n+ Yhat_in = t(W4 %*% t(H3) + b4)\n+ [Yhat, Yhat_prime] = func(Yhat_in)\n+ E = Yhat - Y\n+}\n+\n+grad = function(Matrix[Double] X,\n+ Matrix[Double] H1, Matrix[Double] H1_prime,\n+ Matrix[Double] H2, Matrix[Double] H2_prime,\n+ Matrix[Double] H3, Matrix[Double] H3_prime,\n+ Matrix[Double] Yhat_prime,\n+ Matrix[Double] E,\n+ Matrix[Double] W1, Matrix[Double] W2, Matrix[Double] W3, Matrix[Double] W4)\n+ return(Matrix[Double] W1_grad, Matrix[Double] b1_grad,\n+ Matrix[Double] W2_grad, Matrix[Double] 
b2_grad,\n+ Matrix[Double] W3_grad, Matrix[Double] b3_grad,\n+ Matrix[Double] W4_grad, Matrix[Double] b4_grad){\n+ #backprop\n+ delta4 = E * Yhat_prime\n+ delta3 = H3_prime * (delta4 %*% W4)\n+ delta2 = H2_prime * (delta3 %*% W3)\n+ delta1 = H1_prime * (delta2 %*% W2)\n+\n+ #compute gradients\n+ b4_grad = t(colSums(delta4))\n+ b3_grad = t(colSums(delta3))\n+ b2_grad = t(colSums(delta2))\n+ b1_grad = t(colSums(delta1))\n+\n+ W4_grad = t(delta4) %*% H3\n+ W3_grad = t(delta3) %*% H2\n+ W2_grad = t(delta2) %*% H1\n+ W1_grad = t(delta1) %*% X\n+}\n+\n+obj = function(Matrix[Double] E) return(Double val){\n+ val = 0.5 * sum(E^2)\n+}\n+\n+batch_size = ifdef($BATCH, 256)\n+mu = ifdef($MOMENTUM, 0.9)\n+step = ifdef($STEP, 1e-5)\n+decay = ifdef($DECAY, 0.95)\n+hfile = ifdef($HIDDEN, \" \")\n+fmt = ifdef($fmt, \"text\")\n+full_obj = ifdef($OBJ, FALSE)\n+\n+X = read($X)\n+num_hidden1 = $H1\n+num_hidden2 = $H2\n+max_epochs = $EPOCH\n+\n+n = nrow(X)\n+m = ncol(X)\n+\n+#z-transform, whitening operator is better\n+means = colSums(X)/n\n+stds = sqrt((colSums(X^2)/n - means*means)*n/(n-1)) + 1e-17\n+X = (X - means)/stds\n+\n+#randomly reordering rows\n+permut = table(seq(1,n,1), order(target=Rand(rows=n, cols=1, min=0, max=1, pdf=\"uniform\"), by=1, index.return=TRUE), n, n)\n+X = permut %*% X\n+\n+W1 = sqrt(6)/sqrt(m + num_hidden1) * Rand(rows=num_hidden1, cols=m, min=-1, max=1, pdf=\"uniform\")\n+b1 = matrix(0, rows=num_hidden1, cols=1)\n+W2 = sqrt(6)/sqrt(num_hidden1 + num_hidden2) * Rand(rows=num_hidden2, cols=num_hidden1, min=-1, max=1, pdf=\"uniform\")\n+b2 = matrix(0, rows=num_hidden2, cols=1)\n+W3 = sqrt(6)/sqrt(num_hidden2 + num_hidden1) * Rand(rows=num_hidden1, cols=num_hidden2, min=-1, max=1, pdf=\"uniform\")\n+b3 = matrix(0, rows=num_hidden1, cols=1)\n+W4 = sqrt(6)/sqrt(num_hidden2 + m) * Rand(rows=m, cols=num_hidden1, min=-1, max=1, pdf=\"uniform\")\n+b4 = matrix(0, rows=m, cols=1)\n+\n+upd_W1 = matrix(0, rows=nrow(W1), cols=ncol(W1))\n+upd_b1 = matrix(0, rows=nrow(b1), cols=ncol(b1))\n+upd_W2 = matrix(0, rows=nrow(W2), cols=ncol(W2))\n+upd_b2 = matrix(0, rows=nrow(b2), cols=ncol(b2))\n+upd_W3 = matrix(0, rows=nrow(W3), cols=ncol(W3))\n+upd_b3 = matrix(0, rows=nrow(b3), cols=ncol(b3))\n+upd_W4 = matrix(0, rows=nrow(W4), cols=ncol(W4))\n+upd_b4 = matrix(0, rows=nrow(b4), cols=ncol(b4))\n+\n+if( full_obj ){\n+ [full_H1, full_H1_prime, full_H2, full_H2_prime, full_H3, full_H3_prime, full_Yhat, full_Yhat_prime, full_E] = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ full_o = obj(full_E)\n+ print(\"EPOCHS=\" + 0 + \" OBJ (FULL DATA): \" + full_o)\n+}\n+\n+iter = 0\n+num_iters_per_epoch = ceil(n / batch_size)\n+max_iterations = max_epochs * num_iters_per_epoch\n+#print(\"num_iters_per_epoch=\" + num_iters_per_epoch + \" max_iterations=\" + max_iterations)\n+beg = 1\n+while( iter < max_iterations ){\n+ end = beg + batch_size - 1\n+ if(end > n) end = n\n+ X_batch = X[beg:end,]\n+\n+ [H1, H1_prime, H2, H2_prime, H3, H3_prime, Yhat, Yhat_prime, E] = feedForward(X_batch, W1, b1, W2, b2, W3, b3, W4, b4, X_batch)\n+ [W1_grad, b1_grad, W2_grad, b2_grad, W3_grad, b3_grad, W4_grad, b4_grad] = grad(X_batch, H1, H1_prime, H2, H2_prime, H3, H3_prime, Yhat_prime, E, W1, W2, W3, W4)\n+\n+ o = obj(E)\n+ print(\"epochs=%5.4f BATCH beg=%d end=%d obj=%f\", (iter / num_iters_per_epoch), beg, end, o)\n+\n+ #update\n+ local_step = step / nrow(X_batch)\n+ upd_W1 = mu * upd_W1 - local_step * W1_grad\n+ upd_b1 = mu * upd_b1 - local_step * b1\n+ upd_W2 = mu * upd_W2 - local_step * W2_grad\n+ upd_b2 = mu * upd_b2 - 
local_step * b2\n+ upd_W3 = mu * upd_W3 - local_step * W3_grad\n+ upd_b3 = mu * upd_b3 - local_step * b3\n+ upd_W4 = mu * upd_W4 - local_step * W4_grad\n+ upd_b4 = mu * upd_b4 - local_step * b4\n+ W1 = W1 + upd_W1\n+ b1 = b1 + upd_b1\n+ W2 = W2 + upd_W2\n+ b2 = b2 + upd_b2\n+ W3 = W3 + upd_W3\n+ b3 = b3 + upd_b3\n+ W4 = W4 + upd_W4\n+ b4 = b4 + upd_b4\n+\n+ iter = iter + 1\n+ if(end == n) beg = 1\n+ else beg = end + 1\n+\n+ if( iter %% num_iters_per_epoch == 0 ) step = step * decay\n+\n+ if( full_obj & iter %% num_iters_per_epoch == 0 ){\n+ [full_H1, full_H1_prime, full_H2, full_H2_prime, full_H3, full_H3_prime, full_Yhat, full_Yhat_prime, full_E] = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ full_o = obj(full_E)\n+ epochs = iter %/% num_iters_per_epoch\n+ print(\"EPOCHS=\" + epochs + \" iter=\" + iter + \" OBJ (FULL DATA)=\" + full_o)\n+ }\n+}\n+\n+write(W1, $W1_out, format=fmt)\n+write(b1, $b1_out, format=fmt)\n+write(W2, $W2_out, format=fmt)\n+write(b2, $b2_out, format=fmt)\n+write(W3, $W3_out, format=fmt)\n+write(b3, $b3_out, format=fmt)\n+write(W4, $W4_out, format=fmt)\n+write(b4, $b4_out, format=fmt)\n+\n+if( hfile != \" \" ){\n+ [full_H1, full_H1_prime, full_H2, full_H2_prime, full_H3, full_H3_prime, full_Yhat, full_Yhat_prime, full_E] = feedForward(X, W1, b1, W2, b2, W3, b3, W4, b4, X)\n+ reordered_H = t(permut) %*% full_H2\n+ write(reordered_H, hfile, format=fmt)\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1339] Autoencoder script for acoustic signal modeling Closes #384.
49,717
21.02.2017 14:56:58
28,800
e1f713aaedb472842f448dabd7063978373836c8
Updated the document to correspond to the currently released artifacts Closes
[ { "change_type": "MODIFY", "old_path": "docs/release-process.md", "new_path": "docs/release-process.md", "diff": "@@ -102,86 +102,64 @@ The build artifacts should be downloaded from [https://dist.apache.org/repos/dis\nthis OS X example.\n# download artifacts\n- wget -r -nH -nd -np -R index.html* https://dist.apache.org/repos/dist/dev/incubator/systemml/0.11.0-incubating-rc1/\n+ wget -r -nH -nd -np -R 'index.html*' https://dist.apache.org/repos/dist/dev/incubator/systemml/0.13.0-incubating-rc1/\n# verify standalone tgz works\n- tar -xvzf systemml-0.11.0-incubating-standalone.tgz\n- cd systemml-0.11.0-incubating-standalone\n+ tar -xvzf systemml-0.13.0-incubating-bin.tgz\n+ cd systemml-0.13.0-incubating-bin\necho \"print('hello world');\" > hello.dml\n./runStandaloneSystemML.sh hello.dml\ncd ..\n- # verify main jar works\n- mkdir lib\n- cp -R systemml-0.11.0-incubating-standalone/lib/* lib/\n- rm lib/systemml-0.11.0-incubating.jar\n- java -cp ./lib/*:systemml-0.11.0-incubating.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n-\n- # verify src works\n- tar -xvzf systemml-0.11.0-incubating-src.tgz\n- cd systemml-0.11.0-incubating-src\n- mvn clean package -P distribution\n- cd target/\n- java -cp ./lib/*:systemml-0.11.0-incubating.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n- java -cp ./lib/*:SystemML.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n- cd ..\n+ # verify standalon zip works\n+ rm -rf systemml-0.13.0-incubating-bin\n+ unzip systemml-0.13.0-incubating-bin.zip\n+ cd systemml-0.13.0-incubating-bin\n+ echo \"print('hello world');\" > hello.dml\n+ ./runStandaloneSystemML.sh hello.dml\ncd ..\n- # verify distrib tgz works\n- tar -xvzf systemml-0.11.0-incubating.tgz\n- cd systemml-0.11.0-incubating\n- java -cp ../lib/*:SystemML.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n-\n- # verify spark batch mode\n- export SPARK_HOME=/Users/deroneriksson/spark-1.5.1-bin-hadoop2.6\n- $SPARK_HOME/bin/spark-submit SystemML.jar -s \"print('hello world');\" -exec hybrid_spark\n-\n- # verify hadoop batch mode\n- hadoop jar SystemML.jar -s \"print('hello world');\"\n-\n-\n-Here is an example of doing a basic\n-sanity check on OS X after building the artifacts manually.\n-\n- # build distribution artifacts\n- mvn clean package -P distribution\n-\n- cd target\n-\n- # verify main jar works\n- java -cp ./lib/*:systemml-0.11.0-incubating.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n-\n- # verify SystemML.jar works\n- java -cp ./lib/*:SystemML.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n-\n# verify src works\n- tar -xvzf systemml-0.11.0-incubating-src.tgz\n- cd systemml-0.11.0-incubating-src\n+ tar -xvzf systemml-0.13.0-incubating-src.tgz\n+ cd systemml-0.13.0-incubating-src\nmvn clean package -P distribution\ncd target/\n- java -cp ./lib/*:systemml-0.11.0-incubating.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n- java -cp ./lib/*:SystemML.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n- cd ..\n- cd ..\n-\n- # verify standalone tgz works\n- tar -xvzf systemml-0.11.0-incubating-standalone.tgz\n- cd systemml-0.11.0-incubating-standalone\n- echo \"print('hello world');\" > hello.dml\n- ./runStandaloneSystemML.sh hello.dml\n- cd ..\n-\n- # verify distrib tgz works\n- tar -xvzf systemml-0.11.0-incubating.tgz\n- cd systemml-0.11.0-incubating\n- java -cp ../lib/*:SystemML.jar org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n+ java -cp 
\"./lib/*:systemml-0.13.0-incubating.jar\" org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n+ java -cp \"./lib/*:SystemML.jar\" org.apache.sysml.api.DMLScript -s \"print('hello world');\"\n+ cd ../..\n# verify spark batch mode\n- export SPARK_HOME=/Users/deroneriksson/spark-1.5.1-bin-hadoop2.6\n- $SPARK_HOME/bin/spark-submit SystemML.jar -s \"print('hello world');\" -exec hybrid_spark\n+ export SPARK_HOME=~/spark-2.1.0-bin-hadoop2.7\n+ cd systemml-0.13.0-incubating-bin/target/lib\n+ $SPARK_HOME/bin/spark-submit systemml-0.13.0-incubating.jar -s \"print('hello world');\" -exec hybrid_spark\n# verify hadoop batch mode\n- hadoop jar SystemML.jar -s \"print('hello world');\"\n+ hadoop jar systemml-0.13.0-incubating.jar -s \"print('hello world');\"\n+\n+\n+ # verify python artifact\n+ # install numpy, pandas, scipy & set SPARK_HOME\n+ pip install numpy\n+ pip install pandas\n+ pip install scipy\n+ export SPARK_HOME=~/spark-2.1.0-bin-hadoop2.7\n+ # get into the pyspark prompt\n+ cd systemml-0.13.0\n+ $SPARK_HOME/bin/pyspark --driver-class-path systemml-java/systemml-0.13.0-incubating.jar\n+ # Use this program at the prompt:\n+ import systemml as sml\n+ import numpy as np\n+ m1 = sml.matrix(np.ones((3,3)) + 2)\n+ m2 = sml.matrix(np.ones((3,3)) + 3)\n+ m2 = m1 * (m2 + m1)\n+ m4 = 1.0 - m2\n+ m4.sum(axis=1).toNumPy()\n+\n+ # This should be printed\n+ # array([[-60.],\n+ # [-60.],\n+ # [-60.]])\n+\n## Python Tests\n@@ -229,8 +207,8 @@ The project should be built using the `src` (tgz and zip) artifacts.\nIn addition, the test suite should be run using an `src` artifact and\nthe tests should pass.\n- tar -xvzf systemml-0.11.0-incubating-src.tgz\n- cd systemml-0.11.0-incubating-src\n+ tar -xvzf systemml-0.13.0-incubating-src.tgz\n+ cd systemml-0.13.0-incubating-src\nmvn clean package -P distribution\nmvn verify\n@@ -246,13 +224,14 @@ standalone distributions.\nHere is an example based on the [Standalone Guide](http://apache.github.io/incubator-systemml/standalone-guide.html)\ndemonstrating the execution of an algorithm (on OS X).\n- $ tar -xvzf systemml-0.11.0-incubating-standalone.tgz\n- $ cd systemml-0.11.0-incubating-standalone\n- $ wget -P data/ http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data\n- $ echo '{\"rows\": 306, \"cols\": 4, \"format\": \"csv\"}' > data/haberman.data.mtd\n- $ echo '1,1,1,2' > data/types.csv\n- $ echo '{\"rows\": 1, \"cols\": 4, \"format\": \"csv\"}' > data/types.csv.mtd\n- $ ./runStandaloneSystemML.sh scripts/algorithms/Univar-Stats.dml -nvargs X=data/haberman.data TYPES=data/types.csv STATS=data/univarOut.mtx CONSOLE_OUTPUT=TRUE\n+ tar -xvzf systemml-0.13.0-incubating-bin.tgz\n+ cd systemml-0.13.0-incubating-bin\n+ wget -P data/ http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data\n+ echo '{\"rows\": 306, \"cols\": 4, \"format\": \"csv\"}' > data/haberman.data.mtd\n+ echo '1,1,1,2' > data/types.csv\n+ echo '{\"rows\": 1, \"cols\": 4, \"format\": \"csv\"}' > data/types.csv.mtd\n+ ./runStandaloneSystemML.sh scripts/algorithms/Univar-Stats.dml -nvargs X=data/haberman.data TYPES=data/types.csv STATS=data/univarOut.mtx CONSOLE_OUTPUT=TRUE\n+ cd ..\n## Single-Node Spark\n@@ -263,13 +242,13 @@ Verify that SystemML runs algorithms on Spark locally.\nHere is an example of running the `Univar-Stats.dml` algorithm on random generated data.\n- $ tar -xvzf systemml-0.11.0-incubating.tgz\n- $ cd systemml-0.11.0-incubating\n- $ export SPARK_HOME=/Users/deroneriksson/spark-1.5.1-bin-hadoop2.6\n- $ 
$SPARK_HOME/bin/spark-submit SystemML.jar -f scripts/datagen/genRandData4Univariate.dml -exec hybrid_spark -args 1000000 100 10 1 2 3 4 uni.mtx\n- $ echo '1' > uni-types.csv\n- $ echo '{\"rows\": 1, \"cols\": 1, \"format\": \"csv\"}' > uni-types.csv.mtd\n- $ $SPARK_HOME/bin/spark-submit SystemML.jar -f scripts/algorithms/Univar-Stats.dml -exec hybrid_spark -nvargs X=uni.mtx TYPES=uni-types.csv STATS=uni-stats.txt CONSOLE_OUTPUT=TRUE\n+ cd systemml-0.13.0-incubating-bin/lib\n+ export SPARK_HOME=~/spark-2.1.0-bin-hadoop2.7\n+ $SPARK_HOME/bin/spark-submit systemml-0.13.0-incubating.jar -f ../scripts/datagen/genRandData4Univariate.dml -exec hybrid_spark -args 1000000 100 10 1 2 3 4 uni.mtx\n+ echo '1' > uni-types.csv\n+ echo '{\"rows\": 1, \"cols\": 1, \"format\": \"csv\"}' > uni-types.csv.mtd\n+ $SPARK_HOME/bin/spark-submit systemml-0.13.0-incubating.jar -f ../scripts/algorithms/Univar-Stats.dml -exec hybrid_spark -nvargs X=uni.mtx TYPES=uni-types.csv STATS=uni-stats.txt CONSOLE_OUTPUT=TRUE\n+ cd ..\n## Single-Node Hadoop\n@@ -280,7 +259,8 @@ Verify that SystemML runs algorithms on Hadoop locally.\nBased on the \"Single-Node Spark\" setup above, the `Univar-Stats.dml` algorithm could be run as follows:\n- $ hadoop jar SystemML.jar -f scripts/algorithms/Univar-Stats.dml -nvargs X=uni.mtx TYPES=uni-types.csv STATS=uni-stats.txt CONSOLE_OUTPUT=TRUE\n+ cd systemml-0.13.0-incubating-bin/lib\n+ hadoop jar systemml-0.13.0-incubating.jar -f ../scripts/algorithms/Univar-Stats.dml -nvargs X=uni.mtx TYPES=uni-types.csv STATS=uni-stats.txt CONSOLE_OUTPUT=TRUE\n## Notebooks\n@@ -313,5 +293,3 @@ has been approved.\nTo be written. (What steps need to be done? How is the release deployed to the central maven repo? What updates need to\nhappen to the main website, such as updating the Downloads page? Where do the release notes for the release go?)\n-\n-\n" } ]
Java
Apache License 2.0
apache/systemds
Updated document to correspond to the currently released artifacts Closes #403
49,772
23.02.2017 14:52:51
28,800
0daae6cf05961d693e1797c21333904b24d45e2f
Updating Preprocessing Notebook Updates to the Preprocessing notebook for Spark 2.x and general cleanup.
[ { "change_type": "MODIFY", "old_path": "projects/breast_cancer/Preprocessing.ipynb", "new_path": "projects/breast_cancer/Preprocessing.ipynb", "diff": "\"cells\": [\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML\\n\",\n\"## Preprocessing\\n\",\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Setup\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\"import openslide\\n\",\n\"from openslide.deepzoom import DeepZoomGenerator\\n\",\n\"import pandas as pd\\n\",\n- \"from pyspark.mllib.linalg import Vectors\\n\",\n+ \"from pyspark.ml.linalg import Vectors\\n\",\n\"from scipy.ndimage.morphology import binary_fill_holes\\n\",\n\"from skimage.color import rgb2gray\\n\",\n\"from skimage.feature import canny\\n\",\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Open Whole-Slide Image\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\" An OpenSlide object representing a whole-slide image.\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" if training:\\n\",\n- \" filename = os.path.join(folder, \\\"training_image_data\\\", \\\"TUPAC-TR-{}.svs\\\".format(str(slide_num).zfill(3)))\\n\",\n+ \" filename = os.path.join(folder, \\\"training_image_data\\\",\\n\",\n+ \" \\\"TUPAC-TR-{}.svs\\\".format(str(slide_num).zfill(3)))\\n\",\n\" else:\\n\",\n\" # Testing images\\n\",\n- \" filename = os.path.join(folder, \\\"testing_image_data\\\", \\\"TUPAC-TE-{}.svs\\\".format(str(slide_num).zfill(3)))\\n\",\n+ \" filename = os.path.join(folder, \\\"testing_image_data\\\",\\n\",\n+ \" \\\"TUPAC-TE-{}.svs\\\".format(str(slide_num).zfill(3)))\\n\",\n\" slide = openslide.open_slide(filename)\\n\",\n\" return slide\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Create Tile Generator\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Determine 20x Magnification Zoom Level\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Generate Tile Indices For Whole-Slide Image.\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ 
\"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Generate Tile From Tile Index\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\" slide = open_slide(slide_num, folder, training)\\n\",\n\" # Create tile generator.\\n\",\n\" generator = create_tile_generator(slide, tile_size, overlap)\\n\",\n- \" # Generate tile\\n\",\n+ \" # Generate tile.\\n\",\n\" tile = np.array(generator.get_tile(zoom_level, (col, row)))\\n\",\n\" return (slide_num, tile)\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Filter Tile For Dimensions & Tissue Threshold\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Generate Flattened Samples From Tile\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\" x, y, ch = tile.shape\\n\",\n\" # 1. Reshape into a 5D array of (num_x, sample_size_x, num_y, sample_size_y, ch), where\\n\",\n\" # num_x and num_y are the number of chopped tiles on the x and y axes, respectively.\\n\",\n- \" # 2. Swap sample_size_x and num_y axes to create (num_x, num_y, sample_size_x, sample_size_y, ch).\\n\",\n+ \" # 2. Swap sample_size_x and num_y axes to create\\n\",\n+ \" # (num_x, num_y, sample_size_x, sample_size_y, ch).\\n\",\n\" # 3. Combine num_x and num_y into single axis, returning\\n\",\n\" # (num_samples, sample_size_x, sample_size_y, ch).\\n\",\n\" # 4. 
Swap axes from (num_samples, sample_size_x, sample_size_y, ch) to\\n\",\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Visualize Tile\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Visualize Sample\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Get Ground Truth Labels\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\" labels = pd.read_csv(filename, names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n\" labels[\\\"slide_num\\\"] = range(1, 501)\\n\",\n\"\\n\",\n- \" # Create slide_num -> tumor_score, and slide_num -> molecular_score dictionaries\\n\",\n+ \" # Create slide_num -> tumor_score, and slide_num -> molecular_score dictionaries.\\n\",\n\" tumor_score_dict = {int(s): int(l) for s,l in zip(labels.slide_num, labels.tumor_score)}\\n\",\n\" molecular_score_dict = {int(s): float(l) for s,l in zip(labels.slide_num, labels.molecular_score)}\\n\",\n\" return tumor_score_dict, molecular_score_dict\"\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Process All Slides Into A Saved Spark DataFrame\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\" \\n\",\n\" Args:\\n\",\n\" slide_nums: List of whole-slide numbers to process.\\n\",\n- \" folder: Directory in which the slides folder is stored, as a string.\\n\",\n+ \" folder: Local directory in which the slides folder is stored, as a string.\\n\",\n\" This should contain either a `training_image_data` folder with\\n\",\n\" images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`\\n\",\n\" folder with images in the format `TUPAC-TE-###.svs`.\\n\",\n\" molecular score, and the sample stretched out into a Vector.\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" slides = sc.parallelize(slide_nums)\\n\",\n- \" # Force even partitioning by collecting and parallelizing -- for memory issues\\n\",\n+ \" # Force even partitioning by collecting and parallelizing -- for memory issues.\\n\",\n+ \" # TODO: Explore computing the ideal paritition sizes based on projected number\\n\",\n+ \" # of tiles after filtering.\\n\",\n\" ## HACK Note: This was a PySpark bug with a fix in the master branch now.\\n\",\n- \" tile_indices = slides.flatMap(lambda slide: process_slide(slide, folder, training, tile_size, overlap)).collect()\\n\",\n- \" tile_indices = sc.parallelize(tile_indices, num_partitions)\\n\",\n+ \" #tile_indices = slides.flatMap(\\n\",\n+ \" # lambda slide: process_slide(slide, folder, training, tile_size, 
overlap)).collect()\\n\",\n+ \" #tile_indices = sc.parallelize(tile_indices, num_partitions)\\n\",\n\" ## END HACK -- update later\\n\",\n+ \" tile_indices = (slides.flatMap(\\n\",\n+ \" lambda slide: process_slide(slide, folder, training, tile_size, overlap)))\\n\",\n+ \" tile_indices = tile_indices.repartition(num_partitions)\\n\",\n\" tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))\\n\",\n\" filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))\\n\",\n\" samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))\\n\",\n\" if training:\\n\",\n\" tumor_score_dict, molecular_score_dict = create_ground_truth_maps(folder)\\n\",\n- \" samples_with_labels = (samples.map(lambda tup: \\n\",\n- \" (tup[0], tumor_score_dict[tup[0]], molecular_score_dict[tup[0]],\\n\",\n- \" Vectors.dense(tup[1]))))\\n\",\n+ \" samples_with_labels = (samples.map(\\n\",\n+ \" lambda tup: (tup[0], tumor_score_dict[tup[0]],\\n\",\n+ \" molecular_score_dict[tup[0]], Vectors.dense(tup[1]))))\\n\",\n\" df = samples_with_labels.toDF([\\\"slide_num\\\", \\\"tumor_score\\\", \\\"molecular_score\\\", \\\"sample\\\"])\\n\",\n- \" df = df.select(df.slide_num.astype(\\\"int\\\"), df.tumor_score.astype(\\\"int\\\"), df.molecular_score, df[\\\"sample\\\"])\\n\",\n+ \" df = df.select(df.slide_num.astype(\\\"int\\\"), df.tumor_score.astype(\\\"int\\\"),\\n\",\n+ \" df.molecular_score, df[\\\"sample\\\"])\\n\",\n\" else: # testing data -- no labels\\n\",\n\" df = samples.toDF([\\\"slide_num\\\", \\\"sample\\\"])\\n\",\n\" df = df.select(df.slide_num.astype(\\\"int\\\"), df[\\\"sample\\\"])\\n\",\n- \" df = df.repartition(num_partitions) # Even out the partitions\\n\",\n+ \" #df = df.repartition(num_partitions) # Even out the partitions\\n\",\n\" return df\\n\",\n\"\\n\",\n- \"def save(df, training=True, sample_size=256, grayscale=False, mode=\\\"error\\\"):\\n\",\n+ \"def save(df, filename, sample_size=256, grayscale=False, folder=\\\"data\\\",\\n\",\n+ \" mode=\\\"error\\\", format=\\\"parquet\\\", file_size=128):\\n\",\n\" \\\"\\\"\\\"\\n\",\n- \" Save a preprocessed DataFrame of samples in Parquet format.\\n\",\n- \" \\n\",\n- \" The filename will be formatted as follows:\\n\",\n- \" `samples_{labels|testing}_SAMPLE-SIZE[_grayscale].parquet`\\n\",\n+ \" Save a preprocessed DataFrame with a constraint on the file sizes.\\n\",\n\" \\n\",\n\" Args:\\n\",\n- \" df: A DataFrame in which each row contains the slide number, tumor score,\\n\",\n- \" molecular score, and the sample stretched out into a Vector.\\n\",\n- \" training: Boolean for training or testing datasets.\\n\",\n+ \" df: A DataFrame.\\n\",\n+ \" filename: Name of the file to save.\\n\",\n\" sample_size: The width and height of the square samples.\\n\",\n\" grayscale: Whether or not to the samples are in grayscale format, rather\\n\",\n\" than RGB.\\n\",\n+ \" folder: HDFS directory in which to save the DataFrame.\\n\",\n\" mode: Specifies the behavior of `df.write.mode` when the data already exists.\\n\",\n\" Options include:\\n\",\n\" * `append`: Append contents of this :class:`DataFrame` to existing data.\\n\",\n\" * `overwrite`: Overwrite existing data.\\n\",\n\" * `error`: Throw an exception if data already exists.\\n\",\n\" * `ignore`: Silently ignore this operation if data already exists.\\n\",\n+ \" format: The format in which to save the DataFrame.\\n\",\n+ \" file_size: Size in MB of each saved file. 
128 MB is an empirically ideal size.\\n\",\n\" \\\"\\\"\\\"\\n\",\n- \" filename = \\\"samples_{}_{}{}.parquet\\\".format(\\\"labels\\\" if training else \\\"testing\\\",\\n\",\n- \" sample_size,\\n\",\n- \" \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n- \" filepath = os.path.join(\\\"data\\\", filename)\\n\",\n- \" df.write.mode(mode).save(filepath, format=\\\"parquet\\\")\"\n+ \" channels = 1 if grayscale else 3\\n\",\n+ \" row_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one row in MB\\n\",\n+ \" rows_per_file = round(file_size / row_mb)\\n\",\n+ \" filepath = os.path.join(folder, filename)\\n\",\n+ \" df.write.option(\\\"maxRecordsPerFile\\\", rows_per_file).mode(mode).save(filepath, format=format)\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"---\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Execute Preprocessing & Save\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# Get list of image numbers, minus the broken ones\\n\",\n+ \"# Get list of image numbers, minus the broken ones.\\n\",\n\"broken = {2, 45, 91, 112, 242, 256, 280, 313, 329, 467}\\n\",\n\"slide_nums = sorted(set(range(1,501)) - broken)\\n\",\n\"\\n\",\n\"sample_size = 256\\n\",\n\"grayscale = False\\n\",\n\"num_partitions = 20000\\n\",\n- \"folder = \\\"/home/MDM/breast_cancer/data\\\"\"\n+ \"folder = \\\"/home/MDM/breast_cancer/data\\\"\\n\",\n+ \"filename = \\\"samples_{}_{}{}.parquet\\\".format(\\n\",\n+ \" \\\"labels\\\" if training else \\\"testing\\\", sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n+ \"tr_filename = \\\"train_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n+ \"val_filename = \\\"val_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# Process all slides\\n\",\n+ \"# Process all slides.\\n\",\n\"df = preprocess(slide_nums, tile_size=tile_size, sample_size=sample_size, grayscale=grayscale,\\n\",\n\" training=training, num_partitions=num_partitions, folder=folder)\\n\",\n\"df\"\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# Save DataFrame of samples\\n\",\n- \"save(df, sample_size=sample_size, grayscale=grayscale, training=training)\"\n+ \"# Save DataFrame of samples.\\n\",\n+ \"save(df, filename, sample_size, grayscale)\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"---\"\n- ]\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n},\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n\"source\": [\n\"# Split Into Separate Train & Validation DataFrames Based On Slide 
Number\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"### TODO: Wrap this in a function with appropriate default arguments\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"filename = \\\"samples_{}_{}{}.parquet\\\".format(\\\"labels\\\" if training else \\\"testing\\\",\\n\",\n- \" sample_size,\\n\",\n- \" \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n+ \"# Load full DataFrame from disk.\\n\",\n\"filepath = os.path.join(\\\"data\\\", filename)\\n\",\n- \"df = sqlContext.read.load(filepath)\"\n+ \"df = spark.read.load(filepath)\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true,\n+ \"scrolled\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"labels = pd.read_csv(\\\"data/training_ground_truth.csv\\\", names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n+ \"# Determine how to split data.\\n\",\n+ \"labels = pd.read_csv(\\n\",\n+ \" \\\"data/training_ground_truth.csv\\\", names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n\"labels[\\\"slide_num\\\"] = range(1, 501)\\n\",\n\"\\n\",\n- \"# Create slide_num -> tumor_score and slide_num -> molecular_score dictionaries\\n\",\n- \"tumor_score_dict = {int(s): int(l) for s,l in zip(labels.slide_num, labels.tumor_score)}\\n\",\n- \"molecular_score_dict = {int(s): float(l) for s,l in zip(labels.slide_num, labels.molecular_score)}\"\n+ \"# # Create slide_num -> tumor_score and slide_num -> molecular_score dictionaries\\n\",\n+ \"# tumor_score_dict = {int(s): int(l) for s,l in zip(labels.slide_num, labels.tumor_score)}\\n\",\n+ \"# molecular_score_dict = {int(s): float(l) for s,l in zip(labels.slide_num, labels.molecular_score)}\\n\",\n+ \"\\n\",\n+ \"print(labels[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n+ \"print(labels[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n+ \"print(labels[labels.slide_num <= 400][\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n+ \"print(labels[labels.slide_num > 400][\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"print(labels[\\\"tumor_score\\\"].value_counts() / labels.tumor_score.count())\\n\",\n- \"print(labels[labels.slide_num > 400][\\\"tumor_score\\\"].value_counts() / labels[labels.slide_num > 400].tumor_score.count())\"\n+ \"# Split data into train and validation sets.\\n\",\n+ \"# TODO: Stratified random split.\\n\",\n+ \"train = df.where(df.slide_num <= 400)\\n\",\n+ \"val = df.where(df.slide_num > 400)\\n\",\n+ \"train, val\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"train = (df.where(df.slide_num <= 400)\\n\",\n- \" .rdd\\n\",\n+ \"# Add row indices for use with SystemML.\\n\",\n+ \"# TODO: Wrap this in a function with appropriate default arguments.\\n\",\n+ \"train = (train.rdd\\n\",\n\" .zipWithIndex()\\n\",\n- \" 
.map(lambda r: (r[1] + 1, *r[0]))\\n\",\n+ \" .map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing\\n\",\n\" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n- \"train = train.select(train[\\\"__INDEX\\\"].astype(\\\"int\\\"), train.slide_num.astype(\\\"int\\\"), train.tumor_score.astype(\\\"int\\\"),\\n\",\n- \" train.molecular_score, train[\\\"sample\\\"])\\n\",\n+ \"train = train.select(train[\\\"__INDEX\\\"].astype(\\\"int\\\"), train.slide_num.astype(\\\"int\\\"), \\n\",\n+ \" train.tumor_score.astype(\\\"int\\\"), train.molecular_score, train[\\\"sample\\\"])\\n\",\n\"\\n\",\n- \"val = (df.where(df.slide_num > 400)\\n\",\n- \" .rdd\\n\",\n+ \"val = (val.rdd\\n\",\n\" .zipWithIndex()\\n\",\n- \" .map(lambda r: (r[1] + 1, *r[0]))\\n\",\n+ \" .map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing\\n\",\n\" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n- \"val = val.select(val[\\\"__INDEX\\\"].astype(\\\"int\\\"), val.slide_num.astype(\\\"int\\\"), val.tumor_score.astype(\\\"int\\\"),\\n\",\n- \" val.molecular_score, val[\\\"sample\\\"])\\n\",\n+ \"val = val.select(val[\\\"__INDEX\\\"].astype(\\\"int\\\"), val.slide_num.astype(\\\"int\\\"),\\n\",\n+ \" val.tumor_score.astype(\\\"int\\\"), val.molecular_score, val[\\\"sample\\\"])\\n\",\n\"\\n\",\n\"train, val\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# Write\\n\",\n- \"# TODO: Wrap this in a function with appropriate default arguments\\n\",\n- \"mode = \\\"error\\\"\\n\",\n- \"tr_filename = os.path.join(\\\"data\\\", \\\"train_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \"val_filename = os.path.join(\\\"data\\\", \\\"val_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \"train.write.mode(mode).save(tr_filename, format=\\\"parquet\\\")\\n\",\n- \"val.write.mode(mode).save(val_filename, format=\\\"parquet\\\")\"\n+ \"# Save train and validation DataFrames.\\n\",\n+ \"save(train, tr_filename, sample_size, grayscale)\\n\",\n+ \"save(val, val_filename, sample_size, grayscale)\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"source\": [\n\"---\"\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Sample Data\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"### TODO: Wrap this in a function with appropriate default arguments\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"tr_filename = os.path.join(\\\"data\\\", \\\"train_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \"val_filename = os.path.join(\\\"data\\\", \\\"val_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \"train = sqlContext.read.load(tr_filename)\\n\",\n- \"val = sqlContext.read.load(val_filename)\"\n+ \"# Load train and validation DataFrames 
from disk.\\n\",\n+ \"train = spark.read.load(tr_filename)\\n\",\n+ \"val = spark.read.load(val_filename)\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# Take a stratified sample\\n\",\n+ \"# Take a stratified sample.\\n\",\n\"p=0.01\\n\",\n\"train_sample = train.drop(\\\"__INDEX\\\").sampleBy(\\\"tumor_score\\\", fractions={1: p, 2: p, 3: p}, seed=42)\\n\",\n\"val_sample = val.drop(\\\"__INDEX\\\").sampleBy(\\\"tumor_score\\\", fractions={1: p, 2: p, 3: p}, seed=42)\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# TODO: turn this into a function\\n\",\n- \"# Repartition to get ~128MB partitions\\n\",\n- \"\\n\",\n- \"# TODO: Update logic to use the following to automatically\\n\",\n- \"# select the number of partitions:\\n\",\n- \"# ex_mb = SIZE*SIZE*CHANNELS * 8 / 1024 / 1024 # size of one example in MB\\n\",\n- \"# ideal_part_size_mb = 128 # 128 MB partitions sizes are empirically ideal\\n\",\n- \"# ideal_exs_per_part = round(ideal_part_size_mb / ex_mb)\\n\",\n- \"# tr_parts = round(tc / ideal_exs_per_part)\\n\",\n- \"# val_parts = round(vc / ideal_exs_per_part)\\n\",\n- \"\\n\",\n- \"if grayscale:\\n\",\n- \" train_sample = train_sample.repartition(150) #300) #3000)\\n\",\n- \" val_sample = val_sample.repartition(40) #80) #800)\\n\",\n- \"else: # 3x\\n\",\n- \" train_sample = train_sample.repartition(450) #900) #9000)\\n\",\n- \" val_sample = val_sample.repartition(120) #240) #2400)\\n\",\n- \"\\n\",\n- \"# Reassign row indices\\n\",\n+ \"# Reassign row indices.\\n\",\n+ \"# TODO: Wrap this in a function with appropriate default arguments.\\n\",\n\"train_sample = (\\n\",\n\" train_sample.rdd\\n\",\n\" .zipWithIndex()\\n\",\n\" .map(lambda r: (r[1] + 1, *r[0]))\\n\",\n\" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n- \"train_sample = train_sample.select(train_sample[\\\"__INDEX\\\"].astype(\\\"int\\\"), train_sample.slide_num.astype(\\\"int\\\"), \\n\",\n- \" train_sample.tumor_score.astype(\\\"int\\\"), train_sample.molecular_score, train_sample[\\\"sample\\\"])\\n\",\n+ \"train_sample = train_sample.select(train_sample[\\\"__INDEX\\\"].astype(\\\"int\\\"),\\n\",\n+ \" train_sample.slide_num.astype(\\\"int\\\"), \\n\",\n+ \" train_sample.tumor_score.astype(\\\"int\\\"),\\n\",\n+ \" train_sample.molecular_score,\\n\",\n+ \" train_sample[\\\"sample\\\"])\\n\",\n\"\\n\",\n\"val_sample = (\\n\",\n\" val_sample.rdd\\n\",\n\" .zipWithIndex()\\n\",\n\" .map(lambda r: (r[1] + 1, *r[0]))\\n\",\n\" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n- \"val_sample = val_sample.select(val_sample[\\\"__INDEX\\\"].astype(\\\"int\\\"), val_sample.slide_num.astype(\\\"int\\\"), \\n\",\n- \" val_sample.tumor_score.astype(\\\"int\\\"), val_sample.molecular_score, val_sample[\\\"sample\\\"])\\n\",\n- \"\\n\",\n- \"train_sample, val_sample\\n\",\n+ \"val_sample = val_sample.select(val_sample[\\\"__INDEX\\\"].astype(\\\"int\\\"),\\n\",\n+ \" val_sample.slide_num.astype(\\\"int\\\"), \\n\",\n+ \" val_sample.tumor_score.astype(\\\"int\\\"),\\n\",\n+ \" val_sample.molecular_score,\\n\",\n+ \" val_sample[\\\"sample\\\"])\\n\",\n\"\\n\",\n- \"# Write\\n\",\n- \"# TODO: Wrap this in a function with 
appropriate default arguments\\n\",\n- \"mode = \\\"error\\\"\\n\",\n- \"tr_sample_filename = os.path.join(\\\"data\\\", \\\"train_{}_sample_{}{}.parquet\\\".format(p, sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \"val_sample_filename = os.path.join(\\\"data\\\", \\\"val_{}_sample_{}{}.parquet\\\".format(p, sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \"train_sample.write.mode(mode).save(tr_sample_filename, format=\\\"parquet\\\")\\n\",\n- \"val_sample.write.mode(mode).save(val_sample_filename, format=\\\"parquet\\\")\"\n+ \"train_sample, val_sample\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Save train and validation DataFrames.\\n\",\n+ \"tr_sample_filename = \\\"train_{}_sample_{}{}.parquet\\\".format(p, sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n+ \"val_sample_filename = \\\"val_{}_sample_{}{}.parquet\\\".format(p, sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n+ \"save(train_sample, tr_sample_filename, sample_size, grayscale)\\n\",\n+ \"save(val_sample, val_sample_filename, sample_size, grayscale)\"\n]\n}\n],\n\"metadata\": {\n\"kernelspec\": {\n- \"display_name\": \"Python 3\",\n+ \"display_name\": \"Python 3 + Spark 2.x + SystemML\",\n\"language\": \"python\",\n- \"name\": \"python3\"\n+ \"name\": \"pyspark3_2.x\"\n},\n\"language_info\": {\n\"codemirror_mode\": {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1185] Updating Preprocessing Notebook Updates to the Preprocessing notebook for Spark 2.x and general cleanup.
49,772
24.02.2017 17:42:56
28,800
b6a46500df3eb764b79f887d085c8c6578f8c3fd
Updating Preprocessing Notebook Updating the Preprocessing notebook with randomized train/val split on the original slide numbers.
[ { "change_type": "MODIFY", "old_path": "projects/breast_cancer/Preprocessing.ipynb", "new_path": "projects/breast_cancer/Preprocessing.ipynb", "diff": "\"from openslide.deepzoom import DeepZoomGenerator\\n\",\n\"import pandas as pd\\n\",\n\"from pyspark.ml.linalg import Vectors\\n\",\n+ \"import pyspark.sql.functions as F\\n\",\n\"from scipy.ndimage.morphology import binary_fill_holes\\n\",\n\"from skimage.color import rgb2gray\\n\",\n\"from skimage.feature import canny\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false,\n+ \"collapsed\": true,\n\"deletable\": true,\n\"editable\": true\n},\n\"metadata\": {\n\"collapsed\": false,\n\"deletable\": true,\n- \"editable\": true,\n- \"scrolled\": true\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"# Determine how to split data.\\n\",\n+ \"# Create DataFrame of labels.\\n\",\n\"labels = pd.read_csv(\\n\",\n\" \\\"data/training_ground_truth.csv\\\", names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n- \"labels[\\\"slide_num\\\"] = range(1, 501)\\n\",\n+ \"labels[\\\"slide_num\\\"] = range(1, 501) # add slide num column\\n\",\n\"\\n\",\n- \"# # Create slide_num -> tumor_score and slide_num -> molecular_score dictionaries\\n\",\n- \"# tumor_score_dict = {int(s): int(l) for s,l in zip(labels.slide_num, labels.tumor_score)}\\n\",\n- \"# molecular_score_dict = {int(s): float(l) for s,l in zip(labels.slide_num, labels.molecular_score)}\\n\",\n+ \"# Create DataFrames of the slide numbers being used (i.e. without the broken ones)\\n\",\n+ \"# and merge with labels.\\n\",\n+ \"slide_nums_df = pd.DataFrame(slide_nums, columns=[\\\"slide_num\\\"])\\n\",\n+ \"labeled_slide_nums_df = pd.merge(slide_nums_df, labels, how=\\\"inner\\\", on=\\\"slide_num\\\")\\n\",\n\"\\n\",\n- \"print(labels[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n- \"print(labels[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n- \"print(labels[labels.slide_num <= 400][\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n- \"print(labels[labels.slide_num > 400][\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\"\n+ \"# Examine class distribution.\\n\",\n+ \"for pdf in [labels, labeled_slide_nums_df]:\\n\",\n+ \" print(pdf.count())\\n\",\n+ \" print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n+ \" print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n+ \" print()\"\n]\n},\n{\n},\n\"outputs\": [],\n\"source\": [\n- \"# Split data into train and validation sets.\\n\",\n- \"# TODO: Stratified random split.\\n\",\n- \"train = df.where(df.slide_num <= 400)\\n\",\n- \"val = df.where(df.slide_num > 400)\\n\",\n+ \"# Randomly split slides 80%/20% into train and validation sets.\\n\",\n+ \"train_nums_pdf = labeled_slide_nums_df.sample(frac=0.8, random_state=24)\\n\",\n+ \"val_nums_pdf = labeled_slide_nums_df.drop(train_nums_pdf.index)\\n\",\n+ \"\\n\",\n+ \"train_nums = (spark.createDataFrame(train_nums_pdf)\\n\",\n+ \" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n+ \" .coalesce(1))\\n\",\n+ \"val_nums = (spark.createDataFrame(val_nums_pdf)\\n\",\n+ \" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n+ \" .coalesce(1))\\n\",\n+ \"\\n\",\n+ \"# Note: Explicitly mark the smaller DataFrames as able to be broadcasted\\n\",\n+ \"# in order to have Catalyst choose the more efficient BroadcastHashJoin, \\n\",\n+ \"# rather than the costly SortMergeJoin.\\n\",\n+ \"train = df.join(F.broadcast(train_nums), 
on=\\\"slide_num\\\")\\n\",\n+ \"val = df.join(F.broadcast(val_nums), on=\\\"slide_num\\\")\\n\",\n+ \"\\n\",\n\"train, val\"\n]\n},\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Sanity checks.\\n\",\n+ \"assert len(pd.merge(train_nums_pdf, val_nums_pdf, on=\\\"slide_num\\\")) == 0\\n\",\n+ \"assert train_nums.join(val_nums, on=\\\"slide_num\\\").count() == 0\\n\",\n+ \"assert train.join(val, on=\\\"slide_num\\\").count() == 0\\n\",\n+ \"\\n\",\n+ \"# Check distributions.\\n\",\n+ \"for pdf in train_nums_pdf, val_nums_pdf:\\n\",\n+ \" print(pdf.count())\\n\",\n+ \" print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n+ \" print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False), \\\"\\\\n\\\")\\n\",\n+ \"\\n\",\n+ \"# Check total number of examples in each.\\n\",\n+ \"print(train.count(), val.count())\\n\",\n+ \"\\n\",\n+ \"# Check physical plans for broadcast join.\\n\",\n+ \"print(train.explain(), val.explain())\"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"outputs\": [],\n\"source\": [\n\"# Load train and validation DataFrames from disk.\\n\",\n- \"train = spark.read.load(tr_filename)\\n\",\n- \"val = spark.read.load(val_filename)\"\n+ \"train = spark.read.load(os.path.join(\\\"data\\\", tr_filename))\\n\",\n+ \"val = spark.read.load(os.path.join(\\\"data\\\", val_filename))\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false,\n+ \"collapsed\": true,\n\"deletable\": true,\n\"editable\": true\n},\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1185] Updating Preprocessing Notebook Updating the Preprocessing notebook with randomized train/val split on the original slide numbers.
49,738
24.02.2017 17:50:16
28,800
e82de90b21df44a3194a86ce0e087dc710cd34af
Fix parfor spark/mr handling of custom compiler configs This patch fixes an issue that was introduced earlier and affects, in the context of parfor spark/mr execute jobs, all configurations that are taken over from the dml config into our compiler config (i.e., optimization level and blocksize).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ProgramConverter.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ProgramConverter.java", "diff": "@@ -31,9 +31,11 @@ import org.apache.commons.logging.LogFactory;\nimport org.apache.hadoop.mapred.JobConf;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.CompilerConfig.ConfigType;\n+import org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.recompile.Recompiler;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.DataIdentifier;\n@@ -1345,8 +1347,10 @@ public class ProgramConverter\nJobConf job = ConfigurationManager.getCachedJobConf();\nif( !InfrastructureAnalyzer.isLocalMode(job) ) {\nif( confStr != null && !confStr.trim().isEmpty() ) {\n- DMLConfig config = DMLConfig.parseDMLConfig(confStr);\n- ConfigurationManager.setLocalConfig(config);\n+ DMLConfig dmlconf = DMLConfig.parseDMLConfig(confStr);\n+ CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(dmlconf);\n+ ConfigurationManager.setLocalConfig(dmlconf);\n+ ConfigurationManager.setLocalConfig(cconf);\n}\n//init internal configuration w/ parsed or default config\nParForProgramBlock.initInternalConfigurations(\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1126] Fix parfor spark/mr handling of custom compiler configs This patch fixes an issue that has been introduced with SYSTEMML-584 and affects, in the context of parfor spark/mr execute jobs, all configurations that are taken over from the dml config into our compiler config (i.e., optimization level and blocksize).
49,738
24.02.2017 19:03:56
28,800
b028e6cee12d8cc9bb4e1728fffc852cef7282c1
Avoid unnecessary RDD export on parfor spark dpesp
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -23,6 +23,7 @@ import java.io.BufferedWriter;\nimport java.io.IOException;\nimport java.io.OutputStreamWriter;\nimport java.util.ArrayList;\n+import java.util.Arrays;\nimport java.util.Collection;\nimport java.util.HashMap;\nimport java.util.HashSet;\n@@ -1063,8 +1064,8 @@ public class ParForProgramBlock extends ForProgramBlock\nif( _monitor )\nStatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_TASKS_T, time.stop());\n- //write matrices to HDFS\n- exportMatricesToHDFS(ec);\n+ //write matrices to HDFS, except DP matrix which is the input to the RemoteDPParForSpark job\n+ exportMatricesToHDFS(ec, _colocatedDPMatrix);\n// Step 4) submit MR job (wait for finished work)\nOutputInfo inputOI = ((inputMatrix.getSparsity()<0.1 && inputDPF==PDataPartitionFormat.COLUMN_WISE)||\n@@ -1258,37 +1259,33 @@ public class ParForProgramBlock extends ForProgramBlock\n}\n}\n- private void exportMatricesToHDFS( ExecutionContext ec )\n+ private void exportMatricesToHDFS(ExecutionContext ec, String... blacklistNames)\nthrows CacheException\n{\nParForStatementBlock sb = (ParForStatementBlock)getStatementBlock();\n+ HashSet<String> blacklist = new HashSet<String>(Arrays.asList(blacklistNames));\nif( LIVEVAR_AWARE_EXPORT && sb != null)\n{\n//optimization to prevent unnecessary export of matrices\n//export only variables that are read in the body\nVariableSet varsRead = sb.variablesRead();\n- for (String key : ec.getVariables().keySet() )\n- {\n+ for (String key : ec.getVariables().keySet() ) {\n+ if( varsRead.containsVariable(key) && !blacklist.contains(key) ) {\nData d = ec.getVariable(key);\n- if ( d.getDataType() == DataType.MATRIX\n- && varsRead.containsVariable(key) )\n- {\n- MatrixObject mo = (MatrixObject)d;\n- mo.exportData( _replicationExport );\n+ if( d.getDataType() == DataType.MATRIX )\n+ ((MatrixObject)d).exportData(_replicationExport);\n}\n}\n}\nelse\n{\n//export all matrices in symbol table\n- for (String key : ec.getVariables().keySet() )\n- {\n+ for (String key : ec.getVariables().keySet() ) {\n+ if( !blacklist.contains(key) ) {\nData d = ec.getVariable(key);\nif( d.getDataType() == DataType.MATRIX )\n- {\n- MatrixObject mo = (MatrixObject)d;\n- mo.exportData( _replicationExport );\n+ ((MatrixObject)d).exportData(_replicationExport);\n}\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1350] Avoid unnecessary RDD export on parfor spark dpesp
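Illustrative note (not part of the commit record above): the patch passes the name of the data-partitioned input matrix as a varargs blacklist so that it is not exported to HDFS before the RemoteDPParForSpark job consumes it as an RDD. Below is a minimal, self-contained Java sketch of that blacklist-filtering idiom; the class, map contents, and variable names are hypothetical and are not the SystemML API, only the general pattern (varargs collected into a HashSet and used to skip entries) mirrors the change.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

public class ExportBlacklistSketch {
    // Export every entry of a symbol table except the blacklisted names,
    // mimicking the varargs blacklist used to skip the partitioned input matrix.
    public static void exportAllExcept(Map<String, String> symbolTable, String... blacklistNames) {
        HashSet<String> blacklist = new HashSet<>(Arrays.asList(blacklistNames));
        for (Map.Entry<String, String> e : symbolTable.entrySet()) {
            if (!blacklist.contains(e.getKey()))
                System.out.println("exporting " + e.getKey() + " to " + e.getValue());
        }
    }

    public static void main(String[] args) {
        Map<String, String> vars = new HashMap<>();
        vars.put("X", "hdfs:/tmp/X");
        vars.put("y", "hdfs:/tmp/y");
        // "X" is the input of the Spark job, so its export is skipped here.
        exportAllExcept(vars, "X");
    }
}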
49,738
24.02.2017 22:00:33
28,800
b78c125934fa7a947a5118f0c08473afa926fa5d
Fix parfor spark working dir delete on shutdown
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "diff": "@@ -32,6 +32,7 @@ import org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartition\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.parfor.Task.TaskType;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDHandler;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.PairWritableBlock;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.PairWritableCell;\n@@ -145,21 +146,28 @@ public class RemoteDPParForSparkWorker extends ParWorker implements PairFlatMapF\n_numTasks = 0;\n_numIters = 0;\n- //init local cache manager\n- if( !CacheableData.isCachingActive() ) {\n+ //init and register-cleanup of buffer pool (in parfor spark, multiple tasks might\n+ //share the process-local, i.e., per executor, buffer pool; hence we synchronize\n+ //the initialization and immediately register the created directory for cleanup\n+ //on process exit, i.e., executor exit, including any files created in the future.\n+ synchronized( CacheableData.class ) {\n+ if( !CacheableData.isCachingActive() && !InfrastructureAnalyzer.isLocalMode() ) {\n+ //create id, executor working dir, and cache dir\nString uuid = IDHandler.createDistributedUniqueID();\nLocalFileUtils.createWorkingDirectoryWithUUID( uuid );\n- CacheableData.initCaching( uuid ); //incl activation, cache dir creation (each map task gets its own dir for simplified cleanup)\n+ CacheableData.initCaching( uuid ); //incl activation and cache dir creation\n+ CacheableData.cacheEvictionLocalFilePrefix =\n+ CacheableData.cacheEvictionLocalFilePrefix +\"_\" + _workerID;\n+ //register entire working dir for delete on shutdown\n+ RemoteParForUtils.cleanupWorkingDirectoriesOnShutdown();\n}\n- if( !CacheableData.cacheEvictionLocalFilePrefix.contains(\"_\") ){ //account for local mode\n- CacheableData.cacheEvictionLocalFilePrefix = CacheableData.cacheEvictionLocalFilePrefix +\"_\" + _workerID;\n}\n//ensure that resultvar files are not removed\nsuper.pinResultVariables();\n- //enable/disable caching (if required)\n- if( !_caching )\n+ //enable/disable caching (if required and not in CP process)\n+ if( !_caching && !InfrastructureAnalyzer.isLocalMode() )\nCacheableData.disableCaching();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSparkWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSparkWorker.java", "diff": "@@ -28,6 +28,7 @@ import org.apache.spark.api.java.function.PairFlatMapFunction;\nimport org.apache.spark.util.LongAccumulator;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDHandler;\nimport org.apache.sysml.runtime.util.LocalFileUtils;\n@@ -35,23 +36,20 @@ import scala.Tuple2;\npublic class RemoteParForSparkWorker extends ParWorker implements PairFlatMapFunction<Task, Long, String>\n{\n-\nprivate static final 
long serialVersionUID = -3254950138084272296L;\n+ private final String _prog;\nprivate boolean _initialized = false;\n- private String _prog = null;\nprivate boolean _caching = true;\n- private LongAccumulator _aTasks = null;\n- private LongAccumulator _aIters = null;\n+ private final LongAccumulator _aTasks;\n+ private final LongAccumulator _aIters;\npublic RemoteParForSparkWorker(String program, boolean cpCaching, LongAccumulator atasks, LongAccumulator aiters)\nthrows DMLRuntimeException\n{\n- //keep inputs (unfortunately, spark does not expose task ids and it would be implementation-dependent\n- //when this constructor is actually called; hence, we do lazy initialization on task execution)\n- _initialized = false;\n_prog = program;\n+ _initialized = false;\n_caching = cpCaching;\n//setup spark accumulators\n@@ -65,7 +63,7 @@ public class RemoteParForSparkWorker extends ParWorker implements PairFlatMapFun\n{\n//lazy parworker initialization\nif( !_initialized )\n- configureWorker( TaskContext.get().taskAttemptId() ); //requires Spark 1.3\n+ configureWorker( TaskContext.get().taskAttemptId() );\n//execute a single task\nlong numIter = getExecutedIterations();\n@@ -98,24 +96,31 @@ public class RemoteParForSparkWorker extends ParWorker implements PairFlatMapFun\n_numTasks = 0;\n_numIters = 0;\n- //init local cache manager\n- if( !CacheableData.isCachingActive() ) {\n+ //init and register-cleanup of buffer pool (in parfor spark, multiple tasks might\n+ //share the process-local, i.e., per executor, buffer pool; hence we synchronize\n+ //the initialization and immediately register the created directory for cleanup\n+ //on process exit, i.e., executor exit, including any files created in the future.\n+ synchronized( CacheableData.class ) {\n+ if( !CacheableData.isCachingActive() && !InfrastructureAnalyzer.isLocalMode() ) {\n+ //create id, executor working dir, and cache dir\nString uuid = IDHandler.createDistributedUniqueID();\nLocalFileUtils.createWorkingDirectoryWithUUID( uuid );\n- CacheableData.initCaching( uuid ); //incl activation, cache dir creation (each map task gets its own dir for simplified cleanup)\n+ CacheableData.initCaching( uuid ); //incl activation and cache dir creation\n+ CacheableData.cacheEvictionLocalFilePrefix =\n+ CacheableData.cacheEvictionLocalFilePrefix +\"_\" + _workerID;\n+ //register entire working dir for delete on shutdown\n+ RemoteParForUtils.cleanupWorkingDirectoriesOnShutdown();\n}\n- if( !CacheableData.cacheEvictionLocalFilePrefix.contains(\"_\") ){ //account for local mode\n- CacheableData.cacheEvictionLocalFilePrefix = CacheableData.cacheEvictionLocalFilePrefix +\"_\" + _workerID;\n}\n//ensure that resultvar files are not removed\nsuper.pinResultVariables();\n- //enable/disable caching (if required)\n- if( !_caching )\n+ //enable/disable caching (if required and not in CP process)\n+ if( !_caching && !InfrastructureAnalyzer.isLocalMode() )\nCacheableData.disableCaching();\n- //make as lazily intialized\n+ //mark as initialized\n_initialized = true;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForUtils.java", "diff": "@@ -191,11 +191,8 @@ public class RemoteParForUtils\nreturn ret;\n}\n-\n/**\n- * Cleanup all temporary files created by this SystemML process\n- * instance.\n- *\n+ * Cleanup all temporary files created by this SystemML process.\n*/\npublic static void 
cleanupWorkingDirectories()\n{\n@@ -216,6 +213,15 @@ public class RemoteParForUtils\n}\n}\n+ /**\n+ * Cleanup all temporary files created by this SystemML process,\n+ * on shutdown via exit or interrupt.\n+ */\n+ public static void cleanupWorkingDirectoriesOnShutdown() {\n+ Runtime.getRuntime().addShutdownHook(\n+ new DeleteWorkingDirectoriesTask());\n+ }\n+\npublic static LocalVariableMap[] getResults( List<Tuple2<Long,String>> out, Log LOG )\nthrows DMLRuntimeException\n{\n@@ -241,4 +247,16 @@ public class RemoteParForUtils\n//create return array\nreturn tmp.values().toArray(new LocalVariableMap[0]);\n}\n+\n+ /**\n+ * Task to be registered as shutdown hook in order to delete the\n+ * all working directories, including any remaining files, which\n+ * might not have been created at time of registration.\n+ */\n+ private static class DeleteWorkingDirectoriesTask extends Thread {\n+ @Override\n+ public void run() {\n+ cleanupWorkingDirectories();\n+ }\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/LocalFileUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/LocalFileUtils.java", "diff": "@@ -305,7 +305,7 @@ public class LocalFileUtils\nreturn createWorkingDirectoryWithUUID( DMLScript.getUUID() );\n}\n- public static synchronized String createWorkingDirectoryWithUUID( String uuid )\n+ public static String createWorkingDirectoryWithUUID( String uuid )\nthrows DMLRuntimeException\n{\n//create local tmp dir if not existing\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1309] Fix parfor spark working dir delete on shutdown
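Illustrative note (not part of the commit record above): the fix registers a JVM shutdown hook per Spark executor so that the process-local working directory, including files created after registration, is deleted on executor exit. The following is a minimal sketch of that pattern, assuming nothing beyond the standard JDK; class, method, and directory names are hypothetical and do not reflect the SystemML API.

import java.io.File;

public class ShutdownCleanupSketch {
    // Recursively delete a directory tree, including files created after
    // the hook was registered.
    private static void deleteRecursively(File f) {
        File[] children = f.listFiles();
        if (children != null)
            for (File c : children)
                deleteRecursively(c);
        f.delete();
    }

    // Register the cleanup once per JVM (i.e., once per executor process).
    public static void registerCleanupOnShutdown(final File workingDir) {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                deleteRecursively(workingDir);
            }
        });
    }

    public static void main(String[] args) {
        File workDir = new File(System.getProperty("java.io.tmpdir"), "sketch_workdir");
        workDir.mkdirs();
        registerCleanupOnShutdown(workDir);
        // ... temporary files spilled into workDir are removed on process exit ...
    }
}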
49,738
26.02.2017 19:01:36
28,800
982ecb1a4be69685a8e124eccfa3a12331f998b0
Code generator runtime integration
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "diff": "@@ -61,6 +61,7 @@ import org.apache.sysml.runtime.instructions.cp.QuantileSortCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.QuaternaryCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.RelationalBinaryCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.ReorgCPInstruction;\n+import org.apache.sysml.runtime.instructions.cp.SpoofCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.StringInitCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.TernaryCPInstruction;\nimport org.apache.sysml.runtime.instructions.cp.UaggOuterChainCPInstruction;\n@@ -273,6 +274,7 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"partition\", CPINSTRUCTION_TYPE.Partition);\nString2CPInstructionType.put( \"compress\", CPINSTRUCTION_TYPE.Compression);\n+ String2CPInstructionType.put( \"spoof\", CPINSTRUCTION_TYPE.SpoofFused);\n//CP FILE instruction\nString2CPFileInstructionType = new HashMap<String, CPINSTRUCTION_TYPE>();\n@@ -425,15 +427,18 @@ public class CPInstructionParser extends InstructionParser\ncase Partition:\nreturn DataPartitionCPInstruction.parseInstruction(str);\n- case Compression:\n- return (CPInstruction) CompressionCPInstruction.parseInstruction(str);\n-\ncase CentralMoment:\nreturn CentralMomentCPInstruction.parseInstruction(str);\ncase Covariance:\nreturn CovarianceCPInstruction.parseInstruction(str);\n+ case Compression:\n+ return (CPInstruction) CompressionCPInstruction.parseInstruction(str);\n+\n+ case SpoofFused:\n+ return SpoofCPInstruction.parseInstruction(str);\n+\ncase INVALID:\ndefault:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/SPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/SPInstructionParser.java", "diff": "@@ -73,6 +73,7 @@ import org.apache.sysml.runtime.instructions.spark.ReorgSPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.RmmSPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.SPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.SPInstruction.SPINSTRUCTION_TYPE;\n+import org.apache.sysml.runtime.instructions.spark.SpoofSPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.TernarySPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.Tsmm2SPInstruction;\nimport org.apache.sysml.runtime.instructions.spark.TsmmSPInstruction;\n@@ -281,6 +282,8 @@ public class SPInstructionParser extends InstructionParser\nString2SPInstructionType.put( \"castdtm\" , SPINSTRUCTION_TYPE.Cast);\nString2SPInstructionType.put( \"castdtf\" , SPINSTRUCTION_TYPE.Cast);\n+\n+ String2SPInstructionType.put( \"spoof\" , SPINSTRUCTION_TYPE.SpoofFused);\n}\npublic static SPInstruction parseSingleInstruction (String str )\n@@ -447,6 +450,9 @@ public class SPInstructionParser extends InstructionParser\ncase Compression:\nreturn CompressionSPInstruction.parseInstruction(str);\n+ case SpoofFused:\n+ return SpoofSPInstruction.parseInstruction(str);\n+\ncase Cast:\nreturn CastSPInstruction.parseInstruction(str);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/CPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/CPInstruction.java", "diff": 
"@@ -29,7 +29,13 @@ import org.apache.sysml.runtime.matrix.operators.Operator;\npublic abstract class CPInstruction extends Instruction\n{\n- public enum CPINSTRUCTION_TYPE { INVALID, AggregateUnary, AggregateBinary, AggregateTernary, ArithmeticBinary, Ternary, Quaternary, BooleanBinary, BooleanUnary, BuiltinBinary, BuiltinUnary, BuiltinMultiple, MultiReturnParameterizedBuiltin, ParameterizedBuiltin, MultiReturnBuiltin, Builtin, Reorg, RelationalBinary, File, Variable, External, Append, Rand, QSort, QPick, MatrixIndexing, MMTSJ, PMMJ, MMChain, MatrixReshape, Partition, Compression, StringInit, CentralMoment, Covariance, UaggOuterChain, Convolution };\n+ public enum CPINSTRUCTION_TYPE { INVALID,\n+ AggregateUnary, AggregateBinary, AggregateTernary, ArithmeticBinary,\n+ Ternary, Quaternary, BooleanBinary, BooleanUnary, BuiltinBinary, BuiltinUnary,\n+ BuiltinMultiple, MultiReturnParameterizedBuiltin, ParameterizedBuiltin, MultiReturnBuiltin,\n+ Builtin, Reorg, RelationalBinary, File, Variable, External, Append, Rand, QSort, QPick,\n+ MatrixIndexing, MMTSJ, PMMJ, MMChain, MatrixReshape, Partition, Compression, SpoofFused,\n+ StringInit, CentralMoment, Covariance, UaggOuterChain, Convolution };\nprotected CPINSTRUCTION_TYPE _cptype;\nprotected Operator _optr;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/SpoofCPInstruction.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.cp;\n+\n+import java.util.ArrayList;\n+\n+import org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.codegen.CodegenUtils;\n+import org.apache.sysml.runtime.codegen.SpoofOperator;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.instructions.cp.ComputationCPInstruction;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+\n+public class SpoofCPInstruction extends ComputationCPInstruction\n+{\n+ private Class<?> _class = null;\n+ private int _numThreads = 1;\n+ private CPOperand[] _in = null;\n+\n+ public SpoofCPInstruction(Class<?> cla, int k, CPOperand[] in, CPOperand out, String opcode, String str) {\n+ super(null, null, null, out, opcode, str);\n+ _class = cla;\n+ _numThreads = k;\n+ _in = in;\n+ }\n+\n+ public static SpoofCPInstruction parseInstruction(String str)\n+ throws DMLRuntimeException\n+ {\n+ String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n+\n+ //String opcode = parts[0];\n+ ArrayList<CPOperand> inlist = new ArrayList<CPOperand>();\n+ Class<?> cla = CodegenUtils.loadClass(parts[1], null);\n+ String opcode = parts[0] + CodegenUtils.getSpoofType(cla);\n+\n+ for( int i=2; i<parts.length-2; i++ )\n+ inlist.add(new CPOperand(parts[i]));\n+ CPOperand out = new CPOperand(parts[parts.length-2]);\n+ int k = Integer.parseInt(parts[parts.length-1]);\n+\n+ return new SpoofCPInstruction(cla, k, inlist.toArray(new CPOperand[0]), out, opcode, str);\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec)\n+ throws DMLRuntimeException\n+ {\n+ SpoofOperator op = (SpoofOperator) CodegenUtils.createInstance(_class);\n+\n+ //get input matrices and scalars, incl pinning of matrices\n+ ArrayList<MatrixBlock> inputs = new ArrayList<MatrixBlock>();\n+ ArrayList<ScalarObject> scalars = new ArrayList<ScalarObject>();\n+ for (CPOperand input : _in) {\n+ if(input.getDataType()==DataType.MATRIX)\n+ inputs.add(ec.getMatrixInput(input.getName()));\n+ else if(input.getDataType()==DataType.SCALAR)\n+ scalars.add(ec.getScalarInput(input.getName(), input.getValueType(), input.isLiteral()));\n+ }\n+\n+ // set the output dimensions to the hop node matrix dimensions\n+ if( output.getDataType() == DataType.MATRIX) {\n+ MatrixBlock out = new MatrixBlock();\n+ op.execute(inputs, scalars, out, _numThreads);\n+ ec.setMatrixOutput(output.getName(), out);\n+ }\n+ else if (output.getDataType() == DataType.SCALAR) {\n+ ScalarObject out = op.execute(inputs, scalars, _numThreads);\n+ ec.setScalarOutput(output.getName(), out);\n+ }\n+\n+ // release input matrices\n+ for (CPOperand input : _in)\n+ if(input.getDataType()==DataType.MATRIX)\n+ ec.releaseMatrixInput(input.getName());\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SPInstruction.java", "diff": "@@ -37,7 +37,7 @@ public abstract class SPInstruction extends Instruction\nCentralMoment, Covariance, QSort, QPick,\nParameterizedBuiltin, MAppend, RAppend, GAppend, 
GAlignedAppend, Rand,\nMatrixReshape, Ternary, Quaternary, CumsumAggregate, CumsumOffset, BinUaggChain, UaggOuterChain,\n- Write, INVALID,\n+ Write, SpoofFused, INVALID,\nConvolution\n};\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SpoofSPInstruction.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.spark;\n+\n+import java.util.ArrayList;\n+import java.util.Iterator;\n+import java.util.List;\n+\n+import org.apache.spark.api.java.JavaPairRDD;\n+import org.apache.spark.api.java.function.PairFlatMapFunction;\n+import org.apache.spark.api.java.function.PairFunction;\n+import org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.codegen.CodegenUtils;\n+import org.apache.sysml.runtime.codegen.SpoofCellwise;\n+import org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;\n+import org.apache.sysml.runtime.codegen.SpoofOperator;\n+import org.apache.sysml.runtime.codegen.SpoofOuterProduct;\n+import org.apache.sysml.runtime.codegen.SpoofOuterProduct.OutProdType;\n+import org.apache.sysml.runtime.codegen.SpoofRowAggregate;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.instructions.cp.DoubleObject;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysml.runtime.instructions.spark.SPInstruction;\n+import org.apache.sysml.runtime.instructions.spark.data.PartitionedBroadcast;\n+import org.apache.sysml.runtime.instructions.spark.utils.RDDAggregateUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n+\n+import scala.Tuple2;\n+\n+public class SpoofSPInstruction extends SPInstruction\n+{\n+ private final Class<?> _class;\n+ private final byte[] _classBytes;\n+ private final CPOperand[] _in;\n+ private final CPOperand _out;\n+\n+ public SpoofSPInstruction(Class<?> cls , byte[] classBytes, CPOperand[] in, CPOperand out, String opcode, String str) {\n+ super(opcode, str);\n+ _class = cls;\n+ _classBytes = classBytes;\n+ _sptype = SPINSTRUCTION_TYPE.SpoofFused;\n+ _in = in;\n+ _out = out;\n+ }\n+\n+ public static SpoofSPInstruction parseInstruction(String str)\n+ throws DMLRuntimeException\n+ {\n+ String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n+\n+ //String 
opcode = parts[0];\n+ ArrayList<CPOperand> inlist = new ArrayList<CPOperand>();\n+ Class<?> cls = CodegenUtils.loadClass(parts[1], null);\n+ byte[] classBytes = CodegenUtils.getClassAsByteArray(parts[1]);\n+ String opcode = parts[0] + CodegenUtils.getSpoofType(cls);\n+\n+ for( int i=2; i<parts.length-2; i++ )\n+ inlist.add(new CPOperand(parts[i]));\n+ CPOperand out = new CPOperand(parts[parts.length-2]);\n+ //note: number of threads parts[parts.length-1] always ignored\n+\n+ return new SpoofSPInstruction(cls, classBytes, inlist.toArray(new CPOperand[0]), out, opcode, str);\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec)\n+ throws DMLRuntimeException\n+ {\n+ SparkExecutionContext sec = (SparkExecutionContext)ec;\n+\n+ //get input rdd and variable name\n+ ArrayList<String> bcVars = new ArrayList<String>();\n+ MatrixCharacteristics mcIn = sec.getMatrixCharacteristics(_in[0].getName());\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable( _in[0].getName() );\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> out = null;\n+\n+ //simple case: map-side only operation (one rdd input, broadcast all)\n+ //keep track of broadcast variables\n+ ArrayList<PartitionedBroadcast<MatrixBlock>> bcMatrices = new ArrayList<PartitionedBroadcast<MatrixBlock>>();\n+ ArrayList<ScalarObject> scalars = new ArrayList<ScalarObject>();\n+ for( int i=1; i<_in.length; i++ ) {\n+ if( _in[i].getDataType()==DataType.MATRIX) {\n+ bcMatrices.add(sec.getBroadcastForVariable(_in[i].getName()));\n+ bcVars.add(_in[i].getName());\n+ }\n+ else if(_in[i].getDataType()==DataType.SCALAR) {\n+ scalars.add(sec.getScalarInput(_in[i].getName(), _in[i].getValueType(), _in[i].isLiteral()));\n+ }\n+ }\n+\n+ //initialize Spark Operator\n+ if(_class.getSuperclass() == SpoofCellwise.class) // cellwise operator\n+ {\n+ if( _out.getDataType()==DataType.MATRIX ) {\n+ SpoofOperator op = (SpoofOperator) CodegenUtils.createInstance(_class);\n+\n+ out = in.mapPartitionsToPair(new CellwiseFunction(_class.getName(), _classBytes, bcMatrices, scalars), true);\n+ if( ((SpoofCellwise)op).getCellType()==CellType.ROW_AGG && mcIn.getCols() > mcIn.getColsPerBlock() ) {\n+ //NOTE: workaround with partition size needed due to potential bug in SPARK\n+ //TODO investigate if some other side effect of correct blocks\n+ if( out.partitions().size() > mcIn.getNumRowBlocks() )\n+ out = RDDAggregateUtils.sumByKeyStable(out, (int)mcIn.getNumRowBlocks());\n+ else\n+ out = RDDAggregateUtils.sumByKeyStable(out);\n+ }\n+ sec.setRDDHandleForVariable(_out.getName(), out);\n+\n+ //maintain lineage information for output rdd\n+ sec.addLineageRDD(_out.getName(), _in[0].getName());\n+ for( String bcVar : bcVars )\n+ sec.addLineageBroadcast(_out.getName(), bcVar);\n+\n+ //update matrix characteristics\n+ updateOutputMatrixCharacteristics(sec, op);\n+ }\n+ else { //SCALAR\n+ out = in.mapPartitionsToPair(new CellwiseFunction(_class.getName(), _classBytes, bcMatrices, scalars), true);\n+ MatrixBlock tmpMB = RDDAggregateUtils.sumStable(out);\n+ sec.setVariable(_out.getName(), new DoubleObject(tmpMB.getValue(0, 0)));\n+ }\n+ }\n+ else if(_class.getSuperclass() == SpoofOuterProduct.class) // outer product operator\n+ {\n+ if( _out.getDataType()==DataType.MATRIX ) {\n+ SpoofOperator op = (SpoofOperator) CodegenUtils.createInstance(_class);\n+ OutProdType type = ((SpoofOuterProduct)op).getOuterProdType();\n+\n+ //update matrix characteristics\n+ updateOutputMatrixCharacteristics(sec, op);\n+ MatrixCharacteristics mcOut = 
sec.getMatrixCharacteristics(_out.getName());\n+\n+ out = in.mapPartitionsToPair(new OuterProductFunction(_class.getName(), _classBytes, bcMatrices, scalars), true);\n+ if(type == OutProdType.LEFT_OUTER_PRODUCT || type == OutProdType.RIGHT_OUTER_PRODUCT ) {\n+ //NOTE: workaround with partition size needed due to potential bug in SPARK\n+ //TODO investigate if some other side effect of correct blocks\n+ if( in.partitions().size() > mcOut.getNumRowBlocks()*mcOut.getNumColBlocks() )\n+ out = RDDAggregateUtils.sumByKeyStable( out, (int)(mcOut.getNumRowBlocks()*mcOut.getNumColBlocks()) );\n+ else\n+ out = RDDAggregateUtils.sumByKeyStable( out );\n+ }\n+ sec.setRDDHandleForVariable(_out.getName(), out);\n+\n+ //maintain lineage information for output rdd\n+ sec.addLineageRDD(_out.getName(), _in[0].getName());\n+ for( String bcVar : bcVars )\n+ sec.addLineageBroadcast(_out.getName(), bcVar);\n+\n+ }\n+ else {\n+ out = in.mapPartitionsToPair(new OuterProductFunction(_class.getName(), _classBytes, bcMatrices, scalars), true);\n+ MatrixBlock tmp = RDDAggregateUtils.sumStable(out);\n+ sec.setVariable(_out.getName(), new DoubleObject(tmp.getValue(0, 0)));\n+ }\n+ }\n+ else if( _class.getSuperclass() == SpoofRowAggregate.class ) { //row aggregate operator\n+ RowAggregateFunction fmmc = new RowAggregateFunction(_class.getName(), _classBytes, bcMatrices, scalars);\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> tmpRDD = in.mapToPair(fmmc);\n+ MatrixBlock tmpMB = RDDAggregateUtils.sumStable(tmpRDD);\n+ sec.setMatrixOutput(_out.getName(), tmpMB);\n+ return;\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Operator \" + _class.getSuperclass() + \" is not supported on Spark\");\n+ }\n+ }\n+\n+ private void updateOutputMatrixCharacteristics(SparkExecutionContext sec, SpoofOperator op)\n+ throws DMLRuntimeException\n+ {\n+ if(op instanceof SpoofCellwise)\n+ {\n+ MatrixCharacteristics mcIn = sec.getMatrixCharacteristics(_in[0].getName());\n+ MatrixCharacteristics mcOut = sec.getMatrixCharacteristics(_out.getName());\n+ if( ((SpoofCellwise)op).getCellType()==CellType.ROW_AGG )\n+ mcOut.set(mcIn.getRows(), 1, mcIn.getRowsPerBlock(), mcIn.getColsPerBlock());\n+ else if( ((SpoofCellwise)op).getCellType()==CellType.NO_AGG )\n+ mcOut.set(mcIn);\n+ }\n+ else if(op instanceof SpoofOuterProduct)\n+ {\n+ MatrixCharacteristics mcIn1 = sec.getMatrixCharacteristics(_in[0].getName()); //X\n+ MatrixCharacteristics mcIn2 = sec.getMatrixCharacteristics(_in[1].getName()); //U\n+ MatrixCharacteristics mcIn3 = sec.getMatrixCharacteristics(_in[2].getName()); //V\n+ MatrixCharacteristics mcOut = sec.getMatrixCharacteristics(_out.getName());\n+ OutProdType type = ((SpoofOuterProduct)op).getOuterProdType();\n+\n+ if( type == OutProdType.CELLWISE_OUTER_PRODUCT)\n+ mcOut.set(mcIn1.getRows(), mcIn1.getCols(), mcIn1.getRowsPerBlock(), mcIn1.getColsPerBlock());\n+ else if( type == OutProdType.LEFT_OUTER_PRODUCT)\n+ mcOut.set(mcIn3.getRows(), mcIn3.getCols(), mcIn3.getRowsPerBlock(), mcIn3.getColsPerBlock());\n+ else if( type == OutProdType.RIGHT_OUTER_PRODUCT )\n+ mcOut.set(mcIn2.getRows(), mcIn2.getCols(), mcIn2.getRowsPerBlock(), mcIn2.getColsPerBlock());\n+ }\n+ }\n+\n+ private static class RowAggregateFunction implements PairFunction<Tuple2<MatrixIndexes, MatrixBlock>, MatrixIndexes, MatrixBlock>\n+ {\n+ private static final long serialVersionUID = -7926980450209760212L;\n+\n+ private ArrayList<PartitionedBroadcast<MatrixBlock>> _vectors = null;\n+ private ArrayList<ScalarObject> _scalars = null;\n+ private byte[] _classBytes = null;\n+ 
private String _className = null;\n+ private SpoofOperator _op = null;\n+\n+ public RowAggregateFunction(String className, byte[] classBytes, ArrayList<PartitionedBroadcast<MatrixBlock>> bcMatrices, ArrayList<ScalarObject> scalars)\n+ throws DMLRuntimeException\n+ {\n+ _className = className;\n+ _classBytes = classBytes;\n+ _vectors = bcMatrices;\n+ _scalars = scalars;\n+ }\n+\n+ @Override\n+ public Tuple2<MatrixIndexes, MatrixBlock> call( Tuple2<MatrixIndexes, MatrixBlock> arg0 )\n+ throws Exception\n+ {\n+ //lazy load of shipped class\n+ if( _op == null ) {\n+ Class<?> loadedClass = CodegenUtils.loadClass(_className, _classBytes);\n+ _op = (SpoofOperator) CodegenUtils.createInstance(loadedClass);\n+ }\n+\n+ //get main input block and indexes\n+ MatrixIndexes ixIn = arg0._1();\n+ MatrixBlock blkIn = arg0._2();\n+ int rowIx = (int)ixIn.getRowIndex();\n+\n+ //prepare output and execute single-threaded operator\n+ ArrayList<MatrixBlock> inputs = getVectorInputsFromBroadcast(blkIn, rowIx);\n+ MatrixIndexes ixOut = new MatrixIndexes(1,1);\n+ MatrixBlock blkOut = new MatrixBlock();\n+ _op.execute(inputs, _scalars, blkOut);\n+\n+ //output new tuple\n+ return new Tuple2<MatrixIndexes, MatrixBlock>(ixOut, blkOut);\n+ }\n+\n+ private ArrayList<MatrixBlock> getVectorInputsFromBroadcast(MatrixBlock blkIn, int rowIndex)\n+ throws DMLRuntimeException\n+ {\n+ ArrayList<MatrixBlock> ret = new ArrayList<MatrixBlock>();\n+ ret.add(blkIn);\n+ for( PartitionedBroadcast<MatrixBlock> vector : _vectors )\n+ ret.add(vector.getBlock((vector.getNumRowBlocks()>=rowIndex)?rowIndex:1, 1));\n+ return ret;\n+ }\n+ }\n+\n+ private static class CellwiseFunction implements PairFlatMapFunction<Iterator<Tuple2<MatrixIndexes, MatrixBlock>>, MatrixIndexes, MatrixBlock>\n+ {\n+ private static final long serialVersionUID = -8209188316939435099L;\n+\n+ private ArrayList<PartitionedBroadcast<MatrixBlock>> _vectors = null;\n+ private ArrayList<ScalarObject> _scalars = null;\n+ private byte[] _classBytes = null;\n+ private String _className = null;\n+ private SpoofOperator _op = null;\n+\n+ public CellwiseFunction(String className, byte[] classBytes, ArrayList<PartitionedBroadcast<MatrixBlock>> bcMatrices, ArrayList<ScalarObject> scalars)\n+ throws DMLRuntimeException\n+ {\n+ _className = className;\n+ _classBytes = classBytes;\n+ _vectors = bcMatrices;\n+ _scalars = scalars;\n+ }\n+\n+ @Override\n+ public Iterator<Tuple2<MatrixIndexes, MatrixBlock>> call(Iterator<Tuple2<MatrixIndexes, MatrixBlock>> arg)\n+ throws Exception\n+ {\n+ //lazy load of shipped class\n+ if( _op == null ) {\n+ Class<?> loadedClass = CodegenUtils.loadClass(_className, _classBytes);\n+ _op = (SpoofOperator) CodegenUtils.createInstance(loadedClass);\n+ }\n+\n+ List<Tuple2<MatrixIndexes, MatrixBlock>> ret = new ArrayList<Tuple2<MatrixIndexes,MatrixBlock>>();\n+ while(arg.hasNext())\n+ {\n+ Tuple2<MatrixIndexes,MatrixBlock> tmp = arg.next();\n+ MatrixIndexes ixIn = tmp._1();\n+ MatrixBlock blkIn = tmp._2();\n+ MatrixIndexes ixOut = ixIn;\n+ MatrixBlock blkOut = new MatrixBlock();\n+ ArrayList<MatrixBlock> inputs = getVectorInputsFromBroadcast(blkIn, (int)ixIn.getRowIndex());\n+\n+ //execute core operation\n+ if(((SpoofCellwise)_op).getCellType()==CellType.FULL_AGG) {\n+ ScalarObject obj = _op.execute(inputs, _scalars, 1);\n+ blkOut.reset(1, 1);\n+ blkOut.quickSetValue(0, 0, obj.getDoubleValue());\n+ }\n+ else {\n+ if(((SpoofCellwise)_op).getCellType()==CellType.ROW_AGG)\n+ ixOut = new MatrixIndexes(ixOut.getRowIndex(), 1);\n+ _op.execute(inputs, _scalars, 
blkOut);\n+ }\n+ ret.add(new Tuple2<MatrixIndexes,MatrixBlock>(ixOut, blkOut));\n+ }\n+ return ret.iterator();\n+ }\n+\n+ private ArrayList<MatrixBlock> getVectorInputsFromBroadcast(MatrixBlock blkIn, int rowIndex)\n+ throws DMLRuntimeException\n+ {\n+ ArrayList<MatrixBlock> ret = new ArrayList<MatrixBlock>();\n+ ret.add(blkIn);\n+ for( PartitionedBroadcast<MatrixBlock> vector : _vectors )\n+ ret.add(vector.getBlock((vector.getNumRowBlocks()>=rowIndex)?rowIndex:1, 1));\n+ return ret;\n+ }\n+ }\n+\n+ private static class OuterProductFunction implements PairFlatMapFunction<Iterator<Tuple2<MatrixIndexes, MatrixBlock>>, MatrixIndexes, MatrixBlock>\n+ {\n+ private static final long serialVersionUID = -8209188316939435099L;\n+\n+ private ArrayList<PartitionedBroadcast<MatrixBlock>> _bcMatrices = null;\n+ private ArrayList<ScalarObject> _scalars = null;\n+ private byte[] _classBytes = null;\n+ private String _className = null;\n+ private SpoofOperator _op = null;\n+\n+ public OuterProductFunction(String className, byte[] classBytes, ArrayList<PartitionedBroadcast<MatrixBlock>> bcMatrices, ArrayList<ScalarObject> scalars)\n+ throws DMLRuntimeException\n+ {\n+ _className = className;\n+ _classBytes = classBytes;\n+ _bcMatrices = bcMatrices;\n+ _scalars = scalars;\n+ }\n+\n+ @Override\n+ public Iterator<Tuple2<MatrixIndexes, MatrixBlock>> call(Iterator<Tuple2<MatrixIndexes, MatrixBlock>> arg)\n+ throws Exception\n+ {\n+ //lazy load of shipped class\n+ if( _op == null ) {\n+ Class<?> loadedClass = CodegenUtils.loadClass(_className, _classBytes);\n+ _op = (SpoofOperator) CodegenUtils.createInstance(loadedClass);\n+ }\n+\n+ List<Tuple2<MatrixIndexes, MatrixBlock>> ret = new ArrayList<Tuple2<MatrixIndexes,MatrixBlock>>();\n+ while(arg.hasNext())\n+ {\n+ Tuple2<MatrixIndexes,MatrixBlock> tmp = arg.next();\n+ MatrixIndexes ixIn = tmp._1();\n+ MatrixBlock blkIn = tmp._2();\n+ MatrixBlock blkOut = new MatrixBlock();\n+\n+ ArrayList<MatrixBlock> inputs = new ArrayList<MatrixBlock>();\n+ inputs.add(blkIn);\n+ inputs.add(_bcMatrices.get(0).getBlock((int)ixIn.getRowIndex(), 1)); // U\n+ inputs.add(_bcMatrices.get(1).getBlock((int)ixIn.getColumnIndex(), 1)); // V\n+\n+ //execute core operation\n+ if(((SpoofOuterProduct)_op).getOuterProdType()==OutProdType.AGG_OUTER_PRODUCT) {\n+ ScalarObject obj = _op.execute(inputs, _scalars,1);\n+ blkOut.reset(1, 1);\n+ blkOut.quickSetValue(0, 0, obj.getDoubleValue());\n+ }\n+ else {\n+ _op.execute(inputs, _scalars, blkOut);\n+ }\n+\n+ ret.add(new Tuple2<MatrixIndexes,MatrixBlock>(createOutputIndexes(ixIn,_op), blkOut));\n+ }\n+\n+ return ret.iterator();\n+ }\n+\n+ private MatrixIndexes createOutputIndexes(MatrixIndexes in, SpoofOperator spoofOp) {\n+ if( ((SpoofOuterProduct)spoofOp).getOuterProdType() == OutProdType.LEFT_OUTER_PRODUCT )\n+ return new MatrixIndexes(in.getColumnIndex(), 1);\n+ else if ( ((SpoofOuterProduct)spoofOp).getOuterProdType() == OutProdType.RIGHT_OUTER_PRODUCT)\n+ return new MatrixIndexes(in.getRowIndex(), 1);\n+ else\n+ return in;\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDAggregateUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDAggregateUtils.java", "diff": "@@ -69,13 +69,17 @@ public class RDDAggregateUtils\n}\n}\n- public static JavaPairRDD<MatrixIndexes, MatrixBlock> sumByKeyStable( JavaPairRDD<MatrixIndexes, MatrixBlock> in )\n+ public static JavaPairRDD<MatrixIndexes, MatrixBlock> sumByKeyStable( 
JavaPairRDD<MatrixIndexes, MatrixBlock> in ) {\n+ return sumByKeyStable(in, in.getNumPartitions());\n+ }\n+\n+ public static JavaPairRDD<MatrixIndexes, MatrixBlock> sumByKeyStable( JavaPairRDD<MatrixIndexes, MatrixBlock> in, int numPartitions )\n{\n//stable sum of blocks per key, by passing correction blocks along with aggregates\nJavaPairRDD<MatrixIndexes, CorrMatrixBlock> tmp =\nin.combineByKey( new CreateCorrBlockCombinerFunction(),\nnew MergeSumBlockValueFunction(),\n- new MergeSumBlockCombinerFunction() );\n+ new MergeSumBlockCombinerFunction(), numPartitions );\n//strip-off correction blocks from\nJavaPairRDD<MatrixIndexes, MatrixBlock> out =\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1287] Code generator runtime integration
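As an aside on the record above: SpoofCPInstruction.parseInstruction and SpoofSPInstruction.parseInstruction in the diff both read the same operand layout — opcode, generated class name, input operands, output operand, and thread count (the thread count being ignored on Spark). The sketch below is a minimal, self-contained illustration of that layout only; the space delimiter, the SpoofParts holder class, and the example operand names are assumptions for readability and are not SystemML APIs.

```java
import java.util.Arrays;

/** Minimal sketch of the operand layout used by the fused "spoof" instructions. */
public class SpoofInstructionLayout {

    /** Simple holder; hypothetical, not a SystemML class. */
    static final class SpoofParts {
        final String opcode, className, output;
        final String[] inputs;
        final int numThreads;
        SpoofParts(String opcode, String className, String[] inputs, String output, int numThreads) {
            this.opcode = opcode; this.className = className;
            this.inputs = inputs; this.output = output; this.numThreads = numThreads;
        }
    }

    /** Splits an instruction string into the fields parsed by SpoofCPInstruction. */
    static SpoofParts parse(String instruction, String delimiter) {
        String[] parts = instruction.split(delimiter);
        // parts[0] = opcode, parts[1] = generated class, last two = output operand and thread count
        String[] inputs = Arrays.copyOfRange(parts, 2, parts.length - 2);
        return new SpoofParts(parts[0], parts[1], inputs,
            parts[parts.length - 2], Integer.parseInt(parts[parts.length - 1]));
    }

    public static void main(String[] args) {
        // Hypothetical instruction string using a space delimiter for illustration only.
        SpoofParts p = parse("spoof codegen.TMP3 _mVar1 _mVar2 _mVar3 16", " ");
        System.out.println(p.opcode + " -> class " + p.className + ", " + p.inputs.length
            + " inputs, output " + p.output + ", k=" + p.numThreads);
    }
}
```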
49,738
01.03.2017 17:35:08
28,800
6aab005ab5fa0a430e08e68936ab5b146df8b7ea
Fix minor memory leaks for tests (caches, resources) This patch fixes various minor memory leaks (such as cleanup of static caches on exit instead of init, proper closing of file resources, and removal of redundancy) in order to reduce memory pressure on testsuite runs within a single JVM process.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -682,12 +682,12 @@ public class DMLScript\n}\nfinally //ensure cleanup/shutdown\n{\n- if(DMLScript.USE_ACCELERATOR && ec != null) {\n+ if(DMLScript.USE_ACCELERATOR && ec != null)\nec.destroyGPUContext();\n- }\n- if(ec != null && ec instanceof SparkExecutionContext) {\n+ if( dmlconf.getBooleanValue(DMLConfig.CODEGEN) )\n+ SpoofCompiler.cleanupCodeGenerator();\n+ if(ec != null && ec instanceof SparkExecutionContext)\n((SparkExecutionContext) ec).close();\n- }\n//display statistics (incl caching stats if enabled)\nStatistics.stopRunTimer();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -85,9 +85,6 @@ public class SpoofCompiler\npublic static void generateCode(DMLProgram dmlp)\nthrows LanguageException, HopsException, DMLRuntimeException\n{\n- // cleanup static plan cache\n- planCache.clear();\n-\n// for each namespace, handle function statement blocks\nfor (String namespaceKey : dmlp.getNamespaces().keySet()) {\nfor (String fname : dmlp.getFunctionStatementBlocks(namespaceKey).keySet()) {\n@@ -177,6 +174,13 @@ public class SpoofCompiler\nreturn optimize(new ArrayList<Hop>(Arrays.asList(root)), compileLiterals).get(0);\n}\n+ public static void cleanupCodeGenerator() {\n+ if( USE_PLAN_CACHE ) {\n+ CodegenUtils.clearClassCache(); //class cache\n+ planCache.clear(); //plan cache\n+ }\n+ }\n+\n/**\n* Main interface of sum-product optimizer, statement block dag.\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "new_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "diff": "@@ -127,7 +127,7 @@ public class Dag<N extends Lop>\nprivate HashMap<Long, Integer> IDMap = null;\n- private class NodeOutput {\n+ private static class NodeOutput {\nString fileName;\nString varName;\nOutputInfo outInfo;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/CodegenUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/CodegenUtils.java", "diff": "@@ -265,4 +265,8 @@ public class CodegenUtils\nelse\nreturn \"UNKNOWN\";\n}\n+\n+ public static void clearClassCache() {\n+ _cache.clear();\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/utils/LinearAlgebraUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/utils/LinearAlgebraUtils.java", "diff": "package org.apache.sysml.runtime.compress.utils;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixMult;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n/**\n- * Various low-level primitives for compressed matrix blocks, some of which\n- * were copied from LibMatrixMult.\n- *\n+ * This library contains all vector primitives that are used compressed\n+ * linear algebra. 
For primitives that exist in LibMatrixMult, these\n+ * calls are simply forwarded to ensure consistency in performance and\n+ * result correctness.\n*/\n-public class LinearAlgebraUtils {\n-\n- public static double dotProduct(double[] a, double[] b, final int len)\n+public class LinearAlgebraUtils\n{\n- double val = 0;\n- final int bn = len % 8;\n-\n- // compute rest\n- for (int i = 0; i < bn; i++)\n- val += a[i] * b[i];\n+ //forwarded calls to LibMatrixMult\n- // unrolled 8-block (for better instruction-level parallelism)\n- for (int i = bn; i < len; i += 8) {\n- // read 64B cachelines of a and b\n- // compute cval' = sum(a * b) + cval\n- val += a[i + 0] * b[i + 0]\n- + a[i + 1] * b[i + 1]\n- + a[i + 2] * b[i + 2]\n- + a[i + 3] * b[i + 3]\n- + a[i + 4] * b[i + 4]\n- + a[i + 5] * b[i + 5]\n- + a[i + 6] * b[i + 6]\n- + a[i + 7] * b[i + 7];\n+ public static double dotProduct(double[] a, double[] b, final int len) {\n+ return LibMatrixMult.dotProduct(a, b, 0, 0, len);\n}\n- // scalar result\n- return val;\n+ public static double dotProduct( double[] a, double[] b, int ai, int bi, final int len ) {\n+ return LibMatrixMult.dotProduct(a, b, ai, bi, len);\n}\n- public static double dotProduct( double[] a, double[] b, int ai, int bi, final int len )\n- {\n- double val = 0;\n- final int bn = len%8;\n-\n- //compute rest\n- for( int i = 0; i < bn; i++, ai++, bi++ )\n- val += a[ ai ] * b[ bi ];\n-\n- //unrolled 8-block (for better instruction-level parallelism)\n- for( int i = bn; i < len; i+=8, ai+=8, bi+=8 )\n- {\n- //read 64B cachelines of a and b\n- //compute cval' = sum(a * b) + cval\n- val += a[ ai+0 ] * b[ bi+0 ]\n- + a[ ai+1 ] * b[ bi+1 ]\n- + a[ ai+2 ] * b[ bi+2 ]\n- + a[ ai+3 ] * b[ bi+3 ]\n- + a[ ai+4 ] * b[ bi+4 ]\n- + a[ ai+5 ] * b[ bi+5 ]\n- + a[ ai+6 ] * b[ bi+6 ]\n- + a[ ai+7 ] * b[ bi+7 ];\n+ public static void vectMultiplyAdd( final double aval, double[] b, double[] c, int bi, int ci, final int len ) {\n+ LibMatrixMult.vectMultiplyAdd(aval, b, c, bi, ci, len);\n}\n- //scalar result\n- return val;\n+ public static void vectMultiplyAdd( final double aval, double[] b, double[] c, int[] bix, final int bi, final int ci, final int len ) {\n+ LibMatrixMult.vectMultiplyAdd(aval, b, c, bix, bi, ci, len);\n}\n- public static void vectAdd( double[] a, double[] c, int ai, int ci, final int len )\n- {\n- final int bn = len%8;\n-\n- //rest, not aligned to 8-blocks\n- for( int j = 0; j < bn; j++, ai++, ci++)\n- c[ ci ] += a[ ai ];\n-\n- //unrolled 8-block (for better instruction-level parallelism)\n- for( int j = bn; j < len; j+=8, ai+=8, ci+=8)\n- {\n- //read 64B cachelines of a and c\n- //compute c' = c * a\n- //write back 64B cacheline of c = c'\n- c[ ci+0 ] += a[ ai+0 ];\n- c[ ci+1 ] += a[ ai+1 ];\n- c[ ci+2 ] += a[ ai+2 ];\n- c[ ci+3 ] += a[ ai+3 ];\n- c[ ci+4 ] += a[ ai+4 ];\n- c[ ci+5 ] += a[ ai+5 ];\n- c[ ci+6 ] += a[ ai+6 ];\n- c[ ci+7 ] += a[ ai+7 ];\n- }\n+ public static void vectAdd( double[] a, double[] c, int ai, int ci, final int len ) {\n+ LibMatrixMult.vectAdd(a, c, ai, ci, len);\n}\npublic static void vectAdd( final double aval, double[] c, char[] bix, final int bi, final int ci, final int len )\n@@ -152,52 +96,6 @@ public class LinearAlgebraUtils {\n}\n}\n- public static void vectMultiplyAdd( final double aval, double[] b, double[] c, int[] bix, final int bi, final int ci, final int len )\n- {\n- final int bn = (len-bi)%8;\n-\n- //rest, not aligned to 8-blocks\n- for( int j = bi; j < bi+bn; j++ )\n- c[ ci + bix[j] ] += aval * b[ j ];\n-\n- //unrolled 8-block (for better 
instruction-level parallelism)\n- for( int j = bi+bn; j < len; j+=8 )\n- {\n- c[ ci+bix[j+0] ] += aval * b[ j+0 ];\n- c[ ci+bix[j+1] ] += aval * b[ j+1 ];\n- c[ ci+bix[j+2] ] += aval * b[ j+2 ];\n- c[ ci+bix[j+3] ] += aval * b[ j+3 ];\n- c[ ci+bix[j+4] ] += aval * b[ j+4 ];\n- c[ ci+bix[j+5] ] += aval * b[ j+5 ];\n- c[ ci+bix[j+6] ] += aval * b[ j+6 ];\n- c[ ci+bix[j+7] ] += aval * b[ j+7 ];\n- }\n- }\n-\n- public static void vectMultiplyAdd( final double aval, double[] b, double[] c, int bi, int ci, final int len )\n- {\n- final int bn = len%8;\n-\n- //rest, not aligned to 8-blocks\n- for( int j = 0; j < bn; j++, bi++, ci++)\n- c[ ci ] += aval * b[ bi ];\n-\n- //unrolled 8-block (for better instruction-level parallelism)\n- for( int j = bn; j < len; j+=8, bi+=8, ci+=8)\n- {\n- //read 64B cachelines of b and c\n- //compute c' = aval * b + c\n- //write back 64B cacheline of c = c'\n- c[ ci+0 ] += aval * b[ bi+0 ];\n- c[ ci+1 ] += aval * b[ bi+1 ];\n- c[ ci+2 ] += aval * b[ bi+2 ];\n- c[ ci+3 ] += aval * b[ bi+3 ];\n- c[ ci+4 ] += aval * b[ bi+4 ];\n- c[ ci+5 ] += aval * b[ bi+5 ];\n- c[ ci+6 ] += aval * b[ bi+6 ];\n- c[ ci+7 ] += aval * b[ bi+7 ];\n- }\n- }\npublic static double vectSum( double[] a, char[] bix, final int ai, final int bi, final int len )\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/LazyWriteBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/LazyWriteBuffer.java", "diff": "@@ -308,7 +308,7 @@ public class LazyWriteBuffer\n_pool.shutdown();\n}\n- private class FileCleanerTask implements Runnable {\n+ private static class FileCleanerTask implements Runnable {\nprivate String _fname = null;\npublic FileCleanerTask( String fname ) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/GMR.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/GMR.java", "diff": "@@ -188,7 +188,8 @@ public class GMR\n}\n}\n- setupDistributedCache(job, instructionsInMapper, otherInstructionsInReducer, realinputs, realrlens, realclens);\n+ boolean resetDistCache = setupDistributedCache(job, instructionsInMapper,\n+ otherInstructionsInReducer, realinputs, realrlens, realclens);\n//set up the input files and their format information\nboolean[] distCacheOnly = getDistCacheOnlyInputs(realIndexes, recordReaderInstruction, instructionsInMapper, aggInstructionsInReducer, otherInstructionsInReducer);\n@@ -301,20 +302,20 @@ public class GMR\nRunningJob runjob=JobClient.runJob(job);\nGroup group=runjob.getCounters().getGroup(MRJobConfiguration.NUM_NONZERO_CELLS);\n- //MatrixCharacteristics[] stats=new MatrixCharacteristics[resultIndexes.length];\n- for(int i=0; i<resultIndexes.length; i++) {\n- // number of non-zeros\n+ for(int i=0; i<resultIndexes.length; i++)\nstats[i].setNonZeros(group.getCounter(Integer.toString(i)));\n- }\n+ //cleanups\nString dir = dimsUnknownFilePrefix + \"/\" + runjob.getID().toString() + \"_dimsFile\";\nstats = MapReduceTool.processDimsFiles(dir, stats);\nMapReduceTool.deleteFileIfExistOnHDFS(dir);\n+ if( resetDistCache )\n+ MRBaseForCommonInstructions.resetDistCache();\nreturn new JobReturn(stats, outputInfos, runjob.isSuccessful());\n}\n- private static void setupDistributedCache(JobConf job, String instMap, String instRed, String[] inputs, long[] rlens, long[] clens)\n+ private static boolean setupDistributedCache(JobConf job, String instMap, String instRed, String[] inputs, long[] rlens, long[] clens)\nthrows 
DMLRuntimeException\n{\n//concatenate mapper and reducer instructions\n@@ -367,11 +368,15 @@ public class GMR\nMRJobConfiguration.setupDistCacheInputs(job, indexString.toString(), pathString.toString(), pathList);\n//clean in-memory cache (prevent job interference in local mode)\n- if( InfrastructureAnalyzer.isLocalMode(job) )\n+ if( InfrastructureAnalyzer.isLocalMode(job) ) {\nMRBaseForCommonInstructions.resetDistCache();\n+ return true;\n}\n}\n+ return false;\n+ }\n+\n/**\n* Determine which indices are only used as inputs through distributed cache and hence would\n* be redundant job inputs.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/FrameReblockBuffer.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/FrameReblockBuffer.java", "diff": "@@ -174,7 +174,7 @@ public class FrameReblockBuffer\n* compute the block indexes on-the-fly based on the given cell indexes.\n*\n*/\n- private class FrameReblockBufferComparator implements Comparator<FrameCell>\n+ private static class FrameReblockBufferComparator implements Comparator<FrameCell>\n{\n@Override\npublic int compare(FrameCell arg0, FrameCell arg1)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/LocalFileUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/LocalFileUtils.java", "diff": "@@ -123,8 +123,8 @@ public class LocalFileUtils\nret.readFields(in);\n}\nfinally {\n- IOUtilFunctions.closeSilently(\n- (InputStream)in);\n+ IOUtilFunctions.closeSilently((InputStream)in);\n+ IOUtilFunctions.closeSilently(fis);\n}\nreturn ret;\n@@ -169,6 +169,7 @@ public class LocalFileUtils\n}\nfinally {\nIOUtilFunctions.closeSilently(out);\n+ IOUtilFunctions.closeSilently(fos);\n}\n}\n@@ -208,6 +209,7 @@ public class LocalFileUtils\n}\nfinally {\nIOUtilFunctions.closeSilently(in);\n+ IOUtilFunctions.closeSilently(fis);\n}\nreturn bufferSize;\n@@ -232,6 +234,7 @@ public class LocalFileUtils\n}\nfinally{\nIOUtilFunctions.closeSilently(out);\n+ IOUtilFunctions.closeSilently(fos);\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1358] Fix minor memory leaks for tests (caches, resources) This patch fixes various minor memory leaks (such as cleanup of static caches on exit instead of init, proper closing of file resources, and removed redundancy) in order to reduce memory pressure on testsuite runs within a single jvm process.
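The LocalFileUtils changes in the record above all apply one pattern: when a data or buffered stream wraps a FileInputStream/FileOutputStream, the finally block now closes both the wrapper and the underlying stream silently, so a failure while constructing or using the wrapper cannot leak the file handle. Below is a minimal, self-contained sketch of that pattern; the local closeSilently helper merely stands in for IOUtilFunctions.closeSilently and is not the SystemML implementation.

```java
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class SilentCloseExample {

    /** Local stand-in for a silent-close utility: ignore close() failures. */
    static void closeSilently(Closeable c) {
        try {
            if (c != null)
                c.close();
        } catch (IOException e) {
            // intentionally ignored, as in the silent-close utility
        }
    }

    /** Reads the first int of a file, closing both the wrapper and the underlying stream. */
    static int readFirstInt(String fname) throws IOException {
        FileInputStream fis = null;
        DataInputStream in = null;
        try {
            fis = new FileInputStream(fname);
            in = new DataInputStream(new BufferedInputStream(fis));
            return in.readInt();
        } finally {
            // close the wrapper first, then the underlying stream (a double close is a no-op)
            closeSilently(in);
            closeSilently(fis);
        }
    }
}
```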
49,738
01.03.2017 22:01:41
28,800
9b69f36a9aad831b8e78c7e45be3d7c80386ec01
Extended code generator for existing cellwise fused ops This patch adds the existing cell-wise fused unary operators selp, sprop, sigmoid, log_nz, as well as ternary operators +* and -* in order to prevent these operators from breaking fusion boundaries.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java", "diff": "@@ -37,7 +37,7 @@ public class CNodeBinary extends CNode\npublic static boolean contains(String value) {\nfor( BinType bt : values() )\n- if( bt.toString().equals(value) )\n+ if( bt.name().equals(value) )\nreturn true;\nreturn false;\n}\n@@ -188,6 +188,7 @@ public class CNodeBinary extends CNode\n}\n}\n+ @Override\npublic void setOutputDims()\n{\nswitch(_type) {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.hops.codegen.cplan;\n+\n+import java.util.Arrays;\n+\n+import org.apache.sysml.parser.Expression.DataType;\n+\n+\n+public class CNodeTernary extends CNode\n+{\n+ public enum TernaryType {\n+ PLUS_MULT, MINUS_MULT;\n+\n+ public static boolean contains(String value) {\n+ for( TernaryType tt : values() )\n+ if( tt.name().equals(value) )\n+ return true;\n+ return false;\n+ }\n+\n+ public String getTemplate(boolean sparse) {\n+ switch (this) {\n+ case PLUS_MULT:\n+ return \" double %TMP% = %IN1% + %IN2% * %IN3%;\\n\" ;\n+\n+ case MINUS_MULT:\n+ return \" double %TMP% = %IN1% - %IN2% * %IN3%;\\n;\\n\" ;\n+\n+ default:\n+ throw new RuntimeException(\"Invalid ternary type: \"+this.toString());\n+ }\n+ }\n+ }\n+\n+ private final TernaryType _type;\n+\n+ public CNodeTernary( CNode in1, CNode in2, CNode in3, TernaryType type ) {\n+ _inputs.add(in1);\n+ _inputs.add(in2);\n+ _inputs.add(in3);\n+ _type = type;\n+ setOutputDims();\n+ }\n+\n+ public TernaryType getType() {\n+ return _type;\n+ }\n+\n+ @Override\n+ public String codegen(boolean sparse) {\n+ if( _generated )\n+ return \"\";\n+\n+ StringBuilder sb = new StringBuilder();\n+\n+ //generate children\n+ sb.append(_inputs.get(0).codegen(sparse));\n+ sb.append(_inputs.get(1).codegen(sparse));\n+ sb.append(_inputs.get(2).codegen(sparse));\n+\n+ //generate binary operation\n+ String var = createVarname();\n+ String tmp = _type.getTemplate(sparse);\n+ tmp = tmp.replaceAll(\"%TMP%\", var);\n+ for( int j=1; j<=3; j++ ) {\n+ String varj = _inputs.get(j-1).getVarname();\n+ tmp = tmp.replaceAll(\"%IN\"+j+\"%\", varj );\n+ }\n+ sb.append(tmp);\n+\n+ //mark as generated\n+ _generated = true;\n+\n+ return sb.toString();\n+ }\n+\n+ @Override\n+ public String toString() {\n+ switch(_type) {\n+ case PLUS_MULT: return \"t(+*)\";\n+ case MINUS_MULT: return \"t(-*)\";\n+ default:\n+ return super.toString();\n+ }\n+ }\n+\n+ @Override\n+ public void setOutputDims() {\n+ switch(_type) {\n+ case PLUS_MULT:\n+ case 
MINUS_MULT:\n+ _rows = 0;\n+ _cols = 0;\n+ _dataType= DataType.SCALAR;\n+ break;\n+ }\n+ }\n+\n+ @Override\n+ public int hashCode() {\n+ if( _hash == 0 ) {\n+ int h1 = super.hashCode();\n+ int h2 = _type.hashCode();\n+ _hash = Arrays.hashCode(new int[]{h1,h2});\n+ }\n+ return _hash;\n+ }\n+\n+ @Override\n+ public boolean equals(Object o) {\n+ if( !(o instanceof CNodeTernary) )\n+ return false;\n+\n+ CNodeTernary that = (CNodeTernary) o;\n+ return super.equals(that)\n+ && _type == that._type;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java", "diff": "@@ -27,16 +27,15 @@ import org.apache.sysml.parser.Expression.DataType;\npublic class CNodeUnary extends CNode\n{\npublic enum UnaryType {\n- ROW_SUMS, LOOKUP, LOOKUP0,\n+ ROW_SUMS, LOOKUP, LOOKUP0, //codegen specific\nEXP, POW2, MULT2, SQRT, LOG,\nABS, ROUND, CEIL, FLOOR, SIGN,\nSIN, COS, TAN, ASIN, ACOS, ATAN,\n- IQM, STOP,\n- DOTPRODUCT_ROW_SUMS; //row sums via dot product for debugging purposes\n+ SELP, SPROP, SIGMOID, LOG_NZ;\npublic static boolean contains(String value) {\nfor( UnaryType ut : values() )\n- if( ut.toString().equals(value) )\n+ if( ut.name().equals(value) )\nreturn true;\nreturn false;\n}\n@@ -82,8 +81,17 @@ public class CNodeUnary extends CNode\nreturn \" double %TMP% = Math.ceil(%IN1%);\\n\";\ncase FLOOR:\nreturn \" double %TMP% = Math.floor(%IN1%);\\n\";\n+ case SELP:\n+ return \" double %TMP% = (%IN1%>0) ? %IN1% : 0;\\n\";\n+ case SPROP:\n+ return \" double %TMP% = %IN1% * (1 - %IN1%);\\n\";\n+ case SIGMOID:\n+ return \" double %TMP% = 1 / (1 + FastMath.exp(-%IN1%));\\n\";\n+ case LOG_NZ:\n+ return \" double %TMP% = (%IN1%==0) ? 
0 : FastMath.log(%IN1%);\\n\";\n+\ndefault:\n- throw new RuntimeException(\"Invalid binary type: \"+this.toString());\n+ throw new RuntimeException(\"Invalid unary type: \"+this.toString());\n}\n}\n}\n@@ -150,8 +158,7 @@ public class CNodeUnary extends CNode\n@Override\npublic void setOutputDims() {\n- switch(_type)\n- {\n+ switch(_type) {\ncase ROW_SUMS:\ncase EXP:\ncase LOOKUP:\n@@ -169,10 +176,12 @@ public class CNodeUnary extends CNode\ncase SQRT:\ncase LOG:\ncase ROUND:\n- case IQM:\n- case STOP:\ncase CEIL:\ncase FLOOR:\n+ case SELP:\n+ case SPROP:\n+ case SIGMOID:\n+ case LOG_NZ:\n_rows = 0;\n_cols = 0;\n_dataType= DataType.SCALAR;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java", "diff": "@@ -33,6 +33,7 @@ import org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.Hop.OpOp2;\n+import org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary.BinType;\n@@ -41,6 +42,9 @@ import org.apache.sysml.hops.codegen.cplan.CNodeData;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary.UnaryType;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary.TernaryType;\n+import org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;\nimport org.apache.sysml.runtime.matrix.data.Pair;\n@@ -67,16 +71,17 @@ public class CellTpl extends BaseTpl\nreturn false;\n//re-assign initialHop to fuse the sum/rowsums (before checking for chains)\n- for (Hop h : _initialHop.getParent())\n+ //TODO add aggbinary (vector tsmm) as potential head for cellwise operation\n+ for (Hop h : _initialHop.getParent()) {\nif( h instanceof AggUnaryOp && ((AggUnaryOp) h).getOp() == AggOp.SUM\n&& ((AggUnaryOp) h).getDirection()!= Direction.Col ) {\n_initialHop = h;\n}\n+ }\n//unary matrix && endHop found && endHop is not direct child of the initialHop (i.e., chain of operators)\nif(_endHop != null && _endHop != _initialHop)\n{\n-\n// if final hop is unary add its child to the input\nif(_endHop instanceof UnaryOp)\n_matrixInputs.add(_endHop.getInput().get(0));\n@@ -199,7 +204,6 @@ public class CellTpl extends BaseTpl\nif( TemplateUtils.isColVector(cdata2) )\ncdata2 = new CNodeUnary(cdata2, UnaryType.LOOKUP);\n-\nif( bop.getOp()==OpOp2.POW && cdata2.isLiteral() && cdata2.getVarname().equals(\"2\") )\nout = new CNodeUnary(cdata1, UnaryType.POW2);\nelse if( bop.getOp()==OpOp2.MULT && cdata2.isLiteral() && cdata2.getVarname().equals(\"2\") )\n@@ -207,6 +211,24 @@ public class CellTpl extends BaseTpl\nelse //default binary\nout = new CNodeBinary(cdata1, cdata2, BinType.valueOf(primitiveOpName));\n}\n+ else if(hop instanceof TernaryOp)\n+ {\n+ TernaryOp top = (TernaryOp) hop;\n+ CNode cdata1 = cnodeData.get(0);\n+ CNode cdata2 = cnodeData.get(1);\n+ CNode cdata3 = cnodeData.get(2);\n+\n+ //cdata1 is vector\n+ if( TemplateUtils.isColVector(cdata1) )\n+ cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP);\n+ //cdata3 is vector\n+ if( TemplateUtils.isColVector(cdata3) )\n+ cdata3 = new 
CNodeUnary(cdata3, UnaryType.LOOKUP);\n+\n+ //construct ternary cnode, primitive operation derived from OpOp3\n+ out = new CNodeTernary(cdata1, cdata2, cdata3,\n+ TernaryType.valueOf(top.getOp().toString()));\n+ }\nelse if (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getOp() == AggOp.SUM\n&& (((AggUnaryOp) hop).getDirection() == Direction.RowCol\n|| ((AggUnaryOp) hop).getDirection() == Direction.Row) && root == hop)\n@@ -283,7 +305,11 @@ public class CellTpl extends BaseTpl\n&& TemplateUtils.isVectorOrScalar(hop.getInput().get(1)) && !TemplateUtils.isBinaryMatrixRowVector(hop))\n||(TemplateUtils.isVectorOrScalar( hop.getInput().get(0))\n&& hop.getInput().get(1).getDataType() == DataType.MATRIX && !TemplateUtils.isBinaryMatrixRowVector(hop)) );\n+ boolean isTernaryVectorScalarVector = hop instanceof TernaryOp && hop.getInput().size()==3 && hop.dimsKnown()\n+ && HopRewriteUtils.checkInputDataTypes(hop, DataType.MATRIX, DataType.SCALAR, DataType.MATRIX)\n+ && TemplateUtils.isVector(hop.getInput().get(0)) && TemplateUtils.isVector(hop.getInput().get(2));\n+\nreturn hop.getDataType() == DataType.MATRIX && TemplateUtils.isOperationSupported(hop)\n- && (hop instanceof UnaryOp || isBinaryMatrixScalar || isBinaryMatrixVector);\n+ && (hop instanceof UnaryOp || isBinaryMatrixScalar || isBinaryMatrixVector || isTernaryVectorScalarVector);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -33,6 +33,7 @@ import org.apache.sysml.hops.DataOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.ReorgOp;\n+import org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.Hop.ReOrgOp;\n@@ -45,6 +46,7 @@ import org.apache.sysml.hops.codegen.cplan.CNodeOuterProduct;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary.UnaryType;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary.TernaryType;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;\nimport org.apache.sysml.runtime.matrix.data.Pair;\n@@ -231,10 +233,11 @@ public class TemplateUtils\npublic static boolean isOperationSupported(Hop h) {\nif(h instanceof UnaryOp)\n- return UnaryType.contains(((UnaryOp)h).getOp().toString());\n+ return UnaryType.contains(((UnaryOp)h).getOp().name());\nelse if(h instanceof BinaryOp)\n- return BinType.contains(((BinaryOp)h).getOp().toString());\n- else\n+ return BinType.contains(((BinaryOp)h).getOp().name());\n+ else if(h instanceof TernaryOp)\n+ return TernaryType.contains(((TernaryOp)h).getOp().name());\nreturn false;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -779,6 +779,13 @@ public class HopRewriteUtils\nreturn false;\n}\n+ public static boolean checkInputDataTypes(Hop hop, DataType... 
dt) {\n+ for( int i=0; i<hop.getInput().size(); i++ )\n+ if( hop.getInput().get(i).getDataType() != dt[i] )\n+ return false;\n+ return true;\n+ }\n+\npublic static boolean isFullColumnIndexing(LeftIndexingOp hop)\n{\nboolean colPred = hop.getColLowerEqualsUpper(); //single col\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1361] Extended code generator for existing cellwise fused ops This patch adds the existing cell-wise fused unary operators selp, sprop, sigmoid, log_nz, as well as ternary operators +* and -* in order to prevent these operators from breaking fusion boundaries.
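The CNode classes in the record above generate Java source by textual template expansion: each operator type carries a template such as `double %TMP% = %IN1% + %IN2% * %IN3%;`, into which a fresh output variable and the child variable names are substituted via replaceAll. The self-contained sketch below reproduces only that substitution step; the plus-mult template matches the diff, while the class name and example variable names are illustrative assumptions.

```java
public class TernaryTemplateExample {

    // Template for the fused plus-mult cell operation, as in CNodeTernary.TernaryType.PLUS_MULT
    private static final String PLUS_MULT_TEMPLATE =
        "double %TMP% = %IN1% + %IN2% * %IN3%;\n";

    /** Expands the template with an output variable and the input variable names. */
    static String expand(String template, String outVar, String... inVars) {
        String code = template.replaceAll("%TMP%", outVar);
        for (int j = 1; j <= inVars.length; j++)
            code = code.replaceAll("%IN" + j + "%", inVars[j - 1]);
        return code;
    }

    public static void main(String[] args) {
        // e.g. fuses X + s * Y for the current cell into a single generated statement
        System.out.print(expand(PLUS_MULT_TEMPLATE, "TMP4", "a", "scalars[0]", "b"));
        // prints: double TMP4 = a + scalars[0] * b;
    }
}
```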
49,717
03.03.2017 18:11:45
28,800
3757995b50aef019b0ce22d9ae93eae42aed02b4
Upgraded to use jcuda8 (from the maven repo) Closes
[ { "change_type": "MODIFY", "old_path": "docs/devdocs/gpu-backend.md", "new_path": "docs/devdocs/gpu-backend.md", "diff": "@@ -19,52 +19,43 @@ limitations under the License.\n# Initial prototype for GPU backend\n-A GPU backend implements two important abstract classes:\n+The GPU backend implements two important abstract classes:\n1. `org.apache.sysml.runtime.controlprogram.context.GPUContext`\n2. `org.apache.sysml.runtime.controlprogram.context.GPUObject`\n-The GPUContext is responsible for GPU memory management and initialization/destruction of Cuda handles.\n+The `GPUContext` is responsible for GPU memory management and initialization/destruction of Cuda handles.\n+Currently, an active instance of the `GPUContext` class is made available globally and is used to store handles\n+of the allocated blocks on the GPU. A count is kept per block for the number of instructions that need it.\n+When the count is 0, the block may be evicted on a call to `GPUObject.evict()`.\n-A GPUObject (like RDDObject and BroadcastObject) is stored in CacheableData object. It gets call-backs from SystemML's bufferpool on following methods\n+A `GPUObject` (like RDDObject and BroadcastObject) is stored in CacheableData object. It gets call-backs from SystemML's bufferpool on following methods\n1. void acquireDeviceRead()\n-2. void acquireDenseDeviceModify(int numElemsToAllocate)\n-3. void acquireHostRead()\n-4. void acquireHostModify()\n-5. void release(boolean isGPUCopyModified)\n+2. void acquireDeviceModifyDense()\n+3. void acquireDeviceModifySparse\n+4. void acquireHostRead()\n+5. void acquireHostModify()\n+6. void releaseInput()\n+7. void releaseOutput()\n-## JCudaContext:\n-The current prototype supports Nvidia's CUDA libraries using JCuda wrapper. The implementation for the above classes can be found in:\n-1. `org.apache.sysml.runtime.controlprogram.context.JCudaContext`\n-2. `org.apache.sysml.runtime.controlprogram.context.JCudaObject`\n+Sparse matrices on GPU are represented in `CSR` format. In the SystemML runtime, they are represented in `MCSR` or modified `CSR` format.\n+A conversion cost is incurred when sparse matrices are sent back and forth between host and device memory.\n-### Setup instructions for JCudaContext:\n+Concrete classes `JCudaContext` and `JCudaObject` (which extend `GPUContext` & `GPUObject` respectively) contain references to `org.jcuda.*`.\n-1. Follow the instructions from `https://developer.nvidia.com/cuda-downloads` and install CUDA 7.5.\n-2. Follow the instructions from `https://developer.nvidia.com/cudnn` and install CuDNN v4.\n-3. Download install JCuda binaries version 0.7.5b and JCudnn version 0.7.5. Easiest option would be to use mavenized jcuda:\n-```python\n-git clone https://github.com/MysterionRise/mavenized-jcuda.git\n-mvn -Djcuda.version=0.7.5b -Djcudnn.version=0.7.5 clean package\n-CURR_DIR=`pwd`\n-JCUDA_PATH=$CURR_DIR\"/target/lib/\"\n-JAR_PATH=\".\"\n-for j in `ls $JCUDA_PATH/*.jar`\n-do\n- JAR_PATH=$JAR_PATH\":\"$j\n-done\n-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JCUDA_PATH\n-```\n+The `LibMatrixCUDA` class contains methods to invoke CUDA libraries (where available) and invoke custom kernels.\n+Runtime classes (that extend `GPUInstruction`) redirect calls to functions in this class.\n+Some functions in `LibMatrixCUDA` need finer control over GPU memory management primitives. 
These are provided by `JCudaObject`.\n+\n+### Setup instructions:\n-Note for Windows users:\n-* CuDNN v4 is available to download: `http://developer.download.nvidia.com/compute/redist/cudnn/v4/cudnn-7.0-win-x64-v4.0-prod.zip`\n-* If above steps doesn't work for JCuda, copy the DLLs into C:\\lib (or /lib) directory.\n+1. Follow the instructions from `https://developer.nvidia.com/cuda-downloads` and install CUDA 8.0.\n+2. Follow the instructions from `https://developer.nvidia.com/cudnn` and install CuDNN v5.1.\n-To use SystemML's GPU backend,\n+To use SystemML's GPU backend when using the jar or uber-jar\n1. Add JCuda's jar into the classpath.\n-2. Include CUDA, CuDNN and JCuda's libraries in LD_LIBRARY_PATH (or using -Djava.library.path).\n-3. Use `-gpu` flag.\n+2. Use `-gpu` flag.\nFor example: to use GPU backend in standalone mode:\n-```python\n-java -classpath $JAR_PATH:systemml-0.10.0-incubating-SNAPSHOT-standalone.jar org.apache.sysml.api.DMLScript -f MyDML.dml -gpu -exec singlenode ...\n+```bash\n+java -classpath $JAR_PATH:systemml-0.14.0-incubating-SNAPSHOT-standalone.jar org.apache.sysml.api.DMLScript -f MyDML.dml -gpu -exec singlenode ...\n```\n" }, { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<scala.test.version>2.2.6</scala.test.version>\n<maven.build.timestamp.format>yyyy-MM-dd HH:mm:ss z</maven.build.timestamp.format>\n<enableGPU>false</enableGPU>\n+ <jcuda.scope>provided</jcuda.scope>\n+ <jcuda.version>0.8.0</jcuda.version>\n<!-- OS-specific JVM arguments for running integration tests -->\n<integrationTestExtraJVMArgs />\n</properties>\n<enabled>true</enabled>\n</releases>\n</repository>\n- <repository>\n- <id>mavenized-jcuda-mvn-repo</id>\n- <url>https://raw.github.com/niketanpansare/mavenized-jcuda/mvn-repo/</url>\n- <snapshots>\n- <enabled>true</enabled>\n- <updatePolicy>always</updatePolicy>\n- </snapshots>\n- </repository>\n</repositories>\n<build>\n<goals>\n<goal>shade</goal>\n</goals>\n+ <configuration>\n+ <artifactSet>\n+ <!--<excludes>\n+ <exclude>org.jcuda:*</exclude>\n+ </excludes>-->\n+ </artifactSet>\n+ </configuration>\n</execution>\n</executions>\n</build>\n<profiles>\n+\n+ <profile>\n+ <id>windows-x86_64</id>\n+ <activation>\n+ <os>\n+ <family>windows</family>\n+ <arch>amd64</arch>\n+ </os>\n+ </activation>\n+ <properties>\n+ <jcuda.os>windows</jcuda.os>\n+ <jcuda.arch>x86_64</jcuda.arch>\n+ </properties>\n+ </profile>\n+ <profile>\n+ <id>linux-x86_64</id>\n+ <activation>\n+ <os>\n+ <family>unix</family>\n+ <arch>amd64</arch>\n+ </os>\n+ </activation>\n+ <properties>\n+ <jcuda.os>linux</jcuda.os>\n+ <jcuda.arch>x86_64</jcuda.arch>\n+ </properties>\n+ </profile>\n+ <profile>\n+ <id>apple-x86_64</id>\n+ <activation>\n+ <os>\n+ <family>mac</family>\n+ <arch>x86_64</arch>\n+ </os>\n+ </activation>\n+ <properties>\n+ <jcuda.os>apple</jcuda.os>\n+ <jcuda.arch>x86_64</jcuda.arch>\n+ </properties>\n+ </profile>\n+ <profile>\n+ <id>linux-ppc_64</id>\n+ <activation>\n+ <os>\n+ <family>unix</family>\n+ <arch>ppc64le</arch>\n+ </os>\n+ </activation>\n+ <properties>\n+ <jcuda.os>linux</jcuda.os>\n+ <jcuda.arch>ppc_64</jcuda.arch>\n+ </properties>\n+ </profile>\n+\n<profile>\n<id>scala-2.10</id>\n<properties>\n<dependencies>\n- <!-- For GPU backend\n- Use org.mystic:mavenized-jcuda until Alan puts org.jcuda:*\n- -->\n- <dependency>\n- <groupId>org.mystic</groupId>\n- <artifactId>mavenized-jcuda</artifactId>\n- <version>0.7.5b</version>\n- <type>jar</type>\n- <scope>provided</scope>\n- <exclusions>\n- <exclusion>\n- <groupId>*</groupId>\n- 
<artifactId>*</artifactId>\n- </exclusion>\n- </exclusions>\n- </dependency>\n- <!-- Since there is no mvn repo for jcuda\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda</artifactId>\n- <version>0.7.5b</version>\n- <scope>provided</scope>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas</artifactId>\n- <version>0.7.5b</version>\n- <scope>provided</scope>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcufft</artifactId>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse</artifactId>\n- <version>0.7.5b</version>\n- <scope>provided</scope>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusolver</artifactId>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcurand</artifactId>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jnvgraph</artifactId>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn</artifactId>\n- <version>0.7.5</version>\n- <scope>provided</scope>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcuda-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcublas-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcufft-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusparse-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusolver-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcurand-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jnvgraph-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcudnn-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ 
<scope>${jcuda.scope}</scope>\n</dependency>\n- -->\n- <!-- ************************* -->\n<dependency>\n<groupId>org.apache.spark</groupId>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -25,6 +25,7 @@ import static jcuda.jcudnn.JCudnn.cudnnActivationForward;\nimport static jcuda.jcudnn.JCudnn.cudnnConvolutionBackwardData;\nimport static jcuda.jcudnn.JCudnn.cudnnConvolutionBackwardFilter;\nimport static jcuda.jcudnn.JCudnn.cudnnConvolutionForward;\n+import static jcuda.jcudnn.JCudnn.cudnnCreateActivationDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnCreateConvolutionDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnCreateFilterDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnCreatePoolingDescriptor;\n@@ -38,12 +39,14 @@ import static jcuda.jcudnn.JCudnn.cudnnGetConvolutionBackwardFilterWorkspaceSize\nimport static jcuda.jcudnn.JCudnn.cudnnGetConvolutionForwardWorkspaceSize;\nimport static jcuda.jcudnn.JCudnn.cudnnPoolingBackward;\nimport static jcuda.jcudnn.JCudnn.cudnnPoolingForward;\n+import static jcuda.jcudnn.JCudnn.cudnnSetActivationDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnSetConvolution2dDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnSetFilter4dDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnSetPooling2dDescriptor;\nimport static jcuda.jcudnn.JCudnn.cudnnSetTensor4dDescriptor;\nimport static jcuda.jcudnn.cudnnConvolutionMode.CUDNN_CROSS_CORRELATION;\nimport static jcuda.jcudnn.cudnnDataType.CUDNN_DATA_DOUBLE;\n+import static jcuda.jcudnn.cudnnNanPropagation.CUDNN_PROPAGATE_NAN;\nimport static jcuda.jcudnn.cudnnPoolingMode.CUDNN_POOLING_MAX;\nimport static jcuda.jcudnn.cudnnTensorFormat.CUDNN_TENSOR_NCHW;\nimport static jcuda.jcusparse.JCusparse.cusparseDcsrgemm;\n@@ -75,6 +78,7 @@ import jcuda.jcublas.JCublas2;\nimport jcuda.jcublas.cublasFillMode;\nimport jcuda.jcublas.cublasHandle;\nimport jcuda.jcublas.cublasOperation;\n+import jcuda.jcudnn.cudnnActivationDescriptor;\nimport jcuda.jcudnn.cudnnConvolutionDescriptor;\nimport jcuda.jcudnn.cudnnConvolutionFwdPreference;\nimport jcuda.jcudnn.cudnnFilterDescriptor;\n@@ -268,7 +272,7 @@ public class LibMatrixCUDA {\nprivate static cudnnFilterDescriptor allocateFilterDescriptor(int K, int C, int R, int S) {\ncudnnFilterDescriptor filterDesc = new cudnnFilterDescriptor();\ncudnnCreateFilterDescriptor(filterDesc);\n- cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_DOUBLE, K, C, R, S);\n+ cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_DOUBLE, CUDNN_TENSOR_NCHW, K, C, R, S);\nreturn filterDesc;\n}\n@@ -285,7 +289,7 @@ public class LibMatrixCUDA {\nprivate static cudnnPoolingDescriptor allocatePoolingDescriptor(int R, int S, int pad_h, int pad_w, int stride_h, int stride_w) {\ncudnnPoolingDescriptor poolingDesc = new cudnnPoolingDescriptor();\ncudnnCreatePoolingDescriptor(poolingDesc);\n- cudnnSetPooling2dDescriptor(poolingDesc, CUDNN_POOLING_MAX, R, S, pad_h, pad_w, stride_h, stride_w);\n+ cudnnSetPooling2dDescriptor(poolingDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, R, S, pad_h, pad_w, stride_h, stride_w);\nreturn poolingDesc;\n}\n@@ -474,8 +478,11 @@ public class LibMatrixCUDA {\n// Allocate descriptors\nsrcTensorDesc = allocateTensorDescriptor((int)N, 1, (int)H, (int)W);\ndstTensorDesc = allocateTensorDescriptor((int)N, 1, (int)H, (int)W);\n-\n- cudnnActivationForward(cudnnHandle, CUDNN_ACTIVATION_RELU,\n+ cudnnActivationDescriptor 
activationDescriptor = new cudnnActivationDescriptor();\n+ cudnnCreateActivationDescriptor(activationDescriptor);\n+ double dummy = -1;\n+ cudnnSetActivationDescriptor(activationDescriptor, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, dummy);\n+ cudnnActivationForward(cudnnHandle, activationDescriptor,\nalpha, srcTensorDesc, srcData,\nbeta, dstTensorDesc, dstData);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
Upgraded to use jcuda8 (from the maven repo) Closes #291
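The commit above moves SystemML from the unofficial mavenized JCuda 0.7.5 artifacts to the official JCuda 0.8.0 ones and, in LibMatrixCUDA, adapts the cuDNN calls to the v5 descriptor-based API (filter descriptors now take a tensor format; pooling and activation descriptors take a NaN-propagation flag). The snippet below is a minimal sketch of just the new activation path, written for illustration rather than taken from the repository; it assumes the JCuda 0.8.0 jcudnn bindings are on the classpath and that the caller supplies a valid cudnnHandle, tensor descriptors, device pointers, and alpha/beta scaling pointers.

```java
import static jcuda.jcudnn.JCudnn.cudnnActivationForward;
import static jcuda.jcudnn.JCudnn.cudnnCreateActivationDescriptor;
import static jcuda.jcudnn.JCudnn.cudnnSetActivationDescriptor;
import static jcuda.jcudnn.cudnnActivationMode.CUDNN_ACTIVATION_RELU;
import static jcuda.jcudnn.cudnnNanPropagation.CUDNN_PROPAGATE_NAN;

import jcuda.Pointer;
import jcuda.jcudnn.cudnnActivationDescriptor;
import jcuda.jcudnn.cudnnHandle;
import jcuda.jcudnn.cudnnTensorDescriptor;

public class ReluV5Sketch {

    /**
     * cuDNN v5 style ReLU forward pass: the activation mode is carried by a
     * descriptor instead of being passed directly to cudnnActivationForward.
     * The caller supplies a valid handle, tensor descriptors, device pointers
     * and host pointers for the alpha/beta scaling factors.
     */
    public static void reluForward(cudnnHandle handle,
            cudnnTensorDescriptor srcDesc, Pointer srcData,
            cudnnTensorDescriptor dstDesc, Pointer dstData,
            Pointer alpha, Pointer beta) {
        cudnnActivationDescriptor actDesc = new cudnnActivationDescriptor();
        cudnnCreateActivationDescriptor(actDesc);
        // The trailing coefficient is ignored for plain ReLU (the patch passes a dummy value).
        cudnnSetActivationDescriptor(actDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0);
        cudnnActivationForward(handle, actDesc,
                alpha, srcDesc, srcData,
                beta, dstDesc, dstData);
    }
}
```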
49,738
03.03.2017 19:04:11
28,800
6946ff047f3f67f1b252261980473fd51b456280
Fix minor memory leaks for tests (cla, test utils)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/estim/CompressedSizeEstimatorSample.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/estim/CompressedSizeEstimatorSample.java", "diff": "@@ -45,20 +45,12 @@ public class CompressedSizeEstimatorSample extends CompressedSizeEstimator\npublic static final boolean HAAS_AND_STOKES_UJ2A_CUT2 = true; //cut frequency in half\npublic static final boolean HAAS_AND_STOKES_UJ2A_SOLVE = true; //true recommended\npublic static final int MAX_SOLVE_CACHE_SIZE = 64*1024; //global 2MB cache\n- //note: we use a relatively high ALPHA2 and the cut-in-half approach because it\n- //leads to moderate overestimation (compared to systematic underestimation) in\n- //order to follow a conservative approach\nprivate static final Log LOG = LogFactory.getLog(CompressedSizeEstimatorSample.class.getName());\n- private static ThreadLocal<RandomDataGenerator> _rng = new ThreadLocal<RandomDataGenerator>() {\n- protected RandomDataGenerator initialValue() { return new RandomDataGenerator(); }\n- };\n-\nprivate int[] _sampleRows = null;\nprivate HashMap<Integer, Double> _solveCache = null;\n-\npublic CompressedSizeEstimatorSample(MatrixBlock data, int sampleSize)\nthrows DMLRuntimeException\n{\n@@ -315,7 +307,7 @@ public class CompressedSizeEstimatorSample extends CompressedSizeEstimator\nprivate static int[] getSortedUniformSample(int range, int smplSize) {\nif (smplSize == 0)\nreturn new int[] {};\n- RandomDataGenerator rng = _rng.get();\n+ RandomDataGenerator rng = new RandomDataGenerator();\nint[] sample = rng.nextPermutation(range, smplSize);\nArrays.sort(sample);\nreturn sample;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java", "new_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java", "diff": "@@ -95,81 +95,6 @@ public class TestUtils\nprivate static ArrayList<String> _AssertInfos = new ArrayList<String>();\nprivate static boolean _AssertOccured = false;\n- /**\n- * <p>\n- * Compares to arrays for equality. The elements in the array can be in\n- * different order.\n- * </p>\n- *\n- * @param expecteds\n- * expected values\n- * @param actuals\n- * actual values\n- */\n- public static void assertInterchangedArraysEquals(String[] expecteds, String[] actuals) {\n- assertEquals(\"different number of elements in arrays\", expecteds.length, actuals.length);\n- ArrayList<Integer> foundIndexes = new ArrayList<Integer>();\n- expactation: for (int i = 0; i < expecteds.length; i++) {\n- for (int j = 0; j < actuals.length; j++) {\n- if (expecteds[i] == actuals[j] && !foundIndexes.contains(Integer.valueOf(j))) {\n- foundIndexes.add(Integer.valueOf(j));\n- continue expactation;\n- }\n- }\n- fail(\"Missing element \" + expecteds[i]);\n- }\n- }\n-\n- /**\n- * <p>\n- * Compares to arrays for equality. 
The elements in the array can be in\n- * different order.\n- * </p>\n- *\n- * @param expecteds\n- * expected values\n- * @param actuals\n- * actual values\n- */\n- public static void assertInterchangedArraysEquals(int[] expecteds, int[] actuals) {\n- assertEquals(\"different number of elements in arrays\", expecteds.length, actuals.length);\n- ArrayList<Integer> foundIndexes = new ArrayList<Integer>();\n- expactation: for (int i = 0; i < expecteds.length; i++) {\n- for (int j = 0; j < actuals.length; j++) {\n- if (expecteds[i] == actuals[j] && !foundIndexes.contains(Integer.valueOf(j))) {\n- foundIndexes.add(Integer.valueOf(j));\n- continue expactation;\n- }\n- }\n- fail(\"Missing element \" + expecteds[i]);\n- }\n- }\n-\n- /**\n- * <p>\n- * Compares to arrays for equality. The elements in the array can be in\n- * different order.\n- * </p>\n- *\n- * @param expecteds\n- * expected values\n- * @param actuals\n- * actual values\n- */\n- public static void assertInterchangedArraysEquals(double[] expecteds, double[] actuals) {\n- assertEquals(\"different number of elements in arrays\", expecteds.length, actuals.length);\n- ArrayList<Integer> foundIndexes = new ArrayList<Integer>();\n- expactation: for (int i = 0; i < expecteds.length; i++) {\n- for (int j = 0; j < actuals.length; j++) {\n- if (expecteds[i] == actuals[j] && !foundIndexes.contains(Integer.valueOf(j))) {\n- foundIndexes.add(Integer.valueOf(j));\n- continue expactation;\n- }\n- }\n- fail(\"Missing element \" + expecteds[i]);\n- }\n- }\n-\n/* Compare expected scalar generated by Java with actual scalar generated by DML */\npublic static void compareDMLScalarWithJavaScalar(String expectedFile, String actualFile, double epsilon) {\ntry {\n@@ -337,7 +262,7 @@ public class TestUtils\ndouble v = Double.parseDouble(st.nextToken());\nactualValues.put(new CellIndex(i, j), v);\n}\n-\n+ outIn.close();\nint countErrors = 0;\n@@ -349,8 +274,6 @@ public class TestUtils\nif (actualValue == null)\nactualValue = 0.0;\n- // System.out.println(\"actual value: \"+actualValue+\", expected value: \"+expectedValue);\n-\nif (!compareCellValue(expectedValue, actualValue, epsilon, false)) {\nSystem.out.println(expectedFile+\": \"+index+\" mismatch: expected \" + expectedValue + \", actual \" + actualValue);\ncountErrors++;\n@@ -361,6 +284,7 @@ public class TestUtils\nfail(\"unable to read file: \" + e.getMessage());\n}\n}\n+\n/**\n* <p>\n* Compares the expected values calculated in Java by testcase and which are\n@@ -421,8 +345,6 @@ public class TestUtils\nif (actualValue == null)\nactualValue = 0.0;\n- // System.out.println(\"actual value: \"+actualValue+\", expected value: \"+expectedValue);\n-\nif (!compareCellValue(expectedValue, actualValue, epsilon, false)) {\nSystem.out.println(expectedFile+\": \"+index+\" mismatch: expected \" + expectedValue + \", actual \" + actualValue);\ncountErrors++;\n@@ -482,10 +404,6 @@ public class TestUtils\n* @param filePath\n* @return\n*/\n-\n- // TODO: we must use http://www.inf.uni-konstanz.de/algo/lehre/ws05/pp/mtj/mvio/MatrixVectorReader.html\n- // to read matrices from R\n-\npublic static HashMap<CellIndex, Double> readRMatrixFromFS(String filePath)\n{\nHashMap<CellIndex, Double> expectedValues = new HashMap<CellIndex, Double>();\n@@ -600,7 +518,9 @@ public class TestUtils\nFileStatus[] outFiles = fs.listStatus(outDirectory);\nfor (FileStatus file : outFiles) {\nFSDataInputStream fsout = fs.open(file.getPath());\n- sb.append(IOUtils.toString(new InputStreamReader(fsout)));\n+ InputStreamReader is = new 
InputStreamReader(fsout);\n+ sb.append(IOUtils.toString(is));\n+ is.close();\n}\nreturn sb.toString();\n} catch (IOException e) {\n@@ -681,8 +601,7 @@ public class TestUtils\nout.append(fileContents);\n}\n} finally {\n- if (null != out)\n- out.close();\n+ IOUtilFunctions.closeSilently(out);\n}\ncsvFile = tmp.getCanonicalPath();\n@@ -1078,7 +997,7 @@ public class TestUtils\nHashMap<CellIndex, Double> expectedValues = new HashMap<CellIndex, Double>();\nHashMap<CellIndex, Double> actualValues = new HashMap<CellIndex, Double>();\nString line;\n- /** skip both R header lines */\n+ // skip both R header lines\ncompareIn.readLine();\ncompareIn.readLine();\nwhile ((line = compareIn.readLine()) != null) {\n@@ -1376,20 +1295,13 @@ public class TestUtils\n*/\npublic static double[][] generateTestMatrix(int rows, int cols, double min, double max, double sparsity, long seed) {\ndouble[][] matrix = new double[rows][cols];\n- Random random;\n- if (seed == -1)\n- random = TestUtils.random;\n- else\n- random = new Random(seed);\n-\n+ Random random = (seed == -1) ? TestUtils.random : new Random(seed);\nfor (int i = 0; i < rows; i++) {\nfor (int j = 0; j < cols; j++) {\nif (random.nextDouble() > sparsity)\ncontinue;\nmatrix[i][j] = (random.nextDouble() * (max - min) + min);\n- // System.out.print(matrix[i][j] + \"(\" + i + \",\" + j + \")\");\n}\n- // System.out.println();\n}\nreturn matrix;\n@@ -1418,12 +1330,7 @@ public class TestUtils\n*/\npublic static double[][] generateNonZeroTestMatrix(int rows, int cols, double min, double max, long seed) {\ndouble[][] matrix = new double[rows][cols];\n- Random random;\n- if (seed == -1)\n- random = TestUtils.random;\n- else\n- random = new Random(seed);\n-\n+ Random random = (seed == -1) ? TestUtils.random : new Random(seed);\nfor (int i = 0; i < rows; i++) {\nfor (int j = 0; j < cols; j++) {\ndouble randValue;\n@@ -1468,11 +1375,7 @@ public class TestUtils\nPath inFile = new Path(file);\nDataOutputStream out = fs.create(inFile);\nPrintWriter pw = new PrintWriter(out);\n- Random random;\n- if (seed == -1)\n- random = TestUtils.random;\n- else\n- random = new Random(seed);\n+ Random random = (seed == -1) ? 
TestUtils.random : new Random(seed);\nfor (int i = 1; i <= rows; i++) {\nfor (int j = 1; j <= cols; j++) {\n@@ -1484,7 +1387,6 @@ public class TestUtils\n}\n}\npw.close();\n- out.close();\n} catch (IOException e) {\nfail(\"unable to write test matrix: \" + e.getMessage());\n}\n@@ -1512,10 +1414,8 @@ public class TestUtils\ntry\n{\n//create outputstream to HDFS / FS and writer\n- DataOutputStream out = null;\nFileSystem fs = FileSystem.get(conf);\n- out = fs.create(new Path(file), true);\n-\n+ DataOutputStream out = fs.create(new Path(file), true);\nBufferedWriter pw = new BufferedWriter(new OutputStreamWriter(out));\n//writer actual matrix\n@@ -1536,7 +1436,6 @@ public class TestUtils\n//close writer and streams\npw.close();\n- out.close();\n}\ncatch (IOException e)\n{\n@@ -1605,7 +1504,6 @@ public class TestUtils\n//close writer and streams\npw.close();\n- out.close();\n}\ncatch (IOException e)\n{\n@@ -1695,7 +1593,6 @@ public class TestUtils\nPrintWriter pw = new PrintWriter(out);\npw.println(value);\npw.close();\n- out.close();\n} catch (IOException e) {\nfail(\"unable to write test scalar (\" + file + \"): \" + e.getMessage());\n}\n@@ -1867,20 +1764,9 @@ public class TestUtils\n* temporary script file\n*/\npublic static void renameTempDMLScript(String dmlScriptFile) {\n- // try {\n- // FileSystem fs = FileSystem.get(conf);\n- // Path oldPath = new Path(dmlScriptFile + \"t\");\n- // Path newPath = new Path(dmlScriptFile);\n- // if (fs.exists(oldPath))\n- // fs.rename(oldPath, newPath);\nFile oldPath = new File(dmlScriptFile + \"t\");\nFile newPath = new File(dmlScriptFile);\noldPath.renameTo(newPath);\n-\n- /*\n- * } catch (IOException e) { e.printStackTrace();\n- * fail(\"unable to write dml script back: \" + e.getMessage()); }\n- */\n}\n/**\n@@ -2019,6 +1905,7 @@ public class TestUtils\nfor (FileStatus file : files) {\nSequenceFile.Reader reader = new SequenceFile.Reader(FileSystem.get(conf), file.getPath(), conf);\n+ try {\nwhile (reader.next(indexes, value)) {\nif (value.getNumRows() < rowsInBlock) {\nif (rowsInLastBlock == -1)\n@@ -2074,8 +1961,10 @@ public class TestUtils\n}\n}\n}\n-\n- reader.close();\n+ }\n+ finally {\n+ IOUtilFunctions.closeSilently(reader);\n+ }\n}\nlong nonZeros = 0;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1358] Fix minor memory leaks for tests (cla, test utils)
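Most of the leaks fixed above come from readers, streams and SequenceFile readers in TestUtils that were opened but not closed on all paths; the patch funnels them through try/finally and IOUtilFunctions.closeSilently. The following self-contained sketch shows the same pattern; `closeSilently` here is a local helper written for the sketch, standing in for the project's IOUtilFunctions.closeSilently.

```java
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.FileReader;
import java.io.IOException;

public class CloseSilentlyDemo {

    // Local stand-in for IOUtilFunctions.closeSilently: releases the resource
    // without letting a failing close() mask the original exception.
    static void closeSilently(Closeable c) {
        try {
            if (c != null)
                c.close();
        } catch (IOException e) {
            // intentionally ignored
        }
    }

    static long countLines(String path) throws IOException {
        BufferedReader in = null;
        try {
            in = new BufferedReader(new FileReader(path));
            long n = 0;
            while (in.readLine() != null)
                n++;
            return n;
        } finally {
            closeSilently(in); // runs on success and on failure alike
        }
    }

    public static void main(String[] args) throws IOException {
        if (args.length > 0)
            System.out.println(countLines(args[0]));
    }
}
```

Wrapping close() this way keeps an exception thrown during cleanup from hiding the original failure while still guaranteeing the file descriptor is released.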
49,717
06.03.2017 14:22:22
28,800
c7eebddb17820398ac5e8ee740c6944d893ec95a
toString now prints NaN & Infinity like how as.scalar prints them. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/DataConverter.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/DataConverter.java", "diff": "@@ -780,6 +780,22 @@ public class DataConverter\n}\n}\n+ /**\n+ * Convenience method to print NaN & Infinity compliant with how as.scalar prints them.\n+ * {@link DecimalFormat} prints NaN as \\uFFFD and Infinity as \\u221E\n+ * http://docs.oracle.com/javase/6/docs/api/java/text/DecimalFormat.html\n+ * @param df The {@link DecimalFormat} instance, constructed with the appropriate options\n+ * @param value The double value to print\n+ * @return a string formatted with the {@link DecimalFormat} instance or \"NaN\" or \"Infinity\" or \"-Infinity\"\n+ */\n+ private static String dfFormat(DecimalFormat df, double value) {\n+ if (Double.isNaN(value) || Double.isInfinite(value)){\n+ return Double.toString(value);\n+ } else {\n+ return df.format(value);\n+ }\n+ }\n+\npublic static String toString(MatrixBlock mb) {\nreturn toString(mb, false, \" \", \"\\n\", mb.getNumRows(), mb.getNumColumns(), 3);\n}\n@@ -826,7 +842,7 @@ public class DataConverter\nif (row < rowLength && col < colLength) {\n// Print (row+1) and (col+1) since for a DML user, everything is 1-indexed\nsb.append(row+1).append(separator).append(col+1).append(separator);\n- sb.append(df.format(value)).append(lineseparator);\n+ sb.append(dfFormat(df, value)).append(lineseparator);\n}\n}\n} else { // Block is in dense format\n@@ -835,7 +851,7 @@ public class DataConverter\ndouble value = mb.getValue(i, j);\nif (value != 0.0){\nsb.append(i+1).append(separator).append(j+1).append(separator);\n- sb.append(df.format(value)).append(lineseparator);\n+ sb.append(dfFormat(df, value)).append(lineseparator);\n}\n}\n}\n@@ -845,11 +861,11 @@ public class DataConverter\nfor (int i=0; i<rowLength; i++){\nfor (int j=0; j<colLength-1; j++){\ndouble value = mb.quickGetValue(i, j);\n- sb.append(df.format(value));\n+ sb.append(dfFormat(df, value));\nsb.append(separator);\n}\ndouble value = mb.quickGetValue(i, colLength-1);\n- sb.append(df.format(value)); // Do not put separator after last element\n+ sb.append(dfFormat(df, value)); // Do not put separator after last element\nsb.append(lineseparator);\n}\n}\n@@ -910,7 +926,7 @@ public class DataConverter\nfor( int j=0; j<colLength; j++ ) {\nif( row[j]!=null ) {\nif( fb.getSchema()[j] == ValueType.DOUBLE )\n- sb.append(df.format(row[j]));\n+ sb.append(dfFormat(df, (Double)row[j]));\nelse\nsb.append(row[j]);\nif( j != colLength-1 )\n" } ]
Java
Apache License 2.0
apache/systemds
toString now prints NaN & Infinity like how as.scalar prints them Closes #415
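The fix above works around a quirk of java.text.DecimalFormat: formatted NaN and infinities come out as the symbols \uFFFD and \u221E, which do not match what as.scalar prints, so the patch short-circuits to Double.toString for those values. Below is a small stand-alone demo of the difference; the class and method names are chosen for the sketch and are not taken from SystemML.

```java
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;

public class SpecialValueFormatDemo {

    // Fall back to Double.toString for NaN/Infinity so the output reads
    // "NaN"/"Infinity" instead of DecimalFormat's replacement symbols.
    static String fmt(DecimalFormat df, double v) {
        return (Double.isNaN(v) || Double.isInfinite(v)) ? Double.toString(v) : df.format(v);
    }

    public static void main(String[] args) {
        DecimalFormat df = new DecimalFormat("0.000", DecimalFormatSymbols.getInstance(Locale.US));
        double[] vals = {1.5, Double.NaN, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY};
        for (double v : vals) {
            // left: raw DecimalFormat output; right: wrapped output as in the patch above
            System.out.println(df.format(v) + "\t" + fmt(df, v));
        }
    }
}
```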
49,738
07.03.2017 11:31:53
28,800
6f4d8762d97699b6a9c7ba479f284f0e029b5ee4
[MINOR] Fix issues w/ gpu instructions (imports, unused vars/suppress)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -78,7 +78,6 @@ import org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDHandler;\nimport org.apache.sysml.runtime.matrix.CleanupMR;\n-import org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixDNN;\nimport org.apache.sysml.runtime.matrix.mapred.MRConfigurationNames;\nimport org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -23,7 +23,6 @@ import org.apache.sysml.runtime.controlprogram.caching.CacheException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.utils.GPUStatistics;\n-import org.apache.sysml.utils.Statistics;\nimport java.util.Collections;\nimport java.util.Comparator;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaContext.java", "diff": "@@ -27,7 +27,6 @@ import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\nimport org.apache.sysml.utils.GPUStatistics;\n-import org.apache.sysml.utils.Statistics;\nimport jcuda.driver.JCudaDriver;\nimport jcuda.jcublas.JCublas2;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "diff": "@@ -47,10 +47,8 @@ import org.apache.sysml.runtime.matrix.data.SparseBlockCOO;\nimport org.apache.sysml.runtime.matrix.data.SparseBlockCSR;\nimport org.apache.sysml.runtime.matrix.data.SparseBlockMCSR;\nimport org.apache.sysml.utils.GPUStatistics;\n-import org.apache.sysml.utils.Statistics;\nimport jcuda.Pointer;\n-// import jcuda.Sizeof;\nimport jcuda.jcublas.JCublas2;\nimport jcuda.jcublas.cublasHandle;\nimport jcuda.jcusparse.JCusparse;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -1115,7 +1115,7 @@ public class LibMatrixCUDA {\n// Convert right to dense and do a cuBlas matmul\n// BDenseTransposed is a column major matrix\n// Note the arguments to denseDenseMatmult to accommodate for this.\n- long t0=0, t1=0, t2=0;\n+ long t0=0, t1=0;\nif (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nPointer BDenseTransposed = B.toColumnMajorDenseMatrix(cusparseHandle, cublasHandle, (int)right.getNumRows(), (int)right.getNumColumns());\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_SPARSE_TO_DENSE, System.nanoTime() - t0);\n@@ -1585,7 +1585,6 @@ public class LibMatrixCUDA {\nPointer in = ((JCudaObject)in1.getGPUObject()).jcudaDenseMatrixPtr;\nint size = rlen * clen;\n- long t0=0;\n// For 
scalars, set the scalar output in the Execution Context object\nswitch (opIndex){\ncase OP_PLUS: {\n@@ -1610,7 +1609,6 @@ public class LibMatrixCUDA {\n}\ncase OP_PLUS_SQ : {\n// Calculate the squares in a temporary object tmp\n- if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nPointer tmp = JCudaObject.allocate(instName, size * Sizeof.DOUBLE);\nsquareMatrix(instName, in, tmp, rlen, clen);\n@@ -1710,7 +1708,6 @@ public class LibMatrixCUDA {\n}\ncase OP_VARIANCE : {\n// Temporary GPU array for\n- if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nPointer tmp = JCudaObject.allocate(instName, size * Sizeof.DOUBLE);\nPointer tmp2 = JCudaObject.allocate(instName, size * Sizeof.DOUBLE);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -79,7 +79,6 @@ public class LibMatrixDNN {\nprivate static AtomicLong loopedConvBwdDataMatMultTime = new AtomicLong(0);\nprivate static AtomicLong loopedConvBwdDataCol2ImTime = new AtomicLong(0);\n- @SuppressWarnings(\"unused\")\npublic static void appendStatistics(StringBuilder sb) {\nif(DMLScript.STATISTICS && DISPLAY_STATISTICS && (conv2dDenseCount.get() != 0 || conv2dSparseCount.get() != 0)) {\nsb.append(\"LibMatrixDNN dense count (conv/bwdF/bwdD/im2col/maxBwd):\\t\"\n@@ -136,7 +135,6 @@ public class LibMatrixDNN {\n* @param params convolution parameters\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- @SuppressWarnings(\"unused\")\npublic static void conv2dBackwardData(MatrixBlock filter, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = filter;\nparams.input2 = dout;\n@@ -173,7 +171,6 @@ public class LibMatrixDNN {\n* @param params convolution parameters\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- @SuppressWarnings(\"unused\")\npublic static void conv2dBackwardFilter(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\nparams.input2 = dout;\n@@ -268,7 +265,6 @@ public class LibMatrixDNN {\nLibMatrixReorg.transpose(tmpAgg, ret);\n}\n- @SuppressWarnings(\"unused\")\nprivate static void doLoopedIm2ColConv2dBwdData(int n, MatrixBlock dout_reshaped, ConvolutionParameters params) throws DMLRuntimeException {\nMatrixBlock filter = params.input1;\nMatrixBlock dout = params.input2;\n@@ -287,7 +283,6 @@ public class LibMatrixDNN {\n}\n}\n- @SuppressWarnings(\"unused\")\nprivate static MatrixBlock doLoopedIm2ColConv2dBwdFilter(int n,\nMatrixBlock im2ColOutBlock, MatrixBlock dout_reshaped, MatrixBlock partialRetBlock, ConvolutionParameters params) throws DMLRuntimeException {\nlong t1 = DMLScript.STATISTICS && DISPLAY_STATISTICS ? System.nanoTime() : 0;\n@@ -317,7 +312,6 @@ public class LibMatrixDNN {\nret[2] = j % W;\n}\n- @SuppressWarnings(\"unused\")\npublic static void conv2d(MatrixBlock input, MatrixBlock filter, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\nparams.input2 = filter;\n@@ -350,7 +344,6 @@ public class LibMatrixDNN {\noutputBlock.recomputeNonZeros();\n}\n- @SuppressWarnings(\"unused\")\nprivate static void doLoopedIm2ColConv2d(int n, MatrixBlock im2ColOutBlock, ConvolutionParameters params) throws DMLRuntimeException {\nlong t1 = DMLScript.STATISTICS && DISPLAY_STATISTICS ? 
System.nanoTime() : 0;\ndoIm2col(n, im2ColOutBlock, params);\n@@ -403,7 +396,6 @@ public class LibMatrixDNN {\n* @param params convolution parameters\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- @SuppressWarnings(\"unused\")\npublic static void maxpoolingBackward(MatrixBlock input, MatrixBlock dout, MatrixBlock outputBlock, ConvolutionParameters params) throws DMLRuntimeException {\nparams.input1 = input;\nparams.input2 = dout;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "new_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "diff": "package org.apache.sysml.utils;\n-import org.apache.sysml.api.DMLScript;\n-\n-import java.util.*;\n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.Comparator;\n+import java.util.HashMap;\n+import java.util.Iterator;\n+import java.util.List;\n+import java.util.Map;\nimport java.util.concurrent.atomic.AtomicLong;\n/**\n@@ -29,6 +33,8 @@ import java.util.concurrent.atomic.AtomicLong;\n* Printed as part of {@link Statistics}.\n*/\npublic class GPUStatistics {\n+ //TODO fix formatting\n+\n// Whether or not extra per-instruction statistics will be recorded and shown for the GPU\npublic static boolean DISPLAY_STATISTICS = false;\n@@ -149,7 +155,9 @@ public class GPUStatistics {\nStringBuffer sb = new StringBuffer();\nHashMap<String, Long> miscTimerMap = _cpInstMiscTime.get(instructionName);\nif (miscTimerMap != null) {\n- List<Map.Entry<String, Long>> sortedList = new ArrayList<Map.Entry<String, Long>>(miscTimerMap.entrySet());\n+ List<Map.Entry<String, Long>> sortedList\n+ = new ArrayList<\n+ Map.Entry<String, Long>>(miscTimerMap.entrySet());\n// Sort the times to display by the most expensive first\nCollections.sort(sortedList, new Comparator<Map.Entry<String, Long>>() {\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -22,8 +22,12 @@ package org.apache.sysml.utils;\nimport java.lang.management.CompilationMXBean;\nimport java.lang.management.GarbageCollectorMXBean;\nimport java.lang.management.ManagementFactory;\n-import java.util.*;\n+import java.util.Arrays;\n+import java.util.Comparator;\n+import java.util.HashMap;\n+import java.util.List;\nimport java.util.Map.Entry;\n+import java.util.Set;\nimport java.util.concurrent.atomic.AtomicLong;\nimport org.apache.sysml.api.DMLScript;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix issues w/ gpu instructions (imports, unused vars/suppress)
49,717
07.03.2017 13:41:03
28,800
6b1572e4bba31619c5bed19fd0c106d2e759f159
added gpu option to MLContext API. Additionally: changed initialization of CUDA libraries from static to per instance; added documentation to mlcontext programming guide. Closes
[ { "change_type": "MODIFY", "old_path": "docs/spark-mlcontext-programming-guide.md", "new_path": "docs/spark-mlcontext-programming-guide.md", "diff": "@@ -1086,6 +1086,96 @@ mean: Double = 0.5002109404821844\n</div>\n+## GPU\n+\n+If the driver node has a GPU, SystemML may be able to utilize it, subject to memory constraints and what instructions are used in the dml script\n+\n+<div class=\"codetabs\">\n+\n+<div data-lang=\"Scala\" markdown=\"1\">\n+{% highlight scala %}\n+ml.setGPU(true)\n+ml.setStatistics(true)\n+val matMultScript = dml(\"\"\"\n+A = rand(rows=10, cols=1000)\n+B = rand(rows=1000, cols=10)\n+C = A %*% B\n+print(toString(C))\n+\"\"\")\n+ml.execute(matMultScript)\n+{% endhighlight %}\n+</div>\n+\n+<div data-lang=\"Spark Shell\" markdown=\"1\">\n+{% highlight scala %}\n+scala> ml.setGPU(true)\n+\n+scala> ml.setStatistics(true)\n+\n+scala> val matMultScript = dml(\"\"\"\n+ | A = rand(rows=10, cols=1000)\n+ | B = rand(rows=1000, cols=10)\n+ | C = A %*% B\n+ | print(toString(C))\n+ | \"\"\")\n+matMultScript: org.apache.sysml.api.mlcontext.Script =\n+Inputs:\n+None\n+\n+Outputs:\n+None\n+\n+scala> ml.execute(matMultScript)\n+249.977 238.545 233.700 234.489 248.556 244.423 249.051 255.043 249.117 251.605\n+249.226 248.680 245.532 238.258 254.451 249.827 260.957 251.273 250.577 257.571\n+258.703 246.969 243.463 246.547 250.784 251.758 251.654 258.318 251.817 254.097\n+248.788 242.960 230.920 244.026 249.159 247.998 251.330 254.718 248.013 255.706\n+253.251 248.788 235.785 242.941 252.096 248.675 256.865 251.677 252.872 250.490\n+256.087 245.035 234.124 238.307 248.630 252.522 251.122 251.577 249.171 247.974\n+245.419 243.114 232.262 239.776 249.583 242.351 250.972 249.244 246.729 251.807\n+250.081 242.367 230.334 240.955 248.332 240.730 246.940 250.396 244.107 249.729\n+247.368 239.882 234.353 237.087 252.337 248.801 246.627 249.077 244.305 245.621\n+252.827 257.352 239.546 246.529 258.916 255.612 260.480 254.805 252.695 257.531\n+\n+SystemML Statistics:\n+Total elapsed time: 0.000 sec.\n+Total compilation time: 0.000 sec.\n+Total execution time: 0.000 sec.\n+Number of compiled Spark inst: 0.\n+Number of executed Spark inst: 0.\n+CUDA/CuLibraries init time: 0.000/0.003 sec.\n+Number of executed GPU inst: 8.\n+GPU mem tx time (alloc/dealloc/toDev/fromDev): 0.003/0.002/0.010/0.002 sec.\n+GPU mem tx count (alloc/dealloc/toDev/fromDev/evict): 24/24/0/16/8/0.\n+GPU conversion time (sparseConv/sp2dense/dense2sp): 0.000/0.000/0.000 sec.\n+GPU conversion count (sparseConv/sp2dense/dense2sp): 0/0/0.\n+Cache hits (Mem, WB, FS, HDFS): 40/0/0/0.\n+Cache writes (WB, FS, HDFS): 21/0/0.\n+Cache times (ACQr/m, RLS, EXP): 0.002/0.002/0.003/0.000 sec.\n+HOP DAGs recompiled (PRED, SB): 0/0.\n+HOP DAGs recompile time: 0.000 sec.\n+Spark ctx create time (lazy): 0.000 sec.\n+Spark trans counts (par,bc,col):0/0/0.\n+Spark trans times (par,bc,col): 0.000/0.000/0.000 secs.\n+Total JIT compile time: 11.426 sec.\n+Total JVM GC count: 20.\n+Total JVM GC time: 1.078 sec.\n+Heavy hitter instructions (name, time, count):\n+-- 1) toString 0.085 sec 8\n+-- 2) rand 0.027 sec 16\n+-- 3) gpu_ba+* 0.018 sec 8\n+-- 4) print 0.006 sec 8\n+-- 5) createvar 0.003 sec 24\n+-- 6) rmvar 0.003 sec 40\n+\n+res20: org.apache.sysml.api.mlcontext.MLResults =\n+None\n+{% endhighlight %}\n+</div>\n+\n+</div>\n+\n+Note that GPU instructions show up prepended with a \"gpu\" in the statistics.\n## Explain\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "new_path": 
"src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "diff": "@@ -98,6 +98,11 @@ public class MLContext {\n*/\nprivate boolean statistics = false;\n+ /**\n+ * Whether or not GPU mode should be enabled\n+ */\n+ private boolean gpu = false;\n+\n/**\n* The number of heavy hitters that are printed as part of the statistics\n* option\n@@ -274,6 +279,7 @@ public class MLContext {\nScriptExecutor scriptExecutor = new ScriptExecutor();\nscriptExecutor.setExplain(explain);\nscriptExecutor.setExplainLevel(explainLevel);\n+ scriptExecutor.setGPU(gpu);\nscriptExecutor.setStatistics(statistics);\nscriptExecutor.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\nscriptExecutor.setInit(scriptHistoryStrings.isEmpty());\n@@ -411,6 +417,25 @@ public class MLContext {\n+ \"(valid types: hops, runtime, recompile_hops, recompile_runtime).\");\n}\n+ /**\n+ * Whether or not to use (an available) GPU on the driver node.\n+ * If a GPU is not available, and the GPU mode is set, SystemML will crash when the program is run.\n+ * @param enable\n+ * true if needs to be enabled, false otherwise\n+ */\n+ public void setGPU(boolean enable) {\n+ this.gpu = true;\n+ }\n+\n+ /**\n+ * Whether or not the GPU mode is enabled.\n+ * @return true if enabled, false otherwise\n+ */\n+ public boolean isGPU() {\n+ return this.gpu;\n+ }\n+\n+\n/**\n* Used internally by MLContextProxy.\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "@@ -46,6 +46,7 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.utils.Explain;\nimport org.apache.sysml.utils.Explain.ExplainCounts;\nimport org.apache.sysml.utils.Explain.ExplainType;\n@@ -114,6 +115,7 @@ public class ScriptExecutor {\nprotected Script script;\nprotected boolean init = false;\nprotected boolean explain = false;\n+ protected boolean gpu = false;\nprotected boolean statistics = false;\nprotected ExplainLevel explainLevel;\nprotected int statisticsMaxHeavyHitters = 10;\n@@ -307,7 +309,7 @@ public class ScriptExecutor {\n* Sets the script in the ScriptExecutor, checks that the script has a type\n* and string, sets the ScriptExecutor in the script, sets the script string\n* in the Spark Monitor, and globally sets the script type.\n- *\n+ * Also does GPU initialization\n* @param script\n* the DML or PYDML script to execute\n*/\n@@ -317,6 +319,12 @@ public class ScriptExecutor {\nscript.setScriptExecutor(this);\n// Set global variable indicating the script type\nDMLScript.SCRIPT_TYPE = script.getScriptType();\n+ try {\n+ if (gpu)\n+ GPUContext.getGPUContext();\n+ } catch (DMLRuntimeException e) {\n+ throw new MLContextException(\"Exception occurred during initialization of GPU\", e);\n+ }\n}\n/**\n@@ -324,6 +332,12 @@ public class ScriptExecutor {\n*/\nprotected void cleanupAfterExecution() {\nrestoreInputsInSymbolTable();\n+ try {\n+ if (gpu)\n+ executionContext.destroyGPUContext();\n+ } catch (DMLRuntimeException e) {\n+ throw new MLContextException(\"Exception occurred during cleanup of GPU related resources\", e);\n+ }\n}\n/**\n@@ -632,4 +646,14 @@ public class ScriptExecutor {\n}\n}\n+ /**\n+ * 
Whether or not to enable GPU usage\n+ * @param enabled\n+ * true if enabled, false otherwise\n+ */\n+ public void setGPU(boolean enabled) {\n+ this.gpu = enabled;\n+ DMLScript.USE_ACCELERATOR = enabled;\n+ }\n+\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -41,7 +41,7 @@ public abstract class GPUContext {\npublic static ConcurrentLinkedQueue<Future> pendingDeallocates = new ConcurrentLinkedQueue<Future>();\n/** All asynchronous cudaFree calls will be done on this executor service */\n- public static ExecutorService deallocExecutorService = Executors.newSingleThreadExecutor();\n+ public static ExecutorService deallocExecutorService;\n/** Synchronization object to make sure no allocations happen when something is being evicted from memory */\npublic static final Object syncObj = new Object();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaContext.java", "diff": "*/\npackage org.apache.sysml.runtime.instructions.gpu.context;\n+import java.util.concurrent.Executors;\nimport java.util.concurrent.atomic.AtomicLong;\nimport org.apache.commons.logging.Log;\n@@ -104,27 +105,8 @@ public class JCudaContext extends GPUContext {\nLOG.info(\"Active CUDA device number : \" + device[0]);\nLOG.info(\"Max Blocks/Threads/SharedMem : \" + maxBlocks + \"/\" + maxThreadsPerBlock + \"/\" + sharedMemPerBlock);\n- GPUStatistics.cudaInitTime = System.nanoTime() - start;\n-\n- start = System.nanoTime();\n- LibMatrixCUDA.cudnnHandle = new cudnnHandle();\n- cudnnCreate(LibMatrixCUDA.cudnnHandle);\n- LibMatrixCUDA.cublasHandle = new cublasHandle();\n- cublasCreate(LibMatrixCUDA.cublasHandle);\n- // For cublas v2, cublasSetPointerMode tells Cublas whether to expect scalar arguments on device or on host\n- // This applies to arguments like \"alpha\" in Dgemm, and \"y\" in Ddot.\n- // cublasSetPointerMode(LibMatrixCUDA.cublasHandle, cublasPointerMode.CUBLAS_POINTER_MODE_DEVICE);\n- LibMatrixCUDA.cusparseHandle = new cusparseHandle();\n- cusparseCreate(LibMatrixCUDA.cusparseHandle);\n- GPUStatistics.cudaLibrariesInitTime = System.nanoTime() - start;\n-\n- try {\n- LibMatrixCUDA.kernels = new JCudaKernels();\n- } catch (DMLRuntimeException e) {\n- System.err.println(\"ERROR - Unable to initialize JCudaKernels. 
System in an inconsistent state\");\n- LibMatrixCUDA.kernels = null;\n- }\n+ GPUStatistics.cudaInitTime = System.nanoTime() - start;\n}\n@Override\n@@ -268,6 +250,26 @@ public class JCudaContext extends GPUContext {\nLOG.info(\"Total GPU memory: \" + (totalNumBytes*(1e-6)) + \" MB\");\nLOG.info(\"Available GPU memory: \" + (deviceMemBytes.get()*(1e-6)) + \" MB\");\n+ long start = System.nanoTime();\n+ LibMatrixCUDA.cudnnHandle = new cudnnHandle();\n+ cudnnCreate(LibMatrixCUDA.cudnnHandle);\n+ LibMatrixCUDA.cublasHandle = new cublasHandle();\n+ cublasCreate(LibMatrixCUDA.cublasHandle);\n+ // For cublas v2, cublasSetPointerMode tells Cublas whether to expect scalar arguments on device or on host\n+ // This applies to arguments like \"alpha\" in Dgemm, and \"y\" in Ddot.\n+ // cublasSetPointerMode(LibMatrixCUDA.cublasHandle, cublasPointerMode.CUBLAS_POINTER_MODE_DEVICE);\n+ LibMatrixCUDA.cusparseHandle = new cusparseHandle();\n+ cusparseCreate(LibMatrixCUDA.cusparseHandle);\n+ try {\n+ LibMatrixCUDA.kernels = new JCudaKernels();\n+ } catch (DMLRuntimeException e) {\n+ System.err.println(\"ERROR - Unable to initialize JCudaKernels. System in an inconsistent state\");\n+ LibMatrixCUDA.kernels = null;\n+ }\n+ GPUStatistics.cudaLibrariesInitTime = System.nanoTime() - start;\n+\n+ GPUContext.deallocExecutorService = Executors.newSingleThreadExecutor();\n+\n}\n@Override\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-942] added gpu option to MLContext API Additionally, - Changed initialization of CUDA libraries from static to per instance - Added documentation to mlcontext programming guide Closes #420
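The MLContext guide addition above demonstrates the new GPU switch from the Scala shell; a roughly equivalent Java sketch follows. It uses only calls visible in the diff (setGPU, setStatistics, ScriptFactory.dml, execute, close) and assumes a local Spark master plus a GPU-enabled SystemML build with the CUDA 8.0 / cuDNN 5.1 native libraries available; per the setGPU javadoc, the run fails if no GPU is present.

```java
import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.Script;

public class GpuMatMultExample {
    public static void main(String[] args) {
        // Local master chosen only for the sketch; any Spark context works.
        JavaSparkContext sc = new JavaSparkContext(
            new SparkConf().setAppName("GpuMatMultExample").setMaster("local[*]"));
        MLContext ml = new MLContext(sc);

        ml.setGPU(true);        // request the GPU backend on the driver node
        ml.setStatistics(true); // print statistics, where GPU operators appear with a gpu_ prefix

        Script matMult = dml(
              "A = rand(rows=10, cols=1000)\n"
            + "B = rand(rows=1000, cols=10)\n"
            + "C = A %*% B\n"
            + "print(toString(C))");
        ml.execute(matMult);

        ml.close();
        sc.stop();
    }
}
```

With statistics enabled, GPU-executed operators show up in the heavy-hitter list prefixed with "gpu", as in the transcript of the guide above.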
49,736
07.03.2017 22:58:46
28,800
eef34599e4539c158a250f70ef059f75854eb867
[HOTFIX] Bugfix in setGPU method of MLContext
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "diff": "@@ -424,7 +424,7 @@ public class MLContext {\n* true if needs to be enabled, false otherwise\n*/\npublic void setGPU(boolean enable) {\n- this.gpu = true;\n+ this.gpu = enable;\n}\n/**\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Bugfix in setGPU method of MLContext
49,736
08.03.2017 13:16:41
28,800
8c1e89e51006302b6f5fc7d115eb482aa0f212de
[HOTFIX] Show total time in statistics for MLContext
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "@@ -276,6 +276,9 @@ public class ScriptExecutor {\npublic MLResults execute(Script script) {\n// main steps in script execution\n+ if(statistics) {\n+ Statistics.startRunTimer();\n+ }\nsetup(script);\nparseScript();\nliveVariableAnalysis();\n@@ -299,6 +302,7 @@ public class ScriptExecutor {\nscript.setResults(mlResults);\nif (statistics) {\n+ Statistics.stopRunTimer();\nSystem.out.println(Statistics.display(statisticsMaxHeavyHitters));\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Show total time in statistics for MLContext
49,738
08.03.2017 23:06:31
28,800
0a61fe084094c32493d7fd5a013fed8ecd461acc
Native dataset support in parfor spark dp-execute, tests. See for details.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextConversionUtil.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextConversionUtil.java", "diff": "@@ -49,6 +49,7 @@ import org.apache.sysml.runtime.controlprogram.caching.CacheException;\nimport org.apache.sysml.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.instructions.spark.data.DatasetObject;\nimport org.apache.sysml.runtime.instructions.spark.data.RDDObject;\nimport org.apache.sysml.runtime.instructions.spark.functions.ConvertStringToLongTextPair;\nimport org.apache.sysml.runtime.instructions.spark.functions.CopyTextInputFunction;\n@@ -328,7 +329,11 @@ public class MLContextConversionUtil {\n{\nmatrixMetadata = (matrixMetadata!=null) ? matrixMetadata : new MatrixMetadata();\nJavaPairRDD<MatrixIndexes, MatrixBlock> binaryBlock = dataFrameToMatrixBinaryBlocks(dataFrame, matrixMetadata);\n- return binaryBlocksToMatrixObject(variableName, binaryBlock, matrixMetadata, false);\n+ MatrixObject mo = binaryBlocksToMatrixObject(variableName, binaryBlock, matrixMetadata, false);\n+ //keep lineage of original dataset to allow bypassing binary block conversion if possible\n+ mo.getRDDHandle().addLineageChild(new DatasetObject(dataFrame, variableName,\n+ isDataFrameWithIDColumn(matrixMetadata),isVectorBasedDataFrame(matrixMetadata)));\n+ return mo;\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "diff": "@@ -29,6 +29,11 @@ import org.apache.hadoop.io.Writable;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.api.java.function.Function;\n+import org.apache.spark.api.java.function.PairFunction;\n+import org.apache.spark.ml.linalg.SparseVector;\n+import org.apache.spark.ml.linalg.Vector;\n+import org.apache.spark.sql.Dataset;\n+import org.apache.spark.sql.Row;\nimport org.apache.spark.util.LongAccumulator;\nimport scala.Tuple2;\n@@ -40,13 +45,17 @@ import org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartition\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.parfor.util.PairWritableBlock;\n+import org.apache.sysml.runtime.instructions.spark.data.DatasetObject;\n+import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\nimport org.apache.sysml.runtime.instructions.spark.utils.SparkUtils;\n+import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils.DataFrameExtractIDFunction;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n-import org.apache.sysml.runtime.matrix.MatrixDimensionsMetaData;\nimport org.apache.sysml.runtime.matrix.data.InputInfo;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.utils.Statistics;\n/**\n@@ -71,9 +80,8 @@ public class 
RemoteDPParForSpark\nJavaSparkContext sc = sec.getSparkContext();\n//prepare input parameters\n- MatrixDimensionsMetaData md = (MatrixDimensionsMetaData) input.getMetaData();\n- MatrixCharacteristics mc = md.getMatrixCharacteristics();\n- InputInfo ii = InputInfo.BinaryBlockInputInfo;\n+ MatrixObject mo = sec.getMatrixObject(matrixvar);\n+ MatrixCharacteristics mc = mo.getMatrixCharacteristics();\n//initialize accumulators for tasks/iterations, and inputs\nJavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable(matrixvar);\n@@ -86,11 +94,10 @@ public class RemoteDPParForSpark\nint numReducers2 = Math.max(numReducers, Math.min(numParts, numParts2));\n//core parfor datapartition-execute (w/ or w/o shuffle, depending on data characteristics)\n- DataPartitionerRemoteSparkMapper dpfun = new DataPartitionerRemoteSparkMapper(mc, ii, oi, dpf);\nRemoteDPParForSparkWorker efun = new RemoteDPParForSparkWorker(program, clsMap,\nmatrixvar, itervar, enableCPCaching, mc, tSparseCol, dpf, oi, aTasks, aIters);\n- JavaPairRDD<Long,Writable> tmp = in.flatMapToPair(dpfun);\n- List<Tuple2<Long,String>> out = (requiresGrouping(dpf, mc) ?\n+ JavaPairRDD<Long,Writable> tmp = getPartitionedInput(sec, matrixvar, oi, dpf);\n+ List<Tuple2<Long,String>> out = (requiresGrouping(dpf, mo) ?\ntmp.groupByKey(numReducers2) : tmp.map(new PseudoGrouping()) )\n.mapPartitionsToPair(efun) //execute parfor tasks, incl cleanup\n.collect(); //get output handles\n@@ -113,10 +120,57 @@ public class RemoteDPParForSpark\nreturn ret;\n}\n+ private static JavaPairRDD<Long, Writable> getPartitionedInput(SparkExecutionContext sec,\n+ String matrixvar, OutputInfo oi, PDataPartitionFormat dpf)\n+ throws DMLRuntimeException\n+ {\n+ InputInfo ii = InputInfo.BinaryBlockInputInfo;\n+ MatrixObject mo = sec.getMatrixObject(matrixvar);\n+ MatrixCharacteristics mc = mo.getMatrixCharacteristics();\n+\n+ //leverage existing dataset (w/o shuffling for reblock and data partitioning)\n+ //NOTE: there will always be a checkpoint rdd on top of the input rdd and the dataset\n+ if( hasInputDataSet(dpf, mo) )\n+ {\n+ DatasetObject dsObj = (DatasetObject)mo.getRDDHandle()\n+ .getLineageChilds().get(0).getLineageChilds().get(0);\n+ Dataset<Row> in = dsObj.getDataset();\n+\n+ //construct or reuse row ids\n+ JavaPairRDD<Row, Long> prepinput = dsObj.containsID() ?\n+ in.javaRDD().mapToPair(new DataFrameExtractIDFunction(\n+ in.schema().fieldIndex(RDDConverterUtils.DF_ID_COLUMN))) :\n+ in.javaRDD().zipWithIndex(); //zip row index\n+\n+ //convert row to row in matrix block format\n+ return prepinput.mapToPair(new DataFrameToRowBinaryBlockFunction(\n+ mc.getCols(), dsObj.isVectorBased(), dsObj.containsID()));\n+ }\n+ //default binary block input rdd\n+ else\n+ {\n+ //get input rdd and data partitioning\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable(matrixvar);\n+ DataPartitionerRemoteSparkMapper dpfun = new DataPartitionerRemoteSparkMapper(mc, ii, oi, dpf);\n+ return in.flatMapToPair(dpfun);\n+ }\n+ }\n+\n//determines if given input matrix requires grouping of partial partition slices\n- private static boolean requiresGrouping(PDataPartitionFormat dpf, MatrixCharacteristics mc) {\n- return (dpf == PDataPartitionFormat.ROW_WISE && mc.getNumColBlocks() > 1)\n- || (dpf == PDataPartitionFormat.COLUMN_WISE && mc.getNumRowBlocks() > 1);\n+ private static boolean requiresGrouping(PDataPartitionFormat dpf, MatrixObject mo) {\n+ MatrixCharacteristics mc = mo.getMatrixCharacteristics();\n+ return ((dpf == 
PDataPartitionFormat.ROW_WISE && mc.getNumColBlocks() > 1)\n+ || (dpf == PDataPartitionFormat.COLUMN_WISE && mc.getNumRowBlocks() > 1))\n+ && !hasInputDataSet(dpf, mo);\n+ }\n+\n+ //determines if given input matrix wraps input data set applicable to direct processing\n+ private static boolean hasInputDataSet(PDataPartitionFormat dpf, MatrixObject mo) {\n+ return (dpf == PDataPartitionFormat.ROW_WISE\n+ && mo.getRDDHandle().isCheckpointRDD()\n+ && mo.getRDDHandle().getLineageChilds().size()==1\n+ && mo.getRDDHandle().getLineageChilds().get(0).getLineageChilds().size()==1\n+ && mo.getRDDHandle().getLineageChilds().get(0).getLineageChilds().get(0) instanceof DatasetObject);\n}\n//function to map data partition output to parfor input signature without grouping\n@@ -128,4 +182,56 @@ public class RemoteDPParForSpark\nreturn new Tuple2<Long, Iterable<Writable>>(arg0._1(), Collections.singletonList(arg0._2()));\n}\n}\n+\n+ //function to map dataset rows to rows in binary block representation\n+ private static class DataFrameToRowBinaryBlockFunction implements PairFunction<Tuple2<Row,Long>,Long,Writable>\n+ {\n+ private static final long serialVersionUID = -3162404379379461523L;\n+\n+ private final long _clen;\n+ private final boolean _containsID;\n+ private final boolean _isVector;\n+\n+ public DataFrameToRowBinaryBlockFunction(long clen, boolean containsID, boolean isVector) {\n+ _clen = clen;\n+ _containsID = containsID;\n+ _isVector = isVector;\n+ }\n+\n+ @Override\n+ public Tuple2<Long, Writable> call(Tuple2<Row, Long> arg0)\n+ throws Exception\n+ {\n+ long rowix = arg0._2() + 1;\n+\n+ //process row data\n+ int off = _containsID ? 1: 0;\n+ Object obj = _isVector ? arg0._1().get(off) : arg0._1();\n+ boolean sparse = (obj instanceof SparseVector);\n+ MatrixBlock mb = new MatrixBlock(1, (int)_clen, sparse);\n+\n+ if( _isVector ) {\n+ Vector vect = (Vector) obj;\n+ if( vect instanceof SparseVector ) {\n+ SparseVector svect = (SparseVector) vect;\n+ int lnnz = svect.numNonzeros();\n+ for( int k=0; k<lnnz; k++ )\n+ mb.appendValue(0, svect.indices()[k], svect.values()[k]);\n+ }\n+ else { //dense\n+ for( int j=0; j<_clen; j++ )\n+ mb.appendValue(0, j, vect.apply(j));\n+ }\n+ }\n+ else { //row\n+ Row row = (Row) obj;\n+ for( int j=off; j<off+_clen; j++ )\n+ mb.appendValue(0, j-off, UtilFunctions.getDouble(row.get(j)));\n+ }\n+ mb.examSparsity();\n+\n+ return new Tuple2<Long, Writable>(rowix,\n+ new PairWritableBlock(new MatrixIndexes(1,1),mb));\n+ }\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/util/PairWritableBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/util/PairWritableBlock.java", "diff": "@@ -36,12 +36,20 @@ import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n*/\npublic class PairWritableBlock implements Writable, Serializable\n{\n-\nprivate static final long serialVersionUID = -6022511967446089164L;\npublic MatrixIndexes indexes;\npublic MatrixBlock block;\n+ public PairWritableBlock() {\n+\n+ }\n+\n+ public PairWritableBlock(MatrixIndexes ix, MatrixBlock mb) {\n+ indexes = ix;\n+ block = mb;\n+ }\n+\n@Override\npublic void readFields(DataInput in) throws IOException\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/BroadcastObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/BroadcastObject.java", "diff": "@@ -29,10 +29,9 @@ public class BroadcastObject<T extends CacheBlock> 
extends LineageObject\n//soft reference storage for graceful cleanup in case of memory pressure\nprotected SoftReference<PartitionedBroadcast<T>> _bcHandle = null;\n- public BroadcastObject( PartitionedBroadcast<T> bvar, String varName )\n- {\n+ public BroadcastObject( PartitionedBroadcast<T> bvar, String varName ) {\n+ super(varName);\n_bcHandle = new SoftReference<PartitionedBroadcast<T>>(bvar);\n- _varName = varName;\n}\n@SuppressWarnings(\"rawtypes\")\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/DatasetObject.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.spark.data;\n+\n+import org.apache.spark.sql.Dataset;\n+import org.apache.spark.sql.Row;\n+\n+public class DatasetObject extends LineageObject\n+{\n+ private final Dataset<Row> _dsHandle;\n+ private final boolean _isVector;\n+ private final boolean _containsID;\n+\n+ public DatasetObject( Dataset<Row> dsvar, String varName) {\n+ this(dsvar, varName, true, true);\n+ }\n+\n+ public DatasetObject( Dataset<Row> dsvar, String varName, boolean isVector, boolean containsID) {\n+ super(varName);\n+ _dsHandle = dsvar;\n+ _isVector = isVector;\n+ _containsID = containsID;\n+ }\n+\n+ public Dataset<Row> getDataset() {\n+ return _dsHandle;\n+ }\n+\n+ public boolean isVectorBased() {\n+ return _isVector;\n+ }\n+\n+ public boolean containsID() {\n+ return _containsID;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/LineageObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/LineageObject.java", "diff": "@@ -29,12 +29,13 @@ public abstract class LineageObject\n//basic lineage information\nprotected int _numRef = -1;\nprotected List<LineageObject> _childs = null;\n- protected String _varName = null;\n+ protected final String _varName;\n//N:1 back reference to matrix/frame object\nprotected CacheableData<?> _cd = null;\n- protected LineageObject() {\n+ protected LineageObject(String varName) {\n+ _varName = varName;\n_numRef = 0;\n_childs = new ArrayList<LineageObject>();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/RDDObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/RDDObject.java", "diff": "@@ -23,7 +23,6 @@ import org.apache.spark.api.java.JavaPairRDD;\npublic class RDDObject extends LineageObject\n{\n-\nprivate JavaPairRDD<?,?> _rddHandle = null;\n//meta data on origin of given rdd handle\n@@ -31,10 +30,9 @@ public class RDDObject extends LineageObject\nprivate boolean _hdfsfile = false; //created from hdfs 
file\nprivate String _hdfsFname = null; //hdfs filename, if created from hdfs.\n- public RDDObject( JavaPairRDD<?,?> rddvar, String varName)\n- {\n+ public RDDObject( JavaPairRDD<?,?> rddvar, String varName) {\n+ super(varName);\n_rddHandle = rddvar;\n- _varName = varName;\n}\npublic JavaPairRDD<?,?> getRDD()\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java", "diff": "@@ -1151,7 +1151,7 @@ public class RDDConverterUtils\n}\n}\n- protected static class DataFrameExtractIDFunction implements PairFunction<Row, Row,Long>\n+ public static class DataFrameExtractIDFunction implements PairFunction<Row, Row,Long>\n{\nprivate static final long serialVersionUID = 7438855241666363433L;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextParforDatasetTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.mlcontext;\n+\n+import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;\n+\n+import org.apache.spark.SparkConf;\n+import org.apache.spark.api.java.JavaPairRDD;\n+import org.apache.spark.api.java.JavaSparkContext;\n+import org.apache.spark.sql.Dataset;\n+import org.apache.spark.sql.Row;\n+import org.apache.spark.sql.SparkSession;\n+import org.apache.sysml.api.mlcontext.MLContext;\n+import org.apache.sysml.api.mlcontext.MLResults;\n+import org.apache.sysml.api.mlcontext.MatrixFormat;\n+import org.apache.sysml.api.mlcontext.MatrixMetadata;\n+import org.apache.sysml.api.mlcontext.Script;\n+import org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\n+import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.After;\n+import org.junit.AfterClass;\n+import org.junit.BeforeClass;\n+import org.junit.Test;\n+\n+\n+public class MLContextParforDatasetTest extends AutomatedTestBase\n+{\n+ protected final static String TEST_DIR = \"org/apache/sysml/api/mlcontext\";\n+ protected final static String TEST_NAME = \"MLContext\";\n+\n+ private final static int rows = 100;\n+ private final 
static int cols = 1600;\n+ private final static double sparsity = 0.7;\n+\n+ private static SparkConf conf;\n+ private static JavaSparkContext sc;\n+ private static MLContext ml;\n+\n+ @BeforeClass\n+ public static void setUpClass() {\n+ if (conf == null)\n+ conf = SparkExecutionContext.createSystemMLSparkConf()\n+ .setAppName(\"MLContextTest\").setMaster(\"local\");\n+ if (sc == null)\n+ sc = new JavaSparkContext(conf);\n+ ml = new MLContext(sc);\n+ }\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_DIR, TEST_NAME);\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ }\n+\n+\n+ @Test\n+ public void testParforDatasetVector() {\n+ runMLContextParforDatasetTest(true, false);\n+ }\n+\n+ @Test\n+ public void testParforDatasetRow() {\n+ runMLContextParforDatasetTest(false, false);\n+ }\n+\n+ @Test\n+ public void testParforDatasetVectorUnkownDims() {\n+ runMLContextParforDatasetTest(true, true);\n+ }\n+\n+ @Test\n+ public void testParforDatasetRowUnknownDims() {\n+ runMLContextParforDatasetTest(false, true);\n+ }\n+\n+ private void runMLContextParforDatasetTest(boolean vector, boolean unknownDims)\n+ {\n+ //modify memory budget to trigger fused datapartition-execute\n+ long oldmem = InfrastructureAnalyzer.getLocalMaxMemory();\n+ InfrastructureAnalyzer.setLocalMaxMemory(1*1024*1024); //1MB\n+\n+ try\n+ {\n+ double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsity, 76543);\n+ MatrixBlock mbA = DataConverter.convertToMatrixBlock(A);\n+ int blksz = ConfigurationManager.getBlocksize();\n+ MatrixCharacteristics mc1 = new MatrixCharacteristics(rows, cols, blksz, blksz, mbA.getNonZeros());\n+ MatrixCharacteristics mc2 = unknownDims ? new MatrixCharacteristics() : new MatrixCharacteristics(mc1);\n+\n+ //create input dataset\n+ SparkSession sparkSession = SparkSession.builder().sparkContext(sc.sc()).getOrCreate();\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> in = SparkExecutionContext.toMatrixJavaPairRDD(sc, mbA, blksz, blksz);\n+ Dataset<Row> df = RDDConverterUtils.binaryBlockToDataFrame(sparkSession, in, mc1, vector);\n+ MatrixMetadata mm = new MatrixMetadata(vector ? MatrixFormat.DF_VECTOR_WITH_INDEX : MatrixFormat.DF_DOUBLES_WITH_INDEX);\n+ mm.setMatrixCharacteristics(mc2);\n+\n+ String s = \"v = matrix(0, rows=nrow(X), cols=1)\"\n+ + \"parfor(i in 1:nrow(X), log=DEBUG) {\"\n+ + \" v[i, ] = sum(X[i, ]);\"\n+ + \"}\"\n+ + \"r = sum(v);\";\n+ Script script = dml(s).in(\"X\", df, mm).out(\"r\");\n+ MLResults results = ml.execute(script);\n+\n+ //compare aggregation results\n+ double sum1 = results.getDouble(\"r\");\n+ double sum2 = mbA.sum();\n+\n+ TestUtils.compareScalars(sum2, sum1, 0.000001);\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ InfrastructureAnalyzer.setLocalMaxMemory(oldmem);\n+ }\n+ }\n+\n+ @After\n+ public void tearDown() {\n+ super.tearDown();\n+ }\n+\n+ @AfterClass\n+ public static void tearDownClass() {\n+ // stop spark context to allow single jvm tests (otherwise the\n+ // next test that tries to create a SparkContext would fail)\n+ sc.stop();\n+ sc = null;\n+ conf = null;\n+\n+ // clear status mlcontext and spark exec context\n+ ml.close();\n+ ml = null;\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1378] Native dataset support in parfor spark dp-execute, tests. See https://issues.apache.org/jira/browse/SYSTEMML-1378 for details.
49,738
09.03.2017 17:25:51
28,800
442b9a5b4aaca57f0d459da267551c885a35e6e1
Fix worst-case size propagation convolution hop
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "@@ -244,15 +244,14 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nprotected long[] inferOutputCharacteristics( MemoTable memo )\n{\n// [numRows, numCols, NNZ]\n- long[] ret = null;\n+ long[] ret = new long[3];\nif(op == ConvOp.BIAS_ADD) {\nMatrixCharacteristics[] mc = memo.getAllInputStats(getInput());\n- ret = new long[3];\nret[0] = mc[0].rowsKnown() ? mc[0].getRows() : -1;\nret[1] = mc[0].colsKnown() ? mc[0].getCols() : -1;\nret[2] = -1;\n- return ret;\n+ return (ret[0]>0 && ret[1]>0) ? ret : null;\n}\nConvolutionParameters params;\n@@ -264,41 +263,26 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nswitch(op)\n{\n- case MAX_POOLING:\n- {\n- ret = new long[3];\n+ case MAX_POOLING: {\nret[0] = getInput().get(0)._dim1;\nret[1] = getExtractedVal(params.C, params.P, params.Q);\nret[2] = -1;\nbreak;\n}\n- case MAX_POOLING_BACKWARD:\n- {\n- ret = new long[3];\n- ret[0] = getInput().get(0)._dim1;\n- ret[1] = getInput().get(0)._dim2;\n- ret[2] = -1;\n- break;\n- }\n- case DIRECT_CONV2D:\n- {\n- ret = new long[3];\n+ case DIRECT_CONV2D: {\nret[0] = getInput().get(0)._dim1;\nret[1] = getExtractedVal(getInput().get(1)._dim1, params.P, params.Q);\nret[2] = -1;\nbreak;\n}\n- case DIRECT_CONV2D_BACKWARD_FILTER:\n- {\n- ret = new long[3];\n+ case DIRECT_CONV2D_BACKWARD_FILTER: {\nret[0] = getInput().get(1)._dim1;\nret[1] = getInput().get(1)._dim2;\nret[2] = -1;\nbreak;\n}\n- case DIRECT_CONV2D_BACKWARD_DATA:\n- {\n- ret = new long[3];\n+ case MAX_POOLING_BACKWARD:\n+ case DIRECT_CONV2D_BACKWARD_DATA: {\nret[0] = getInput().get(0)._dim1;\nret[1] = getInput().get(0)._dim2;\nret[2] = -1;\n@@ -316,7 +300,8 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n\" pad=[\" + params.pad_h + \" \" + params.pad_w + \"]\");\n}\n- return ret;\n+ //safe return (create entry only if at least dims known)\n+ return (ret[0]>0 && ret[1]>0) ? ret : null;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1381] Fix worst-case size propagation convolution hop
49,738
09.03.2017 17:45:56
28,800
feb1cb72dfc742ff7f181f1025d217d9efdf51b5
[MINOR] Cleanups of mlcontext testsuite and gpu javadoc warnings
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -58,7 +58,7 @@ public abstract class GPUObject\n/**\n* Signal intent that a matrix block will be read (as input) on the GPU\n* @return true if a host memory to device memory transfer happened\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException ?\n*/\npublic abstract boolean acquireDeviceRead() throws DMLRuntimeException;\n/**\n@@ -84,7 +84,7 @@ public abstract class GPUObject\n/**\n* Signal intent that a block needs to be read on the host\n* @return true if copied from device to host\n- * @throws CacheException\n+ * @throws CacheException ?\n*/\npublic abstract boolean acquireHostRead() throws CacheException;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "diff": "@@ -242,7 +242,7 @@ public class JCudaObject extends GPUObject {\n* @param handle a valid {@link cusparseHandle}\n* @param C Output matrix\n* @param rowsC number of rows in C\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException ?\n*/\nprivate static void step1AllocateRowPointers(cusparseHandle handle, CSRPointer C, int rowsC) throws DMLRuntimeException {\ncusparseSetPointerMode(handle, cusparsePointerMode.CUSPARSE_POINTER_MODE_HOST);\n@@ -261,7 +261,7 @@ public class JCudaObject extends GPUObject {\n* @param C Output Sparse Matrix C on GPU\n* @param m Rows in C\n* @param n Columns in C\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException ?\n*/\nprivate static void step2GatherNNZGeam(cusparseHandle handle, CSRPointer A, CSRPointer B, CSRPointer C, int m, int n) throws DMLRuntimeException {\nint[] CnnzArray = { -1 };\n@@ -293,7 +293,7 @@ public class JCudaObject extends GPUObject {\n* @param m Number of rows of sparse matrix op ( A ) and C\n* @param n Number of columns of sparse matrix op ( B ) and C\n* @param k Number of columns/rows of sparse matrix op ( A ) / op ( B )\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException ?\n*/\nprivate static void step2GatherNNZGemm(cusparseHandle handle, CSRPointer A, int transA, CSRPointer B, int transB, CSRPointer C, int m, int n, int k) throws DMLRuntimeException {\nint[] CnnzArray = { -1 };\n@@ -321,7 +321,7 @@ public class JCudaObject extends GPUObject {\n*\n* @param handle a valid {@link cusparseHandle}\n* @param C Output sparse matrix on GPU\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException ?\n*/\nprivate static void step3AllocateValNInd(cusparseHandle handle, CSRPointer C) throws DMLRuntimeException {\n// Increment cudaCount by one when all three arrays of CSR sparse array are allocated\n@@ -1074,7 +1074,7 @@ public class JCudaObject extends GPUObject {\n* Convert sparse to dense (Performs transpose, use sparseToColumnMajorDense if the kernel can deal with column major format)\n* Also records per instruction invokation of sparseToDense.\n* @param instructionName Name of the instruction for which statistics are recorded in {@link GPUStatistics}\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException ?\n*/\npublic void sparseToDense(String instructionName) throws DMLRuntimeException {\nlong start = System.nanoTime();\n" }, { "change_type": "MODIFY", "old_path": 
"src/test_suites/java/org/apache/sysml/test/integration/mlcontext/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/mlcontext/ZPackageSuite.java", "diff": "@@ -27,10 +27,11 @@ import org.junit.runners.Suite;\n* they should not be run in parallel. */\n@RunWith(Suite.class)\[email protected]({\n- org.apache.sysml.test.integration.mlcontext.MLContextFrameTest.class,\n- org.apache.sysml.test.integration.mlcontext.MLContextMultipleScriptsTest.class,\n- org.apache.sysml.test.integration.mlcontext.MLContextScratchCleanupTest.class,\n- org.apache.sysml.test.integration.mlcontext.MLContextTest.class\n+ MLContextFrameTest.class,\n+ MLContextMultipleScriptsTest.class,\n+ MLContextParforDatasetTest.class,\n+ MLContextScratchCleanupTest.class,\n+ MLContextTest.class\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanups of mlcontext testsuite and gpu javadoc warnings
49,738
09.03.2017 17:58:44
28,800
b945543d933c37ad92adc04ccbfa51410434e777
Fix execution context creation w/ forced singlenode
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContextFactory.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContextFactory.java", "diff": "package org.apache.sysml.runtime.controlprogram.context;\nimport org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.controlprogram.Program;\npublic class ExecutionContextFactory\n@@ -41,6 +43,14 @@ public class ExecutionContextFactory\nswitch( DMLScript.rtplatform )\n{\ncase SINGLE_NODE:\n+ //NOTE: even in case of forced singlenode operations, users might still\n+ //want to run remote parfor which requires the correct execution context\n+ if( OptimizerUtils.getDefaultExecutionMode()==RUNTIME_PLATFORM.HYBRID)\n+ ec = new ExecutionContext(allocateVars, prog);\n+ else\n+ ec = new SparkExecutionContext(allocateVars, prog);\n+ break;\n+\ncase HADOOP:\ncase HYBRID:\nec = new ExecutionContext(allocateVars, prog);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1386] Fix execution context creation w/ forced singlenode
49,738
09.03.2017 18:51:21
28,800
600e641b2c5e39fca1e7cde197f1f1e26ce112b6
Fix load imbalance parfor spark w/ factoring
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParForSpark.java", "diff": "@@ -71,8 +71,8 @@ public class RemoteParForSpark\n//run remote_spark parfor job\n//(w/o lazy evaluation to fit existing parfor framework, e.g., result merge)\nRemoteParForSparkWorker func = new RemoteParForSparkWorker(program, clsMap, cpCaching, aTasks, aIters);\n- List<Tuple2<Long,String>> out =\n- sc.parallelize( tasks, numMappers ) //create rdd of parfor tasks\n+ List<Tuple2<Long,String>> out = sc\n+ .parallelize(tasks, tasks.size()) //create rdd of parfor tasks\n.flatMapToPair(func) //execute parfor tasks\n.collect(); //get output handles\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1388] Fix load imbalance parfor spark w/ factoring
49,738
09.03.2017 18:56:40
28,800
e3a75d1412c1b3d159e05cc877dd6e90fbdf5153
Fix parfor constrained optimizer (fused dpe selection)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -1499,8 +1499,8 @@ public class OptimizerRuleBased extends Optimizer\n// (this also implies that the body is CP only)\n// try to merge MR data partitioning and MR exec\n- if( (pn.getExecType()==ExecType.MR || pn.getExecType()==ExecType.SPARK) //MR/SP EXEC and CP body\n- && M < _rm2 //fits into remote memory of reducers\n+ if( (pn.getExecType()==ExecType.MR && M < _rm2 //fits into remote memory of reducers\n+ || pn.getExecType()==ExecType.SPARK) //MR/SP EXEC and CP body\n&& partitioner!=null && partitioner.equals(REMOTE_DP.toString()) //MR/SP partitioning\n&& partitionedMatrices.size()==1 ) //only one partitioned matrix\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1387] Fix parfor constrained optimizer (fused dpe selection)
49,772
09.03.2017 22:33:13
28,800
be9941097ef88eae0bb221142fd76ae2231ac954
Updating Preprocessing Notebook. Adding more aggressive filtering by utilizing optical density values and effectively skipping the 1024x1024 tiles by generating tiles of the same size as the final "samples".
[ { "change_type": "MODIFY", "old_path": "projects/breast_cancer/Preprocessing.ipynb", "new_path": "projects/breast_cancer/Preprocessing.ipynb", "diff": "},\n\"outputs\": [],\n\"source\": [\n+ \"def optical_density(tile):\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" Convert a tile to optical density values.\\n\",\n+ \" \\n\",\n+ \" Args:\\n\",\n+ \" tile: A 3D NumPy array of shape (tile_size, tile_size, channels).\\n\",\n+ \" \\n\",\n+ \" Returns:\\n\",\n+ \" A 3D NumPy array of shape (tile_size, tile_size, channels) representing\\n\",\n+ \" optical density values.\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" tile = tile.astype(np.float64)\\n\",\n+ \" #od = -np.log10(tile/255 + 1e-8)\\n\",\n+ \" od = -np.log((tile+1)/240)\\n\",\n+ \" return od\\n\",\n+ \"\\n\",\n\"def keep_tile(tile_tuple, tile_size, tissue_threshold):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" Determine if a tile should be kept.\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" slide_num, tile = tile_tuple\\n\",\n\" if tile.shape[0:2] == (tile_size, tile_size):\\n\",\n+ \" tile_orig = tile\\n\",\n+ \" \\n\",\n+ \" # Check 1\\n\",\n\" # Convert 3D RGB image to 2D grayscale image, from\\n\",\n\" # 0 (dense tissue) to 1 (plain background).\\n\",\n\" tile = rgb2gray(tile)\\n\",\n\" tile = binary_fill_holes(tile)\\n\",\n\" # Calculate percentage of tissue coverage.\\n\",\n\" percentage = tile.mean()\\n\",\n- \" return percentage >= tissue_threshold\\n\",\n+ \" check1 = percentage >= tissue_threshold\\n\",\n+ \" \\n\",\n+ \" # Check 2\\n\",\n+ \" # Convert to optical density values\\n\",\n+ \" tile = optical_density(tile_orig)\\n\",\n+ \" # Threshold at beta\\n\",\n+ \" beta = 0.15\\n\",\n+ \" tile = np.min(tile, axis=2) >= beta\\n\",\n+ \" # Apply morphology for same reasons as above.\\n\",\n+ \" tile = binary_closing(tile, disk(2))\\n\",\n+ \" tile = binary_dilation(tile, disk(2))\\n\",\n+ \" tile = binary_fill_holes(tile)\\n\",\n+ \" percentage = tile.mean()\\n\",\n+ \" check2 = percentage >= tissue_threshold\\n\",\n+ \" \\n\",\n+ \" return check1 and check2\\n\",\n\" else:\\n\",\n\" return False\"\n]\n\" tile_indices = (slides.flatMap(\\n\",\n\" lambda slide: process_slide(slide, folder, training, tile_size, overlap)))\\n\",\n\" tile_indices = tile_indices.repartition(num_partitions)\\n\",\n+ \" tile_indices.cache()\\n\",\n\" tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))\\n\",\n\" filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))\\n\",\n\" samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n+ \"collapsed\": false,\n\"deletable\": true,\n\"editable\": true\n},\n\"\\n\",\n\"# Settings\\n\",\n\"training = True\\n\",\n- \"tile_size = 1024\\n\",\n+ \"tile_size = 256\\n\",\n\"sample_size = 256\\n\",\n\"grayscale = False\\n\",\n\"num_partitions = 20000\\n\",\n\"metadata\": {\n\"collapsed\": false,\n\"deletable\": true,\n- \"editable\": true\n+ \"editable\": true,\n+ \"scrolled\": false\n},\n\"outputs\": [],\n\"source\": [\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1185] Updating Preprocessing Notebook. Adding more aggressive filtering by utilizing optical density values and effectively skipping the 1024x1024 tiles by generating tiles of the same size as the final "samples".
49,772
09.03.2017 22:34:16
28,800
88ad73939cc19278979c93b55ce394557535f788
Updating MachineLearning Notebook. Updating the image sizes back to the full 256x256x3 shape.
[ { "change_type": "MODIFY", "old_path": "projects/breast_cancer/MachineLearning.ipynb", "new_path": "projects/breast_cancer/MachineLearning.ipynb", "diff": "\"cells\": [\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML\\n\",\n\"\\n\",\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Setup\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Read in train & val data\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\"# Settings\\n\",\n- \"size=64\\n\",\n- \"grayscale = True\\n\",\n+ \"size=256\\n\",\n+ \"grayscale = False\\n\",\n\"c = 1 if grayscale else 3\\n\",\n\"p = 0.01\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n+ \"if p < 1:\\n\",\n\" tr_sample_filename = os.path.join(\\\"data\\\", \\\"train_{}_sample_{}{}.parquet\\\".format(p, size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n\" val_sample_filename = os.path.join(\\\"data\\\", \\\"val_{}_sample_{}{}.parquet\\\".format(p, size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \"else:\\n\",\n+ \" tr_filename = \\\"train_{}{}.parquet\\\".format(size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n+ \" val_filename = \\\"val_{}{}.parquet\\\".format(size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n\"train_df = sqlContext.read.load(tr_sample_filename)\\n\",\n\"val_df = sqlContext.read.load(val_sample_filename)\\n\",\n\"train_df, val_df\"\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Extract X and Y matrices\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Convert to SystemML Matrices\\n\",\n\"Note: This allows for reuse of the matrices on multiple\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": 
null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Trigger Caching (Optional)\\n\",\n\"Note: This will take a while and is not necessary, but doing it\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Save Matrices (Optional)\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"---\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"# Softmax Classifier\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Sanity Check: Overfit Small Portion\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Train\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Eval\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"---\"\n]\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {\n- \"collapsed\": true\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"source\": [\n\"# LeNet-like ConvNet\"\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Sanity Check: Overfit Small Portion\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Hyperparameter Search\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": 
[],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Train\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"markdown\",\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n\"source\": [\n\"## Eval\"\n]\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n}\n],\n\"metadata\": {\n- \"anaconda-cloud\": {},\n\"kernelspec\": {\n\"display_name\": \"Python 3\",\n\"language\": \"python\",\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1185] Updating MachineLearning Notebook. Updating the image sizes back to the full 256x256x3 shape.
49,738
10.03.2017 01:05:13
28,800
9fd834ed235dea221a6b255e9fabf35b506491f8
Avoid unnecessary caching of parfor spark dpe inputs
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "diff": "@@ -1244,7 +1244,7 @@ public class SparkExecutionContext extends ExecutionContext\nreturn jsc.sc().getPersistentRDDs().contains(rddID);\n}\n- private boolean isRDDCached( int rddID ) {\n+ public boolean isRDDCached( int rddID ) {\n//check that rdd is marked for caching\nJavaSparkContext jsc = getSparkContext();\nif( !jsc.sc().getPersistentRDDs().contains(rddID) ) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "diff": "@@ -47,6 +47,7 @@ import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.PairWritableBlock;\nimport org.apache.sysml.runtime.instructions.spark.data.DatasetObject;\n+import org.apache.sysml.runtime.instructions.spark.data.RDDObject;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\nimport org.apache.sysml.runtime.instructions.spark.utils.SparkUtils;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils.DataFrameExtractIDFunction;\n@@ -120,6 +121,7 @@ public class RemoteDPParForSpark\nreturn ret;\n}\n+ @SuppressWarnings(\"unchecked\")\nprivate static JavaPairRDD<Long, Writable> getPartitionedInput(SparkExecutionContext sec,\nString matrixvar, OutputInfo oi, PDataPartitionFormat dpf)\nthrows DMLRuntimeException\n@@ -146,14 +148,28 @@ public class RemoteDPParForSpark\nreturn prepinput.mapToPair(new DataFrameToRowBinaryBlockFunction(\nmc.getCols(), dsObj.isVectorBased(), dsObj.containsID()));\n}\n- //default binary block input rdd\n- else\n+ //binary block input rdd without grouping\n+ else if( !requiresGrouping(dpf, mo) )\n{\n//get input rdd and data partitioning\nJavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable(matrixvar);\nDataPartitionerRemoteSparkMapper dpfun = new DataPartitionerRemoteSparkMapper(mc, ii, oi, dpf);\nreturn in.flatMapToPair(dpfun);\n}\n+ //default binary block input rdd with grouping\n+ else\n+ {\n+ //get input rdd, avoid unnecessary caching if input is checkpoint and not cached yet\n+ //to reduce memory pressure for shuffle and subsequent\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable(matrixvar);\n+ if( mo.getRDDHandle().isCheckpointRDD() && !sec.isRDDCached(in.id()) )\n+ in = (JavaPairRDD<MatrixIndexes,MatrixBlock>)((RDDObject)\n+ mo.getRDDHandle().getLineageChilds().get(0)).getRDD();\n+\n+ //data partitioning of input rdd\n+ DataPartitionerRemoteSparkMapper dpfun = new DataPartitionerRemoteSparkMapper(mc, ii, oi, dpf);\n+ return in.flatMapToPair(dpfun);\n+ }\n}\n//determines if given input matrix requires grouping of partial partition slices\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1390] Avoid unnecessary caching of parfor spark dpe inputs
49,738
10.03.2017 17:27:05
28,800
2b34f21bf839e6a243b6bea8a07df66e9f7eb354
Fix redundant write of results in parfor spark dpe
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "diff": "@@ -129,13 +129,12 @@ public class RemoteDPParForSparkWorker extends ParWorker implements PairFlatMapF\n//maintain accumulators\n_aTasks.add( 1 );\n_aIters.add( (int)(getExecutedIterations()-numIter) );\n+ }\n//write output if required (matrix indexed write)\n- //note: this copy is necessary for environments without spark libraries\nArrayList<String> tmp = RemoteParForUtils.exportResultVariables( _workerID, _ec.getVariables(), _resultVars );\nfor( String val : tmp )\nret.add(new Tuple2<Long,String>(_workerID, val));\n- }\nreturn ret.iterator();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1392] Fix redundant write of results in parfor spark dpe
49,736
10.03.2017 21:06:24
28,800
c1c7d3341b89d68e80f26c328a63611e53410520
Added DML scripts to crop rgb and grayscale images, as well as a utility function to print a scikit-learn-like classification report
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/utils/image_utils.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * Simple utility to crop image of shape [N, 3 * Hin * Win] into [N, 3 * Hout * Wout]\n+ * Assumption: Hout < Hin, Wout < Win and input contains values [0, ..]\n+ */\n+crop_rgb = function(matrix[double] input, int Hin, int Win, int Hout, int Wout) return (matrix[double] out) {\n+ start_h = Hin - Hout + 1\n+ end_h = start_h + Hout - 1\n+ start_w = Win - Wout + 1\n+ end_w = start_w + Wout - 1\n+ mask = matrix(0, rows=Hin, cols=Win)\n+ temp_mask = matrix(1, rows=Hout, cols=Wout)\n+ mask[start_h:end_h, start_w:end_w] = temp_mask\n+ mask = matrix(mask, rows=1, cols=Hin*Win)\n+ mask = cbind(cbind(mask, mask), mask)\n+ out = removeEmpty(target=(input+1), margin=\"cols\", select=mask) - 1\n+}\n+\n+/*\n+ * Simple utility to crop image of shape [N, Hin * Win] into [N, Hout * Wout]\n+ * Assumption: Hout < Hin, Wout < Win and input contains values [0, ..]\n+ *\n+ * Example PySpark script:\n+ * import matplotlib.pyplot as plt\n+ * from sklearn import datasets\n+ * digits = datasets.load_digits()\n+ * image = digits.images[3,].reshape(1, -1)\n+ * plt.imshow(image.reshape(8,8), cmap=plt.cm.gray_r)\n+ * plt.show()\n+ *\n+ * script = \"\"\"\n+ * crop_grayscale = function(matrix[double] input, int Hin, int Win, int Hout, int Wout) return (matrix[double] out) {\n+ * start_h = Hin - Hout + 1\n+ * end_h = start_h + Hout - 1\n+ * start_w = Win - Wout + 1\n+ * end_w = start_w + Wout - 1\n+ * mask = matrix(0, rows=Hin, cols=Win)\n+ * temp_mask = matrix(1, rows=Hout, cols=Wout)\n+ * mask[start_h:end_h, start_w:end_w] = temp_mask\n+ * mask = matrix(mask, rows=1, cols=Hin*Win)\n+ * out = removeEmpty(target=(input+1), margin=\"cols\", select=mask) - 1\n+ * }\n+ * Y = crop_grayscale(X, 8, 8, 6, 6)\n+ * \"\"\"\n+ * from systemml import MLContext, dml\n+ * ml = MLContext(sc)\n+ * script = dml(script).input(X=image).output(\"Y\")\n+ * out = ml.execute(script).get(\"Y\").toNumPy()\n+ * plt.imshow(out.reshape(6,6), cmap=plt.cm.gray_r)\n+ * plt.show()\n+ *\n+ */\n+crop_grayscale = function(matrix[double] input, int Hin, int Win, int Hout, int Wout) return (matrix[double] out) {\n+ start_h = Hin - Hout + 1\n+ end_h = start_h + Hout - 1\n+ start_w = Win - Wout + 1\n+ end_w = start_w + Wout - 1\n+ mask = matrix(0, rows=Hin, cols=Win)\n+ temp_mask = matrix(1, rows=Hout, cols=Wout)\n+ mask[start_h:end_h, start_w:end_w] = temp_mask\n+ mask = matrix(mask, rows=1, cols=Hin*Win)\n+ out = removeEmpty(target=(input+1), margin=\"cols\", select=mask) - 1\n+}\n\\ No newline at end of file\n" }, { 
"change_type": "ADD", "old_path": null, "new_path": "scripts/utils/metrics.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+/**\n+ * Gets sklearn.metrics.classification_report-like output that can be used by DML user.\n+ * y_true: row or column vector, Ground truth (correct) target values.\n+ * y_pred: row or column vector, Estimated targets as returned by a classifier.\n+ * labels: column vector, list of label to include in the report.\n+ *\n+ * PySpark example:\n+ * from sklearn import datasets, neighbors\n+ * from systemml.mllearn import LogisticRegression\n+ * from pyspark.sql import SQLContext\n+ * sqlCtx = SQLContext(sc)\n+ * digits = datasets.load_digits()\n+ * X_digits = digits.data\n+ * y_digits = digits.target + 1\n+ * n_samples = len(X_digits)\n+ * X_train = X_digits[:int(.9 * n_samples)]\n+ * y_train = y_digits[:int(.9 * n_samples)]\n+ * X_test = X_digits[int(.9 * n_samples):]\n+ * y_test = y_digits[int(.9 * n_samples):]\n+ * logistic = LogisticRegression(sqlCtx)\n+ * logistic.fit(X_train, y_train)\n+ * y_predicted = logistic.predict(X_test)\n+ *\n+ * script = \"\"\"\n+ * classification_report = function(matrix[double] y_true, matrix[double] y_pred, matrix[double] labels) return (string out) {\n+ * num_rows_error_measures = nrow(labels)\n+ * error_measures = matrix(0, rows=num_rows_error_measures, cols=5)\n+ * for(i in 1:num_rows_error_measures) {\n+ * class_i = labels[i,1]\n+ * tp = sum( (y_true == y_pred) * (y_true == class_i) )\n+ * tp_plus_fp = sum( (y_pred == class_i) )\n+ * tp_plus_fn = sum( (y_true == class_i) )\n+ * precision = tp / tp_plus_fp\n+ * recall = tp / tp_plus_fn\n+ * f1Score = 2*precision*recall / (precision+recall)\n+ * error_measures[i,1] = class_i\n+ * error_measures[i,2] = precision\n+ * error_measures[i,3] = recall\n+ * error_measures[i,4] = f1Score\n+ * error_measures[i,5] = tp_plus_fn\n+ * }\n+ * # Added num_true_labels to debug whether the input data was randomized or now, which is common requirement of SGD-style algorithms.\n+ * # Also, helps debug class-skew related problems.\n+ * out = \"class \\tprecision\\trecall \\tf1-score\\tnum_true_labels\\n\" + toString(error_measures, decimal=7, sep=\"\\t\")\n+ * }\n+ * out = classification_report(y_true, y_pred, seq(1, 10))\n+ * print(out)\n+ * \"\"\"\n+ * from systemml import MLContext, dml\n+ * ml = MLContext(sc)\n+ * script = dml(script).input(y_true=y_test, y_pred=y_predicted)\n+ * ml.execute(script)\n+ *\n+ * This outputs:\n+ * class precision recall f1-score num_true_labels\n+ * 1.0000000 1.0000000 1.0000000 1.0000000 16.0000000\n+ * 2.0000000 0.9444444 0.8947368 0.9189189 19.0000000\n+ * 
3.0000000 1.0000000 1.0000000 1.0000000 17.0000000\n+ * 4.0000000 0.9166667 0.6111111 0.7333333 18.0000000\n+ * 5.0000000 0.9047619 0.9500000 0.9268293 20.0000000\n+ * 6.0000000 0.9000000 1.0000000 0.9473684 18.0000000\n+ * 7.0000000 1.0000000 1.0000000 1.0000000 18.0000000\n+ * 8.0000000 1.0000000 1.0000000 1.0000000 19.0000000\n+ * 9.0000000 0.7272727 0.9411765 0.8205128 17.0000000\n+ * 10.0000000 0.9411765 0.8888889 0.9142857 18.0000000\n+ *\n+ */\n+classification_report = function(matrix[double] y_true, matrix[double] y_pred, matrix[double] labels) return (string out) {\n+ num_rows_error_measures = nrow(labels)\n+ error_measures = matrix(0, rows=num_rows_error_measures, cols=5)\n+ for(i in 1:num_rows_error_measures) {\n+ class_i = labels[i,1]\n+ tp = sum( (y_true == y_pred) * (y_true == class_i) )\n+ tp_plus_fp = sum( (y_pred == class_i) )\n+ tp_plus_fn = sum( (y_true == class_i) )\n+ precision = tp / tp_plus_fp\n+ recall = tp / tp_plus_fn\n+ f1Score = 2*precision*recall / (precision+recall)\n+ error_measures[i,1] = class_i\n+ error_measures[i,2] = precision\n+ error_measures[i,3] = recall\n+ error_measures[i,4] = f1Score\n+ error_measures[i,5] = tp_plus_fn\n+ }\n+ # Added num_true_labels to debug whether the input data was randomized or now, which is common requirement of SGD-style algorithms.\n+ # Also, helps debug class-skew related problems.\n+ out = \"class \\tprecision\\trecall \\tf1-score\\tnum_true_labels\\n\" + toString(error_measures, decimal=7, sep=\"\\t\")\n+}\n\\ No newline at end of file\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1394] Added DML scripts to crop rgb and grayscale images, as well as a utility function to print a scikit-learn-like classification report
49,736
10.03.2017 21:16:26
28,800
c335cd403e6961460ba21234d4dbcb79ae22925f
[SYSTEMML-1340][SYSTEMML-1341] Implemented conv2d_bias_add and relu_maxpooling instructions for GPU Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "@@ -179,11 +179,11 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nArrayList<Hop> inputs1 = inputs;\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\nOperationTypes lopOp = HopsConv2Lops.get(op);\n- if(op == ConvOp.MAX_POOLING && (et == ExecType.CP || et == ExecType.SPARK) && isInputReLU(inputs.get(0))) {\n+ if(op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {\nin = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\n- else if(op == ConvOp.BIAS_ADD && (et == ExecType.CP || et == ExecType.SPARK) && isInputConv2d(inputs.get(0))) {\n+ else if(op == ConvOp.BIAS_ADD && isInputConv2d(inputs.get(0))) {\nlopOp = OperationTypes.DIRECT_CONV2D_BIAS_ADD;\n// the first lop is image\n@@ -320,7 +320,7 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nif( _etypeForced != null )\n{\n- _etype = _etypeForced;\n+ _etype = findGPUExecTypeByMemEstimate(_etypeForced);\n}\nelse\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -41,6 +41,8 @@ public class GPUInstructionParser extends InstructionParser\n// Neural Network Operators\nString2GPUInstructionType.put( \"relu_backward\", GPUINSTRUCTION_TYPE.Convolution);\nString2GPUInstructionType.put( \"conv2d\", GPUINSTRUCTION_TYPE.Convolution);\n+ String2GPUInstructionType.put( \"relu_maxpooling\", GPUINSTRUCTION_TYPE.Convolution);\n+ String2GPUInstructionType.put( \"conv2d_bias_add\", GPUINSTRUCTION_TYPE.Convolution);\nString2GPUInstructionType.put( \"conv2d_backward_filter\", GPUINSTRUCTION_TYPE.Convolution);\nString2GPUInstructionType.put( \"conv2d_backward_data\", GPUINSTRUCTION_TYPE.Convolution);\nString2GPUInstructionType.put( \"maxpooling\", GPUINSTRUCTION_TYPE.Convolution);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "diff": "@@ -35,6 +35,7 @@ public class ConvolutionGPUInstruction extends GPUInstruction\n{\nprivate CPOperand _input1;\nprivate CPOperand _input2;\n+ private CPOperand _input3;\nprivate CPOperand _output;\nprivate ArrayList<CPOperand> _input_shape;\nprivate ArrayList<CPOperand> _filter_shape;\n@@ -52,6 +53,15 @@ public class ConvolutionGPUInstruction extends GPUInstruction\n_output = out;\n}\n+ public ConvolutionGPUInstruction(CPOperand in1, CPOperand in2, CPOperand in3, CPOperand out, String opcode,\n+ String istr, ArrayList<CPOperand> stride,\n+ ArrayList<CPOperand> padding, ArrayList<CPOperand> input_shape,\n+ ArrayList<CPOperand> filter_shape)\n+ {\n+ this(in1, in2, out, opcode, istr, stride, padding, input_shape, filter_shape);\n+ _input3 = in3;\n+ }\n+\npublic ConvolutionGPUInstruction(CPOperand in1, CPOperand in2, CPOperand out, String opcode,\nString istr, ArrayList<CPOperand> stride,\nArrayList<CPOperand> padding, ArrayList<CPOperand> input_shape,\n@@ -104,7 +114,34 @@ public class ConvolutionGPUInstruction extends GPUInstruction\nreturn new ConvolutionGPUInstruction(in1, in2, out, opcode, str, stride,\npadding, input_shape, filter_shape);\n}\n- else if 
(opcode.equalsIgnoreCase(\"maxpooling\")) {\n+ else if (opcode.equalsIgnoreCase(\"conv2d_bias_add\")) {\n+ InstructionUtils.checkNumFields(parts, 16);\n+ CPOperand in1 = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand in3 = new CPOperand(parts[3]);\n+ CPOperand out = new CPOperand(parts[16]);\n+\n+ ArrayList<CPOperand> stride = new ArrayList<CPOperand>();\n+ ArrayList<CPOperand> padding = new ArrayList<CPOperand>();\n+ ArrayList<CPOperand> input_shape = new ArrayList<CPOperand>();\n+ ArrayList<CPOperand> filter_shape = new ArrayList<CPOperand>();\n+ stride.add(new CPOperand(parts[4]));\n+ stride.add(new CPOperand(parts[5]));\n+ padding.add(new CPOperand(parts[6]));\n+ padding.add(new CPOperand(parts[7]));\n+ input_shape.add(new CPOperand(parts[8]));\n+ input_shape.add(new CPOperand(parts[9]));\n+ input_shape.add(new CPOperand(parts[10]));\n+ input_shape.add(new CPOperand(parts[11]));\n+ filter_shape.add(new CPOperand(parts[12]));\n+ filter_shape.add(new CPOperand(parts[13]));\n+ filter_shape.add(new CPOperand(parts[14]));\n+ filter_shape.add(new CPOperand(parts[15]));\n+\n+ return new ConvolutionGPUInstruction(in1, in2, in3, out, opcode, str, stride,\n+ padding, input_shape, filter_shape);\n+ }\n+ else if (opcode.equalsIgnoreCase(\"maxpooling\") || opcode.equalsIgnoreCase(\"relu_maxpooling\")) {\nInstructionUtils.checkNumFields(parts, 14);\nCPOperand in1 = new CPOperand(parts[1]);\nCPOperand out = new CPOperand(parts[14]);\n@@ -216,6 +253,21 @@ public class ConvolutionGPUInstruction extends GPUInstruction\nLibMatrixCUDA.conv2d(getExtendedOpcode(), image, filter, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\n+ else if (instOpcode.equalsIgnoreCase(\"conv2d_bias_add\")) {\n+ MatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\n+ MatrixObject bias = getMatrixInputForGPUInstruction(ec, _input2.getName());\n+ MatrixObject filter = getMatrixInputForGPUInstruction(ec, _input3.getName());\n+\n+ if(image.getNumRows() != N || image.getNumColumns() != C*H*W)\n+ throw new DMLRuntimeException(\"Incorrect dimensions for image in conv2d\");\n+ if(filter.getNumRows() != K || filter.getNumColumns() != C*R*S)\n+ throw new DMLRuntimeException(\"Incorrect dimensions for filter in conv2d\");\n+\n+ ec.setMetaData(_output.getName(), N, K * P * Q);\n+ MatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, _output.getName());\n+ LibMatrixCUDA.conv2dBiasAdd(getExtendedOpcode(), image, bias, filter, out, N, C, H, W,\n+ K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ }\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_backward_filter\")) {\nMatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\nMatrixObject dout = getMatrixInputForGPUInstruction(ec, _input2.getName());\n@@ -248,7 +300,7 @@ public class ConvolutionGPUInstruction extends GPUInstruction\nLibMatrixCUDA.conv2dBackwardData(getExtendedOpcode(), filter, dout, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\n- else if (instOpcode.equalsIgnoreCase(\"maxpooling\")) {\n+ else if (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\nMatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\nif(image.getNumRows() != N || image.getNumColumns() != C*H*W)\n@@ -257,8 +309,12 @@ public class ConvolutionGPUInstruction extends GPUInstruction\nec.setMetaData(_output.getName(), N, C * P * Q);\nMatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, 
_output.getName());\n+ if(instOpcode.equalsIgnoreCase(\"maxpooling\"))\nLibMatrixCUDA.maxpooling(getExtendedOpcode(), image, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ else\n+ LibMatrixCUDA.reluMaxpooling(getExtendedOpcode(), image, out, N, C, H, W,\n+ K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\nelse if (instOpcode.equalsIgnoreCase(\"maxpooling_backward\")) {\nMatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\n@@ -281,7 +337,7 @@ public class ConvolutionGPUInstruction extends GPUInstruction\n// release inputs/outputs\nec.releaseMatrixInputForGPUInstruction(_input1.getName());\n- if (!instOpcode.equalsIgnoreCase(\"maxpooling\"))\n+ if (!( instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) )\nec.releaseMatrixInputForGPUInstruction(_input2.getName());\nec.releaseMatrixOutputForGPUInstruction(_output.getName());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -206,6 +206,13 @@ public class LibMatrixCUDA {\nprivate static int CONVOLUTION_PREFERENCE = cudnnConvolutionFwdPreference.CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;\n+ public static void conv2dBiasAdd(String instName, MatrixObject image, MatrixObject bias, MatrixObject filter, MatrixObject outputBlock, int N, int C, int H, int W,\n+ int K, int R, int S, int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q)\n+ throws DMLRuntimeException {\n+ conv2d(instName, image, filter, outputBlock, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ biasAdd(instName, outputBlock, bias, outputBlock);\n+ }\n+\npublic static void conv2d(String instName, MatrixObject image, MatrixObject filter, MatrixObject outputBlock, int N, int C, int H, int W,\nint K, int R, int S, int pad_h, int pad_w, int stride_h, int stride_w, int P, int Q)\nthrows DMLRuntimeException {\n@@ -651,6 +658,51 @@ public class LibMatrixCUDA {\nif(isInSparseFormat(image)) {\n((JCudaObject)image.getGPUObject()).sparseToDense(instName);\n}\n+ Pointer x = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\n+ performMaxpooling(instName, x, outputBlock, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ }\n+\n+ /**\n+ * performs relu followed by maxpooling on GPU by exploiting cudnnPoolingForward(...)\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param image image as matrix object\n+ * @param outputBlock output matrix\n+ * @param N batch size\n+ * @param C number of channels\n+ * @param H height of image\n+ * @param W width of image\n+ * @param K number of filters\n+ * @param R height of filter\n+ * @param S width of filter\n+ * @param pad_h vertical padding\n+ * @param pad_w horizontal padding\n+ * @param stride_h horizontal stride\n+ * @param stride_w vertical stride\n+ * @param P (H - R + 1 + 2*pad_h)/stride_h\n+ * @param Q (W - S + 1 + 2*pad_w)/stride_w\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n+ */\n+ public static void reluMaxpooling(String instName, MatrixObject image,\n+ MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n+ int S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\n+ int Q) throws DMLRuntimeException {\n+ if(isInSparseFormat(image)) {\n+ ((JCudaObject)image.getGPUObject()).sparseToDense(instName);\n+ }\n+ Pointer x = 
((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\n+ MatrixObject temp = new MatrixObject(image);\n+ temp.getGPUObject().acquireDeviceModifyDense();\n+ Pointer y = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\n+ performReLU(instName, x, y, N, C, H, W);\n+ performMaxpooling(instName, y, outputBlock, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ ((JCudaObject)temp.getGPUObject()).clearData(); // deallocate the temporary data\n+ }\n+\n+ private static void performMaxpooling(String instName, Pointer x,\n+ MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n+ int S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\n+ int Q) throws DMLRuntimeException {\n+\nPointer alpha = null;\nPointer beta = null;\ncudnnTensorDescriptor xDesc = null;\n@@ -666,7 +718,6 @@ public class LibMatrixCUDA {\npoolingDesc = allocatePoolingDescriptor(R, S, pad_h, pad_w, stride_h, stride_w);\n// Allocate data\n- Pointer x = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\nPointer y = ((JCudaObject)outputBlock.getGPUObject()).jcudaDenseMatrixPtr;\nalpha = pointerTo(1.0);\n@@ -808,51 +859,19 @@ public class LibMatrixCUDA {\n}\n}\n-\n- /**\n- * Performs the relu operation on the GPU.\n- * @param ec currently active {@link ExecutionContext}\n- * @param instName the invoking instruction's name for record {@link Statistics}.\n- * @param in input matrix\n- * @param outputName name of the output matrix\n- * @throws DMLRuntimeException if an error occurs\n- */\n- public static void relu(ExecutionContext ec, String instName, MatrixObject in, String outputName) throws DMLRuntimeException {\n- if(isInSparseFormat(in)) {\n- // TODO: FIXME: Implement sparse relu kernel\n- ((JCudaObject)in.getGPUObject()).sparseToDense(instName);\n- }\n-\n+ private static void performCuDNNReLU(String instName, Pointer srcData, Pointer dstData, long N, long C, long H, long W) {\ncudnnTensorDescriptor srcTensorDesc = null;\ncudnnTensorDescriptor dstTensorDesc = null;\nPointer alpha = null;\nPointer beta = null;\n-\n+ long t0=0;\ntry {\nalpha = pointerTo(1.0f);\nbeta = pointerTo(0.0f);\n- long N = in.getNumRows();\n- long H = in.getNumColumns();\n- long W = 1;\n- Pointer srcData = ((JCudaObject)in.getGPUObject()).jcudaDenseMatrixPtr;\n-\n- MatrixObject output = ec.getMatrixObject(outputName);\n- getDenseMatrixOutputForGPUInstruction(ec, instName, outputName); // Allocated the dense output matrix\n- Pointer dstData = ((JCudaObject)output.getGPUObject()).jcudaDenseMatrixPtr;\n- long t0=0;\n- if(N*H*W >= numDoublesIn2GB) {\n- // Invokes relu(double* A, double* ret, int rlen, int clen)\n- if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\n- kernels.launchKernel(\"relu\",\n- ExecutionConfig.getConfigForSimpleMatrixOperations((int)N, (int) (H*W)),\n- srcData, dstData, (int)N, (int) H*W);\n- if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_RELU_KERNEL, System.nanoTime() - t0);\n- }\n- else {\n// Allocate descriptors\n- srcTensorDesc = allocateTensorDescriptor((int)N, 1, (int)H, (int)W);\n- dstTensorDesc = allocateTensorDescriptor((int)N, 1, (int)H, (int)W);\n+ srcTensorDesc = allocateTensorDescriptor((int)N, (int)C, (int)H, (int)W);\n+ dstTensorDesc = allocateTensorDescriptor((int)N, (int)C, (int)H, (int)W);\ncudnnActivationDescriptor activationDescriptor = new cudnnActivationDescriptor();\ncudnnCreateActivationDescriptor(activationDescriptor);\ndouble dummy = -1;\n@@ -863,7 +882,6 @@ public class LibMatrixCUDA {\nbeta, 
dstTensorDesc, dstData);\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_ACTIVATION_FORWARD_LIB, System.nanoTime() - t0);\n}\n- }\nfinally {\nlong t1=0;\nif (GPUStatistics.DISPLAY_STATISTICS) t1 = System.nanoTime();\n@@ -881,6 +899,47 @@ public class LibMatrixCUDA {\n}\n}\n+ private static void performReLU(String instName, Pointer srcData, Pointer dstData, long N, long C, long H, long W) throws DMLRuntimeException {\n+ long t0=0;\n+ if(N*H*W >= numDoublesIn2GB) {\n+ // Invokes relu(double* A, double* ret, int rlen, int clen)\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\n+ kernels.launchKernel(\"relu\",\n+ ExecutionConfig.getConfigForSimpleMatrixOperations((int)N, (int) (H*W)),\n+ srcData, dstData, (int)N, (int) H*W);\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_RELU_KERNEL, System.nanoTime() - t0);\n+ }\n+ else {\n+ performCuDNNReLU(instName, srcData, dstData, N, 1, H, W);\n+ }\n+ }\n+\n+\n+ /**\n+ * Performs the relu operation on the GPU.\n+ * @param ec currently active {@link ExecutionContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param in input matrix\n+ * @param outputName name of the output matrix\n+ * @throws DMLRuntimeException if an error occurs\n+ */\n+ public static void relu(ExecutionContext ec, String instName, MatrixObject in, String outputName) throws DMLRuntimeException {\n+ if(isInSparseFormat(in)) {\n+ // TODO: FIXME: Implement sparse relu kernel\n+ ((JCudaObject)in.getGPUObject()).sparseToDense(instName);\n+ }\n+\n+ long N = in.getNumRows();\n+ long H = in.getNumColumns();\n+ long W = 1;\n+ Pointer srcData = ((JCudaObject)in.getGPUObject()).jcudaDenseMatrixPtr;\n+\n+ MatrixObject output = ec.getMatrixObject(outputName);\n+ getDenseMatrixOutputForGPUInstruction(ec, instName, outputName); // Allocated the dense output matrix\n+ Pointer dstData = ((JCudaObject)output.getGPUObject()).jcudaDenseMatrixPtr;\n+ performReLU(instName, srcData, dstData, N, 1, H, W);\n+ }\n+\n//********************************************************************/\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1340][SYSTEMML-1341] Implemented conv2d_bias_add and relu_maxpooling instruction for GPU Closes #425.
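A minimal CPU-side sketch of the fused relu+maxpooling pattern introduced in the commit above: relu is written into a temporary buffer and pooling then reads from that buffer, mirroring the temporary allocation in reluMaxpooling. It operates on plain double arrays rather than SystemML's GPU pointers, and all class and method names are illustrative assumptions, not SystemML APIs.

    // Illustrative only: fused relu -> 2x2 maxpooling on a single H x W channel.
    public class ReluMaxPoolSketch {
        // relu: out[i] = max(0, in[i])
        static double[] relu(double[] in) {
            double[] out = new double[in.length];
            for (int i = 0; i < in.length; i++)
                out[i] = Math.max(0, in[i]);
            return out;
        }
        // 2x2 max pooling with stride 2 (assumes H and W are even)
        static double[] maxpool2x2(double[] in, int H, int W) {
            int P = H / 2, Q = W / 2;
            double[] out = new double[P * Q];
            for (int p = 0; p < P; p++)
                for (int q = 0; q < Q; q++) {
                    double m = Double.NEGATIVE_INFINITY;
                    for (int dh = 0; dh < 2; dh++)
                        for (int dw = 0; dw < 2; dw++)
                            m = Math.max(m, in[(2 * p + dh) * W + (2 * q + dw)]);
                    out[p * Q + q] = m;
                }
            return out;
        }
        public static void main(String[] args) {
            double[] img = { -1, 2, 3, -4,
                              5, -6, 7, 8,
                             -9, 10, 11, 12,
                             13, 14, -15, 16 };
            double[] tmp = relu(img);             // temporary buffer, freed after pooling in the GPU code
            double[] out = maxpool2x2(tmp, 4, 4); // prints [5.0, 8.0, 14.0, 16.0]
            System.out.println(java.util.Arrays.toString(out));
        }
    }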
49,736
11.03.2017 11:36:08
28,800
f9b53b96fd7aea5f04de811d8c4a20017a329042
[HOTFIX] Enable center cropping in image_utils
[ { "change_type": "MODIFY", "old_path": "scripts/utils/image_utils.dml", "new_path": "scripts/utils/image_utils.dml", "diff": "* Assumption: Hout < Hin, Wout < Win and input contains values [0, ..]\n*/\ncrop_rgb = function(matrix[double] input, int Hin, int Win, int Hout, int Wout) return (matrix[double] out) {\n- start_h = Hin - Hout + 1\n+ start_h = ceil((Hin - Hout) / 2)\nend_h = start_h + Hout - 1\n- start_w = Win - Wout + 1\n+ start_w = ceil((Win - Wout) / 2)\nend_w = start_w + Wout - 1\nmask = matrix(0, rows=Hin, cols=Win)\ntemp_mask = matrix(1, rows=Hout, cols=Wout)\n@@ -50,9 +50,9 @@ crop_rgb = function(matrix[double] input, int Hin, int Win, int Hout, int Wout)\n*\n* script = \"\"\"\n* crop_grayscale = function(matrix[double] input, int Hin, int Win, int Hout, int Wout) return (matrix[double] out) {\n- * start_h = Hin - Hout + 1\n+ * start_h = ceil((Hin - Hout) / 2)\n* end_h = start_h + Hout - 1\n- * start_w = Win - Wout + 1\n+ * start_w = ceil((Win - Wout) / 2)\n* end_w = start_w + Wout - 1\n* mask = matrix(0, rows=Hin, cols=Win)\n* temp_mask = matrix(1, rows=Hout, cols=Wout)\n@@ -71,9 +71,9 @@ crop_rgb = function(matrix[double] input, int Hin, int Win, int Hout, int Wout)\n*\n*/\ncrop_grayscale = function(matrix[double] input, int Hin, int Win, int Hout, int Wout) return (matrix[double] out) {\n- start_h = Hin - Hout + 1\n+ start_h = ceil((Hin - Hout) / 2)\nend_h = start_h + Hout - 1\n- start_w = Win - Wout + 1\n+ start_w = ceil((Win - Wout) / 2)\nend_w = start_w + Wout - 1\nmask = matrix(0, rows=Hin, cols=Win)\ntemp_mask = matrix(1, rows=Hout, cols=Wout)\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Enable center cropping in image_utils
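The one-line change above moves the crop window from a corner to (roughly) the center of the image. A small worked example in Java, with illustrative names and the same inclusive row range the DML uses:

    // Illustrative sketch of the centered crop index computation from the patch above.
    public class CenterCropIndexSketch {
        // start index: ceil((in - out) / 2), as in the updated crop_rgb/crop_grayscale
        static int cropStart(int in, int out) {
            return (int) Math.ceil((in - out) / 2.0);
        }
        public static void main(String[] args) {
            int Hin = 256, Hout = 224;
            int startH = cropStart(Hin, Hout);  // 16
            int endH = startH + Hout - 1;       // 239, i.e. 224 roughly centered rows
            // the previous formula Hin - Hout + 1 would have started at row 33,
            // selecting a corner crop instead of a centered one
            System.out.println("rows " + startH + " .. " + endH);
        }
    }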
49,738
11.03.2017 19:42:53
28,800
28fe4fe8ff28cb093ed345cd22e6280db6654888
Fix cluster analysis (avoid yarn calls in spark modes)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -487,6 +487,9 @@ public class OptimizerUtils\n*/\npublic static int getNumReducers( boolean configOnly )\n{\n+ if( isSparkExecutionMode() )\n+ return SparkExecutionContext.getDefaultParallelism(false);\n+\nint ret = ConfigurationManager.getNumReducers();\nif( !configOnly ) {\nret = Math.min(ret,InfrastructureAnalyzer.getRemoteParallelReduceTasks());\n@@ -501,6 +504,9 @@ public class OptimizerUtils\npublic static int getNumMappers()\n{\n+ if( isSparkExecutionMode() )\n+ return SparkExecutionContext.getDefaultParallelism(false);\n+\nint ret = InfrastructureAnalyzer.getRemoteParallelMapTasks();\n//correction max number of reducers on yarn clusters\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -1481,16 +1481,24 @@ public class ParForProgramBlock extends ForProgramBlock\nResultMerge rm = null;\n//determine degree of parallelism\n+ int maxMap = -1, maxRed = -1;\n+ if( OptimizerUtils.isSparkExecutionMode() ) {\n+ maxMap = (int) SparkExecutionContext.getDefaultParallelism(true);\n+ maxRed = maxMap; //equal map/reduce\n+ }\n+ else {\nint numReducers = ConfigurationManager.getNumReducers();\n- int maxMap = InfrastructureAnalyzer.getRemoteParallelMapTasks();\n- int maxRed = InfrastructureAnalyzer.getRemoteParallelReduceTasks();\n+ maxMap = InfrastructureAnalyzer.getRemoteParallelMapTasks();\n+ maxRed = Math.min(numReducers,\n+ InfrastructureAnalyzer.getRemoteParallelReduceTasks());\n//correction max number of reducers on yarn clusters\nif( InfrastructureAnalyzer.isYarnEnabled() ) {\nmaxMap = (int)Math.max( maxMap, YarnClusterAnalyzer.getNumCores() );\nmaxRed = (int)Math.max( maxRed, YarnClusterAnalyzer.getNumCores()/2 );\n}\n+ }\nint numMap = Math.max(_numThreads, maxMap);\n- int numRed = Math.min(numReducers, maxRed);\n+ int numRed = maxRed;\n//create result merge implementation\nswitch( prm )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -361,12 +361,26 @@ public class OptimizerRuleBased extends Optimizer\n_lk = InfrastructureAnalyzer.getLocalParallelism();\n_lkmaxCP = (int) Math.ceil( PAR_K_FACTOR * _lk );\n_lkmaxMR = (int) Math.ceil( PAR_K_MR_FACTOR * _lk );\n+ _lm = OptimizerUtils.getLocalMemBudget();\n+\n+ //spark-specific cluster characteristics\n+ if( OptimizerUtils.isSparkExecutionMode() ) {\n+ //we get all required cluster characteristics from spark's configuration\n+ //to avoid invoking yarns cluster status\n+ _rnk = SparkExecutionContext.getNumExecutors();\n+ _rk = (int) SparkExecutionContext.getDefaultParallelism(true);\n+ _rk2 = _rk; //equal map/reduce unless we find counter-examples\n+ int cores = SparkExecutionContext.getDefaultParallelism(true)\n+ / SparkExecutionContext.getNumExecutors();\n+ int ccores = (int) Math.min(cores, _N);\n+ _rm = SparkExecutionContext.getBroadcastMemoryBudget() / ccores;\n+ _rm2 = SparkExecutionContext.getBroadcastMemoryBudget() / ccores;\n+ }\n+ //mr/yarn-specific cluster characteristics\n+ else {\n_rnk = InfrastructureAnalyzer.getRemoteParallelNodes();\n_rk = 
InfrastructureAnalyzer.getRemoteParallelMapTasks();\n_rk2 = InfrastructureAnalyzer.getRemoteParallelReduceTasks();\n- _rkmax = (int) Math.ceil( PAR_K_FACTOR * _rk );\n- _rkmax2 = (int) Math.ceil( PAR_K_FACTOR * _rk2 );\n- _lm = OptimizerUtils.getLocalMemBudget();\n_rm = OptimizerUtils.getRemoteMemBudgetMap(false);\n_rm2 = OptimizerUtils.getRemoteMemBudgetReduce();\n@@ -379,20 +393,10 @@ public class OptimizerRuleBased extends Optimizer\n_rk = (int) Math.max( _rk, tmprk );\n_rk2 = (int) Math.max( _rk2, tmprk/2 );\n}\n+ }\n- //correction of max parallelism and memory if spark runtime enabled because\n- //spark limits the available parallelism by its own executor configuration\n- if( OptimizerUtils.isSparkExecutionMode() ) {\n- _rk = (int) SparkExecutionContext.getDefaultParallelism(true);\n- _rk2 = _rk; //equal map/reduce unless we find counter-examples\n_rkmax = (int) Math.ceil( PAR_K_FACTOR * _rk );\n_rkmax2 = (int) Math.ceil( PAR_K_FACTOR * _rk2 );\n- int cores = SparkExecutionContext.getDefaultParallelism(true)\n- / SparkExecutionContext.getNumExecutors();\n- int ccores = (int) Math.min(cores, _N);\n- _rm = SparkExecutionContext.getBroadcastMemoryBudget() / ccores;\n- _rm2 = SparkExecutionContext.getBroadcastMemoryBudget() / ccores;\n- }\n}\n///////\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/stat/InfrastructureAnalyzer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/stat/InfrastructureAnalyzer.java", "diff": "@@ -26,6 +26,8 @@ import org.apache.hadoop.mapred.ClusterStatus;\nimport org.apache.hadoop.mapred.JobClient;\nimport org.apache.hadoop.mapred.JobConf;\nimport org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.matrix.mapred.MRConfigurationNames;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -243,7 +245,9 @@ public class InfrastructureAnalyzer\n*/\npublic static int getCkMaxMR()\n{\n- //default value (if not specified)\n+ if( OptimizerUtils.isSparkExecutionMode() )\n+ return SparkExecutionContext.getDefaultParallelism(true);\n+ else\nreturn getRemoteParallelMapTasks();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1276] Fix cluster analysis (avoid yarn calls in spark modes)
49,738
11.03.2017 23:08:48
28,800
bcf96e1ccc32948fa2420bd5d9649d15a8d4d521
Robustness of parfor spark partitioning w/ unknown nnz
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -1070,9 +1070,11 @@ public class ParForProgramBlock extends ForProgramBlock\nexportMatricesToHDFS(ec, _colocatedDPMatrix);\n// Step 4) submit MR job (wait for finished work)\n- OutputInfo inputOI = ((inputMatrix.getSparsity()<0.1 && inputDPF==PDataPartitionFormat.COLUMN_WISE)||\n- (inputMatrix.getSparsity()<0.001 && inputDPF==PDataPartitionFormat.ROW_WISE))?\n- OutputInfo.BinaryCellOutputInfo : OutputInfo.BinaryBlockOutputInfo;\n+ //TODO runtime support for binary cell partitioning\n+ //OutputInfo inputOI = ((inputMatrix.getSparsity()<0.1 && inputDPF==PDataPartitionFormat.COLUMN_WISE)||\n+ // (inputMatrix.getSparsity()<0.001 && inputDPF==PDataPartitionFormat.ROW_WISE))?\n+ // OutputInfo.BinaryCellOutputInfo : OutputInfo.BinaryBlockOutputInfo;\n+ OutputInfo inputOI = OutputInfo.BinaryBlockOutputInfo;\nRemoteParForJobReturn ret = RemoteDPParForSpark.runJob(_ID, itervar.getName(), _colocatedDPMatrix, program, clsMap,\nresultFile, inputMatrix, ec, inputDPF, inputOI, _tSparseCol, _enableCPCaching, _numThreads );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/MatrixObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/MatrixObject.java", "diff": "@@ -185,7 +185,7 @@ public class MatrixObject extends CacheableData<MatrixBlock>\npublic double getSparsity() {\nMatrixCharacteristics mc = getMatrixCharacteristics();\n- return ((double)mc.getNonZeros())/mc.getRows()/mc.getCols();\n+ return OptimizerUtils.getSparsity(mc);\n}\n// *********************************************\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1395] Robustness of parfor spark partitioning w/ unknown nnz
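The getSparsity change above matters when the nonzero count is unknown (negative): a raw nnz/rows/cols would yield a meaningless negative "sparsity". Below is a sketch of a defensive variant, under the assumption (not verified against OptimizerUtils) that unknown nnz should be treated as dense; names are illustrative only.

    // Illustrative only; the actual logic lives in OptimizerUtils.getSparsity.
    public class SparsitySketch {
        static double getSparsitySafe(long rows, long cols, long nnz) {
            if (rows <= 0 || cols <= 0 || nnz < 0)
                return 1.0; // unknown dimensions or unknown nnz: assume dense
            return Math.min(1.0, (double) nnz / rows / cols);
        }
        public static void main(String[] args) {
            System.out.println(getSparsitySafe(1000, 1000, 5000)); // 0.005
            System.out.println(getSparsitySafe(1000, 1000, -1));   // 1.0, instead of a negative value
        }
    }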
49,717
13.03.2017 14:21:49
25,200
95f300d9d18801f585e579227c4123c475eb5c9c
[HOTFIX] added missing license, removed missing exception in javadoc

[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "diff": "@@ -1209,7 +1209,6 @@ public class JCudaObject extends GPUObject {\n* does lazy/eager cudaFree calls\n* @param toFree {@link Pointer} instance to be freed\n* @param eager true if to be done eagerly\n- * @throws DMLRuntimeException\n*/\npublic static void cudaFreeHelper(final Pointer toFree, boolean eager) {\ncudaFreeHelper(null, toFree, eager);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/LRUCacheMap.java", "new_path": "src/main/java/org/apache/sysml/utils/LRUCacheMap.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\npackage org.apache.sysml.utils;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/utils/LRUCacheMapTest.java", "new_path": "src/test/java/org/apache/sysml/test/utils/LRUCacheMapTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\npackage org.apache.sysml.test.utils;\nimport org.apache.sysml.utils.LRUCacheMap;\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] added missing license,removed missing exception in javadoc
49,736
13.03.2017 13:53:45
28,800
4344ad0c6dc36ec1e8ab5015832b3e5c1281e361
[MINOR] Added common errors and troubleshooting tricks Closes
[ { "change_type": "MODIFY", "old_path": "docs/troubleshooting-guide.md", "new_path": "docs/troubleshooting-guide.md", "diff": "@@ -94,3 +94,45 @@ Note: The default `SystemML-config.xml` is located in `<path to SystemML root>/c\nhadoop jar SystemML.jar [-? | -help | -f <filename>] (-config=<config_filename>) ([-args | -nvargs] <args-list>)\nSee [Invoking SystemML in Hadoop Batch Mode](hadoop-batch-mode.html) for details of the syntax.\n+\n+## Total size of serialized results is bigger than spark.driver.maxResultSize\n+\n+Spark aborts a job if the estimated result size of collect is greater than maxResultSize to avoid out-of-memory errors in driver.\n+However, SystemML's optimizer has estimates the memory required for each operator and provides guards against these out-of-memory errors in driver.\n+So, we recommend setting the configuration `--conf spark.driver.maxResultSize=0`.\n+\n+## File does not exist on HDFS/LFS error from remote parfor\n+\n+This error usually comes from incorrect HDFS configuration on the worker nodes. To investigate this, we recommend\n+\n+- Testing if HDFS is accessible from the worker node: `hadoop fs -ls <file path>`\n+- Synchronize hadoop configuration across the worker nodes.\n+- Set the environment variable `HADOOP_CONF_DIR`. You may have to restart the cluster-manager to get the hadoop configuration.\n+\n+## JVM Garbage Collection related flags\n+\n+We recommend providing 10% of maximum memory to young generation and using `-server` flag for robust garbage collection policy.\n+For example: if you intend to use 20G driver and 60G executor, then please add following to your configuration:\n+\n+ spark-submit --driver-memory 20G --executor-memory 60G --conf \"spark.executor.extraJavaOptions=-Xmn6G -server\" --conf \"spark.driver.extraJavaOptions=-Xmn2G -server\" ...\n+\n+## Memory overhead\n+\n+Spark sets `spark.yarn.executor.memoryOverhead`, `spark.yarn.driver.memoryOverhead` and `spark.yarn.am.memoryOverhead` to be 10% of memory provided\n+to the executor, driver and YARN Application Master respectively (with minimum of 384 MB). For certain workloads, the user may have to increase this\n+overhead to 12-15% of the memory budget.\n+\n+## Network timeout\n+\n+To avoid false-positive errors due to network failures in case of compute-bound scripts, the user may have to increase the timeout `spark.network.timeout` (default: 120s).\n+\n+## Advanced developer statistics\n+\n+Few of our operators (for example: convolution-related operator) and GPU backend allows an expert user to get advanced statistics\n+by setting the configuration `systemml.stats.extraGPU` and `systemml.stats.extraDNN` in the file SystemML-config.xml.\n+\n+## Out-Of-Memory on executors\n+\n+Out-Of-Memory on executors is often caused due to side-effects of lazy evaluation and in-memory input data of Spark for large-scale problems.\n+Though we are constantly improving our optimizer to address this scenario, a quick hack to resolve this is reducing the number of cores allocated to the executor.\n+We would highly appreciate if you file a bug report on our [issue tracker](https://issues.apache.org/jira/browse/SYSTEMML) if and when you encounter OOM.\n\\ No newline at end of file\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Added common errors and troubleshooting tricks Closes #428.
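The guide added above introduces several independent Spark settings; a single spark-submit line that combines them could look like the following. The memory sizes, timeout, overhead value, and script name are placeholders for illustration, not values recommended by this commit.

    spark-submit --master yarn --driver-memory 20G --executor-memory 60G \
        --conf spark.driver.maxResultSize=0 \
        --conf spark.network.timeout=512s \
        --conf spark.yarn.executor.memoryOverhead=9216 \
        --conf "spark.executor.extraJavaOptions=-Xmn6G -server" \
        --conf "spark.driver.extraJavaOptions=-Xmn2G -server" \
        SystemML.jar -f myscript.dml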
49,738
15.03.2017 10:57:14
25,200
143e61430cd24a39aa41818b8017fe95dd953020
Scalable statistics counters and atomic id sequence
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -760,7 +760,7 @@ public class DMLScript\nCacheableData.initCaching();\n//reset statistics (required if multiple scripts executed in one JVM)\n- Statistics.resetNoOfExecutedJobs( 0 );\n+ Statistics.resetNoOfExecutedJobs();\nif( STATISTICS ) {\nCacheStatistics.reset();\nStatistics.reset();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheStatistics.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheStatistics.java", "diff": "package org.apache.sysml.runtime.controlprogram.caching;\n-import java.util.concurrent.atomic.AtomicLong;\n+import java.util.concurrent.atomic.LongAdder;\n/**\n* This singleton provides basic caching statistics in CP.\n@@ -34,7 +34,6 @@ import java.util.concurrent.atomic.AtomicLong;\n*/\npublic class CacheStatistics\n{\n-\n//enum used for MR counters\npublic enum Stat {\nCACHE_HITS_MEM,\n@@ -51,234 +50,188 @@ public class CacheStatistics\n}\n//hit statistics (for acquire read)\n- private static AtomicLong _numHitsMem = null;\n- private static AtomicLong _numHitsFSBuff = null;\n- private static AtomicLong _numHitsFS = null;\n- private static AtomicLong _numHitsHDFS = null;\n+ private static final LongAdder _numHitsMem = new LongAdder();\n+ private static final LongAdder _numHitsFSBuff = new LongAdder();\n+ private static final LongAdder _numHitsFS = new LongAdder();\n+ private static final LongAdder _numHitsHDFS = new LongAdder();\n//write statistics caching\n- private static AtomicLong _numWritesFSBuff = null;\n- private static AtomicLong _numWritesFS = null;\n- private static AtomicLong _numWritesHDFS = null;\n+ private static final LongAdder _numWritesFSBuff = new LongAdder();\n+ private static final LongAdder _numWritesFS = new LongAdder();\n+ private static final LongAdder _numWritesHDFS = new LongAdder();\n//time statistics caching\n- private static AtomicLong _ctimeAcquireR = null; //in nano sec\n- private static AtomicLong _ctimeAcquireM = null; //in nano sec\n- private static AtomicLong _ctimeRelease = null; //in nano sec\n- private static AtomicLong _ctimeExport = null; //in nano sec\n-\n- static\n- {\n- reset();\n- }\n+ private static final LongAdder _ctimeAcquireR = new LongAdder(); //in nano sec\n+ private static final LongAdder _ctimeAcquireM = new LongAdder(); //in nano sec\n+ private static final LongAdder _ctimeRelease = new LongAdder(); //in nano sec\n+ private static final LongAdder _ctimeExport = new LongAdder(); //in nano sec\n- public static void reset()\n- {\n- _numHitsMem = new AtomicLong(0);\n- _numHitsFSBuff = new AtomicLong(0);\n- _numHitsFS = new AtomicLong(0);\n- _numHitsHDFS = new AtomicLong(0);\n+ public static void reset() {\n+ _numHitsMem.reset();\n+ _numHitsFSBuff.reset();\n+ _numHitsFS.reset();\n+ _numHitsHDFS.reset();\n- _numWritesFSBuff = new AtomicLong(0);\n- _numWritesFS = new AtomicLong(0);\n- _numWritesHDFS = new AtomicLong(0);\n+ _numWritesFSBuff.reset();\n+ _numWritesFS.reset();\n+ _numWritesHDFS.reset();\n- _ctimeAcquireR = new AtomicLong(0);\n- _ctimeAcquireM = new AtomicLong(0);\n- _ctimeRelease = new AtomicLong(0);\n- _ctimeExport = new AtomicLong(0);\n+ _ctimeAcquireR.reset();\n+ _ctimeAcquireM.reset();\n+ _ctimeRelease.reset();\n+ _ctimeExport.reset();\n}\n- public static void incrementMemHits()\n- {\n- _numHitsMem.incrementAndGet();\n+ public static void 
incrementMemHits() {\n+ _numHitsMem.increment();\n}\n- public static void incrementMemHits(int delta)\n- {\n- _numHitsMem.addAndGet(delta);\n+ public static void incrementMemHits(int delta) {\n+ _numHitsMem.add(delta);\n}\n- public static long getMemHits()\n- {\n- return _numHitsMem.get();\n+ public static long getMemHits() {\n+ return _numHitsMem.longValue();\n}\n- public static void incrementFSBuffHits()\n- {\n- _numHitsFSBuff.incrementAndGet();\n+ public static void incrementFSBuffHits() {\n+ _numHitsFSBuff.increment();\n}\n- public static void incrementFSBuffHits( int delta )\n- {\n- _numHitsFSBuff.addAndGet(delta);\n+ public static void incrementFSBuffHits( int delta ) {\n+ _numHitsFSBuff.add(delta);\n}\n- public static long getFSBuffHits()\n- {\n- return _numHitsFSBuff.get();\n+ public static long getFSBuffHits() {\n+ return _numHitsFSBuff.longValue();\n}\n- public static void incrementFSHits()\n- {\n- _numHitsFS.incrementAndGet();\n+ public static void incrementFSHits() {\n+ _numHitsFS.increment();\n}\n- public static void incrementFSHits(int delta)\n- {\n- _numHitsFS.addAndGet(delta);\n+ public static void incrementFSHits(int delta) {\n+ _numHitsFS.add(delta);\n}\n- public static long getFSHits()\n- {\n- return _numHitsFS.get();\n+ public static long getFSHits() {\n+ return _numHitsFS.longValue();\n}\n- public static void incrementHDFSHits()\n- {\n- _numHitsHDFS.incrementAndGet();\n+ public static void incrementHDFSHits() {\n+ _numHitsHDFS.increment();\n}\n- public static void incrementHDFSHits(int delta)\n- {\n- _numHitsHDFS.addAndGet(delta);\n+ public static void incrementHDFSHits(int delta) {\n+ _numHitsHDFS.add(delta);\n}\n- public static long getHDFSHits()\n- {\n- return _numHitsHDFS.get();\n+ public static long getHDFSHits() {\n+ return _numHitsHDFS.longValue();\n}\n- public static void incrementFSBuffWrites()\n- {\n- _numWritesFSBuff.incrementAndGet();\n+ public static void incrementFSBuffWrites() {\n+ _numWritesFSBuff.increment();\n}\n- public static void incrementFSBuffWrites(int delta)\n- {\n- _numWritesFSBuff.addAndGet(delta);\n+ public static void incrementFSBuffWrites(int delta) {\n+ _numWritesFSBuff.add(delta);\n}\n- public static long getFSBuffWrites()\n- {\n- return _numWritesFSBuff.get();\n+ public static long getFSBuffWrites() {\n+ return _numWritesFSBuff.longValue();\n}\n- public static void incrementFSWrites()\n- {\n- _numWritesFS.incrementAndGet();\n+ public static void incrementFSWrites() {\n+ _numWritesFS.increment();\n}\n- public static void incrementFSWrites(int delta)\n- {\n- _numWritesFS.addAndGet(delta);\n+ public static void incrementFSWrites(int delta) {\n+ _numWritesFS.add(delta);\n}\n- public static long getFSWrites()\n- {\n- return _numWritesFS.get();\n+ public static long getFSWrites() {\n+ return _numWritesFS.longValue();\n}\n- public static void incrementHDFSWrites()\n- {\n- _numWritesHDFS.incrementAndGet();\n+ public static void incrementHDFSWrites() {\n+ _numWritesHDFS.increment();\n}\n- public static void incrementHDFSWrites(int delta)\n- {\n- _numWritesHDFS.addAndGet(delta);\n+ public static void incrementHDFSWrites(int delta) {\n+ _numWritesHDFS.add(delta);\n}\n- public static long getHDFSWrites()\n- {\n- return _numWritesHDFS.get();\n+ public static long getHDFSWrites() {\n+ return _numWritesHDFS.longValue();\n}\n- public static void incrementAcquireRTime(long delta)\n- {\n- _ctimeAcquireR.addAndGet(delta);\n+ public static void incrementAcquireRTime(long delta) {\n+ _ctimeAcquireR.add(delta);\n}\n- public static long getAcquireRTime()\n- 
{\n- return _ctimeAcquireR.get();\n+ public static long getAcquireRTime() {\n+ return _ctimeAcquireR.longValue();\n}\n- public static void incrementAcquireMTime(long delta)\n- {\n- _ctimeAcquireM.addAndGet(delta);\n+ public static void incrementAcquireMTime(long delta) {\n+ _ctimeAcquireM.add(delta);\n}\n- public static long getAcquireMTime()\n- {\n- return _ctimeAcquireM.get();\n+ public static long getAcquireMTime() {\n+ return _ctimeAcquireM.longValue();\n}\n- public static void incrementReleaseTime(long delta)\n- {\n- _ctimeRelease.addAndGet(delta);\n+ public static void incrementReleaseTime(long delta) {\n+ _ctimeRelease.add(delta);\n}\n- public static long getReleaseTime()\n- {\n- return _ctimeRelease.get();\n+ public static long getReleaseTime() {\n+ return _ctimeRelease.longValue();\n}\n-\n- public static void incrementExportTime(long delta)\n- {\n- _ctimeExport.addAndGet(delta);\n+ public static void incrementExportTime(long delta) {\n+ _ctimeExport.add(delta);\n}\n- public static long getExportTime()\n- {\n- return _ctimeExport.get();\n+ public static long getExportTime() {\n+ return _ctimeExport.longValue();\n}\n-\n- public static String displayHits()\n- {\n+ public static String displayHits() {\nStringBuilder sb = new StringBuilder();\n- sb.append(_numHitsMem.get());\n+ sb.append(_numHitsMem.longValue());\nsb.append(\"/\");\n- sb.append(_numHitsFSBuff.get());\n+ sb.append(_numHitsFSBuff.longValue());\nsb.append(\"/\");\n- sb.append(_numHitsFS.get());\n+ sb.append(_numHitsFS.longValue());\nsb.append(\"/\");\n- sb.append(_numHitsHDFS.get());\n-\n+ sb.append(_numHitsHDFS.longValue());\nreturn sb.toString();\n}\n- public static String displayWrites()\n- {\n+ public static String displayWrites() {\nStringBuilder sb = new StringBuilder();\n- sb.append(_numWritesFSBuff.get());\n+ sb.append(_numWritesFSBuff.longValue());\nsb.append(\"/\");\n- sb.append(_numWritesFS.get());\n+ sb.append(_numWritesFS.longValue());\nsb.append(\"/\");\n- sb.append(_numWritesHDFS.get());\n-\n+ sb.append(_numWritesHDFS.longValue());\nreturn sb.toString();\n}\n- public static String displayTime()\n- {\n+ public static String displayTime() {\nStringBuilder sb = new StringBuilder();\n- sb.append(String.format(\"%.3f\", ((double)_ctimeAcquireR.get())/1000000000)); //in sec\n+ sb.append(String.format(\"%.3f\", ((double)_ctimeAcquireR.longValue())/1000000000)); //in sec\nsb.append(\"/\");\n- sb.append(String.format(\"%.3f\", ((double)_ctimeAcquireM.get())/1000000000)); //in sec\n+ sb.append(String.format(\"%.3f\", ((double)_ctimeAcquireM.longValue())/1000000000)); //in sec\nsb.append(\"/\");\n- sb.append(String.format(\"%.3f\", ((double)_ctimeRelease.get())/1000000000)); //in sec\n+ sb.append(String.format(\"%.3f\", ((double)_ctimeRelease.longValue())/1000000000)); //in sec\nsb.append(\"/\");\n- sb.append(String.format(\"%.3f\", ((double)_ctimeExport.get())/1000000000)); //in sec\n-\n- ;\n+ sb.append(String.format(\"%.3f\", ((double)_ctimeExport.longValue())/1000000000)); //in sec\nreturn sb.toString();\n}\n-\n-\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/util/IDSequence.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/util/IDSequence.java", "diff": "package org.apache.sysml.runtime.controlprogram.parfor.util;\n+import java.util.concurrent.atomic.AtomicLong;\n+\n/**\n* ID sequence for generating unique long identifiers with start 0 and increment 1.\n*\n*/\npublic class IDSequence\n{\n- private long _current = -1;\n- 
private boolean wrapAround = false;\n+ private final AtomicLong _current;\n+ private final boolean _wrapAround;\n- public IDSequence()\n- {\n- reset();\n+ public IDSequence() {\n+ this(false);\n}\n- public IDSequence(boolean wrapAround)\n- {\n- reset();\n- this.wrapAround = wrapAround;\n+ public IDSequence(boolean wrapAround) {\n+ _current = new AtomicLong(-1);\n+ _wrapAround = wrapAround;\n}\n/**\n@@ -44,25 +44,24 @@ public class IDSequence\n*\n* @return ID\n*/\n- public synchronized long getNextID()\n+ public long getNextID()\n{\n- _current++;\n+ long val = _current.incrementAndGet();\n- if( _current == Long.MAX_VALUE ) {\n- if (wrapAround)\n- reset();\n- else\n+ if( val == Long.MAX_VALUE ) {\n+ if( !_wrapAround )\nthrow new RuntimeException(\"WARNING: IDSequence will produced numeric overflow.\");\n+ reset();\n}\n- return _current;\n+ return val;\n}\n- public synchronized long getCurrentID() {\n- return _current;\n+ public long getCurrentID() {\n+ return _current.get();\n}\n- public synchronized void reset() {\n- _current = 0;\n+ public void reset() {\n+ _current.set(0);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "new_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "diff": "@@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicLong;\n*/\npublic class GPUStatistics {\n//TODO fix formatting\n+ //TODO replace AtomicLong with LongAdder\n// Whether or not extra per-instruction statistics will be recorded and shown for the GPU\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -28,7 +28,7 @@ import java.util.HashMap;\nimport java.util.List;\nimport java.util.Map.Entry;\nimport java.util.Set;\n-import java.util.concurrent.atomic.AtomicLong;\n+import java.util.concurrent.atomic.LongAdder;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -55,47 +55,46 @@ public class Statistics\nprivate static long execEndTime = 0;\n// number of compiled/executed MR jobs\n- private static int iNoOfExecutedMRJobs = 0;\n- private static int iNoOfCompiledMRJobs = 0;\n+ private static final LongAdder numExecutedMRJobs = new LongAdder();\n+ private static final LongAdder numCompiledMRJobs = new LongAdder();\n// number of compiled/executed SP instructions\n- private static int iNoOfExecutedSPInst = 0;\n- private static int iNoOfCompiledSPInst = 0;\n+ private static final LongAdder numExecutedSPInst = new LongAdder();\n+ private static final LongAdder numCompiledSPInst = new LongAdder();\n-\n- //JVM stats\n+ //JVM stats (low frequency updates)\nprivate static long jitCompileTime = 0; //in milli sec\nprivate static long jvmGCTime = 0; //in milli sec\nprivate static long jvmGCCount = 0; //count\n//HOP DAG recompile stats (potentially high update frequency)\n- private static AtomicLong hopRecompileTime = new AtomicLong(0); //in nano sec\n- private static AtomicLong hopRecompilePred = new AtomicLong(0); //count\n- private static AtomicLong hopRecompileSB = new AtomicLong(0); //count\n+ private static final LongAdder hopRecompileTime = new LongAdder(); //in nano sec\n+ private static final LongAdder hopRecompilePred = new LongAdder(); //count\n+ private static final LongAdder hopRecompileSB = new LongAdder(); //count\n//CODEGEN\n- private static AtomicLong codegenCompileTime = new AtomicLong(0); //in nano\n- private static AtomicLong codegenClassCompileTime = 
new AtomicLong(0); //in nano\n- private static AtomicLong codegenHopCompile = new AtomicLong(0); //count\n- private static AtomicLong codegenCPlanCompile = new AtomicLong(0); //count\n- private static AtomicLong codegenClassCompile = new AtomicLong(0); //count\n- private static AtomicLong codegenPlanCacheHits = new AtomicLong(0); //count\n- private static AtomicLong codegenPlanCacheTotal = new AtomicLong(0); //count\n+ private static final LongAdder codegenCompileTime = new LongAdder(); //in nano\n+ private static final LongAdder codegenClassCompileTime = new LongAdder(); //in nano\n+ private static final LongAdder codegenHopCompile = new LongAdder(); //count\n+ private static final LongAdder codegenCPlanCompile = new LongAdder(); //count\n+ private static final LongAdder codegenClassCompile = new LongAdder(); //count\n+ private static final LongAdder codegenPlanCacheHits = new LongAdder(); //count\n+ private static final LongAdder codegenPlanCacheTotal = new LongAdder(); //count\n//Function recompile stats\n- private static AtomicLong funRecompileTime = new AtomicLong(0); //in nano sec\n- private static AtomicLong funRecompiles = new AtomicLong(0); //count\n+ private static final LongAdder funRecompileTime = new LongAdder(); //in nano sec\n+ private static final LongAdder funRecompiles = new LongAdder(); //count\n//Spark-specific stats\nprivate static long sparkCtxCreateTime = 0;\n- private static AtomicLong sparkParallelize = new AtomicLong(0L);\n- private static AtomicLong sparkParallelizeCount = new AtomicLong(0L);\n- private static AtomicLong sparkCollect = new AtomicLong(0L);\n- private static AtomicLong sparkCollectCount = new AtomicLong(0L);\n- private static AtomicLong sparkBroadcast = new AtomicLong(0L);\n- private static AtomicLong sparkBroadcastCount = new AtomicLong(0L);\n-\n- //PARFOR optimization stats\n+ private static final LongAdder sparkParallelize = new LongAdder();\n+ private static final LongAdder sparkParallelizeCount = new LongAdder();\n+ private static final LongAdder sparkCollect = new LongAdder();\n+ private static final LongAdder sparkCollectCount = new LongAdder();\n+ private static final LongAdder sparkBroadcast = new LongAdder();\n+ private static final LongAdder sparkBroadcastCount = new LongAdder();\n+\n+ //PARFOR optimization stats (low frequency updates)\nprivate static long parforOptTime = 0; //in milli sec\nprivate static long parforOptCount = 0; //count\nprivate static long parforInitTime = 0; //in milli sec\n@@ -105,117 +104,84 @@ public class Statistics\nprivate static HashMap<String,Long> _cpInstTime = new HashMap<String, Long>();\nprivate static HashMap<String,Long> _cpInstCounts = new HashMap<String, Long>();\n- private static AtomicLong lTotalUIPVar = new AtomicLong(0);\n- private static AtomicLong lTotalLix = new AtomicLong(0);\n- private static AtomicLong lTotalLixUIP = new AtomicLong(0);\n-\n- public static synchronized void setNoOfExecutedMRJobs(int iNoOfExecutedMRJobs) {\n- Statistics.iNoOfExecutedMRJobs = iNoOfExecutedMRJobs;\n- }\n-\n- public static synchronized int getNoOfExecutedMRJobs() {\n- return iNoOfExecutedMRJobs;\n- }\n-\n- public static synchronized void incrementNoOfExecutedMRJobs() {\n- iNoOfExecutedMRJobs ++;\n- }\n-\n- public static synchronized void decrementNoOfExecutedMRJobs() {\n- iNoOfExecutedMRJobs --;\n- }\n+ private static final LongAdder lTotalUIPVar = new LongAdder();\n+ private static final LongAdder lTotalLix = new LongAdder();\n+ private static final LongAdder lTotalLixUIP = new LongAdder();\n- public static 
synchronized void setNoOfCompiledMRJobs(int numJobs) {\n- iNoOfCompiledMRJobs = numJobs;\n+ public static synchronized long getNoOfExecutedMRJobs() {\n+ return numExecutedMRJobs.longValue();\n}\n- public static synchronized int getNoOfCompiledMRJobs() {\n- return iNoOfCompiledMRJobs;\n+ public static void incrementNoOfExecutedMRJobs() {\n+ numExecutedMRJobs.increment();\n}\n- public static synchronized void incrementNoOfCompiledMRJobs() {\n- iNoOfCompiledMRJobs ++;\n+ public static void decrementNoOfExecutedMRJobs() {\n+ numExecutedMRJobs.decrement();\n}\n-\n- public static synchronized void setNoOfExecutedSPInst(int numJobs) {\n- iNoOfExecutedSPInst = numJobs;\n+ public static long getNoOfCompiledMRJobs() {\n+ return numCompiledMRJobs.longValue();\n}\n- public static synchronized int getNoOfExecutedSPInst() {\n- return iNoOfExecutedSPInst;\n+ public static void incrementNoOfCompiledMRJobs() {\n+ numCompiledMRJobs.increment();\n}\n- public static synchronized void incrementNoOfExecutedSPInst() {\n- iNoOfExecutedSPInst ++;\n+ public static long getNoOfExecutedSPInst() {\n+ return numExecutedSPInst.longValue();\n}\n- public static synchronized void decrementNoOfExecutedSPInst() {\n- iNoOfExecutedSPInst --;\n+ public static void incrementNoOfExecutedSPInst() {\n+ numExecutedSPInst.increment();\n}\n- public static synchronized void setNoOfCompiledSPInst(int numJobs) {\n- iNoOfCompiledSPInst = numJobs;\n+ public static void decrementNoOfExecutedSPInst() {\n+ numExecutedSPInst.decrement();\n}\n- public static synchronized int getNoOfCompiledSPInst() {\n- return iNoOfCompiledSPInst;\n+ public static long getNoOfCompiledSPInst() {\n+ return numCompiledSPInst.longValue();\n}\n- public static synchronized void incrementNoOfCompiledSPInst() {\n- iNoOfCompiledSPInst ++;\n+ public static void incrementNoOfCompiledSPInst() {\n+ numCompiledSPInst.increment();\n}\npublic static long getTotalUIPVar() {\n- return lTotalUIPVar.get();\n+ return lTotalUIPVar.longValue();\n}\npublic static void incrementTotalUIPVar() {\n- lTotalUIPVar.incrementAndGet();\n+ lTotalUIPVar.increment();\n}\npublic static long getTotalLixUIP() {\n- return lTotalLixUIP.get();\n+ return lTotalLixUIP.longValue();\n}\npublic static void incrementTotalLixUIP() {\n- lTotalLixUIP.incrementAndGet();\n+ lTotalLixUIP.increment();\n}\npublic static long getTotalLix() {\n- return lTotalLix.get();\n+ return lTotalLix.longValue();\n}\npublic static void incrementTotalLix() {\n- lTotalLix.incrementAndGet();\n+ lTotalLix.increment();\n}\n- public static void resetNoOfCompiledJobs( int count )\n- {\n+ public static void resetNoOfCompiledJobs( int count ) {\n//reset both mr/sp for multiple tests within one jvm\n-\n- if(OptimizerUtils.isSparkExecutionMode()) {\n- setNoOfCompiledSPInst(count);\n- setNoOfCompiledMRJobs(0);\n- }\n- else{\n- setNoOfCompiledMRJobs(count);\n- setNoOfCompiledSPInst(0);\n- }\n+ numCompiledSPInst.reset();\n+ numCompiledMRJobs.reset();\n}\n- public static void resetNoOfExecutedJobs( int count )\n- {\n+ public static void resetNoOfExecutedJobs() {\n//reset both mr/sp for multiple tests within one jvm\n-\n- if(OptimizerUtils.isSparkExecutionMode()) {\n- setNoOfExecutedSPInst(count);\n- setNoOfExecutedMRJobs(0);\n- }\n- else {\n- setNoOfExecutedMRJobs(count);\n- setNoOfExecutedSPInst(0);\n- }\n+ numExecutedSPInst.reset();\n+ numExecutedMRJobs.reset();\nif( DMLScript.USE_ACCELERATOR )\nGPUStatistics.setNoOfExecutedGPUInst(0);\n@@ -234,94 +200,87 @@ public class Statistics\n}\npublic static void incrementHOPRecompileTime( long delta 
) {\n- //note: not synchronized due to use of atomics\n- hopRecompileTime.addAndGet(delta);\n+ hopRecompileTime.add(delta);\n}\npublic static void incrementHOPRecompilePred() {\n- //note: not synchronized due to use of atomics\n- hopRecompilePred.incrementAndGet();\n+ hopRecompilePred.increment();\n}\npublic static void incrementHOPRecompilePred(long delta) {\n- //note: not synchronized due to use of atomics\n- hopRecompilePred.addAndGet(delta);\n+ hopRecompilePred.add(delta);\n}\npublic static void incrementHOPRecompileSB() {\n- //note: not synchronized due to use of atomics\n- hopRecompileSB.incrementAndGet();\n+ hopRecompileSB.increment();\n}\npublic static void incrementHOPRecompileSB(long delta) {\n- //note: not synchronized due to use of atomics\n- hopRecompileSB.addAndGet(delta);\n+ hopRecompileSB.add(delta);\n}\npublic static void incrementCodegenDAGCompile() {\n- codegenHopCompile.incrementAndGet();\n+ codegenHopCompile.increment();\n}\npublic static void incrementCodegenCPlanCompile(long delta) {\n- codegenCPlanCompile.addAndGet(delta);\n+ codegenCPlanCompile.add(delta);\n}\npublic static void incrementCodegenClassCompile() {\n- codegenClassCompile.incrementAndGet();\n+ codegenClassCompile.increment();\n}\npublic static void incrementCodegenCompileTime(long delta) {\n- codegenCompileTime.addAndGet(delta);\n+ codegenCompileTime.add(delta);\n}\npublic static void incrementCodegenClassCompileTime(long delta) {\n- codegenClassCompileTime.addAndGet(delta);\n+ codegenClassCompileTime.add(delta);\n}\npublic static void incrementCodegenPlanCacheHits() {\n- codegenPlanCacheHits.incrementAndGet();\n+ codegenPlanCacheHits.increment();\n}\npublic static void incrementCodegenPlanCacheTotal() {\n- codegenPlanCacheTotal.incrementAndGet();\n+ codegenPlanCacheTotal.increment();\n}\npublic static long getCodegenDAGCompile() {\n- return codegenHopCompile.get();\n+ return codegenHopCompile.longValue();\n}\npublic static long getCodegenCPlanCompile() {\n- return codegenCPlanCompile.get();\n+ return codegenCPlanCompile.longValue();\n}\npublic static long getCodegenClassCompile() {\n- return codegenClassCompile.get();\n+ return codegenClassCompile.longValue();\n}\npublic static long getCodegenCompileTime() {\n- return codegenCompileTime.get();\n+ return codegenCompileTime.longValue();\n}\npublic static long getCodegenClassCompileTime() {\n- return codegenClassCompileTime.get();\n+ return codegenClassCompileTime.longValue();\n}\npublic static long getCodegenPlanCacheHits() {\n- return codegenPlanCacheHits.get();\n+ return codegenPlanCacheHits.longValue();\n}\npublic static long getCodegenPlanCacheTotal() {\n- return codegenPlanCacheTotal.get();\n+ return codegenPlanCacheTotal.longValue();\n}\npublic static void incrementFunRecompileTime( long delta ) {\n- //note: not synchronized due to use of atomics\n- funRecompileTime.addAndGet(delta);\n+ funRecompileTime.add(delta);\n}\npublic static void incrementFunRecompiles() {\n- //note: not synchronized due to use of atomics\n- funRecompiles.incrementAndGet();\n+ funRecompiles.increment();\n}\npublic static synchronized void incrementParForOptimCount(){\n@@ -381,21 +340,21 @@ public class Statistics\npublic static void reset()\n{\n- hopRecompileTime.set(0);\n- hopRecompilePred.set(0);\n- hopRecompileSB.set(0);\n+ hopRecompileTime.reset();\n+ hopRecompilePred.reset();\n+ hopRecompileSB.reset();\n- funRecompiles.set(0);\n- funRecompileTime.set(0);\n+ funRecompiles.reset();\n+ funRecompileTime.reset();\nparforOptCount = 0;\nparforOptTime = 0;\nparforInitTime = 
0;\nparforMergeTime = 0;\n- lTotalLix.set(0);\n- lTotalLixUIP.set(0);\n- lTotalUIPVar.set(0);\n+ lTotalLix.reset();\n+ lTotalLixUIP.reset();\n+ lTotalUIPVar.reset();\nresetJITCompileTime();\nresetJVMgcTime();\n@@ -428,27 +387,27 @@ public class Statistics\n}\npublic static void accSparkParallelizeTime(long t) {\n- sparkParallelize.addAndGet(t);\n+ sparkParallelize.add(t);\n}\npublic static void incSparkParallelizeCount(long c) {\n- sparkParallelizeCount.addAndGet(c);\n+ sparkParallelizeCount.add(c);\n}\npublic static void accSparkCollectTime(long t) {\n- sparkCollect.addAndGet(t);\n+ sparkCollect.add(t);\n}\npublic static void incSparkCollectCount(long c) {\n- sparkCollectCount.addAndGet(c);\n+ sparkCollectCount.add(c);\n}\npublic static void accSparkBroadCastTime(long t) {\n- sparkBroadcast.addAndGet(t);\n+ sparkBroadcast.add(t);\n}\npublic static void incSparkBroadcastCount(long c) {\n- sparkBroadcastCount.addAndGet(c);\n+ sparkBroadcastCount.add(c);\n}\n@@ -467,7 +426,6 @@ public class Statistics\nif( inst instanceof FunctionCallCPInstruction ) {\nFunctionCallCPInstruction extfunct = (FunctionCallCPInstruction)inst;\nopcode = extfunct.getFunctionName();\n- //opcode = extfunct.getNamespace()+Program.KEY_DELIM+extfunct.getFunctionName();\n}\n}\nelse //CPInstructions\n@@ -476,7 +434,6 @@ public class Statistics\nif( inst instanceof FunctionCallCPInstruction ) {\nFunctionCallCPInstruction extfunct = (FunctionCallCPInstruction)inst;\nopcode = extfunct.getFunctionName();\n- //opcode = extfunct.getNamespace()+Program.KEY_DELIM+extfunct.getFunctionName();\n}\n}\n@@ -490,13 +447,11 @@ public class Statistics\n*/\npublic synchronized static void maintainCPHeavyHitters( String instructionName, long timeNanos )\n{\n- Long oldVal = _cpInstTime.get(instructionName);\n- Long newVal = timeNanos + ((oldVal!=null) ? oldVal : 0);\n- _cpInstTime.put(instructionName, newVal);\n+ Long oldVal = _cpInstTime.getOrDefault(instructionName, 0L);\n+ _cpInstTime.put(instructionName, oldVal + timeNanos);\n- Long oldCnt = _cpInstCounts.get(instructionName);\n- Long newCnt = 1 + ((oldCnt!=null) ? oldCnt : 0);\n- _cpInstCounts.put(instructionName, newCnt);\n+ Long oldCnt = _cpInstCounts.getOrDefault(instructionName, 0L);\n+ _cpInstCounts.put(instructionName, oldCnt + 1);\n}\n@@ -587,23 +542,23 @@ public class Statistics\n}\npublic static long getHopRecompileTime(){\n- return hopRecompileTime.get();\n+ return hopRecompileTime.longValue();\n}\npublic static long getHopRecompiledPredDAGs(){\n- return hopRecompilePred.get();\n+ return hopRecompilePred.longValue();\n}\npublic static long getHopRecompiledSBDAGs(){\n- return hopRecompileSB.get();\n+ return hopRecompileSB.longValue();\n}\npublic static long getFunRecompileTime(){\n- return funRecompileTime.get();\n+ return funRecompileTime.longValue();\n}\npublic static long getFunRecompiles(){\n- return funRecompiles.get();\n+ return funRecompiles.longValue();\n}\npublic static long getParforOptCount(){\n@@ -681,14 +636,14 @@ public class Statistics\nString lazy = SparkExecutionContext.isLazySparkContextCreation() ? 
\"(lazy)\" : \"(eager)\";\nsb.append(\"Spark ctx create time \"+lazy+\":\\t\"+\nString.format(\"%.3f\", ((double)sparkCtxCreateTime)*1e-9) + \" sec.\\n\" ); // nanoSec --> sec\n-\nsb.append(\"Spark trans counts (par,bc,col):\" +\n- String.format(\"%d/%d/%d.\\n\", sparkParallelizeCount.get(), sparkBroadcastCount.get(), sparkCollectCount.get()));\n+ String.format(\"%d/%d/%d.\\n\", sparkParallelizeCount.longValue(),\n+ sparkBroadcastCount.longValue(), sparkCollectCount.longValue()));\nsb.append(\"Spark trans times (par,bc,col):\\t\" +\nString.format(\"%.3f/%.3f/%.3f secs.\\n\",\n- ((double)sparkParallelize.get())*1e-9,\n- ((double)sparkBroadcast.get())*1e-9,\n- ((double)sparkCollect.get())*1e-9));\n+ ((double)sparkParallelize.longValue())*1e-9,\n+ ((double)sparkBroadcast.longValue())*1e-9,\n+ ((double)sparkCollect.longValue())*1e-9));\n}\nif( parforOptCount>0 ){\nsb.append(\"ParFor loops optimized:\\t\\t\" + getParforOptCount() + \".\\n\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1402] Scalable statistics counters and atomic id sequence
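A minimal, self-contained sketch of the counter pattern adopted in the commit above: LongAdder spreads contended increments across internal cells and only sums them when read, which is why it scales better than a single AtomicLong under many writer threads. The class and method names here are illustrative, not SystemML's.

    import java.util.concurrent.atomic.LongAdder;

    // Illustrative scalable counter in the style of the reworked Statistics/CacheStatistics fields.
    public class ScalableCounterSketch {
        private final LongAdder count = new LongAdder();

        public void increment()     { count.increment(); }
        public void add(long delta) { count.add(delta); }
        public long get()           { return count.longValue(); }
        public void reset()         { count.reset(); }

        public static void main(String[] args) throws InterruptedException {
            ScalableCounterSketch c = new ScalableCounterSketch();
            Thread[] workers = new Thread[4];
            for (int i = 0; i < workers.length; i++) {
                workers[i] = new Thread(() -> {
                    for (int j = 0; j < 1_000_000; j++)
                        c.increment();
                });
                workers[i].start();
            }
            for (Thread t : workers)
                t.join();
            System.out.println(c.get()); // 4000000
        }
    }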
49,738
15.03.2017 15:19:48
25,200
0490fec93105e1ec4494e8aecd47878036dcb275
Fix reset number of compiled mr/spark inst (w/ offset)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -175,7 +175,9 @@ public class Statistics\npublic static void resetNoOfCompiledJobs( int count ) {\n//reset both mr/sp for multiple tests within one jvm\nnumCompiledSPInst.reset();\n+ numCompiledSPInst.add(count);\nnumCompiledMRJobs.reset();\n+ numCompiledMRJobs.add(count);\n}\npublic static void resetNoOfExecutedJobs() {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1402] Fix reset number of compiled mr/spark inst (w/ offset)
49,717
13.03.2017 15:40:08
25,200
5baac2d62f64026ff82b9d674b909bc4b80800b0
[HOTFIX] Disabling GPU fused relu & maxpooling operator because of bug. Fixed the timer that counts the number of times memory chunks are zero-ed out. Some minor code refactoring
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "package org.apache.sysml.hops;\n-import java.util.ArrayList;\n-\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.hops.Hop.MultiThreadedHop;\nimport org.apache.sysml.lops.ConvolutionTransform;\nimport org.apache.sysml.lops.ConvolutionTransform.OperationTypes;\nimport org.apache.sysml.lops.Lop;\n-import org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.lops.ReBlock;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n@@ -35,6 +33,8 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.ConvolutionParameters;\n+import java.util.ArrayList;\n+\npublic class ConvolutionOp extends Hop implements MultiThreadedHop\n{\nprivate Hop.ConvOp op;\n@@ -179,7 +179,11 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nArrayList<Hop> inputs1 = inputs;\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\nOperationTypes lopOp = HopsConv2Lops.get(op);\n- if(op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {\n+\n+ // The fused relu_maxpooling is being disabled for now on the GPU\n+ // There is a bug in LibMatrixCUDA#reluMaxpooling\n+ // which we need to understand before enabling this by removing the \"et != ExecType.GPU\" guard.\n+ if(op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0)) && et != ExecType.GPU) {\nin = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "diff": "*/\npackage org.apache.sysml.runtime.instructions.gpu;\n-import java.util.ArrayList;\n-\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n@@ -31,6 +29,8 @@ import org.apache.sysml.runtime.matrix.operators.ReorgOperator;\nimport org.apache.sysml.runtime.util.ConvolutionUtils;\nimport org.apache.sysml.utils.GPUStatistics;\n+import java.util.ArrayList;\n+\npublic class ConvolutionGPUInstruction extends GPUInstruction\n{\nprivate CPOperand _input1;\n@@ -337,8 +337,13 @@ public class ConvolutionGPUInstruction extends GPUInstruction\n// release inputs/outputs\nec.releaseMatrixInputForGPUInstruction(_input1.getName());\n+\nif (!( instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) )\nec.releaseMatrixInputForGPUInstruction(_input2.getName());\n+\n+ if (instOpcode.equalsIgnoreCase(\"conv2d_bias_add\"))\n+ ec.releaseMatrixInputForGPUInstruction(_input3.getName());\n+\nec.releaseMatrixOutputForGPUInstruction(_output.getName());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/JCudaObject.java", "diff": "@@ -1188,7 +1188,7 @@ public class JCudaObject extends GPUObject {\n// Set all elements to 0 since newly 
allocated space will contain garbage\nif (DMLScript.STATISTICS) t1 = System.nanoTime();\ncudaMemset(A, 0, size);\n- if (DMLScript.STATISTICS) end = System.nanoTime() - t1;\n+ if (DMLScript.STATISTICS) end = System.nanoTime();\nif (instructionName != null && GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instructionName, GPUInstruction.MISC_TIMER_SET_ZERO, end - t1);\nif (DMLScript.STATISTICS) GPUStatistics.cudaMemSet0Time.getAndAdd(end - t1);\nif (DMLScript.STATISTICS) GPUStatistics.cudaMemSet0Count.getAndAdd(1);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -648,7 +648,9 @@ public class LibMatrixCUDA {\n((JCudaObject)image.getGPUObject()).sparseToDense(instName);\n}\nPointer x = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\n- performMaxpooling(instName, x, outputBlock, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ Pointer y = ((JCudaObject)outputBlock.getGPUObject()).jcudaDenseMatrixPtr;\n+\n+ performMaxpooling(instName, x, y, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\n/**\n@@ -678,17 +680,17 @@ public class LibMatrixCUDA {\nif(isInSparseFormat(image)) {\n((JCudaObject)image.getGPUObject()).sparseToDense(instName);\n}\n+ long size = image.getNumRows() * image.getNumColumns() * Sizeof.DOUBLE;\nPointer x = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\n- //MatrixObject temp = new MatrixObject(image);\n- //temp.getGPUObject().acquireDeviceModifyDense();\n- Pointer y = ((JCudaObject)image.getGPUObject()).jcudaDenseMatrixPtr;\n- performReLU(instName, x, y, N, C, H, W);\n- performMaxpooling(instName, y, outputBlock, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n- //((JCudaObject)temp.getGPUObject()).clearData(); // deallocate the temporary data\n+ Pointer y = ((JCudaObject)outputBlock.getGPUObject()).jcudaDenseMatrixPtr;\n+ Pointer tmp = allocate(size);\n+ performReLU(instName, x, tmp, N, C, H, W);\n+ performMaxpooling(instName, tmp, y, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ cudaFreeHelper(tmp);\n}\nprivate static void performMaxpooling(String instName, Pointer x,\n- MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n+ Pointer y, int N, int C, int H, int W, int K, int R,\nint S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\nint Q) throws DMLRuntimeException {\n@@ -706,9 +708,6 @@ public class LibMatrixCUDA {\nxDesc = allocateTensorDescriptor(N, C, H, W);\npoolingDesc = allocatePoolingDescriptor(R, S, pad_h, pad_w, stride_h, stride_w);\n- // Allocate data\n- Pointer y = ((JCudaObject)outputBlock.getGPUObject()).jcudaDenseMatrixPtr;\n-\nalpha = pointerTo(1.0);\nbeta = pointerTo(0.0f);\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Disabling GPU fused relu & maxpooling operator because of bug - Fixed the timer that counts the number of times memory chunks are zero-ed out - Some minor code refactoring
49,736
17.03.2017 10:20:26
28,800
81090134d2de04a3ae90c6f8d79b4c68cb14aab5
Convert numpy to matrixblock by passing multiple blocks. Here is the code to test this functionality: from systemml import MLContext, dml, convertToMatrixBlock; import pandas as pd; nr = 46900; X_pd = pd.DataFrame(range(1, (nr*784)+1,1),dtype=float).values.reshape(nr,784); convertToMatrixBlock(sc, X_pd); convertToMatrixBlock(sc, X_pd, maxSizeBlockInMB=100000). Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java", "diff": "@@ -165,6 +165,37 @@ public class RDDConverterUtilsExt\nreturn convertPy4JArrayToMB(data, (int) rlen, (int) clen, isSparse);\n}\n+ public static MatrixBlock mergeRowBlocks(ArrayList<MatrixBlock> mb, int numRowsPerBlock, int rlen, int clen, boolean isSparse) throws DMLRuntimeException {\n+ return mergeRowBlocks(mb, (long)numRowsPerBlock, (long)rlen, (long)clen, isSparse);\n+ }\n+\n+ /**\n+ * This creates a MatrixBlock from list of row blocks\n+ *\n+ * @param mb list of row blocks\n+ * @param numRowsPerBlock number of rows per block\n+ * @param rlen number of rows\n+ * @param clen number of columns\n+ * @param isSparse is the output matrix in sparse format\n+ * @return a matrix block of shape (rlen, clen)\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n+ */\n+ public static MatrixBlock mergeRowBlocks(ArrayList<MatrixBlock> mb, long numRowsPerBlock, long rlen, long clen, boolean isSparse) throws DMLRuntimeException {\n+ if(clen >= Integer.MAX_VALUE)\n+ throw new DMLRuntimeException(\"Number of columns cannot be greater than \" + Integer.MAX_VALUE);\n+ if(rlen >= Integer.MAX_VALUE)\n+ throw new DMLRuntimeException(\"Number of rows cannot be greater than \" + Integer.MAX_VALUE);\n+\n+ MatrixBlock ret = new MatrixBlock((int)rlen, (int) clen, isSparse);\n+ ret.allocateDenseOrSparseBlock();\n+ for(int i = 0; i < mb.size(); i++) {\n+ ret.copy((int)(i*numRowsPerBlock), (int)Math.min((i+1)*numRowsPerBlock-1, rlen-1), 0, (int)(clen-1), mb.get(i), false);\n+ }\n+ ret.recomputeNonZeros();\n+ ret.examSparsity();\n+ return ret;\n+ }\n+\npublic static MatrixBlock convertPy4JArrayToMB(byte [] data, int rlen, int clen, boolean isSparse) throws DMLRuntimeException {\nMatrixBlock mb = new MatrixBlock(rlen, clen, isSparse, -1);\nif(isSparse) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/converters.py", "new_path": "src/main/python/systemml/converters.py", "diff": "@@ -23,6 +23,7 @@ __all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convertToNumPyArr', 'convertT\nimport numpy as np\nimport pandas as pd\n+import math\nfrom pyspark.context import SparkContext\nfrom scipy.sparse import coo_matrix, spmatrix\nfrom .classloader import *\n@@ -55,10 +56,7 @@ def convertToLabeledDF(sparkSession, X, y=None):\nelse:\nreturn out.select('features')\n-\n-def convertToMatrixBlock(sc, src):\n- if isinstance(src, spmatrix):\n- src = coo_matrix(src, dtype=np.float64)\n+def _convertSPMatrixToMB(sc, src):\nnumRows = src.shape[0]\nnumCols = src.shape[1]\ndata = src.data\n@@ -70,17 +68,41 @@ def convertToMatrixBlock(sc, src):\nbuf3 = bytearray(col.tostring())\ncreateJavaObject(sc, 'dummy')\nreturn sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(buf1, buf2, buf3, numRows, numCols, nnz)\n- elif isinstance(sc, SparkContext):\n- src = np.asarray(src)\n+\n+def _convertDenseMatrixToMB(sc, src):\nnumCols = getNumCols(src)\nnumRows = src.shape[0]\narr = src.ravel().astype(np.float64)\nbuf = bytearray(arr.tostring())\ncreateJavaObject(sc, 'dummy')\nreturn sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(buf, numRows, numCols)\n- else:\n- raise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway 
ourselves\n+def convertToMatrixBlock(sc, src, maxSizeBlockInMB=8):\n+ if isinstance(src, spmatrix):\n+ src = coo_matrix(src, dtype=np.float64)\n+ else:\n+ src = np.asarray(src, dtype=np.float64)\n+ numRowsPerBlock = int(math.ceil((maxSizeBlockInMB*1000000) / (src.shape[1]*8)))\n+ # print(\"numRowsPerBlock=\" + str(numRowsPerBlock))\n+ multiBlockTransfer = False if numRowsPerBlock >= src.shape[0] else True\n+ if not multiBlockTransfer:\n+ if isinstance(src, spmatrix):\n+ return _convertSPMatrixToMB(sc, src)\n+ elif isinstance(sc, SparkContext):\n+ return _convertDenseMatrixToMB(sc, src)\n+ else:\n+ raise TypeError('sc needs to be of type SparkContext')\n+ else:\n+ if isinstance(src, spmatrix):\n+ numRowsPerBlock = 1 # To avoid unnecessary conversion to csr and then coo again\n+ rowBlocks = [ _convertSPMatrixToMB(sc, src.getrow(i)) for i in range(src.shape[0]) ]\n+ isSparse = True\n+ elif isinstance(sc, SparkContext):\n+ rowBlocks = [ _convertDenseMatrixToMB(sc, src[i:i+numRowsPerBlock,]) for i in range(0, src.shape[0], numRowsPerBlock) ]\n+ isSparse = False\n+ else:\n+ raise TypeError('sc needs to be of type SparkContext')\n+ return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.mergeRowBlocks(rowBlocks, int(numRowsPerBlock), int(src.shape[0]), int(src.shape[1]), isSparse)\ndef convertToNumPyArr(sc, mb):\nif isinstance(sc, SparkContext):\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1370] Convert numpy to matrixblock by passing multiple blocks

Here is the code to test this functionality:

from systemml import MLContext, dml, convertToMatrixBlock
import pandas as pd
nr = 46900
X_pd = pd.DataFrame(range(1, (nr*784)+1,1),dtype=float).values.reshape(nr,784)
convertToMatrixBlock(sc, X_pd)
convertToMatrixBlock(sc, X_pd, maxSizeBlockInMB=100000)

Closes #413.
49,738
16.03.2017 15:56:18
25,200
96505b1996eac95fdb7a1ec7fdb0302c15fd4821
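The mergeRowBlocks helper added in this commit stitches row blocks, transferred one at a time from the Python side, back into a single MatrixBlock on the JVM. Below is a minimal Java sketch of how it could be called directly; it is not part of the commit, the 3x3 toy data and block size of 2 are made up, and only classes that appear in the diff or elsewhere in this log (MatrixBlock, DataConverter, RDDConverterUtilsExt) are assumed to be on the classpath.

import java.util.ArrayList;

import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.util.DataConverter;

public class MergeRowBlocksSketch {
  public static void main(String[] args) throws Exception {
    // two row blocks of a 3x3 matrix, split with numRowsPerBlock = 2
    double[][] block0 = {{1, 2, 3}, {4, 5, 6}}; // rows 0..1
    double[][] block1 = {{7, 8, 9}};            // row 2
    ArrayList<MatrixBlock> rowBlocks = new ArrayList<MatrixBlock>();
    rowBlocks.add(DataConverter.convertToMatrixBlock(block0));
    rowBlocks.add(DataConverter.convertToMatrixBlock(block1));
    // merge back into one dense 3x3 block, as the Python converter does after the transfer
    MatrixBlock merged = RDDConverterUtilsExt.mergeRowBlocks(rowBlocks, 2, 3, 3, false);
    System.out.println(merged.getNumRows() + " x " + merged.getNumColumns());
  }
}

On the Python side the block size is derived from maxSizeBlockInMB, so a larger value simply means fewer, bigger row blocks, and the converter falls back to a single-block transfer when the whole matrix fits within the limit.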
Hardened code generator for sparse-unsafe outer products
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/OuterProductTpl.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/OuterProductTpl.java", "diff": "@@ -32,6 +32,7 @@ import org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\n+import org.apache.sysml.hops.Hop.OpOp2;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary.BinType;\n@@ -62,7 +63,9 @@ public class OuterProductTpl extends BaseTpl {\npublic boolean fuse(Hop hop, Hop input) {\nreturn !isClosed()\n&&((hop instanceof UnaryOp && TemplateUtils.isOperationSupported(hop))\n- || (hop instanceof BinaryOp && TemplateUtils.isOperationSupported(hop))\n+ || (hop instanceof BinaryOp && TemplateUtils.isOperationSupported(hop)\n+ && (TemplateUtils.isBinaryMatrixColVector(hop) || HopRewriteUtils.isBinaryMatrixScalarOperation(hop)\n+ || (HopRewriteUtils.isBinaryMatrixMatrixOperation(hop) && HopRewriteUtils.isBinary(hop, OpOp2.MULT, OpOp2.DIV)) ))\n|| HopRewriteUtils.isTransposeOperation(hop)\n|| (hop instanceof AggBinaryOp && !HopRewriteUtils.isOuterProductLikeMM(hop))\n|| (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()==Direction.RowCol));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -750,6 +750,16 @@ public class HopRewriteUtils\nreturn hop instanceof BinaryOp && ((BinaryOp)hop).getOp()==type;\n}\n+ public static boolean isBinary(Hop hop, OpOp2... types) {\n+ if( hop instanceof BinaryOp ) {\n+ BinaryOp bop = (BinaryOp) hop;\n+ for( OpOp2 type : types )\n+ if( type == bop.getOp() )\n+ return true;\n+ }\n+ return false;\n+ }\n+\npublic static boolean isBinary(Hop hop, OpOp2 type, int maxParents) {\nreturn isBinary(hop, type) && hop.getParent().size() <= maxParents;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "diff": "@@ -266,11 +266,11 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nKahanObject kbuff = new KahanObject(0, 0);\nKahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n- //TODO rework sparse safe test\n- double val = genexec( 0, b, scalars, n, m, 0, 0 );\n+ //sparse safe check\n+ boolean sparseSafe = (b.length == 0 //no sideways inputs\n+ && genexec( 0, b, scalars, n, m, 0, 0 ) == 0); //0 input results in 0\n- if(val == 0 && b.length==0) // sparse safe\n- {\n+ if( sparseSafe ) {\nif( sblock != null ) {\nfor( int i=rl; i<ru; i++ )\nif( !sblock.isEmpty(i) ) {\n@@ -283,8 +283,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n}\n}\n}\n- else //sparse-unsafe\n- {\n+ else { //sparse-unsafe\nfor(int i=rl; i<ru; i++)\nfor(int j=0; j<n; j++) {\ndouble valij = (sblock != null) ? 
sblock.get(i, j) : 0;\n@@ -297,14 +296,14 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nprivate long executeSparse(SparseBlock sblock, double[][] b, double[] scalars, double[] c, int n, int m, int rl, int ru)\n{\n- //TODO rework sparse safe test\n- double val0 = genexec( 0, b, scalars, n, m, 0, 0 );\n- long lnnz = 0;\n+ //sparse safe check\n+ boolean sparseSafe = (b.length == 0 //no sideways inputs\n+ && genexec( 0, b, scalars, n, m, 0, 0 ) == 0); //0 input results in 0\n+ long lnnz = 0;\nif( _type == CellType.NO_AGG )\n{\n- if(val0 == 0 && b.length == 0) // sparse safe\n- {\n+ if( sparseSafe ) {\nif( sblock != null ) {\nfor( int i=rl; i<ru; i++ )\nif( !sblock.isEmpty(i) ) {\n@@ -319,8 +318,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n}\n}\n}\n- else //sparse-unsafe\n- {\n+ else { //sparse-unsafe\nfor(int i=rl, cix=rl*n; i<ru; i++, cix+=n)\nfor(int j=0; j<n; j++) {\ndouble valij = (sblock != null) ? sblock.get(i, j) : 0;\n@@ -334,8 +332,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nKahanObject kbuff = new KahanObject(0, 0);\nKahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n- if(val0 == 0 && b.length == 0) // sparse safe\n- {\n+ if( sparseSafe ) {\nif( sblock != null ) {\nfor( int i=rl; i<ru; i++ ) {\nif( sblock.isEmpty(i) ) continue;\n@@ -351,8 +348,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n}\n}\n}\n- else //sparse-unsafe\n- {\n+ else { //sparse-unsafe\nfor(int i=rl; i<ru; i++) {\nkbuff.set(0, 0);\nfor(int j=0; j<n; j++) {\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/OuterProdTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/OuterProdTmplTest.java", "diff": "@@ -42,6 +42,7 @@ public class OuterProdTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME5 = \"wdivmmRightNotranspose\";\nprivate static final String TEST_NAME6 = \"wdivmmbasic\";\nprivate static final String TEST_NAME7 = \"wdivmmTransposeOut\";\n+ private static final String TEST_NAME8 = \"wSparseUnsafeOuterProduct\";\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + OuterProdTmplTest.class.getSimpleName() + \"/\";\n@@ -60,6 +61,7 @@ public class OuterProdTmplTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"5\" }) );\naddTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"6\" }) );\naddTestConfiguration( TEST_NAME7, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME7, new String[] { \"7\" }) );\n+ addTestConfiguration( TEST_NAME8, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME8, new String[] { \"8\" }) );\n}\n@Test\n@@ -97,6 +99,11 @@ public class OuterProdTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME7, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenOuterProdRewrite8() {\n+ testCodegenIntegration( TEST_NAME8, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenOuterProd1() {\ntestCodegenIntegrationWithInput( TEST_NAME1, false, ExecType.CP );\n@@ -132,6 +139,11 @@ public class OuterProdTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME7, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenOuterProd8() {\n+ testCodegenIntegration( TEST_NAME8, false, ExecType.CP );\n+ 
}\n+\n@Test\npublic void testCodegenOuterProdRewrite1_sp() {\ntestCodegenIntegrationWithInput( TEST_NAME1, true, ExecType.SPARK );\n@@ -152,6 +164,11 @@ public class OuterProdTmplTest extends AutomatedTestBase\ntestCodegenIntegrationWithInput( TEST_NAME4, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenOuterProdRewrite8_sp() {\n+ testCodegenIntegrationWithInput( TEST_NAME8, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\n@@ -188,7 +205,11 @@ public class OuterProdTmplTest extends AutomatedTestBase\nHashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"S\");\nHashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"S\");\nTestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n- if( !rewrites )\n+\n+ if( testname.equals(TEST_NAME8) )\n+ Assert.assertTrue(!(heavyHittersContainsSubString(\"spoofOP\")\n+ || heavyHittersContainsSubString(\"sp_spoofOP\")));\n+ else if( !rewrites )\nAssert.assertTrue(heavyHittersContainsSubString(\"spoofOP\")\n|| heavyHittersContainsSubString(\"sp_spoofOP\"));\n}\n@@ -232,6 +253,7 @@ public class OuterProdTmplTest extends AutomatedTestBase\nrunTest(true, false, null, -1);\nrunRScript(true);\n+\nif(testname.equals(TEST_NAME4)) { //wcemm\n//compare scalars\nHashMap<CellIndex, Double> dmlfile = readDMLScalarFromHDFS(\"S\");\n@@ -245,7 +267,10 @@ public class OuterProdTmplTest extends AutomatedTestBase\nTestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n}\n- if( !rewrites )\n+ if( testname.equals(TEST_NAME8) )\n+ Assert.assertTrue(!(heavyHittersContainsSubString(\"spoofOP\")\n+ || heavyHittersContainsSubString(\"sp_spoofOP\")));\n+ else if( !rewrites )\nAssert.assertTrue(heavyHittersContainsSubString(\"spoofOP\")\n|| heavyHittersContainsSubString(\"sp_spoofOP\"));\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/wSparseUnsafeOuterProduct.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix( 3, 1000, 2000)\n+X[100:900,] = matrix(0, 801, 2000)\n+U = matrix( 4, 1000, 10)\n+V = matrix( 5, 2000, 10)\n+S = X+1/(U%*%t(V));\n+\n+writeMM(as(S, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n+\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/wSparseUnsafeOuterProduct.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix( 3, rows=1000, cols=2000)\n+X[100:900,] = matrix(0, rows=801, cols=2000)\n+U = matrix( 4, rows=1000, cols=10)\n+V = matrix( 5, rows=2000, cols=10)\n+\n+if(1==1){}\n+\n+S = X+1/(U%*%t(V));\n+write(S,$1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1364] Hardened code generator for sparse-unsafe outer products
49,738
17.03.2017 12:52:20
25,200
c663e31ceefb383195d5405630af99b53260827d
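The core of this hardening is the stricter sparse-safety probe in SpoofCellwise: a fused cell operation may skip zero cells only if it has no side inputs (b.length == 0) and a zero input maps to zero. The self-contained Java sketch below only illustrates that predicate for single-input cell functions; it is not the generated operator code, and the two sample functions are made up.

import java.util.function.DoubleUnaryOperator;

public class SparseSafeProbe {
  // Sparse-safe: no side inputs and f(0) == 0, so zero cells can be skipped.
  static boolean isSparseSafe(DoubleUnaryOperator f, int numSideInputs) {
    return numSideInputs == 0 && f.applyAsDouble(0) == 0;
  }

  public static void main(String[] args) {
    DoubleUnaryOperator scale = a -> a * 2;      // 0 -> 0: zeros may be skipped
    DoubleUnaryOperator shift = a -> a + 7.7;    // 0 -> 7.7: zeros must be evaluated
    System.out.println(isSparseSafe(scale, 0));  // true
    System.out.println(isSparseSafe(shift, 0));  // false
  }
}

The new wSparseUnsafeOuterProduct test covers exactly the unsafe case, X + 1/(U %*% t(V)), and asserts that no spoof operator is compiled for it.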
[MINOR] Robust hdfs file delete w/ retries (fix occasional test issues)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/MapReduceTool.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/MapReduceTool.java", "diff": "@@ -64,6 +64,8 @@ import org.apache.wink.json4j.OrderedJSONObject;\npublic class MapReduceTool\n{\n+ private static final int MAX_DELETE_RETRIES = 10;\n+\nprivate static final Log LOG = LogFactory.getLog(MapReduceTool.class.getName());\nprivate static JobConf _rJob = null; //cached job conf for read-only operations\n@@ -120,29 +122,29 @@ public class MapReduceTool\nreturn ret;\n}\n- public static void deleteFileIfExistOnHDFS(Path outpath, JobConf job) throws IOException {\n- if (FileSystem.get(job).exists(outpath)) {\n- FileSystem.get(job).delete(outpath, true);\n- }\n+ public static void deleteFileWithMTDIfExistOnHDFS(String fname) throws IOException {\n+ deleteFileIfExistOnHDFS(fname);\n+ deleteFileIfExistOnHDFS(fname + \".mtd\");\n}\n- public static void deleteFileIfExistOnLFS(Path outpath, JobConf job) throws IOException {\n- if (FileSystem.getLocal(job).exists(outpath)) {\n- FileSystem.getLocal(job).delete(outpath, true);\n+ public static void deleteFileIfExistOnHDFS(String dir) throws IOException {\n+ deleteFileIfExists(FileSystem.get(_rJob), new Path(dir));\n}\n+\n+ public static void deleteFileIfExistOnHDFS(Path outpath, JobConf job) throws IOException {\n+ deleteFileIfExists(FileSystem.get(job), outpath);\n}\n- public static void deleteFileWithMTDIfExistOnHDFS(String fname) throws IOException {\n- deleteFileIfExistOnHDFS(fname);\n- deleteFileIfExistOnHDFS(fname + \".mtd\");\n+ public static void deleteFileIfExistOnLFS(Path outpath, JobConf job) throws IOException {\n+ deleteFileIfExists(FileSystem.getLocal(job), outpath);\n}\n- public static void deleteFileIfExistOnHDFS(String dir) throws IOException {\n- Path outpath = new Path(dir);\n- FileSystem fs = FileSystem.get(_rJob);\n+ private static void deleteFileIfExists(FileSystem fs, Path outpath) throws IOException {\nif( fs.exists(outpath) ) {\n- //System.err.println(\"Deleting \" + outpath + \" ... \");\n- fs.delete(outpath, true);\n+ int retries = MAX_DELETE_RETRIES;\n+ while( !fs.delete(outpath, true) && retries > 0 ) {\n+ retries--;\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/io/SeqParReadTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/io/SeqParReadTest.java", "diff": "@@ -270,8 +270,7 @@ public class SeqParReadTest extends AutomatedTestBase {\nprivate void writeMatrix( double[][] A, String fname, OutputInfo oi, long rows, long cols, int brows, int bcols, long nnz )\nthrows DMLRuntimeException, IOException\n{\n- MapReduceTool.deleteFileIfExistOnHDFS(fname);\n- MapReduceTool.deleteFileIfExistOnHDFS(fname+\".mtd\");\n+ MapReduceTool.deleteFileWithMTDIfExistOnHDFS(fname);\nMatrixCharacteristics mc = new MatrixCharacteristics(rows, cols, brows, bcols, nnz);\nMatrixBlock mb = DataConverter.convertToMatrixBlock(A);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Robust hdfs file delete w/ retries (fix occasional test issues)
49,738
17.03.2017 20:05:40
25,200
30f72e83fd334042eec1be5c42588e127a9f3ee0
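The consolidated helper in this commit retries a failed HDFS delete a bounded number of times instead of giving up on the first failure. The following stand-alone Java sketch shows the same pattern against the local Hadoop file system; the scratch path is hypothetical, and MAX_DELETE_RETRIES mirrors the constant from the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RetryingDelete {
  private static final int MAX_DELETE_RETRIES = 10;

  // Delete a file or directory recursively, retrying while delete() reports failure.
  static void deleteIfExists(FileSystem fs, Path path) throws IOException {
    if (fs.exists(path)) {
      int retries = MAX_DELETE_RETRIES;
      while (!fs.delete(path, true) && retries > 0)
        retries--;
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    deleteIfExists(fs, new Path("/tmp/systemml-test-scratch")); // hypothetical path
  }
}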
Extended code generator (sparsity-exploiting celltmpl)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeCell.java", "diff": "@@ -39,15 +39,16 @@ public class CNodeCell extends CNodeTpl\n+ \"\\n\"\n+ \"public final class %TMP% extends SpoofCellwise {\\n\"\n+ \" public %TMP%() {\\n\"\n- + \" _type = CellType.%TYPE%;\\n\"\n+ + \" super(CellType.%TYPE%, %SPARSE_SAFE%);\\n\"\n+ \" }\\n\"\n- + \" protected double genexec( double a, double[][] b, double[] scalars, int n, int m, int rowIndex, int colIndex) { \\n\"\n+ + \" protected double genexec( double a, double[][] b, double[] scalars, int m, int n, int rowIndex, int colIndex) { \\n\"\n+ \"%BODY_dense%\"\n+ \" return %OUT%;\\n\"\n+ \" } \\n\"\n+ \"}\";\nprivate CellType _type = null;\n+ private boolean _sparseSafe = false;\nprivate boolean _requiresCastdtm = false;\nprivate boolean _multipleConsumers = false;\n@@ -72,6 +73,14 @@ public class CNodeCell extends CNodeTpl\nreturn _type;\n}\n+ public void setSparseSafe(boolean flag) {\n+ _sparseSafe = flag;\n+ }\n+\n+ public boolean isSparseSafe() {\n+ return _sparseSafe;\n+ }\n+\npublic void setRequiresCastDtm(boolean flag) {\n_requiresCastdtm = flag;\n_hash = 0;\n@@ -99,8 +108,9 @@ public class CNodeCell extends CNodeTpl\n//return last TMP\ntmp = tmp.replaceAll(\"%OUT%\", getCurrentVarName());\n- //replace aggregate information\n- tmp = tmp.replaceAll(\"%TYPE%\", getCellType().toString());\n+ //replace meta data information\n+ tmp = tmp.replaceAll(\"%TYPE%\", getCellType().name());\n+ tmp = tmp.replaceAll(\"%SPARSE_SAFE%\", String.valueOf(isSparseSafe()));\nreturn tmp;\n}\n@@ -136,9 +146,10 @@ public class CNodeCell extends CNodeTpl\nif( _hash == 0 ) {\nint h1 = super.hashCode();\nint h2 = _type.hashCode();\n- int h3 = Boolean.valueOf(_requiresCastdtm).hashCode();\n+ int h3 = Boolean.valueOf(_sparseSafe).hashCode();\n+ int h4 = Boolean.valueOf(_requiresCastdtm).hashCode();\n//note: _multipleConsumers irrelevant for plan comparison\n- _hash = Arrays.hashCode(new int[]{h1,h2,h3});\n+ _hash = Arrays.hashCode(new int[]{h1,h2,h3,h4});\n}\nreturn _hash;\n}\n@@ -151,6 +162,7 @@ public class CNodeCell extends CNodeTpl\nCNodeCell that = (CNodeCell)o;\nreturn super.equals(that)\n&& _type == that._type\n+ && _sparseSafe == that._sparseSafe\n&& _requiresCastdtm == that._requiresCastdtm\n&& equalInputReferences(\n_output, that._output, _inputs, that._inputs);\n@@ -161,6 +173,7 @@ public class CNodeCell extends CNodeTpl\nStringBuilder sb = new StringBuilder();\nsb.append(\"SPOOF CELLWISE [type=\");\nsb.append(_type.name());\n+ sb.append(\", spafeSafe=\"+_sparseSafe);\nsb.append(\", castdtm=\"+_requiresCastdtm);\nsb.append(\", mc=\"+_multipleConsumers);\nsb.append(\"]\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java", "diff": "@@ -112,6 +112,8 @@ public class CellTpl extends BaseTpl\nCNode output = tmp.get(hop.getHopID());\nCNodeCell tpl = new CNodeCell(inputs, output);\ntpl.setCellType(TemplateUtils.getCellType(hop));\n+ tpl.setSparseSafe((HopRewriteUtils.isBinary(hop, OpOp2.MULT) && hop.getInput().contains(sinHops.getFirst()))\n+ || (HopRewriteUtils.isBinary(hop, OpOp2.DIV) && hop.getInput().get(0) == sinHops.getFirst()));\ntpl.setRequiresCastDtm(hop instanceof AggBinaryOp);\n// return cplan instance\n" }, { "change_type": "MODIFY", "old_path": 
"src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "diff": "@@ -48,16 +48,22 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nROW_AGG,\n}\n- protected CellType _type = CellType.NO_AGG;\n-\n- public SpoofCellwise() {\n+ private final CellType _type;\n+ private final boolean _sparseSafe;\n+ public SpoofCellwise(CellType type, boolean sparseSafe) {\n+ _type = type;\n+ _sparseSafe = sparseSafe;\n}\npublic CellType getCellType() {\nreturn _type;\n}\n+ public boolean isSparseSafe() {\n+ return _sparseSafe;\n+ }\n+\n@Override\npublic ScalarObject execute(ArrayList<MatrixBlock> inputs, ArrayList<ScalarObject> scalarObjects, int k)\nthrows DMLRuntimeException\n@@ -73,15 +79,19 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n//input preparation\ndouble[][] b = prepInputMatrices(inputs);\ndouble[] scalars = prepInputScalars(scalarObjects);\n-\nfinal int m = inputs.get(0).getNumRows();\nfinal int n = inputs.get(0).getNumColumns();\n+\n+ //sparse safe check\n+ boolean sparseSafe = isSparseSafe() || (b.length == 0\n+ && genexec( 0, b, scalars, m, n, 0, 0 ) == 0);\n+\ndouble sum = 0;\nif( k <= 1 ) //SINGLE-THREADED\n{\nsum = ( !inputs.get(0).isInSparseFormat() ) ?\n- executeDenseAndAgg(inputs.get(0).getDenseBlock(), b, scalars, n, m, 0, m) :\n- executeSparseAndAgg(inputs.get(0).getSparseBlock(), b, scalars, n, m, 0, m);\n+ executeDenseAndAgg(inputs.get(0).getDenseBlock(), b, scalars, m, n, sparseSafe, 0, m) :\n+ executeSparseAndAgg(inputs.get(0).getSparseBlock(), b, scalars, m, n, sparseSafe, 0, m);\n}\nelse //MULTI-THREADED\n{\n@@ -91,7 +101,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nint nk = UtilFunctions.roundToNext(Math.min(8*k,m/32), k);\nint blklen = (int)(Math.ceil((double)m/nk));\nfor( int i=0; i<nk & i*blklen<m; i++ )\n- tasks.add(new ParAggTask(inputs.get(0), b, scalars, n, m,i*blklen, Math.min((i+1)*blklen, m)));\n+ tasks.add(new ParAggTask(inputs.get(0), b, scalars, m, n, sparseSafe, i*blklen, Math.min((i+1)*blklen, m)));\n//execute tasks\nList<Future<Double>> taskret = pool.invokeAll(tasks);\npool.shutdown();\n@@ -138,17 +148,19 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n//input preparation\ndouble[][] b = prepInputMatrices(inputs);\ndouble[] scalars = prepInputScalars(scalarObjects);\n-\n- //core sequential execute\nfinal int m = inputs.get(0).getNumRows();\nfinal int n = inputs.get(0).getNumColumns();\n+ //sparse safe check\n+ boolean sparseSafe = isSparseSafe() || (b.length == 0\n+ && genexec( 0, b, scalars, m, n, 0, 0 ) == 0);\n+\nlong lnnz = 0;\nif( k <= 1 ) //SINGLE-THREADED\n{\nlnnz = (!inputs.get(0).isInSparseFormat()) ?\n- executeDense(inputs.get(0).getDenseBlock(), b, scalars, c, n, m, 0, m) :\n- executeSparse(inputs.get(0).getSparseBlock(), b, scalars, c, n, m, 0, m);\n+ executeDense(inputs.get(0).getDenseBlock(), b, scalars, c, m, n, sparseSafe, 0, m) :\n+ executeSparse(inputs.get(0).getSparseBlock(), b, scalars, c, m, n, sparseSafe, 0, m);\n}\nelse //MULTI-THREADED\n{\n@@ -159,7 +171,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nint blklen = (int)(Math.ceil((double)m/nk));\nfor( int i=0; i<nk & i*blklen<m; i++ )\ntasks.add(new ParExecTask(inputs.get(0), b, scalars, c,\n- n, m, i*blklen, Math.min((i+1)*blklen, m)));\n+ m, n, sparseSafe, i*blklen, Math.min((i+1)*blklen, m)));\n//execute 
tasks\nList<Future<Long>> taskret = pool.invokeAll(tasks);\npool.shutdown();\n@@ -187,46 +199,48 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n* @param rl\n* @param ru\n*/\n- private double executeDenseAndAgg(double[] a, double[][] b, double[] scalars, int n, int m, int rl, int ru)\n+ private double executeDenseAndAgg(double[] a, double[][] b, double[] scalars, int m, int n, boolean sparseSafe, int rl, int ru)\n{\nKahanObject kbuff = new KahanObject(0, 0);\nKahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n- if( a == null ) { //empty\n+ if( a == null && !sparseSafe ) { //empty\n//note: we can't determine sparse-safeness by executing the operator once\n//as the output might change with different row indices\nfor( int i=rl; i<ru; i++ )\nfor( int j=0; j<n; j++ )\n- kplus.execute2(kbuff, genexec( 0, b, scalars, n, m, i, j ));\n+ kplus.execute2(kbuff, genexec( 0, b, scalars, m, n, i, j ));\n}\n- else { //general case\n+ else if( a != null ) { //general case\nfor( int i=rl, ix=rl*n; i<ru; i++ )\nfor( int j=0; j<n; j++, ix++ )\n- kplus.execute2(kbuff, genexec( a[ix], b, scalars, n, m, i, j ));\n+ if( a[ix] != 0 || !sparseSafe)\n+ kplus.execute2(kbuff, genexec( a[ix], b, scalars, m, n, i, j ));\n}\nreturn kbuff._sum;\n}\n- private long executeDense(double[] a, double[][] b, double[] scalars, double[] c, int n, int m, int rl, int ru)\n+ private long executeDense(double[] a, double[][] b, double[] scalars, double[] c, int m, int n, boolean sparseSafe, int rl, int ru)\n{\nlong lnnz = 0;\nif( _type == CellType.NO_AGG )\n{\n- if( a == null ) { //empty\n+ if( a == null && !sparseSafe ) { //empty\n//note: we can't determine sparse-safeness by executing the operator once\n//as the output might change with different row indices\nfor( int i=rl, ix=rl*n; i<ru; i++ )\nfor( int j=0; j<n; j++, ix++ ) {\n- c[ix] = genexec( 0, b, scalars, n, m, i, j );\n+ c[ix] = genexec( 0, b, scalars, m, n, i, j );\nlnnz += (c[ix]!=0) ? 1 : 0;\n}\n}\n- else { //general case\n+ else if( a != null ) { //general case\nfor( int i=rl, ix=rl*n; i<ru; i++ )\n- for( int j=0; j<n; j++, ix++ ) {\n- c[ix] = genexec( a[ix], b, scalars, n, m, i, j);\n+ for( int j=0; j<n; j++, ix++ )\n+ if( a[ix] != 0 || !sparseSafe) {\n+ c[ix] = genexec( a[ix], b, scalars, m, n, i, j);\nlnnz += (c[ix]!=0) ? 1 : 0;\n}\n}\n@@ -236,22 +250,23 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nKahanObject kbuff = new KahanObject(0, 0);\nKahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n- if( a == null ) { //empty\n+ if( a == null && !sparseSafe ) { //empty\n//note: we can't determine sparse-safeness by executing the operator once\n//as the output might change with different row indices\nfor( int i=rl; i<ru; i++ ) {\nkbuff.set(0, 0);\nfor( int j=0; j<n; j++ )\n- kplus.execute2(kbuff, genexec( 0, b, scalars, n, m, i, j ));\n+ kplus.execute2(kbuff, genexec( 0, b, scalars, m, n, i, j ));\nc[i] = kbuff._sum;\nlnnz += (c[i]!=0) ? 1 : 0;\n}\n}\n- else { //general case\n+ else if( a != null ) { //general case\nfor( int i=rl, ix=rl*n; i<ru; i++ ) {\nkbuff.set(0, 0);\nfor( int j=0; j<n; j++, ix++ )\n- kplus.execute2(kbuff, genexec( a[ix], b, scalars, n, m, i, j ));\n+ if( a[ix] != 0 || !sparseSafe)\n+ kplus.execute2(kbuff, genexec( a[ix], b, scalars, m, n, i, j ));\nc[i] = kbuff._sum;\nlnnz += (c[i]!=0) ? 
1 : 0;\n}\n@@ -261,15 +276,11 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nreturn lnnz;\n}\n- private double executeSparseAndAgg(SparseBlock sblock, double[][] b, double[] scalars, int n, int m, int rl, int ru)\n+ private double executeSparseAndAgg(SparseBlock sblock, double[][] b, double[] scalars, int m, int n, boolean sparseSafe, int rl, int ru)\n{\nKahanObject kbuff = new KahanObject(0, 0);\nKahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n- //sparse safe check\n- boolean sparseSafe = (b.length == 0 //no sideways inputs\n- && genexec( 0, b, scalars, n, m, 0, 0 ) == 0); //0 input results in 0\n-\nif( sparseSafe ) {\nif( sblock != null ) {\nfor( int i=rl; i<ru; i++ )\n@@ -278,7 +289,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nint alen = sblock.size(i);\ndouble[] avals = sblock.values(i);\nfor( int j=apos; j<apos+alen; j++ ) {\n- kplus.execute2( kbuff, genexec(avals[j], b, scalars, n, m, i, j));\n+ kplus.execute2( kbuff, genexec(avals[j], b, scalars, m, n, i, j));\n}\n}\n}\n@@ -287,19 +298,15 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nfor(int i=rl; i<ru; i++)\nfor(int j=0; j<n; j++) {\ndouble valij = (sblock != null) ? sblock.get(i, j) : 0;\n- kplus.execute2( kbuff, genexec(valij, b, scalars, n, m, i, j));\n+ kplus.execute2( kbuff, genexec(valij, b, scalars, m, n, i, j));\n}\n}\nreturn kbuff._sum;\n}\n- private long executeSparse(SparseBlock sblock, double[][] b, double[] scalars, double[] c, int n, int m, int rl, int ru)\n+ private long executeSparse(SparseBlock sblock, double[][] b, double[] scalars, double[] c, int m, int n, boolean sparseSafe, int rl, int ru)\n{\n- //sparse safe check\n- boolean sparseSafe = (b.length == 0 //no sideways inputs\n- && genexec( 0, b, scalars, n, m, 0, 0 ) == 0); //0 input results in 0\n-\nlong lnnz = 0;\nif( _type == CellType.NO_AGG )\n{\n@@ -311,7 +318,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nint alen = sblock.size(i);\ndouble[] avals = sblock.values(i);\nfor( int j=apos; j<apos+alen; j++ ) {\n- double val = genexec(avals[j], b, scalars, n, m, i, j);\n+ double val = genexec(avals[j], b, scalars, m, n, i, j);\nc[i*n+sblock.indexes(i)[j]] = val;\nlnnz += (val!=0) ? 1 : 0;\n}\n@@ -322,7 +329,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nfor(int i=rl, cix=rl*n; i<ru; i++, cix+=n)\nfor(int j=0; j<n; j++) {\ndouble valij = (sblock != null) ? sblock.get(i, j) : 0;\n- c[cix+j] = genexec(valij, b, scalars, n, m, i, j);\n+ c[cix+j] = genexec(valij, b, scalars, m, n, i, j);\nlnnz += (c[cix+j]!=0) ? 1 : 0;\n}\n}\n@@ -341,7 +348,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nint alen = sblock.size(i);\ndouble[] avals = sblock.values(i);\nfor( int j=apos; j<apos+alen; j++ ) {\n- kplus.execute2(kbuff, genexec(avals[j], b, scalars, n, m, i, j));\n+ kplus.execute2(kbuff, genexec(avals[j], b, scalars, m, n, i, j));\n}\nc[i] = kbuff._sum;\nlnnz += (c[i]!=0) ? 1 : 0;\n@@ -353,7 +360,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nkbuff.set(0, 0);\nfor(int j=0; j<n; j++) {\ndouble valij = (sblock != null) ? sblock.get(i, j) : 0;\n- kplus.execute2( kbuff, genexec(valij, b, scalars, n, m, i, j));\n+ kplus.execute2( kbuff, genexec(valij, b, scalars, m, n, i, j));\n}\nc[i] = kbuff._sum;\nlnnz += (c[i]!=0) ? 
1 : 0;\n@@ -364,24 +371,27 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nreturn lnnz;\n}\n- protected abstract double genexec( double a, double[][] b, double[] scalars, int n, int m, int rowIndex, int colIndex);\n+ protected abstract double genexec( double a, double[][] b, double[] scalars, int m, int n, int rowIndex, int colIndex);\nprivate class ParAggTask implements Callable<Double>\n{\nprivate final MatrixBlock _a;\nprivate final double[][] _b;\nprivate final double[] _scalars;\n- private final int _clen;\nprivate final int _rlen;\n+ private final int _clen;\n+ private final boolean _safe;\nprivate final int _rl;\nprivate final int _ru;\n- protected ParAggTask( MatrixBlock a, double[][] b, double[] scalars, int clen, int rlen, int rl, int ru ) {\n+ protected ParAggTask( MatrixBlock a, double[][] b, double[] scalars,\n+ int rlen, int clen, boolean sparseSafe, int rl, int ru ) {\n_a = a;\n_b = b;\n_scalars = scalars;\n- _clen = clen;\n_rlen = rlen;\n+ _clen = clen;\n+ _safe = sparseSafe;\n_rl = rl;\n_ru = ru;\n}\n@@ -389,8 +399,8 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n@Override\npublic Double call() throws DMLRuntimeException {\nreturn ( !_a.isInSparseFormat()) ?\n- executeDenseAndAgg(_a.getDenseBlock(), _b, _scalars, _clen, _rlen, _rl, _ru) :\n- executeSparseAndAgg(_a.getSparseBlock(), _b, _scalars, _clen, _rlen, _rl, _ru);\n+ executeDenseAndAgg(_a.getDenseBlock(), _b, _scalars, _rlen, _clen, _safe, _rl, _ru) :\n+ executeSparseAndAgg(_a.getSparseBlock(), _b, _scalars, _rlen, _clen, _safe, _rl, _ru);\n}\n}\n@@ -400,18 +410,21 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nprivate final double[][] _b;\nprivate final double[] _scalars;\nprivate final double[] _c;\n- private final int _clen;\nprivate final int _rlen;\n+ private final int _clen;\n+ private final boolean _safe;\nprivate final int _rl;\nprivate final int _ru;\n- protected ParExecTask( MatrixBlock a, double[][] b, double[] scalars, double[] c, int clen, int rlen, int rl, int ru ) {\n+ protected ParExecTask( MatrixBlock a, double[][] b, double[] scalars, double[] c,\n+ int rlen, int clen, boolean sparseSafe, int rl, int ru ) {\n_a = a;\n_b = b;\n_scalars = scalars;\n_c = c;\n- _clen = clen;\n_rlen = rlen;\n+ _clen = clen;\n+ _safe = sparseSafe;\n_rl = rl;\n_ru = ru;\n}\n@@ -419,8 +432,8 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n@Override\npublic Long call() throws DMLRuntimeException {\nreturn (!_a.isInSparseFormat()) ?\n- executeDense(_a.getDenseBlock(), _b, _scalars, _c, _clen, _rlen, _rl, _ru) :\n- executeSparse(_a.getSparseBlock(), _b, _scalars, _c, _clen, _rlen, _rl, _ru);\n+ executeDense(_a.getDenseBlock(), _b, _scalars, _c, _rlen, _clen, _safe, _rl, _ru) :\n+ executeSparse(_a.getSparseBlock(), _b, _scalars, _c, _rlen, _clen, _safe, _rl, _ru);\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java", "diff": "@@ -35,13 +35,15 @@ import org.apache.sysml.test.utils.TestUtils;\npublic class CellwiseTmplTest extends AutomatedTestBase\n{\n- private static final String TEST_NAME1 = \"cellwisetmpl1\";\n- private static final String TEST_NAME2 = \"cellwisetmpl2\";\n- private static final String TEST_NAME3 = \"cellwisetmpl3\";\n- private static final String TEST_NAME4 = 
\"cellwisetmpl4\";\n- private static final String TEST_NAME5 = \"cellwisetmpl5\";\n- private static final String TEST_NAME6 = \"cellwisetmpl6\";\n- private static final String TEST_NAME7 = \"cellwisetmpl7\";\n+ private static final String TEST_NAME = \"cellwisetmpl\";\n+ private static final String TEST_NAME1 = TEST_NAME+1;\n+ private static final String TEST_NAME2 = TEST_NAME+2;\n+ private static final String TEST_NAME3 = TEST_NAME+3;\n+ private static final String TEST_NAME4 = TEST_NAME+4;\n+ private static final String TEST_NAME5 = TEST_NAME+5;\n+ private static final String TEST_NAME6 = TEST_NAME+6;\n+ private static final String TEST_NAME7 = TEST_NAME+7;\n+ private static final String TEST_NAME8 = TEST_NAME+8;\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -53,13 +55,10 @@ public class CellwiseTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"1\" }) );\n- addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"2\" }) );\n- addTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"3\" }) );\n- addTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"4\" }) );\n- addTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"5\" }) );\n- addTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"6\" }) );\n- addTestConfiguration( TEST_NAME7, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME7, new String[] { \"7\" }) );\n+ for( int i=1; i<=8; i++ ) {\n+ addTestConfiguration( TEST_NAME+i, new TestConfiguration(\n+ TEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n+ }\n}\n@Test\n@@ -98,6 +97,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME7, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite8() {\n+ testCodegenIntegration( TEST_NAME8, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwise1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -134,6 +138,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME7, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwise8() {\n+ testCodegenIntegration( TEST_NAME8, false, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwiseRewrite1_sp() {\ntestCodegenIntegration( TEST_NAME1, true, ExecType.SPARK );\n@@ -144,6 +153,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME7, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite8_sp() {\n+ testCodegenIntegration( TEST_NAME8, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/cellwisetmpl8.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(1, 1002, 23);\n+Y = seq(1, 1002);\n+X[100:900,] = matrix(0, 801, 23);\n+if(1==1){}\n+\n+R = X * ((X + 7.7) * (Y%*%matrix(1,1,23)));\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/cellwisetmpl8.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(1, rows=1002, cols=23);\n+Y = seq(1, 1002);\n+X[100:900,] = matrix(0, rows=801, cols=23);\n+if(1==1){}\n+\n+R = X * ((X + 7.7) * Y);\n+\n+write(R, $1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1420] Extended code generator (sparsity-exploiting celltmpl)
49,738
18.03.2017 21:39:28
25,200
9cbaf85ab1389cde9fb79f58e29c0adb6044c493
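With the template change above, every generated cell operator now passes its aggregation type and a sparse-safety flag to the SpoofCellwise constructor, and the runtime skips zero cells of the main input whenever that flag is set. The class below is a hand-written illustration of what the modified template could emit for an expression such as R = X * (X + 7.7); the class name TMP12, the imports, and the exact body are assumptions for the sketch, not output copied from the code generator.

import org.apache.sysml.runtime.codegen.SpoofCellwise;
import org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;

public final class TMP12 extends SpoofCellwise {
  public TMP12() {
    // NO_AGG output, sparse-safe because the root multiplies with the main input X
    super(CellType.NO_AGG, true);
  }

  protected double genexec(double a, double[][] b, double[] scalars,
      int m, int n, int rowIndex, int colIndex) {
    double TMP1 = a + 7.7;
    double TMP2 = a * TMP1;
    return TMP2;
  }
}

The new cellwisetmpl8 test exercises this path: X is mostly empty and multiplied in at the root, so the generated operator can skip the empty rows while still producing the same result as the R reference script.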
Improved candidate exploration of code generation plans

This patch improves the codegen candidate exploration algorithm by (1) better memoization (which now also includes unsupported operators) and (2) a simplified creation of merge plans (which now also applies to ternary operators).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -44,6 +44,7 @@ import org.apache.sysml.hops.codegen.template.BaseTpl.CloseType;\nimport org.apache.sysml.hops.codegen.template.BaseTpl.TemplateType;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntry;\n+import org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntrySet;\nimport org.apache.sysml.hops.codegen.template.TemplateUtils;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.Hop.OpOp1;\n@@ -318,68 +319,59 @@ public class SpoofCompiler\nthrows DMLException\n{\n//top-down memoization of processed dag nodes\n- if( memo.contains(hop.getHopID()) )\n+ if( memo.contains(hop.getHopID()) || memo.containsHop(hop) )\nreturn;\n- //recursively process child nodes\n+ //recursive candidate exploration\nfor( Hop c : hop.getInput() )\nrExploreCPlans(c, memo, compileLiterals);\n- //generate new node plans\n+ //open initial operator plans, if possible\nfor( BaseTpl tpl : TemplateUtils.TEMPLATES )\nif( tpl.open(hop) )\nmemo.add(hop, tpl.getType());\n+ //fuse and merge operator plans\nfor( Hop c : hop.getInput() ) {\nif( memo.contains(c.getHopID()) )\nfor( MemoTableEntry me : memo.get(c.getHopID()) ) {\nBaseTpl tpl = TemplateUtils.createTemplate(me.type, me.closed);\n- if( tpl.fuse(hop, c) )\n- genExplorePlans(tpl, hop, memo, hop.getInput(), c);\n+ if( tpl.fuse(hop, c) ) {\n+ int pos = hop.getInput().indexOf(c);\n+ MemoTableEntrySet P = new MemoTableEntrySet(tpl.getType(), pos, c.getHopID(), tpl.isClosed());\n+ for(int k=0; k<hop.getInput().size(); k++)\n+ if( k != pos ) {\n+ Hop input2 = hop.getInput().get(k);\n+ if( memo.contains(input2.getHopID()) && !memo.get(input2.getHopID()).get(0).closed\n+ && memo.get(input2.getHopID()).get(0).type == TemplateType.CellTpl && tpl.merge(hop, input2) )\n+ P.crossProduct(k, -1L, input2.getHopID());\n+ else\n+ P.crossProduct(k, -1L);\n+ }\n+ memo.addAll(hop, P);\n+ }\n}\n}\n//prune subsumed / redundant plans\nmemo.pruneRedundant(hop.getHopID());\n- //check if templates require close\n+ //close operator plans, if required\nif( memo.contains(hop.getHopID()) ) {\nIterator<MemoTableEntry> iter = memo.get(hop.getHopID()).iterator();\nwhile( iter.hasNext() ) {\nMemoTableEntry me = iter.next();\nBaseTpl tpl = TemplateUtils.createTemplate(me.type);\nCloseType ccode = tpl.close(hop);\n- if( ccode != CloseType.OPEN ) {\n- me.closed = true;\nif( ccode == CloseType.CLOSED_INVALID )\niter.remove();\n- }\n- }\n+ else if( ccode == CloseType.CLOSED_VALID )\n+ me.closed = true;\n}\n}\n- private static void genExplorePlans(BaseTpl tpl, Hop hop, CPlanMemoTable memo, ArrayList<Hop> inputs, Hop exclude)\n- {\n- //handle unary operators\n- if( hop.getInput().size() == 1 ) {\n- memo.add(hop, tpl.getType(), exclude.getHopID());\n- }\n- //handle binary operators\n- //TODO rework plan exploration step\n- else if( hop.getInput().size() == 2 ) {\n- int input2ix = (inputs.get(0)==exclude ? 1:0);\n- Hop input2 = inputs.get(input2ix);\n- long[] refs = (input2ix==1) ? 
new long[]{exclude.getHopID(), -1} : new long[]{-1, exclude.getHopID()};\n- memo.add(hop, tpl.getType(), refs[0], refs[1]);\n- if( memo.contains(input2.getHopID()) && !memo.get(input2.getHopID()).get(0).closed\n- && memo.get(input2.getHopID()).get(0).type == TemplateType.CellTpl && tpl.merge(hop, input2) ) {\n- refs[input2ix] = input2.getHopID();\n- memo.add(hop, tpl.getType(), refs[0], refs[1]);\n- }\n- }\n- else {\n- LOG.warn(\"genExplorePlans currently only supports unary and binary operators.\");\n- }\n+ //mark visited even if no plans found (e.g., unsupported ops)\n+ memo.addHop(hop);\n}\nprivate static void rConstructCPlans(Hop hop, CPlanMemoTable memo, HashMap<Long, Pair<Hop[],CNodeTpl>> cplans, boolean compileLiterals)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java", "diff": "@@ -52,6 +52,14 @@ public class CPlanMemoTable\n_plansBlacklist = new HashSet<Long>();\n}\n+ public void addHop(Hop hop) {\n+ _hopRefs.put(hop.getHopID(), hop);\n+ }\n+\n+ public boolean containsHop(Hop hop) {\n+ return _hopRefs.containsKey(hop.getHopID());\n+ }\n+\npublic boolean contains(long hopID) {\nreturn _plans.containsKey(hopID);\n}\n@@ -80,6 +88,13 @@ public class CPlanMemoTable\n_plans.get(hop.getHopID()).add(new MemoTableEntry(type, in1, in2, in3));\n}\n+ public void addAll(Hop hop, MemoTableEntrySet P) {\n+ _hopRefs.put(hop.getHopID(), hop);\n+ if( !_plans.containsKey(hop.getHopID()) )\n+ _plans.put(hop.getHopID(), new ArrayList<MemoTableEntry>());\n+ _plans.get(hop.getHopID()).addAll(P.plans);\n+ }\n+\n@SuppressWarnings(\"unchecked\")\npublic void pruneRedundant(long hopID) {\nif( !contains(hopID) )\n@@ -277,4 +292,23 @@ public class CPlanMemoTable\nreturn type.name()+\"(\"+input1+\",\"+input2+\",\"+input3+\")\";\n}\n}\n+\n+ public static class MemoTableEntrySet\n+ {\n+ public ArrayList<MemoTableEntry> plans = new ArrayList<MemoTableEntry>();\n+\n+ public MemoTableEntrySet(TemplateType type, int pos, long hopID, boolean close) {\n+ plans.add(new MemoTableEntry(type, (pos==0)?hopID:-1,\n+ (pos==1)?hopID:-1, (pos==2)?hopID:-1));\n+ }\n+\n+ public void crossProduct(int pos, Long... refs) {\n+ ArrayList<MemoTableEntry> tmp = new ArrayList<MemoTableEntry>();\n+ for( MemoTableEntry me : plans )\n+ for( Long ref : refs )\n+ tmp.add(new MemoTableEntry(me.type, (pos==0)?ref:me.input1,\n+ (pos==1)?ref:me.input2, (pos==2)?ref:me.input3));\n+ plans = tmp;\n+ }\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1374] Improved candidate exploration of code generation plans

This patch improves the codegen candidate exploration algorithm by (1) better memoization (which now also includes unsupported operators) and (2) a simplified creation of merge plans (which now also applies to ternary operators).
49,738
19.03.2017 01:41:18
25,200
2893e1aed03f9259fdd63504483484da92d761af
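The MemoTableEntrySet introduced above enumerates fusion candidates as vectors of input references: each position holds the hop ID of a child plan that is fused, or -1 if that input is read as plain data, and crossProduct expands the set for every additional mergeable input. The stand-alone sketch below mimics that expansion with plain long arrays; the hop IDs 7 and 9 are hypothetical, and the real implementation stores MemoTableEntry objects per template type rather than raw arrays.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MergePlanEnumeration {
  public static void main(String[] args) {
    long fusedChild = 7L;     // hypothetical hop ID of the child plan fused at position 0
    long mergeableInput = 9L; // hypothetical hop ID of a second input with an open cell plan

    // start from the plan that only fuses the first child
    List<long[]> plans = new ArrayList<long[]>();
    plans.add(new long[]{fusedChild, -1, -1});

    // cross product at position 1: keep the input as data (-1) or merge its open plan (9)
    List<long[]> expanded = new ArrayList<long[]>();
    for (long[] p : plans)
      for (long ref : new long[]{-1, mergeableInput})
        expanded.add(new long[]{p[0], ref, p[2]});

    for (long[] p : expanded)
      System.out.println(Arrays.toString(p)); // [7, -1, -1] and [7, 9, -1]
  }
}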
Extended code generator (right indexing in cell/rowagg)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/IndexingOp.java", "new_path": "src/main/java/org/apache/sysml/hops/IndexingOp.java", "diff": "@@ -76,11 +76,11 @@ public class IndexingOp extends Hop\n}\n- public boolean getRowLowerEqualsUpper(){\n+ public boolean isRowLowerEqualsUpper(){\nreturn _rowLowerEqualsUpper;\n}\n- public boolean getColLowerEqualsUpper() {\n+ public boolean isColLowerEqualsUpper() {\nreturn _colLowerEqualsUpper;\n}\n@@ -397,6 +397,10 @@ public class IndexingOp extends Hop\nHop input4 = getInput().get(3); //inpColL\nHop input5 = getInput().get(4); //inpColU\n+ //update single row/column flags (depends on CSE)\n+ _rowLowerEqualsUpper = (input2 == input3);\n+ _colLowerEqualsUpper = (input4 == input5);\n+\n//parse input information\nboolean allRows =\n( input2 instanceof LiteralOp && HopRewriteUtils.getIntValueSafe((LiteralOp)input2)==1\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/LiteralOp.java", "new_path": "src/main/java/org/apache/sysml/hops/LiteralOp.java", "diff": "@@ -183,7 +183,7 @@ public class LiteralOp extends Hop\n//do nothing; it is a scalar\n}\n- public long getLongValue() throws HopsException\n+ public long getLongValue()\n{\nswitch( getValueType() ) {\ncase INT:\n@@ -192,8 +192,10 @@ public class LiteralOp extends Hop\nreturn UtilFunctions.toLong(value_double);\ncase STRING:\nreturn Long.parseLong(value_string);\n+ case BOOLEAN:\n+ return value_boolean ? 1 : 0;\ndefault:\n- throw new HopsException(\"Can not coerce an object of type \" + getValueType() + \" into Long.\");\n+ return -1;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -36,6 +36,8 @@ import org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeCell;\nimport org.apache.sysml.hops.codegen.cplan.CNodeData;\nimport org.apache.sysml.hops.codegen.cplan.CNodeOuterProduct;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary.TernaryType;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary.UnaryType;\n@@ -490,8 +492,15 @@ public class SpoofCompiler\n//remove spurious lookups on main input of cell template\nif( tpl instanceof CNodeCell || tpl instanceof CNodeOuterProduct ) {\n- CNode in1 = tpl.getInput().get(0);\n- rFindAndRemoveLookup(tpl.getOutput(), in1.getVarname());\n+ CNodeData in1 = (CNodeData)tpl.getInput().get(0);\n+ rFindAndRemoveLookup(tpl.getOutput(), in1);\n+ }\n+\n+ //remove invalid plans with column indexing on main input\n+ if( tpl instanceof CNodeCell ) {\n+ CNodeData in1 = (CNodeData)tpl.getInput().get(0);\n+ if( rHasLookupRC1(tpl.getOutput(), in1) )\n+ cplans2.remove(e.getKey());\n}\n//remove cplan w/ single op and w/o agg\n@@ -517,17 +526,32 @@ public class SpoofCompiler\nrCollectLeafIDs(c, leafs);\n}\n- private static void rFindAndRemoveLookup(CNode node, String nodeName) {\n+ private static void rFindAndRemoveLookup(CNode node, CNodeData mainInput) {\nfor( int i=0; i<node.getInput().size(); i++ ) {\nCNode tmp = node.getInput().get(i);\nif( tmp instanceof CNodeUnary && (((CNodeUnary)tmp).getType()==UnaryType.LOOKUP_R\n|| ((CNodeUnary)tmp).getType()==UnaryType.LOOKUP_RC)\n- && tmp.getInput().get(0).getVarname().equals(nodeName) )\n+ && 
tmp.getInput().get(0) instanceof CNodeData\n+ && ((CNodeData)tmp.getInput().get(0)).getHopID()==mainInput.getHopID() )\n{\nnode.getInput().set(i, tmp.getInput().get(0));\n}\nelse\n- rFindAndRemoveLookup(tmp, nodeName);\n+ rFindAndRemoveLookup(tmp, mainInput);\n}\n}\n+\n+ private static boolean rHasLookupRC1(CNode node, CNodeData mainInput) {\n+ boolean ret = false;\n+ for( int i=0; i<node.getInput().size() && !ret; i++ ) {\n+ CNode tmp = node.getInput().get(i);\n+ if( tmp instanceof CNodeTernary && ((CNodeTernary)tmp).getType()==TernaryType.LOOKUP_RC1\n+ && tmp.getInput().get(0) instanceof CNodeData\n+ && ((CNodeData)tmp.getInput().get(0)).getHopID() == mainInput.getHopID())\n+ ret = true;\n+ else\n+ ret |= rHasLookupRC1(tmp, mainInput);\n+ }\n+ return ret;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeCell.java", "diff": "@@ -173,7 +173,7 @@ public class CNodeCell extends CNodeTpl\nStringBuilder sb = new StringBuilder();\nsb.append(\"SPOOF CELLWISE [type=\");\nsb.append(_type.name());\n- sb.append(\", spafeSafe=\"+_sparseSafe);\n+ sb.append(\", sparseSafe=\"+_sparseSafe);\nsb.append(\", castdtm=\"+_requiresCastdtm);\nsb.append(\", mc=\"+_multipleConsumers);\nsb.append(\"]\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java", "diff": "@@ -27,7 +27,8 @@ import org.apache.sysml.parser.Expression.DataType;\npublic class CNodeTernary extends CNode\n{\npublic enum TernaryType {\n- PLUS_MULT, MINUS_MULT;\n+ PLUS_MULT, MINUS_MULT,\n+ LOOKUP_RC1;\npublic static boolean contains(String value) {\nfor( TernaryType tt : values() )\n@@ -44,6 +45,9 @@ public class CNodeTernary extends CNode\ncase MINUS_MULT:\nreturn \" double %TMP% = %IN1% - %IN2% * %IN3%;\\n;\\n\" ;\n+ case LOOKUP_RC1:\n+ return \" double %TMP% = %IN1%[rowIndex*%IN2%+%IN3%-1];\\n\";\n+\ndefault:\nthrow new RuntimeException(\"Invalid ternary type: \"+this.toString());\n}\n@@ -97,6 +101,7 @@ public class CNodeTernary extends CNode\nswitch(_type) {\ncase PLUS_MULT: return \"t(+*)\";\ncase MINUS_MULT: return \"t(-*)\";\n+ case LOOKUP_RC1: return \"u(ixrc1)\";\ndefault:\nreturn super.toString();\n}\n@@ -107,6 +112,7 @@ public class CNodeTernary extends CNode\nswitch(_type) {\ncase PLUS_MULT:\ncase MINUS_MULT:\n+ case LOOKUP_RC1:\n_rows = 0;\n_cols = 0;\n_dataType= DataType.SCALAR;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java", "diff": "@@ -32,6 +32,8 @@ import org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.Hop.OpOp2;\n+import org.apache.sysml.hops.IndexingOp;\n+import org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary;\n@@ -56,7 +58,8 @@ public class CellTpl extends BaseTpl\n@Override\npublic boolean open(Hop hop) {\n- return isValidOperation(hop);\n+ return isValidOperation(hop)\n+ || (hop instanceof IndexingOp && ((IndexingOp)hop).isColLowerEqualsUpper());\n}\n@Override\n@@ -197,6 +200,14 @@ public class CellTpl extends BaseTpl\nout = new CNodeTernary(cdata1, cdata2, 
cdata3,\nTernaryType.valueOf(top.getOp().toString()));\n}\n+ else if( hop instanceof IndexingOp )\n+ {\n+ CNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n+ out = new CNodeTernary(cdata1,\n+ TemplateUtils.createCNodeData(new LiteralOp(hop.getInput().get(0).getDim2()), true),\n+ TemplateUtils.createCNodeData(hop.getInput().get(4), true),\n+ TernaryType.LOOKUP_RC1);\n+ }\nelse if( HopRewriteUtils.isTransposeOperation(hop) )\n{\nout = tmp.get(hop.getInput().get(0).getHopID());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/RowAggTpl.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/RowAggTpl.java", "diff": "@@ -28,11 +28,15 @@ import org.apache.sysml.hops.AggBinaryOp;\nimport org.apache.sysml.hops.AggUnaryOp;\nimport org.apache.sysml.hops.BinaryOp;\nimport org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.IndexingOp;\n+import org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary.BinType;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary.TernaryType;\nimport org.apache.sysml.hops.codegen.cplan.CNodeData;\nimport org.apache.sysml.hops.codegen.cplan.CNodeRowAgg;\n+import org.apache.sysml.hops.codegen.cplan.CNodeTernary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeUnary.UnaryType;\n@@ -90,18 +94,16 @@ public class RowAggTpl extends BaseTpl {\npublic Pair<Hop[], CNodeTpl> constructCplan(Hop hop, CPlanMemoTable memo, boolean compileLiterals) {\n//recursively process required cplan output\nHashSet<Hop> inHops = new HashSet<Hop>();\n+ HashMap<String, Hop> inHops2 = new HashMap<String,Hop>();\nHashMap<Long, CNode> tmp = new HashMap<Long, CNode>();\nhop.resetVisitStatus();\n- rConstructCplan(hop, memo, tmp, inHops, compileLiterals);\n+ rConstructCplan(hop, memo, tmp, inHops, inHops2, compileLiterals);\nhop.resetVisitStatus();\n//reorder inputs (ensure matrix is first input)\nLinkedList<Hop> sinHops = new LinkedList<Hop>(inHops);\n- for( Hop h : inHops )\n- if( h.getDataType().isMatrix() && !TemplateUtils.isVector(h) ) {\n- sinHops.remove(h);\n- sinHops.addFirst(h);\n- }\n+ Hop X = inHops2.get(\"X\");\n+ sinHops.remove(X); sinHops.addFirst(X);\n//construct template node\nArrayList<CNode> inputs = new ArrayList<CNode>();\n@@ -114,14 +116,14 @@ public class RowAggTpl extends BaseTpl {\nreturn new Pair<Hop[],CNodeTpl>(sinHops.toArray(new Hop[0]), tpl);\n}\n- private void rConstructCplan(Hop hop, CPlanMemoTable memo, HashMap<Long, CNode> tmp, HashSet<Hop> inHops, boolean compileLiterals)\n+ private void rConstructCplan(Hop hop, CPlanMemoTable memo, HashMap<Long, CNode> tmp, HashSet<Hop> inHops, HashMap<String, Hop> inHops2, boolean compileLiterals)\n{\n//recursively process required childs\nMemoTableEntry me = memo.getBest(hop.getHopID(), TemplateType.RowAggTpl);\nfor( int i=0; i<hop.getInput().size(); i++ ) {\nHop c = hop.getInput().get(i);\nif( me.isPlanRef(i) )\n- rConstructCplan(c, memo, tmp, inHops, compileLiterals);\n+ rConstructCplan(c, memo, tmp, inHops, inHops2, compileLiterals);\nelse {\nCNodeData cdata = TemplateUtils.createCNodeData(c, compileLiterals);\ntmp.put(c.getHopID(), cdata);\n@@ -137,8 +139,10 @@ public class RowAggTpl extends BaseTpl {\nif( ((AggUnaryOp)hop).getDirection() == Direction.Row && ((AggUnaryOp)hop).getOp() == AggOp.SUM ) 
{\nif(hop.getInput().get(0).getDim2()==1)\nout = (cdata1.getDataType()==DataType.SCALAR) ? cdata1 : new CNodeUnary(cdata1,UnaryType.LOOKUP_R);\n- else\n+ else {\nout = new CNodeUnary(cdata1, UnaryType.ROW_SUMS);\n+ inHops2.put(\"X\", hop.getInput().get(0));\n+ }\n}\nelse if (((AggUnaryOp)hop).getDirection() == Direction.Col && ((AggUnaryOp)hop).getOp() == AggOp.SUM ) {\n//vector div add without temporary copy\n@@ -167,8 +171,10 @@ public class RowAggTpl extends BaseTpl {\nif(hop.getInput().get(0).getDim2()==1 && hop.getInput().get(1).getDim2()==1)\nout = new CNodeBinary((cdata1.getDataType()==DataType.SCALAR)? cdata1 : new CNodeUnary(cdata1, UnaryType.LOOKUP0),\n(cdata2.getDataType()==DataType.SCALAR)? cdata2 : new CNodeUnary(cdata2, UnaryType.LOOKUP0), BinType.MULT);\n- else\n+ else {\nout = new CNodeBinary(cdata1, cdata2, BinType.DOT_PRODUCT);\n+ inHops2.put(\"X\", hop.getInput().get(0));\n+ }\n}\n}\nelse if(hop instanceof BinaryOp)\n@@ -194,6 +200,14 @@ public class RowAggTpl extends BaseTpl {\nout = new CNodeBinary(cdata1, cdata2, BinType.valueOf(primitiveOpName));\n}\n}\n+ else if( hop instanceof IndexingOp )\n+ {\n+ CNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n+ out = new CNodeTernary(cdata1,\n+ TemplateUtils.createCNodeData(new LiteralOp(hop.getInput().get(0).getDim2()), true),\n+ TemplateUtils.createCNodeData(hop.getInput().get(4), true),\n+ TernaryType.LOOKUP_RC1);\n+ }\nif( out.getDataType().isMatrix() ) {\nout.setNumRows(hop.getDim1());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -1234,8 +1234,8 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\n{\n//e.g., (X%*%Y)[1,1] -> X[1,] %*% Y[,1]\nif( hi instanceof IndexingOp\n- && ((IndexingOp)hi).getRowLowerEqualsUpper()\n- && ((IndexingOp)hi).getColLowerEqualsUpper()\n+ && ((IndexingOp)hi).isRowLowerEqualsUpper()\n+ && ((IndexingOp)hi).isColLowerEqualsUpper()\n&& hi.getInput().get(0).getParent().size()==1 //rix is single mm consumer\n&& HopRewriteUtils.isMatrixMultiply(hi.getInput().get(0)) )\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteForLoopVectorization.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteForLoopVectorization.java", "diff": "@@ -133,12 +133,12 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\n&& right.getInput().get(0) instanceof IndexingOp )\n{\nIndexingOp ix = (IndexingOp)right.getInput().get(0);\n- if( ix.getRowLowerEqualsUpper() && ix.getInput().get(1) instanceof DataOp\n+ if( ix.isRowLowerEqualsUpper() && ix.getInput().get(1) instanceof DataOp\n&& ix.getInput().get(1).getName().equals(itervar) ){\nleftScalar = true;\nrowIx = true;\n}\n- else if( ix.getColLowerEqualsUpper() && ix.getInput().get(3) instanceof DataOp\n+ else if( ix.isColLowerEqualsUpper() && ix.getInput().get(3) instanceof DataOp\n&& ix.getInput().get(3).getName().equals(itervar) ){\nleftScalar = true;\nrowIx = false;\n@@ -152,12 +152,12 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\n&& left.getInput().get(0) instanceof IndexingOp )\n{\nIndexingOp ix = (IndexingOp)left.getInput().get(0);\n- if( ix.getRowLowerEqualsUpper() && ix.getInput().get(1) instanceof DataOp\n+ if( ix.isRowLowerEqualsUpper() && ix.getInput().get(1) instanceof DataOp\n&& 
ix.getInput().get(1).getName().equals(itervar) ){\nrightScalar = true;\nrowIx = true;\n}\n- else if( ix.getColLowerEqualsUpper() && ix.getInput().get(3) instanceof DataOp\n+ else if( ix.isColLowerEqualsUpper() && ix.getInput().get(3) instanceof DataOp\n&& ix.getInput().get(3).getName().equals(itervar) ){\nrightScalar = true;\nrowIx = false;\n@@ -236,7 +236,7 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\nIndexingOp rix1 = (IndexingOp) lixrhs.getInput().get(1);\n//check for rowwise\n- if( lix.getRowLowerEqualsUpper() && rix0.getRowLowerEqualsUpper() && rix1.getRowLowerEqualsUpper()\n+ if( lix.getRowLowerEqualsUpper() && rix0.isRowLowerEqualsUpper() && rix1.isRowLowerEqualsUpper()\n&& lix.getInput().get(2).getName().equals(itervar)\n&& rix0.getInput().get(1).getName().equals(itervar)\n&& rix1.getInput().get(1).getName().equals(itervar))\n@@ -245,7 +245,7 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\nrowIx = true;\n}\n//check for colwise\n- if( lix.getColLowerEqualsUpper() && rix0.getColLowerEqualsUpper() && rix1.getColLowerEqualsUpper()\n+ if( lix.getColLowerEqualsUpper() && rix0.isColLowerEqualsUpper() && rix1.isColLowerEqualsUpper()\n&& lix.getInput().get(4).getName().equals(itervar)\n&& rix0.getInput().get(3).getName().equals(itervar)\n&& rix1.getInput().get(3).getName().equals(itervar))\n@@ -406,14 +406,14 @@ public class RewriteForLoopVectorization extends StatementBlockRewriteRule\nboolean[] ret = new boolean[2]; //apply, rowIx\n//check for rowwise\n- if( lix.getRowLowerEqualsUpper() && rix.getRowLowerEqualsUpper()\n+ if( lix.getRowLowerEqualsUpper() && rix.isRowLowerEqualsUpper()\n&& lix.getInput().get(2).getName().equals(itervar)\n&& rix.getInput().get(1).getName().equals(itervar) ) {\nret[0] = true;\nret[1] = true;\n}\n//check for colwise\n- if( lix.getColLowerEqualsUpper() && rix.getColLowerEqualsUpper()\n+ if( lix.getColLowerEqualsUpper() && rix.isColLowerEqualsUpper()\n&& lix.getInput().get(4).getName().equals(itervar)\n&& rix.getInput().get(3).getName().equals(itervar) ) {\nret[0] = true;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteIndexingVectorization.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteIndexingVectorization.java", "diff": "@@ -107,8 +107,8 @@ public class RewriteIndexingVectorization extends HopRewriteRule\nif( hop instanceof IndexingOp ) //right indexing\n{\nIndexingOp ihop0 = (IndexingOp) hop;\n- boolean isSingleRow = ihop0.getRowLowerEqualsUpper();\n- boolean isSingleCol = ihop0.getColLowerEqualsUpper();\n+ boolean isSingleRow = ihop0.isRowLowerEqualsUpper();\n+ boolean isSingleCol = ihop0.isColLowerEqualsUpper();\nboolean appliedRow = false;\n//search for multiple indexing in same row\n@@ -120,7 +120,7 @@ public class RewriteIndexingVectorization extends HopRewriteRule\nihops.add(ihop0);\nfor( Hop c : input.getParent() ){\nif( c != ihop0 && c instanceof IndexingOp && c.getInput().get(0) == input\n- && ((IndexingOp) c).getRowLowerEqualsUpper()\n+ && ((IndexingOp) c).isRowLowerEqualsUpper()\n&& c.getInput().get(1)==ihop0.getInput().get(1) )\n{\nihops.add( c );\n@@ -159,7 +159,7 @@ public class RewriteIndexingVectorization extends HopRewriteRule\nihops.add(ihop0);\nfor( Hop c : input.getParent() ){\nif( c != ihop0 && c instanceof IndexingOp && c.getInput().get(0) == input\n- && ((IndexingOp) c).getColLowerEqualsUpper()\n+ && ((IndexingOp) c).isColLowerEqualsUpper()\n&& c.getInput().get(3)==ihop0.getInput().get(3) 
)\n{\nihops.add( c );\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java", "diff": "@@ -35,11 +35,13 @@ import org.apache.sysml.test.utils.TestUtils;\npublic class RowAggTmplTest extends AutomatedTestBase\n{\n- private static final String TEST_NAME1 = \"rowAggPattern1\";\n- private static final String TEST_NAME2 = \"rowAggPattern2\";\n- private static final String TEST_NAME3 = \"rowAggPattern3\";\n- private static final String TEST_NAME4 = \"rowAggPattern4\";\n- private static final String TEST_NAME5 = \"rowAggPattern5\";\n+ private static final String TEST_NAME = \"rowAggPattern\";\n+ private static final String TEST_NAME1 = TEST_NAME+\"1\";\n+ private static final String TEST_NAME2 = TEST_NAME+\"2\";\n+ private static final String TEST_NAME3 = TEST_NAME+\"3\";\n+ private static final String TEST_NAME4 = TEST_NAME+\"4\";\n+ private static final String TEST_NAME5 = TEST_NAME+\"5\";\n+ private static final String TEST_NAME6 = TEST_NAME+\"6\";\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -51,11 +53,8 @@ public class RowAggTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"0\" }) );\n- addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"1\" }) );\n- addTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"2\" }) );\n- addTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"3\" }) );\n- addTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"4\" }) );\n+ for(int i=1; i<=6; i++)\n+ addTestConfiguration( TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i, new String[] { String.valueOf(i) }) );\n}\n@Test\n@@ -83,6 +82,11 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME5, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenRowAggRewrite6() {\n+ testCodegenIntegration( TEST_NAME6, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenRowAgg1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -108,6 +112,11 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME5, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenRowAgg6() {\n+ testCodegenIntegration( TEST_NAME6, false, ExecType.CP );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/rowAggPattern6.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(seq(1,15), 5, 3, byrow=TRUE);\n+v = seq(1,3);\n+P = cbind(seq(1,5),seq(2,6));\n+\n+S = t(X) %*% ((P[,1] * (1-P[,1])) * (X %*% v));\n+\n+writeMM(as(S, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/rowAggPattern6.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(1,15), rows=5, cols=3);\n+v = seq(1,3);\n+P = cbind(seq(1,5),seq(2,6));\n+\n+S = t(X) %*% ((P[,1] * (1-P[,1])) * (X %*% v));\n+\n+write(S,$1)\n+\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1407] Extended code generator (right indexing in cell/rowagg)
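The record above extends the code generator so that right indexing such as X[i,j] can be fused into cell- and row-wise templates through a LOOKUP_RC1 ternary node. As a rough, standalone illustration of what such a lookup reduces to at runtime (the class, method, and argument names below are invented for this sketch and are not the generated operator API):

```java
// Rough illustration of the LOOKUP_RC1 idea referenced in the record: inside a
// fused row-wise operator, right indexing X[i, cix] degenerates to reading one
// cell from the current dense row. Names and data layout are assumptions.
public class LookupSketch {
    // a: current row of X, cix: 1-based column index as it appears in the script
    static double lookupRC1(double[] a, int cix) {
        return a[cix - 1];
    }

    public static void main(String[] args) {
        double[] row = {1.0, 2.0, 3.0};
        System.out.println(lookupRC1(row, 2)); // 2.0, i.e. X[i,2]
    }
}
```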
49,738
20.03.2017 12:24:28
25,200
128acb3adccfa03fa5e91495349d2e95c5cf0f2e
Robustness parfor parameter handling (cmd args, quotes)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/dml/DmlSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/dml/DmlSyntacticValidator.java", "diff": "@@ -685,7 +685,10 @@ public class DmlSyntacticValidator extends CommonSyntacticValidator implements D\nHashMap<String, String> parForParamValues = new HashMap<String, String>();\nif(ctx.parForParams != null && ctx.parForParams.size() > 0) {\nfor(StrictParameterizedExpressionContext parForParamCtx : ctx.parForParams) {\n- parForParamValues.put(parForParamCtx.paramName.getText(), parForParamCtx.paramVal.getText());\n+ String paramVal = parForParamCtx.paramVal.getText();\n+ if( argVals.containsKey(paramVal) )\n+ paramVal = argVals.get(paramVal);\n+ parForParamValues.put(parForParamCtx.paramName.getText(), paramVal);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/pydml/PydmlSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/pydml/PydmlSyntacticValidator.java", "diff": "@@ -1353,7 +1353,10 @@ public class PydmlSyntacticValidator extends CommonSyntacticValidator implements\nHashMap<String, String> parForParamValues = new HashMap<String, String>();\nif(ctx.parForParams != null && ctx.parForParams.size() > 0) {\nfor(StrictParameterizedExpressionContext parForParamCtx : ctx.parForParams) {\n- parForParamValues.put(parForParamCtx.paramName.getText(), parForParamCtx.paramVal.getText());\n+ String paramVal = parForParamCtx.paramVal.getText();\n+ if( argVals.containsKey(paramVal) )\n+ paramVal = argVals.get(paramVal);\n+ parForParamValues.put(parForParamCtx.paramName.getText(), paramVal);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ForProgramBlock.java", "diff": "@@ -49,8 +49,7 @@ public class ForProgramBlock extends ProgramBlock\nprotected String[] _iterablePredicateVars; //from,to,where constants/internal vars not captured via instructions\n- public ForProgramBlock(Program prog, String[] iterPredVars) throws DMLRuntimeException\n- {\n+ public ForProgramBlock(Program prog, String[] iterPredVars) {\nsuper(prog);\n_exitInstructions = new ArrayList<Instruction>();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/FunctionProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/FunctionProgramBlock.java", "diff": "@@ -42,7 +42,7 @@ public class FunctionProgramBlock extends ProgramBlock\nprivate boolean _recompileOnce = false;\n- public FunctionProgramBlock( Program prog, ArrayList<DataIdentifier> inputParams, ArrayList<DataIdentifier> outputParams) throws DMLRuntimeException\n+ public FunctionProgramBlock( Program prog, ArrayList<DataIdentifier> inputParams, ArrayList<DataIdentifier> outputParams)\n{\nsuper(prog);\n_childBlocks = new ArrayList<ProgramBlock>();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/IfProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/IfProgramBlock.java", "diff": "@@ -50,7 +50,7 @@ public class IfProgramBlock extends ProgramBlock\nprivate ArrayList<ProgramBlock> _childBlocksIfBody;\nprivate ArrayList<ProgramBlock> _childBlocksElseBody;\n- public IfProgramBlock(Program prog, ArrayList<Instruction> predicate) throws DMLRuntimeException{\n+ public IfProgramBlock(Program prog, ArrayList<Instruction> 
predicate) {\nsuper(prog);\n_childBlocksIfBody = new ArrayList<ProgramBlock>();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -103,6 +103,7 @@ import org.apache.sysml.runtime.instructions.cp.StringObject;\nimport org.apache.sysml.runtime.instructions.cp.VariableCPInstruction;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.utils.Statistics;\nimport org.apache.sysml.yarn.ropt.YarnClusterAnalyzer;\n@@ -308,7 +309,6 @@ public class ParForProgramBlock extends ForProgramBlock\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic ParForProgramBlock(int ID, Program prog, String[] iterPredVars, HashMap<String,String> params)\n- throws DMLRuntimeException\n{\nsuper(prog, iterPredVars);\n@@ -322,21 +322,18 @@ public class ParForProgramBlock extends ForProgramBlock\n//parse and use internal parameters (already set to default if not specified)\n_params = params;\n- try\n- {\n- _numThreads = Integer.parseInt( _params.get(ParForStatementBlock.PAR) );\n- _taskPartitioner = PTaskPartitioner.valueOf( _params.get(ParForStatementBlock.TASK_PARTITIONER).toUpperCase() );\n- _taskSize = Integer.parseInt( _params.get(ParForStatementBlock.TASK_SIZE) );\n- _dataPartitioner = PDataPartitioner.valueOf( _params.get(ParForStatementBlock.DATA_PARTITIONER).toUpperCase() );\n- _resultMerge = PResultMerge.valueOf( _params.get(ParForStatementBlock.RESULT_MERGE).toUpperCase() );\n- _execMode = PExecMode.valueOf( _params.get(ParForStatementBlock.EXEC_MODE).toUpperCase() );\n- _optMode = POptMode.valueOf( _params.get(ParForStatementBlock.OPT_MODE).toUpperCase());\n- _optLogLevel = Level.toLevel( _params.get(ParForStatementBlock.OPT_LOG));\n- _monitor = (Integer.parseInt(_params.get(ParForStatementBlock.PROFILE) ) == 1);\n- }\n- catch(Exception ex)\n- {\n- //runtime exception in order to keep signature of program block\n+ try {\n+ _numThreads = Integer.parseInt( getParForParam(ParForStatementBlock.PAR) );\n+ _taskPartitioner = PTaskPartitioner.valueOf( getParForParam(ParForStatementBlock.TASK_PARTITIONER) );\n+ _taskSize = Integer.parseInt( getParForParam(ParForStatementBlock.TASK_SIZE) );\n+ _dataPartitioner = PDataPartitioner.valueOf( getParForParam(ParForStatementBlock.DATA_PARTITIONER) );\n+ _resultMerge = PResultMerge.valueOf( getParForParam(ParForStatementBlock.RESULT_MERGE) );\n+ _execMode = PExecMode.valueOf( getParForParam(ParForStatementBlock.EXEC_MODE) );\n+ _optMode = POptMode.valueOf( getParForParam(ParForStatementBlock.OPT_MODE) );\n+ _optLogLevel = Level.toLevel( getParForParam(ParForStatementBlock.OPT_LOG) );\n+ _monitor = (Integer.parseInt(getParForParam(ParForStatementBlock.PROFILE) ) == 1);\n+ }\n+ catch(Exception ex) {\nthrow new RuntimeException(\"Error parsing specified ParFOR parameters.\",ex);\n}\n@@ -364,129 +361,113 @@ public class ParForProgramBlock extends ForProgramBlock\nLOG.trace(\"PARFOR: ParForProgramBlock created with mode = \"+_execMode+\", optmode = \"+_optMode+\", numThreads = \"+_numThreads);\n}\n- public long getID()\n- {\n+ public long getID() {\nreturn _ID;\n}\n- public PExecMode getExecMode()\n- {\n+ public PExecMode getExecMode() {\nreturn _execMode;\n}\n- public HashMap<String,String> getParForParams()\n- {\n+ public HashMap<String,String> 
getParForParams() {\nreturn _params;\n}\n+ public String getParForParam(String key) {\n+ String tmp = getParForParams().get(key);\n+ return (tmp == null) ? null :\n+ UtilFunctions.unquote(tmp).toUpperCase();\n+ }\n+\npublic ArrayList<String> getResultVariables()\n{\nreturn _resultVars;\n}\n- public void setResultVariables(ArrayList<String> resultVars)\n- {\n+ public void setResultVariables(ArrayList<String> resultVars) {\n_resultVars = resultVars;\n}\n- public void disableOptimization()\n- {\n+ public void disableOptimization() {\n_optMode = POptMode.NONE;\n}\n- public POptMode getOptimizationMode()\n- {\n+ public POptMode getOptimizationMode() {\nreturn _optMode;\n}\n- public int getDegreeOfParallelism()\n- {\n+ public int getDegreeOfParallelism() {\nreturn _numThreads;\n}\n- public void setDegreeOfParallelism(int k)\n- {\n+ public void setDegreeOfParallelism(int k) {\n_numThreads = k;\n_params.put(ParForStatementBlock.PAR, String.valueOf(_numThreads)); //kept up-to-date for copies\nsetLocalParWorkerIDs();\n}\n- public void setCPCaching(boolean flag)\n- {\n+ public void setCPCaching(boolean flag) {\n_enableCPCaching = flag;\n}\n- public void setRuntimePiggybacking(boolean flag)\n- {\n+ public void setRuntimePiggybacking(boolean flag) {\n_enableRuntimePiggybacking = flag;\n}\n- public void setExecMode( PExecMode mode )\n- {\n+ public void setExecMode( PExecMode mode ) {\n_execMode = mode;\n_params.put(ParForStatementBlock.EXEC_MODE, String.valueOf(_execMode)); //kept up-to-date for copies\n}\n- public void setTaskPartitioner( PTaskPartitioner partitioner )\n- {\n+ public void setTaskPartitioner( PTaskPartitioner partitioner ) {\n_taskPartitioner = partitioner;\n_params.put(ParForStatementBlock.TASK_PARTITIONER, String.valueOf(_taskPartitioner)); //kept up-to-date for copies\n}\n- public void setTaskSize( long tasksize )\n- {\n+ public void setTaskSize( long tasksize ) {\n_taskSize = tasksize;\n_params.put(ParForStatementBlock.TASK_SIZE, String.valueOf(_taskSize)); //kept up-to-date for copies\n}\n- public void setDataPartitioner(PDataPartitioner partitioner)\n- {\n+ public void setDataPartitioner(PDataPartitioner partitioner) {\n_dataPartitioner = partitioner;\n_params.put(ParForStatementBlock.DATA_PARTITIONER, String.valueOf(_dataPartitioner)); //kept up-to-date for copies\n}\n- public void enableColocatedPartitionedMatrix( String varname )\n- {\n+ public void enableColocatedPartitionedMatrix( String varname ) {\n//only called from optimizer\n_colocatedDPMatrix = varname;\n}\n- public void setTransposeSparseColumnVector( boolean flag )\n- {\n+ public void setTransposeSparseColumnVector( boolean flag ) {\n_tSparseCol = flag;\n}\n- public void setPartitionReplicationFactor( int rep )\n- {\n+ public void setPartitionReplicationFactor( int rep ) {\n//only called from optimizer\n_replicationDP = rep;\n}\n- public void setExportReplicationFactor( int rep )\n- {\n+ public void setExportReplicationFactor( int rep ) {\n//only called from optimizer\n_replicationExport = rep;\n}\n- public void disableJVMReuse()\n- {\n+ public void disableJVMReuse() {\n//only called from optimizer\n_jvmReuse = false;\n}\n- public void disableMonitorReport()\n- {\n+ public void disableMonitorReport() {\n_monitorReport = false;\n}\n- public void setResultMerge(PResultMerge merge)\n- {\n+ public void setResultMerge(PResultMerge merge) {\n_resultMerge = merge;\n_params.put(ParForStatementBlock.RESULT_MERGE, String.valueOf(_resultMerge)); //kept up-to-date for copies\n}\n- public void setRecompileMemoryBudget( double 
localMem )\n- {\n+ public void setRecompileMemoryBudget( double localMem ) {\n_recompileMemoryBudget = localMem;\n}\n@@ -502,8 +483,7 @@ public class ParForProgramBlock extends ForProgramBlock\n_variablesECache = vars;\n}\n- public long getNumIterations()\n- {\n+ public long getNumIterations() {\nreturn _numIterations;\n}\n@@ -511,8 +491,7 @@ public class ParForProgramBlock extends ForProgramBlock\nreturn _hasFunctions;\n}\n- public static void initInternalConfigurations( DMLConfig conf )\n- {\n+ public static void initInternalConfigurations( DMLConfig conf ) {\nALLOW_REUSE_MR_JVMS = conf.getBooleanValue(DMLConfig.JVM_REUSE);\nALLOW_REUSE_MR_PAR_WORKER = ALLOW_REUSE_MR_JVMS;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "diff": "@@ -64,9 +64,7 @@ public class ProgramBlock\nprotected long _tid = 0; //by default _t0\n- public ProgramBlock(Program prog)\n- throws DMLRuntimeException\n- {\n+ public ProgramBlock(Program prog) {\n_prog = prog;\n_inst = new ArrayList<Instruction>();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/WhileProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/WhileProgramBlock.java", "diff": "@@ -50,7 +50,7 @@ public class WhileProgramBlock extends ProgramBlock\nprivate ArrayList <Instruction> _exitInstructions ;\nprivate ArrayList<ProgramBlock> _childBlocks;\n- public WhileProgramBlock(Program prog, ArrayList<Instruction> predicate) throws DMLRuntimeException{\n+ public WhileProgramBlock(Program prog, ArrayList<Instruction> predicate) {\nsuper(prog);\n_predicate = predicate;\n_predicateResultVar = findPredicateResultVar ();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1425] Robustness parfor parameter handling (cmd args, quotes)
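The commit above makes parfor parameter parsing tolerant of command-line argument substitution and quoted values by unquoting and upper-casing a value before parsing it into an enum. A minimal, self-contained sketch of that normalization idea (the class and helper below are assumptions for illustration, not SystemML's UtilFunctions API):

```java
import java.util.HashMap;
import java.util.Map;

// Minimal sketch: strip one pair of matching single or double quotes and
// upper-case the value, so that mode="remote_spark" parses like REMOTE_SPARK.
public class ParamNormalizer {
    public static String unquote(String s) {
        if (s != null && s.length() >= 2
            && ((s.startsWith("\"") && s.endsWith("\""))
             || (s.startsWith("'") && s.endsWith("'"))))
            return s.substring(1, s.length() - 1);
        return s;
    }

    // Look up a parameter and normalize it; returns null if absent.
    public static String getParam(Map<String, String> params, String key) {
        String tmp = params.get(key);
        return (tmp == null) ? null : unquote(tmp).toUpperCase();
    }

    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("mode", "\"remote_spark\"");
        System.out.println(getParam(params, "mode")); // REMOTE_SPARK
    }
}
```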
49,738
20.03.2017 15:21:57
25,200
f693bcac7d6968452fc2e07c9a06e6b7fe1cbcdd
Fix issues of worst-case and parfor memory estimates. This patch fixes various special cases of worst-case and parfor memory estimates, including (1) incomplete worst-case estimates, (2) parfor w/ spark execution type, (3) constrained parfor optimizer with unspecified degree of parallelism and/or execution type.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -618,7 +618,7 @@ public abstract class Hop\n//infer the output stats\nwstats = inferOutputCharacteristics(memo);\n- if( wstats != null ) {\n+ if( wstats != null && wstats[0] >= 0 && wstats[1] >= 0 ) {\n//use worst case characteristics to estimate mem\nlong lnnz = ((wstats[2]>=0)?wstats[2]:wstats[0]*wstats[1]);\n_outputMemEstimate = computeOutputMemEstimate( wstats[0], wstats[1], lnnz );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/CostEstimator.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/CostEstimator.java", "diff": "@@ -169,7 +169,8 @@ public abstract class CostEstimator\ncase PARFOR:\ntmp = node.getParam(ParamType.NUM_ITERATIONS);\nN = (tmp!=null) ? (double)Long.parseLong(tmp) : FACTOR_NUM_ITERATIONS;\n- val = N * getSumEstimate(measure, node.getChilds(), et) / node.getK();\n+ val = N * getSumEstimate(measure, node.getChilds(), et)\n+ / Math.max(node.getK(), 1);\nbreak;\ndefault:\n//do nothing\n@@ -187,10 +188,11 @@ public abstract class CostEstimator\nval = getMaxEstimate(measure, node.getChilds(), et);\nbreak;\ncase PARFOR:\n- if( node.getExecType() == OptNode.ExecType.MR )\n+ if( node.getExecType() == OptNode.ExecType.MR || node.getExecType() == OptNode.ExecType.SPARK )\nval = getMaxEstimate(measure, node.getChilds(), et); //executed in different JVMs\n- else if ( node.getExecType() == OptNode.ExecType.CP )\n- val = getMaxEstimate(measure, node.getChilds(), et) * node.getK(); //everything executed within 1 JVM\n+ else if ( node.getExecType() == OptNode.ExecType.CP || node.getExecType() == null )\n+ val = getMaxEstimate(measure, node.getChilds(), et)\n+ * Math.max(node.getK(), 1); //everything executed within 1 JVM\nbreak;\ndefault:\n//do nothing\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/CostEstimatorHops.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/CostEstimatorHops.java", "diff": "@@ -93,7 +93,7 @@ public class CostEstimatorHops extends CostEstimator\n}\n//check for forced runtime platform\n- if( h.getForcedExecType()==ExecType.MR || h.getExecType()==ExecType.SPARK)\n+ if( h.getForcedExecType()==ExecType.MR || h.getForcedExecType()==ExecType.SPARK)\n{\nvalue = DEFAULT_MEM_REMOTE;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1427] Fix issues of worst-case and parfor memory estimates. This patch fixes various special cases of worst-case and parfor memory estimates, including (1) incomplete worst-case estimates, (2) parfor w/ spark execution type, (3) constrained parfor optimizer with unspecified degree of parallelism and/or execution type.
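A recurring detail in the fix above is guarding the degree of parallelism k with Math.max(k, 1), so time estimates do not divide by zero and memory estimates do not collapse while k is still unknown. A tiny sketch of that guarded aggregation (all names here are illustrative, not the optimizer's CostEstimator API):

```java
// Minimal sketch of the guarded cost aggregation idea: when the degree of
// parallelism k is unknown (0 or negative), fall back to 1 so the estimate
// stays meaningful instead of dividing by zero or multiplying by zero.
public class CostSketch {
    // time-like measure: total child cost scaled by iterations, divided by parallelism
    static double parforTime(double sumChildCost, long numIter, int k) {
        return numIter * sumChildCost / Math.max(k, 1);
    }

    // memory-like measure: max child memory times parallelism (single JVM)
    static double parforMem(double maxChildMem, int k) {
        return maxChildMem * Math.max(k, 1);
    }

    public static void main(String[] args) {
        System.out.println(parforTime(10.0, 100, 0)); // 1000.0, no division by zero
        System.out.println(parforMem(64.0, 0));       // 64.0, not 0.0
    }
}
```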
49,738
21.03.2017 19:40:39
25,200
1aac97ee1b25537278f292d89b4a7c4ab1355c2e
Extended parfor block partitioning (support in dpesp)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -103,6 +103,7 @@ import org.apache.sysml.runtime.instructions.cp.IntObject;\nimport org.apache.sysml.runtime.instructions.cp.StringObject;\nimport org.apache.sysml.runtime.instructions.cp.VariableCPInstruction;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.utils.Statistics;\n@@ -214,6 +215,18 @@ public class ParForProgramBlock extends ForProgramBlock\nreturn _dpf == PDataPartitionFormat.COLUMN_BLOCK_WISE_N\n|| _dpf == PDataPartitionFormat.ROW_BLOCK_WISE_N;\n}\n+ public long getNumParts(MatrixCharacteristics mc) {\n+ switch( _dpf ) {\n+ case ROW_WISE: return mc.getRows();\n+ case ROW_BLOCK_WISE: return mc.getNumRowBlocks();\n+ case ROW_BLOCK_WISE_N: return (long)Math.ceil((double)mc.getRows()/_N);\n+ case COLUMN_WISE: return mc.getCols();\n+ case COLUMN_BLOCK_WISE: return mc.getNumColBlocks();\n+ case COLUMN_BLOCK_WISE_N: return (long)Math.ceil((double)mc.getCols()/_N);\n+ default:\n+ throw new RuntimeException(\"Unsupported partition format: \"+_dpf);\n+ }\n+ }\n}\npublic enum PDataPartitioner {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSpark.java", "diff": "@@ -92,8 +92,7 @@ public class RemoteDPParForSpark\n//compute number of reducers (to avoid OOMs and reduce memory pressure)\nint numParts = SparkUtils.getNumPreferredPartitions(mc, in);\n- int numParts2 = (int)((dpf._dpf==PDataPartitionFormat.ROW_BLOCK_WISE) ? mc.getRows() : mc.getCols());\n- int numReducers2 = Math.max(numReducers, Math.min(numParts, numParts2));\n+ int numReducers2 = Math.max(numReducers, Math.min(numParts, (int)dpf.getNumParts(mc)));\n//core parfor datapartition-execute (w/ or w/o shuffle, depending on data characteristics)\nRemoteDPParForSparkWorker efun = new RemoteDPParForSparkWorker(program, clsMap,\n@@ -177,7 +176,9 @@ public class RemoteDPParForSpark\nprivate static boolean requiresGrouping(PartitionFormat dpf, MatrixObject mo) {\nMatrixCharacteristics mc = mo.getMatrixCharacteristics();\nreturn ((dpf == PartitionFormat.ROW_WISE && mc.getNumColBlocks() > 1)\n- || (dpf == PartitionFormat.COLUMN_WISE && mc.getNumRowBlocks() > 1))\n+ || (dpf == PartitionFormat.COLUMN_WISE && mc.getNumRowBlocks() > 1)\n+ || (dpf._dpf == PDataPartitionFormat.ROW_BLOCK_WISE_N && mc.getNumColBlocks() > 1)\n+ || (dpf._dpf == PDataPartitionFormat.COLUMN_BLOCK_WISE_N && mc.getNumRowBlocks() > 1))\n&& !hasInputDataSet(dpf, mo);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParForSparkWorker.java", "diff": "@@ -87,8 +87,18 @@ public class RemoteDPParForSparkWorker extends ParWorker implements PairFlatMapF\n_aIters = aiters;\n//setup matrix block partition meta data\n- _rlen = (dpf != PartitionFormat.ROW_WISE) ? (int)mc.getRows() : 1;\n- _clen = (dpf != PartitionFormat.COLUMN_WISE) ? 
(int)mc.getCols() : 1;\n+ switch( dpf._dpf ) {\n+ case ROW_WISE:\n+ _rlen = (int)mc.getRows(); _clen = 1; break;\n+ case ROW_BLOCK_WISE_N:\n+ _rlen = dpf._N; _clen = (int)mc.getCols(); break;\n+ case COLUMN_BLOCK_WISE:\n+ _rlen = 1; _clen = (int)mc.getCols(); break;\n+ case COLUMN_BLOCK_WISE_N:\n+ _rlen = (int)mc.getRows(); _clen = dpf._N; break;\n+ default:\n+ throw new RuntimeException(\"Unsupported partition format: \"+dpf._dpf.name());\n+ }\n_brlen = mc.getRowsPerBlock();\n_bclen = mc.getColsPerBlock();\n_tSparseCol = tSparseCol;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerConstrained.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerConstrained.java", "diff": "@@ -29,6 +29,7 @@ import org.apache.sysml.parser.ParForStatementBlock;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock;\n+import org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitionFormat;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitioner;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PExecMode;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.POptMode;\n@@ -386,7 +387,9 @@ public class OptimizerConstrained extends OptimizerRuleBased\nif( rIsAccessByIterationVariable(pn, moVarname, iterVarname) &&\n((moDpf==PartitionFormat.ROW_WISE && mo.getNumRows()==_N ) ||\n- (moDpf==PartitionFormat.COLUMN_WISE && mo.getNumColumns()==_N)) )\n+ (moDpf==PartitionFormat.COLUMN_WISE && mo.getNumColumns()==_N) ||\n+ (moDpf._dpf==PDataPartitionFormat.ROW_BLOCK_WISE_N && mo.getNumRows()<=_N*moDpf._N)||\n+ (moDpf._dpf==PDataPartitionFormat.COLUMN_BLOCK_WISE_N && mo.getNumColumns()<=_N*moDpf._N)) )\n{\nint k = (int)Math.min(_N,_rk2);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -1524,7 +1524,9 @@ public class OptimizerRuleBased extends Optimizer\nif( rIsAccessByIterationVariable(pn, moVarname, iterVarname) &&\n((moDpf==PartitionFormat.ROW_WISE && mo.getNumRows()==_N ) ||\n- (moDpf==PartitionFormat.COLUMN_WISE && mo.getNumColumns()==_N)) )\n+ (moDpf==PartitionFormat.COLUMN_WISE && mo.getNumColumns()==_N) ||\n+ (moDpf._dpf==PDataPartitionFormat.ROW_BLOCK_WISE_N && mo.getNumRows()<=_N*moDpf._N)||\n+ (moDpf._dpf==PDataPartitionFormat.COLUMN_BLOCK_WISE_N && mo.getNumColumns()<=_N*moDpf._N)) )\n{\nint k = (int)Math.min(_N,_rk2);\n@@ -1567,10 +1569,16 @@ public class OptimizerRuleBased extends Optimizer\nif( h.getInput().get(1) instanceof DataOp )\nindexAccess = h.getInput().get(1).getName();\nbreak;\n+ case ROW_BLOCK_WISE_N: //input 1 and 2 have same slope and var\n+ indexAccess = rGetVarFromExpression(h.getInput().get(1));\n+ break;\ncase COLUMN_WISE: //input 3 and 4 eq\nif( h.getInput().get(3) instanceof DataOp )\nindexAccess = h.getInput().get(3).getName();\nbreak;\n+ case COLUMN_BLOCK_WISE_N: //input 3 and 4 have same slope and var\n+ indexAccess = rGetVarFromExpression(h.getInput().get(3));\n+ break;\ndefault:\n//do nothing\n@@ -1583,6 +1591,17 @@ public class OptimizerRuleBased extends Optimizer\nreturn ret;\n}\n+ private static String rGetVarFromExpression(Hop current) {\n+ String var = null;\n+ for( 
Hop c : current.getInput() ) {\n+ var = rGetVarFromExpression(c);\n+ if( var != null )\n+ return var;\n+ }\n+ return (current instanceof DataOp) ?\n+ current.getName() : null;\n+ }\n+\n///////\n//REWRITE transpose sparse vector operations\n///\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForBlockwiseDataPartitioningTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForBlockwiseDataPartitioningTest.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.test.integration.functions.parfor;\nimport java.util.HashMap;\n+import org.junit.Assert;\nimport org.junit.Test;\nimport org.apache.sysml.api.DMLScript;\n@@ -68,17 +69,17 @@ public class ParForBlockwiseDataPartitioningTest extends AutomatedTestBase\n}\n@Test\n- public void testParForRowBlockPartitioningLocalRemoteSparkDense() {\n+ public void testParForRowBlockPartitioningLocalRemoteDense() {\nrunParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.LOCAL, PExecMode.REMOTE_SPARK, false);\n}\n@Test\n- public void testParForRowBlockPartitioningRemoteSparkLocalDense() {\n+ public void testParForRowBlockPartitioningRemoteLocalDense() {\nrunParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.REMOTE_SPARK, PExecMode.LOCAL, false);\n}\n@Test\n- public void testParForRowBlockPartitioningRemoteSparkRemoteDense() {\n+ public void testParForRowBlockPartitioningRemoteRemoteDense() {\nrunParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.REMOTE_SPARK, PExecMode.REMOTE_SPARK, false);\n}\n@@ -88,17 +89,17 @@ public class ParForBlockwiseDataPartitioningTest extends AutomatedTestBase\n}\n@Test\n- public void testParForRowBlockPartitioningLocalRemoteSparkSparse() {\n+ public void testParForRowBlockPartitioningLocalRemoteSparse() {\nrunParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.LOCAL, PExecMode.REMOTE_SPARK, true);\n}\n@Test\n- public void testParForRowBlockPartitioningRemoteSparkLocalSparse() {\n+ public void testParForRowBlockPartitioningRemoteLocalSparse() {\nrunParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.REMOTE_SPARK, PExecMode.LOCAL, true);\n}\n@Test\n- public void testParForRowBlockPartitioningRemoteSparkRemoteSparse() {\n+ public void testParForRowBlockPartitioningRemoteRemoteSparse() {\nrunParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.REMOTE_SPARK, PExecMode.REMOTE_SPARK, true);\n}\n@@ -108,17 +109,17 @@ public class ParForBlockwiseDataPartitioningTest extends AutomatedTestBase\n}\n@Test\n- public void testParForColBlockPartitioningLocalRemoteSparkDense() {\n+ public void testParForColBlockPartitioningLocalRemoteDense() {\nrunParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.LOCAL, PExecMode.REMOTE_SPARK, false);\n}\n@Test\n- public void testParForColBlockPartitioningRemoteSparkLocalDense() {\n+ public void testParForColBlockPartitioningRemoteLocalDense() {\nrunParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.REMOTE_SPARK, PExecMode.LOCAL, false);\n}\n@Test\n- public void testParForColBlockPartitioningRemoteSparkRemoteDense() {\n+ public void testParForColBlockPartitioningRemoteRemoteDense() {\nrunParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.REMOTE_SPARK, PExecMode.REMOTE_SPARK, false);\n}\n@@ -128,39 +129,52 @@ public class ParForBlockwiseDataPartitioningTest extends AutomatedTestBase\n}\n@Test\n- public void testParForColBlockPartitioningLocalRemoteSparkSparse() {\n+ public void testParForColBlockPartitioningLocalRemoteSparse() 
{\nrunParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.LOCAL, PExecMode.REMOTE_SPARK, true);\n}\n@Test\n- public void testParForColBlockPartitioningRemoteSparkLocalSparse() {\n+ public void testParForColBlockPartitioningRemoteLocalSparse() {\nrunParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.REMOTE_SPARK, PExecMode.LOCAL, true);\n}\n@Test\n- public void testParForColBlockPartitioningRemoteSparkRemoteSparse() {\n+ public void testParForColBlockPartitioningRemoteRemoteSparse() {\nrunParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.REMOTE_SPARK, PExecMode.REMOTE_SPARK, true);\n}\n+ //fused data partition execute\n+\n+ @Test\n+ public void testParForRowBlockPartitioningRemoteRemoteFusedDense() {\n+ runParForDataPartitioningTest(TEST_NAME1, PDataPartitioner.UNSPECIFIED, PExecMode.REMOTE_SPARK_DP, false);\n+ }\n+\n+ @Test\n+ public void testParForColBlockPartitioningRemoteRemoteFusedDense() {\n+ runParForDataPartitioningTest(TEST_NAME2, PDataPartitioner.UNSPECIFIED, PExecMode.REMOTE_SPARK_DP, false);\n+ }\n+\n+\n//negative examples\n@Test\n- public void testParForRowBlockPartitioningRemoteSparkLocalSparseNegative() {\n+ public void testParForRowBlockPartitioningRemoteLocalSparseNegative() {\nrunParForDataPartitioningTest(TEST_NAME3, PDataPartitioner.REMOTE_SPARK, PExecMode.LOCAL, true);\n}\n@Test\n- public void testParForRowBlockPartitioningRemoteSparkRemoteSparseNegative() {\n+ public void testParForRowBlockPartitioningRemoteRemoteSparseNegative() {\nrunParForDataPartitioningTest(TEST_NAME3, PDataPartitioner.REMOTE_SPARK, PExecMode.REMOTE_SPARK, true);\n}\n@Test\n- public void testParForColBlockPartitioningRemoteSparkLocalSparseNegative() {\n+ public void testParForColBlockPartitioningRemoteLocalSparseNegative() {\nrunParForDataPartitioningTest(TEST_NAME4, PDataPartitioner.REMOTE_SPARK, PExecMode.LOCAL, true);\n}\n@Test\n- public void testParForColBlockPartitioningRemoteSparkRemoteSparseNegative() {\n+ public void testParForColBlockPartitioningRemoteRemoteSparseNegative() {\nrunParForDataPartitioningTest(TEST_NAME4, PDataPartitioner.REMOTE_SPARK, PExecMode.REMOTE_SPARK, true);\n}\n@@ -183,7 +197,7 @@ public class ParForBlockwiseDataPartitioningTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{\"-args\", input(\"V\"),\n+ programArgs = new String[]{\"-stats\", \"-args\", input(\"V\"),\npartitioner.name(), mode.name(), output(\"R\") };\nfullRScriptName = HOME + testname + \".R\";\n@@ -204,6 +218,11 @@ public class ParForBlockwiseDataPartitioningTest extends AutomatedTestBase\nHashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"R\");\nHashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"Rout\");\nTestUtils.compareMatrices(dmlfile, rfile, eps, \"DML\", \"R\");\n+\n+ //test for correct plan\n+ boolean pos = testname.equals(TEST_NAME1) || testname.equals(TEST_NAME2);\n+ Assert.assertEquals(pos, heavyHittersContainsSubString(\"ParFor-DPSP\")\n+ || heavyHittersContainsSubString(\"ParFor-DPESP\"));\n}\nfinally\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1310] Extended parfor block partitioning (support in dpesp)
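The partitioning support above hinges on mapping a partition format to a partition count, e.g. ceil(rows / N) for row-block-wise partitions of size N. A compact, self-contained sketch of that mapping (the enum and method here are stand-ins, not the project's PDataPartitionFormat):

```java
// Minimal sketch of counting block-wise partitions of an R x C matrix for a
// few partition formats; the enum below is illustrative only.
public class PartitionCount {
    enum Format { ROW_WISE, ROW_BLOCK_WISE_N, COLUMN_WISE, COLUMN_BLOCK_WISE_N }

    static long numParts(long rows, long cols, Format f, long n) {
        switch (f) {
            case ROW_WISE:            return rows;
            case ROW_BLOCK_WISE_N:    return (long) Math.ceil((double) rows / n);
            case COLUMN_WISE:         return cols;
            case COLUMN_BLOCK_WISE_N: return (long) Math.ceil((double) cols / n);
            default: throw new IllegalArgumentException("Unsupported format: " + f);
        }
    }

    public static void main(String[] args) {
        // 10,000 x 1,000 matrix partitioned into blocks of 1,000 rows -> 10 parts
        System.out.println(numParts(10000, 1000, Format.ROW_BLOCK_WISE_N, 1000));
    }
}
```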
49,738
21.03.2017 22:30:40
25,200
95be80c5b7542097c8c6830d8b5037ec26a3b55c
Fix test expected compiled mr jobs (w/ correct mem est)
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/recompile/BranchRemovalTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/recompile/BranchRemovalTest.java", "diff": "@@ -107,9 +107,6 @@ public class BranchRemovalTest extends AutomatedTestBase\nboolean oldFlagBranchRemoval = OptimizerUtils.ALLOW_BRANCH_REMOVAL;\nboolean oldFlagIPA = OptimizerUtils.ALLOW_INTER_PROCEDURAL_ANALYSIS;\n- //boolean oldFlagRand1 = OptimizerUtils.ALLOW_RAND_JOB_RECOMPILE;\n- //boolean oldFlagRand3 = OptimizerUtils.ALLOW_WORSTCASE_SIZE_EXPRESSION_EVALUATION;\n-\nint val = (condition?1:0);\ntry\n@@ -131,10 +128,6 @@ public class BranchRemovalTest extends AutomatedTestBase\nOptimizerUtils.ALLOW_BRANCH_REMOVAL = branchRemoval;\nOptimizerUtils.ALLOW_INTER_PROCEDURAL_ANALYSIS = IPA;\n- //disable rand specific recompile\n- //OptimizerUtils.ALLOW_RAND_JOB_RECOMPILE = false;\n- //OptimizerUtils.ALLOW_WORSTCASE_SIZE_EXPRESSION_EVALUATION = false;\n-\ndouble[][] V = getRandomMatrix(rows, cols, -1, 1, 1.0d, 7);\nwriteInputMatrix(\"X\", V, true);\n@@ -147,24 +140,20 @@ public class BranchRemovalTest extends AutomatedTestBase\nTestUtils.compareMatrices(dmlfile, rfile, 0, \"Stat-DML\", \"Stat-R\");\n//check expected number of compiled and executed MR jobs\n- int expectedNumCompiled = 4; //reblock, 3xGMR (append), write\n+ int expectedNumCompiled = 5; //reblock, 3xGMR (append), write\nint expectedNumExecuted = 0;\nif( branchRemoval && IPA )\nexpectedNumCompiled = 1; //reblock\nelse if( branchRemoval ){\n- expectedNumCompiled = 3; //reblock, GMR (append), write\n+ expectedNumCompiled = condition ? 4 : 3; //reblock, GMR (append), write\n}\ncheckNumCompiledMRJobs(expectedNumCompiled);\ncheckNumExecutedMRJobs(expectedNumExecuted);\n}\n- finally\n- {\n+ finally {\nOptimizerUtils.ALLOW_BRANCH_REMOVAL = oldFlagBranchRemoval;\nOptimizerUtils.ALLOW_INTER_PROCEDURAL_ANALYSIS = oldFlagIPA;\n-\n- //OptimizerUtils.ALLOW_RAND_JOB_RECOMPILE = oldFlagRand1;\n- //OptimizerUtils.ALLOW_WORSTCASE_SIZE_EXPRESSION_EVALUATION = oldFlagRand3;\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1427] Fix test expected compiled mr jobs (w/ correct mem est)
49,717
22.03.2017 12:08:21
25,200
97da0004f1423d40d63372e59c0424d28793ef92
[HOTFIX] Changed unit test LRUCacheMapTest to run only with mvn verify Closes
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<excludes>\n<exclude>**/slowtest/**</exclude>\n<exclude>**/integration/**</exclude>\n+ <exclude>**/test/unit/**</exclude>\n</excludes>\n</configuration>\n" }, { "change_type": "RENAME", "old_path": "src/test/java/org/apache/sysml/test/utils/LRUCacheMapTest.java", "new_path": "src/test/java/org/apache/sysml/test/unit/LRUCacheMapTest.java", "diff": "* specific language governing permissions and limitations\n* under the License.\n*/\n-package org.apache.sysml.test.utils;\n+package org.apache.sysml.test.unit;\nimport org.apache.sysml.utils.LRUCacheMap;\nimport org.junit.Assert;\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Changed unit test LRUCacheMapTest to run only with mvn verify. Closes #436
49,736
22.03.2017 15:25:52
28,800
16e990928fa0201132688a8f7476856a02253030
Fixed maxpooling functions for padding > 0 Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -427,6 +427,12 @@ public class LibMatrixDNN {\noutputBlock.recomputeNonZeros();\n}\n+ /**\n+ * This method computes start and end indexes required for max_pool and max_pool_backward operations.\n+ * This speeds up the performance of max_pool and max_pool_backward\n+ *\n+ * @param params parameters required for max_pool and max_pool_backward operations\n+ */\nprivate static void fillIndexesArray(ConvolutionParameters params) {\nparams.start_indexes_h = new int[params.P];\nparams.end_indexes_h = new int[params.P];\n@@ -434,17 +440,17 @@ public class LibMatrixDNN {\nparams.end_indexes_w = new int[params.Q];\nfor (int p = 0; p < params.P; p++) {\nint start_index_h = p * params.stride_h - params.pad_h;\n- final int end_index_h = Math.min(start_index_h + params.R, params.H);\n- start_index_h = Math.max(start_index_h, 0);\n- params.start_indexes_h[p] = start_index_h;\n- params.end_indexes_h[p] = end_index_h;\n+ int end_index_h = start_index_h + params.R;\n+ // Note: We do not treat pad as zero\n+ params.start_indexes_h[p] = Math.max(start_index_h, 0);\n+ params.end_indexes_h[p] = Math.min(end_index_h, params.H);\n}\nfor (int q = 0; q < params.Q; q++) {\n- int start_index_w = Math.max(q * params.stride_w - params.pad_w, 0);\n- int end_index_w = Math.min(start_index_w + params.S, params.W);\n- start_index_w = Math.max(start_index_w, 0);\n- params.start_indexes_w[q] = start_index_w;\n- params.end_indexes_w[q] = end_index_w;\n+ int start_index_w = q * params.stride_w - params.pad_w;\n+ int end_index_w = start_index_w + params.S;\n+ // Note: We do not treat pad as zero\n+ params.start_indexes_w[q] = Math.max(start_index_w, 0);\n+ params.end_indexes_w[q] = Math.min(end_index_w, params.W);\n}\n}\n@@ -486,6 +492,7 @@ public class LibMatrixDNN {\nif(inVal != 0) {\nfinal int inputOffset = n*params.C*params.H*params.W + c*params.H*params.W;\nint maxIndex = getMaxIndexSparse(p, q, inputOffset, n, c, params.input1, params);\n+ if(maxIndex != -1)\noutputArray[maxIndex] += inVal;\n}\n}\n@@ -510,6 +517,7 @@ public class LibMatrixDNN {\nfinal int inputOffset = n*params.C*params.H*params.W + c*params.H*params.W;\nint maxIndex = getMaxIndexSparse(p, q, inputOffset, n, c, params.input1, params);\n+ if(maxIndex != -1)\noutputArray[maxIndex] += ijv.getV();\n}\n@@ -530,6 +538,7 @@ public class LibMatrixDNN {\nfinal int inputOffset = n*params.C*params.H*params.W + c*params.H*params.W;\nint maxIndex = getMaxIndex(p, q, inputOffset, inputArray, params);\n+ if(maxIndex != -1)\noutputArray[maxIndex] += ijv.getV();\n}\n}\n@@ -543,12 +552,26 @@ public class LibMatrixDNN {\nfor (int p = 0; p < params.P; p++) {\nfor (int q = 0; q < params.Q; q++) {\nint maxIndex = getMaxIndex(p, q, inputOffset, inputArray, params);\n+ if(maxIndex != -1)\noutputArray[maxIndex] += doutArray[outputOffset + p * params.Q + q];\n}\n}\n}\n}\n+ /**\n+ * Returns the index of cell with maximum value. 
This method is optimized for sparse input\n+ *\n+ * @param p output feature map height\n+ * @param q output feature map width\n+ * @param inputOffset offset to be used for input index\n+ * @param n number of images\n+ * @param c number of channels\n+ * @param input input matrix\n+ * @param params convolution parameters\n+ * @return index of the cell with maximum value\n+ * @throws DMLRuntimeException if error occurs\n+ */\nprivate static int getMaxIndexSparse(int p, int q, int inputOffset, int n, int c, MatrixBlock input, ConvolutionParameters params) throws DMLRuntimeException {\nif(!input.isInSparseFormat())\nthrow new DMLRuntimeException(\"Incorrect usage: Only sparse format supported\");\n@@ -562,9 +585,13 @@ public class LibMatrixDNN {\nint start_index_w = params.start_indexes_w[q];\nint end_index_w = params.end_indexes_w[q];\n- int maxIndex = inputOffset + start_index_h*params.W + start_index_w;\n+ int maxIndex = -1;\ndouble maxVal = -Double.MAX_VALUE;\n+ // Note: We do not treat pad as zero and hence we don't do:\n+ // maxVal = 0\n+ // if start_index_h < 0 || start_index_w < 0 || end_index_h >= params.H || end_index_w >= params.W\n+\n// Find maxIndex\ndouble currDoutVal = -1;\nwhile(iter.hasNext()) {\n@@ -585,15 +612,29 @@ public class LibMatrixDNN {\nreturn maxIndex;\n}\n+ /**\n+ * Returns the index of cell with maximum value. This method is optimized for dense input\n+ *\n+ * @param p output feature map height\n+ * @param q output feature map width\n+ * @param inputOffset offset to be used for input index\n+ * @param inputArray input array\n+ * @param params convolution parameters\n+ * @return index of cell with maximum value\n+ */\nprivate static int getMaxIndex(int p, int q, int inputOffset, double [] inputArray, ConvolutionParameters params) {\nint start_index_h = params.start_indexes_h[p];\nint end_index_h = params.end_indexes_h[p];\nint start_index_w = params.start_indexes_w[q];\nint end_index_w = params.end_indexes_w[q];\n- int maxIndex = inputOffset + start_index_h*params.W + start_index_w;\n+ int maxIndex = -1;\ndouble maxVal = -Double.MAX_VALUE;\n+ // Note: We do not treat pad as zero and hence we don't do:\n+ // maxVal = 0\n+ // if start_index_h < 0 || start_index_w < 0 || end_index_h >= params.H || end_index_w >= params.W\n+\n// Find maxIndex\ndouble currDoutVal = -1;\nfor (int h = start_index_h; h < end_index_h; h++) {\n@@ -936,8 +977,7 @@ public class LibMatrixDNN {\nfor (int q = 0; q < params.Q; q++, out_index++) {\nfor (int h = params.start_indexes_h[p]; h < params.end_indexes_h[p]; h++) {\nfor (int w = params.start_indexes_w[q]; w < params.end_indexes_w[q]; w++) {\n- double inVal = params.input1.quickGetValue(n, c*HW + h*params.W + w);\n- outputArray[out_index] = Math.max(outputArray[out_index], inVal);\n+ outputArray[out_index] = Math.max(outputArray[out_index], params.input1.quickGetValue(n, c*HW + h*params.W + w));\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/yarn/ropt/ResourceConfig.java", "new_path": "src/main/java/org/apache/sysml/yarn/ropt/ResourceConfig.java", "diff": "@@ -101,7 +101,7 @@ public class ResourceConfig\npublic long getMaxMRResource()\n{\n- double val = Collections.max(_mrres);\n+ double val = (double) Collections.max(_mrres);\nreturn (long)val;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1428] Fixed maxpooling functions for padding > 0. Closes #437.
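The core of the fix above is that padded regions are not treated as zeros: the pooling window bounds are clamped to the image, and a window that lies entirely inside the padding yields no index (-1) rather than a spurious maximum. A minimal dense sketch of that index computation (names and layout are assumptions, not LibMatrixDNN internals):

```java
// Minimal sketch of the padded max-pooling index logic: the window [start, end)
// is clamped to the H x W image, and a window fully in the padded region
// produces -1 instead of a bogus index.
public class PoolSketch {
    // returns the flat index of the max in row-major data of shape H x W, or -1
    static int maxIndex(double[] data, int H, int W,
                        int p, int q, int stride, int pad, int R, int S) {
        int sh = Math.max(p * stride - pad, 0), eh = Math.min(p * stride - pad + R, H);
        int sw = Math.max(q * stride - pad, 0), ew = Math.min(q * stride - pad + S, W);
        int maxIdx = -1; double maxVal = -Double.MAX_VALUE;
        for (int h = sh; h < eh; h++)
            for (int w = sw; w < ew; w++)
                if (data[h * W + w] > maxVal) { maxVal = data[h * W + w]; maxIdx = h * W + w; }
        return maxIdx;
    }

    public static void main(String[] args) {
        double[] img = {1, 2, 3, 4}; // 2x2 image
        // 2x2 window, stride 2, pad 1: the clamped window covers only cell 0
        System.out.println(maxIndex(img, 2, 2, 0, 0, 2, 1, 2, 2)); // 0
    }
}
```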
49,738
22.03.2017 21:27:00
25,200
f380b52a9d041798b268de8007140b418af1e0db
Fix unnecessarily blocking rdd and broadcast cleanup
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "diff": "@@ -85,9 +85,9 @@ public class SparkExecutionContext extends ExecutionContext\nprivate static final boolean LDEBUG = false; //local debug flag\n//internal configurations\n- private static boolean LAZY_SPARKCTX_CREATION = true;\n- private static boolean ASYNCHRONOUS_VAR_DESTROY = true;\n- private static boolean FAIR_SCHEDULER_MODE = true;\n+ private static final boolean LAZY_SPARKCTX_CREATION = true;\n+ private static final boolean ASYNCHRONOUS_VAR_DESTROY = true;\n+ private static final boolean FAIR_SCHEDULER_MODE = true;\n//executor memory and relative fractions as obtained from the spark configuration\nprivate static SparkClusterConfig _sconf = null;\n@@ -1152,12 +1152,12 @@ public class SparkExecutionContext extends ExecutionContext\n*\n* @param bvar broadcast variable\n*/\n- public void cleanupBroadcastVariable(Broadcast<?> bvar)\n+ public static void cleanupBroadcastVariable(Broadcast<?> bvar)\n{\n- //in comparison to 'unpersist' (which would only delete the broadcast from the executors),\n- //this call also deletes related data from the driver.\n+ //In comparison to 'unpersist' (which would only delete the broadcast\n+ //from the executors), this call also deletes related data from the driver.\nif( bvar.isValid() ) {\n- bvar.destroy( ASYNCHRONOUS_VAR_DESTROY );\n+ bvar.destroy( !ASYNCHRONOUS_VAR_DESTROY );\n}\n}\n@@ -1168,10 +1168,10 @@ public class SparkExecutionContext extends ExecutionContext\n*\n* @param rvar rdd variable to remove\n*/\n- public void cleanupRDDVariable(JavaPairRDD<?,?> rvar)\n+ public static void cleanupRDDVariable(JavaPairRDD<?,?> rvar)\n{\nif( rvar.getStorageLevel()!=StorageLevel.NONE() ) {\n- rvar.unpersist( ASYNCHRONOUS_VAR_DESTROY );\n+ rvar.unpersist( !ASYNCHRONOUS_VAR_DESTROY );\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/PartitionedBroadcast.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/PartitionedBroadcast.java", "diff": "@@ -24,6 +24,7 @@ import java.io.Serializable;\nimport org.apache.spark.broadcast.Broadcast;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheBlock;\n+import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n/**\n* This class is a wrapper around an array of broadcasts of partitioned matrix/frame blocks,\n@@ -101,4 +102,12 @@ public class PartitionedBroadcast<T extends CacheBlock> implements Serializable\nreturn ret;\n}\n+ /**\n+ * This method cleanups all underlying broadcasts of a partitioned broadcast,\n+ * by forward the calls to SparkExecutionContext.cleanupBroadcastVariable.\n+ */\n+ public void destroy() {\n+ for( Broadcast<PartitionedBlock<T>> bvar : _pbc )\n+ SparkExecutionContext.cleanupBroadcastVariable(bvar);\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1433] Fix unnecessarily blocking rdd and broadcast cleanup
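The essence of the fix above is that the cleanup calls expect a 'blocking' flag, while the internal constant expresses 'asynchronous', so the value has to be negated when passed through. A trivial sketch of that relationship, without any Spark dependency (all names are illustrative):

```java
import java.util.function.Consumer;

// Tiny sketch: an internal ASYNCHRONOUS flag must be inverted before being
// handed to an API parameter that means 'blocking'.
public class CleanupFlag {
    static final boolean ASYNCHRONOUS_VAR_DESTROY = true;

    static void cleanup(Consumer<Boolean> destroyWithBlockingFlag) {
        // blocking == !asynchronous
        destroyWithBlockingFlag.accept(!ASYNCHRONOUS_VAR_DESTROY);
    }

    public static void main(String[] args) {
        cleanup(blocking -> System.out.println("blocking=" + blocking)); // blocking=false
    }
}
```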
49,738
23.03.2017 13:37:44
25,200
a429e2df9287b709edd245c6a3211d62ecbf9517
Robust broadcast memory handling (track pinned sizes)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.runtime.controlprogram.caching;\nimport java.io.File;\nimport java.io.IOException;\nimport java.lang.ref.SoftReference;\n+import java.util.concurrent.atomic.AtomicLong;\nimport org.apache.commons.lang.mutable.MutableBoolean;\nimport org.apache.commons.logging.Log;\n@@ -116,6 +117,12 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n@Override protected Long initialValue() { return 0L; }\n};\n+ //current size of live broadcast objects (because Spark's ContextCleaner maintains\n+ //a buffer with references to prevent eager cleanup by GC); note that this is an\n+ //overestimate, because we maintain partitioned broadcasts as soft references, which\n+ //might be collected by the GC and subsequently cleaned up by Spark's ContextCleaner.\n+ private static AtomicLong _refBCs = new AtomicLong(0);\n+\nstatic {\n_seq = new IDSequence();\n}\n@@ -1213,6 +1220,14 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nreturn sizePinned.get();\n}\n+ public static void addBroadcastSize(long size) {\n+ _refBCs.addAndGet(size);\n+ }\n+\n+ public static long getBroadcastSize() {\n+ return _refBCs.longValue();\n+ }\n+\n// --------- STATIC CACHE INIT/CLEANUP OPERATIONS ----------\npublic synchronized static void cleanupCacheDir() {\n@@ -1285,6 +1300,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n//init write-ahead buffer\nLazyWriteBuffer.init();\n+ _refBCs.set(0);\n_activeFlag = true; //turn on caching\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/MatrixObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/MatrixObject.java", "diff": "@@ -482,7 +482,7 @@ public class MatrixObject extends CacheableData<MatrixBlock>\n//guarded rdd collect\nif( ii == InputInfo.BinaryBlockInputInfo && //guarded collect not for binary cell\n- !OptimizerUtils.checkSparkCollectMemoryBudget(rlen, clen, brlen, bclen, nnz, getPinnedSize()) ) {\n+ !OptimizerUtils.checkSparkCollectMemoryBudget(mc, getPinnedSize()+getBroadcastSize()) ) {\n//write RDD to hdfs and read to prevent invalid collect mem consumption\n//note: lazy, partition-at-a-time collect (toLocalIterator) was significantly slower\nif( !MapReduceTool.existsFileOnHDFS(_hdfsFileName) ) { //prevent overwrite existing file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/SparkExecutionContext.java", "diff": "@@ -526,6 +526,10 @@ public class SparkExecutionContext extends ExecutionContext\n//create new broadcast handle (never created, evicted)\nif( bret == null )\n{\n+ //account for overwritten invalid broadcast (e.g., evicted)\n+ if( mo.getBroadcastHandle()!=null )\n+ CacheableData.addBroadcastSize(-mo.getBroadcastHandle().getSize());\n+\n//obtain meta data for matrix\nint brlen = (int) mo.getNumRowsPerBlock();\nint bclen = (int) mo.getNumColumnsPerBlock();\n@@ -554,8 +558,10 @@ public class SparkExecutionContext extends ExecutionContext\n}\nbret = new PartitionedBroadcast<MatrixBlock>(ret);\n- BroadcastObject<MatrixBlock> bchandle = new BroadcastObject<MatrixBlock>(bret, 
varname);\n+ BroadcastObject<MatrixBlock> bchandle = new BroadcastObject<MatrixBlock>(bret, varname,\n+ OptimizerUtils.estimatePartitionedSizeExactSparsity(mo.getMatrixCharacteristics()));\nmo.setBroadcastHandle(bchandle);\n+ CacheableData.addBroadcastSize(bchandle.getSize());\n}\nif (DMLScript.STATISTICS) {\n@@ -586,6 +592,10 @@ public class SparkExecutionContext extends ExecutionContext\n//create new broadcast handle (never created, evicted)\nif( bret == null )\n{\n+ //account for overwritten invalid broadcast (e.g., evicted)\n+ if( fo.getBroadcastHandle()!=null )\n+ CacheableData.addBroadcastSize(-fo.getBroadcastHandle().getSize());\n+\n//obtain meta data for frame\nint bclen = (int) fo.getNumColumns();\nint brlen = OptimizerUtils.getDefaultFrameSize();\n@@ -614,8 +624,10 @@ public class SparkExecutionContext extends ExecutionContext\n}\nbret = new PartitionedBroadcast<FrameBlock>(ret);\n- BroadcastObject<FrameBlock> bchandle = new BroadcastObject<FrameBlock>(bret, varname);\n+ BroadcastObject<FrameBlock> bchandle = new BroadcastObject<FrameBlock>(bret, varname,\n+ OptimizerUtils.estimatePartitionedSizeExactSparsity(fo.getMatrixCharacteristics()));\nfo.setBroadcastHandle(bchandle);\n+ CacheableData.addBroadcastSize(bchandle.getSize());\n}\nif (DMLScript.STATISTICS) {\n@@ -1136,6 +1148,7 @@ public class SparkExecutionContext extends ExecutionContext\nif( pbm != null ) //robustness for evictions\nfor( Broadcast<PartitionedBlock> bc : pbm.getBroadcasts() )\ncleanupBroadcastVariable(bc);\n+ CacheableData.addBroadcastSize(-((BroadcastObject)lob).getSize());\n}\n//recursively process lineage children\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/BroadcastObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/BroadcastObject.java", "diff": "@@ -27,19 +27,24 @@ import org.apache.sysml.runtime.controlprogram.caching.CacheBlock;\npublic class BroadcastObject<T extends CacheBlock> extends LineageObject\n{\n//soft reference storage for graceful cleanup in case of memory pressure\n- protected SoftReference<PartitionedBroadcast<T>> _bcHandle = null;\n+ protected final SoftReference<PartitionedBroadcast<T>> _bcHandle;\n+ private final long _size;\n- public BroadcastObject( PartitionedBroadcast<T> bvar, String varName ) {\n+ public BroadcastObject( PartitionedBroadcast<T> bvar, String varName, long size ) {\nsuper(varName);\n_bcHandle = new SoftReference<PartitionedBroadcast<T>>(bvar);\n+ _size = size;\n}\n@SuppressWarnings(\"rawtypes\")\n- public PartitionedBroadcast getBroadcast()\n- {\n+ public PartitionedBroadcast getBroadcast() {\nreturn _bcHandle.get();\n}\n+ public long getSize() {\n+ return _size;\n+ }\n+\npublic boolean isValid()\n{\n//check for evicted soft reference\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1430] Robust broadcast memory handling (track pinned sizes)
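The commit above tracks the aggregate size of live broadcast variables in a static AtomicLong, so that guarded RDD collects can account for broadcast memory alongside pinned memory. The following minimal sketch illustrates that accounting pattern in isolation; the class and method names (BroadcastSizeTracker, register, fitsInBudget, and so on) are hypothetical stand-ins for illustration, not SystemML APIs.

import java.util.concurrent.atomic.AtomicLong;

/** Hypothetical, simplified tracker mirroring the accounting pattern in the diff above. */
public class BroadcastSizeTracker {
    // aggregate size of live broadcast objects; an overestimate, since soft
    // references may already have been collected but not yet cleaned up
    private static final AtomicLong liveBroadcastBytes = new AtomicLong(0);

    /** Called when a new broadcast handle is created. */
    public static void register(long sizeInBytes) {
        liveBroadcastBytes.addAndGet(sizeInBytes);
    }

    /** Called when a broadcast handle is cleaned up or overwritten (e.g., after eviction). */
    public static void unregister(long sizeInBytes) {
        liveBroadcastBytes.addAndGet(-sizeInBytes);
    }

    /** Guarded-collect check: collect only if pinned plus broadcast memory leaves room in the budget. */
    public static boolean fitsInBudget(long collectSize, long pinnedSize, long budgetBytes) {
        return collectSize + pinnedSize + liveBroadcastBytes.get() < budgetBytes;
    }

    /** Reset on (re-)initialization of the buffer pool. */
    public static void reset() {
        liveBroadcastBytes.set(0);
    }
}

The design choice mirrored here is that registration happens where the handle is created and deregistration where it is cleaned up or replaced, so the counter stays a conservative upper bound on broadcast memory held at the driver.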
49,738
23.03.2017 22:14:35
25,200
a929ae6e6b59504215403b5b5bc7110c5f180efb
Fix codegen candidate exploration (distinct fuse/merge) This patch fixes the code generator candidate exploration algorithm by considering only distinct memo table entries for fuse and merge considerations. Furthermore, this cleans up various configurations and logging issues.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -57,6 +57,7 @@ import org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.OptimizerUtils.OptimizationLevel;\nimport org.apache.sysml.hops.codegen.SpoofCompiler;\n+import org.apache.sysml.hops.codegen.SpoofCompiler.PlanCache;\nimport org.apache.sysml.hops.globalopt.GlobalOptimizerWrapper;\nimport org.apache.sysml.lops.Lop;\nimport org.apache.sysml.lops.LopsException;\n@@ -596,9 +597,9 @@ public class DMLScript\n//Step 5.1: Generate code for the rewrited Hop dags\nif( dmlconf.getBooleanValue(DMLConfig.CODEGEN) ){\n- SpoofCompiler.USE_PLAN_CACHE = dmlconf.getBooleanValue(DMLConfig.CODEGEN_PLANCACHE);\n- SpoofCompiler.ALWAYS_COMPILE_LITERALS = (dmlconf.getIntValue(DMLConfig.CODEGEN_LITERALS)==2);\n-\n+ SpoofCompiler.PLAN_CACHE_POLICY = PlanCache.getPolicy(\n+ dmlconf.getBooleanValue(DMLConfig.CODEGEN_PLANCACHE),\n+ dmlconf.getIntValue(DMLConfig.CODEGEN_LITERALS)==2);\ndmlt.codgenHopsDAG(prog);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -78,15 +78,28 @@ public class SpoofCompiler\n{\nprivate static final Log LOG = LogFactory.getLog(SpoofCompiler.class.getName());\n- public static boolean OPTIMIZE = true;\n-\n//internal configuration flags\n- public static final boolean LDEBUG = false;\n- public static final boolean SUM_PRODUCT = false;\n- public static final boolean RECOMPILE = true;\n- public static boolean USE_PLAN_CACHE = true;\n- public static boolean ALWAYS_COMPILE_LITERALS = false;\n- public static final boolean ALLOW_SPARK_OPS = false;\n+ public static boolean LDEBUG = false;\n+ public static final boolean RECOMPILE_CODEGEN = true;\n+ public static PlanCache PLAN_CACHE_POLICY = PlanCache.CSLH;\n+ public static final PlanSelection PLAN_SEL_POLICY = PlanSelection.FUSE_ALL;\n+ public static final boolean PRUNE_REDUNDANT_PLANS = true;\n+\n+ public enum PlanSelection {\n+ FUSE_ALL, //maximal fusion, possible w/ redundant compute\n+ FUSE_NO_REDUNDANCY, //fusion without redundant compute\n+ FUSE_COST_BASED, //cost-based decision on materialization points\n+ }\n+\n+ public enum PlanCache {\n+ CONSTANT, //plan cache, with always compile literals\n+ CSLH, //plan cache, with context-sensitive literal replacement heuristic\n+ NONE; //no plan cache\n+\n+ public static PlanCache getPolicy(boolean planCache, boolean compileLiterals) {\n+ return !planCache ? NONE : compileLiterals ? 
CONSTANT : CSLH;\n+ }\n+ }\n//plan cache for cplan->compiled source to avoid unnecessary codegen/source code compile\n//for equal operators from (1) different hop dags and (2) repeated recompilation\n@@ -189,7 +202,7 @@ public class SpoofCompiler\n}\npublic static void cleanupCodeGenerator() {\n- if( USE_PLAN_CACHE ) {\n+ if( PLAN_CACHE_POLICY != PlanCache.NONE ) {\nCodegenUtils.clearClassCache(); //class cache\nplanCache.clear(); //plan cache\n}\n@@ -203,11 +216,10 @@ public class SpoofCompiler\n* @return dag root nodes of modified dag\n* @throws DMLRuntimeException if optimization failed\n*/\n- @SuppressWarnings(\"unused\")\npublic static ArrayList<Hop> optimize(ArrayList<Hop> roots, boolean recompile)\nthrows DMLRuntimeException\n{\n- if( roots == null || roots.isEmpty() || !OPTIMIZE )\n+ if( roots == null || roots.isEmpty() )\nreturn roots;\nlong t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n@@ -216,7 +228,7 @@ public class SpoofCompiler\ntry\n{\n//context-sensitive literal replacement (only integers during recompile)\n- boolean compileLiterals = ALWAYS_COMPILE_LITERALS || !recompile;\n+ boolean compileLiterals = (PLAN_CACHE_POLICY==PlanCache.CONSTANT) || !recompile;\n//construct codegen plans\nHashMap<Long, Pair<Hop[],CNodeTpl>> cplans = constructCPlans(roots, compileLiterals);\n@@ -235,7 +247,7 @@ public class SpoofCompiler\nfor( Entry<Long, Pair<Hop[],CNodeTpl>> cplan : cplans.entrySet() ) {\nPair<Hop[],CNodeTpl> tmp = cplan.getValue();\n- if( !USE_PLAN_CACHE || !planCache.containsKey(tmp.getValue()) ) {\n+ if( PLAN_CACHE_POLICY==PlanCache.NONE || !planCache.containsKey(tmp.getValue()) ) {\n//generate java source code\nString src = tmp.getValue().codegen(false);\n@@ -336,7 +348,7 @@ public class SpoofCompiler\n//fuse and merge operator plans\nfor( Hop c : hop.getInput() ) {\nif( memo.contains(c.getHopID()) )\n- for( MemoTableEntry me : memo.get(c.getHopID()) ) {\n+ for( MemoTableEntry me : memo.getDistinct(c.getHopID()) ) {\nBaseTpl tpl = TemplateUtils.createTemplate(me.type, me.closed);\nif( tpl.fuse(hop, c) ) {\nint pos = hop.getInput().indexOf(c);\n@@ -356,6 +368,7 @@ public class SpoofCompiler\n}\n//prune subsumed / redundant plans\n+ if( PRUNE_REDUNDANT_PLANS )\nmemo.pruneRedundant(hop.getHopID());\n//close operator plans, if required\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java", "diff": "@@ -21,26 +21,26 @@ package org.apache.sysml.hops.codegen.template;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import java.util.Collections;\n+import java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.Iterator;\n+import java.util.List;\nimport java.util.Map.Entry;\n+import java.util.stream.Collectors;\nimport org.apache.commons.collections.CollectionUtils;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.codegen.SpoofCompiler;\nimport org.apache.sysml.hops.codegen.template.BaseTpl.TemplateType;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\n-import scala.tools.jline_embedded.internal.Log;\n-\n-\npublic class CPlanMemoTable\n{\n- public enum PlanSelection {\n- FUSE_ALL, //maximal fusion, possible w/ redundant compute\n- FUSE_NO_REDUNDANCY, //fusion without redundant compute\n- FUSE_COST_BASED, //cost-based decision on materialization points\n- }\n+ private 
static final Log LOG = LogFactory.getLog(SpoofCompiler.class.getName());\nprivate HashMap<Long, ArrayList<MemoTableEntry>> _plans;\nprivate HashMap<Long, Hop> _hopRefs;\n@@ -128,6 +128,9 @@ public class CPlanMemoTable\n}\npublic void pruneSuboptimal() {\n+ if( SpoofCompiler.LDEBUG )\n+ LOG.info(\"#1: Memo before plan selection (\"+size()+\" plans)\\n\"+this);\n+\n//build index of referenced entries\nHashSet<Long> ix = new HashSet<Long>();\nfor( Entry<Long, ArrayList<MemoTableEntry>> e : _plans.entrySet() )\n@@ -160,14 +163,24 @@ public class CPlanMemoTable\nif( me.isPlanRef(i) && _hopRefs.get(me.intput(i)).getParent().size()==1 )\n_plansBlacklist.add(me.intput(i));\n}\n+\n+ if( SpoofCompiler.LDEBUG )\n+ LOG.info(\"#2: Memo after plan selection (\"+size()+\" plans)\\n\"+this);\n}\n- public ArrayList<MemoTableEntry> get(long hopID) {\n+ public List<MemoTableEntry> get(long hopID) {\nreturn _plans.get(hopID);\n}\n+ public List<MemoTableEntry> getDistinct(long hopID) {\n+ //return distinct entries wrt type and closed attributes\n+ return _plans.get(hopID).stream()\n+ .map(p -> new MemoTableEntry(p.type,-1,-1,-1,p.closed))\n+ .distinct().collect(Collectors.toList());\n+ }\n+\npublic MemoTableEntry getBest(long hopID) {\n- ArrayList<MemoTableEntry> tmp = get(hopID);\n+ List<MemoTableEntry> tmp = get(hopID);\nif( tmp == null || tmp.isEmpty() )\nreturn null;\n@@ -183,38 +196,31 @@ public class CPlanMemoTable\n//TODO revisit requirement for preference once cost-based pruning (pruneSuboptimal) ready\npublic MemoTableEntry getBest(long hopID, TemplateType pref) {\n- ArrayList<MemoTableEntry> tmp = get(hopID);\n+ List<MemoTableEntry> tmp = get(hopID);\nif( tmp.size()==1 ) //single plan available\nreturn tmp.get(0);\n//try to find plan with preferred type\n- Log.warn(\"Multiple memo table entries available, searching for preferred type.\");\n+ if( SpoofCompiler.LDEBUG )\n+ LOG.warn(\"Multiple memo table entries available, searching for preferred type.\");\nArrayList<MemoTableEntry> tmp2 = new ArrayList<MemoTableEntry>();\nfor( MemoTableEntry me : tmp )\nif( me.type == pref )\ntmp2.add(me);\nif( !tmp2.isEmpty() ) {\n- if( tmp2.size() > 1 )\n- Log.warn(\"Multiple memo table entries w/ preferred type available, return max refs entry.\");\n+ if( tmp2.size() > 1 && SpoofCompiler.LDEBUG )\n+ LOG.warn(\"Multiple memo table entries w/ preferred type available, return max refs entry.\");\nreturn getMaxRefsEntry(tmp2);\n}\nelse {\n- Log.warn(\"Multiple memo table entries available but none with preferred type, return max refs entry.\");\n+ if( SpoofCompiler.LDEBUG )\n+ LOG.warn(\"Multiple memo table entries available but none with preferred type, return max refs entry.\");\nreturn getMaxRefsEntry(tmp);\n}\n}\n- private static MemoTableEntry getMaxRefsEntry(ArrayList<MemoTableEntry> tmp) {\n- int maxPos = 0;\n- int maxRefs = 0;\n- for( int i=0; i<tmp.size(); i++ ) {\n- int cntRefs = tmp.get(i).countPlanRefs();\n- if( cntRefs > maxRefs ) {\n- maxRefs = cntRefs;\n- maxPos = i;\n- }\n- }\n- return tmp.get(maxPos);\n+ private static MemoTableEntry getMaxRefsEntry(List<MemoTableEntry> tmp) {\n+ return Collections.max(tmp, Comparator.comparing(p -> p.countPlanRefs()));\n}\nprivate static boolean isValid(MemoTableEntry me, Hop hop) {\n@@ -224,6 +230,12 @@ public class CPlanMemoTable\n|| (me.type == TemplateType.CellTpl);\n}\n+ public int size() {\n+ return _plans.values().stream()\n+ .map(list -> list.size())\n+ .mapToInt(x -> x.intValue()).sum();\n+ }\n+\n@Override\npublic String toString() {\nStringBuilder sb = new 
StringBuilder();\n@@ -235,6 +247,9 @@ public class CPlanMemoTable\nsb.append(Arrays.toString(e.getValue().toArray(new MemoTableEntry[0]))+\"\\n\");\n}\nsb.append(\"----------------------------------\\n\");\n+ sb.append(\"Blacklisted Plans: \");\n+ sb.append(Arrays.toString(_plansBlacklist.toArray(new Long[0]))+\"\\n\");\n+ sb.append(\"----------------------------------\\n\");\nreturn sb.toString();\n}\n@@ -246,10 +261,14 @@ public class CPlanMemoTable\npublic final long input3;\npublic boolean closed = false;\npublic MemoTableEntry(TemplateType t, long in1, long in2, long in3) {\n+ this(t, in1, in2, in3, false);\n+ }\n+ public MemoTableEntry(TemplateType t, long in1, long in2, long in3, boolean close) {\ntype = t;\ninput1 = in1;\ninput2 = in2;\ninput3 = in3;\n+ closed = close;\n}\npublic boolean isPlanRef(int index) {\nreturn (index==0 && input1 >=0)\n@@ -310,5 +329,10 @@ public class CPlanMemoTable\n(pos==1)?ref:me.input2, (pos==2)?ref:me.input3));\nplans = tmp;\n}\n+\n+ @Override\n+ public String toString() {\n+ return Arrays.toString(plans.toArray(new MemoTableEntry[0]));\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "diff": "@@ -213,7 +213,8 @@ public class Recompiler\nmemo.extract(hops, status);\n// codegen if enabled\n- if( ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.CODEGEN) && SpoofCompiler.RECOMPILE ) {\n+ if( ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.CODEGEN)\n+ && SpoofCompiler.RECOMPILE_CODEGEN ) {\nHop.resetVisitStatus(hops);\nhops = SpoofCompiler.optimize(hops, true);\n}\n@@ -313,7 +314,8 @@ public class Recompiler\nhops.refreshMemEstimates(memo);\n// codegen if enabled\n- if( ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.CODEGEN) && SpoofCompiler.RECOMPILE ) {\n+ if( ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.CODEGEN)\n+ && SpoofCompiler.RECOMPILE_CODEGEN ) {\nhops.resetVisitStatus();\nhops = SpoofCompiler.optimize(hops, false);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1374] Fix codegen candidate exploration (distinct fuse/merge) This patch fixes the code generator candidate exploration algorithm by considering only distinct memo table entries for fuse and merge considerations. Furthermore, this cleans up various configurations and logging issues.
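The getDistinct change in this commit deduplicates memo table entries by their template type and closed flag before fuse/merge exploration, using a Java stream with distinct(). The self-contained sketch below shows the same stream-based pattern; the PlanEntry class and its attributes are illustrative stand-ins for a memo table entry, not SystemML's MemoTableEntry.

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

// Hypothetical stand-in for a memo table entry; only the attributes relevant
// for fusion decisions (template type and closed flag) participate in equality.
class PlanEntry {
    final String type;   // e.g. "CellTpl", "RowAggTpl"
    final boolean closed;
    PlanEntry(String type, boolean closed) { this.type = type; this.closed = closed; }
    @Override public boolean equals(Object o) {
        return o instanceof PlanEntry && ((PlanEntry) o).type.equals(type)
            && ((PlanEntry) o).closed == closed;
    }
    @Override public int hashCode() { return Objects.hash(type, closed); }
    @Override public String toString() { return type + (closed ? "(closed)" : ""); }
}

public class DistinctPlansDemo {
    public static void main(String[] args) {
        List<PlanEntry> plans = Arrays.asList(
            new PlanEntry("CellTpl", false),
            new PlanEntry("CellTpl", false),   // duplicate wrt (type, closed)
            new PlanEntry("RowAggTpl", true));

        // deduplicate entries before exploring fuse/merge candidates,
        // so each (type, closed) combination is considered exactly once
        List<PlanEntry> distinct = plans.stream()
            .distinct().collect(Collectors.toList());

        System.out.println(distinct); // prints [CellTpl, RowAggTpl(closed)]
    }
}

Because Stream.distinct() relies on equals/hashCode, restricting equality to the fusion-relevant attributes is what makes the deduplication effective; entries that differ only in their input references collapse to a single candidate.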