Column | Type | Min | Max
--- | --- | --- | ---
author | int64 | 658 | 755k
date | stringlengths | 19 | 19
timezone | int64 | -46,800 | 43.2k
hash | stringlengths | 40 | 40
message | stringlengths | 5 | 490
mods | list | |
language | stringclasses | 20 values |
license | stringclasses | 3 values |
repo | stringlengths | 5 | 68
original_message | stringlengths | 12 | 491
49,737
14.08.2017 15:18:50
25,200
667aeb2b7f38b76b1ff85138426f215a03a4dfc4
[DOC][HOTFIX] updates to the performance test scripts Closes
[ { "change_type": "MODIFY", "old_path": "docs/python-performance-test.md", "new_path": "docs/python-performance-test.md", "diff": "@@ -177,7 +177,20 @@ In the example above `--tag` can be a major/minor systemml version and `--auth`\nCurrently we only support time difference between algorithms in different versions. This can be obtained by running the script below\n`./stats.py --auth client_json.json --exec-mode singlenode --tags 1.0 2.0`\n-Note: Please pip install `https://github.com/burnash/gspread` to use google docs client.\n+We pass different `matrix shapes` using `--mat-shape` argument.\n+\n+Matrix Shape | Approximate Data Size\n+--- | --- |\n+10k_1k|80MB\n+100k_1k|800MB\n+1M_1k|8GB\n+10M_1k|80GB\n+100M_1k|800GB\n+\n+For example the command below runs performance test for all data sizes described above\n+`run_perftest.py --family binomial clustering multinomial regression1 regression2 stats1 stats2 --mat-shape 10k_1k 100k_1k 1M_1k 10M_1k 100M_1k --master yarn-client --temp-dir hdfs://localhost:9000/user/systemml`\n+\n+Note: Please use this command `pip3 install -r requirements.txt` before using the perftest scripts.\n## Troubleshooting\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_fs.py", "new_path": "scripts/perftest/python/utils_fs.py", "diff": "@@ -134,12 +134,12 @@ def relevant_folders(path, algo, family, matrix_type, matrix_shape, mode):\nif mode == 'data-gen':\nsub_folder_name = '.'.join([family, current_matrix_type, current_matrix_shape])\ncmd = ['hdfs', 'dfs', '-ls', path]\n- path_subdir = subprocess_exec(' '.join(cmd), 'dir')\n+ path_subdir = subprocess_exec(' '.join(cmd), extract='dir')\nif mode == 'train':\nsub_folder_name = '.'.join([algo, family, current_matrix_type, current_matrix_shape])\ncmd = ['hdfs', 'dfs', '-ls', path]\n- path_subdir = subprocess_exec(' '.join(cmd), 'dir')\n+ path_subdir = subprocess_exec(' '.join(cmd), extract='dir')\npath_folders = list(filter(lambda x: contains_dir(x, sub_folder_name), path_subdir))\n" } ]
Java
Apache License 2.0
apache/systemds
[DOC][HOTFIX] updates to the performance test scripts Closes #616
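The approximate sizes in the matrix-shape table added by the doc patch above follow directly from dense double-precision storage: rows × cols × 8 bytes, so 10k_1k is 10,000 × 1,000 × 8 B ≈ 80 MB. A quick sanity check (a standalone sketch, not part of the patch):

```java
public class MatShapeSize {
    // Dense double-precision matrix: 8 bytes per cell.
    static long denseBytes(long rows, long cols) {
        return rows * cols * 8L;
    }

    public static void main(String[] args) {
        System.out.println(denseBytes(10_000, 1_000));    // 80000000   (~80 MB, 10k_1k)
        System.out.println(denseBytes(1_000_000, 1_000)); // 8000000000 (~8 GB,  1M_1k)
    }
}
```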
49,738
14.08.2017 20:15:32
25,200
89632b5ea8063d8959dcab3ec3a774f8883f2b62
Fix NPE on parfor initialization w/o log4j config This patch fixes a null pointer exception on parfor static initialization, which expected a set log level. We now properly probe the log level and assign a default of INFO if nothing was set up.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -24,6 +24,8 @@ import java.util.HashMap;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.log4j.Level;\n+import org.apache.log4j.Logger;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.conf.CompilerConfig;\n@@ -933,6 +935,11 @@ public class OptimizerUtils\nreturn ret;\n}\n+ public static Level getDefaultLogLevel() {\n+ Level log = Logger.getRootLogger().getLevel();\n+ return (log != null) ? log : Level.INFO;\n+ }\n+\n////////////////////////\n// Sparsity Estimates //\n////////////////////////\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "diff": "@@ -128,7 +128,7 @@ public class ParForStatementBlock extends ForStatementBlock\n_paramDefaults.put( EXEC_MODE, String.valueOf(PExecMode.LOCAL) );\n_paramDefaults.put( OPT_MODE, String.valueOf(POptMode.RULEBASED) );\n_paramDefaults.put( PROFILE, \"0\" );\n- _paramDefaults.put( OPT_LOG, Logger.getRootLogger().getLevel().toString() );\n+ _paramDefaults.put( OPT_LOG, OptimizerUtils.getDefaultLogLevel().toString() );\n_paramDefaults2 = new HashMap<String, String>(); //OPT_MODE always specified\n_paramDefaults2.put( CHECK, \"1\" );\n@@ -139,7 +139,7 @@ public class ParForStatementBlock extends ForStatementBlock\n_paramDefaults2.put( RESULT_MERGE, String.valueOf(PResultMerge.UNSPECIFIED) );\n_paramDefaults2.put( EXEC_MODE, String.valueOf(PExecMode.UNSPECIFIED) );\n_paramDefaults2.put( PROFILE, \"0\" );\n- _paramDefaults2.put( OPT_LOG, Logger.getRootLogger().getLevel().toString() );\n+ _paramDefaults2.put( OPT_LOG, OptimizerUtils.getDefaultLogLevel().toString() );\n_idSeq = new IDSequence();\n_idSeqfn = new IDSequence();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1839] Fix NPE on parfor initialization w/o log4j config This patch fixes a null pointer exception on parfor static initialization, which expected a set log level. We now properly probe the log level and assign a default of INFO if nothing was set up.
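The root cause, per the commit message: `Logger.getRootLogger().getLevel()` can return null when no log4j configuration was loaded, so the old code's `.toString()` call threw the NPE. A minimal standalone sketch of the null-safe probe the patch introduces:

```java
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class LogLevelProbe {
    // Probe the root log level; fall back to INFO when log4j
    // was never configured (getLevel() can be null in that case).
    public static Level getDefaultLogLevel() {
        Level level = Logger.getRootLogger().getLevel();
        return (level != null) ? level : Level.INFO;
    }

    public static void main(String[] args) {
        // Without a log4j configuration this prints INFO
        // instead of throwing a NullPointerException.
        System.out.println(getDefaultLogLevel());
    }
}
```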
49,738
14.08.2017 23:10:38
25,200
fcfbd3d2473a555b139371ebe4d49714d5dabe48
Fix missing validation of transform* specifications Currently, all transform builtin functions parse the provided transform specification during runtime but not during initial compilation. This patch fixes this missing validation to give users immediate feedback on potential syntax errors, even before reading potentially large data.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ParameterizedBuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/ParameterizedBuiltinFunctionExpression.java", "diff": "@@ -26,6 +26,7 @@ import java.util.HashSet;\nimport org.apache.sysml.hops.Hop.ParamBuiltinOp;\nimport org.apache.sysml.parser.LanguageException.LanguageErrorCodes;\n+import org.apache.wink.json4j.JSONObject;\npublic class ParameterizedBuiltinFunctionExpression extends DataIdentifier\n@@ -288,6 +289,7 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\n//validate specification\ncheckDataValueType(\"transformapply\", TF_FN_PARAM_SPEC, DataType.SCALAR, ValueType.STRING, conditional);\n+ validateTransformSpec(TF_FN_PARAM_SPEC, conditional);\n//set output dimensions\noutput.setDataType(DataType.MATRIX);\n@@ -304,6 +306,7 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\n//validate specification\ncheckDataValueType(\"transformdecode\", TF_FN_PARAM_SPEC, DataType.SCALAR, ValueType.STRING, conditional);\n+ validateTransformSpec(TF_FN_PARAM_SPEC, conditional);\n//set output dimensions\noutput.setDataType(DataType.FRAME);\n@@ -316,6 +319,7 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\n{\n//validate specification\ncheckDataValueType(\"transformmeta\", TF_FN_PARAM_SPEC, DataType.SCALAR, ValueType.STRING, conditional);\n+ validateTransformSpec(TF_FN_PARAM_SPEC, conditional);\n//validate meta data path\ncheckDataValueType(\"transformmeta\", TF_FN_PARAM_MTD, DataType.SCALAR, ValueType.STRING, conditional);\n@@ -334,6 +338,7 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\n//validate specification\ncheckDataValueType(\"transformencode\", TF_FN_PARAM_SPEC, DataType.SCALAR, ValueType.STRING, conditional);\n+ validateTransformSpec(TF_FN_PARAM_SPEC, conditional);\n//set output dimensions\noutput1.setDataType(DataType.MATRIX);\n@@ -344,6 +349,20 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\noutput2.setDimensions(-1, -1);\n}\n+ private void validateTransformSpec(String pname, boolean conditional) throws LanguageException {\n+ Expression data = getVarParam(pname);\n+ if( data instanceof StringIdentifier ) {\n+ try {\n+ StringIdentifier spec = (StringIdentifier)data;\n+ new JSONObject(spec.getValue());\n+ }\n+ catch(Exception ex) {\n+ raiseValidateError(\"Transform specification parsing issue: \",\n+ conditional, ex.getMessage());\n+ }\n+ }\n+ }\n+\nprivate void validateReplace(DataIdentifier output, boolean conditional) throws LanguageException {\n//check existence and correctness of arguments\nExpression target = getVarParam(\"target\");\n@@ -721,11 +740,6 @@ public class ParameterizedBuiltinFunctionExpression extends DataIdentifier\n@Override\npublic boolean multipleReturns() {\n- switch(_opcode) {\n- case TRANSFORMENCODE:\n- return true;\n- default:\n- return false;\n- }\n+ return (_opcode == ParameterizedBuiltinFunctionOp.TRANSFORMENCODE);\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1840] Fix missing validation of transform* specifications Currently, all transform builtin functions parse the provided transform specification during runtime but not during initial compilation. This patch fixes this missing validation to give users immediate feedback on potential syntax errors, even before reading potentially large data.
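The added validation boils down to eagerly parsing the specification string as JSON at compile time and surfacing any parse error immediately, before any data is read. A distilled sketch using the same `org.apache.wink.json4j.JSONObject` parser as the patch (the `IllegalArgumentException` here stands in for the compiler's `raiseValidateError`):

```java
import org.apache.wink.json4j.JSONObject;

public class TransformSpecCheck {
    // Parse the spec once during validation; the parsed object is
    // discarded, only syntactic correctness matters at this point.
    public static void validateSpec(String spec) {
        try {
            new JSONObject(spec);
        }
        catch (Exception ex) {
            throw new IllegalArgumentException(
                "Transform specification parsing issue: " + ex.getMessage(), ex);
        }
    }

    public static void main(String[] args) {
        validateSpec("{ids:true, recode:[1]}"); // ok
        validateSpec("{ids:true, recode:[1]");  // throws: unbalanced braces
    }
}
```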
49,738
15.08.2017 19:22:23
25,200
a2bf0006f26e2c0058d1ee2c63b7ff0e3360466f
Fix rewrite mark loop variables as update-in-place This patch fixes special cases of the update-in-place rewrite, where variables that are updated but not read inside the loop were mistakenly marked for update-in-place. We now properly reject these cases, which avoids an unnecessary matrix copy on loop entry.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMarkLoopVariablesUpdateInPlace.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMarkLoopVariablesUpdateInPlace.java", "diff": "@@ -99,12 +99,12 @@ public class RewriteMarkLoopVariablesUpdateInPlace extends StatementBlockRewrite\n//recursive invocation\nboolean ret = true;\nfor( StatementBlock sb : sbs ) {\n- if( !sb.variablesRead().containsVariable(varname) )\n+ if( !sb.variablesRead().containsVariable(varname)\n+ && !sb.variablesUpdated().containsVariable(varname) )\ncontinue; //valid wrt update-in-place\nif( sb instanceof WhileStatementBlock || sb instanceof ForStatementBlock ) {\n- ret &= sb.getUpdateInPlaceVars()\n- .contains(varname);\n+ ret &= sb.getUpdateInPlaceVars().contains(varname);\n}\nelse if( sb instanceof IfStatementBlock ) {\nIfStatementBlock isb = (IfStatementBlock) sb;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1843] Fix rewrite mark loop variables as update-in-place This patch fixes special cases of the update-in-place rewrite, where variables that are updated but not read inside the loop were mistakenly marked for update-in-place. We now properly reject these cases, which avoids an unnecessary matrix copy on loop entry.
49,717
16.08.2017 11:14:43
25,200
ce240af57fb68caa3a978a8bad62701cb55a139d
[MINOR] updates to performance scripts Closes
[ { "change_type": "MODIFY", "old_path": "bin/systemml-standalone.py", "new_path": "bin/systemml-standalone.py", "diff": "@@ -43,7 +43,7 @@ def default_classpath(systemml_home):\n#TODO\n# User dir, fix for SYSTEMML_1795\n-def standalone_execution_entry(nvargs, args, config, explain, debug, stats, gpu, f):\n+def standalone_execution_entry(nvargs, args, config, explain, debug, stats, gpu, heapmem, f):\n\"\"\"\nThis function is responsible for the execution of arguments via\nsubprocess call in singlenode mode\n@@ -57,7 +57,7 @@ def standalone_execution_entry(nvargs, args, config, explain, debug, stats, gpu,\nelse:\ndefault_cp = ':'.join(default_classpath(systemml_home))\n- java_memory = '-Xmx8g -Xms4g -Xmn1g'\n+ java_memory = '-Xmx' + heapmem + ' -Xms4g -Xmn1g'\n# Log4j\nlog4j = log4j_path(systemml_home)\n@@ -93,7 +93,10 @@ def standalone_execution_entry(nvargs, args, config, explain, debug, stats, gpu,\n'-f', script_file, '-exec', 'singlenode', '-config', default_config,\n' '.join(ml_options)]\n- return_code = os.system(' '.join(cmd))\n+ cmd = ' '.join(cmd)\n+ print(cmd)\n+\n+ return_code = os.system(cmd)\nreturn return_code\n@@ -115,6 +118,7 @@ if __name__ == '__main__':\ncparser.add_argument('-gpu', help='uses CUDA instructions when reasonable, '\n'set <force> option to skip conservative memory estimates '\n'and use GPU wherever possible', nargs='?')\n+ cparser.add_argument('-heapmem', help='maximum JVM heap memory', metavar='', default='8g')\ncparser.add_argument('-f', required=True, help='specifies dml/pydml file to execute; '\n'path can be local/hdfs/gpfs', metavar='')\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -32,8 +32,7 @@ from datagen import config_packets_datagen\nfrom train import config_packets_train\nfrom predict import config_packets_predict\nfrom utils_misc import get_families, config_reader, \\\n- exec_dml_and_parse_time, exec_test_data, check_predict, get_folder_metrics, args_dict_split, \\\n- get_config_args\n+ exec_dml_and_parse_time, exec_test_data, check_predict, get_folder_metrics, split_config_args\nfrom utils_fs import create_dir_local, write_success, check_SUCCESS_file_exists\n# A packet is a dictionary\n@@ -84,8 +83,6 @@ ML_PREDICT = {'Kmeans': 'Kmeans-predict',\nDENSE_TYPE_ALGOS = ['clustering', 'stats1', 'stats2']\n-sup_args_dict = {}\n-\n# Responsible for execution and metric logging\ndef algorithm_workflow(algo, exec_type, config_path, dml_file_name, action_mode, current_dir):\n@@ -134,7 +131,7 @@ def algorithm_workflow(algo, exec_type, config_path, dml_file_name, action_mode,\nif exit_flag_success:\ntime = 'data_exists'\nelse:\n- time = exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup_args_dict, config_path)\n+ time = exec_dml_and_parse_time(exec_type, dml_file_name, args, backend_args_dict, systemml_args_dict, config_path)\nwrite_success(time, temp_cwd)\nprint('{},{},{},{},{},{}'.format(algo, action_mode, intercept, mat_type, mat_shape, time))\n@@ -222,7 +219,7 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, config_dir, mo\n# Statistic family do not require to be split\nif family_name not in ['stats1', 'stats2']:\nif not success_file:\n- exec_test_data(exec_type, spark_args_dict, sup_args_dict, data_gen_dir, config)\n+ exec_test_data(exec_type, backend_args_dict, systemml_args_dict, data_gen_dir, config)\nif 'train' in mode:\n# Create config directories\n@@ -297,7 +294,7 @@ if __name__ == 
'__main__':\ndescription='SystemML Performance Test Script')\ncparser.add_argument('--family', help='space separated list of classes of algorithms '\n'(available : ' + ', '.join(sorted(all_families)) + ')',\n- metavar='', choices=all_families, nargs='+')\n+ metavar='', choices=all_families, nargs='+', default=' '.join(all_families))\ncparser.add_argument('--algo', help='space separated list of algorithm to run '\n'(Overrides --family, available : ' + ', '.join(sorted(all_algos)) + ')', metavar='',\nchoices=all_algos, nargs='+')\n@@ -329,7 +326,9 @@ if __name__ == '__main__':\ncparser.add_argument('-explain', help='explains plan levels can be hops, runtime, '\n'recompile_hops, recompile_runtime', nargs='?', const='runtime', metavar='')\ncparser.add_argument('-config', help='System-ML configuration file (e.g SystemML-config.xml)', metavar='')\n-\n+ cparser.add_argument('-gpu', help='uses CUDA instructions when reasonable, '\n+ 'set <force> option to skip conservative memory estimates '\n+ 'and use GPU wherever possible', nargs='?')\n# Spark Configuration Option\ncparser.add_argument('--master', help='local, yarn-client, yarn-cluster', metavar='')\ncparser.add_argument('--driver-memory', help='Memory for driver (e.g. 512M)', metavar='')\n@@ -338,15 +337,18 @@ if __name__ == '__main__':\ncparser.add_argument('--executor-cores', help='Number of cores', metavar='')\ncparser.add_argument('--conf', help='Spark configuration file', nargs='+', metavar='')\n+ # Single node execution mode options\n+ cparser.add_argument('-heapmem', help='maximum JVM heap memory', metavar='', default='8g')\n+\n+\n# Args is a namespace\nargs = cparser.parse_args()\nall_arg_dict = vars(args)\n- arg_dict, config_dict, spark_dict = args_dict_split(all_arg_dict)\ncreate_dir_local(args.config_dir)\n# Global variables\n- sup_args_dict, spark_args_dict = get_config_args(config_dict, spark_dict, args.exec_type)\n+ perftest_args_dict, systemml_args_dict, backend_args_dict = split_config_args(all_arg_dict)\n# Debug arguments\n# print(arg_dict)\n@@ -395,8 +397,8 @@ if __name__ == '__main__':\nlogging.info('algorithm,run_type,intercept,matrix_type,data_shape,time_sec')\n# Remove filename item from dictionary as its already used to create the log above\n- del arg_dict['filename']\n- perf_test_entry(**arg_dict)\n+ del perftest_args_dict['filename']\n+ perf_test_entry(**perftest_args_dict)\ntotal_time = (time.time() - start_time)\nlogging.info('Performance tests complete {0:.3f} secs \\n'.format(total_time))\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_exec.py", "new_path": "scripts/perftest/python/utils_exec.py", "diff": "@@ -52,6 +52,7 @@ def subprocess_exec(cmd_string, log_file_path=None, extract=None):\nerror_arr, out_arr = get_all_logs(proc1)\nstd_outs = out_arr + error_arr\n+ std_outs.insert(0, ' '.join(exec_command))\nreturn_code = proc1.returncode\nif log_file_path is not None:\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_misc.py", "new_path": "scripts/perftest/python/utils_misc.py", "diff": "@@ -30,53 +30,68 @@ from utils_exec import subprocess_exec\n# This file contains all misc utility functions required by performance test module\n-def get_config_args(config_dict, spark_dict, exec_type):\n+def split_config_args(args):\n\"\"\"\nBased on configuration parameters passed build configuration dictionary used by subprocess\n- config_dict: Dictionary\n- General configuration options\n+ args: Dictionary\n+ All parameters passed in\n- spark_dict: Dictionary\n- Spark 
configuration options\n+ return: Dictionary, Dictionary, Dictionary\n+ 3 dictionaries - one for perf tests, one for systemml specific args, one for backend options\n+ \"\"\"\n- exec_type: String\n- Contains the execution type singlenode / hybrid_spark\n+ perftest_args_dict = {}\n- return: Dictionary, Dictionary\n- Based on the parameters passed we build to dictionary that need to be passed either at the\n- beginning or at the end\n- \"\"\"\n+ perftest_args_dict['family'] = args['family']\n+ perftest_args_dict['algo'] = args['algo']\n+ perftest_args_dict['exec_type'] = args['exec_type']\n+ perftest_args_dict['mat_type'] = args['mat_type']\n+ perftest_args_dict['mat_shape'] = args['mat_shape']\n+ perftest_args_dict['config_dir'] = args['config_dir']\n+ perftest_args_dict['filename'] = args['filename']\n+ perftest_args_dict['mode'] = args['mode']\n+ perftest_args_dict['temp_dir'] = args['temp_dir']\n- sup_args_dict = {}\n- if config_dict['stats'] is not None:\n- sup_args_dict['-stats'] = config_dict['stats']\n- if config_dict['explain'] is not None:\n- sup_args_dict['-explain'] = config_dict['explain']\n+ systemml_args_dict = {}\n- if config_dict['config'] is not None:\n- sup_args_dict['-config'] = config_dict['config']\n+ if args['stats'] is not None:\n+ systemml_args_dict['-stats'] = args['stats']\n+\n+ if args['explain'] is not None:\n+ systemml_args_dict['-explain'] = args['explain']\n+\n+ if args['config'] is not None:\n+ systemml_args_dict['-config'] = args['config']\n+\n+ if args['gpu'] is not None:\n+ systemml_args_dict['-gpu'] = args['gpu']\n+\n+ backend_args_dict = {}\n+ exec_type = args['exec_type']\n- spark_args_dict = {}\nif exec_type == 'hybrid_spark':\n- if spark_dict['master'] is not None:\n- spark_args_dict['--master'] = spark_dict['master']\n+ if args['master'] is not None:\n+ backend_args_dict['--master'] = args['master']\n- if spark_dict['num_executors'] is not None:\n- spark_args_dict['--num-executors'] = spark_dict['num_executors']\n+ if args['num_executors'] is not None:\n+ backend_args_dict['--num-executors'] = args['num_executors']\n- if spark_dict['driver_memory'] is not None:\n- spark_args_dict['--driver-memory'] = spark_dict['driver_memory']\n+ if args['driver_memory'] is not None:\n+ backend_args_dict['--driver-memory'] = args['driver_memory']\n- if spark_dict['executor_cores'] is not None:\n- spark_args_dict['--executor-cores'] = spark_dict['executor_cores']\n+ if args['executor_cores'] is not None:\n+ backend_args_dict['--executor-cores'] = args['executor_cores']\n- if spark_dict['conf'] is not None:\n- spark_args_dict['--conf'] = ' '.join(spark_dict['conf'])\n+ if args['conf'] is not None:\n+ backend_args_dict['--conf'] = ' '.join(args['conf'])\n+ elif exec_type == 'singlenode':\n+ if args['heapmem'] is not None:\n+ backend_args_dict['-heapmem'] = args['heapmem']\n- return sup_args_dict, spark_args_dict\n+ return perftest_args_dict, systemml_args_dict, backend_args_dict\ndef args_dict_split(all_arguments):\n@@ -87,13 +102,14 @@ def args_dict_split(all_arguments):\nAll input arguments parsed\nreturn: Dictionary, Dictionary, Dictionary\n- We return three dictionaries for init, script, spark arguments\n+ We return four dictionaries for init, script, spark arguments, singlenode arguments\n\"\"\"\nargs_dict = dict(list(all_arguments.items())[0:9])\n- config_dict = dict(list(all_arguments.items())[9:12])\n- spark_dict = dict(list(all_arguments.items())[12:])\n+ config_dict = dict(list(all_arguments.items())[9:13])\n+ spark_dict = 
dict(list(all_arguments.items())[13:19])\n+ singlenode_dict = dict(list(all_arguments.items())[19:])\n- return args_dict, config_dict, spark_dict\n+ return args_dict, config_dict, spark_dict, singlenode_dict\ndef get_families(current_algo, ml_algo):\n@@ -166,7 +182,7 @@ def config_reader(read_path):\nreturn conf_file\n-def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup_args_dict, log_file_name=None):\n+def exec_dml_and_parse_time(exec_type, dml_file_name, args, backend_args_dict, systemml_args_dict, log_file_name=None):\n\"\"\"\nThis function is responsible of execution of input arguments via python sub process,\nWe also extract time obtained from the output of this subprocess\n@@ -180,10 +196,10 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup\nargs: Dictionary\nKey values pairs depending on the arg type\n- spark_args_dict: Dictionary\n- Spark configuration arguments\n+ backend_args_dict: Dictionary\n+ Spark configuration arguments / singlenode config arguments\n- sup_args_dict: Dictionary\n+ systemml_args_dict: Dictionary\nSupplementary arguments required by the script\nlog_file_name: String\n@@ -195,17 +211,17 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, args, spark_args_dict, sup\nalgorithm = dml_file_name + '.dml'\n- sup_args = ''.join(['{} {}'.format(k, v) for k, v in sup_args_dict.items()])\n+ sup_args = ''.join(['{} {}'.format(k, v) for k, v in systemml_args_dict.items()])\nif exec_type == 'singlenode':\nexec_script = join(os.environ.get('SYSTEMML_HOME'), 'bin', 'systemml-standalone.py')\n-\n+ singlenode_pre_args = ''.join([' {} {} '.format(k, v) for k, v in backend_args_dict.items()])\nargs = ''.join(['{} {}'.format(k, v) for k, v in args.items()])\n- cmd = [exec_script, '-f', algorithm, args, sup_args]\n+ cmd = [exec_script, singlenode_pre_args, '-f', algorithm, args, sup_args]\ncmd_string = ' '.join(cmd)\nif exec_type == 'hybrid_spark':\nexec_script = join(os.environ.get('SYSTEMML_HOME'), 'bin', 'systemml-spark-submit.py')\n- spark_pre_args = ''.join([' {} {} '.format(k, v) for k, v in spark_args_dict.items()])\n+ spark_pre_args = ''.join([' {} {} '.format(k, v) for k, v in backend_args_dict.items()])\nargs = ''.join(['{} {}'.format(k, v) for k, v in args.items()])\ncmd = [exec_script, spark_pre_args, '-f', algorithm, args, sup_args]\ncmd_string = ' '.join(cmd)\n@@ -238,7 +254,7 @@ def parse_time(raw_logs):\nreturn 'time_not_found'\n-def exec_test_data(exec_type, spark_args_dict, sup_args_dict, datagen_path, config):\n+def exec_test_data(exec_type, backend_args_dict, systemml_args_dict, datagen_path, config):\n\"\"\"\nCreates the test data split from the given input path\n@@ -256,7 +272,7 @@ def exec_test_data(exec_type, spark_args_dict, sup_args_dict, datagen_path, conf\nX_test = join(path, 'X_test.data')\nY_test = join(path, 'Y_test.data')\nargs = {'-args': ' '.join([X, Y, X_test, Y_test, 'csv'])}\n- exec_dml_and_parse_time(exec_type, test_split_script, args, spark_args_dict, sup_args_dict)\n+ exec_dml_and_parse_time(exec_type, test_split_script, args, backend_args_dict, systemml_args_dict)\ndef check_predict(current_algo, ml_predict):\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] updates to performance scripts Closes #618
49,703
17.08.2017 10:40:51
25,200
071207c32269d5ed6e7d2de0ec55720c7c876dd4
[MINOR] Add Python artifact README.txt Create README.txt file for Python artifact. This removes the Python README warning when the Python artifact is built. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/python/MANIFEST.in", "new_path": "src/main/python/MANIFEST.in", "diff": "#-------------------------------------------------------------\ninclude LICENSE\ninclude NOTICE\n+include README.txt\nrecursive-include systemml/systemml-java *\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/python/README.txt", "diff": "+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+\n+\n+Apache SystemML\n+\n+Website: http://systemml.apache.org/\n+GitHub: https://github.com/apache/systemml\n+Mailing List: [email protected]\n+Issue Tracker: https://issues.apache.org/jira/browse/SYSTEMML\n+Download: http://systemml.apache.org/download.html\n+\n+For more information about using SystemML with Python, please see:\n+Beginner's Guide for Python Users: http://apache.github.io/systemml/beginners-guide-python.html\n+Reference Guide for Python Users: http://apache.github.io/systemml/python-reference.html\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Add Python artifact README.txt Create README.txt file for Python artifact. This removes the Python README warning when the Python artifact is built. Closes #620.
49,703
17.08.2017 10:45:55
25,200
6bf3d92c74fa7e6bc5a5f9f531b6fea62bee3fc1
[MINOR] Initialize log4j in GenerateClassesForMLContext Initialize log4j in GenerateClassesForMLContext using BasicConfigurator. This removes the log4j initialization warning for GenerateClassesForMLContext during the project build. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GenerateClassesForMLContext.java", "new_path": "src/main/java/org/apache/sysml/utils/GenerateClassesForMLContext.java", "diff": "@@ -31,6 +31,9 @@ import java.util.Map;\nimport org.apache.commons.io.FileUtils;\nimport org.apache.commons.lang.StringEscapeUtils;\n+import org.apache.log4j.BasicConfigurator;\n+import org.apache.log4j.Level;\n+import org.apache.log4j.Logger;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.mlcontext.MLContext;\nimport org.apache.sysml.api.mlcontext.MLResults;\n@@ -89,6 +92,9 @@ public class GenerateClassesForMLContext {\nsource = args[0];\n}\ntry {\n+ BasicConfigurator.configure();\n+ Logger.getRootLogger().setLevel(Level.WARN);\n+\nDMLScript.VALIDATOR_IGNORE_ISSUES = true;\nSystem.out.println(\"************************************\");\nSystem.out.println(\"**** MLContext Class Generation ****\");\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Initialize log4j in GenerateClassesForMLContext Initialize log4j in GenerateClassesForMLContext using BasicConfigurator. This removes the log4j initialization warning for GenerateClassesForMLContext during the project build. Closes #621.
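`BasicConfigurator.configure()` installs a default console appender, which is enough to silence log4j's "no appenders could be found" warning; raising the root level to WARN keeps build output terse. A minimal sketch of that initialization on its own:

```java
import org.apache.log4j.BasicConfigurator;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class QuietLog4j {
    public static void main(String[] args) {
        // Default console appender; no log4j.properties required.
        BasicConfigurator.configure();
        // Hide INFO/DEBUG chatter during the build step.
        Logger.getRootLogger().setLevel(Level.WARN);
        Logger.getLogger(QuietLog4j.class).info("suppressed");
        Logger.getLogger(QuietLog4j.class).warn("shown");
    }
}
```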
49,737
17.08.2017 11:43:49
25,200
11420072412c0c873b72267d1e9764c87abc57b4
[MINOR] fixes for HDFS path Closes
[ { "change_type": "MODIFY", "old_path": "bin/utils.py", "new_path": "bin/utils.py", "diff": "@@ -74,15 +74,16 @@ def find_dml_file(systemml_home, script_file):\nLocation of the dml script\n\"\"\"\nscripts_dir = join(systemml_home, 'scripts')\n- if not (exists(script_file)):\n- script_file = find_file(script_file, scripts_dir)\n- if script_file is None:\n+ if not exists(script_file):\n+ script_file_path = find_file(script_file, scripts_dir)\n+ if script_file_path is not None:\n+ return script_file_path\n+ else:\nprint('Could not find DML script: ' + script_file)\nsys.exit()\nreturn script_file\n-\ndef log4j_path(systemml_home):\n\"\"\"\nCreate log4j.properties from the template if not exist\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -32,7 +32,8 @@ from datagen import config_packets_datagen\nfrom train import config_packets_train\nfrom predict import config_packets_predict\nfrom utils_misc import get_families, config_reader, \\\n- exec_dml_and_parse_time, exec_test_data, check_predict, get_folder_metrics, split_config_args\n+ exec_dml_and_parse_time, exec_test_data, check_predict, get_folder_metrics, split_config_args, \\\n+ get_default_dir\nfrom utils_fs import create_dir_local, write_success, check_SUCCESS_file_exists\n# A packet is a dictionary\n@@ -275,7 +276,7 @@ if __name__ == '__main__':\ndefault_mat_shape = ['10k_100']\n# Default temp directory, contains everything generated in perftest\n- default_temp_dir = join(systemml_home, 'scripts', 'perftest', 'temp')\n+ default_config_dir = join(systemml_home, 'scripts', 'perftest', 'temp')\n# Initialize time\nstart_time = time.time()\n@@ -308,7 +309,7 @@ if __name__ == '__main__':\ncparser.add_argument('--mat-shape', default=default_mat_shape, help='space separated list of shapes of matrices '\n'to generate (e.g 10k_1k, 20M_4k)', metavar='', nargs='+')\n- cparser.add_argument('--config-dir', default=default_temp_dir, help='temporary directory '\n+ cparser.add_argument('--config-dir', default=default_config_dir, help='temporary directory '\n'where generated, training and prediction data is put', metavar='')\ncparser.add_argument('--filename', default='perf_test', help='name of the output file for the perf'\n' metrics', metavar='')\n@@ -316,8 +317,7 @@ if __name__ == '__main__':\nhelp='space separated list of types of workloads to run (available: data-gen, train, predict)',\nmetavar='', choices=workload, nargs='+')\n# Change this to temp-dir\n- cparser.add_argument('--temp-dir', default=default_temp_dir,\n- help='define the file system to work on', metavar='')\n+ cparser.add_argument('--temp-dir', help='define the file system to work on', metavar='')\n# Configuration Options\ncparser.add_argument('-stats', help='Monitor and report caching/recompilation statistics, '\n@@ -350,8 +350,8 @@ if __name__ == '__main__':\n# Global variables\nperftest_args_dict, systemml_args_dict, backend_args_dict = split_config_args(all_arg_dict)\n- # Debug arguments\n- # print(arg_dict)\n+ # temp_dir hdfs / local path check\n+ perftest_args_dict['temp_dir'] = get_default_dir(args.temp_dir, args.exec_type, default_config_dir)\n# default_mat_type validity\nif len(args.mat_type) > 2:\n@@ -401,4 +401,5 @@ if __name__ == '__main__':\nperf_test_entry(**perftest_args_dict)\ntotal_time = (time.time() - start_time)\n- logging.info('Performance tests complete {0:.3f} secs \\n'.format(total_time))\n+ 
logging.info('total_time,none,none,none,none,{}'.format(total_time))\n+ logging.info('Performance tests complete')\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_exec.py", "new_path": "scripts/perftest/python/utils_exec.py", "diff": "#\n#-------------------------------------------------------------\n+import sys\nimport subprocess\nimport shlex\nimport re\n@@ -63,6 +64,8 @@ def subprocess_exec(cmd_string, log_file_path=None, extract=None):\nreturn_data = parse_time(std_outs)\nif extract == 'dir':\nreturn_data = parse_hdfs_paths(std_outs)\n+ if extract == 'hdfs_base':\n+ return_data = parse_hdfs_base(std_outs)\nif extract is None:\nreturn_data = 0\n@@ -73,6 +76,20 @@ def subprocess_exec(cmd_string, log_file_path=None, extract=None):\nreturn return_data\n+def parse_hdfs_base(std_outs):\n+ \"\"\"\n+ return: String\n+ hdfs base uri\n+ \"\"\"\n+ hdfs_uri = None\n+ for line in std_outs:\n+ if line.startswith('hdfs://'):\n+ hdfs_uri = line\n+ if hdfs_uri is None:\n+ sys.exit('HDFS URI not found')\n+ return hdfs_uri\n+\n+\ndef write_logs(std_outs, log_file_path):\n\"\"\"\nWrite all logs to the specified location\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_fs.py", "new_path": "scripts/perftest/python/utils_fs.py", "diff": "#-------------------------------------------------------------\nimport os\n+import sys\nfrom os.path import join\nimport glob\nfrom functools import reduce\n@@ -101,6 +102,16 @@ def contains_dir(hdfs_dirs, sub_folder):\nreturn False\n+def check_hdfs_path(path):\n+ \"\"\"\n+ Check if a path is present in HDFS\n+ \"\"\"\n+ cmd = ['hdfs', 'dfs', '-test', '-e', path]\n+ return_code = subprocess_exec(' '.join(cmd))\n+ if return_code != 0:\n+ return sys.exit('Please create {}'.format(path))\n+\n+\ndef relevant_folders(path, algo, family, matrix_type, matrix_shape, mode):\n\"\"\"\nFinds the right folder to read the data based on given parameters\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_misc.py", "new_path": "scripts/perftest/python/utils_misc.py", "diff": "@@ -25,7 +25,9 @@ import os\nimport json\nimport re\nimport sys\n+import getpass\nfrom utils_exec import subprocess_exec\n+from utils_fs import check_hdfs_path\n# This file contains all misc utility functions required by performance test module\n@@ -361,3 +363,36 @@ def mat_type_check(current_family, matrix_types, dense_algos):\ncurrent_type.append(current_matrix_type)\nreturn current_type\n+\n+\n+def get_default_dir(temp_dir, exec_mode, config_dir):\n+ \"\"\"\n+ temp_dir: String\n+ exec_mode: String\n+ config_dir: String\n+\n+ return: String\n+ Local or HDFS home directory\n+ \"\"\"\n+\n+ if exec_mode == 'singlenode':\n+ if temp_dir is None:\n+ return config_dir\n+ if temp_dir is not None:\n+ return temp_dir\n+\n+ if exec_mode == 'hybrid_spark':\n+ cmd = ['hdfs', 'getconf', '-confKey', 'fs.default.name']\n+ hdfs_base = subprocess_exec(' '.join(cmd), extract='hdfs_base')\n+\n+ if temp_dir is None:\n+ hdfs_home = join(hdfs_base, 'user', getpass.getuser())\n+ check_hdfs_path(hdfs_home)\n+ return hdfs_home\n+\n+ if temp_dir is not None:\n+ if temp_dir.startswith('hdfs'):\n+ return temp_dir\n+ else:\n+ hdfs_home = join(hdfs_base, 'user', getpass.getuser(), temp_dir)\n+ return hdfs_home\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] fixes for HDFS path Closes #624
49,703
17.08.2017 12:02:54
25,200
830e6589fef25ff4e1f751c03609298755fcc170
Attach python artifact for install and deploy Attach the python artifact to the project using build-helper-maven-plugin's attach-artifact goal. This allows the python artifact to be installed into the local maven repository and deployed to the snapshot repository using the distribution profile. Closes
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "</execution>\n</executions>\n</plugin>\n+ <!-- Attach python artifact so it can be installed and deployed. -->\n+ <plugin>\n+ <groupId>org.codehaus.mojo</groupId>\n+ <artifactId>build-helper-maven-plugin</artifactId>\n+ <version>1.8</version>\n+ <executions>\n+ <execution>\n+ <id>attach-python-artifact</id>\n+ <phase>pre-integration-test</phase>\n+ <goals>\n+ <goal>attach-artifact</goal>\n+ </goals>\n+ <configuration>\n+ <artifacts>\n+ <artifact>\n+ <file>${basedir}/target/${project.artifactId}-${project.version}-python.tgz</file>\n+ <type>tgz</type>\n+ <classifier>python</classifier>\n+ </artifact>\n+ </artifacts>\n+ </configuration>\n+ </execution>\n+ </executions>\n+ </plugin>\n</plugins>\n</build>\n</profile>\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1844] Attach python artifact for install and deploy Attach the python artifact to the project using build-helper-maven-plugin's attach-artifact goal. This allows the python artifact to be installed into the local maven repository and deployed to the snapshot repository using the distribution profile. Closes #619.
49,738
17.08.2017 20:46:50
25,200
4c3eab89f32355b2d5d898db4411d9cde6eb8c08
Fix robustness transformdecode w/ delimiter tokens This patch makes the construction and splitting of recode map entries consistent and robust for special tokens that include the delimiter (i.e., Lop.DATATYPE_PREFIX) itself.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java", "diff": "@@ -35,11 +35,11 @@ import java.util.Map;\nimport org.apache.commons.lang.ArrayUtils;\nimport org.apache.hadoop.io.Writable;\n-import org.apache.sysml.lops.Lop;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheBlock;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\n+import org.apache.sysml.runtime.transform.encode.EncoderRecode;\nimport org.apache.sysml.runtime.util.IndexRange;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -1049,12 +1049,7 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nfor( int i=0; i<getNumRows(); i++ ) {\nObject val = ldata.get(i);\nif( val != null ) {\n- // Instead of using splitCSV which is forcing string with RFC-4180 format,\n- // using Lop.DATATYPE_PREFIX separator to split token and code\n- String[] tmp = new String[2];\n- int pos = val.toString().lastIndexOf(Lop.DATATYPE_PREFIX);\n- tmp[0] = val.toString().substring(0, pos);\n- tmp[1] = val.toString().substring(pos+1);\n+ String[] tmp = EncoderRecode.splitRecodeMapEntry(val.toString());\nmap.put(tmp[0], Long.parseLong(tmp[1]));\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/transform/decode/DecoderRecode.java", "new_path": "src/main/java/org/apache/sysml/runtime/transform/decode/DecoderRecode.java", "diff": "@@ -21,12 +21,12 @@ package org.apache.sysml.runtime.transform.decode;\nimport java.util.HashMap;\n-import org.apache.sysml.lops.Lop;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.matrix.data.FrameBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.Pair;\nimport org.apache.sysml.runtime.transform.TfUtils;\n+import org.apache.sysml.runtime.transform.encode.EncoderRecode;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n/**\n@@ -82,7 +82,7 @@ public class DecoderRecode extends Decoder\nfor( int i=0; i<meta.getNumRows(); i++ ) {\nif( meta.get(i, _colList[j]-1)==null )\nbreak; //reached end of recode map\n- String[] tmp = meta.get(i, _colList[j]-1).toString().split(Lop.DATATYPE_PREFIX);\n+ String[] tmp = EncoderRecode.splitRecodeMapEntry(meta.get(i, _colList[j]-1).toString());\nObject obj = UtilFunctions.stringToObject(_schema[_colList[j]-1], tmp[0]);\nmap.put(Long.parseLong(tmp[1]), obj);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java", "new_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java", "diff": "@@ -188,11 +188,25 @@ public class EncoderRecode extends Encoder\n/**\n* Returns the Recode map entry which consists of concatenation of code, delimiter and token.\n+ *\n* @param token is part of Recode map\n* @param code is code for token\n- * @return the concatenation of code and token with delimiter in between\n+ * @return the concatenation of token and code with delimiter in between\n*/\npublic static String constructRecodeMapEntry(String token, Long code) {\nreturn token + Lop.DATATYPE_PREFIX + code.toString();\n}\n+\n+ /**\n+ * Splits a Recode map entry into its token and code.\n+ *\n+ * @param value concatenation of token and code with delimiter in between\n+ * @return string array of token and 
code\n+ */\n+ public static String[] splitRecodeMapEntry(String value) {\n+ // Instead of using splitCSV which is forcing string with RFC-4180 format,\n+ // using Lop.DATATYPE_PREFIX separator to split token and code\n+ int pos = value.toString().lastIndexOf(Lop.DATATYPE_PREFIX);\n+ return new String[] {value.substring(0, pos), value.substring(pos+1)};\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1850] Fix robustness transformdecode w/ delimiter tokens This patch makes the construction and splitting of recode map entries consistent and robust for special tokens that include the delimiter (i.e., Lop.DATATYPE_PREFIX) itself.
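The key observation behind the patch: the numeric code after the delimiter can never contain the delimiter, while the token before it can, so splitting on the last occurrence is always unambiguous. A self-contained sketch of the now-symmetric construct/split pair (the `#` delimiter below is merely a stand-in for Lop.DATATYPE_PREFIX):

```java
public class RecodeMapEntry {
    // Stand-in delimiter; the real code uses Lop.DATATYPE_PREFIX.
    private static final String DELIM = "#";

    // Entry layout: <token><delimiter><code>
    public static String construct(String token, long code) {
        return token + DELIM + code;
    }

    // Split on the LAST delimiter occurrence, so tokens that
    // themselves contain the delimiter round-trip correctly.
    public static String[] split(String entry) {
        int pos = entry.lastIndexOf(DELIM);
        return new String[] { entry.substring(0, pos), entry.substring(pos + 1) };
    }

    public static void main(String[] args) {
        String[] parts = split(construct("a#b", 7)); // token contains the delimiter
        System.out.println(parts[0] + " / " + parts[1]); // prints: a#b / 7
    }
}
```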
49,738
17.08.2017 22:40:32
25,200
8c87d2a2f3472d5516113a65817defe5ce6989a6
Fix rewrite outer-rexpand (generalized max handling) This patch fixes a static rewrite issue of outer to rexpand, which failed whenever the sequence's 'to' parameter was not a literal after constant folding. We extended this rewrite to arbitrary hops and generalized the related size propagation and memory estimates accordingly.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ParameterizedBuiltinOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ParameterizedBuiltinOp.java", "diff": "@@ -1025,15 +1025,15 @@ public class ParameterizedBuiltinOp extends Hop implements MultiThreadedHop\n//but very good sparsity estimate possible (number of non-zeros in input)\nHop max = getInput().get(_paramIndexMap.get(\"max\"));\nHop dir = getInput().get(_paramIndexMap.get(\"dir\"));\n- double maxVal = HopRewriteUtils.getDoubleValueSafe((LiteralOp)max);\n+ long maxVal = computeDimParameterInformation(max, memo);\nString dirVal = ((LiteralOp)dir).getStringValue();\nif( mc.dimsKnown() ) {\nlong lnnz = mc.nnzKnown() ? mc.getNonZeros() : mc.getRows();\nif( \"cols\".equals(dirVal) ) { //expand horizontally\n- ret = new long[]{mc.getRows(), UtilFunctions.toLong(maxVal), lnnz};\n+ ret = new long[]{mc.getRows(), maxVal, lnnz};\n}\nelse if( \"rows\".equals(dirVal) ){ //expand vertically\n- ret = new long[]{UtilFunctions.toLong(maxVal), mc.getRows(), lnnz};\n+ ret = new long[]{maxVal, mc.getRows(), lnnz};\n}\n}\n}\n@@ -1164,7 +1164,7 @@ public class ParameterizedBuiltinOp extends Hop implements MultiThreadedHop\nHop target = getInput().get(_paramIndexMap.get(\"target\"));\nHop max = getInput().get(_paramIndexMap.get(\"max\"));\nHop dir = getInput().get(_paramIndexMap.get(\"dir\"));\n- double maxVal = HopRewriteUtils.getDoubleValueSafe((LiteralOp)max);\n+ double maxVal = computeSizeInformation(max);\nString dirVal = ((LiteralOp)dir).getStringValue();\nif( \"cols\".equals(dirVal) ) { //expand horizontally\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -456,6 +456,11 @@ public class HopRewriteUtils\nreturn datagen;\n}\n+ public static boolean isDataGenOp(Hop hop, DataGenMethod... 
ops) {\n+ return (hop instanceof DataGenOp\n+ && ArrayUtils.contains(ops, ((DataGenOp)hop).getOp()));\n+ }\n+\npublic static boolean isDataGenOpWithConstantValue(Hop hop, double value) {\nreturn hop instanceof DataGenOp\n&& ((DataGenOp)hop).getOp()==DataGenMethod.RAND\n@@ -989,17 +994,13 @@ public class HopRewriteUtils\nreturn ret;\n}\n- public static LiteralOp getBasic1NSequenceMaxLiteral(Hop hop)\n+ public static Hop getBasic1NSequenceMax(Hop hop)\nthrows HopsException\n{\n- if( hop instanceof DataGenOp )\n- {\n+ if( isDataGenOp(hop, DataGenMethod.SEQ) ) {\nDataGenOp dgop = (DataGenOp) hop;\n- if( dgop.getOp() == DataGenMethod.SEQ ){\n- Hop to = dgop.getInput().get(dgop.getParamIndex(Statement.SEQ_TO));\n- if( to instanceof LiteralOp )\n- return (LiteralOp)to;\n- }\n+ return dgop.getInput()\n+ .get(dgop.getParamIndex(Statement.SEQ_TO));\n}\nthrow new HopsException(\"Failed to retrieve 'to' argument from basic 1-N sequence.\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -1641,7 +1641,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\n//setup input parameter hops\nHashMap<String,Hop> inputargs = new HashMap<String,Hop>();\ninputargs.put(\"target\", trgt);\n- inputargs.put(\"max\", HopRewriteUtils.getBasic1NSequenceMaxLiteral(seq));\n+ inputargs.put(\"max\", HopRewriteUtils.getBasic1NSequenceMax(seq));\ninputargs.put(\"dir\", new LiteralOp(direction));\ninputargs.put(\"ignore\", new LiteralOp(true));\ninputargs.put(\"cast\", new LiteralOp(false));\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1852] Fix rewrite outer-rexpand (generalized max handling) This patch fixes a static rewrite issue of outer to rexpand, which failed whenever the sequence's 'to' parameter was not a literal after constant folding. We extended this rewrite to arbitrary hops and generalized the related size propagation and memory estimates accordingly.
49,738
18.08.2017 21:13:56
25,200
129710a01458a9e6bae8806b8d982e580ebb225e
Fix NPE on transformapply w/ empty recode map
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java", "new_path": "src/main/java/org/apache/sysml/runtime/transform/encode/EncoderRecode.java", "diff": "@@ -59,6 +59,8 @@ public class EncoderRecode extends Encoder\n}\nprivate long lookupRCDMap(int colID, String key) {\n+ if( !_rcdMaps.containsKey(colID) )\n+ return -1; //empty recode map\nLong tmp = _rcdMaps.get(colID).get(key);\nreturn (tmp!=null) ? tmp : -1;\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/transform/TransformApplyEmptyRecodeMapTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.transform;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.matrix.data.FrameBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.transform.encode.Encoder;\n+import org.apache.sysml.runtime.transform.encode.EncoderFactory;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class TransformApplyEmptyRecodeMapTest extends AutomatedTestBase\n+{\n+ private static final int rows = 7;\n+ private static final int cols = 1;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ }\n+\n+ @Test\n+ public void testTransformApplyEmptyRecodeMap() {\n+ try {\n+ //generate input data\n+ FrameBlock data = DataConverter.convertToFrameBlock(\n+ DataConverter.convertToMatrixBlock(getRandomMatrix(rows, cols, 1, 1, 1, 7)));\n+ FrameBlock meta = new FrameBlock(new ValueType[]{ValueType.STRING}, new String[]{\"C1\"});\n+\n+ //execute transform apply\n+ Encoder encoder = EncoderFactory.createEncoder(\n+ \"{ids:true, recode:[1]}\", data.getColumnNames(), meta.getSchema(), meta);\n+ MatrixBlock out = encoder.apply(data, new MatrixBlock(rows, cols, true));\n+\n+ //check outputs\n+ Assert.assertEquals(rows, out.getNumRows());\n+ Assert.assertEquals(cols, out.getNumColumns());\n+ for(int i=0; i<rows; i++)\n+ for(int j=0; j<cols; j++)\n+ Assert.assertTrue(Double.isNaN(out.quickGetValue(i, j)));\n+ }\n+ catch (DMLRuntimeException e) {\n+ throw new RuntimeException(e);\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/transform/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/transform/ZPackageSuite.java", "diff": "@@ -27,6 
+27,7 @@ import org.junit.runners.Suite;\n@RunWith(Suite.class)\[email protected]({\nFrameCSVReadWriteTest.class,\n+ TransformApplyEmptyRecodeMapTest.class,\nTransformCSVFrameEncodeDecodeTest.class,\nTransformCSVFrameEncodeReadTest.class,\nTransformEncodeDecodeTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1854] Fix NPE on transformapply w/ empty recode map
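The fix is a single guard: a column with an entirely empty recode map is treated like any unseen key and yields the -1 sentinel (which downstream becomes NaN in the output, as the added test asserts). Distilled into a standalone sketch:

```java
import java.util.HashMap;
import java.util.Map;

public class RecodeLookup {
    private final Map<Integer, Map<String, Long>> rcdMaps = new HashMap<>();

    // -1 signals "no code": returned both for unseen keys and for
    // columns whose recode map is empty (previously an NPE).
    public long lookupRCDMap(int colID, String key) {
        if (!rcdMaps.containsKey(colID))
            return -1; // empty recode map
        Long tmp = rcdMaps.get(colID).get(key);
        return (tmp != null) ? tmp : -1;
    }

    public static void main(String[] args) {
        System.out.println(new RecodeLookup().lookupRCDMap(1, "foo")); // -1, no NPE
    }
}
```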
49,738
22.08.2017 19:26:00
25,200
06fa73acc4639dd446c2f36c62a956803c247753
Performance codegen outer operators (degree of par)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "diff": "@@ -51,7 +51,6 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class SpoofCellwise extends SpoofOperator implements Serializable\n{\nprivate static final long serialVersionUID = 3442528770573293590L;\n- private static final long PAR_NUMCELL_THRESHOLD = 1024*1024; //Min 1M elements\npublic enum CellType {\nNO_AGG,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java", "diff": "@@ -48,7 +48,6 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class SpoofMultiAggregate extends SpoofOperator implements Serializable\n{\nprivate static final long serialVersionUID = -6164871955591089349L;\n- private static final long PAR_NUMCELL_THRESHOLD = 1024*1024; //Min 1M elements\nprivate final AggOp[] _aggOps;\nprivate final boolean _sparseSafe;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java", "diff": "@@ -37,6 +37,10 @@ public abstract class SpoofOperator implements Serializable\nprivate static final long serialVersionUID = 3834006998853573319L;\nprivate static final Log LOG = LogFactory.getLog(SpoofOperator.class.getName());\n+ protected static final long PAR_NUMCELL_THRESHOLD = 1024*1024; //Min 1M elements\n+ protected static final long PAR_MINFLOP_THRESHOLD = 2L*1024*1024; //MIN 2 MFLOP\n+\n+\npublic abstract MatrixBlock execute(ArrayList<MatrixBlock> inputs, ArrayList<ScalarObject> scalars, MatrixBlock out)\nthrows DMLRuntimeException;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOuterProduct.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOuterProduct.java", "diff": "@@ -112,6 +112,9 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nif( inputs.get(0).isEmptyBlock(false) )\nreturn new DoubleObject(0);\n+ if( 2*inputs.get(0).getNonZeros()*inputs.get(1).getNumColumns() < PAR_MINFLOP_THRESHOLD )\n+ return execute(inputs, scalarObjects); //sequential\n+\n//input preparation\ndouble[][] ab = getDenseMatrices(prepInputMatrices(inputs, 1, 2, true, false));\ndouble[][] b = getDenseMatrices(prepInputMatrices(inputs, 3, true));\n@@ -121,15 +124,14 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nfinal int m = inputs.get(0).getNumRows();\nfinal int n = inputs.get(0).getNumColumns();\nfinal int k = inputs.get(1).getNumColumns(); // rank\n+ final long nnz = inputs.get(0).getNonZeros();\ndouble sum = 0;\ntry\n{\nExecutorService pool = Executors.newFixedThreadPool(k);\nArrayList<ParOuterProdAggTask> tasks = new ArrayList<ParOuterProdAggTask>();\n- //create tasks (for wdivmm-left, parallelization over columns;\n- //for wdivmm-right, parallelization over rows; both ensure disjoint results)\n- int numThreads2 = UtilFunctions.roundToNext(Math.min(8*k,m/32), k);\n+ int numThreads2 = getPreferredNumberOfTasks(m, n, nnz, k, numThreads);\nint blklen = (int)(Math.ceil((double)m/numThreads2));\nfor( int i=0; i<numThreads2 & i*blklen<m; i++ )\ntasks.add(new ParOuterProdAggTask(inputs.get(0), ab[0], ab[1], b, scalars,\n@@ -259,6 +261,9 @@ public abstract class SpoofOuterProduct extends 
SpoofOperator\nout.allocateDenseBlock();\n}\n+ if( 2*inputs.get(0).getNonZeros()*inputs.get(1).getNumColumns() < PAR_MINFLOP_THRESHOLD )\n+ return execute(inputs, scalarObjects, out); //sequential\n+\n//input preparation\ndouble[][] ab = getDenseMatrices(prepInputMatrices(inputs, 1, 2, true, false));\ndouble[][] b = getDenseMatrices(prepInputMatrices(inputs, 3, true));\n@@ -268,6 +273,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nfinal int m = inputs.get(0).getNumRows();\nfinal int n = inputs.get(0).getNumColumns();\nfinal int k = inputs.get(1).getNumColumns(); // rank\n+ final long nnz = inputs.get(0).getNonZeros();\nMatrixBlock a = inputs.get(0);\n@@ -284,21 +290,24 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nint numCG = ((CompressedMatrixBlock)a).getNumColGroups();\nint blklen = (int)(Math.ceil((double)numCG/numThreads));\nfor( int j=0; j<numThreads & j*blklen<numCG; j++ )\n- tasks.add(new ParExecTask(a, ab[0], ab[1], b, scalars, out, m, n, k, _outerProductType, 0, m, j*blklen, Math.min((j+1)*blklen, numCG)));\n+ tasks.add(new ParExecTask(a, ab[0], ab[1], b, scalars, out, m, n, k,\n+ _outerProductType, 0, m, j*blklen, Math.min((j+1)*blklen, numCG)));\n}\nelse {\n//parallelize over column partitions\nint blklen = (int)(Math.ceil((double)n/numThreads));\nfor( int j=0; j<numThreads & j*blklen<n; j++ )\n- tasks.add(new ParExecTask(a, ab[0], ab[1], b, scalars, out, m, n, k, _outerProductType, 0, m, j*blklen, Math.min((j+1)*blklen, n)));\n+ tasks.add(new ParExecTask(a, ab[0], ab[1], b, scalars, out, m, n, k,\n+ _outerProductType, 0, m, j*blklen, Math.min((j+1)*blklen, n)));\n}\n}\nelse { //right or cell-wise\n//parallelize over row partitions\n- int numThreads2 = UtilFunctions.roundToNext(Math.min(8*k,m/32), k);\n+ int numThreads2 = getPreferredNumberOfTasks(m, n, nnz, k, numThreads);\nint blklen = (int)(Math.ceil((double)m/numThreads2));\nfor( int i=0; i<numThreads2 & i*blklen<m; i++ )\n- tasks.add(new ParExecTask(a, ab[0], ab[1], b, scalars, out, m, n, k, _outerProductType, i*blklen, Math.min((i+1)*blklen,m), 0, n));\n+ tasks.add(new ParExecTask(a, ab[0], ab[1], b, scalars, out, m, n, k,\n+ _outerProductType, i*blklen, Math.min((i+1)*blklen,m), 0, n));\n}\nList<Future<Long>> taskret = pool.invokeAll(tasks);\npool.shutdown();\n@@ -320,6 +329,13 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nreturn out;\n}\n+ private static int getPreferredNumberOfTasks(int m, int n, long nnz, int rank, int k) {\n+ //compute number of tasks nk in range [k, 8k]\n+ int base = (int) Math.min(Math.min(8*k, m/32),\n+ Math.ceil((double)2*nnz*rank/PAR_MINFLOP_THRESHOLD));\n+ return UtilFunctions.roundToNext(base, k);\n+ }\n+\nprivate void executeDense(double[] a, double[] u, double[] v, double[][] b, double[] scalars,\ndouble[] c, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu )\n{\n@@ -427,6 +443,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nif( !out.isInSparseFormat() ) //DENSE\n{\ndouble[] c = out.getDenseBlock();\n+ double tmp = 0;\nfor( int bi=rl; bi<ru; bi+=blocksizeIJ ) {\nint bimin = Math.min(ru, bi+blocksizeIJ);\n//prepare starting indexes for block row\n@@ -441,16 +458,20 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nint[] wix = sblock.indexes(i);\ndouble[] wval = sblock.values(i);\nint index = wpos + curk[i-bi];\n- for( ; index<wpos+wlen && wix[index]<bjmin; index++ ) {\nif( type == OutProdType.CELLWISE_OUTER_PRODUCT )\n- c[wix[index]] = genexecCellwise( wval[index], u, uix, v, 
wix[index]*k, b, scalars, m, n, k, i, wix[index] );\n+ for( ; index<wpos+wlen && wix[index]<bjmin; index++ )\n+ c[wix[index]] = genexecCellwise( wval[index],\n+ u, uix, v, wix[index]*k, b, scalars, m, n, k, i, wix[index] );\nelse\n- c[0] += genexecCellwise( wval[index], u, uix, v, wix[index]*k, b, scalars, m, n, k, i, wix[index]);\n- }\n+ for( ; index<wpos+wlen && wix[index]<bjmin; index++ )\n+ tmp += genexecCellwise( wval[index],\n+ u, uix, v, wix[index]*k, b, scalars, m, n, k, i, wix[index]);\ncurk[i-bi] = index - wpos;\n}\n}\n}\n+ if( type != OutProdType.CELLWISE_OUTER_PRODUCT )\n+ c[0] = tmp;\n}\nelse //SPARSE\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "diff": "@@ -44,7 +44,6 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class SpoofRowwise extends SpoofOperator\n{\nprivate static final long serialVersionUID = 6242910797139642998L;\n- private static final long PAR_NUMCELL_THRESHOLD = 1024*1024; //Min 1M elements\npublic enum RowType {\nNO_AGG, //no aggregation\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1857] Performance codegen outer operators (degree of par)
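A minimal Java sketch of the FLOP-aware degree-of-parallelism heuristic this commit introduces: the `PAR_MINFLOP_THRESHOLD` constant and the task-count formula follow the diff above, while the standalone class, the local `roundToNext` helper (a stand-in for `UtilFunctions.roundToNext`), and the example numbers are assumptions for illustration.

```java
// Sketch: pick a task count in [k, 8k] -- enough tasks for load balance over m rows,
// but no more than the ~2*nnz*rank FLOPs of the outer-product operation justify.
public class ParDegreeSketch {
    private static final long PAR_MINFLOP_THRESHOLD = 2L * 1024 * 1024; // min ~2 MFLOP per task

    public static int preferredNumTasks(int m, long nnz, int rank, int k) {
        int base = (int) Math.min(Math.min(8L * k, m / 32),
            (long) Math.ceil((double) 2 * nnz * rank / PAR_MINFLOP_THRESHOLD));
        return roundToNext(base, k); // align task count to a multiple of k threads
    }

    // Round val up to the next multiple of factor (hypothetical local helper)
    private static int roundToNext(int val, int factor) {
        return ((val + factor - 1) / factor) * factor;
    }

    public static void main(String[] args) {
        // e.g., 1M x 1k input with 10M non-zeros, rank-100 factors, 16 threads -> 128 tasks
        System.out.println(preferredNumTasks(1_000_000, 10_000_000L, 100, 16));
    }
}
```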
49,703
23.08.2017 17:13:50
25,200
10f8e23d55c26e676ae3f07a62e51b8397b8a8b0
[MINOR] Function with no args gives ArrayIndexOutOfBoundsException Handle built-in function calls with no args (such as 'x=min();'), since this could give an ArrayIndexOutOfBoundsException in the toString() of BuiltinFunctionExpression during live variable analysis. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -328,6 +328,9 @@ public class BuiltinFunctionExpression extends DataIdentifier\nDataIdentifier output = new DataIdentifier(outputName);\noutput.setAllPositions(this.getFilename(), this.getBeginLine(), this.getBeginColumn(), this.getEndLine(), this.getEndColumn());\n+ if (this.getFirstExpr() == null) {\n+ raiseValidateError(\"Function \" + this + \" has no arguments.\", false);\n+ }\nIdentifier id = this.getFirstExpr().getOutput();\noutput.setProperties(this.getFirstExpr().getOutput());\noutput.setNnz(-1); //conservatively, cannot use input nnz!\n@@ -1330,11 +1333,15 @@ public class BuiltinFunctionExpression extends DataIdentifier\n}\npublic String toString() {\n- StringBuilder sb = new StringBuilder(_opcode.toString() + \"(\" + _args[0].toString());\n- for(int i=1; i < _args.length; i++) {\n+ StringBuilder sb = new StringBuilder(_opcode.toString() + \"(\");\n+ if (_args != null) {\n+ for (int i = 0; i < _args.length; i++) {\n+ if (i > 0) {\nsb.append(\",\");\n+ }\nsb.append(_args[i].toString());\n}\n+ }\nsb.append(\")\");\nreturn sb.toString();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Function with no args gives ArrayIndexOutOfBoundsException Handle built-in function calls with no args (such as 'x=min();'), since this could give an ArrayIndexOutOfBoundsException in the toString() of BuiltinFunctionExpression during live variable analysis. Closes #630.
49,698
23.08.2017 17:49:28
25,200
7e3c0360951ccd46aabf8f9ee12aee6d04947a60
New svd builtin function (compiler/runtime, local only) Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "diff": "@@ -167,6 +167,12 @@ public class FunctionOp extends Hop\nlong outputValues = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(1).getDim1(), 1, 1.0);\nreturn outputVectors+outputValues;\n}\n+ else if ( getFunctionName().equalsIgnoreCase(\"svd\") ) {\n+ long outputU = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(0).getDim1(), getOutputs().get(0).getDim2(), 1.0);\n+ long outputSigma = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(1).getDim1(), getOutputs().get(1).getDim2(), 1.0);\n+ long outputV = OptimizerUtils.estimateSizeExactSparsity(getOutputs().get(2).getDim1(), getOutputs().get(2).getDim2(), 1.0);\n+ return outputU+outputSigma+outputV;\n+ }\nelse\nthrow new RuntimeException(\"Invalid call of computeOutputMemEstimate in FunctionOp.\");\n}\n@@ -197,6 +203,10 @@ public class FunctionOp extends Hop\n//System.out.println(\"EigenInter \" + interOutput/1024/1024);\nreturn interOutput;\n}\n+ else if ( getFunctionName().equalsIgnoreCase(\"svd\")) {\n+ double interOutput = OptimizerUtils.estimateSizeExactSparsity(1, getInput().get(0).getDim2(), 1.0);\n+ return interOutput;\n+ }\nelse\nthrow new RuntimeException(\"Invalid call of computeIntermediateMemEstimate in FunctionOp.\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -1024,6 +1024,7 @@ public abstract class Hop\nNOT, ABS, SIN, COS, TAN, ASIN, ACOS, ATAN, SIGN, SQRT, LOG, EXP,\nCAST_AS_SCALAR, CAST_AS_MATRIX, CAST_AS_FRAME, CAST_AS_DOUBLE, CAST_AS_INT, CAST_AS_BOOLEAN,\nPRINT, EIGEN, NROW, NCOL, LENGTH, ROUND, IQM, STOP, CEIL, FLOOR, MEDIAN, INVERSE, CHOLESKY,\n+ SVD,\n//cumulative sums, products, extreme values\nCUMSUM, CUMPROD, CUMMIN, CUMMAX,\n//fused ML-specific operators for performance\n@@ -1317,6 +1318,7 @@ public abstract class Hop\nHopsOpOp12String.put(OpOp1.CAST_AS_SCALAR, \"castAsScalar\");\nHopsOpOp12String.put(OpOp1.COS, \"cos\");\nHopsOpOp12String.put(OpOp1.EIGEN, \"eigen\");\n+ HopsOpOp12String.put(OpOp1.SVD, \"svd\");\nHopsOpOp12String.put(OpOp1.EXP, \"exp\");\nHopsOpOp12String.put(OpOp1.IQM, \"iqm\");\nHopsOpOp12String.put(OpOp1.MEDIAN, \"median\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "diff": "@@ -693,7 +693,7 @@ public class UnaryOp extends Hop implements MultiThreadedHop\n//ensure cp exec type for single-node operations\nif( _op == OpOp1.PRINT || _op == OpOp1.STOP\n- || _op == OpOp1.INVERSE || _op == OpOp1.EIGEN || _op == OpOp1.CHOLESKY )\n+ || _op == OpOp1.INVERSE || _op == OpOp1.EIGEN || _op == OpOp1.CHOLESKY || _op == OpOp1.SVD)\n{\n_etype = ExecType.CP;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/cost/CostEstimatorStaticRuntime.java", "new_path": "src/main/java/org/apache/sysml/hops/cost/CostEstimatorStaticRuntime.java", "diff": "@@ -994,13 +994,15 @@ public class CostEstimatorStaticRuntime extends CostEstimator\n//note: should be invoked independently for multiple outputs\nreturn d1m * d1n * d1s * DEFAULT_NFLOP_UNKNOWN;\n- case MultiReturnBuiltin: //opcodes: qr, lu, eigen\n+ case MultiReturnBuiltin: //opcodes: qr, lu, eigen, svd\n//note: they all have cubic complexity, the scaling factor refers to commons.math\ndouble xf = 2; 
//default e.g, qr\nif( optype.equals(\"eigen\") )\nxf = 32;\nelse if ( optype.equals(\"lu\") )\nxf = 16;\n+ else if ( optype.equals(\"svd\"))\n+ xf = 32; // TODO - assuming worst case for now\nreturn xf * d1m * d1n * d1n; //for 1kx1k ~ 2GFLOP -> 1s\ncase ParameterizedBuiltin: //opcodes: cdf, invcdf, groupedagg, rmempty\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -200,6 +200,37 @@ public class BuiltinFunctionExpression extends DataIdentifier\nbreak;\n+ case SVD:\n+ checkNumParameters(1);\n+ checkMatrixParam(getFirstExpr());\n+\n+ long minMN = Math.min(getFirstExpr().getOutput().getDim1(), getFirstExpr().getOutput().getDim2());\n+\n+ // setup output properties\n+ DataIdentifier svdOut1 = (DataIdentifier) getOutputs()[0];\n+ DataIdentifier svdOut2 = (DataIdentifier) getOutputs()[1];\n+ DataIdentifier svdOut3 = (DataIdentifier) getOutputs()[2];\n+\n+ // Output 1\n+ svdOut1.setDataType(DataType.MATRIX);\n+ svdOut1.setValueType(ValueType.DOUBLE);\n+ svdOut1.setDimensions(getFirstExpr().getOutput().getDim1(), minMN);\n+ svdOut1.setBlockDimensions(getFirstExpr().getOutput().getRowsInBlock(), getFirstExpr().getOutput().getColumnsInBlock());\n+\n+ // Output 2\n+ svdOut2.setDataType(DataType.MATRIX);\n+ svdOut2.setValueType(ValueType.DOUBLE);\n+ svdOut2.setDimensions(minMN, minMN);\n+ svdOut2.setBlockDimensions(getFirstExpr().getOutput().getRowsInBlock(), getFirstExpr().getOutput().getColumnsInBlock());\n+\n+ // Output 3\n+ svdOut3.setDataType(DataType.MATRIX);\n+ svdOut3.setValueType(ValueType.DOUBLE);\n+ svdOut3.setDimensions(getFirstExpr().getOutput().getDim2(), minMN);\n+ svdOut3.setBlockDimensions(getFirstExpr().getOutput().getRowsInBlock(), getFirstExpr().getOutput().getColumnsInBlock());\n+\n+ break;\n+\ndefault: //always unconditional\nraiseValidateError(\"Unknown Builtin Function opcode: \" + _opcode, false);\n}\n@@ -1214,7 +1245,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\nelse {\n// always unconditional (because unsupported operation)\nBuiltinFunctionOp op = getOpCode();\n- if( op==BuiltinFunctionOp.EIGEN || op==BuiltinFunctionOp.LU || op==BuiltinFunctionOp.QR )\n+ if( op==BuiltinFunctionOp.EIGEN || op==BuiltinFunctionOp.LU || op==BuiltinFunctionOp.QR || op==BuiltinFunctionOp.SVD)\nraiseValidateError(\"Function \"+op+\" needs to be called with multi-return assignment.\", false, LanguageErrorCodes.INVALID_PARAMETERS);\nelse\nraiseValidateError(\"Unsupported function \"+op, false, LanguageErrorCodes.INVALID_PARAMETERS);\n@@ -1256,6 +1287,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\ncase QR:\ncase LU:\ncase EIGEN:\n+ case SVD:\nreturn true;\ndefault:\nreturn false;\n@@ -1667,6 +1699,8 @@ public class BuiltinFunctionExpression extends DataIdentifier\nbifop = Expression.BuiltinFunctionOp.INVERSE;\nelse if (functionName.equals(\"cholesky\"))\nbifop = Expression.BuiltinFunctionOp.CHOLESKY;\n+ else if (functionName.equals(\"svd\"))\n+ bifop = Expression.BuiltinFunctionOp.SVD;\nelse if (functionName.equals(\"sample\"))\nbifop = Expression.BuiltinFunctionOp.SAMPLE;\nelse if ( functionName.equals(\"outer\") )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -2471,6 +2471,7 @@ public class DMLTranslator\ncase QR:\ncase LU:\ncase EIGEN:\n+ case SVD:\n// 
Number of outputs = size of targetList = #of identifiers in source.getOutputs\nString[] outputNames = new String[targetList.size()];\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/Expression.java", "new_path": "src/main/java/org/apache/sysml/parser/Expression.java", "diff": "@@ -125,6 +125,7 @@ public abstract class Expression\nSOLVE,\nSQRT,\nSUM,\n+ SVD,\nTABLE,\nTAN,\nTRACE,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "diff": "@@ -267,11 +267,13 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"qr\", CPINSTRUCTION_TYPE.MultiReturnBuiltin);\nString2CPInstructionType.put( \"lu\", CPINSTRUCTION_TYPE.MultiReturnBuiltin);\nString2CPInstructionType.put( \"eigen\", CPINSTRUCTION_TYPE.MultiReturnBuiltin);\n+ String2CPInstructionType.put( \"svd\", CPINSTRUCTION_TYPE.MultiReturnBuiltin);\nString2CPInstructionType.put( \"partition\", CPINSTRUCTION_TYPE.Partition);\nString2CPInstructionType.put( \"compress\", CPINSTRUCTION_TYPE.Compression);\nString2CPInstructionType.put( \"spoof\", CPINSTRUCTION_TYPE.SpoofFused);\n+\n//CP FILE instruction\nString2CPFileInstructionType = new HashMap<String, CPINSTRUCTION_TYPE>();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/MultiReturnBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/MultiReturnBuiltinCPInstruction.java", "diff": "@@ -89,6 +89,17 @@ public class MultiReturnBuiltinCPInstruction extends ComputationCPInstruction\nreturn new MultiReturnBuiltinCPInstruction(null, in1, outputs, opcode, str);\n+ }\n+ else if ( opcode.equalsIgnoreCase(\"svd\") ) {\n+ CPOperand in1 = new CPOperand(parts[1]);\n+\n+ // one input and three outputs\n+ outputs.add ( new CPOperand(parts[2], ValueType.DOUBLE, DataType.MATRIX) );\n+ outputs.add ( new CPOperand(parts[3], ValueType.DOUBLE, DataType.MATRIX) );\n+ outputs.add ( new CPOperand(parts[4], ValueType.DOUBLE, DataType.MATRIX) );\n+\n+ return new MultiReturnBuiltinCPInstruction(null, in1, outputs, opcode, str);\n+\n}\nelse {\nthrow new DMLRuntimeException(\"Invalid opcode in MultiReturnBuiltin instruction: \" + opcode);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibCommonsMath.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibCommonsMath.java", "diff": "@@ -26,6 +26,7 @@ import org.apache.commons.math3.linear.EigenDecomposition;\nimport org.apache.commons.math3.linear.LUDecomposition;\nimport org.apache.commons.math3.linear.QRDecomposition;\nimport org.apache.commons.math3.linear.RealMatrix;\n+import org.apache.commons.math3.linear.SingularValueDecomposition;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.util.DataConverter;\n@@ -48,7 +49,7 @@ public class LibCommonsMath\n}\npublic static boolean isSupportedMultiReturnOperation( String opcode ) {\n- return ( opcode.equals(\"qr\") || opcode.equals(\"lu\") || opcode.equals(\"eigen\") );\n+ return ( opcode.equals(\"qr\") || opcode.equals(\"lu\") || opcode.equals(\"eigen\") || opcode.equals(\"svd\") );\n}\npublic static boolean isSupportedMatrixMatrixOperation( String opcode ) {\n@@ -75,6 +76,8 @@ public class LibCommonsMath\nreturn 
computeLU(in);\nelse if (opcode.equals(\"eigen\"))\nreturn computeEigen(in);\n+ else if ( opcode.equals(\"svd\"))\n+ return computeSvd(in);\nreturn null;\n}\n@@ -217,6 +220,33 @@ public class LibCommonsMath\nreturn new MatrixBlock[] { mbValues, mbVectors };\n}\n+\n+ /**\n+ * Performs Singular Value Decomposition. Calls Apache Commons Math SVD.\n+ * X = U * Sigma * Vt, where X is the input matrix,\n+ * U is the left singular matrix, Sigma is the singular values matrix returned as a\n+ * column matrix and Vt is the transpose of the right singular matrix V.\n+ * However, the returned array has { U, Sigma, V}\n+ *\n+ * @param in Input matrix\n+ * @return An array containing U, Sigma & V\n+ * @throws DMLRuntimeException\n+ */\n+ private static MatrixBlock[] computeSvd(MatrixObject in) throws DMLRuntimeException {\n+ Array2DRowRealMatrix matrixInput = DataConverter.convertToArray2DRowRealMatrix(in);\n+\n+ SingularValueDecomposition svd = new SingularValueDecomposition(matrixInput);\n+ double[] sigma = svd.getSingularValues();\n+ RealMatrix u = svd.getU();\n+ RealMatrix v = svd.getV();\n+ MatrixBlock U = DataConverter.convertToMatrixBlock(u.getData());\n+ MatrixBlock Sigma = DataConverter.convertToMatrixBlock(sigma, true);\n+ Sigma = LibMatrixReorg.diag(Sigma, new MatrixBlock(Sigma.rlen, Sigma.rlen, true));\n+ MatrixBlock V = DataConverter.convertToMatrixBlock(v.getData());\n+\n+ return new MatrixBlock[] { U, Sigma, V };\n+ }\n+\n/**\n* Function to compute matrix inverse via matrix decomposition.\n*\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/unary/matrix/SVDFactorizeTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.unary.matrix;\n+\n+import org.junit.Test;\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+\n+public class SVDFactorizeTest extends AutomatedTestBase\n+{\n+\n+ private final static String TEST_NAME1 = \"svd\";\n+ private final static String TEST_DIR = \"functions/unary/matrix/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + SVDFactorizeTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows1 = 500;\n+ private final static int rows2 = 2500;\n+ private final static int cols1 = 400;\n+ private final static int cols2 = 2200;\n+ private final static double sparsity = 0.9;\n+\n+ @Override\n+ public void setUp()\n+ {\n+ addTestConfiguration(\n+ TEST_NAME1,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1,\n+ new String[] { \"D\" }) );\n+ }\n+\n+ @Test\n+ public void testSVDFactorizeDenseCP()\n+ {\n+ runTestSVDFactorize( rows1, cols1, RUNTIME_PLATFORM.SINGLE_NODE );\n+ }\n+\n+ @Test\n+ public void testSVDFactorizeDenseSP()\n+ {\n+ runTestSVDFactorize( rows1, cols1, RUNTIME_PLATFORM.SPARK );\n+ }\n+\n+ @Test\n+ public void testSVDFactorizeDenseMR()\n+ {\n+ runTestSVDFactorize( rows1, cols1, RUNTIME_PLATFORM.HADOOP );\n+ }\n+\n+ @Test\n+ public void testSVDFactorizeDenseHybrid()\n+ {\n+ runTestSVDFactorize( rows1, cols1, RUNTIME_PLATFORM.HYBRID );\n+ }\n+\n+ @Test\n+ public void testLargeSVDFactorizeDenseCP()\n+ {\n+ runTestSVDFactorize( rows2, cols2, RUNTIME_PLATFORM.SINGLE_NODE );\n+ }\n+\n+ @Test\n+ public void testLargeSVDFactorizeDenseSP()\n+ {\n+ runTestSVDFactorize( rows2, cols2, RUNTIME_PLATFORM.SPARK );\n+ }\n+\n+ @Test\n+ public void testLargeSVDFactorizeDenseMR()\n+ {\n+ runTestSVDFactorize( rows2, cols2, RUNTIME_PLATFORM.HADOOP );\n+ }\n+\n+ @Test\n+ public void testLargeSVDFactorizeDenseHybrid()\n+ {\n+ runTestSVDFactorize( rows2, cols2, RUNTIME_PLATFORM.HYBRID );\n+ }\n+\n+ private void runTestSVDFactorize( int rows, int cols, RUNTIME_PLATFORM rt)\n+ {\n+ RUNTIME_PLATFORM rtold = rtplatform;\n+ rtplatform = rt;\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ getAndLoadTestConfiguration(TEST_NAME1);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME1 + \".dml\";\n+ programArgs = new String[]{\"-args\", input(\"A\"), output(\"D\") };\n+\n+ double[][] A = getRandomMatrix(rows, cols, 0, 1, sparsity, 10);\n+ MatrixCharacteristics mc = new MatrixCharacteristics(rows, cols, -1, -1, -1);\n+ writeInputMatrixWithMTD(\"A\", A, false, mc);\n+\n+ // Expected matrix = 1x1 zero matrix\n+ double[][] D = new double[1][1];\n+ D[0][0] = 0.0;\n+ writeExpectedMatrix(\"D\", D);\n+\n+ boolean exceptionExpected = false;\n+ runTest(true, exceptionExpected, null, -1);\n+ compareResults(1e-8);\n+ }\n+ finally\n+ {\n+ rtplatform = rtold;\n+ }\n+ }\n+\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/unary/matrix/svd.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more 
contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+/*\n+ * DML script to test Singular Value Decomposition (SVD)\n+ */\n+\n+A = read($1);\n+\n+[U, S, V] = svd(A);\n+\n+# U(and V) must be orthogonal i.e., U = U^{-1} and V = V^{-1}\n+I1 = U %*% t(U);\n+I2 = V %*% t(V);\n+\n+Urows = nrow(U);\n+Ucols = ncol(U);\n+Vrows = nrow(V);\n+Vcols = ncol(V);\n+\n+# check for, whether the U and V are square or not.\n+#if ( Urows == Ucols & Vrows == Vcols) {\n+ a = sum(I1) / Urows;\n+ b = sum(I2) / Vrows;\n+#}\n+\n+D = matrix(1,1,1);\n+\n+if ( abs(1-a)<10e-4 & abs(1-b)<10e-4 ) {\n+\n+ # Multiplying U, S and t(V), we must get back original A\n+ B = U %*% S %*% t(V);\n+\n+ diff = sum(A - B);\n+ # write the difference between original A and computed B as a dummy 1x1 matrix\n+ D = diff*D;\n+\n+}\n+\n+write (D, $2);\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/unary/matrix/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/unary/matrix/ZPackageSuite.java", "diff": "@@ -42,6 +42,7 @@ import org.junit.runners.Suite;\nFullSignTest.class,\nIQMTest.class,\nLUFactorizeTest.class,\n+ SVDFactorizeTest.class,\nMatrixInverseTest.class,\nMinusTest.class,\nMLUnaryBuiltinTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1216] New svd builtin function (compiler/runtime, local only) Closes #605.
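A self-contained Java sketch of what `LibCommonsMath.computeSvd` delegates to, assuming commons-math3 on the classpath; the input values and the reconstruction check are illustrative, not part of the commit. At the DML level the same functionality is exposed as the multi-return call `[U, S, V] = svd(A)`, as in the test script above.

```java
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.SingularValueDecomposition;

public class SvdSketch {
    public static void main(String[] args) {
        // Small example matrix (hypothetical values)
        RealMatrix a = new Array2DRowRealMatrix(new double[][] { {4, 0}, {3, -5} });

        // Decompose A = U * Sigma * V^T, mirroring the commit's use of commons-math
        SingularValueDecomposition svd = new SingularValueDecomposition(a);
        RealMatrix u = svd.getU();
        RealMatrix sigma = MatrixUtils.createRealDiagonalMatrix(svd.getSingularValues());
        RealMatrix v = svd.getV();

        // Reconstruction error should be ~0 up to floating-point noise
        RealMatrix diff = u.multiply(sigma).multiply(v.transpose()).subtract(a);
        System.out.println("reconstruction error (matrix norm) = " + diff.getNorm());
    }
}
```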
49,768
24.08.2017 01:09:26
25,200
0ee8800b8e10b65983c61677b00c2bfb185c1d38
Transfer Learning using Caffe model
[ { "change_type": "ADD", "old_path": null, "new_path": "samples/jupyter-notebooks/Image_Classify_Using_VGG_19_Transfer_Learning.ipynb", "diff": "+{\n+ \"cells\": [\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Image Classification using Caffe VGG-19 model (Transfer Learning)\\n\",\n+ \"\\n\",\n+ \"This notebook demonstrates importing VGG-19 model from Caffe to SystemML and use that model to do an image classification. VGG-19 model has been trained using ImageNet dataset (1000 classes with ~ 14M images). If an image to be predicted is in one of the class VGG-19 has trained on then accuracy will be higher.\\n\",\n+ \"We expect prediction of any image through SystemML using VGG-19 model will be similar to that of image predicted through Caffe using VGG-19 model directly.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Prerequisite:\\n\",\n+ \"1. SystemML Python Package\\n\",\n+ \"To run this notebook you need to install systeml 1.0 (Master Branch code as of 08/24/2017 or later) python package.\\n\",\n+ \"2. Download Dogs-vs-Cats Kaggle dataset from https://www.kaggle.com/c/dogs-vs-cats/data location to a directory.\\n\",\n+ \" Unzip the train.zip directory to some location and update the variable \\\"train_dir\\\" in bottom two cells in which classifyImagesWTransfLearning() and classifyImages() methods are called to test this change. \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"##### SystemML Python Package information\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 1,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"Name: systemml\\r\\n\",\n+ \"Version: 1.0.0\\r\\n\",\n+ \"Summary: Apache SystemML is a distributed and declarative machine learning platform.\\r\\n\",\n+ \"Home-page: http://systemml.apache.org/\\r\\n\",\n+ \"Author: Apache SystemML\\r\\n\",\n+ \"Author-email: [email protected]\\r\\n\",\n+ \"License: Apache 2.0\\r\\n\",\n+ \"Location: /home/asurve/src/anaconda2/lib/python2.7/site-packages\\r\\n\",\n+ \"Requires: Pillow, numpy, scipy, pandas\\r\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"!pip show systemml\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### SystemML Build information\\n\",\n+ \"Following code will show SystemML information which is installed in the environment.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 2,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"SystemML Built-Time:2017-08-17 19:20:41 UTC\\n\",\n+ \"Archiver-Version: Plexus Archiver\\n\",\n+ \"Artifact-Id: systemml\\n\",\n+ \"Build-Jdk: 1.8.0_121\\n\",\n+ \"Build-Time: 2017-08-17 19:20:41 UTC\\n\",\n+ \"Built-By: asurve\\n\",\n+ \"Created-By: Apache Maven 3.3.9\\n\",\n+ \"Group-Id: org.apache.systemml\\n\",\n+ \"Main-Class: org.apache.sysml.api.DMLScript\\n\",\n+ \"Manifest-Version: 1.0\\n\",\n+ \"Minimum-Recommended-Spark-Version: 2.1.0\\n\",\n+ \"Version: 1.0.0-SNAPSHOT\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"from systemml import MLContext\\n\",\n+ \"ml = MLContext(sc)\\n\",\n+ \"print (\\\"SystemML Built-Time:\\\"+ ml.buildTime())\\n\",\n+ \"print(ml.info())\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 3,\n+ 
\"metadata\": {\n+ \"collapsed\": false,\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Workaround for Python 2.7.13 to avoid certificate validation issue while downloading any file.\\n\",\n+ \"\\n\",\n+ \"import ssl\\n\",\n+ \"\\n\",\n+ \"try:\\n\",\n+ \" _create_unverified_https_context = ssl._create_unverified_context\\n\",\n+ \"except AttributeError:\\n\",\n+ \" # Legacy Python that doesn't verify HTTPS certificates by default\\n\",\n+ \" pass\\n\",\n+ \"else:\\n\",\n+ \" # Handle target environment that doesn't support HTTPS verification\\n\",\n+ \" ssl._create_default_https_context = _create_unverified_https_context\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 4,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Create label.txt file\\n\",\n+ \"\\n\",\n+ \"def createLabelFile(fileName):\\n\",\n+ \" file = open(fileName, 'w')\\n\",\n+ \" file.write('1,\\\"Cat\\\" \\\\n')\\n\",\n+ \" file.write('2,\\\"Dog\\\" \\\\n')\\n\",\n+ \" file.close()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Download model, proto files and convert them to SystemML format.\\n\",\n+ \"\\n\",\n+ \"1. Download Caffe Model (VGG-19), proto files (deployer, network and solver) and label file.\\n\",\n+ \"2. Convert the Caffe model into SystemML input format.\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 5,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Download caffemodel and proto files \\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"def downloadAndConvertModel(downloadDir='.', trained_vgg_weights='trained_vgg_weights'):\\n\",\n+ \" \\n\",\n+ \" # Step 1: Download the VGG-19 model and other files.\\n\",\n+ \" import errno\\n\",\n+ \" import os\\n\",\n+ \" import urllib\\n\",\n+ \"\\n\",\n+ \" # Create directory, if exists don't error out\\n\",\n+ \" try:\\n\",\n+ \" os.makedirs(os.path.join(downloadDir,trained_vgg_weights))\\n\",\n+ \" except OSError as exc: # Python >2.5\\n\",\n+ \" if exc.errno == errno.EEXIST and os.path.isdir(trained_vgg_weights):\\n\",\n+ \" pass\\n\",\n+ \" else:\\n\",\n+ \" raise\\n\",\n+ \" \\n\",\n+ \" # Download deployer, network, solver proto and label files.\\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_deploy.proto', os.path.join(downloadDir,'VGG_ILSVRC_19_layers_deploy.proto'))\\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_network.proto',os.path.join(downloadDir,'VGG_ILSVRC_19_layers_network.proto'))\\n\",\n+ \" #TODO: After downloading network file (VGG_ILSVRC_19_layers_network.proto) , change num_output from 1000 to 2\\n\",\n+ \" \\n\",\n+ \" urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_solver.proto',os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'))\\n\",\n+ \" # TODO: set values as descrived below in VGG_ILSVRC_19_layers_solver.proto (Possibly through APIs whenever available)\\n\",\n+ \" # test_iter: 100\\n\",\n+ \" # stepsize: 40\\n\",\n+ \" # max_iter: 200\\n\",\n+ \" \\n\",\n+ \" # Create labels for data\\n\",\n+ \" ### 1,\\\"Cat\\\"\\n\",\n+ \" ### 2,\\\"Dog\\\"\\n\",\n+ \" createLabelFile(os.path.join(downloadDir, 
trained_vgg_weights, 'labels.txt'))\\n\",\n+ \"\\n\",\n+ \" # TODO: Following line commented as its 500MG file, if u need to download it please uncomment it and run.\\n\",\n+ \" # urllib.urlretrieve('http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel', os.path.join(downloadDir,'VGG_ILSVRC_19_layers.caffemodel'))\\n\",\n+ \"\\n\",\n+ \" # Step 2: Convert the caffemodel to trained_vgg_weights directory\\n\",\n+ \" import systemml as sml\\n\",\n+ \" sml.convert_caffemodel(sc, os.path.join(downloadDir,'VGG_ILSVRC_19_layers_deploy.proto'), os.path.join(downloadDir,'VGG_ILSVRC_19_layers.caffemodel'), os.path.join(downloadDir,trained_vgg_weights))\\n\",\n+ \" \\n\",\n+ \" return\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"##### PrintTopK\\n\",\n+ \"This function will print top K probabilities and indices from the result.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 6,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Print top K indices and probability\\n\",\n+ \"\\n\",\n+ \"def printTopK(prob, label, k):\\n\",\n+ \" print(label, 'Top ', k, ' Index : ', np.argsort(-prob)[0, :k])\\n\",\n+ \" print(label, 'Top ', k, ' Probability : ', prob[0,np.argsort(-prob)[0, :k]])\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Classify images\\n\",\n+ \"\\n\",\n+ \"This function classify images from images specified through urls.\\n\",\n+ \"\\n\",\n+ \"###### Input Parameters: \\n\",\n+ \" urls: List of urls\\n\",\n+ \" printTokKData (default False): Whether to print top K indices and probabilities\\n\",\n+ \" topK: Top K elements to be displayed. \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 7,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import numpy as np\\n\",\n+ \"import urllib\\n\",\n+ \"from systemml.mllearn import Caffe2DML\\n\",\n+ \"import systemml as sml\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"def classifyImages(urls,img_shape=(3, 224, 224), printTokKData=False, topK=5, downloadDir='.', trained_vgg_weights='trained_vgg_weights'):\\n\",\n+ \"\\n\",\n+ \" size = (img_shape[1], img_shape[2])\\n\",\n+ \" \\n\",\n+ \" vgg = Caffe2DML(sqlCtx, solver=os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'), input_shape=img_shape)\\n\",\n+ \" vgg.load(trained_vgg_weights)\\n\",\n+ \"\\n\",\n+ \" for url in urls:\\n\",\n+ \" outFile = 'inputTest.jpg'\\n\",\n+ \" urllib.urlretrieve(url, outFile)\\n\",\n+ \" \\n\",\n+ \" from IPython.display import Image, display\\n\",\n+ \" display(Image(filename=outFile))\\n\",\n+ \" \\n\",\n+ \" print (\\\"Prediction of above image to ImageNet Class using\\\");\\n\",\n+ \"\\n\",\n+ \" ## Do image classification through SystemML processing\\n\",\n+ \" from PIL import Image\\n\",\n+ \" input_image = sml.convertImageToNumPyArr(Image.open(outFile), img_shape=img_shape\\n\",\n+ \" , color_mode='BGR', mean=sml.getDatasetMean('VGG_ILSVRC_19_2014'))\\n\",\n+ \" print (\\\"Image preprocessed through SystemML :: \\\", vgg.predict(input_image)[0])\\n\",\n+ \" if(printTopKData == True):\\n\",\n+ \" sysml_proba = vgg.predict_proba(input_image)\\n\",\n+ \" printTopK(sysml_proba, 'SystemML BGR', topK)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 8,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from pyspark.ml.linalg import Vectors\\n\",\n+ \"import 
os\\n\",\n+ \"import systemml as sml\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"def getLabelFeatures(filename, train_dir, img_shape):\\n\",\n+ \" from PIL import Image\\n\",\n+ \"\\n\",\n+ \" vec = Vectors.dense(sml.convertImageToNumPyArr(Image.open(os.path.join(train_dir, filename)), img_shape=img_shape)[0,:])\\n\",\n+ \" if filename.lower().startswith('cat'):\\n\",\n+ \" return (1, vec)\\n\",\n+ \" elif filename.lower().startswith('dog'):\\n\",\n+ \" return (2, vec)\\n\",\n+ \" else:\\n\",\n+ \" raise ValueError('Expected the filename to start with either cat or dog')\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 9,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from pyspark.sql.functions import rand\\n\",\n+ \"import os\\n\",\n+ \"\\n\",\n+ \"def createTrainingDF(train_dir, train_data_file, img_shape):\\n\",\n+ \" list_jpeg_files = os.listdir(train_dir)\\n\",\n+ \" # 10 files per partition\\n\",\n+ \" train_df = sc.parallelize(list_jpeg_files, int(len(list_jpeg_files)/10)).map(lambda filename : getLabelFeatures(filename, train_dir, img_shape)).toDF(['label', 'features']).orderBy(rand())\\n\",\n+ \" # Optional: but helps seperates conversion-related from training\\n\",\n+ \" # train_df.write.parquet(train_data_file) # 'kaggle-cats-dogs.parquet'\\n\",\n+ \" return train_df\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 10,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"def readTrainingDF(train_dir, train_data_file):\\n\",\n+ \" train_df = sqlContext.read.parquet(train_data_file)\\n\",\n+ \" return train_df\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 11,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# downloadAndConvertModel(downloadDir, trained_vgg_weights)\\n\",\n+ \"# TODO: Take \\\"TODO\\\" actions mentioned in the downloadAndConvertModel() function after calling downloadAndConvertModel() function.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 12,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"def retrainModel(img_shape, downloadDir, trained_vgg_weights, train_dir, train_data_file, vgg_new_model):\\n\",\n+ \"\\n\",\n+ \" # Let downloadAndConvertModel() functon be commented out, as it needs to be called separately (which is done in cell above) and manual action to be taken after calling it.\\n\",\n+ \" # downloadAndConvertModel(downloadDir, trained_vgg_weights)\\n\",\n+ \" # TODO: Take \\\"TODO\\\" actions mentioned in the downloadAndConvertModel() function after calling that function.\\n\",\n+ \" \\n\",\n+ \" train_df = createTrainingDF(train_dir, train_data_file, img_shape)\\n\",\n+ \" ## Write from input files OR read if its already written/converted\\n\",\n+ \" # train_df = readTrainingDF(train_dir, train_data_file)\\n\",\n+ \" \\n\",\n+ \" # Load the model\\n\",\n+ \" vgg = Caffe2DML(sqlCtx, solver=os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'), input_shape=img_shape)\\n\",\n+ \" vgg.load(weights=os.path.join(downloadDir,trained_vgg_weights), ignore_weights=['fc8'])\\n\",\n+ \" vgg.set(debug=True).setExplain(True)\\n\",\n+ \"\\n\",\n+ \" # Train the model using new data\\n\",\n+ \" vgg.fit(train_df)\\n\",\n+ \" \\n\",\n+ \" # Save the trained model\\n\",\n+ \" vgg.save(vgg_new_model)\\n\",\n+ \" \\n\",\n+ \" return vgg\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ 
\"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import numpy as np\\n\",\n+ \"import urllib\\n\",\n+ \"from systemml.mllearn import Caffe2DML\\n\",\n+ \"import systemml as sml\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"def classifyImagesWTransfLearning(urls, model, img_shape=(3, 224, 224), printTokKData=False, topK=5):\\n\",\n+ \"\\n\",\n+ \" size = (img_shape[1], img_shape[2])\\n\",\n+ \" # vgg.load(trained_vgg_weights)\\n\",\n+ \"\\n\",\n+ \" for url in urls:\\n\",\n+ \" outFile = 'inputTest.jpg'\\n\",\n+ \" urllib.urlretrieve(url, outFile)\\n\",\n+ \" \\n\",\n+ \" from IPython.display import Image, display\\n\",\n+ \" display(Image(filename=outFile))\\n\",\n+ \" \\n\",\n+ \" print (\\\"Prediction of above image to ImageNet Class using\\\");\\n\",\n+ \"\\n\",\n+ \" ## Do image classification through SystemML processing\\n\",\n+ \" from PIL import Image\\n\",\n+ \" input_image = sml.convertImageToNumPyArr(Image.open(outFile), img_shape=img_shape\\n\",\n+ \" , color_mode='BGR', mean=sml.getDatasetMean('VGG_ILSVRC_19_2014'))\\n\",\n+ \"\\n\",\n+ \" print (\\\"Image preprocessed through SystemML :: \\\", model.predict(input_image)[0])\\n\",\n+ \" if(printTopKData == True):\\n\",\n+ \" sysml_proba = model.predict_proba(input_image)\\n\",\n+ \" printTopK(sysml_proba, 'SystemML BGR', topK)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Sample code to retrain the model and use it to classify image through two different way\\n\",\n+ \"\\n\",\n+ \"There are couple of parameters to set based on what you are looking for.\\n\",\n+ \"1. printTopKData (default False): If this parameter gets set to True, then top K results (probabilities and indices) will be displayed. \\n\",\n+ \"2. topK (default 5): How many entities (K) to be displayed.\\n\",\n+ \"3. Directories, data file name, model name and directory where data has donwloaded.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# ImageNet specific parameters\\n\",\n+ \"img_shape = (3, 224, 224)\\n\",\n+ \"\\n\",\n+ \"# Setting other than current directory causes \\\"network file not found\\\" issue, as network file\\n\",\n+ \"# location is defined in solver file which does not have a path, so it searches in current dir.\\n\",\n+ \"downloadDir = '.' 
# /home/asurve/caffe_models' \\n\",\n+ \"trained_vgg_weights = 'trained_vgg_weights'\\n\",\n+ \"\\n\",\n+ \"train_dir = '/home/asurve/data/keggle/dogs_vs_cats_2/train'\\n\",\n+ \"train_data_file = 'kaggle-cats-dogs.parquet'\\n\",\n+ \" \\n\",\n+ \"vgg_new_model = 'kaggle-cats-dogs-model_2'\\n\",\n+ \" \\n\",\n+ \"printTopKData=True\\n\",\n+ \"topK=5\\n\",\n+ \"\\n\",\n+ \"urls = ['http://cdn3-www.dogtime.com/assets/uploads/gallery/goldador-dog-breed-pictures/puppy-1.jpg','https://lh3.googleusercontent.com/-YdeAa1Ff4Ac/VkUnQ4vuZGI/AAAAAAAAAEg/nBiUn4pp6aE/w800-h800/images-6.jpeg','https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/MountainLion.jpg/312px-MountainLion.jpg']\\n\",\n+ \"\\n\",\n+ \"vgg = retrainModel(img_shape, downloadDir, trained_vgg_weights, train_dir, train_data_file, vgg_new_model)\\n\",\n+ \"classifyImagesWTransfLearning(urls, vgg, img_shape, printTopKData, topK)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"img_shape = (3, 224, 224)\\n\",\n+ \"\\n\",\n+ \"printTopKData=True\\n\",\n+ \"topK=5\\n\",\n+ \"\\n\",\n+ \"# Setting other than current directory causes \\\"network file not found\\\" issue, as network file\\n\",\n+ \"# location is defined in solver file which does not have a path, so it searches in current dir.\\n\",\n+ \"downloadDir = '.' # /home/asurve/caffe_models' \\n\",\n+ \"trained_vgg_weights = 'kaggle-cats-dogs-model_2'\\n\",\n+ \"\\n\",\n+ \"urls = ['http://cdn3-www.dogtime.com/assets/uploads/gallery/goldador-dog-breed-pictures/puppy-1.jpg','https://lh3.googleusercontent.com/-YdeAa1Ff4Ac/VkUnQ4vuZGI/AAAAAAAAAEg/nBiUn4pp6aE/w800-h800/images-6.jpeg','https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/MountainLion.jpg/312px-MountainLion.jpg']\\n\",\n+ \"\\n\",\n+ \"classifyImages(urls,img_shape, printTopKData, topK, downloadDir, trained_vgg_weights)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": []\n+ }\n+ ],\n+ \"metadata\": {\n+ \"kernelspec\": {\n+ \"display_name\": \"Python 2\",\n+ \"language\": \"python\",\n+ \"name\": \"python2\"\n+ },\n+ \"language_info\": {\n+ \"codemirror_mode\": {\n+ \"name\": \"ipython\",\n+ \"version\": 2\n+ },\n+ \"file_extension\": \".py\",\n+ \"mimetype\": \"text/x-python\",\n+ \"name\": \"python\",\n+ \"nbconvert_exporter\": \"python\",\n+ \"pygments_lexer\": \"ipython2\",\n+ \"version\": \"2.7.13\"\n+ }\n+ },\n+ \"nbformat\": 4,\n+ \"nbformat_minor\": 2\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1742] Transfer Learning using Caffe VGG-19 model
49,703
24.08.2017 10:52:57
25,200
e52c8713de4f99114202208ed06ee21106261f69
[MINOR] Use default namespace constant rather than String literal Make DMLProgram's DEFAULT_NAMESPACE and INTERNAL_NAMESPACE final. Use DEFAULT_NAMESPACE rather than String literal in FunctionCallIdentifier. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLProgram.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLProgram.java", "diff": "@@ -33,8 +33,8 @@ public class DMLProgram\nprivate ArrayList<StatementBlock> _blocks;\nprivate HashMap<String, FunctionStatementBlock> _functionBlocks;\nprivate HashMap<String,DMLProgram> _namespaces;\n- public static String DEFAULT_NAMESPACE = \".defaultNS\";\n- public static String INTERNAL_NAMESPACE = \"_internal\"; // used for multi-return builtin functions\n+ public static final String DEFAULT_NAMESPACE = \".defaultNS\";\n+ public static final String INTERNAL_NAMESPACE = \"_internal\"; // used for multi-return builtin functions\nprivate static final Log LOG = LogFactory.getLog(DMLProgram.class.getName());\npublic DMLProgram(){\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/FunctionCallIdentifier.java", "new_path": "src/main/java/org/apache/sysml/parser/FunctionCallIdentifier.java", "diff": "@@ -168,7 +168,7 @@ public class FunctionCallIdentifier extends DataIdentifier\n@Override\npublic String toString() {\nStringBuilder sb = new StringBuilder();\n- if (_namespace != null && _namespace.length() > 0 && !_namespace.equals(\".defaultNS\"))\n+ if (_namespace != null && _namespace.length() > 0 && !_namespace.equals(DMLProgram.DEFAULT_NAMESPACE))\nsb.append(_namespace + \"::\");\nsb.append(_name);\nsb.append(\" ( \");\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Use default namespace constant rather than String literal Make DMLProgram's DEFAULT_NAMESPACE and INTERNAL_NAMESPACE final. Use DEFAULT_NAMESPACE rather than String literal in FunctionCallIdentifier. Closes #636.
49,703
24.08.2017 10:59:33
25,200
8fb74b158f1011c16538b1fcb4ca905090db1e8a
[MINOR] Fix case of externalFunction classname message Use constant for 'classname' in externalFunction error message, since the previous error message suggested 'className', which is not the parameter name the parser actually accepts. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/dml/DmlSyntacticValidator.java", "new_path": "src/main/java/org/apache/sysml/parser/dml/DmlSyntacticValidator.java", "diff": "@@ -852,13 +852,14 @@ public class DmlSyntacticValidator extends CommonSyntacticValidator implements D\nreturn;\n}\notherParams.put(paramName, val);\n- if(paramName.equals(\"classname\")) {\n+ if (paramName.equals(ExternalFunctionStatement.CLASS_NAME)) {\natleastOneClassName = true;\n}\n}\nfunctionStmt.setOtherParams(otherParams);\nif (!atleastOneClassName) {\n- notifyErrorListeners(\"the parameter \\'className\\' needs to be passed for externalFunction\", ctx.start);\n+ notifyErrorListeners(\"The \\'\" + ExternalFunctionStatement.CLASS_NAME\n+ + \"\\' argument needs to be passed to the externalFunction 'implemented in' clause.\", ctx.start);\nreturn;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix case of externalFunction classname message Use constant for 'classname' in externalFunction error message, since the previous error message suggested 'className', which is not the parameter name the parser actually accepts. Closes #634.
49,703
24.08.2017 15:41:26
25,200
f1d35b780a6c6eb5cdfaa4ddd71d49d25800016f
Add avro to bin and standalone-jar artifacts Include avro jars in bin artifacts and avro classes in standalone-jar artifact. Update LICENSE files accordingly. Closes
[ { "change_type": "MODIFY", "old_path": "src/assembly/bin.xml", "new_path": "src/assembly/bin.xml", "diff": "<dependencySet>\n<includes>\n<include>*:${artifactId}*</include>\n+ <include>*:avro*</include>\n<include>*:commons-cli*</include>\n<include>*:commons-collections*</include>\n<include>*:commons-configuration*</include>\n" }, { "change_type": "MODIFY", "old_path": "src/assembly/bin/LICENSE", "new_path": "src/assembly/bin/LICENSE", "diff": "The following components come under the Apache Software License 2.0.\n+avro-1.7.4.jar\n+avro-ipc-1.7.7-tests.jar\n+avro-ipc-1.7.7.jar\n+avro-mapred-1.7.7-hadoop2.jar\ncommons-cli-1.2.jar\ncommons-collections-3.2.1.jar\ncommons-configuration-1.6.jar\n" }, { "change_type": "MODIFY", "old_path": "src/assembly/standalone-jar.xml", "new_path": "src/assembly/standalone-jar.xml", "diff": "<dependencySet>\n<includes>\n<include>*:${artifactId}*</include>\n+ <include>*:avro*</include>\n<include>*:commons-cli*</include>\n<include>*:commons-collections*</include>\n<include>*:commons-configuration*</include>\n" }, { "change_type": "MODIFY", "old_path": "src/assembly/standalone-jar/LICENSE", "new_path": "src/assembly/standalone-jar/LICENSE", "diff": "@@ -214,6 +214,10 @@ commons-lang:commons-lang:2.6\ncommons-logging:commons-logging:1.1.3\nlog4j:log4j:1.2.15\nnet.sf.opencsv:opencsv:2.3\n+org.apache.avro:avro:1.7.4\n+org.apache.avro:avro-ipc:1.7.7\n+org.apache.avro:avro-ipc:1.7.7:tests\n+org.apache.avro:avro-mapred:1.7.7:hadoop2\norg.apache.commons:commons-math3:3.4.1\norg.apache.hadoop:hadoop-auth:2.6.0\norg.apache.hadoop:hadoop-client:2.6.0\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1870] Add avro to bin and standalone-jar artifacts Include avro jars in bin artifacts and avro classes in standalone-jar artifact. Update LICENSE files accordingly. Closes #639.
49,703
24.08.2017 16:50:11
25,200
100f2d6061e945a914d25eeba458164997b4bb1a
Throw error for function calls with extra input args If a function call contains more input arguments than defined by the function signature, throw an error. Example: triple = function(double a) return (double z) { z = a*3; } x = triple(10,11,12) print(x) Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/StatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/StatementBlock.java", "diff": "@@ -435,9 +435,9 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\n}\nStatementBlock sblock = fstmt.getBody().get(0);\n- if( fcall.getParamExprs().size() < fstmt.getInputParams().size() ) {\n- sourceExpr.raiseValidateError(\"Wrong number of function parameters: \"+\n- fcall.getParamExprs().size() + \", but \" + fstmt.getInputParams().size()+\" expected.\");\n+ if( fcall.getParamExprs().size() != fstmt.getInputParams().size() ) {\n+ sourceExpr.raiseValidateError(\"Wrong number of function input arguments: \"+\n+ fcall.getParamExprs().size() + \" found, but \" + fstmt.getInputParams().size()+\" expected.\");\n}\nfor (int i =0; i < fstmt.getInputParams().size(); i++) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1859] Throw error for function calls with extra input args If a function call contains more input arguments than defined by the function signature, throw an error. Example: triple = function(double a) return (double z) { z = a*3; } x = triple(10,11,12) print(x) Closes #633.
49,717
25.08.2017 17:08:44
25,200
ac04168836cc68f9af940c08baccab575c7e2cb3
[MINOR] Print statistics to stderr if an error has occurred Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -80,6 +80,8 @@ public class ScriptExecutorUtils {\nDMLScript.FINEGRAINED_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_FINEGRAINED_STATS);\nDMLScript.STATISTICS_MAX_WRAP_LEN = dmlconf.getIntValue(DMLConfig.STATS_MAX_WRAP_LEN);\n+ boolean exceptionThrown = false;\n+\nStatistics.startRunTimer();\ntry {\n// run execute (w/ exception handling to ensure proper shutdown)\n@@ -93,6 +95,9 @@ public class ScriptExecutorUtils {\nec.setGPUContexts(gCtxs);\n}\nrtprog.execute(ec);\n+ } catch (Throwable e) {\n+ exceptionThrown = true;\n+ throw e;\n} finally { // ensure cleanup/shutdown\nif (DMLScript.USE_ACCELERATOR && !ec.getGPUContexts().isEmpty()) {\nec.getGPUContexts().forEach(gCtx -> gCtx.clearTemporaryMemory());\n@@ -104,10 +109,17 @@ public class ScriptExecutorUtils {\n// display statistics (incl caching stats if enabled)\nStatistics.stopRunTimer();\n+ if (!exceptionThrown) {\nif (statisticsMaxHeavyHitters > 0)\nSystem.out.println(Statistics.display(statisticsMaxHeavyHitters));\nelse\nSystem.out.println(Statistics.display());\n+ } else {\n+ if (statisticsMaxHeavyHitters > 0)\n+ System.err.println(Statistics.display(statisticsMaxHeavyHitters));\n+ else\n+ System.err.println(Statistics.display());\n+ }\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Print statistics to stderr if an error has occurred Closes #631
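A minimal sketch of the flag-and-rethrow pattern the diff above applies in `ScriptExecutorUtils`: remember whether the body threw, propagate the exception unchanged, and route the final report to stdout or stderr accordingly. The `run` wrapper and the stats placeholder are assumptions; the real code prints `Statistics.display()`.

```java
public class StatsOnErrorSketch {
    public static void run(Runnable body) {
        boolean exceptionThrown = false;
        try {
            body.run();
        } catch (Throwable e) {
            exceptionThrown = true; // flag it, then rethrow unchanged (precise rethrow)
            throw e;
        } finally {
            // Stand-in for Statistics.display(); always printed, channel depends on outcome
            String stats = "elapsed time / heavy hitters ...";
            if (exceptionThrown)
                System.err.println(stats);
            else
                System.out.println(stats);
        }
    }
}
```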
49,698
25.08.2017 21:06:08
25,200
aedceb61152f15c392cbcca2fe6775c1ea4419ab
HOP/LOP extensions for UDFs in expressions (part 1) Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/FunctionOp.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.hops;\nimport java.util.ArrayList;\nimport org.apache.sysml.lops.FunctionCallCP;\n+import org.apache.sysml.lops.FunctionCallCPSingle;\nimport org.apache.sysml.lops.Lop;\nimport org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.lops.LopProperties.ExecType;\n@@ -33,14 +34,11 @@ import org.apache.sysml.runtime.controlprogram.parfor.opt.CostEstimatorHops;\n/**\n* This FunctionOp represents the call to a DML-bodied or external function.\n*\n- * Note: Currently, we support expressions in function arguments but no function calls\n- * in expressions.\n+ * Note: Currently, we support expressions in function arguments along with function calls\n+ * in expressions with single outputs, leaving multiple outputs handling as it is.\n*/\npublic class FunctionOp extends Hop\n{\n-\n- public static String OPSTRING = \"extfunct\";\n-\npublic enum FunctionType{\nDML,\nEXTERNAL_MEM,\n@@ -49,22 +47,25 @@ public class FunctionOp extends Hop\nUNKNOWN\n}\n+ public static final String OPSTRING = \"extfunct\";\n+\nprivate FunctionType _type = null;\nprivate String _fnamespace = null;\nprivate String _fname = null;\nprivate String[] _outputs = null;\nprivate ArrayList<Hop> _outputHops = null;\n+ private boolean _singleOutFun = false;\nprivate FunctionOp() {\n//default constructor for clone\n}\npublic FunctionOp(FunctionType type, String fnamespace, String fname, ArrayList<Hop> finputs, String[] outputs, ArrayList<Hop> outputHops) {\n- this(type, fnamespace, fname, finputs, outputs);\n+ this(type, fnamespace, fname, finputs, outputs, false);\n_outputHops = outputHops;\n}\n- public FunctionOp(FunctionType type, String fnamespace, String fname, ArrayList<Hop> finputs, String[] outputs)\n+ public FunctionOp(FunctionType type, String fnamespace, String fname, ArrayList<Hop> finputs, String[] outputs, boolean singleOut)\n{\nsuper(fnamespace + Program.KEY_DELIM + fname, DataType.UNKNOWN, ValueType.UNKNOWN );\n@@ -72,9 +73,9 @@ public class FunctionOp extends Hop\n_fnamespace = fnamespace;\n_fname = fname;\n_outputs = outputs;\n+ _singleOutFun = singleOut;\n- for( Hop in : finputs )\n- {\n+ for( Hop in : finputs ) {\ngetInput().add(in);\nin.getParent().add(this);\n}\n@@ -239,7 +240,8 @@ public class FunctionOp extends Hop\ntmp.add( in.constructLops() );\n//construct function call\n- FunctionCallCP fcall = new FunctionCallCP( tmp, _fnamespace, _fname, _outputs, _outputHops, et );\n+ Lop fcall = _singleOutFun ? 
new FunctionCallCPSingle( tmp, _fnamespace, _fname, et ) :\n+ new FunctionCallCP(tmp, _fnamespace, _fname, _outputs, _outputHops, et);\nsetLineNumbers(fcall);\nsetLops(fcall);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/FunctionCallCP.java", "new_path": "src/main/java/org/apache/sysml/lops/FunctionCallCP.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.lops;\nimport java.util.ArrayList;\n+import org.apache.sysml.hops.FunctionOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.HopsException;\nimport org.apache.sysml.lops.LopProperties.ExecLocation;\n@@ -122,7 +123,7 @@ public class FunctionCallCP extends Lop\ninst.append(getExecType());\ninst.append(Lop.OPERAND_DELIMITOR);\n- inst.append(\"extfunct\");\n+ inst.append(FunctionOp.OPSTRING);\ninst.append(Lop.OPERAND_DELIMITOR);\ninst.append(_fnamespace);\ninst.append(Lop.OPERAND_DELIMITOR);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/lops/FunctionCallCPSingle.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.lops;\n+\n+\n+import java.util.ArrayList;\n+\n+import org.apache.sysml.hops.FunctionOp;\n+import org.apache.sysml.lops.LopProperties.ExecLocation;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.lops.compile.JobType;\n+import org.apache.sysml.parser.DMLProgram;\n+import org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.parser.Expression.ValueType;\n+\n+public class FunctionCallCPSingle extends Lop\n+{\n+ private String _fnamespace;\n+ private String _fname;\n+\n+ public FunctionCallCPSingle(ArrayList<Lop> inputs, String fnamespace, String fname, ExecType et)\n+ {\n+ super(Lop.Type.FunctionCallCPSingle, DataType.UNKNOWN, ValueType.UNKNOWN);\n+ //note: data scalar in order to prevent generation of redundant createvar, rmvar\n+\n+ _fnamespace = fnamespace;\n+ _fname = fname;\n+\n+ //wire inputs\n+ for( Lop in : inputs ) {\n+ addInput( in );\n+ in.addOutput( this );\n+ }\n+\n+ //lop properties: always in CP\n+ boolean breaksAlignment = false;\n+ boolean aligner = false;\n+ boolean definesMRJob = false;\n+ lps.addCompatibility(JobType.INVALID);\n+ lps.setProperties(inputs, et, ExecLocation.ControlProgram, breaksAlignment, aligner, definesMRJob );\n+ }\n+\n+\n+ @Override\n+ public String toString() {\n+ return \"function call: \" + DMLProgram.constructFunctionKey(_fnamespace, _fname);\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String output) throws LopsException {\n+ return getInstructions(new String[]{input1}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String input2, String output) 
throws LopsException {\n+ return getInstructions(new String[]{input1, input2}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String input2, String input3, String output) throws LopsException {\n+ return getInstructions(new String[]{input1, input2, input3}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String input2, String input3, String input4, String output) throws LopsException {\n+ return getInstructions(new String[]{input1, input2, input3, input4}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String input2, String input3, String input4, String input5, String output) throws LopsException {\n+ return getInstructions(new String[]{input1, input2, input3, input4, input5}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String input2, String input3, String input4, String input5, String input6, String output) throws LopsException {\n+ return getInstructions(new String[]{input1, input2, input3, input4, input5, input6}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String input1, String input2, String input3, String input4, String input5, String input6, String input7, String output) throws LopsException {\n+ return getInstructions(new String[]{input1, input2, input3, input4, input5, input6, input7}, new String[]{output});\n+ }\n+\n+ @Override\n+ public String getInstructions(String[] inputs, String output) throws LopsException\n+ {\n+ StringBuilder inst = new StringBuilder();\n+ inst.append(getExecType());\n+\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append(FunctionOp.OPSTRING);\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append(_fnamespace);\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append(_fname);\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append(inputs.length);\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append(\"1\"); //single output\n+\n+ for(int i=0; i<inputs.length; i++) {\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append( getInputs().get(i).prepInputOperand(inputs[i]) );\n+ }\n+\n+ inst.append(Lop.OPERAND_DELIMITOR);\n+ inst.append(output);\n+\n+ return inst.toString();\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/Lop.java", "new_path": "src/main/java/org/apache/sysml/lops/Lop.java", "diff": "@@ -52,7 +52,7 @@ public abstract class Lop\nCentralMoment, CoVariance, GroupedAgg, GroupedAggM,\nTransform, DataPartition, RepMat, //CP/MR reorganization, partitioning, replication\nParameterizedBuiltin, //CP/MR parameterized ops (name/value)\n- FunctionCallCP, //CP function calls\n+ FunctionCallCP, FunctionCallCPSingle, //CP function calls\nCumulativePartialAggregate, CumulativeSplitAggregate, CumulativeOffsetBinary, //MR cumsum/cumprod/cummin/cummax\nWeightedSquaredLoss, WeightedSigmoid, WeightedDivMM, WeightedCeMM, WeightedUMM,\nSortKeys, PickValues,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -1422,13 +1422,9 @@ public class DMLTranslator\n//create function op\nFunctionType ftype = fsb.getFunctionOpType();\n- FunctionOp fcall = null;\n- if (target == null) {\n- fcall = new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, new String[]{});\n- } else {\n- fcall = new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, new 
String[]{target.getName()});\n- }\n-\n+ FunctionOp fcall = (target == null) ?\n+ new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, new String[]{}, false) :\n+ new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, new String[]{target.getName()}, false);\noutput.add(fcall);\n//TODO function output dataops (phase 3)\n@@ -1465,7 +1461,7 @@ public class DMLTranslator\n}\nFunctionType ftype = fsb.getFunctionOpType();\n- FunctionOp fcall = new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, foutputs);\n+ FunctionOp fcall = new FunctionOp(ftype, fci.getNamespace(), fci.getName(), finputs, foutputs, false);\noutput.add(fcall);\n//TODO function output dataops (phase 3)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1444] HOP/LOP extensions for UDFs in expressions (part 1) Closes #603.
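The record above adds FunctionCallCPSingle, whose getInstructions serializes a single-output CP function call as one delimiter-joined instruction string. Below is a minimal, self-contained Java sketch of that field layout only; it is not SystemML code, and the delimiter character, the "extfunct" opcode (which the diff shows FunctionOp.OPSTRING replacing as a hard-coded literal), and the example namespace/operand names are stand-in assumptions.

```java
import java.util.StringJoiner;

// Toy illustration of the instruction layout emitted by
// FunctionCallCPSingle.getInstructions in the diff above.
public class FunctionCallInstrSketch {

    // stand-in for SystemML's Lop.OPERAND_DELIMITOR (assumed value)
    private static final String DELIM = "\u00b0";

    static String buildInstr(String execType, String fnamespace, String fname,
                             String[] inputs, String output) {
        StringJoiner sj = new StringJoiner(DELIM);
        sj.add(execType);                      // e.g. "CP"
        sj.add("extfunct");                    // FunctionOp.OPSTRING per the diff
        sj.add(fnamespace).add(fname);         // function identity
        sj.add(String.valueOf(inputs.length)); // number of inputs
        sj.add("1");                           // single output, hard-coded above
        for (String in : inputs)
            sj.add(in);                        // real code formats via prepInputOperand
        sj.add(output);
        return sj.toString();
    }

    public static void main(String[] args) {
        System.out.println(buildInstr("CP", ".defaultNS", "foo",
                new String[]{"x", "y"}, "z"));
    }
}
```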
49,737
26.08.2017 11:52:59
25,200
d2efa65c89e3f6eaf7073c314eb56a033c8c8d5f
[SYSTEMML-1451][Phase3] phase 3 work: offline CSV support, family bug fix, plots, doc update, stats update, and a fix to append the family name to train/predict keys. Closes
[ { "change_type": "MODIFY", "old_path": "docs/python-performance-test.md", "new_path": "docs/python-performance-test.md", "diff": "@@ -148,6 +148,17 @@ Run performance test for all algorithms under the family `regression2` and log w\nRun performance test for all algorithms using HDFS.\n+## Result Consolidation and Plotting\n+We have two scripts, `stats.py` forpulling results from google docs and `update.py` to updating results to google docs or local file system.\n+\n+Example of `update.py` would be below\n+`./scripts/perftest/python/google_docs/update.py --file ../../temp/perf_test_singlenode.out --exec-type singlenode --tag 2 --append test.csv`\n+The arguments being `--file` path of the perf-test output, `--exec-type` execution mode used to generate the perf-test output, `--tag` being the realease version or a unique name, `--append` being an optional argument that would append the a local csv file. If instead of `--append` the `--auth` argument needs the location of the `google api key` file.\n+\n+Example of `stats.py` below\n+` ./stats.py --auth ../key/client_json.json --exec-type singlenode --plot stats1_data-gen_none_dense_10k_100`\n+`--plot` argument needs the name of the composite key that you would like to compare results over. If this argument is not specified the results would be grouped by keys.\n+\n## Operational Notes\nAll performance test depend mainly on two scripts for execution `systemml-standalone.py` and `systemml-spark-submit.py`. Incase we need to change standalone or spark parameters we need to manually change these parameters in their respective scripts.\n@@ -158,7 +169,7 @@ The logs contain the following information below comma separated.\nalgorithm | run_type | intercept | matrix_type | data_shape | time_sec\n--- | --- | --- | --- | --- | --- |\n-multinomial|data-gen|0|dense|10k_100| 0.33\n+multinomial|data-gen|0|10k_100|dense| 0.33\nMultiLogReg|train|0|10k_100|dense|6.956\nMultiLogReg|predict|0|10k_100|dense|4.780\n@@ -187,9 +198,12 @@ Matrix Shape | Approximate Data Size\n10M_1k|80GB\n100M_1k|800GB\n+\nFor example the command below runs performance test for all data sizes described above\n`run_perftest.py --family binomial clustering multinomial regression1 regression2 stats1 stats2 --mat-shape 10k_1k 100k_1k 1M_1k 10M_1k 100M_1k --master yarn-client --temp-dir hdfs://localhost:9000/user/systemml`\n+By default data generated in `hybrid_spark` execution mode is in the current users `hdfs` home directory.\n+\nNote: Please use this command `pip3 install -r requirements.txt` before using the perftest scripts.\n" }, { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<exclude>src/test/scripts/functions/jmlc/tfmtd_example/dummycoded.column.names</exclude>\n<exclude>src/test/scripts/functions/jmlc/tfmtd_example2/column.names</exclude>\n<exclude>src/test/scripts/functions/jmlc/tfmtd_frame_example/tfmtd_frame</exclude>\n+ <!-- Perftest requirement file -->\n+ <exclude>scripts/perftest/python/requirements.txt</exclude>\n</excludes>\n</configuration>\n</plugin>\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/datagen.py", "new_path": "scripts/perftest/python/datagen.py", "diff": "@@ -243,6 +243,7 @@ def config_packets_datagen(algo_payload, matrix_type, matrix_shape, datagen_dir,\nThis dictionary contains algorithms to be executed as keys and the path of configuration\njson files to be executed list of values.\n\"\"\"\n+\nconfig_bundle = {}\ndistinct_families = set(map(lambda x: x[1], algo_payload))\n" }, { "change_type": 
"ADD", "old_path": null, "new_path": "scripts/perftest/python/google_docs/gdocs_utils.py", "diff": "+#!/usr/bin/env python3\n+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+from oauth2client.service_account import ServiceAccountCredentials\n+import gspread\n+\n+\n+def auth(path, sheet_name):\n+ \"\"\"\n+ Responsible for authorization\n+ \"\"\"\n+\n+ scope = ['https://spreadsheets.google.com/feeds']\n+ creds = ServiceAccountCredentials.from_json_keyfile_name(path, scope)\n+ gc = gspread.authorize(creds)\n+ sheet = gc.open(\"Perf\").worksheet(sheet_name)\n+ return sheet\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/google_docs/stats.py", "new_path": "scripts/perftest/python/google_docs/stats.py", "diff": "# -------------------------------------------------------------\nimport argparse\n-from functools import reduce\n+import os\nimport pprint\n-from oauth2client.service_account import ServiceAccountCredentials\n-import gspread\n+from os.path import join\n+import matplotlib.pyplot as plt\n+from gdocs_utils import auth\n-# Get time difference between difference runs\n-\n-def auth(path, sheet_name):\n- \"\"\"\n- Responsible for authorization\n- \"\"\"\n- scope = ['https://spreadsheets.google.com/feeds']\n- creds = ServiceAccountCredentials.from_json_keyfile_name(path, scope)\n- gc = gspread.authorize(creds)\n- sheet = gc.open(\"Perf\").worksheet(sheet_name)\n- return sheet\n-\n-\n-def get_data(sheet, tag):\n+# Dict\n+# {algo_name : [algo_1.0': t1, 'algo_2.0': t2]}\n+def get_formatted_data(sheet_data):\n\"\"\"\n- Get time and algorithm from the sheet\n+ Read all the data from google sheets and transforms it into a dictionary that can be\n+ use for plotting later\n\"\"\"\n- time = sheet.find('time_{}'.format(tag))\n- algo = sheet.find('algo_{}'.format(tag))\n-\n- time_col = sheet.col_values(time.col)\n- time_col = list(filter(lambda x: len(x) > 0, time_col))\n-\n- algo_col = sheet.col_values(algo.col)\n- algo_col = list(filter(lambda x: len(x) > 0, algo_col))\n- return algo_col, time_col\n+ algo_dict = {}\n+\n+ for i in sheet_data:\n+ inn_count = 0\n+ data = []\n+ for key, val in i.items():\n+ inn_count += 1\n+ if inn_count < 3:\n+ data.append(key)\n+ data.append(val)\n+\n+ if inn_count == 2:\n+ t1, v1, _, v2 = data\n+ if len(str(v2)) > 0:\n+ if v1 not in algo_dict:\n+ algo_dict[v1] = [{t1: v2}]\n+ else:\n+ algo_dict[v1].append({t1: v2})\n+ inn_count = 0\n+ data = []\n+ return algo_dict\n-def get_data_dict(data_col):\n+def plot(x, y, xlab, ylab, title):\n\"\"\"\n- Return data as dictionary with key as algorithm and list time values\n+ Save plots to the current folder based on the 
arguments\n\"\"\"\n- data_dict = {}\n- all_algo = []\n- for algo, _ in data_col:\n- all_algo.append(algo)\n-\n- flatten_algo = reduce(lambda x, y: x+y, all_algo)\n-\n- # remove the header\n- filter_data = list(filter(lambda x: not x.startswith('algo_'), flatten_algo))\n- distict_algos = set(filter_data)\n-\n- for algo_dist in distict_algos:\n- for algo, time in data_col:\n- for k, v in zip(algo, time):\n- if algo_dist == k:\n- if algo_dist not in data_dict:\n- data_dict[k] = [v]\n- else:\n- data_dict[k].append(v)\n- return data_dict\n+ CWD = os.getcwd()\n+ PATH = join(CWD, title)\n+ width = .35\n+ plt.bar(x, y, color=\"red\", width=width)\n+ plt.xticks(x)\n+ plt.xlabel(xlab)\n+ plt.ylabel(ylab)\n+ plt.title(title)\n+ plt.savefig(PATH + '.png')\n+ print('Plot {} generated'.format(title))\n+ return plt\n# Example Usage\n-# ./stats.py --auth client_json.json --exec-mode singlenode --tags 1.0 2.0\n+# ./stats.py --auth ../key/client_json.json --exec-mode singlenode\nif __name__ == '__main__':\nexecution_mode = ['hybrid_spark', 'singlenode']\ncparser = argparse.ArgumentParser(description='System-ML Statistics Script')\ncparser.add_argument('--auth', help='Location to read auth file',\nrequired=True, metavar='')\n- cparser.add_argument('--exec-mode', help='Execution mode', choices=execution_mode,\n+ cparser.add_argument('--exec-type', help='Execution mode', choices=execution_mode,\nrequired=True, metavar='')\n- cparser.add_argument('--tags', help='Tagging header value',\n- required=True, nargs='+')\n+ cparser.add_argument('--plot', help='Algorithm to plot', metavar='')\nargs = cparser.parse_args()\n- arg_dict = vars(args)\n- sheet = auth(args.auth, args.exec_mode)\n- all_data = sheet.get_all_records()\n- data_col = []\n- for tag in args.tags:\n- algo_col, time_col = get_data(sheet, tag)\n- data_col.append((algo_col, time_col))\n-\n- data_dict = get_data_dict(data_col)\n-\n- delta_algo = {}\n- for k, v in data_dict.items():\n- delta = float(v[0]) - float(v[1])\n- delta_algo[k] = delta\n+ sheet = auth(args.auth, args.exec_type)\n+ all_data = sheet.get_all_records()\n- pprint.pprint(delta_algo, width=1)\n+ plot_data = get_formatted_data(all_data)\n+ if args.plot is not None:\n+ print(plot_data[args.plot])\n+ title = args.plot\n+ ylab = 'Time in sec'\n+ xlab = 'Version'\n+ x = []\n+ y = []\n+ for i in plot_data[args.plot]:\n+ version = list(i.keys())[0]\n+ time = list(i.values())[0]\n+ y.append(time)\n+ x.append(version)\n+\n+ x = list(map(lambda x: float(x.split('_')[1]), x))\n+ plot(x, y, xlab, ylab, title)\n+ else:\n+ pprint.pprint(plot_data, width=1)\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/google_docs/update.py", "new_path": "scripts/perftest/python/google_docs/update.py", "diff": "#\n# -------------------------------------------------------------\n+import sys\n+import os.path\nimport argparse\n-import gspread\n-from oauth2client.service_account import ServiceAccountCredentials\nimport pandas as pd\n-\n+from oauth2client.service_account import ServiceAccountCredentials\n+import gspread\n# Update data to google sheets\n@@ -33,6 +34,7 @@ def parse_data(file_path):\nSkip reading 1st row : Header\nSkip reading last row : Footer\n\"\"\"\n+\ncsv_file = pd.read_csv(file_path, sep=',', skiprows=1, skipfooter=1, engine='python')\nalgo = csv_file['INFO:root:algorithm'].apply(lambda x: x.split(':')[-1])\nkey = algo + '_'+ csv_file['run_type'] + '_' + csv_file['intercept'] + '_' + \\\n@@ -44,6 +46,7 @@ def auth(path, 
sheet_name):\n\"\"\"\nResponsible for authorization\n\"\"\"\n+\nscope = ['https://spreadsheets.google.com/feeds']\ncreds = ServiceAccountCredentials.from_json_keyfile_name(path, scope)\ngc = gspread.authorize(creds)\n@@ -64,6 +67,7 @@ def insert_values(sheet, key, col_num, header):\n\"\"\"\nInsert data to google sheets based on the arguments\n\"\"\"\n+\n# Col Name\nsheet.update_cell(1, col_num, header)\nfor id, val in enumerate(key):\n@@ -74,6 +78,7 @@ def get_dim(sheet):\n\"\"\"\nGet the dimensions of data\n\"\"\"\n+\ntry:\ncol_count = sheet.get_all_records()\nexcept:\n@@ -83,6 +88,16 @@ def get_dim(sheet):\nreturn row, col\n+def row_append(data_frame, file):\n+ \"\"\"\n+ Append results to a local csv\n+ \"\"\"\n+\n+ append_df = pd.read_csv(file)\n+ concat_data = pd.concat([data_frame, append_df], axis=1)\n+ return concat_data\n+\n+\n# Example Usage\n# ./update.py --file ../temp/test.out --exec-mode singlenode --auth client_json.json --tag 3.0\nif __name__ == '__main__':\n@@ -91,20 +106,36 @@ if __name__ == '__main__':\ncparser = argparse.ArgumentParser(description='System-ML Update / Stat Script')\ncparser.add_argument('--file', help='Location of the current perf test outputs',\nrequired=True, metavar='')\n- cparser.add_argument('--exec-mode', help='Backend Type', choices=execution_mode,\n- required=True, metavar='')\n- cparser.add_argument('--auth', help='Location to read auth file',\n+ cparser.add_argument('--exec-type', help='Backend Type', choices=execution_mode,\nrequired=True, metavar='')\ncparser.add_argument('--tag', help='Tagging header value',\nrequired=True, metavar='')\n+ cparser.add_argument('--auth', help='Location to read auth file', metavar='')\n+ cparser.add_argument('--append', help='Location to append the outputs', metavar='')\nargs = cparser.parse_args()\n- arg_dict = vars(args)\n- # Authenticate and get sheet dimensions\n- sheet = auth(args.auth, args.exec_mode)\n- row, col = get_dim(sheet)\n+ if args.auth is None and args.append is None:\n+ sys.exit('Both --auth and --append cannot be empty')\n+ algo, time = parse_data(args.file)\n+\n+ if args.append is not None:\n+ schema_df = {'algo_{}'.format(args.tag): algo,\n+ 'time_{}'.format(args.tag): time}\n+ data_frame = pd.DataFrame(schema_df)\n+ if os.path.isfile(args.append):\n+ append_data = row_append(data_frame, args.append)\n+ append_data.to_csv(args.append, sep=',', index=False)\n+ else:\n+ data_frame.to_csv(args.append, sep=',', index=False)\n+\n+ if args.auth is not None:\n# Read data from file and write to google docs\nalgo, time = parse_data(args.file)\n+\n+ # Authenticate and get sheet dimensions\n+ sheet = auth(args.auth, args.exec_type)\n+ row, col = get_dim(sheet)\n+\ninsert_pair(algo, time, col + 1, args.tag)\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/predict.py", "new_path": "scripts/perftest/python/predict.py", "diff": "@@ -239,10 +239,12 @@ def config_packets_predict(algo_payload, matrix_type, matrix_shape, datagen_dir,\nThis dictionary contains algorithms to be executed as keys and the path of configuration\njson files to be executed list of values.\n\"\"\"\n+\nconfig_bundle = {}\n- for k, _ in algo_payload:\n- config_bundle[k] = []\n+ for current_algo, current_family in algo_payload:\n+ key_name = current_algo + '.' 
+ current_family\n+ config_bundle[key_name] = []\nfor current_algo, current_family in algo_payload:\ncurrent_matrix_type = mat_type_check(current_family, matrix_type, dense_algos)\n@@ -268,6 +270,7 @@ def config_packets_predict(algo_payload, matrix_type, matrix_shape, datagen_dir,\nconf_path = globals()[algo_func](save_name, current_data_gen_dir[0],\ncurrent_train_folder, predict_dir, config_dir)\n- config_bundle[current_algo].append(conf_path)\n+ key_name = current_algo + '.' + current_family\n+ config_bundle[key_name].append(conf_path)\nreturn config_bundle\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/perftest/python/requirements.txt", "diff": "+numpy\n+pandas\n+gspread\n+matplotlib\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -233,10 +233,11 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, config_dir, mo\nconf_packet = config_packets_train(algos_to_run, mat_type, mat_shape, data_gen_dir,\ntrain_dir, DENSE_TYPE_ALGOS, train_config_dir)\n- for algo_name, config_files in conf_packet.items():\n+ for algo_family_name, config_files in conf_packet.items():\nfor config in config_files:\n+ algo_name = algo_family_name.split('.')[0]\nfile_name = ML_TRAIN[algo_name]\n- algorithm_workflow(algo_name, exec_type, config, file_name, 'train', train_dir)\n+ algorithm_workflow(algo_family_name, exec_type, config, file_name, 'train', train_dir)\nif 'predict' in mode:\n# Create config directories\n@@ -255,10 +256,12 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, config_dir, mo\nconf_packet = config_packets_predict(algos_to_run, mat_type, mat_shape, data_gen_dir,\ntrain_dir, predict_dir, DENSE_TYPE_ALGOS,\npredict_config_dir)\n- for algo_name, config_files in conf_packet.items():\n+\n+ for algo_family_name, config_files in conf_packet.items():\nfor config in config_files:\n+ algo_name = algo_family_name.split('.')[0]\nfile_name = ML_PREDICT[algo_name]\n- algorithm_workflow(algo_name, exec_type, config, file_name, 'predict', predict_dir)\n+ algorithm_workflow(algo_family_name, exec_type, config, file_name, 'predict', predict_dir)\nif __name__ == '__main__':\n@@ -290,12 +293,28 @@ if __name__ == '__main__':\n# Families\nall_families = ML_ALGO.keys()\n+ # Default Conf\n+ default_conf = 'spark.driver.maxResultSize=0 ' \\\n+ 'spark.akka.frameSize=128 ' \\\n+ 'spark.network.timeout=6000s ' \\\n+ 'spark.rpc.askTimeout=6000s ' \\\n+ 'spark.memory.useLegacyMode=true ' \\\n+ 'spark.files.useFetchCache=false' \\\n+\n+\n+ default_conf_big_job = 'spark.executor.extraJavaOptions=\\\"-Xmn5500m\\\" ' \\\n+ 'spark.executor.memory=\\\"-Xms50g\\\" ' \\\n+ 'spark.yarn.executor.memoryOverhead=8250 ' \\\n+ 'spark.driver.extraJavaOptions=\\\"-Xms20g -Xmn2g\\\"'\n+\n+\n+\n# Argparse Module\ncparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\ndescription='SystemML Performance Test Script')\ncparser.add_argument('--family', help='space separated list of classes of algorithms '\n'(available : ' + ', '.join(sorted(all_families)) + ')',\n- metavar='', choices=all_families, nargs='+', default=' '.join(all_families))\n+ metavar='', choices=all_families, nargs='+', default=all_families)\ncparser.add_argument('--algo', help='space separated list of algorithm to run '\n'(Overrides --family, available : ' + ', '.join(sorted(all_algos)) + ')', metavar='',\nchoices=all_algos, nargs='+')\n@@ -335,12 +354,13 @@ if __name__ == 
'__main__':\ncparser.add_argument('--num-executors', help='Number of executors to launch', metavar='')\ncparser.add_argument('--executor-memory', help='Memory per executor', metavar='')\ncparser.add_argument('--executor-cores', help='Number of cores', metavar='')\n- cparser.add_argument('--conf', help='Spark configuration file', nargs='+', metavar='')\n+ cparser.add_argument('--conf', help='Spark configuration parameters, please use these '\n+ 'parameters for large performance tests ' + default_conf_big_job,\n+ default=default_conf, nargs='+', metavar='')\n# Single node execution mode options\ncparser.add_argument('-heapmem', help='maximum JVM heap memory', metavar='', default='8g')\n-\n# Args is a namespace\nargs = cparser.parse_args()\nall_arg_dict = vars(args)\n@@ -358,19 +378,7 @@ if __name__ == '__main__':\nprint('length of --mat-type argument cannot be greater than two')\nsys.exit()\n- # Check for validity of input arguments\n- if args.family is not None:\n- for fam in args.family:\n- if fam not in ML_ALGO.keys():\n- print('{} family not present in the performance test suit'.format(fam))\n- sys.exit()\n-\nif args.algo is not None:\n- for algo in args.algo:\n- if algo not in all_algos:\n- print('{} algorithm not present in the performance test suit'.format(args.algo))\n- sys.exit()\n-\n# This section check the validity of dual datagen algorithms like m-svm\nalgo_families = {}\nfor current_algo in args.algo:\n@@ -385,9 +393,8 @@ if __name__ == '__main__':\ninput_families = set(args.family)\ncommon_families = input_families.intersection(valid_families)\nif len(common_families) == 0:\n- print('Please specify a valid family for {} and the '\n+ sys.exit('Please specify a valid family for {} and the '\n'valid families are {}'.format(current_algo, ' '.join(valid_families)))\n- sys.exit()\n# Set level to 0 -> debug mode\n# Set level to 20 -> Plain metrics\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/train.py", "new_path": "scripts/perftest/python/train.py", "diff": "@@ -369,10 +369,12 @@ def config_packets_train(algo_payload, matrix_type, matrix_shape, datagen_dir, t\nThis dictionary contains algorithms to be executed as keys and the path of configuration\njson files to be executed list of values.\n\"\"\"\n+\nconfig_bundle = {}\n- for k, _ in algo_payload:\n- config_bundle[k] = []\n+ for current_algo, current_family in algo_payload:\n+ key_name = current_algo + '.' + current_family\n+ config_bundle[key_name] = []\nfor current_algo, current_family in algo_payload:\ncurrent_matrix_type = mat_type_check(current_family, matrix_type, dense_algos)\n@@ -389,7 +391,8 @@ def config_packets_train(algo_payload, matrix_type, matrix_shape, datagen_dir, t\nalgo_func = '_'.join([current_family] + [current_algo.lower().replace('-', '_')]\n+ ['train'])\nconf_path = globals()[algo_func](save_name, current_datagen_dir, train_dir, config_dir)\n- config_bundle[current_algo].append(conf_path)\n+ key_name = current_algo + '.' 
+ current_family\n+ config_bundle[key_name].append(conf_path)\nconfig_packets = {}\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_exec.py", "new_path": "scripts/perftest/python/utils_exec.py", "diff": "@@ -47,6 +47,7 @@ def subprocess_exec(cmd_string, log_file_path=None, extract=None):\n\"\"\"\n# Debug\n# print(cmd_string)\n+\nexec_command = shlex.split(cmd_string)\nproc1 = subprocess.Popen(exec_command, stdout=subprocess.PIPE,\nstderr=subprocess.PIPE)\n@@ -81,6 +82,7 @@ def parse_hdfs_base(std_outs):\nreturn: String\nhdfs base uri\n\"\"\"\n+\nhdfs_uri = None\nfor line in std_outs:\nif line.startswith('hdfs://'):\n@@ -94,6 +96,7 @@ def write_logs(std_outs, log_file_path):\n\"\"\"\nWrite all logs to the specified location\n\"\"\"\n+\nwith open(log_file_path, 'w')as log:\nlog.write(\"\\n\".join(std_outs))\n@@ -108,6 +111,7 @@ def get_all_logs(process):\nreturn: List, List\nStd out and Error as logs as list\n\"\"\"\n+\nout_arr = []\nwhile True:\nnextline = process.stdout.readline().decode('utf8').strip()\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_fs.py", "new_path": "scripts/perftest/python/utils_fs.py", "diff": "@@ -37,6 +37,7 @@ def create_dir_local(directory):\ndirectory: String\nLocation to create a directory\n\"\"\"\n+\nif not os.path.exists(directory):\nos.makedirs(directory)\n@@ -51,6 +52,7 @@ def write_success(time, path):\npath: String\nLocation to write the SUCCESS file\n\"\"\"\n+\nif 'data-gen' in path:\nif path.startswith('hdfs') and len(time.split('.')) == 2:\nfull_path = join(path, '_SUCCESS')\n@@ -75,6 +77,7 @@ def check_SUCCESS_file_exists(path):\nreturn: Boolean\nChecks if the file _SUCCESS exists\n\"\"\"\n+\nif 'data-gen' in path:\nif path.startswith('hdfs'):\nfull_path = join(path, '_SUCCESS')\n@@ -93,6 +96,7 @@ def contains_dir(hdfs_dirs, sub_folder):\n\"\"\"\nSupport for Lambda Function to check if a HDFS subfolder is contained by the HDFS directory\n\"\"\"\n+\nif sub_folder in hdfs_dirs:\nreturn True\nelse:\n@@ -106,6 +110,7 @@ def check_hdfs_path(path):\n\"\"\"\nCheck if a path is present in HDFS\n\"\"\"\n+\ncmd = ['hdfs', 'dfs', '-test', '-e', path]\nreturn_code = subprocess_exec(' '.join(cmd))\nif return_code != 0:\n@@ -137,6 +142,7 @@ def relevant_folders(path, algo, family, matrix_type, matrix_shape, mode):\nreturn: List\nList of folder locations to read data from\n\"\"\"\n+\nfolders = []\nfor current_matrix_type in matrix_type:\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_misc.py", "new_path": "scripts/perftest/python/utils_misc.py", "diff": "@@ -55,8 +55,6 @@ def split_config_args(args):\nperftest_args_dict['mode'] = args['mode']\nperftest_args_dict['temp_dir'] = args['temp_dir']\n-\n-\nsystemml_args_dict = {}\nif args['stats'] is not None:\n@@ -89,6 +87,7 @@ def split_config_args(args):\nif args['conf'] is not None:\nbackend_args_dict['--conf'] = ''.join(args['conf'])\n+\nelif exec_type == 'singlenode':\nif args['heapmem'] is not None:\nbackend_args_dict['-heapmem'] = args['heapmem']\n@@ -214,6 +213,7 @@ def exec_dml_and_parse_time(exec_type, dml_file_name, args, backend_args_dict, s\nalgorithm = dml_file_name + '.dml'\nsup_args = ''.join(['{} {}'.format(k, v) for k, v in systemml_args_dict.items()])\n+\nif exec_type == 'singlenode':\nexec_script = join(os.environ.get('SYSTEMML_HOME'), 'bin', 'systemml-standalone.py')\nsinglenode_pre_args = ''.join([' {} {} '.format(k, v) for k, v in backend_args_dict.items()])\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1451][Phase3] phase 3 work - Offline CSV support - Family bug fix - Plots - Doc Update - Stats update - Bug train, predict append family name Closes #604
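One fix in the record above changes the perf-test config bundles to be keyed by `algo.family` instead of by algorithm alone: an algorithm such as m-svm belongs to more than one family (see the m-svm validity check in run_perftest.py), so a dict keyed only by algorithm silently overwrote one family's configs. A tiny, self-contained Java sketch of the collision, with made-up config values:

```java
import java.util.HashMap;
import java.util.Map;

// Demonstrates why the composite "algo.family" key is needed:
// with plain algorithm keys the second put() would clobber the first.
public class CompositeKeySketch {
    public static void main(String[] args) {
        Map<String, String> bundle = new HashMap<>();
        String[][] runs = { {"m-svm", "binomial"}, {"m-svm", "multinomial"} };
        for (String[] r : runs)
            bundle.put(r[0] + "." + r[1], "config-for-" + r[1]);
        System.out.println(bundle.size()); // prints 2; a plain "m-svm" key would give 1
    }
}
```

The consumer side then recovers the algorithm name by splitting on the dot, exactly as run_perftest.py does with `algo_family_name.split('.')[0]`.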
49,703
28.08.2017 10:19:48
25,200
11e4605735f139b18b008e34a2d505397097389c
[MINOR] Replace QuaternaryOp copy/pasted code with methods Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/QuaternaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/QuaternaryOp.java", "diff": "@@ -321,6 +321,74 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\nsetLops( wsloss );\n}\n+ private Lop obtainlU(Hop U, Hop V, boolean cacheU, double m1Size) throws HopsException, LopsException {\n+ Lop lU = null;\n+ if (cacheU) {\n+ // partitioning of U for read through distributed cache\n+ boolean needPartU = !U.dimsKnown() || U.getDim1() * U.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n+ lU = U.constructLops();\n+ if (needPartU) { // requires partitioning\n+ lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE,\n+ (m1Size > OptimizerUtils.getLocalMemBudget()) ? ExecType.MR : ExecType.CP,\n+ PDataPartitionFormat.ROW_BLOCK_WISE_N);\n+ lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(),\n+ U.getNnz());\n+ setLineNumbers(lU);\n+ }\n+ } else {\n+ // replication of U for shuffle to target block\n+ Lop offset = createOffsetLop(V, false); // ncol of t(V) -> nrow of V determines num replicates\n+ lU = new RepMat(U.constructLops(), offset, true, V.getDataType(), V.getValueType());\n+ lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(),\n+ U.getNnz());\n+ setLineNumbers(lU);\n+\n+ Group grpU = new Group(lU, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n+ grpU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(),\n+ -1);\n+ setLineNumbers(grpU);\n+ lU = grpU;\n+ }\n+ return lU;\n+ }\n+\n+ private Lop obtainlV(Hop U, Hop V, boolean cacheV, double m2Size) throws HopsException, LopsException {\n+ Lop lV = null;\n+ if (cacheV) {\n+ // partitioning of V for read through distributed cache\n+ boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n+ lV = V.constructLops();\n+ if (needPartV) { // requires partitioning\n+ lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE,\n+ (m2Size > OptimizerUtils.getLocalMemBudget()) ? 
ExecType.MR : ExecType.CP,\n+ PDataPartitionFormat.ROW_BLOCK_WISE_N);\n+ lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(),\n+ V.getNnz());\n+ setLineNumbers(lV);\n+ }\n+ } else {\n+ // replication of t(V) for shuffle to target block\n+ Transform ltV = new Transform(V.constructLops(), HopsTransf2Lops.get(ReOrgOp.TRANSPOSE), getDataType(),\n+ getValueType(), ExecType.MR);\n+ ltV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(),\n+ V.getNnz());\n+ setLineNumbers(ltV);\n+\n+ Lop offset = createOffsetLop(U, false); // nrow of U determines num replicates\n+ lV = new RepMat(ltV, offset, false, V.getDataType(), V.getValueType());\n+ lV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(),\n+ V.getNnz());\n+ setLineNumbers(lV);\n+\n+ Group grpV = new Group(lV, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n+ grpV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(),\n+ -1);\n+ setLineNumbers(grpV);\n+ lV = grpV;\n+ }\n+ return lV;\n+ }\n+\nprivate void constructMRLopsWeightedSquaredLoss(WeightsType wtype)\nthrows HopsException, LopsException\n{\n@@ -396,60 +464,8 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\nsetLineNumbers(grpW);\n}\n- Lop lU = null;\n- if( cacheU ) {\n- //partitioning of U for read through distributed cache\n- boolean needPartU = !U.dimsKnown() || U.getDim1() * U.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lU = U.constructLops();\n- if( needPartU ){ //requires partitioning\n- lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE, (m1Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n- }\n- }\n- else {\n- //replication of U for shuffle to target block\n- Lop offset = createOffsetLop(V, false); //ncol of t(V) -> nrow of V determines num replicates\n- lU = new RepMat(U.constructLops(), offset, true, V.getDataType(), V.getValueType());\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(),\n- U.getRowsInBlock(), U.getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n-\n- Group grpU = new Group(lU, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(), -1);\n- setLineNumbers(grpU);\n- lU = grpU;\n- }\n-\n- Lop lV = null;\n- if( cacheV ) {\n- //partitioning of V for read through distributed cache\n- boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lV = V.constructLops();\n- if( needPartV ){ //requires partitioning\n- lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE, (m2Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n- }\n- }\n- else {\n- //replication of t(V) for shuffle to target block\n- Transform ltV = new Transform( V.constructLops(), HopsTransf2Lops.get(ReOrgOp.TRANSPOSE), getDataType(), getValueType(), ExecType.MR);\n- ltV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- 
setLineNumbers(ltV);\n-\n- Lop offset = createOffsetLop(U, false); //nrow of U determines num replicates\n- lV = new RepMat(ltV, offset, false, V.getDataType(), V.getValueType());\n- lV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n-\n- Group grpV = new Group(lV, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(), -1);\n- setLineNumbers(grpV);\n- lV = grpV;\n- }\n+ Lop lU = obtainlU(U, V, cacheU, m1Size);\n+ Lop lV = obtainlV(U, V, cacheV, m2Size);\n//reduce-side wsloss w/ or without broadcast\nLop wsloss = new WeightedSquaredLossR(\n@@ -597,60 +613,8 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\ngrpX.getOutputParameters().setDimensions(X.getDim1(), X.getDim2(), X.getRowsInBlock(), X.getColsInBlock(), X.getNnz());\nsetLineNumbers(grpX);\n- Lop lU = null;\n- if( cacheU ) {\n- //partitioning of U for read through distributed cache\n- boolean needPartU = !U.dimsKnown() || U.getDim1() * U.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lU = U.constructLops();\n- if( needPartU ){ //requires partitioning\n- lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE, (m1Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n- }\n- }\n- else {\n- //replication of U for shuffle to target block\n- Lop offset = createOffsetLop(V, false); //ncol of t(V) -> nrow of V determines num replicates\n- lU = new RepMat(U.constructLops(), offset, true, V.getDataType(), V.getValueType());\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(),\n- U.getRowsInBlock(), U.getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n-\n- Group grpU = new Group(lU, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(), -1);\n- setLineNumbers(grpU);\n- lU = grpU;\n- }\n-\n- Lop lV = null;\n- if( cacheV ) {\n- //partitioning of V for read through distributed cache\n- boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lV = V.constructLops();\n- if( needPartV ){ //requires partitioning\n- lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE, (m2Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n- }\n- }\n- else {\n- //replication of t(V) for shuffle to target block\n- Transform ltV = new Transform( V.constructLops(), HopsTransf2Lops.get(ReOrgOp.TRANSPOSE), getDataType(), getValueType(), ExecType.MR);\n- ltV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(ltV);\n-\n- Lop offset = createOffsetLop(U, false); //nrow of U determines num replicates\n- lV = new RepMat(ltV, offset, false, V.getDataType(), V.getValueType());\n- lV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n-\n- Group grpV = new Group(lV, Group.OperationTypes.Sort, 
DataType.MATRIX, ValueType.DOUBLE);\n- grpV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(), -1);\n- setLineNumbers(grpV);\n- lV = grpV;\n- }\n+ Lop lU = obtainlU(U, V, cacheU, m1Size);\n+ Lop lV = obtainlV(U, V, cacheV, m2Size);\n//reduce-side wsig w/ or without broadcast\nLop wsigmoid = new WeightedSigmoidR(\n@@ -793,60 +757,8 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\ngrpX.getOutputParameters().setDimensions(X.getDim1(), X.getDim2(), X.getRowsInBlock(), X.getColsInBlock(), X.getNnz());\nsetLineNumbers(grpX);\n- Lop lU = null;\n- if( cacheU ) {\n- //partitioning of U for read through distributed cache\n- boolean needPartU = !U.dimsKnown() || U.getDim1() * U.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lU = U.constructLops();\n- if( needPartU ){ //requires partitioning\n- lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE, (m1Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n- }\n- }\n- else {\n- //replication of U for shuffle to target block\n- Lop offset = createOffsetLop(V, false); //ncol of t(V) -> nrow of V determines num replicates\n- lU = new RepMat(U.constructLops(), offset, true, V.getDataType(), V.getValueType());\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(),\n- U.getRowsInBlock(), U.getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n-\n- Group grpU = new Group(lU, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(), -1);\n- setLineNumbers(grpU);\n- lU = grpU;\n- }\n-\n- Lop lV = null;\n- if( cacheV ) {\n- //partitioning of V for read through distributed cache\n- boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lV = V.constructLops();\n- if( needPartV ){ //requires partitioning\n- lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE, (m2Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n- }\n- }\n- else {\n- //replication of t(V) for shuffle to target block\n- Transform ltV = new Transform( V.constructLops(), HopsTransf2Lops.get(ReOrgOp.TRANSPOSE), getDataType(), getValueType(), ExecType.MR);\n- ltV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(ltV);\n-\n- Lop offset = createOffsetLop(U, false); //nrow of U determines num replicates\n- lV = new RepMat(ltV, offset, false, V.getDataType(), V.getValueType());\n- lV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n-\n- Group grpV = new Group(lV, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(), -1);\n- setLineNumbers(grpV);\n- lV = grpV;\n- }\n+ Lop lU = obtainlU(U, V, cacheU, m1Size);\n+ Lop lV = obtainlV(U, V, cacheV, m2Size);\n//reduce-side wdivmm w/ or without broadcast\nLop wdivmm = new WeightedDivMMR( grpW, lU, lV, grpX,\n@@ -1007,60 
+919,8 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\ngrpX.getOutputParameters().setDimensions(X.getDim1(), X.getDim2(), X.getRowsInBlock(), X.getColsInBlock(), -1);\nsetLineNumbers(grpX);\n- Lop lU = null;\n- if( cacheU ) {\n- //partitioning of U for read through distributed cache\n- boolean needPartU = !U.dimsKnown() || U.getDim1() * U.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lU = U.constructLops();\n- if( needPartU ){ //requires partitioning\n- lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE, (m1Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n- }\n- }\n- else {\n- //replication of U for shuffle to target block\n- Lop offset = createOffsetLop(V, false); //ncol of t(V) -> nrow of V determines num replicates\n- lU = new RepMat(U.constructLops(), offset, true, V.getDataType(), V.getValueType());\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(),\n- U.getRowsInBlock(), U.getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n-\n- Group grpU = new Group(lU, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(), -1);\n- setLineNumbers(grpU);\n- lU = grpU;\n- }\n-\n- Lop lV = null;\n- if( cacheV ) {\n- //partitioning of V for read through distributed cache\n- boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lV = V.constructLops();\n- if( needPartV ){ //requires partitioning\n- lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE, (m2Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n- }\n- }\n- else {\n- //replication of t(V) for shuffle to target block\n- Transform ltV = new Transform( V.constructLops(), HopsTransf2Lops.get(ReOrgOp.TRANSPOSE), getDataType(), getValueType(), ExecType.MR);\n- ltV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(ltV);\n-\n- Lop offset = createOffsetLop(U, false); //nrow of U determines num replicates\n- lV = new RepMat(ltV, offset, false, V.getDataType(), V.getValueType());\n- lV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n-\n- Group grpV = new Group(lV, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(), -1);\n- setLineNumbers(grpV);\n- lV = grpV;\n- }\n+ Lop lU = obtainlU(U, V, cacheU, m1Size);\n+ Lop lV = obtainlV(U, V, cacheV, m2Size);\n//reduce-side wcemm w/ or without broadcast\nLop wcemm = new WeightedCrossEntropyR( grpX, lU, lV, eps.constructLops(),\n@@ -1216,60 +1076,8 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\ngrpX.getOutputParameters().setDimensions(X.getDim1(), X.getDim2(), X.getRowsInBlock(), X.getColsInBlock(), X.getNnz());\nsetLineNumbers(grpX);\n- Lop lU = null;\n- if( cacheU ) {\n- //partitioning of U for read through distributed cache\n- boolean needPartU = !U.dimsKnown() || U.getDim1() 
* U.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lU = U.constructLops();\n- if( needPartU ){ //requires partitioning\n- lU = new DataPartition(lU, DataType.MATRIX, ValueType.DOUBLE, (m1Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), getRowsInBlock(), getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n- }\n- }\n- else {\n- //replication of U for shuffle to target block\n- Lop offset = createOffsetLop(V, false); //ncol of t(V) -> nrow of V determines num replicates\n- lU = new RepMat(U.constructLops(), offset, true, V.getDataType(), V.getValueType());\n- lU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(),\n- U.getRowsInBlock(), U.getColsInBlock(), U.getNnz());\n- setLineNumbers(lU);\n-\n- Group grpU = new Group(lU, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpU.getOutputParameters().setDimensions(U.getDim1(), U.getDim2(), U.getRowsInBlock(), U.getColsInBlock(), -1);\n- setLineNumbers(grpU);\n- lU = grpU;\n- }\n-\n- Lop lV = null;\n- if( cacheV ) {\n- //partitioning of V for read through distributed cache\n- boolean needPartV = !V.dimsKnown() || V.getDim1() * V.getDim2() > DistributedCacheInput.PARTITION_SIZE;\n- lV = V.constructLops();\n- if( needPartV ){ //requires partitioning\n- lV = new DataPartition(lV, DataType.MATRIX, ValueType.DOUBLE, (m2Size>OptimizerUtils.getLocalMemBudget())?ExecType.MR:ExecType.CP, PDataPartitionFormat.ROW_BLOCK_WISE_N);\n- lV.getOutputParameters().setDimensions(V.getDim1(), V.getDim2(), getRowsInBlock(), getColsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n- }\n- }\n- else {\n- //replication of t(V) for shuffle to target block\n- Transform ltV = new Transform( V.constructLops(), HopsTransf2Lops.get(ReOrgOp.TRANSPOSE), getDataType(), getValueType(), ExecType.MR);\n- ltV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(ltV);\n-\n- Lop offset = createOffsetLop(U, false); //nrow of U determines num replicates\n- lV = new RepMat(ltV, offset, false, V.getDataType(), V.getValueType());\n- lV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(),\n- V.getColsInBlock(), V.getRowsInBlock(), V.getNnz());\n- setLineNumbers(lV);\n-\n- Group grpV = new Group(lV, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);\n- grpV.getOutputParameters().setDimensions(V.getDim2(), V.getDim1(), V.getColsInBlock(), V.getRowsInBlock(), -1);\n- setLineNumbers(grpV);\n- lV = grpV;\n- }\n+ Lop lU = obtainlU(U, V, cacheU, m1Size);\n+ Lop lV = obtainlV(U, V, cacheV, m2Size);\n//reduce-side wumm w/ or without broadcast\nLop wumm = new WeightedUnaryMMR(\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Replace QuaternaryOp copy/pasted code with methods Closes #640.
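The refactoring above replaces five copy/pasted cacheU/cacheV blocks (partition-for-distributed-cache vs. replicate-and-group) with the shared obtainlU/obtainlV helpers, removing roughly 50 lines per call site. The runnable toy below, with hypothetical names and strings in place of Lop objects, shows only the shape of the change: the logic that varies per call site becomes parameters of one helper.

```java
// Self-contained sketch of the deduplication pattern used in the diff above.
public class DedupSketch {

    // analogous to obtainlU/obtainlV: one helper instead of five inlined copies
    static String obtainSide(String side, boolean cached, double memBudget) {
        return cached
            ? "partitioned(" + side + ", budget=" + memBudget + ")"
            : "replicated+grouped(" + side + ")";
    }

    public static void main(String[] args) {
        // each weighted operator (wsloss, wsigmoid, wdivmm, wcemm, wumm)
        // now makes two calls rather than repeating the branch logic
        System.out.println(obtainSide("U", true, 64.0));
        System.out.println(obtainSide("V", false, 64.0));
    }
}
```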
49,703
28.08.2017 10:24:04
25,200
aa45de98b87300919669ae3ec64dfd44a0b54b21
[MINOR] Refactor MatrixMetadata/FrameMetadata methods to Metadata Move common methods from MatrixMetadata and FrameMetadata classes to abstract parent Metadata class. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/FrameMetadata.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/FrameMetadata.java", "diff": "package org.apache.sysml.api.mlcontext;\n-import org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n/**\n@@ -30,11 +29,6 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n*/\npublic class FrameMetadata extends Metadata {\n- private Long numRows = null;\n- private Long numColumns = null;\n- private Long numNonZeros = null;\n- private Integer numRowsPerBlock = null;\n- private Integer numColumnsPerBlock = null;\nprivate FrameFormat frameFormat;\nprivate FrameSchema frameSchema;\n@@ -522,139 +516,6 @@ public class FrameMetadata extends Metadata {\nthis.numColumnsPerBlock = matrixCharacteristics.getColsPerBlock();\n}\n- /**\n- * Obtain the number of rows\n- *\n- * @return the number of rows\n- */\n- public Long getNumRows() {\n- return numRows;\n- }\n-\n- /**\n- * Set the number of rows\n- *\n- * @param numRows\n- * the number of rows\n- */\n- public void setNumRows(Long numRows) {\n- this.numRows = numRows;\n- }\n-\n- /**\n- * Obtain the number of columns\n- *\n- * @return the number of columns\n- */\n- public Long getNumColumns() {\n- return numColumns;\n- }\n-\n- /**\n- * Set the number of columns\n- *\n- * @param numColumns\n- * the number of columns\n- */\n- public void setNumColumns(Long numColumns) {\n- this.numColumns = numColumns;\n- }\n-\n- /**\n- * Obtain the number of non-zero values\n- *\n- * @return the number of non-zero values\n- */\n- public Long getNumNonZeros() {\n- return numNonZeros;\n- }\n-\n- /**\n- * Set the number of non-zero values\n- *\n- * @param numNonZeros\n- * the number of non-zero values\n- */\n- public void setNumNonZeros(Long numNonZeros) {\n- this.numNonZeros = numNonZeros;\n- }\n-\n- /**\n- * Obtain the number of rows per block\n- *\n- * @return the number of rows per block\n- */\n- public Integer getNumRowsPerBlock() {\n- return numRowsPerBlock;\n- }\n-\n- /**\n- * Set the number of rows per block\n- *\n- * @param numRowsPerBlock\n- * the number of rows per block\n- */\n- public void setNumRowsPerBlock(Integer numRowsPerBlock) {\n- this.numRowsPerBlock = numRowsPerBlock;\n- }\n-\n- /**\n- * Obtain the number of columns per block\n- *\n- * @return the number of columns per block\n- */\n- public Integer getNumColumnsPerBlock() {\n- return numColumnsPerBlock;\n- }\n-\n- /**\n- * Set the number of columns per block\n- *\n- * @param numColumnsPerBlock\n- * the number of columns per block\n- */\n- public void setNumColumnsPerBlock(Integer numColumnsPerBlock) {\n- this.numColumnsPerBlock = numColumnsPerBlock;\n- }\n-\n- /**\n- * Convert the frame metadata to a MatrixCharacteristics object. If all\n- * field values are {@code null}, {@code null} is returned.\n- *\n- * @return the frame metadata as a MatrixCharacteristics object, or\n- * {@code null} if all field values are null\n- */\n- public MatrixCharacteristics asMatrixCharacteristics() {\n-\n- if ((numRows == null) && (numColumns == null) && (numRowsPerBlock == null) && (numColumnsPerBlock == null)\n- && (numNonZeros == null)) {\n- return null;\n- }\n-\n- long nr = (numRows == null) ? -1 : numRows;\n- long nc = (numColumns == null) ? -1 : numColumns;\n- int nrpb = (numRowsPerBlock == null) ? ConfigurationManager.getBlocksize() : numRowsPerBlock;\n- int ncpb = (numColumnsPerBlock == null) ? 
ConfigurationManager.getBlocksize() : numColumnsPerBlock;\n- long nnz = (numNonZeros == null) ? -1 : numNonZeros;\n- MatrixCharacteristics mc = new MatrixCharacteristics(nr, nc, nrpb, ncpb, nnz);\n- return mc;\n- }\n-\n- @Override\n- public String toString() {\n- return \"rows: \" + fieldDisplay(numRows) + \", columns: \" + fieldDisplay(numColumns) + \", non-zeros: \"\n- + fieldDisplay(numNonZeros) + \", rows per block: \" + fieldDisplay(numRowsPerBlock)\n- + \", columns per block: \" + fieldDisplay(numColumnsPerBlock);\n- }\n-\n- private String fieldDisplay(Object field) {\n- if (field == null) {\n- return \"None\";\n- } else {\n- return field.toString();\n- }\n- }\n-\n/**\n* Obtain the frame format\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MatrixMetadata.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MatrixMetadata.java", "diff": "package org.apache.sysml.api.mlcontext;\n-import org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n/**\n@@ -30,11 +29,6 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n*/\npublic class MatrixMetadata extends Metadata {\n- private Long numRows = null;\n- private Long numColumns = null;\n- private Long numNonZeros = null;\n- private Integer numRowsPerBlock = null;\n- private Integer numColumnsPerBlock = null;\nprivate MatrixFormat matrixFormat;\npublic MatrixMetadata() {\n@@ -354,152 +348,6 @@ public class MatrixMetadata extends Metadata {\nthis.numColumnsPerBlock = matrixCharacteristics.getColsPerBlock();\n}\n- /**\n- * Set the MatrixMetadata fields based on a MatrixCharacteristics object.\n- *\n- * @param matrixCharacteristics\n- * the matrix metadata as a MatrixCharacteristics object\n- */\n- public void setMatrixCharacteristics(MatrixCharacteristics matrixCharacteristics) {\n- this.numRows = matrixCharacteristics.getRows();\n- this.numColumns = matrixCharacteristics.getCols();\n- this.numNonZeros = matrixCharacteristics.getNonZeros();\n- this.numRowsPerBlock = matrixCharacteristics.getRowsPerBlock();\n- this.numColumnsPerBlock = matrixCharacteristics.getColsPerBlock();\n- }\n-\n- /**\n- * Obtain the number of rows\n- *\n- * @return the number of rows\n- */\n- public Long getNumRows() {\n- return numRows;\n- }\n-\n- /**\n- * Set the number of rows\n- *\n- * @param numRows\n- * the number of rows\n- */\n- public void setNumRows(Long numRows) {\n- this.numRows = numRows;\n- }\n-\n- /**\n- * Obtain the number of columns\n- *\n- * @return the number of columns\n- */\n- public Long getNumColumns() {\n- return numColumns;\n- }\n-\n- /**\n- * Set the number of columns\n- *\n- * @param numColumns\n- * the number of columns\n- */\n- public void setNumColumns(Long numColumns) {\n- this.numColumns = numColumns;\n- }\n-\n- /**\n- * Obtain the number of non-zero values\n- *\n- * @return the number of non-zero values\n- */\n- public Long getNumNonZeros() {\n- return numNonZeros;\n- }\n-\n- /**\n- * Set the number of non-zero values\n- *\n- * @param numNonZeros\n- * the number of non-zero values\n- */\n- public void setNumNonZeros(Long numNonZeros) {\n- this.numNonZeros = numNonZeros;\n- }\n-\n- /**\n- * Obtain the number of rows per block\n- *\n- * @return the number of rows per block\n- */\n- public Integer getNumRowsPerBlock() {\n- return numRowsPerBlock;\n- }\n-\n- /**\n- * Set the number of rows per block\n- *\n- * @param numRowsPerBlock\n- * the number of rows per block\n- */\n- public void setNumRowsPerBlock(Integer 
numRowsPerBlock) {\n- this.numRowsPerBlock = numRowsPerBlock;\n- }\n-\n- /**\n- * Obtain the number of columns per block\n- *\n- * @return the number of columns per block\n- */\n- public Integer getNumColumnsPerBlock() {\n- return numColumnsPerBlock;\n- }\n-\n- /**\n- * Set the number of columns per block\n- *\n- * @param numColumnsPerBlock\n- * the number of columns per block\n- */\n- public void setNumColumnsPerBlock(Integer numColumnsPerBlock) {\n- this.numColumnsPerBlock = numColumnsPerBlock;\n- }\n-\n- /**\n- * Convert the matrix metadata to a MatrixCharacteristics object. If all\n- * field values are {@code null}, {@code null} is returned.\n- *\n- * @return the matrix metadata as a MatrixCharacteristics object, or\n- * {@code null} if all field values are null\n- */\n- public MatrixCharacteristics asMatrixCharacteristics() {\n-\n- if ((numRows == null) && (numColumns == null) && (numRowsPerBlock == null) && (numColumnsPerBlock == null)\n- && (numNonZeros == null)) {\n- return null;\n- }\n-\n- long nr = (numRows == null) ? -1 : numRows;\n- long nc = (numColumns == null) ? -1 : numColumns;\n- int nrpb = (numRowsPerBlock == null) ? ConfigurationManager.getBlocksize() : numRowsPerBlock;\n- int ncpb = (numColumnsPerBlock == null) ? ConfigurationManager.getBlocksize() : numColumnsPerBlock;\n- long nnz = (numNonZeros == null) ? -1 : numNonZeros;\n- return new MatrixCharacteristics(nr, nc, nrpb, ncpb, nnz);\n- }\n-\n- @Override\n- public String toString() {\n- return \"rows: \" + fieldDisplay(numRows) + \", columns: \" + fieldDisplay(numColumns) + \", non-zeros: \"\n- + fieldDisplay(numNonZeros) + \", rows per block: \" + fieldDisplay(numRowsPerBlock)\n- + \", columns per block: \" + fieldDisplay(numColumnsPerBlock);\n- }\n-\n- private String fieldDisplay(Object field) {\n- if (field == null) {\n- return \"None\";\n- } else {\n- return field.toString();\n- }\n- }\n-\n/**\n* Obtain the matrix format\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/Metadata.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/Metadata.java", "diff": "package org.apache.sysml.api.mlcontext;\n+import org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+\n/**\n* Abstract metadata class for MLContext API. Complex types such as SystemML\n* matrices and frames typically require metadata, so this abstract class serves\n@@ -27,4 +30,156 @@ package org.apache.sysml.api.mlcontext;\n*/\npublic abstract class Metadata {\n+ protected Long numColumns = null;\n+ protected Integer numColumnsPerBlock = null;\n+ protected Long numNonZeros = null;\n+ protected Long numRows = null;\n+ protected Integer numRowsPerBlock = null;\n+\n+ /**\n+ * Convert the metadata to a MatrixCharacteristics object. If all field\n+ * values are {@code null}, {@code null} is returned.\n+ *\n+ * @return the metadata as a MatrixCharacteristics object, or {@code null}\n+ * if all field values are null\n+ */\n+ public MatrixCharacteristics asMatrixCharacteristics() {\n+\n+ if ((numRows == null) && (numColumns == null) && (numRowsPerBlock == null) && (numColumnsPerBlock == null)\n+ && (numNonZeros == null)) {\n+ return null;\n+ }\n+\n+ long nr = (numRows == null) ? -1 : numRows;\n+ long nc = (numColumns == null) ? -1 : numColumns;\n+ int nrpb = (numRowsPerBlock == null) ? ConfigurationManager.getBlocksize() : numRowsPerBlock;\n+ int ncpb = (numColumnsPerBlock == null) ? 
ConfigurationManager.getBlocksize() : numColumnsPerBlock;\n+ long nnz = (numNonZeros == null) ? -1 : numNonZeros;\n+ return new MatrixCharacteristics(nr, nc, nrpb, ncpb, nnz);\n+ }\n+\n+ protected String fieldDisplay(Object field) {\n+ if (field == null) {\n+ return \"None\";\n+ } else {\n+ return field.toString();\n+ }\n+ }\n+\n+ /**\n+ * Obtain the number of columns\n+ *\n+ * @return the number of columns\n+ */\n+ public Long getNumColumns() {\n+ return numColumns;\n+ }\n+\n+ /**\n+ * Obtain the number of columns per block\n+ *\n+ * @return the number of columns per block\n+ */\n+ public Integer getNumColumnsPerBlock() {\n+ return numColumnsPerBlock;\n+ }\n+\n+ /**\n+ * Obtain the number of non-zero values\n+ *\n+ * @return the number of non-zero values\n+ */\n+ public Long getNumNonZeros() {\n+ return numNonZeros;\n+ }\n+\n+ /**\n+ * Obtain the number of rows\n+ *\n+ * @return the number of rows\n+ */\n+ public Long getNumRows() {\n+ return numRows;\n+ }\n+\n+ /**\n+ * Obtain the number of rows per block\n+ *\n+ * @return the number of rows per block\n+ */\n+ public Integer getNumRowsPerBlock() {\n+ return numRowsPerBlock;\n+ }\n+\n+ /**\n+ * Set the metadata fields based on a MatrixCharacteristics object.\n+ *\n+ * @param matrixCharacteristics\n+ * the matrix metadata as a MatrixCharacteristics object\n+ */\n+ public void setMatrixCharacteristics(MatrixCharacteristics matrixCharacteristics) {\n+ this.numRows = matrixCharacteristics.getRows();\n+ this.numColumns = matrixCharacteristics.getCols();\n+ this.numNonZeros = matrixCharacteristics.getNonZeros();\n+ this.numRowsPerBlock = matrixCharacteristics.getRowsPerBlock();\n+ this.numColumnsPerBlock = matrixCharacteristics.getColsPerBlock();\n+ }\n+\n+ /**\n+ * Set the number of columns\n+ *\n+ * @param numColumns\n+ * the number of columns\n+ */\n+ public void setNumColumns(Long numColumns) {\n+ this.numColumns = numColumns;\n+ }\n+\n+ /**\n+ * Set the number of columns per block\n+ *\n+ * @param numColumnsPerBlock\n+ * the number of columns per block\n+ */\n+ public void setNumColumnsPerBlock(Integer numColumnsPerBlock) {\n+ this.numColumnsPerBlock = numColumnsPerBlock;\n+ }\n+\n+ /**\n+ * Set the number of non-zero values\n+ *\n+ * @param numNonZeros\n+ * the number of non-zero values\n+ */\n+ public void setNumNonZeros(Long numNonZeros) {\n+ this.numNonZeros = numNonZeros;\n+ }\n+\n+ /**\n+ * Set the number of rows\n+ *\n+ * @param numRows\n+ * the number of rows\n+ */\n+ public void setNumRows(Long numRows) {\n+ this.numRows = numRows;\n+ }\n+\n+ /**\n+ * Set the number of rows per block\n+ *\n+ * @param numRowsPerBlock\n+ * the number of rows per block\n+ */\n+ public void setNumRowsPerBlock(Integer numRowsPerBlock) {\n+ this.numRowsPerBlock = numRowsPerBlock;\n+ }\n+\n+ @Override\n+ public String toString() {\n+ return \"rows: \" + fieldDisplay(numRows) + \", columns: \" + fieldDisplay(numColumns) + \", non-zeros: \"\n+ + fieldDisplay(numNonZeros) + \", rows per block: \" + fieldDisplay(numRowsPerBlock)\n+ + \", columns per block: \" + fieldDisplay(numColumnsPerBlock);\n+ }\n+\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Refactor MatrixMetadata/FrameMetadata methods to Metadata Move common methods from MatrixMetadata and FrameMetadata classes to abstract parent Metadata class. Closes #641.
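For the metadata refactor above, the inherited setters and `asMatrixCharacteristics()` behave as in this minimal sketch (the no-arg `MatrixMetadata` constructor is assumed; the sizes are illustrative):

```java
import org.apache.sysml.api.mlcontext.MatrixMetadata;
import org.apache.sysml.runtime.matrix.MatrixCharacteristics;

public class MetadataSketch {
    public static void main(String[] args) {
        MatrixMetadata md = new MatrixMetadata(); // assumed no-arg constructor
        md.setNumRows(10000L);   // setters now live in the abstract Metadata parent
        md.setNumColumns(100L);
        // unset rows/cols/non-zeros map to -1 (unknown); unset block sizes
        // fall back to the configured default block size
        MatrixCharacteristics mc = md.asMatrixCharacteristics();
        System.out.println(mc);
    }
}
```

Note that when every field is null, `asMatrixCharacteristics()` returns `null`, so callers converting metadata should be prepared for that case.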
49,737
28.08.2017 10:32:55
25,200
428f3aa217ea2d266544f1e7e0178ea87ccfba4a
[MINOR][DOC] Perf Test Google sheets API Instructions on how to configure the google client API for performance tests. Closes
[ { "change_type": "MODIFY", "old_path": "docs/python-performance-test.md", "new_path": "docs/python-performance-test.md", "diff": "@@ -148,6 +148,27 @@ Run performance test for all algorithms under the family `regression2` and log w\nRun performance test for all algorithms using HDFS.\n+## Google sheets API\n+\n+Steps below to configure google client API:\n+\n+- Navigate to [Google APIs Console](https://console.developers.google.com/apis/).\n+- Create a new project.\n+- Click Enable API. Search for and enable the Google Drive API.\n+- Create credentials for a Web Server to access Application Data.\n+- Name the service account and grant it a Project Role of Editor.\n+- Download the JSON file.\n+- Copy the JSON file to your code directory and rename it to client_secret.json\n+\n+Steps below to configure google sheets:\n+\n+- Create a new spread sheet with google sheets.\n+- Create seperate sheets for `singlenode` and `hybrid_spark`.\n+- Find the client_email inside client_secret.json and save it.\n+- Back in your spreadsheet, click the Share button in the top right, and paste the client email into the People field to give it edit rights for each sheet.\n+- Click Send\n+\n+\n## Result Consolidation and Plotting\nWe have two scripts, `stats.py` forpulling results from google docs and `update.py` to updating results to google docs or local file system.\n@@ -159,6 +180,7 @@ Example of `stats.py` below\n` ./stats.py --auth ../key/client_json.json --exec-type singlenode --plot stats1_data-gen_none_dense_10k_100`\n`--plot` argument needs the name of the composite key that you would like to compare results over. If this argument is not specified the results would be grouped by keys.\n+\n## Operational Notes\nAll performance test depend mainly on two scripts for execution `systemml-standalone.py` and `systemml-spark-submit.py`. Incase we need to change standalone or spark parameters we need to manually change these parameters in their respective scripts.\n@@ -198,7 +220,6 @@ Matrix Shape | Approximate Data Size\n10M_1k|80GB\n100M_1k|800GB\n-\nFor example the command below runs performance test for all data sizes described above\n`run_perftest.py --family binomial clustering multinomial regression1 regression2 stats1 stats2 --mat-shape 10k_1k 100k_1k 1M_1k 10M_1k 100M_1k --master yarn-client --temp-dir hdfs://localhost:9000/user/systemml`\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR][DOC] Perf Test Google sheets API Instructions on how to configure the google client API for performance tests. Closes #642.
49,768
29.08.2017 10:27:42
25,200
9a8421d4c4fe3bc65ebac703fd30be49a20d8c8c
Update license with current code change
[ { "change_type": "MODIFY", "old_path": "dev/release/src/test/java/org/apache/sysml/validation/ValidateLicAndNotice.java", "new_path": "dev/release/src/test/java/org/apache/sysml/validation/ValidateLicAndNotice.java", "diff": "@@ -59,7 +59,10 @@ public class ValidateLicAndNotice\nstatic final String[][] packageLicenses =\n{ {\"org/antlr\", \"ANTLR 4 Runtime (http://www.antlr.org/antlr4-runtime) org.antlr:antlr4-runtime:4.5.3\"},\n- {\"org/apache/wink/json4j\",\"Apache Wink :: JSON4J (http://www.apache.org/wink/wink-json4j/) org.apache.wink:wink-json4j:1.4\"}\n+ {\"org/apache/wink/json4j\",\"Apache Wink :: JSON4J (http://www.apache.org/wink/wink-json4j/) org.apache.wink:wink-json4j:1.4\"},\n+ {\"caffe\",\"The proto file (src/main/proto/caffe/caffe.proto) is part of Caffe project,\"},\n+ {\"org/tensorflow\",\"The proto files (src/main/proto/tensorflow/event.proto and src/main/proto/tensorflow/summary.proto) is part of TensorFlow project,\"},\n+ {\"jcuda\",\"JCuda (jcuda.org)\"},\n};\npublic static HashMap<String, String[][]> hmJSLicenses = new HashMap<String, String[][]>();\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1642] Update license with current code change
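The `packageLicenses` table above pairs a package-path prefix with the license line expected in the shipped NOTICE; a hypothetical lookup over that table (the helper is illustrative only, not the validator's actual control flow):

```java
// hypothetical helper: map a class-file path to the expected license line, if any
static String expectedLicenseLine(String entryPath, String[][] packageLicenses) {
    for (String[] entry : packageLicenses)
        if (entryPath.startsWith(entry[0]))
            return entry[1]; // e.g. "jcuda" -> "JCuda (jcuda.org)"
    return null; // path not covered by the table
}
```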
49,736
29.08.2017 14:37:19
25,200
bd1946a3d91f3661fc6572bbc6c9500a59992044
[MINOR] Allow users to set explain level of mllearn algorithms
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/estimators.py", "new_path": "src/main/python/systemml/mllearn/estimators.py", "diff": "@@ -100,6 +100,19 @@ class BaseSystemMLEstimator(Estimator):\nself.estimator.setExplain(explain)\nreturn self\n+ def setExplainLevel(self, explainLevel):\n+ \"\"\"\n+ Set explain level. Mainly intended for developers.\n+\n+ Parameters\n+ ----------\n+ explainLevel: string\n+ Can be one of \"hops\", \"runtime\", \"recompile_hops\", \"recompile_runtime\"\n+ or in the above in upper case.\n+ \"\"\"\n+ self.estimator.setExplainLevel(explainLevel)\n+ return self\n+\ndef setStatistics(self, statistics):\n\"\"\"\nWhether or not to output statistics (such as execution time, elapsed time)\n" }, { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala", "new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala", "diff": "@@ -108,23 +108,27 @@ trait BaseSystemMLEstimatorOrModel {\nvar enableGPU:Boolean = false\nvar forceGPU:Boolean = false\nvar explain:Boolean = false\n+ var explainLevel:String = \"runtime\"\nvar statistics:Boolean = false\nvar statisticsMaxHeavyHitters:Int = 10\nval config:HashMap[String, String] = new HashMap[String, String]()\ndef setGPU(enableGPU1:Boolean):BaseSystemMLEstimatorOrModel = { enableGPU = enableGPU1; this}\ndef setForceGPU(enableGPU1:Boolean):BaseSystemMLEstimatorOrModel = { forceGPU = enableGPU1; this}\ndef setExplain(explain1:Boolean):BaseSystemMLEstimatorOrModel = { explain = explain1; this}\n+ def setExplainLevel(explainLevel1:String):BaseSystemMLEstimatorOrModel = { explainLevel = explainLevel1; this }\ndef setStatistics(statistics1:Boolean):BaseSystemMLEstimatorOrModel = { statistics = statistics1; this}\ndef setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters1:Int):BaseSystemMLEstimatorOrModel = { statisticsMaxHeavyHitters = statisticsMaxHeavyHitters1; this}\ndef setConfigProperty(key:String, value:String):BaseSystemMLEstimatorOrModel = { config.put(key, value); this}\ndef updateML(ml:MLContext):Unit = {\nml.setGPU(enableGPU); ml.setForceGPU(forceGPU);\n- ml.setExplain(explain); ml.setStatistics(statistics); ml.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\n+ ml.setExplain(explain); ml.setExplainLevel(explainLevel);\n+ ml.setStatistics(statistics); ml.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\nconfig.map(x => ml.setConfigProperty(x._1, x._2))\n}\ndef copyProperties(other:BaseSystemMLEstimatorOrModel):BaseSystemMLEstimatorOrModel = {\nother.setGPU(enableGPU); other.setForceGPU(forceGPU);\n- other.setExplain(explain); other.setStatistics(statistics); other.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\n+ other.setExplain(explain); other.setExplainLevel(explainLevel);\n+ other.setStatistics(statistics); other.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\nconfig.map(x => other.setConfigProperty(x._1, x._2))\nreturn other\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Allow users to set explain level of mllearn algorithms
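Because the estimator setters above simply forward to `MLContext`, the same knob is reachable from the Java API; a minimal sketch, with the valid level strings taken from the Python docstring in the diff:

```java
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.sysml.api.mlcontext.MLContext;

public class ExplainLevelSketch {
    public static MLContext configure(JavaSparkContext sc) {
        MLContext ml = new MLContext(sc);
        ml.setExplain(true);
        // one of "hops", "runtime", "recompile_hops", "recompile_runtime"
        ml.setExplainLevel("recompile_runtime");
        return ml;
    }
}
```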
49,703
01.09.2017 16:02:55
25,200
912c65506d626c8b0128ceb80744fde49efd4a1a
[MINOR] Increase MLContext test coverage Create MLContext tests to test previously untested methods. Update MLContext and MLContextConversionUtil to avoid NPEs. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "diff": "@@ -55,7 +55,7 @@ public class MLContext {\n/**\n* Logger for MLContext\n*/\n- public static Logger log = Logger.getLogger(MLContext.class);\n+ protected static Logger log = Logger.getLogger(MLContext.class);\n/**\n* SparkSession object.\n@@ -665,7 +665,9 @@ public class MLContext {\n// clear local status, but do not stop sc as it\n// may be used or stopped externally\n+ if (executionScript != null) {\nexecutionScript.clearAll();\n+ }\nresetConfig();\nspark = null;\n}\n@@ -693,7 +695,7 @@ public class MLContext {\n*/\npublic String version() {\nif (info() == null) {\n- return \"Version not available\";\n+ return MLContextUtil.VERSION_NOT_AVAILABLE;\n}\nreturn info().version();\n}\n@@ -705,7 +707,7 @@ public class MLContext {\n*/\npublic String buildTime() {\nif (info() == null) {\n- return \"Build time not available\";\n+ return MLContextUtil.BUILD_TIME_NOT_AVAILABLE;\n}\nreturn info().buildTime();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextConversionUtil.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextConversionUtil.java", "diff": "@@ -482,6 +482,9 @@ public class MLContextConversionUtil {\n* the matrix metadata, if available\n*/\npublic static void determineMatrixFormatIfNeeded(Dataset<Row> dataFrame, MatrixMetadata matrixMetadata) {\n+ if (matrixMetadata == null) {\n+ return;\n+ }\nMatrixFormat matrixFormat = matrixMetadata.getMatrixFormat();\nif (matrixFormat != null) {\nreturn;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java", "diff": "@@ -93,120 +93,30 @@ import org.w3c.dom.NodeList;\npublic final class MLContextUtil {\n/**\n- * Get HOP DAG in dot format for a DML or PYDML Script.\n- *\n- * @param mlCtx\n- * MLContext object.\n- * @param script\n- * The DML or PYDML Script object to execute.\n- * @param lines\n- * Only display the hops that have begin and end line number\n- * equals to the given integers.\n- * @param performHOPRewrites\n- * should perform static rewrites, perform\n- * intra-/inter-procedural analysis to propagate size information\n- * into functions and apply dynamic rewrites\n- * @param withSubgraph\n- * If false, the dot graph will be created without subgraphs for\n- * statement blocks.\n- * @return hop DAG in dot format\n- * @throws LanguageException\n- * if error occurs\n- * @throws DMLRuntimeException\n- * if error occurs\n- * @throws HopsException\n- * if error occurs\n+ * Version not available message.\n*/\n- public static String getHopDAG(MLContext mlCtx, Script script, ArrayList<Integer> lines,\n- boolean performHOPRewrites, boolean withSubgraph) throws HopsException, DMLRuntimeException,\n- LanguageException {\n- return getHopDAG(mlCtx, script, lines, null, performHOPRewrites, withSubgraph);\n- }\n+ public static final String VERSION_NOT_AVAILABLE = \"Version not available\";\n/**\n- * Get HOP DAG in dot format for a DML or PYDML Script.\n- *\n- * @param mlCtx\n- * MLContext object.\n- * @param script\n- * The DML or PYDML Script object to execute.\n- * @param lines\n- * Only display the hops that have begin and end line number\n- * equals to the given integers.\n- * @param newConf\n- * Spark Configuration.\n- * @param performHOPRewrites\n- * should 
perform static rewrites, perform\n- * intra-/inter-procedural analysis to propagate size information\n- * into functions and apply dynamic rewrites\n- * @param withSubgraph\n- * If false, the dot graph will be created without subgraphs for\n- * statement blocks.\n- * @return hop DAG in dot format\n- * @throws LanguageException\n- * if error occurs\n- * @throws DMLRuntimeException\n- * if error occurs\n- * @throws HopsException\n- * if error occurs\n+ * Build time not available message.\n*/\n- public static String getHopDAG(MLContext mlCtx, Script script, ArrayList<Integer> lines, SparkConf newConf,\n- boolean performHOPRewrites, boolean withSubgraph) throws HopsException, DMLRuntimeException,\n- LanguageException {\n- SparkConf oldConf = mlCtx.getSparkSession().sparkContext().getConf();\n- SparkExecutionContext.SparkClusterConfig systemmlConf = SparkExecutionContext.getSparkClusterConfig();\n- long oldMaxMemory = InfrastructureAnalyzer.getLocalMaxMemory();\n- try {\n- if (newConf != null) {\n- systemmlConf.analyzeSparkConfiguation(newConf);\n- InfrastructureAnalyzer.setLocalMaxMemory(newConf.getSizeAsBytes(\"spark.driver.memory\"));\n- }\n- ScriptExecutor scriptExecutor = new ScriptExecutor();\n- scriptExecutor.setExecutionType(mlCtx.getExecutionType());\n- scriptExecutor.setGPU(mlCtx.isGPU());\n- scriptExecutor.setForceGPU(mlCtx.isForceGPU());\n- scriptExecutor.setInit(mlCtx.isInitBeforeExecution());\n- if (mlCtx.isInitBeforeExecution()) {\n- mlCtx.setInitBeforeExecution(false);\n- }\n- scriptExecutor.setMaintainSymbolTable(mlCtx.isMaintainSymbolTable());\n-\n- Long time = new Long((new Date()).getTime());\n- if ((script.getName() == null) || (script.getName().equals(\"\"))) {\n- script.setName(time.toString());\n- }\n-\n- mlCtx.setExecutionScript(script);\n- scriptExecutor.compile(script, performHOPRewrites);\n- Explain.reset();\n- // To deal with potential Py4J issues\n- lines = lines.size() == 1 && lines.get(0) == -1 ? 
new ArrayList<Integer>() : lines;\n- return Explain.getHopDAG(scriptExecutor.dmlProgram, lines, withSubgraph);\n- } catch (RuntimeException e) {\n- throw new MLContextException(\"Exception when compiling script\", e);\n- } finally {\n- if (newConf != null) {\n- systemmlConf.analyzeSparkConfiguation(oldConf);\n- InfrastructureAnalyzer.setLocalMaxMemory(oldMaxMemory);\n- }\n- }\n- }\n+ public static final String BUILD_TIME_NOT_AVAILABLE = \"Build time not available\";\n/**\n- * Basic data types supported by the MLContext API\n+ * Basic data types supported by the MLContext API.\n*/\n@SuppressWarnings(\"rawtypes\")\npublic static final Class[] BASIC_DATA_TYPES = { Integer.class, Boolean.class, Double.class, String.class };\n/**\n- * Complex data types supported by the MLContext API\n+ * Complex data types supported by the MLContext API.\n*/\n@SuppressWarnings(\"rawtypes\")\npublic static final Class[] COMPLEX_DATA_TYPES = { JavaRDD.class, RDD.class, Dataset.class, Matrix.class,\nFrame.class, (new double[][] {}).getClass(), MatrixBlock.class, URL.class };\n/**\n- * All data types supported by the MLContext API\n+ * All data types supported by the MLContext API.\n*/\n@SuppressWarnings(\"rawtypes\")\npublic static final Class[] ALL_SUPPORTED_DATA_TYPES = (Class[]) ArrayUtils.addAll(BASIC_DATA_TYPES,\n@@ -1252,4 +1162,104 @@ public final class MLContextUtil {\n}\n}\n}\n+\n+ /**\n+ * Get HOP DAG in dot format for a DML or PYDML Script.\n+ *\n+ * @param mlCtx\n+ * MLContext object.\n+ * @param script\n+ * The DML or PYDML Script object to execute.\n+ * @param lines\n+ * Only display the hops that have begin and end line number\n+ * equals to the given integers.\n+ * @param performHOPRewrites\n+ * should perform static rewrites, perform\n+ * intra-/inter-procedural analysis to propagate size information\n+ * into functions and apply dynamic rewrites\n+ * @param withSubgraph\n+ * If false, the dot graph will be created without subgraphs for\n+ * statement blocks.\n+ * @return hop DAG in dot format\n+ * @throws LanguageException\n+ * if error occurs\n+ * @throws DMLRuntimeException\n+ * if error occurs\n+ * @throws HopsException\n+ * if error occurs\n+ */\n+ public static String getHopDAG(MLContext mlCtx, Script script, ArrayList<Integer> lines, boolean performHOPRewrites,\n+ boolean withSubgraph) throws HopsException, DMLRuntimeException, LanguageException {\n+ return getHopDAG(mlCtx, script, lines, null, performHOPRewrites, withSubgraph);\n+ }\n+\n+ /**\n+ * Get HOP DAG in dot format for a DML or PYDML Script.\n+ *\n+ * @param mlCtx\n+ * MLContext object.\n+ * @param script\n+ * The DML or PYDML Script object to execute.\n+ * @param lines\n+ * Only display the hops that have begin and end line number\n+ * equals to the given integers.\n+ * @param newConf\n+ * Spark Configuration.\n+ * @param performHOPRewrites\n+ * should perform static rewrites, perform\n+ * intra-/inter-procedural analysis to propagate size information\n+ * into functions and apply dynamic rewrites\n+ * @param withSubgraph\n+ * If false, the dot graph will be created without subgraphs for\n+ * statement blocks.\n+ * @return hop DAG in dot format\n+ * @throws LanguageException\n+ * if error occurs\n+ * @throws DMLRuntimeException\n+ * if error occurs\n+ * @throws HopsException\n+ * if error occurs\n+ */\n+ public static String getHopDAG(MLContext mlCtx, Script script, ArrayList<Integer> lines, SparkConf newConf,\n+ boolean performHOPRewrites, boolean withSubgraph)\n+ throws HopsException, DMLRuntimeException, LanguageException {\n+ 
SparkConf oldConf = mlCtx.getSparkSession().sparkContext().getConf();\n+ SparkExecutionContext.SparkClusterConfig systemmlConf = SparkExecutionContext.getSparkClusterConfig();\n+ long oldMaxMemory = InfrastructureAnalyzer.getLocalMaxMemory();\n+ try {\n+ if (newConf != null) {\n+ systemmlConf.analyzeSparkConfiguation(newConf);\n+ InfrastructureAnalyzer.setLocalMaxMemory(newConf.getSizeAsBytes(\"spark.driver.memory\"));\n+ }\n+ ScriptExecutor scriptExecutor = new ScriptExecutor();\n+ scriptExecutor.setExecutionType(mlCtx.getExecutionType());\n+ scriptExecutor.setGPU(mlCtx.isGPU());\n+ scriptExecutor.setForceGPU(mlCtx.isForceGPU());\n+ scriptExecutor.setInit(mlCtx.isInitBeforeExecution());\n+ if (mlCtx.isInitBeforeExecution()) {\n+ mlCtx.setInitBeforeExecution(false);\n+ }\n+ scriptExecutor.setMaintainSymbolTable(mlCtx.isMaintainSymbolTable());\n+\n+ Long time = new Long((new Date()).getTime());\n+ if ((script.getName() == null) || (script.getName().equals(\"\"))) {\n+ script.setName(time.toString());\n+ }\n+\n+ mlCtx.setExecutionScript(script);\n+ scriptExecutor.compile(script, performHOPRewrites);\n+ Explain.reset();\n+ // To deal with potential Py4J issues\n+ lines = lines.size() == 1 && lines.get(0) == -1 ? new ArrayList<Integer>() : lines;\n+ return Explain.getHopDAG(scriptExecutor.dmlProgram, lines, withSubgraph);\n+ } catch (RuntimeException e) {\n+ throw new MLContextException(\"Exception when compiling script\", e);\n+ } finally {\n+ if (newConf != null) {\n+ systemmlConf.analyzeSparkConfiguation(oldConf);\n+ InfrastructureAnalyzer.setLocalMaxMemory(oldMaxMemory);\n+ }\n+ }\n+ }\n+\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java", "diff": "@@ -42,10 +42,13 @@ import java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\n+import java.util.stream.Collectors;\n+import java.util.stream.Stream;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\nimport org.apache.spark.api.java.function.Function;\n+import org.apache.spark.ml.linalg.DenseVector;\nimport org.apache.spark.ml.linalg.Vector;\nimport org.apache.spark.ml.linalg.VectorUDT;\nimport org.apache.spark.ml.linalg.Vectors;\n@@ -54,10 +57,12 @@ import org.apache.spark.sql.Dataset;\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.RowFactory;\nimport org.apache.spark.sql.types.DataTypes;\n+import org.apache.spark.sql.types.DoubleType;\nimport org.apache.spark.sql.types.StructField;\nimport org.apache.spark.sql.types.StructType;\nimport org.apache.sysml.api.mlcontext.MLContextConversionUtil;\nimport org.apache.sysml.api.mlcontext.MLContextException;\n+import org.apache.sysml.api.mlcontext.MLContextUtil;\nimport org.apache.sysml.api.mlcontext.MLResults;\nimport org.apache.sysml.api.mlcontext.Matrix;\nimport org.apache.sysml.api.mlcontext.MatrixFormat;\n@@ -69,11 +74,14 @@ import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n+import org.apache.sysml.runtime.util.DataConverter;\nimport org.junit.Assert;\nimport org.junit.Test;\n+import scala.Tuple1;\nimport scala.Tuple2;\nimport scala.Tuple3;\n+import scala.Tuple4;\nimport scala.collection.Iterator;\nimport 
scala.collection.JavaConversions;\nimport scala.collection.Seq;\n@@ -2756,4 +2764,310 @@ public class MLContextTest extends MLContextTestBase {\nAssert.assertEquals(3, results.getLong(\"y\"));\n}\n+ @Test\n+ public void testOutputDataFrameOfVectorsDML() {\n+ System.out.println(\"MLContextTest - output DataFrame of vectors DML\");\n+\n+ String s = \"m=matrix('1 2 3 4',rows=2,cols=2);\";\n+ Script script = dml(s).out(\"m\");\n+ MLResults results = ml.execute(script);\n+ Dataset<Row> df = results.getDataFrame(\"m\", true);\n+ Dataset<Row> sortedDF = df.sort(RDDConverterUtils.DF_ID_COLUMN);\n+\n+ // verify column types\n+ StructType schema = sortedDF.schema();\n+ StructField[] fields = schema.fields();\n+ StructField idColumn = fields[0];\n+ StructField vectorColumn = fields[1];\n+ Assert.assertTrue(idColumn.dataType() instanceof DoubleType);\n+ Assert.assertTrue(vectorColumn.dataType() instanceof VectorUDT);\n+\n+ List<Row> list = sortedDF.collectAsList();\n+\n+ Row row1 = list.get(0);\n+ Assert.assertEquals(1.0, row1.getDouble(0), 0.0);\n+ Vector v1 = (DenseVector) row1.get(1);\n+ double[] arr1 = v1.toArray();\n+ Assert.assertArrayEquals(new double[] { 1.0, 2.0 }, arr1, 0.0);\n+\n+ Row row2 = list.get(1);\n+ Assert.assertEquals(2.0, row2.getDouble(0), 0.0);\n+ Vector v2 = (DenseVector) row2.get(1);\n+ double[] arr2 = v2.toArray();\n+ Assert.assertArrayEquals(new double[] { 3.0, 4.0 }, arr2, 0.0);\n+ }\n+\n+ @Test\n+ public void testOutputDoubleArrayFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output double array from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=2, cols=2);\";\n+ double[][] matrix = ml.execute(dml(s).out(\"M\")).getMatrix(\"M\").to2DDoubleArray();\n+ Assert.assertEquals(1.0, matrix[0][0], 0);\n+ Assert.assertEquals(2.0, matrix[0][1], 0);\n+ Assert.assertEquals(3.0, matrix[1][0], 0);\n+ Assert.assertEquals(4.0, matrix[1][1], 0);\n+ }\n+\n+ @Test\n+ public void testOutputDataFrameFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output DataFrame from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=2, cols=2);\";\n+ Script script = dml(s).out(\"M\");\n+ Dataset<Row> df = ml.execute(script).getMatrix(\"M\").toDF();\n+ Dataset<Row> sortedDF = df.sort(RDDConverterUtils.DF_ID_COLUMN);\n+ List<Row> list = sortedDF.collectAsList();\n+ Row row1 = list.get(0);\n+ Assert.assertEquals(1.0, row1.getDouble(0), 0.0);\n+ Assert.assertEquals(1.0, row1.getDouble(1), 0.0);\n+ Assert.assertEquals(2.0, row1.getDouble(2), 0.0);\n+\n+ Row row2 = list.get(1);\n+ Assert.assertEquals(2.0, row2.getDouble(0), 0.0);\n+ Assert.assertEquals(3.0, row2.getDouble(1), 0.0);\n+ Assert.assertEquals(4.0, row2.getDouble(2), 0.0);\n+ }\n+\n+ @Test\n+ public void testOutputDataFrameDoublesNoIDColumnFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output DataFrame of doubles with no ID column from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=1, cols=4);\";\n+ Script script = dml(s).out(\"M\");\n+ Dataset<Row> df = ml.execute(script).getMatrix(\"M\").toDFDoubleNoIDColumn();\n+ List<Row> list = df.collectAsList();\n+\n+ Row row = list.get(0);\n+ Assert.assertEquals(1.0, row.getDouble(0), 0.0);\n+ Assert.assertEquals(2.0, row.getDouble(1), 0.0);\n+ Assert.assertEquals(3.0, row.getDouble(2), 0.0);\n+ Assert.assertEquals(4.0, row.getDouble(3), 0.0);\n+ }\n+\n+ @Test\n+ public void testOutputDataFrameDoublesWithIDColumnFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output DataFrame of doubles with ID column from matrix DML\");\n+\n+ String 
s = \"M = matrix('1 2 3 4', rows=2, cols=2);\";\n+ Script script = dml(s).out(\"M\");\n+ Dataset<Row> df = ml.execute(script).getMatrix(\"M\").toDFDoubleWithIDColumn();\n+ Dataset<Row> sortedDF = df.sort(RDDConverterUtils.DF_ID_COLUMN);\n+ List<Row> list = sortedDF.collectAsList();\n+\n+ Row row1 = list.get(0);\n+ Assert.assertEquals(1.0, row1.getDouble(0), 0.0);\n+ Assert.assertEquals(1.0, row1.getDouble(1), 0.0);\n+ Assert.assertEquals(2.0, row1.getDouble(2), 0.0);\n+\n+ Row row2 = list.get(1);\n+ Assert.assertEquals(2.0, row2.getDouble(0), 0.0);\n+ Assert.assertEquals(3.0, row2.getDouble(1), 0.0);\n+ Assert.assertEquals(4.0, row2.getDouble(2), 0.0);\n+ }\n+\n+ @Test\n+ public void testOutputDataFrameVectorsNoIDColumnFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output DataFrame of vectors with no ID column from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=1, cols=4);\";\n+ Script script = dml(s).out(\"M\");\n+ Dataset<Row> df = ml.execute(script).getMatrix(\"M\").toDFVectorNoIDColumn();\n+ List<Row> list = df.collectAsList();\n+\n+ Row row = list.get(0);\n+ Assert.assertArrayEquals(new double[] { 1.0, 2.0, 3.0, 4.0 }, ((Vector) row.get(0)).toArray(), 0.0);\n+ }\n+\n+ @Test\n+ public void testOutputDataFrameVectorsWithIDColumnFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output DataFrame of vectors with ID column from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=1, cols=4);\";\n+ Script script = dml(s).out(\"M\");\n+ Dataset<Row> df = ml.execute(script).getMatrix(\"M\").toDFVectorWithIDColumn();\n+ List<Row> list = df.collectAsList();\n+\n+ Row row = list.get(0);\n+ Assert.assertEquals(1.0, row.getDouble(0), 0.0);\n+ Assert.assertArrayEquals(new double[] { 1.0, 2.0, 3.0, 4.0 }, ((Vector) row.get(1)).toArray(), 0.0);\n+ }\n+\n+ @Test\n+ public void testOutputJavaRDDStringCSVFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output Java RDD String CSV from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=1, cols=4);\";\n+ Script script = dml(s).out(\"M\");\n+ JavaRDD<String> javaRDDStringCSV = ml.execute(script).getMatrix(\"M\").toJavaRDDStringCSV();\n+ List<String> lines = javaRDDStringCSV.collect();\n+ Assert.assertEquals(\"1.0,2.0,3.0,4.0\", lines.get(0));\n+ }\n+\n+ @Test\n+ public void testOutputJavaRDDStringIJVFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output Java RDD String IJV from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=2, cols=2);\";\n+ Script script = dml(s).out(\"M\");\n+ MLResults results = ml.execute(script);\n+ JavaRDD<String> javaRDDStringIJV = results.getJavaRDDStringIJV(\"M\");\n+ List<String> lines = javaRDDStringIJV.sortBy(row -> row, true, 1).collect();\n+ Assert.assertEquals(\"1 1 1.0\", lines.get(0));\n+ Assert.assertEquals(\"1 2 2.0\", lines.get(1));\n+ Assert.assertEquals(\"2 1 3.0\", lines.get(2));\n+ Assert.assertEquals(\"2 2 4.0\", lines.get(3));\n+ }\n+\n+ @Test\n+ public void testOutputRDDStringCSVFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output RDD String CSV from matrix DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=1, cols=4);\";\n+ Script script = dml(s).out(\"M\");\n+ RDD<String> rddStringCSV = ml.execute(script).getMatrix(\"M\").toRDDStringCSV();\n+ Iterator<String> iterator = rddStringCSV.toLocalIterator();\n+ Assert.assertEquals(\"1.0,2.0,3.0,4.0\", iterator.next());\n+ }\n+\n+ @Test\n+ public void testOutputRDDStringIJVFromMatrixDML() {\n+ System.out.println(\"MLContextTest - output RDD String IJV from matrix 
DML\");\n+\n+ String s = \"M = matrix('1 2 3 4', rows=2, cols=2);\";\n+ Script script = dml(s).out(\"M\");\n+ RDD<String> rddStringIJV = ml.execute(script).getMatrix(\"M\").toRDDStringIJV();\n+ String[] rows = (String[]) rddStringIJV.collect();\n+ Arrays.sort(rows);\n+ Assert.assertEquals(\"1 1 1.0\", rows[0]);\n+ Assert.assertEquals(\"1 2 2.0\", rows[1]);\n+ Assert.assertEquals(\"2 1 3.0\", rows[2]);\n+ Assert.assertEquals(\"2 2 4.0\", rows[3]);\n+ }\n+\n+ @Test\n+ public void testMLContextVersionMessage() {\n+ System.out.println(\"MLContextTest - version message\");\n+\n+ String version = ml.version();\n+ // not available until jar built\n+ Assert.assertEquals(MLContextUtil.VERSION_NOT_AVAILABLE, version);\n+ }\n+\n+ @Test\n+ public void testMLContextBuildTimeMessage() {\n+ System.out.println(\"MLContextTest - build time message\");\n+\n+ String buildTime = ml.buildTime();\n+ // not available until jar built\n+ Assert.assertEquals(MLContextUtil.BUILD_TIME_NOT_AVAILABLE, buildTime);\n+ }\n+\n+ @Test\n+ public void testMLContextCreateAndClose() {\n+ // MLContext created by the @BeforeClass method in MLContextTestBase\n+ // MLContext closed by the @AfterClass method in MLContextTestBase\n+ System.out.println(\"MLContextTest - create MLContext and close (without script execution)\");\n+ }\n+\n+ @Test\n+ public void testDataFrameToBinaryBlocks() {\n+ System.out.println(\"MLContextTest - DataFrame to binary blocks\");\n+\n+ List<String> list = new ArrayList<String>();\n+ list.add(\"1,2,3\");\n+ list.add(\"4,5,6\");\n+ list.add(\"7,8,9\");\n+ JavaRDD<String> javaRddString = sc.parallelize(list);\n+\n+ JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());\n+ List<StructField> fields = new ArrayList<StructField>();\n+ fields.add(DataTypes.createStructField(\"C1\", DataTypes.DoubleType, true));\n+ fields.add(DataTypes.createStructField(\"C2\", DataTypes.DoubleType, true));\n+ fields.add(DataTypes.createStructField(\"C3\", DataTypes.DoubleType, true));\n+ StructType schema = DataTypes.createStructType(fields);\n+ Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);\n+\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> binaryBlocks = MLContextConversionUtil\n+ .dataFrameToMatrixBinaryBlocks(dataFrame);\n+ Tuple2<MatrixIndexes, MatrixBlock> first = binaryBlocks.first();\n+ MatrixBlock mb = first._2();\n+ double[][] matrix = DataConverter.convertToDoubleMatrix(mb);\n+ Assert.assertArrayEquals(new double[] { 1.0, 2.0, 3.0 }, matrix[0], 0.0);\n+ Assert.assertArrayEquals(new double[] { 4.0, 5.0, 6.0 }, matrix[1], 0.0);\n+ Assert.assertArrayEquals(new double[] { 7.0, 8.0, 9.0 }, matrix[2], 0.0);\n+ }\n+\n+ @Test\n+ public void testGetTuple1DML() {\n+ System.out.println(\"MLContextTest - Get Tuple1<Matrix> DML\");\n+ JavaRDD<String> javaRddString = sc\n+ .parallelize(Stream.of(\"1,2,3\", \"4,5,6\", \"7,8,9\").collect(Collectors.toList()));\n+ JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());\n+ List<StructField> fields = new ArrayList<StructField>();\n+ fields.add(DataTypes.createStructField(\"C1\", DataTypes.DoubleType, true));\n+ fields.add(DataTypes.createStructField(\"C2\", DataTypes.DoubleType, true));\n+ fields.add(DataTypes.createStructField(\"C3\", DataTypes.DoubleType, true));\n+ StructType schema = DataTypes.createStructType(fields);\n+ Dataset<Row> df = spark.createDataFrame(javaRddRow, schema);\n+\n+ Script script = dml(\"N=M*2\").in(\"M\", df).out(\"N\");\n+ Tuple1<Matrix> tuple = 
ml.execute(script).getTuple(\"N\");\n+ double[][] n = tuple._1().to2DDoubleArray();\n+ Assert.assertEquals(2.0, n[0][0], 0);\n+ Assert.assertEquals(4.0, n[0][1], 0);\n+ Assert.assertEquals(6.0, n[0][2], 0);\n+ Assert.assertEquals(8.0, n[1][0], 0);\n+ Assert.assertEquals(10.0, n[1][1], 0);\n+ Assert.assertEquals(12.0, n[1][2], 0);\n+ Assert.assertEquals(14.0, n[2][0], 0);\n+ Assert.assertEquals(16.0, n[2][1], 0);\n+ Assert.assertEquals(18.0, n[2][2], 0);\n+ }\n+\n+ @Test\n+ public void testGetTuple2DML() {\n+ System.out.println(\"MLContextTest - Get Tuple2<Matrix,Double> DML\");\n+\n+ double[][] m = new double[][] { { 1, 2 }, { 3, 4 } };\n+\n+ Script script = dml(\"N=M*2;s=sum(N)\").in(\"M\", m).out(\"N\", \"s\");\n+ Tuple2<Matrix, Double> tuple = ml.execute(script).getTuple(\"N\", \"s\");\n+ double[][] n = tuple._1().to2DDoubleArray();\n+ double s = tuple._2();\n+ Assert.assertArrayEquals(new double[] { 2, 4 }, n[0], 0.0);\n+ Assert.assertArrayEquals(new double[] { 6, 8 }, n[1], 0.0);\n+ Assert.assertEquals(20.0, s, 0.0);\n+ }\n+\n+ @Test\n+ public void testGetTuple3DML() {\n+ System.out.println(\"MLContextTest - Get Tuple3<Long,Double,Boolean> DML\");\n+\n+ Script script = dml(\"a=1+2;b=a+0.5;c=TRUE;\").out(\"a\", \"b\", \"c\");\n+ Tuple3<Long, Double, Boolean> tuple = ml.execute(script).getTuple(\"a\", \"b\", \"c\");\n+ long a = tuple._1();\n+ double b = tuple._2();\n+ boolean c = tuple._3();\n+ Assert.assertEquals(3, a);\n+ Assert.assertEquals(3.5, b, 0.0);\n+ Assert.assertEquals(true, c);\n+ }\n+\n+ @Test\n+ public void testGetTuple4DML() {\n+ System.out.println(\"MLContextTest - Get Tuple4<Long,Double,Boolean,String> DML\");\n+\n+ Script script = dml(\"a=1+2;b=a+0.5;c=TRUE;d=\\\"yes it's \\\"+c\").out(\"a\", \"b\", \"c\", \"d\");\n+ Tuple4<Long, Double, Boolean, String> tuple = ml.execute(script).getTuple(\"a\", \"b\", \"c\", \"d\");\n+ long a = tuple._1();\n+ double b = tuple._2();\n+ boolean c = tuple._3();\n+ String d = tuple._4();\n+ Assert.assertEquals(3, a);\n+ Assert.assertEquals(3.5, b, 0.0);\n+ Assert.assertEquals(true, c);\n+ Assert.assertEquals(\"yes it's TRUE\", d);\n+ }\n+\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Increase MLContext test coverage Create MLContext tests to test previously untested methods. Update MLContext and MLContextConversionUtil to avoid NPEs. Closes #649.
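The bulk of the new tests exercise the `Script` in/out plumbing; a condensed round trip, assuming an initialized `MLContext` as provided by `MLContextTestBase`:

```java
import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;

import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.MLResults;
import org.apache.sysml.api.mlcontext.Script;

public class RoundTripSketch {
    public static void run(MLContext ml) { // ml as set up by MLContextTestBase
        double[][] m = { { 1, 2 }, { 3, 4 } };
        Script script = dml("N = M * 2; s = sum(N);").in("M", m).out("N", "s");
        MLResults results = ml.execute(script);
        double s = results.getDouble("s");                        // 20.0
        double[][] n = results.getMatrix("N").to2DDoubleArray();  // {{2,4},{6,8}}
        System.out.println(s + " " + n[1][1]);
    }
}
```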
49,736
03.09.2017 12:42:01
25,200
f5de13e7bbf6ce8351b30cbe5284a4df13b88143
[MINOR] Added scikit-learn as a dependency for the systemml pip package Without this fix, running a simple mlcontext example after `pip install systemml` will result in an error.
[ { "change_type": "MODIFY", "old_path": "src/main/python/setup.py", "new_path": "src/main/python/setup.py", "diff": "@@ -42,7 +42,7 @@ pillow_version = '2.0.0'\nREQUIRED_PACKAGES = [\n'numpy >= %s' % numpy_version,\n'scipy >= %s' % scipy_version,\n- 'pandas',\n+ 'pandas', 'scikit-learn',\n'Pillow >= %s' % pillow_version\n]\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Added scikit-learn as a dependency for the systemml pip package - Without this fix, running a simple mlcontext example after `pip install systemml` will result in an error.
49,738
04.09.2017 19:08:27
25,200
30caf3652125851979b3bca3312f98551853fbed
Extended codegen outer template (sparse-safe driver ops) This patch extends the codegen outer template with the ability to fuse additional sparse-safe operations on the driver. In detail, this is realized by generalizing the OFMC-merge condition of the outer template as well as the related cplan construction.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -1015,15 +1015,17 @@ public class OptimizerUtils\nreturn ( op==OpOp2.NOTEQUAL && val==0);\n}\n- public static double getBinaryOpSparsityConditionalSparseSafe( double sp1, OpOp2 op, LiteralOp lit )\n- {\n+ public static boolean isBinaryOpSparsityConditionalSparseSafe( OpOp2 op, LiteralOp lit ) {\ndouble val = HopRewriteUtils.getDoubleValueSafe(lit);\n-\nreturn ( (op==OpOp2.GREATER && val==0)\n||(op==OpOp2.LESS && val==0)\n||(op==OpOp2.NOTEQUAL && val==0)\n||(op==OpOp2.EQUAL && val!=0)\n- ||(op==OpOp2.MINUS && val==0)) ? sp1 : 1.0;\n+ ||(op==OpOp2.MINUS && val==0));\n+ }\n+\n+ public static double getBinaryOpSparsityConditionalSparseSafe( double sp1, OpOp2 op, LiteralOp lit ) {\n+ return isBinaryOpSparsityConditionalSparseSafe(op, lit) ? sp1 : 1.0;\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateOuterProduct.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateOuterProduct.java", "diff": "@@ -78,7 +78,10 @@ public class TemplateOuterProduct extends TemplateBase {\npublic boolean merge(Hop hop, Hop input) {\nreturn !isClosed() &&\n(TemplateUtils.isBinaryMatrixRowVector(hop)\n- || HopRewriteUtils.isBinaryMatrixScalarOperation(hop));\n+ || HopRewriteUtils.isBinaryMatrixScalarOperation(hop)\n+ || (HopRewriteUtils.isBinary(hop, OpOp2.MULT)\n+ && HopRewriteUtils.isBinarySparseSafe(input)\n+ && !TemplateUtils.rContainsOuterProduct(input)));\n}\n@Override\n@@ -167,10 +170,10 @@ public class TemplateOuterProduct extends TemplateBase {\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nString primitiveOpName = ((BinaryOp)hop).getOp().toString();\n- if( HopRewriteUtils.isEqualSize(hop.getInput().get(0), hop.getInput().get(1)) ) {\n- Hop main = hop.getInput().get((cdata1 instanceof CNodeData) ? 
0 : 1);\n- inHops2.put(\"_X\", main);\n- }\n+ if( TemplateUtils.isMatrix(hop.getInput().get(0)) && cdata1 instanceof CNodeData )\n+ inHops2.put(\"_X\", hop.getInput().get(0));\n+ if( TemplateUtils.isMatrix(hop.getInput().get(1)) && cdata2 instanceof CNodeData )\n+ inHops2.put(\"_X\", hop.getInput().get(1));\n//add lookups if required\ncdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -430,4 +430,22 @@ public class TemplateUtils\nnode.setVisited();\nreturn ret;\n}\n+\n+ public static boolean containsOuterProduct(Hop hop) {\n+ hop.resetVisitStatus();\n+ boolean ret = rContainsOuterProduct(hop);\n+ hop.resetVisitStatus();\n+ return ret;\n+ }\n+\n+ public static boolean rContainsOuterProduct(Hop current) {\n+ if( current.isVisited() )\n+ return false;\n+ boolean ret = false;\n+ ret |= HopRewriteUtils.isOuterProductLikeMM(current);\n+ for( int i=0; i<current.getInput().size() && !ret; i++ )\n+ ret |= rContainsOuterProduct(current.getInput().get(i));\n+ current.setVisited();\n+ return ret;\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -47,6 +47,7 @@ import org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.LeftIndexingOp;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.MemoTable;\n+import org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.ReorgOp;\nimport org.apache.sysml.hops.TernaryOp;\n@@ -824,6 +825,18 @@ public class HopRewriteUtils\nreturn isBinary(hop, type) && hop.getParent().size() <= maxParents;\n}\n+ public static boolean isBinarySparseSafe(Hop hop) {\n+ if( !(hop instanceof BinaryOp) )\n+ return false;\n+ if( isBinary(hop, OpOp2.MULT) )\n+ return true;\n+ BinaryOp bop = (BinaryOp) hop;\n+ Hop lit = bop.getInput().get(0) instanceof LiteralOp ? bop.getInput().get(0) :\n+ bop.getInput().get(1) instanceof LiteralOp ? 
bop.getInput().get(1) : null;\n+ return lit != null && OptimizerUtils\n+ .isBinaryOpSparsityConditionalSparseSafe(bop.getOp(), (LiteralOp)lit);\n+ }\n+\npublic static boolean isBinaryMatrixScalarOperation(Hop hop) {\nreturn hop instanceof BinaryOp &&\n((hop.getInput().get(0).getDataType().isMatrix() && hop.getInput().get(1).getDataType().isScalar())\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/OuterProdTmplTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/OuterProdTmplTest.java", "diff": "@@ -43,6 +43,7 @@ public class OuterProdTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME6 = \"wdivmmbasic\";\nprivate static final String TEST_NAME7 = \"wdivmmTransposeOut\";\nprivate static final String TEST_NAME8 = \"wSparseUnsafeOuterProduct\";\n+ private static final String TEST_NAME9 = \"wdivmmNeq\";\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + OuterProdTmplTest.class.getSimpleName() + \"/\";\n@@ -62,6 +63,7 @@ public class OuterProdTmplTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"6\" }) );\naddTestConfiguration( TEST_NAME7, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME7, new String[] { \"7\" }) );\naddTestConfiguration( TEST_NAME8, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME8, new String[] { \"8\" }) );\n+ addTestConfiguration( TEST_NAME9, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME9, new String[] { \"9\" }) );\n}\n@Test\n@@ -169,6 +171,20 @@ public class OuterProdTmplTest extends AutomatedTestBase\ntestCodegenIntegrationWithInput( TEST_NAME8, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenOuterProdRewrite9() {\n+ testCodegenIntegration( TEST_NAME9, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenOuterProd9() {\n+ testCodegenIntegration( TEST_NAME9, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenOuterProdRewrite9_sp() {\n+ testCodegenIntegrationWithInput( TEST_NAME9, true, ExecType.SPARK );\n+ }\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\n@@ -209,9 +225,12 @@ public class OuterProdTmplTest extends AutomatedTestBase\nif( testname.equals(TEST_NAME8) )\nAssert.assertTrue(!(heavyHittersContainsSubString(\"spoofOP\")\n|| heavyHittersContainsSubString(\"sp_spoofOP\")));\n- else if( !rewrites )\n+ else if( !rewrites ) {\nAssert.assertTrue(heavyHittersContainsSubString(\"spoofOP\")\n|| heavyHittersContainsSubString(\"sp_spoofOP\"));\n+ if( testname.equals(TEST_NAME9) )\n+ Assert.assertTrue(!heavyHittersContainsSubString(\"!=\"));\n+ }\n}\nfinally {\nrtplatform = platformOld;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/wdivmmNeq.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix( 3, 4000, 2000);\n+U = matrix( 4, 4000, 10);\n+V = matrix( 5, 2000, 10);\n+eps = 0.1;\n+S= ((X!=0)*(U%*%t(V)+eps)) %*% V;\n+writeMM(as(S, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/codegen/wdivmmNeq.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix( 3, rows=4000, cols=2000);\n+U = matrix( 4, rows=4000, cols=10);\n+V = matrix( 5, rows=2000, cols=10);\n+while(FALSE){}\n+eps = 0.1;\n+S= ((X!=0)*(U%*%t(V)+eps)) %*% V;\n+\n+write(S,$1)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1886] Extended codegen outer template (sparse-safe driver ops) This patch extends the codegen outer template with the ability to fuse additional sparse-safe operations on the driver. In detail, this is realized by generalizing the OFMC-merge condition of the outer template as well as the related cplan construction.
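For reference, the sparse-safety predicate that the patch factors out of the sparsity estimator reduces to the following conditions on a binary op with literal value `val` (a direct restatement of `isBinaryOpSparsityConditionalSparseSafe`):

```java
import org.apache.sysml.hops.Hop.OpOp2;

// a binary op with a literal is sparse-safe iff zeros in the other input stay zeros
static boolean isSparseSafe(OpOp2 op, double val) {
    return (op == OpOp2.GREATER  && val == 0)
        || (op == OpOp2.LESS     && val == 0)
        || (op == OpOp2.NOTEQUAL && val == 0)
        || (op == OpOp2.EQUAL    && val != 0)
        || (op == OpOp2.MINUS    && val == 0);
}
```

This is why the `X != 0` comparison in the new `wdivmmNeq` test can be fused into the outer-product template, as asserted by the absence of a standalone `!=` heavy hitter.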
49,768
05.09.2017 09:59:46
25,200
0ba9e74b96d2638e0ca2d9af569cdcc26676a78e
Update Release Distribution policy
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -289,8 +289,8 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\nfor i in *.zip *.tgz; do gpg --output $i.asc --detach-sig --armor $i; done\nrm -f *.md5\nfor i in *.zip *.tgz; do openssl md5 -hex $i | sed 's/MD5(\\([^)]*\\))= \\([0-9a-f]*\\)/\\2 *\\1/' > $i.md5; done\n- rm -f *.sha\n- for i in *.zip *.tgz; do shasum $i > $i.sha; done\n+ rm -f *.sha512\n+ for i in *.zip *.tgz; do shasum -a 512 $i > $i.sha512; done\ncd .. #exit $RELEASE_VERSION-$RELEASE_RC/\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1890] Update Release Distribution policy
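The generated `.sha512` files follow the `shasum -a 512` layout (`<hex digest>  <file>`); they can be cross-checked with the standard `MessageDigest` API, as in this sketch (the artifact name is hypothetical):

```java
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class Sha512Check {
    public static void main(String[] args) throws Exception {
        String artifact = "systemml-0.15.0-bin.tgz"; // hypothetical artifact name
        byte[] digest = MessageDigest.getInstance("SHA-512")
            .digest(Files.readAllBytes(Paths.get(artifact)));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest)
            hex.append(String.format("%02x", b));
        System.out.println(hex + "  " + artifact); // same layout as the .sha512 file
    }
}
```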
49,736
05.09.2017 09:59:59
25,200
b34079a283a1859ed23de77f4ff0e50985b57dd3
[MINOR] Enable systemml to be imported in the pyspark workers Moved the race-condition avoidance logic to the classloader instead of the import level. This avoids creation of a DataFrame in PySpark workers.
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/classloader.py", "new_path": "src/main/python/systemml/classloader.py", "diff": "__all__ = ['createJavaObject']\nimport os\n+import numpy as np\n+import pandas as pd\ntry:\nimport py4j.java_gateway\nfrom py4j.java_gateway import JavaObject\nfrom pyspark import SparkContext\n+ from pyspark.sql import SparkSession\nexcept ImportError:\nraise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')\n+_initializedSparkSession = False\ndef _createJavaObject(sc, obj_type):\n+ # -----------------------------------------------------------------------------------\n+ # Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession.\n+ # This is done at toDF() rather than import level to avoid creation of SparkSession in worker processes.\n+ global _initializedSparkSession\n+ if not _initializedSparkSession:\n+ _initializedSparkSession = True\n+ SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1,2],[3,4]])))\n+ # -----------------------------------------------------------------------------------\nif obj_type == 'mlcontext':\nreturn sc._jvm.org.apache.sysml.api.mlcontext.MLContext(sc._jsc)\nelif obj_type == 'dummy':\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mlcontext.py", "new_path": "src/main/python/systemml/mlcontext.py", "diff": "@@ -36,11 +36,7 @@ try:\nfrom pyspark import SparkContext\nfrom pyspark.conf import SparkConf\nimport pyspark.mllib.common\n- # -----------------------------------------------------------------------------------\n- # Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession\nfrom pyspark.sql import SparkSession\n- SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1,2],[3,4]])))\n- # -----------------------------------------------------------------------------------\nexcept ImportError:\nraise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Enable systemml to be imported in the pyspark workers - Moved the race-condition avoidance logic to the classloader instead of the import level. This avoids creation of a DataFrame in PySpark workers. Closes #652.
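The fix replaces import-time work with one-time lazy initialization behind a module flag; the same shape in Java, purely as an illustration of the pattern (the Python original is a per-process flag, so no synchronization is shown):

```java
// illustrative analogue of the _initializedSparkSession flag in classloader.py
public class LazySessionInit {
    private static boolean initialized = false;

    static void ensureInitialized() {
        if (!initialized) {
            initialized = true;   // flip first, mirroring the Python code
            expensiveSetup();     // e.g. creating the SparkSession and a dummy DataFrame
        }
    }

    private static void expensiveSetup() { /* placeholder for session creation */ }
}
```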
49,768
05.09.2017 10:26:14
25,200
a073ad24a3ab1cdbdc77a252e8f72e1d45037fc3
[maven-release-plugin] prepare release v0.15.0-incubating-rc1
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>1.0.0-SNAPSHOT</version>\n+ <version>0.15.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:[email protected]:apache/systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.15.0-incubating-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n" } ]
Java
Apache License 2.0
apache/systemds
[maven-release-plugin] prepare release v0.15.0-incubating-rc1
49,768
05.09.2017 10:45:51
25,200
467de1cb17c70a1f9796e732d34cb2c216bb70a8
[maven-release-plugin] prepare release v0.15.0-rc1
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>1.0.0-incubating-SNAPSHOT</version>\n+ <version>0.15.0</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:[email protected]:apache/systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.15.0-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n" } ]
Java
Apache License 2.0
apache/systemds
[maven-release-plugin] prepare release v0.15.0-rc1
49,698
06.09.2017 14:17:20
25,200
a0cf8e3bee9470f622d57c4d83f64f2a5bf5f72f
MLContext test for linear regression Closes
[ { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/algorithms/MLContextLinregTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.mlcontext.algorithms;\n+\n+import static org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile;\n+\n+import org.apache.log4j.Logger;\n+import org.apache.sysml.api.mlcontext.Script;\n+import org.apache.sysml.test.integration.mlcontext.MLContextTestBase;\n+import org.junit.Test;\n+\n+public class MLContextLinregTest extends MLContextTestBase {\n+ protected static Logger log = Logger.getLogger(MLContextLinregTest.class);\n+\n+ protected final static String TEST_SCRIPT_CG = \"scripts/algorithms/LinearRegCG.dml\";\n+ protected final static String TEST_SCRIPT_DS = \"scripts/algorithms/LinearRegDS.dml\";\n+ private final static double sparsity1 = 0.7; // dense\n+ private final static double sparsity2 = 0.1; // sparse\n+\n+ public enum LinregType {\n+ CG, DS,\n+ }\n+\n+ @Test\n+ public void testLinregCGSparse() {\n+ runLinregTestMLC(LinregType.CG, true);\n+ }\n+\n+ @Test\n+ public void testLinregCGDense() {\n+ runLinregTestMLC(LinregType.CG, false);\n+ }\n+\n+ @Test\n+ public void testLinregDSSparse() {\n+ runLinregTestMLC(LinregType.DS, true);\n+ }\n+\n+ @Test\n+ public void testLinregDSDense() {\n+ runLinregTestMLC(LinregType.DS, false);\n+ }\n+\n+ private void runLinregTestMLC(LinregType type, boolean sparse) {\n+\n+ double[][] X = getRandomMatrix(10, 3, 0, 1, sparse ? sparsity2 : sparsity1, 7);\n+ double[][] Y = getRandomMatrix(10, 1, 0, 10, 1.0, 3);\n+\n+ switch (type) {\n+ case CG:\n+ Script lrcg = dmlFromFile(TEST_SCRIPT_CG);\n+ lrcg.in(\"X\", X).in(\"y\", Y).in(\"$icpt\", \"0\").in(\"$tol\", \"0.000001\").in(\"$maxi\", \"0\").in(\"$reg\", \"0.000001\")\n+ .out(\"beta_out\");\n+ ml.execute(lrcg);\n+\n+ break;\n+\n+ case DS:\n+ Script lrds = dmlFromFile(TEST_SCRIPT_DS);\n+ lrds.in(\"X\", X).in(\"y\", Y).in(\"$icpt\", \"0\").in(\"$reg\", \"0.000001\").out(\"beta_out\");\n+ ml.execute(lrds);\n+\n+ break;\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/mlcontext/algorithms/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/mlcontext/algorithms/ZPackageSuite.java", "diff": "@@ -26,7 +26,10 @@ import org.junit.runners.Suite;\n* Group together the tests in this package.\n*/\n@RunWith(Suite.class)\[email protected]({ MLContextUnivariateStatisticsTest.class })\[email protected]({\n+ MLContextUnivariateStatisticsTest.class,\n+ MLContextLinregTest.class\n+ })\n/** This class is just a holder for the above JUnit annotations. */\npublic class ZPackageSuite {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1863] MLContext test for linear regression Closes #647.
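All calls used here appear in the test above; pulling the fitted coefficients back out only needs one more step through `MLResults`:

```java
import static org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile;

import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.Script;

public class LinregSketch {
    // a sketch over the same inputs the test generates
    public static double[][] fit(MLContext ml, double[][] X, double[][] Y) {
        Script lrds = dmlFromFile("scripts/algorithms/LinearRegDS.dml");
        lrds.in("X", X).in("y", Y).in("$icpt", "0").in("$reg", "0.000001").out("beta_out");
        return ml.execute(lrds).getMatrix("beta_out").to2DDoubleArray();
    }
}
```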
49,717
07.09.2017 14:42:12
25,200
c00029a7be735dcaba533c50ba69169b18ef1675
JCuda jars for Windows & Linux (x86_64, ppc64le) are included in the extra jar Closes
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcufft</artifactId>\n+ <artifactId>jcusparse</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcusparse</artifactId>\n+ <artifactId>jcusolver</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcusolver</artifactId>\n+ <artifactId>jcudnn</artifactId>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcurand</artifactId>\n+ <artifactId>jcuda-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jnvgraph</artifactId>\n+ <artifactId>jcublas-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcudnn</artifactId>\n+ <artifactId>jcusparse-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusolver-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcudnn-natives</artifactId>\n+ <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+ <!-- for all platforms, to be included in the extra jar -->\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcuda-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcublas-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>windows-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcufft-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <artifactId>jcusparse-natives</artifactId>\n+ <classifier>windows-x86_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusolver-natives</artifactId>\n+ <classifier>windows-x86_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcudnn-natives</artifactId>\n+ <classifier>windows-x86_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcuda-natives</artifactId>\n+ <classifier>linux-x86_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ 
<dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcublas-natives</artifactId>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusparse-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcusolver-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jcurand-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <artifactId>jcudnn-natives</artifactId>\n+ <classifier>linux-x86_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n+\n<dependency>\n<groupId>org.jcuda</groupId>\n- <artifactId>jnvgraph-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <artifactId>jcuda-natives</artifactId>\n+ <classifier>linux-ppc_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcublas-natives</artifactId>\n+ <classifier>linux-ppc_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusparse-natives</artifactId>\n+ <classifier>linux-ppc_64</classifier>\n+ <version>${jcuda.version}</version>\n+ <scope>${jcuda.scope}</scope>\n+ </dependency>\n+ <dependency>\n+ <groupId>org.jcuda</groupId>\n+ <artifactId>jcusolver-natives</artifactId>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n<dependency>\n<groupId>org.jcuda</groupId>\n<artifactId>jcudnn-natives</artifactId>\n- <classifier>${jcuda.os}-${jcuda.arch}</classifier>\n+ <classifier>linux-ppc_64</classifier>\n<version>${jcuda.version}</version>\n<scope>${jcuda.scope}</scope>\n</dependency>\n" }, { "change_type": "MODIFY", "old_path": "src/assembly/extra.xml", "new_path": "src/assembly/extra.xml", "diff": "</fileSet>\n</fileSets>\n- <!-- Include platform specific JCuda Jars -->\n+ <!-- Include JCuda Jars -->\n<dependencySets>\n<dependencySet>\n<includes>\n- <include>org.jcuda:*</include>\n+ <include>org.jcuda:jcuda:jar:${jcuda.version}</include>\n+ <include>org.jcuda:jcublas:jar:${jcuda.version}</include>\n+ <include>org.jcuda:jcusparse:jar:${jcuda.version}</include>\n+ <include>org.jcuda:jcusolver:jar:${jcuda.version}</include>\n+ <include>org.jcuda:jcudnn:jar:${jcuda.version}</include>\n+\n+ <!-- windows specific jcuda jars -->\n+ <include>org.jcuda:jcuda-natives:jar:windows-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcublas-natives:jar:windows-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcusparse-natives:jar:windows-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcusolver-natives:jar:windows-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcudnn-natives:jar:windows-x86_64:${jcuda.version}</include>\n+\n+ <!-- linux x86_64 specific jcuda jars -->\n+ <include>org.jcuda:jcuda-natives:jar:linux-x86_64:${jcuda.version}</include>\n+ 
<include>org.jcuda:jcublas-natives:jar:linux-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcusparse-natives:jar:linux-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcusolver-natives:jar:linux-x86_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcudnn-natives:jar:linux-x86_64:${jcuda.version}</include>\n+\n+ <!-- linux ppc_64le specific jcuda jars -->\n+ <include>org.jcuda:jcuda-natives:jar:linux-ppc_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcublas-natives:jar:linux-ppc_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcusparse-natives:jar:linux-ppc_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcusolver-natives:jar:linux-ppc_64:${jcuda.version}</include>\n+ <include>org.jcuda:jcudnn-natives:jar:linux-ppc_64:${jcuda.version}</include>\n</includes>\n<unpack>true</unpack>\n<scope>compile</scope>\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1895] jcuda for windows & linux (x86_64, ppc64le) are included in extra jar Closes #656
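Note on the record above: the diff bundles JCuda native jars under three "<os>-<arch>" classifiers (windows-x86_64, linux-x86_64, linux-ppc_64). As a hedged illustration only (not code from the patch, and JCuda's actual native loading may differ), such classifiers can be derived from standard JVM system properties:

// Sketch: map JVM os.name/os.arch onto the "<os>-<arch>" classifiers
// used by the native jars above (windows-x86_64, linux-x86_64, linux-ppc_64).
public class NativeClassifierSketch {
    public static String classifier() {
        String os = System.getProperty("os.name").toLowerCase();
        String arch = System.getProperty("os.arch").toLowerCase();
        String osPart = os.contains("windows") ? "windows" : "linux";
        // ppc64le JVMs report os.arch as "ppc64le"; the jars use "ppc_64"
        String archPart = arch.startsWith("ppc64") ? "ppc_64" : "x86_64";
        return osPart + "-" + archPart;
    }
    public static void main(String[] args) {
        System.out.println(classifier()); // e.g., linux-x86_64
    }
}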
49,772
07.09.2017 16:01:45
25,200
4d376637a99891e53ea93f650fb9341fc19b99f9
[MINOR] Fixes for the breast cancer project.
[ { "change_type": "MODIFY", "old_path": "projects/breast_cancer/MachineLearning-Keras-ResNet50.ipynb", "new_path": "projects/breast_cancer/MachineLearning-Keras-ResNet50.ipynb", "diff": "\"from keras.applications.resnet50 import ResNet50\\n\",\n\"from keras.callbacks import ModelCheckpoint, TensorBoard\\n\",\n\"from keras.initializers import VarianceScaling\\n\",\n- \"from keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D, Input, Lambda, merge\\n\",\n+ \"from keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D, Input, Lambda\\n\",\n\"from keras.models import Model, load_model\\n\",\n\"from keras.optimizers import SGD\\n\",\n\"from keras.preprocessing.image import ImageDataGenerator\\n\",\n\"from keras.regularizers import l2\\n\",\n- \"from keras.utils import to_categorical\\n\",\n\"import matplotlib.pyplot as plt\\n\",\n\"import numpy as np\\n\",\n\"import pandas as pd\\n\",\n" }, { "change_type": "MODIFY", "old_path": "projects/breast_cancer/breastcancer/preprocessing.py", "new_path": "projects/breast_cancer/breastcancer/preprocessing.py", "diff": "@@ -71,6 +71,8 @@ def open_slide(slide_num, folder, training):\nslide = openslide.open_slide(filename)\nexcept OpenSlideError:\nslide = None\n+ except FileNotFoundError:\n+ slide = None\nreturn slide\n@@ -586,7 +588,7 @@ def preprocess(spark, slide_nums, folder=\"data\", training=True, tile_size=1024,\n# Append labels\nlabels_df = get_labels_df(folder)\nsamples_with_labels = (samples.map(\n- lambda tup: (tup[0], int(labels_df.at[tup[0],\"tumor_score\"]),\n+ lambda tup: (int(tup[0]), int(labels_df.at[tup[0],\"tumor_score\"]),\nfloat(labels_df.at[tup[0],\"molecular_score\"]), Vectors.dense(tup[1]))))\ndf = samples_with_labels.toDF([\"slide_num\", \"tumor_score\", \"molecular_score\", \"sample\"])\ndf = df.select(df.slide_num.astype(\"int\"), df.tumor_score.astype(\"int\"),\n" }, { "change_type": "MODIFY", "old_path": "projects/breast_cancer/preprocess.py", "new_path": "projects/breast_cancer/preprocess.py", "diff": "@@ -33,7 +33,7 @@ import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom pyspark.sql import SparkSession\n-from breastcancer.preprocessing import add_row_indices, get_labels_df, preprocess, save\n+from breastcancer.preprocessing import add_row_indices, get_labels_df, preprocess, save, sample\n# Create new SparkSession\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixes for the breast cancer project.
49,736
07.09.2017 18:49:04
28,800
5a2b49f20c6e9d72061b5cb631736f15d577d396
Prevent 'Script string is blank' exception for Python Closes
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/defmatrix.py", "new_path": "src/main/python/systemml/defmatrix.py", "diff": "@@ -321,7 +321,7 @@ def eval(outputs, execute=True):\ncheck_MLContext()\nreset()\noutputs = convert_outputs_to_list(outputs)\n- matrix.script.scriptString = perform_dfs(outputs, execute)\n+ matrix.script.setScriptString(perform_dfs(outputs, execute))\nif not execute:\nreset_output_flag(outputs)\nreturn matrix.script.scriptString\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1891] Prevent 'Script string is blank' exception for Python Closes #659.
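Note on the record above: the one-line fix matters because matrix.script is a Py4J proxy, so assigning to scriptString only sets an attribute on the Python wrapper, while calling the Java setter updates the JVM-side object whose script string otherwise stays blank. A minimal Java sketch of the setter pattern the fix relies on (illustrative only, not the actual MLContext Script class):

// Sketch: state lives on the Java side, so remote callers must use the
// setter; proxy-side field assignment never reaches this object, leaving
// scriptString blank and triggering the "Script string is blank" check.
public class ScriptSketch {
    private String scriptString;
    public void setScriptString(String s) { this.scriptString = s; }
    public String getScriptString() {
        if (scriptString == null || scriptString.trim().isEmpty())
            throw new IllegalStateException("Script string is blank");
        return scriptString;
    }
}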
49,736
08.09.2017 14:54:22
28,800
3b8a86065452e77b41525098d92f6f11f5419be3
[MINOR] Throw user-friendly message for invalid convolution parameters This commit checks whether the given parameters generate an output activation with negative dimensions.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java", "diff": "@@ -47,13 +47,31 @@ public class ConvolutionUtils {\nif(H <= 0 || R <= 0 || heightPadding < 0 || verticalStride < 0) {\nthrow new RuntimeException(\"Incorrect parameters: height=\" + H + \" filter_height=\" + R + \" stride=\" + verticalStride + \" pad=\" + heightPadding);\n}\n- return (H + 2 * heightPadding - R) / verticalStride + 1;\n+ long padded_image_height = H + 2 * heightPadding;\n+ long ret = (padded_image_height - R) / verticalStride + 1;\n+ if(ret <= 0 || ret > Integer.MAX_VALUE) {\n+ // Check for valid output activation height\n+ if(padded_image_height < R)\n+ throw new RuntimeException(\"Incorrect parameters: padded image height:\" + padded_image_height + \" cannot be less than filter_height:\" + R);\n+ else\n+ throw new RuntimeException(\"Incorrect parameters: height=\" + H + \" filter_height=\" + R + \" stride=\" + verticalStride + \" pad=\" + heightPadding + \" as P=\" + ret);\n+ }\n+ return ret;\n}\npublic static long getQ(long W, long S, long horizontalStride, long widthPadding) {\nif(W <= 0 || S <= 0 || widthPadding < 0 || horizontalStride < 0) {\nthrow new RuntimeException(\"Incorrect parameters: width=\" + W + \" filter_width=\" + S + \" stride=\" + horizontalStride + \" pad=\" + widthPadding);\n}\n- return (W + 2 * widthPadding - S) / horizontalStride + 1;\n+ long padded_image_width = W + 2 * widthPadding;\n+ long ret = (padded_image_width - S) / horizontalStride + 1;\n+ if(ret <= 0 || ret > Integer.MAX_VALUE) {\n+ // Check for valid output activation width\n+ if(padded_image_width < S)\n+ throw new RuntimeException(\"Incorrect parameters: padded image width:\" + padded_image_width + \" cannot be less than filter width:\" + S);\n+ else\n+ throw new RuntimeException(\"Incorrect parameters: width=\" + W + \" filter_width=\" + S + \" stride=\" + horizontalStride + \" pad=\" + widthPadding + \" as Q=\" + ret);\n+ }\n+ return ret;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] [MINOR] Throw user-friendly message for invalid convolution parameters - This commit checks whether the given parameters generate an output activation with negative dimensions.
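Note on the record above: a worked instance of the formula the new guards protect, P = (padded_height - R)/stride + 1 with padded_height = H + 2*pad (and likewise Q for the width); the numbers are illustrative.

// Worked example of the guarded output-height formula.
public class ConvDimExample {
    public static void main(String[] args) {
        long H = 3, R = 5, pad = 0, stride = 1;
        long paddedH = H + 2 * pad;           // 3, which is < R=5
        long P = (paddedH - R) / stride + 1;  // (3-5)/1 + 1 = -1, so the
        System.out.println(P);                // patch now fails fast here
        // valid case: H=28, R=5, pad=0, stride=1 gives P = (28-5)/1 + 1 = 24
    }
}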
49,738
10.09.2017 14:57:06
25,200
9f7fae6e6295caf3f2fee0b8407eb520c571d26b
[MINOR] Fix codegen GLM binomial probit tests (wrong parameterization)
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/AlgorithmGLM.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/AlgorithmGLM.java", "diff": "@@ -213,7 +213,7 @@ public class AlgorithmGLM extends AutomatedTestBase\naddArgs[0] = \"1\"; addArgs[1] = \"2.0\"; addArgs[2] = \"1\"; addArgs[3] = \"0.0\";\nbreak;\ncase BINOMIAL_PROBIT: //dfam, vpow, link, yneg\n- addArgs[0] = \"2\"; addArgs[1] = \"0.0\"; addArgs[2] = \"3\"; addArgs[3] = \"2\";\n+ addArgs[0] = \"2\"; addArgs[1] = \"0.0\"; addArgs[2] = \"3\"; addArgs[3] = \"0\";\nparam4Name = \"yneg=\";\nbreak;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix codegen GLM binomial probit tests (wrong parameterization)
49,738
10.09.2017 20:09:25
25,200
44439400cf51abc84de56e7546a4ceba46e81692
[MINOR] Fix missing stats reset, consolidated cache stats reset
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "new_path": "src/main/java/org/apache/sysml/api/DMLScript.java", "diff": "@@ -77,7 +77,6 @@ import org.apache.sysml.parser.ParserWrapper;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.DMLScriptException;\nimport org.apache.sysml.runtime.controlprogram.Program;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n@@ -829,11 +828,9 @@ public class DMLScript\n//reset statistics (required if multiple scripts executed in one JVM)\nStatistics.resetNoOfExecutedJobs();\n- if( STATISTICS ) {\n- CacheStatistics.reset();\n+ if( STATISTICS )\nStatistics.reset();\n}\n- }\nprivate static void checkSecuritySetup(DMLConfig config)\nthrows IOException, DMLRuntimeException\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "@@ -48,7 +48,6 @@ import org.apache.sysml.parser.ParserWrapper;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.Program;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\n@@ -376,11 +375,9 @@ public class ScriptExecutor {\nsetGlobalFlags();\n// reset all relevant summary statistics\nStatistics.resetNoOfExecutedJobs();\n- if (statistics) {\n- CacheStatistics.reset();\n+ if (statistics)\nStatistics.reset();\n}\n- }\n/**\n* Perform any necessary cleanup operations after program execution.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParWorkerReducer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteDPParWorkerReducer.java", "diff": "@@ -33,7 +33,6 @@ import org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitionFormat;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PartitionFormat;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.parfor.Task.TaskType;\n@@ -189,11 +188,8 @@ public class RemoteDPParWorkerReducer extends ParWorker\n//always reset stats because counters per map task (for case of JVM reuse)\nif( DMLScript.STATISTICS && !InfrastructureAnalyzer.isLocalMode(job) )\n- {\n- CacheStatistics.reset();\nStatistics.reset();\n}\n- }\n@Override\npublic void close()\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParWorkerMapper.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/RemoteParWorkerMapper.java", "diff": "@@ -33,7 +33,6 @@ import 
org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.ParForProgramBlock;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\n@@ -212,11 +211,8 @@ public class RemoteParWorkerMapper extends ParWorker //MapReduceBase not requir\n//always reset stats because counters per map task (for case of JVM reuse)\nif( DMLScript.STATISTICS && !InfrastructureAnalyzer.isLocalMode(job) )\n- {\n- CacheStatistics.reset();\nStatistics.reset();\n}\n- }\n@Override\npublic void close()\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -407,6 +407,8 @@ public class Statistics\ncodegenEnumEvalP.reset();\ncodegenCompileTime.reset();\ncodegenClassCompileTime.reset();\n+ codegenPlanCacheHits.reset();\n+ codegenPlanCacheTotal.reset();\nparforOptCount = 0;\nparforOptTime = 0;\n@@ -417,6 +419,8 @@ public class Statistics\nlTotalLixUIP.reset();\nlTotalUIPVar.reset();\n+ CacheStatistics.reset();\n+\nresetJITCompileTime();\nresetJVMgcTime();\nresetJVMgcCount();\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix missing stats reset, consolidated cache stats reset
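Note on the record above: the consolidation folds CacheStatistics.reset() into Statistics.reset(), so every reset site (DMLScript, ScriptExecutor, the parfor workers) gets the cache counters for free. A minimal sketch of that delegation pattern, with hypothetical names:

// Sketch: a single reset entry point so call sites cannot forget a counter.
import java.util.concurrent.atomic.LongAdder;

class CacheStatsSketch {
    static final LongAdder memHits = new LongAdder();
    static void reset() { memHits.reset(); }
}
class StatsSketch {
    static final LongAdder executedJobs = new LongAdder();
    static void reset() {
        executedJobs.reset();
        CacheStatsSketch.reset(); // consolidated; no separate call at call sites
    }
    public static void main(String[] args) { reset(); }
}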
49,768
11.09.2017 10:48:12
25,200
918e57937dc6476ae27744a37f400e0a5e0997e6
[MINOR] Change # of classes in multinomial family scripts
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/genMultinomialData.sh", "new_path": "scripts/perftest/genMultinomialData.sh", "diff": "@@ -33,31 +33,31 @@ SPARSE_SP=0.01\nexport HADOOP_CLIENT_OPTS=\"-Xmx2048m -Xms2048m -Xmn256m\"\n#generate XS scenarios (80MB)\n-${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000 1000 $DENSE_SP 150 0 $BASE/X10k_1k_dense_k150 $BASE/y10k_1k_dense_k150 $FORMAT 1\n-${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000 1000 $SPARSE_SP 150 0 $BASE/X10k_1k_sparse_k150 $BASE/y10k_1k_sparse_k150 $FORMAT 1\n-${CMD} -f extractTestData.dml $DASH-args $BASE/X10k_1k_dense_k150 $BASE/y10k_1k_dense_k150 $BASE/X10k_1k_dense_k150_test $BASE/y10k_1k_dense_k150_test $FORMAT\n-${CMD} -f extractTestData.dml $DASH-args $BASE/X10k_1k_sparse_k150 $BASE/y10k_1k_sparse_k150 $BASE/X10k_1k_sparse_k150_test $BASE/y10k_1k_sparse_k150_test $FORMAT\n+${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000 1000 $DENSE_SP 5 0 $BASE/X10k_1k_dense_k5 $BASE/y10k_1k_dense_k5 $FORMAT 1\n+${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000 1000 $SPARSE_SP 5 0 $BASE/X10k_1k_sparse_k5 $BASE/y10k_1k_sparse_k5 $FORMAT 1\n+${CMD} -f extractTestData.dml $DASH-args $BASE/X10k_1k_dense_k5 $BASE/y10k_1k_dense_k5 $BASE/X10k_1k_dense_k5_test $BASE/y10k_1k_dense_k5_test $FORMAT\n+${CMD} -f extractTestData.dml $DASH-args $BASE/X10k_1k_sparse_k5 $BASE/y10k_1k_sparse_k5 $BASE/X10k_1k_sparse_k5_test $BASE/y10k_1k_sparse_k5_test $FORMAT\n##generate S scenarios (80MB)\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000 1000 $DENSE_SP 150 0 $BASE/X100k_1k_dense_k150 $BASE/y100k_1k_dense_k150 $FORMAT 1\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000 1000 $SPARSE_SP 150 0 $BASE/X100k_1k_sparse_k150 $BASE/y100k_1k_sparse_k150 $FORMAT 1\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X100k_1k_dense_k150 $BASE/y100k_1k_dense_k150 $BASE/X100k_1k_dense_k150_test $BASE/y100k_1k_dense_k150_test $FORMAT\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X100k_1k_sparse_k150 $BASE/y100k_1k_sparse_k150 $BASE/X100k_1k_sparse_k150_test $BASE/y100k_1k_sparse_k150_test $FORMAT\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000 1000 $DENSE_SP 5 0 $BASE/X100k_1k_dense_k5 $BASE/y100k_1k_dense_k5 $FORMAT 1\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000 1000 $SPARSE_SP 5 0 $BASE/X100k_1k_sparse_k5 $BASE/y100k_1k_sparse_k5 $FORMAT 1\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X100k_1k_dense_k5 $BASE/y100k_1k_dense_k5 $BASE/X100k_1k_dense_k5_test $BASE/y100k_1k_dense_k5_test $FORMAT\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X100k_1k_sparse_k5 $BASE/y100k_1k_sparse_k5 $BASE/X100k_1k_sparse_k5_test $BASE/y100k_1k_sparse_k5_test $FORMAT\n#\n##generate M scenarios (8GB)\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 1000000 1000 $DENSE_SP 150 0 $BASE/X1M_1k_dense_k150 $BASE/y1M_1k_dense_k150 $FORMAT 1\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 1000000 1000 $SPARSE_SP 150 0 $BASE/X1M_1k_sparse_k150 $BASE/y1M_1k_sparse_k150 $FORMAT 1\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X1M_1k_dense_k150 $BASE/y1M_1k_dense_k150 $BASE/X1M_1k_dense_k150_test $BASE/y1M_1k_dense_k150_test $FORMAT\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X1M_1k_sparse_k150 $BASE/y1M_1k_sparse_k150 $BASE/X1M_1k_sparse_k150_test $BASE/y1M_1k_sparse_k150_test $FORMAT\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 1000000 1000 $DENSE_SP 5 0 
$BASE/X1M_1k_dense_k5 $BASE/y1M_1k_dense_k5 $FORMAT 1\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 1000000 1000 $SPARSE_SP 5 0 $BASE/X1M_1k_sparse_k5 $BASE/y1M_1k_sparse_k5 $FORMAT 1\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X1M_1k_dense_k5 $BASE/y1M_1k_dense_k5 $BASE/X1M_1k_dense_k5_test $BASE/y1M_1k_dense_k5_test $FORMAT\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X1M_1k_sparse_k5 $BASE/y1M_1k_sparse_k5 $BASE/X1M_1k_sparse_k5_test $BASE/y1M_1k_sparse_k5_test $FORMAT\n#\n##generate L scenarios (80GB)\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000000 1000 $DENSE_SP 150 0 $BASE/X10M_1k_dense_k150 $BASE/y10M_1k_dense_k150 $FORMAT 1\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000000 1000 $SPARSE_SP 150 0 $BASE/X10M_1k_sparse_k150 $BASE/y10M_1k_sparse_k150 $FORMAT 1\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X10M_1k_dense_k150 $BASE/y10M_1k_dense_k150 $BASE/X10M_1k_dense_k150_test $BASE/y10M_1k_dense_k150_test $FORMAT\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X10M_1k_sparse_k150 $BASE/y10M_1k_sparse_k150 $BASE/X10M_1k_sparse_k150_test $BASE/y10M_1k_sparse_k150_test $FORMAT\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000000 1000 $DENSE_SP 5 0 $BASE/X10M_1k_dense_k5 $BASE/y10M_1k_dense_k5 $FORMAT 1\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 10000000 1000 $SPARSE_SP 5 0 $BASE/X10M_1k_sparse_k5 $BASE/y10M_1k_sparse_k5 $FORMAT 1\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X10M_1k_dense_k5 $BASE/y10M_1k_dense_k5 $BASE/X10M_1k_dense_k5_test $BASE/y10M_1k_dense_k5_test $FORMAT\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X10M_1k_sparse_k5 $BASE/y10M_1k_sparse_k5 $BASE/X10M_1k_sparse_k5_test $BASE/y10M_1k_sparse_k5_test $FORMAT\n#\n##generate LARGE scenarios (800GB)\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000000 1000 $DENSE_SP 150 0 $BASE/X100M_1k_dense_k150 $BASE/y100M_1k_dense_k150 $FORMAT 1\n-#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000000 1000 $SPARSE_SP 150 0 $BASE/X100M_1k_sparse_k150 $BASE/y100M_1k_sparse_k150 $FORMAT 1\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X100M_1k_dense_k150 $BASE/y100M_1k_dense_k150 $BASE/X100M_1k_dense_k150_test $BASE/y100M_1k_dense_k150_test $FORMAT\n-#${CMD} -f extractTestData.dml $DASH-args $BASE/X100M_1k_sparse_k150 $BASE/y100M_1k_sparse_k150 $BASE/X100M_1k_sparse_k150_test $BASE/y100M_1k_sparse_k150_test $FORMAT\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000000 1000 $DENSE_SP 5 0 $BASE/X100M_1k_dense_k5 $BASE/y100M_1k_dense_k5 $FORMAT 1\n+#${CMD} -f ../datagen/genRandData4Multinomial.dml $DASH-args 100000000 1000 $SPARSE_SP 5 0 $BASE/X100M_1k_sparse_k5 $BASE/y100M_1k_sparse_k5 $FORMAT 1\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X100M_1k_dense_k5 $BASE/y100M_1k_dense_k5 $BASE/X100M_1k_dense_k5_test $BASE/y100M_1k_dense_k5_test $FORMAT\n+#${CMD} -f extractTestData.dml $DASH-args $BASE/X100M_1k_sparse_k5 $BASE/y100M_1k_sparse_k5 $BASE/X100M_1k_sparse_k5_test $BASE/y100M_1k_sparse_k5_test $FORMAT\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/runAllMultinomial.sh", "new_path": "scripts/perftest/runAllMultinomial.sh", "diff": "@@ -47,18 +47,18 @@ do\nfor f in \"runNaiveBayes\"\ndo\necho \"-- Running \"$f\" on \"$d\" (all configs)\" >> times.txt;\n- ./${f}.sh ${BASE}/X${d}_k150 ${BASE}/y${d}_k150 150 ${BASE} $2 &> logs/${f}_${d}_k150.out;\n+ ./${f}.sh ${BASE}/X${d}_k5 ${BASE}/y${d}_k5 5 ${BASE} $2 &> 
logs/${f}_${d}_k5.out;\ndone\n# run with the parameter setting maximum of iterations\nfor f in \"runMultiLogReg\" \"runMSVM\"\ndo\necho \"-- Running \"$f\" on \"$d\" (all configs)\" >> times.txt;\n- ./${f}.sh ${BASE}/X${d}_k150 ${BASE}/y${d}_k150 150 ${BASE} $2 ${MAXITR} &> logs/${f}_${d}_k150.out;\n+ ./${f}.sh ${BASE}/X${d}_k5 ${BASE}/y${d}_k5 5 ${BASE} $2 ${MAXITR} &> logs/${f}_${d}_k5.out;\ndone\ndone\n#run KDD only on naive bayes (see binomial for the others)\n-#./runNaiveBayes.sh ${BASE0}/X_KDD_k150 ${BASE}/y_KDD_k150 150 &> logs/runNaiveBayes__KDD_k150.out;\n-#./runNaiveBayes.sh ${BASE0}/X_KDD ${BASE}/y_KDD 150 &> logs/runNaiveBayes__KDD_k150.out;\n+#./runNaiveBayes.sh ${BASE0}/X_KDD_k5 ${BASE}/y_KDD_k5 5 &> logs/runNaiveBayes__KDD_k5.out;\n+#./runNaiveBayes.sh ${BASE0}/X_KDD ${BASE}/y_KDD 5 &> logs/runNaiveBayes__KDD_k5.out;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Change # of classes in multinomial family scripts
49,768
11.09.2017 13:38:18
25,200
687e19c5529b371f937a9ecb60075373f36fd4d6
[MINOR] Update kmeans' number of centroids in perf scripts
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/genClusteringData.sh", "new_path": "scripts/perftest/genClusteringData.sh", "diff": "@@ -31,22 +31,22 @@ SPARSE_SP=0.01\nexport HADOOP_CLIENT_OPTS=\"-Xmx2048m -Xms2048m -Xmn256m\"\n#generate XS scenarios (80MB)\n-${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=10000 nf=1000 nc=50 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X10k_1k_dense C=$BASE/C10k_1k_dense Y=$BASE/y10k_1k_dense YbyC=$BASE/YbyC10k_1k_dense fmt=$FORMAT\n+${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=10000 nf=1000 nc=5 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X10k_1k_dense C=$BASE/C10k_1k_dense Y=$BASE/y10k_1k_dense YbyC=$BASE/YbyC10k_1k_dense fmt=$FORMAT\n${CMD} -f extractTestData.dml $DASH-args $BASE/X10k_1k_dense $BASE/y10k_1k_dense $BASE/X10k_1k_dense_test $BASE/y10k_1k_dense_test $FORMAT\n#generate S scenarios (800MB)\n-#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=100000 nf=1000 nc=50 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X100k_1k_dense C=$BASE/C100k_1k_dense Y=$BASE/y100k_1k_dense YbyC=$BASE/YbyC100k_1k_dense fmt=$FORMAT\n+#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=100000 nf=1000 nc=5 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X100k_1k_dense C=$BASE/C100k_1k_dense Y=$BASE/y100k_1k_dense YbyC=$BASE/YbyC100k_1k_dense fmt=$FORMAT\n#${CMD} -f extractTestData.dml $DASH-args $BASE/X100k_1k_dense $BASE/y100k_1k_dense $BASE/X100k_1k_dense_test $BASE/y100k_1k_dense_test $FORMAT\n#generate M scenarios (8GB)\n-#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=1000000 nf=1000 nc=50 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X1M_1k_dense C=$BASE/C1M_1k_dense Y=$BASE/y1M_1k_dense YbyC=$BASE/YbyC1M_1k_dense fmt=$FORMAT\n+#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=1000000 nf=1000 nc=5 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X1M_1k_dense C=$BASE/C1M_1k_dense Y=$BASE/y1M_1k_dense YbyC=$BASE/YbyC1M_1k_dense fmt=$FORMAT\n#${CMD} -f extractTestData.dml $DASH-args $BASE/X1M_1k_dense $BASE/y1M_1k_dense $BASE/X1M_1k_dense_test $BASE/y1M_1k_dense_test $FORMAT\n#generate L scenarios (80GB)\n-#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=10000000 nf=1000 nc=50 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X10M_1k_dense C=$BASE/C10M_1k_dense Y=$BASE/y10M_1k_dense YbyC=$BASE/YbyC10M_1k_dense fmt=$FORMAT\n+#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=10000000 nf=1000 nc=5 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X10M_1k_dense C=$BASE/C10M_1k_dense Y=$BASE/y10M_1k_dense YbyC=$BASE/YbyC10M_1k_dense fmt=$FORMAT\n#${CMD} -f extractTestData.dml $DASH-args $BASE/X10M_1k_dense $BASE/y10M_1k_dense $BASE/X10M_1k_dense_test $BASE/y10M_1k_dense_test $FORMAT\n#generate LARGE scenarios (800GB)\n-#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=100000000 nf=1000 nc=50 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X100M_1k_dense C=$BASE/C100M_1k_dense Y=$BASE/y100M_1k_dense YbyC=$BASE/YbyC100M_1k_dense fmt=$FORMAT\n+#${CMD} -f ../datagen/genRandData4Kmeans.dml $DASH-nvargs nr=100000000 nf=1000 nc=5 dc=10.0 dr=1.0 fbf=100.0 cbf=100.0 X=$BASE/X100M_1k_dense C=$BASE/C100M_1k_dense Y=$BASE/y100M_1k_dense YbyC=$BASE/YbyC100M_1k_dense fmt=$FORMAT\n#${CMD} -f extractTestData.dml $DASH-args $BASE/X100M_1k_dense $BASE/y100M_1k_dense $BASE/X100M_1k_dense_test $BASE/y100M_1k_dense_test $FORMAT\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/runKmeans.sh", "new_path": "scripts/perftest/runKmeans.sh", "diff": "@@ -29,7 +29,7 @@ export HADOOP_CLIENT_OPTS=\"-Xmx2048m 
-Xms2048m -Xmn256m\"\n#training\ntstart=$SECONDS\n-${CMD} -f ../algorithms/Kmeans.dml $DASH-explain $DASH-stats $DASH-nvargs X=$1 k=50 C=${BASE}/centroids.mtx maxi=$2 tol=0.0001\n+${CMD} -f ../algorithms/Kmeans.dml $DASH-explain $DASH-stats $DASH-nvargs X=$1 k=5 C=${BASE}/centroids.mtx maxi=$2 tol=0.0001\nttrain=$(($SECONDS - $tstart - 3))\necho \"Kmeans train on \"$1\": \"$ttrain >> times.txt\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Update kmeans' number of centroids in perf scripts
49,738
11.09.2017 14:00:36
25,200
f45493e8aabb33ec1f628c016b3e68508dcf4ac8
Allow external codegen java compiler configuration This patch exposes the codegen compiler configuration via a SystemML configuration property. The default is 'auto', which is equivalent to the previous behavior. However, now users can explicitly set the codegen compiler, which is useful for programmatic APIs such as JMLC.
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<!-- enables operator fusion via code generation, experimental feature -->\n<codegen.enabled>false</codegen.enabled>\n+ <!-- set the codegen java compiler (auto, janino, javac) -->\n+ <codegen.compiler>auto</codegen.compiler>\n+\n<!-- if codegen.enabled, enables source code caching of fused operators -->\n<codegen.plancache>false</codegen.plancache>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -40,6 +40,7 @@ import org.apache.commons.logging.LogFactory;\nimport org.apache.hadoop.fs.FileSystem;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.hops.codegen.SpoofCompiler.CompilerType;\nimport org.apache.sysml.parser.ParseException;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\n@@ -74,6 +75,7 @@ public class DMLConfig\npublic static final String COMPRESSED_LINALG = \"compressed.linalg\";\npublic static final String NATIVE_BLAS = \"native.blas\";\npublic static final String CODEGEN = \"codegen.enabled\"; //boolean\n+ public static final String CODEGEN_COMPILER = \"codegen.compiler\"; //see SpoofCompiler.CompilerType\npublic static final String CODEGEN_PLANCACHE = \"codegen.plancache\"; //boolean\npublic static final String CODEGEN_LITERALS = \"codegen.literals\"; //1..heuristic, 2..always\npublic static final String EXTRA_FINEGRAINED_STATS = \"systemml.stats.finegrained\"; //boolean\n@@ -119,6 +121,7 @@ public class DMLConfig\n_defaultVals.put(CP_PARALLEL_IO, \"true\" );\n_defaultVals.put(COMPRESSED_LINALG, \"false\" );\n_defaultVals.put(CODEGEN, \"false\" );\n+ _defaultVals.put(CODEGEN_COMPILER, CompilerType.AUTO.name() );\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n_defaultVals.put(NATIVE_BLAS, \"none\" );\n@@ -126,7 +129,6 @@ public class DMLConfig\n_defaultVals.put(STATS_MAX_WRAP_LEN, \"30\" );\n_defaultVals.put(EXTRA_GPU_STATS, \"false\" );\n_defaultVals.put(EXTRA_DNN_STATS, \"false\" );\n-\n_defaultVals.put(GPU_MEMORY_UTILIZATION_FACTOR, \"0.9\" );\n_defaultVals.put(AVAILABLE_GPUS, \"-1\");\n}\n@@ -408,7 +410,8 @@ public class DMLConfig\nNUM_REDUCERS, DEFAULT_BLOCK_SIZE,\nYARN_APPMASTER, YARN_APPMASTERMEM, YARN_MAPREDUCEMEM,\nCP_PARALLEL_OPS, CP_PARALLEL_IO, NATIVE_BLAS,\n- COMPRESSED_LINALG, CODEGEN, CODEGEN_LITERALS, CODEGEN_PLANCACHE,\n+ COMPRESSED_LINALG,\n+ CODEGEN, CODEGEN_COMPILER, CODEGEN_PLANCACHE, CODEGEN_LITERALS,\nEXTRA_GPU_STATS, EXTRA_DNN_STATS, EXTRA_FINEGRAINED_STATS, STATS_MAX_WRAP_LEN,\nAVAILABLE_GPUS\n};\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -36,6 +36,8 @@ import org.apache.log4j.Logger;\nimport org.apache.sysml.api.DMLException;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.conf.ConfigurationManager;\n+import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeCell;\nimport org.apache.sysml.hops.codegen.cplan.CNodeData;\n@@ -114,6 +116,7 @@ public class SpoofCompiler\npublic static final PlanSelector PLAN_SEL_POLICY = 
PlanSelector.FUSE_COST_BASED_V2;\npublic enum CompilerType {\n+ AUTO,\nJAVAC,\nJANINO,\n}\n@@ -484,7 +487,11 @@ public class SpoofCompiler\n}\npublic static void setExecTypeSpecificJavaCompiler() {\n- JAVA_COMPILER = OptimizerUtils.isSparkExecutionMode() ?\n+ DMLConfig conf = ConfigurationManager.getDMLConfig();\n+ String compiler = conf.getTextValue(DMLConfig.CODEGEN_COMPILER);\n+ CompilerType type = CompilerType.valueOf(compiler.toUpperCase());\n+ JAVA_COMPILER = (type != CompilerType.AUTO) ? type :\n+ OptimizerUtils.isSparkExecutionMode() ?\nCompilerType.JANINO : CompilerType.JAVAC;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1900] Allow external codegen java compiler configuration This patch exposes the codegen compiler configuration via a SystemML configuration property. The default is 'auto', which is equivalent to the previous behavior. However, now users can explicitly set the codegen compiler, which is useful for programmatic APIs such as JMLC.
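Note on the record above: a hedged usage sketch of the new property from a programmatic API. It assumes the JMLC Connection exposes setConfigProperty (as in SystemML's JMLC API of this period) and that the property values are the CompilerType names added by the patch:

// Hypothetical JMLC usage: pin the codegen compiler to janino instead of
// the 'auto' exec-type-based choice (API assumed, shown for illustration).
import org.apache.sysml.api.jmlc.Connection;
import org.apache.sysml.conf.DMLConfig;

public class CodegenConfigExample {
    public static void main(String[] args) throws Exception {
        Connection conn = new Connection();
        conn.setConfigProperty(DMLConfig.CODEGEN, "true");
        conn.setConfigProperty(DMLConfig.CODEGEN_COMPILER, "janino");
        conn.close();
    }
}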
49,738
11.09.2017 15:19:00
25,200
754548190a7ade052ab7218941bd2fa43b50b318
Fix parfor support for frame inputs and intermediates This patch fixes parfor optimizer and runtime issues regarding the support of frame inputs and intermediates. Accordingly, this patch also adds some related tests to avoid such issues in the future.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -1279,7 +1279,8 @@ public class ParForProgramBlock extends ForProgramBlock\n}\nbreak;\ncase MATRIX:\n- //currently we do not create any unscoped matrix object outputs\n+ case FRAME:\n+ //currently we do not create any unscoped matrix or frame outputs\n//because metadata (e.g., outputinfo) not known at this place.\nbreak;\ncase UNKNOWN:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -456,8 +456,7 @@ public class OptimizerRuleBased extends Optimizer\n{\nboolean ret = false;\n- if( !n.isLeaf() )\n- {\n+ if( !n.isLeaf() ) {\nfor( OptNode cn : n.getChilds() )\nif( cn.getNodeType() != NodeType.FUNCCALL ) //prevent conflicts with aliases\nret |= rFindDataPartitioningCandidates( cn, cand, vars, thetaM );\n@@ -467,7 +466,7 @@ public class OptimizerRuleBased extends Optimizer\n{\nHop h = OptTreeConverter.getAbstractPlanMapping().getMappedHop(n.getID());\nString inMatrix = h.getInput().get(0).getName();\n- if( cand.containsKey(inMatrix) ) //Required Condition: partitioning applicable\n+ if( cand.containsKey(inMatrix) && h.getDataType().isMatrix() ) //Required: partitionable\n{\nPartitionFormat dpf = cand.get(inMatrix);\ndouble mnew = getNewRIXMemoryEstimate( n, inMatrix, dpf, vars );\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/frame/ParforFrameIntermediateTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.frame;\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.io.FrameWriterFactory;\n+import org.apache.sysml.runtime.matrix.data.FrameBlock;\n+import org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n+\n+public class ParforFrameIntermediateTest extends AutomatedTestBase\n+{\n+ private final static String TEST_DIR = \"functions/frame/\";\n+ private final static String TEST_NAME = \"ParforFrameIntermediates\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + ParforFrameIntermediateTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 1382;\n+ private final static int cols = 5;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"F2\"}));\n+ }\n+\n+ @Test\n+ public void testParforFrameIntermediatesCP() {\n+ runParforFrameIntermediatesTest(ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testParforFrameIntermediatesSpark() {\n+ runParforFrameIntermediatesTest(ExecType.SPARK);\n+ }\n+\n+ private void runParforFrameIntermediatesTest( ExecType et ) {\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK\n+ || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ //setup testcase\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-args\", input(\"F\")};\n+\n+ //generate input data and write as frame\n+ double[][] A = getRandomMatrix(rows, cols, -10, 10, 0.9, 8362);\n+ FrameBlock fA = DataConverter.convertToFrameBlock(\n+ DataConverter.convertToMatrixBlock(A));\n+ FrameWriterFactory.createFrameWriter(OutputInfo.CSVOutputInfo)\n+ .writeFrameToHDFS(fA, input(\"F\"), rows, cols);\n+\n+ //run test\n+ runTest(true, false, null, -1);\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/frame/ParforFrameIntermediates.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+F = read($1, data_type=\"frame\", format=\"csv\");\n+\n+parfor(i in 1:ncol(F)) {\n+ Fi = F[, i];\n+ Mi = as.matrix(Fi) + 7;\n+ Fi2 = as.frame(Mi);\n+ print(toString(Fi2[1:2,]));\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/frame/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/frame/ZPackageSuite.java", "diff": "@@ -45,6 +45,7 @@ import org.junit.runners.Suite;\nFrameScalarCastingTest.class,\nFrameSchemaReadTest.class,\nFrameSerializationTest.class,\n+ ParforFrameIntermediateTest.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1901] Fix parfor support for frame inputs and intermediates This patch fixes parfor optimizer and runtime issues regarding the support of frame inputs and intermediates. Accordingly, this patch also adds some related tests to avoid such issues in the future.
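Note on the record above: the optimizer half of the fix is the added h.getDataType().isMatrix() guard, since frames reach the same right-indexing code path but must not become data-partitioning candidates. A stripped-down sketch of that predicate, with names assumed for illustration:

// Sketch of the candidate check: partitionable = known candidate AND matrix,
// so frame inputs and intermediates are skipped by the parfor optimizer.
import java.util.Map;

class PartitionGuardSketch {
    static boolean isPartitioningCandidate(
            String inputName, Map<String, ?> cand, boolean isMatrix) {
        return cand.containsKey(inputName) && isMatrix;
    }
}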
49,719
11.09.2017 15:30:29
25,200
cddd2a4f60e22e8b621712135e5ed263b25343c0
[Minor] added cross validation example
[ { "change_type": "MODIFY", "old_path": "samples/jupyter-notebooks/DML Tips and Tricks (aka Fun With DML).ipynb", "new_path": "samples/jupyter-notebooks/DML Tips and Tricks (aka Fun With DML).ipynb", "diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"1. [Value-based join of two Matrices](#JoinMatrices)\\n\",\n+ \"1. [Cross Validation](#CrossValidation)\\n\",\n+ \"* [Value-based join of two Matrices](#JoinMatrices)\\n\",\n\"* [Filter Matrix to include only Frequent Column Values](#FilterMatrix)\\n\",\n\"* [Construct (sparse) Matrix from (rowIndex, colIndex, values) triplets](#Construct_sparse_Matrix)\\n\",\n\"* [Find and remove duplicates in columns or rows](#Find_and_remove_duplicates)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": null,\n+ \"execution_count\": 2,\n\"metadata\": {\n\"collapsed\": false,\n\"scrolled\": true\n},\n- \"outputs\": [],\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"2017-08-18 21:33:18 UTC\\n\"\n+ ]\n+ }\n+ ],\n\"source\": [\n\"from systemml import MLContext, dml, jvm_stdout\\n\",\n\"ml = MLContext(sc)\\n\",\n\"print (ml.buildTime())\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Cross Validation<a id=\\\"CrossValidation\\\" />\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Perform kFold cross validation by running in parallel fold creation, training algorithm, test algorithm, and evaluation.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 4,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"Test data Xyi2\\n\",\n+ \"10.000 11.000 12.000 4.000\\n\",\n+ \"16.000 17.000 18.000 6.000\\n\",\n+ \"\\n\",\n+ \"Train data Xyni2\\n\",\n+ \"1.000 2.000 3.000 1.000\\n\",\n+ \"4.000 5.000 6.000 2.000\\n\",\n+ \"7.000 8.000 9.000 3.000\\n\",\n+ \"13.000 14.000 15.000 5.000\\n\",\n+ \"\\n\",\n+ \"w2\\n\",\n+ \"95.000\\n\",\n+ \"106.000\\n\",\n+ \"117.000\\n\",\n+ \"\\n\",\n+ \"stats2\\n\",\n+ \"8938.000\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"Test data Xyi3\\n\",\n+ \"1.000 2.000 3.000 1.000\\n\",\n+ \"7.000 8.000 9.000 3.000\\n\",\n+ \"\\n\",\n+ \"Train data Xyni3\\n\",\n+ \"4.000 5.000 6.000 2.000\\n\",\n+ \"10.000 11.000 12.000 4.000\\n\",\n+ \"13.000 14.000 15.000 5.000\\n\",\n+ \"16.000 17.000 18.000 6.000\\n\",\n+ \"\\n\",\n+ \"w3\\n\",\n+ \"209.000\\n\",\n+ \"226.000\\n\",\n+ \"243.000\\n\",\n+ \"\\n\",\n+ \"stats3\\n\",\n+ \"6844.000\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"Test data Xyi1\\n\",\n+ \"4.000 5.000 6.000 2.000\\n\",\n+ \"13.000 14.000 15.000 5.000\\n\",\n+ \"\\n\",\n+ \"Train data Xyni1\\n\",\n+ \"1.000 2.000 3.000 1.000\\n\",\n+ \"7.000 8.000 9.000 3.000\\n\",\n+ \"10.000 11.000 12.000 4.000\\n\",\n+ \"16.000 17.000 18.000 6.000\\n\",\n+ \"\\n\",\n+ \"w1\\n\",\n+ \"158.000\\n\",\n+ \"172.000\\n\",\n+ \"186.000\\n\",\n+ \"\\n\",\n+ \"stats1\\n\",\n+ \"9853.000\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"SV selection vector:\\n\",\n+ \"3.000\\n\",\n+ \"1.000\\n\",\n+ \"3.000\\n\",\n+ \"2.000\\n\",\n+ \"1.000\\n\",\n+ \"2.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.024 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"prog = \\\"\\\"\\\"\\n\",\n+ \"holdOut = 1/3\\n\",\n+ \"kFolds = 1/holdOut\\n\",\n+ \"\\n\",\n+ \"nRows = 6; nCols = 3; \\n\",\n+ \"\\n\",\n+ \"X = 
matrix(seq(1, nRows * nCols), rows = nRows, cols = nCols) # X data\\n\",\n+ \"y = matrix(seq(1, nRows), rows = nRows, cols = 1) # y label data\\n\",\n+ \"Xy = cbind (X,y) # Xy Data for CV\\n\",\n+ \"\\n\",\n+ \"sv = rand (rows = nRows, cols = 1, min = 0.0, max = 1.0, pdf = \\\"uniform\\\") # sv selection vector for fold creation \\n\",\n+ \"sv = (order(target=sv, by=1, index.return=TRUE)) %% kFolds + 1 # with numbers between 1 .. kFolds \\n\",\n+ \"\\n\",\n+ \"stats = matrix(0, rows=kFolds, cols=1) # stats per kFolds model on test data\\n\",\n+ \"\\n\",\n+ \"parfor (i in 1:kFolds)\\n\",\n+ \"{\\n\",\n+ \" # Skip empty training data or test data. \\n\",\n+ \" if ( sum (sv == i) > 0 & sum (sv == i) < nrow(X) ) \\n\",\n+ \" {\\n\",\n+ \" Xyi = removeEmpty(target = Xy, margin = \\\"rows\\\", select = (sv == i)) # Xyi fold, i.e. 1/k of rows (test data)\\n\",\n+ \" Xyni = removeEmpty(target = Xy, margin = \\\"rows\\\", select = (sv != i)) # Xyni data, i.e. (k-1)/k of rows (train data)\\n\",\n+ \"\\n\",\n+ \" # Skip extreme label inbalance\\n\",\n+ \" distinctLabels = aggregate( target = Xyni[,1], groups = Xyni[,1], fn = \\\"count\\\")\\n\",\n+ \" if ( nrow(distinctLabels) > 1)\\n\",\n+ \" {\\n\",\n+ \" wi = trainAlg (Xyni[ ,1:ncol(Xy)-1], Xyni[ ,ncol(Xy)]) # wi Model for i-th training data\\n\",\n+ \" pi = testAlg (Xyi [ ,1:ncol(Xy)-1], wi) # pi Prediction for i-th test data\\n\",\n+ \" ei = evalPrediction (pi, Xyi[ ,ncol(Xy)]) # stats[i,] evaluation of prediction of i-th fold\\n\",\n+ \" stats[i,] = ei\\n\",\n+ \" \\n\",\n+ \" print ( \\\"Test data Xyi\\\" + i + \\\"\\\\n\\\" + toString(Xyi) \\n\",\n+ \" + \\\"\\\\nTrain data Xyni\\\" + i + \\\"\\\\n\\\" + toString(Xyni) \\n\",\n+ \" + \\\"\\\\nw\\\" + i + \\\"\\\\n\\\" + toString(wi) \\n\",\n+ \" + \\\"\\\\nstats\\\" + i + \\\"\\\\n\\\" + toString(stats[i,]) \\n\",\n+ \" + \\\"\\\\n\\\")\\n\",\n+ \" }\\n\",\n+ \" else\\n\",\n+ \" {\\n\",\n+ \" print (\\\"Training data for fold \\\" + i + \\\" has only \\\" + nrow(distinctLabels) + \\\" distinct labels. Needs to be > 1.\\\")\\n\",\n+ \" } \\n\",\n+ \" } \\n\",\n+ \" else \\n\",\n+ \" {\\n\",\n+ \" print (\\\"Training data or test data for fold \\\" + i + \\\" is empty. Fold not validated.\\\")\\n\",\n+ \" }\\n\",\n+ \"\\n\",\n+ \"}\\n\",\n+ \"\\n\",\n+ \"print (\\\"SV selection vector:\\\\n\\\" + toString(sv))\\n\",\n+ \"\\n\",\n+ \"trainAlg = function (matrix[double] X, matrix[double] y)\\n\",\n+ \" return (matrix[double] w)\\n\",\n+ \"{\\n\",\n+ \" w = t(X) %*% y\\n\",\n+ \"}\\n\",\n+ \"\\n\",\n+ \"testAlg = function (matrix[double] X, matrix[double] w)\\n\",\n+ \" return (matrix[double] p)\\n\",\n+ \"{\\n\",\n+ \" p = X %*% w\\n\",\n+ \"}\\n\",\n+ \"\\n\",\n+ \"evalPrediction = function (matrix[double] p, matrix[double] y)\\n\",\n+ \" return (matrix[double] e)\\n\",\n+ \"{\\n\",\n+ \" e = as.matrix(sum (p - y))\\n\",\n+ \"}\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"\\n\",\n+ \"with jvm_stdout(True):\\n\",\n+ \" ml.execute(dml(prog))\"\n+ ]\n+ },\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n" } ]
Java
Apache License 2.0
apache/systemds
[Minor] added cross validation example
49,768
12.09.2017 11:11:08
25,200
0751ddc0f73e44445a8439f17aafd990138634e6
[MINOR] Update python3 package
[ { "change_type": "MODIFY", "old_path": "dev/release/src/test/bin/verifyBuild.sh", "new_path": "dev/release/src/test/bin/verifyBuild.sh", "diff": "@@ -90,6 +90,7 @@ if [ -z $WORKING_DIR ] ; then\nWORKING_DIR=\"$USER_DIR/tmp/relValidation\"\nfi\n+rm -rf \"$WORKING_DIR\"/systemml\nmkdir -p \"$WORKING_DIR\"\nOUT_FILE=$WORKING_DIR/relValidation.out\nERR_FILE=$WORKING_DIR/relValidation.err\n@@ -171,6 +172,7 @@ runCommand \"cd ../../\"\necho \"`date +%Y-%m-%dT%H:%M:%S`: INFO: Verifying Python scripts...\"\necho \"`date +%Y-%m-%dT%H:%M:%S`: INFO: Verifying Python scripts...\" >> $OUT_FILE\nrunCommand \"pip install --upgrade systemml-$VER_NAME-python.tgz\"\n+runCommand \"pip3 install --upgrade systemml-$VER_NAME-python.tgz\"\nrunCommand \"cd ../../../\"\nrunCommand \"$SPARK_HOME/bin/spark-submit src/test/python/matrix_sum_example.py\"\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Update python3 package
49,738
12.09.2017 19:10:58
25,200
3acd94186b5e6e2fdf12823e7932bf56027511c6
Extended codegen plan enumeration statistics (all plans) This patch extends the codegen plan enumeration statistics by the number of total plans without partitioning by connected components, i.e., the sum of 2^(|M|) per DAG.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBased.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBased.java", "diff": "@@ -57,6 +57,7 @@ import org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDSequence;\n+import org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.utils.Statistics;\n/**\n@@ -87,12 +88,14 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nCollection<PlanPartition> parts = PlanAnalyzer.analyzePlanPartitions(memo, roots, false);\n//step 2: optimize individual plan partitions\n+ int sumMatPoints = 0;\nfor( PlanPartition part : parts ) {\n//create composite templates (within the partition)\ncreateAndAddMultiAggPlans(memo, part.getPartition(), part.getRoots());\n//plan enumeration and plan selection\nselectPlans(memo, part.getPartition(), part.getRoots(), part.getMatPoints());\n+ sumMatPoints += part.getMatPoints().size();\n}\n//step 3: add composite templates (across partitions)\n@@ -101,6 +104,10 @@ public class PlanSelectionFuseCostBased extends PlanSelection\n//take all distinct best plans\nfor( Entry<Long, List<MemoTableEntry>> e : getBestPlans().entrySet() )\nmemo.setDistinct(e.getKey(), e.getValue());\n+\n+ //maintain statistics\n+ if( DMLScript.STATISTICS )\n+ Statistics.incrementCodegenEnumAll(UtilFunctions.pow(2, sumMatPoints));\n}\n//within-partition multi-agg templates\n@@ -389,7 +396,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\n}\nif( DMLScript.STATISTICS ) {\n- Statistics.incrementCodegenEnumAll(len);\n+ Statistics.incrementCodegenEnumAllP(len);\nStatistics.incrementCodegenEnumEval(len);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "diff": "@@ -107,12 +107,14 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\nCollection<PlanPartition> parts = PlanAnalyzer.analyzePlanPartitions(memo, roots, true);\n//step 2: optimize individual plan partitions\n+ int sumMatPoints = 0;\nfor( PlanPartition part : parts ) {\n//create composite templates (within the partition)\ncreateAndAddMultiAggPlans(memo, part.getPartition(), part.getRoots());\n//plan enumeration and plan selection\nselectPlans(memo, part);\n+ sumMatPoints += part.getMatPointsExt().length;\n}\n//step 3: add composite templates (across partitions)\n@@ -121,6 +123,10 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\n//take all distinct best plans\nfor( Entry<Long, List<MemoTableEntry>> e : getBestPlans().entrySet() )\nmemo.setDistinct(e.getKey(), e.getValue());\n+\n+ //maintain statistics\n+ if( DMLScript.STATISTICS )\n+ Statistics.incrementCodegenEnumAll(UtilFunctions.pow(2, sumMatPoints));\n}\nprivate void selectPlans(CPlanMemoTable memo, PlanPartition part)\n@@ -257,7 +263,7 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\n}\nif( DMLScript.STATISTICS ) {\n- Statistics.incrementCodegenEnumAll((rgraph!=null)?len:0);\n+ Statistics.incrementCodegenEnumAllP((rgraph!=null)?len:0);\nStatistics.incrementCodegenEnumEval(numEvalPlans);\nStatistics.incrementCodegenEnumEvalP(numEvalPartPlans);\n}\n" }, { 
"change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -79,6 +79,7 @@ public class Statistics\nprivate static final LongAdder codegenCPlanCompile = new LongAdder(); //count\nprivate static final LongAdder codegenClassCompile = new LongAdder(); //count\nprivate static final LongAdder codegenEnumAll = new LongAdder(); //count\n+ private static final LongAdder codegenEnumAllP = new LongAdder(); //count\nprivate static final LongAdder codegenEnumEval = new LongAdder(); //count\nprivate static final LongAdder codegenEnumEvalP = new LongAdder(); //count\nprivate static final LongAdder codegenPlanCacheHits = new LongAdder(); //count\n@@ -262,6 +263,9 @@ public class Statistics\npublic static void incrementCodegenEnumAll(long delta) {\ncodegenEnumAll.add(delta);\n}\n+ public static void incrementCodegenEnumAllP(long delta) {\n+ codegenEnumAllP.add(delta);\n+ }\npublic static void incrementCodegenEnumEval(long delta) {\ncodegenEnumEval.add(delta);\n}\n@@ -300,6 +304,9 @@ public class Statistics\npublic static long getCodegenEnumAll() {\nreturn codegenEnumAll.longValue();\n}\n+ public static long getCodegenEnumAllP() {\n+ return codegenEnumAllP.longValue();\n+ }\npublic static long getCodegenEnumEval() {\nreturn codegenEnumEval.longValue();\n}\n@@ -403,6 +410,7 @@ public class Statistics\ncodegenCPlanCompile.reset();\ncodegenClassCompile.reset();\ncodegenEnumAll.reset();\n+ codegenEnumAllP.reset();\ncodegenEnumEval.reset();\ncodegenEnumEvalP.reset();\ncodegenCompileTime.reset();\n@@ -795,8 +803,8 @@ public class Statistics\nif( ConfigurationManager.isCodegenEnabled() ) {\nsb.append(\"Codegen compile (DAG,CP,JC):\\t\" + getCodegenDAGCompile() + \"/\"\n+ getCodegenCPlanCompile() + \"/\" + getCodegenClassCompile() + \".\\n\");\n- sb.append(\"Codegen enum (All,Eval,EvalP):\\t\" + getCodegenEnumAll() + \"/\"\n- + getCodegenEnumEval() + \"/\" + getCodegenEnumEvalP() + \".\\n\");\n+ sb.append(\"Codegen enum (ALLt/p,EVALt/p):\\t\" + getCodegenEnumAll() + \"/\" +\n+ getCodegenEnumAllP() + \"/\" + getCodegenEnumEval() + \"/\" + getCodegenEnumEvalP() + \".\\n\");\nsb.append(\"Codegen compile times (DAG,JC):\\t\" + String.format(\"%.3f\", (double)getCodegenCompileTime()/1000000000) + \"/\" +\nString.format(\"%.3f\", (double)getCodegenClassCompileTime()/1000000000) + \" sec.\\n\");\nsb.append(\"Codegen plan cache hits:\\t\" + getCodegenPlanCacheHits() + \"/\" + getCodegenPlanCacheTotal() + \".\\n\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1905] Extended codegen plan enumeration statistics (all plans) This patch extends the codegen plan enumeration statistics by the number of total plans without partitioning by connected components, i.e., the sum of 2^(|M|) per DAG.
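Note on the record above: a worked instance of the two counters distinguished by the patch, with illustrative sizes. ALL is 2 raised to the total number of materialization points per DAG, while ALLp sums the plans actually enumerated per connected component:

// Worked example: materialization-point sets of sizes |M1|=3 and |M2|=2.
public class EnumStatsExample {
    public static void main(String[] args) {
        long all = 1L << (3 + 2);          // 2^5 = 32 total plans (ALL)
        long allP = (1L << 3) + (1L << 2); // 8 + 4 = 12 enumerated (ALLp)
        System.out.println(all + " vs " + allP);
    }
}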
49,736
14.09.2017 13:20:46
25,200
c6d499d3e27a1842ccf5987ab84c92eee72aa5c2
[MINOR] Refactored the locks to separate out read and write locks Refactoring the locks will avoid future bugs where the developer tries to obtain two write locks or a read lock on a write-locked object, etc. I have also added a debugging utility to track potential memory leaks. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "diff": "@@ -374,7 +374,7 @@ public class ExecutionContext {\n}\n// The lock is added here for an output block\n// so that any block currently in use is not deallocated by eviction on the GPU\n- mo.getGPUObject(getGPUContext(0)).addLock();\n+ mo.getGPUObject(getGPUContext(0)).addWriteLock();\nreturn mo;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java", "diff": "package org.apache.sysml.runtime.instructions.gpu;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.lops.runtime.RunMRJobs;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -26,6 +28,7 @@ import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.instructions.GPUInstructionParser;\nimport org.apache.sysml.runtime.instructions.Instruction;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.runtime.matrix.data.Pair;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\nimport org.apache.sysml.utils.GPUStatistics;\n@@ -47,6 +50,8 @@ public abstract class GPUInstruction extends Instruction {\nMatrixIndexing\n};\n+ private static final Log LOG = LogFactory.getLog(GPUInstruction.class.getName());\n+\n// Memory/conversions\npublic final static String MISC_TIMER_HOST_TO_DEVICE = \"H2D\"; // time spent in bringing data to gpu (from host)\npublic final static String MISC_TIMER_DEVICE_TO_HOST = \"D2H\"; // time spent in bringing data from gpu (to host)\n@@ -191,6 +196,13 @@ public abstract class GPUInstruction extends Instruction {\nif(DMLScript.SYNCHRONIZE_GPU) {\njcuda.runtime.JCuda.cudaDeviceSynchronize();\n}\n+ if(LOG.isDebugEnabled()) {\n+ for(GPUContext gpuCtx : ec.getGPUContexts()) {\n+ if(gpuCtx != null)\n+ gpuCtx.printMemoryInfo(getOpcode());\n+ }\n+ }\n+\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -43,6 +43,7 @@ import java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.LinkedList;\nimport java.util.Map;\n+import java.util.Map.Entry;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -131,6 +132,40 @@ public class GPUContext {\n*/\nprivate final ThreadLocal<JCudaKernels> kernels = new ThreadLocal<>();\n+ /**\n+ * Print information of memory usage.\n+ *\n+ * @param opcode opcode of caller\n+ * @throws DMLRuntimeException if error\n+ */\n+ public void printMemoryInfo(String opcode) throws DMLRuntimeException {\n+ if(LOG.isDebugEnabled()) {\n+ long totalFreeCUDASpace = 0;\n+ for(Entry<Long, LinkedList<Pointer>> kv : freeCUDASpaceMap.entrySet()) {\n+ totalFreeCUDASpace += kv.getKey()*kv.getValue().size();\n+ }\n+ long readLockedAllocatedMemory = 0;\n+ long writeLockedAllocatedMemory = 0;\n+ long unlockedAllocatedMemory = 0;\n+ for(GPUObject gpuObj : allocatedGPUObjects) {\n+ if(gpuObj.readLocks.longValue() > 0)\n+ 
readLockedAllocatedMemory += gpuObj.getSizeOnDevice();\n+ else if(gpuObj.writeLock)\n+ writeLockedAllocatedMemory += gpuObj.getSizeOnDevice();\n+ else\n+ unlockedAllocatedMemory += gpuObj.getSizeOnDevice();\n+ }\n+ long free[] = { 0 };\n+ long total[] = { 0 };\n+ cudaMemGetInfo(free, total);\n+ long gpuFreeMemory = (long) (free[0] * GPU_MEMORY_UTILIZATION_FACTOR);\n+ LOG.debug(opcode + \": Total memory: \" + total[0] + \", Free memory: \" + free[0] + \" (with util factor: \" + gpuFreeMemory + \"), \"\n+ + \"Lazy unfreed memory: \" + totalFreeCUDASpace + \", Locked allocated memory (read/write): \"\n+ + readLockedAllocatedMemory + \"/\" + writeLockedAllocatedMemory + \", \"\n+ + \" Unlocked allocated memory: \" + unlockedAllocatedMemory);\n+ }\n+ }\n+\nprotected GPUContext(int deviceNum) throws DMLRuntimeException {\nthis.deviceNum = deviceNum;\ncudaSetDevice(deviceNum);\n@@ -472,18 +507,19 @@ public class GPUContext {\nCollections.sort(allocatedGPUObjects, new Comparator<GPUObject>() {\n@Override\npublic int compare(GPUObject p1, GPUObject p2) {\n- long p1Val = p1.locks.get();\n- long p2Val = p2.locks.get();\n-\n- if (p1Val > 0 && p2Val > 0) {\n+ if (p1.isLocked() && p2.isLocked()) {\n// Both are locked, so don't sort\nreturn 0;\n- } else if (p1Val > 0 || p2Val > 0) {\n+ } else if (p1.isLocked()) {\n+ // Put the unlocked one to RHS\n+ // a value less than 0 if x < y; and a value greater than 0 if x > y\n+ return -1;\n+ } else if (p2.isLocked()) {\n// Put the unlocked one to RHS\n- return Long.compare(p2Val, p1Val);\n+ // a value less than 0 if x < y; and a value greater than 0 if x > y\n+ return 1;\n} else {\n// Both are unlocked\n-\nif (evictionPolicy == EvictionPolicy.MIN_EVICT) {\nlong p1Size = 0;\nlong p2Size = 0;\n@@ -510,7 +546,7 @@ public class GPUContext {\nwhile (neededSize > getAvailableMemory() && allocatedGPUObjects.size() > 0) {\nGPUObject toBeRemoved = allocatedGPUObjects.get(allocatedGPUObjects.size() - 1);\n- if (toBeRemoved.locks.get() > 0) {\n+ if (toBeRemoved.isLocked()) {\nthrow new DMLRuntimeException(\n\"There is not enough memory on device for this matrix, request (\" + neededSize + \"). 
Allocated GPU objects:\" + allocatedGPUObjects.toString());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java", "diff": "@@ -34,6 +34,7 @@ import static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\nimport java.util.Arrays;\nimport java.util.concurrent.atomic.AtomicLong;\n+import java.util.concurrent.atomic.LongAdder;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -99,9 +100,14 @@ public class GPUObject {\nprotected boolean dirty = false;\n/**\n- * number of read/write locks on this object (this GPUObject is being used in a current instruction)\n+ * number of read locks on this object (this GPUObject is being used in a current instruction)\n*/\n- protected AtomicLong locks = new AtomicLong();\n+ protected LongAdder readLocks = new LongAdder();\n+\n+ /**\n+ * whether write lock on this object (this GPUObject is being used in a current instruction)\n+ */\n+ protected boolean writeLock = false;\n/**\n* Timestamp, needed by {@link GPUContext#evict(long)}\n@@ -132,7 +138,11 @@ public class GPUObject {\nthat.allocateTensorDescriptor(me.tensorShape[0], me.tensorShape[1], me.tensorShape[2], me.tensorShape[3]);\n}\nthat.dirty = me.dirty;\n- that.locks = new AtomicLong(me.locks.get());\n+ // TODO Nakul: Should the locks be cloned here ?\n+ // The only place clone is getting called: LibMatrixCUDA's solve\n+ that.readLocks.reset();\n+ that.writeLock = false;\n+\nthat.timestamp = new AtomicLong(me.timestamp.get());\nthat.isSparse = me.isSparse;\n@@ -618,7 +628,7 @@ public class GPUObject {\ncopyFromHostToDevice(opcode);\ntransferred = true;\n}\n- addLock();\n+ addReadLock();\nif (!isAllocated())\nthrow new DMLRuntimeException(\"Expected device data to be allocated\");\nreturn transferred;\n@@ -664,10 +674,6 @@ public class GPUObject {\nreturn allocated;\n}\n- public void addLock() {\n- locks.addAndGet(1);\n- }\n-\n/**\n* if the data is allocated on the GPU and is dirty, it is copied back to the host memory\n*\n@@ -694,21 +700,50 @@ public class GPUObject {\nreturn copied;\n}\n+ public boolean isLocked() {\n+ return writeLock || readLocks.longValue() > 0;\n+ }\n+\n+ public void addReadLock() throws DMLRuntimeException {\n+ if(writeLock)\n+ throw new DMLRuntimeException(\"Attempting to add a read lock when writeLock=\"+ writeLock);\n+ else\n+ readLocks.increment();\n+ }\n+\n+ public void addWriteLock() throws DMLRuntimeException {\n+ if(readLocks.longValue() > 0)\n+ throw new DMLRuntimeException(\"Attempting to add a write lock when readLocks=\"+ readLocks.longValue());\n+ else if(writeLock)\n+ throw new DMLRuntimeException(\"Attempting to add a write lock when writeLock=\"+ writeLock);\n+ else\n+ writeLock = true;\n+ }\n+\n+ public void releaseReadLock() throws DMLRuntimeException {\n+ readLocks.decrement();\n+ if(readLocks.longValue() < 0)\n+ throw new DMLRuntimeException(\"Attempting to release a read lock when readLocks=\"+ readLocks.longValue());\n+ }\n+\n+ public void releaseWriteLock() throws DMLRuntimeException {\n+ if(writeLock)\n+ writeLock = false;\n+ else\n+ throw new DMLRuntimeException(\"Internal state error : Attempting to release write lock on a GPUObject, which was already released\");\n+ }\n+\n+ public void resetReadWriteLock() {\n+ readLocks.reset();\n+ writeLock = false;\n+ }\n+\n/**\n* Updates the locks depending on the eviction policy selected\n*\n* @throws 
DMLRuntimeException if there is no locked GPU Object or if could not obtain a {@link GPUContext}\n*/\n- private void updateReleaseLocks(int l) throws DMLRuntimeException {\n- int newLocks = (int) locks.addAndGet(l);\n- if (newLocks < 0) {\n- throw new CacheException(\"Internal state error : Invalid number of locks on a GPUObject\");\n- }\n-\n- if(LOG.isTraceEnabled()) {\n- LOG.trace(\"GPU : updateReleaseLocks, new number of locks is \" + newLocks + \", on \" + this + \", GPUContext=\"\n- + getGPUContext());\n- }\n+ private void updateReleaseLocks() throws DMLRuntimeException {\nGPUContext.EvictionPolicy evictionPolicy = getGPUContext().evictionPolicy;\nswitch (evictionPolicy) {\ncase LRU:\n@@ -730,8 +765,8 @@ public class GPUObject {\n* @throws DMLRuntimeException if data is not allocated or if there is no locked GPU Object or if could not obtain a {@link GPUContext}\n*/\npublic void releaseInput() throws DMLRuntimeException {\n- // A read lock is a positive quantity, therefor when the lock is freed, a negative 1 is added\n- updateReleaseLocks(-1);\n+ releaseReadLock();\n+ updateReleaseLocks();\nif (!isAllocated())\nthrow new CacheException(\"Attempting to release an input before allocating it\");\n}\n@@ -742,8 +777,8 @@ public class GPUObject {\n* @throws DMLRuntimeException if data is not allocated or if there is no locked GPU Object or if could not obtain a {@link GPUContext}\n*/\npublic void releaseOutput() throws DMLRuntimeException {\n- // A write lock is a negative quantity, therefore when the lock is freed, a positive number is added\n- updateReleaseLocks(1);\n+ releaseWriteLock();\n+ updateReleaseLocks();\ndirty = true;\nif (!isAllocated())\nthrow new CacheException(\"Attempting to release an output before allocating it\");\n@@ -798,7 +833,7 @@ public class GPUObject {\ncudnnDestroyTensorDescriptor(tensorDescriptor);\ntensorDescriptor = null;\n}\n- locks.set(0);\n+ resetReadWriteLock();\ngetGPUContext().removeRecordedUsage(this);\n}\n@@ -1061,7 +1096,8 @@ public class GPUObject {\nfinal StringBuilder sb = new StringBuilder(\"GPUObject{\");\nsb.append(\", tensorShape=\").append(Arrays.toString(tensorShape));\nsb.append(\", dirty=\").append(dirty);\n- sb.append(\", locks=\").append(locks);\n+ sb.append(\", readLocks=\").append(readLocks.longValue());\n+ sb.append(\", writeLock=\").append(writeLock);\nsb.append(\", sparse? \").append(isSparse);\nsb.append(\", dims=[\").append(mat.getNumRows()).append(\",\").append(mat.getNumColumns()).append(\"]\");\nsb.append('}');\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Refactored the locks to separate out read and write locks - Refactoring the locks will avoid future bugs where the developer tries to obtain two write locks or a read lock on a write-locked object, etc. - I have also added a debugging utility to track potential memory leaks. Closes #664.
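The lock discipline introduced by this commit, many concurrent readers or exactly one writer with loud failures on misuse, can be distilled into a self-contained sketch. The method bodies below mirror the GPUObject logic from the diff, but the class name is illustrative and the synchronized qualifiers are an added assumption to make the sketch safe on its own:

import java.util.concurrent.atomic.LongAdder;

class ReadWriteGuard {
    // count of active readers; LongAdder matches the field type in the diff
    private final LongAdder readLocks = new LongAdder();
    // at most one writer at a time
    private boolean writeLock = false;

    synchronized void addReadLock() {
        if (writeLock)
            throw new IllegalStateException("read lock requested while write-locked");
        readLocks.increment();
    }

    synchronized void addWriteLock() {
        if (writeLock || readLocks.longValue() > 0)
            throw new IllegalStateException("write lock requested while already locked");
        writeLock = true;
    }

    synchronized void releaseReadLock() {
        readLocks.decrement();
        if (readLocks.longValue() < 0)
            throw new IllegalStateException("unbalanced read-lock release");
    }

    synchronized void releaseWriteLock() {
        if (!writeLock)
            throw new IllegalStateException("write lock released twice");
        writeLock = false;
    }

    synchronized boolean isLocked() {
        return writeLock || readLocks.longValue() > 0;
    }
}

Keeping the two lock kinds separate is what lets the new printMemoryInfo debugging utility report read-locked, write-locked, and unlocked GPU memory individually.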
49,768
14.09.2017 14:29:31
25,200
143fa7848e23d86c355f88b5756e3c4afee3cd12
[maven-release-plugin] prepare release v0.1.0-rc1
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>1.0.0-SNAPSHOT</version>\n+ <version>0.1.0</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:[email protected]:apache/systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.1.0-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n" } ]
Java
Apache License 2.0
apache/systemds
[maven-release-plugin] prepare release v0.1.0-rc1
49,738
15.09.2017 01:05:22
25,200
ebb6ea6128b4babc6ffe07e2fc7f9666593a76b1
[SYSTEMML-1908,1771,1770,1769,1739] Fix misc api and compiler issues
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "new_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "diff": "@@ -448,14 +448,14 @@ public class PreparedScript\n//enable requested functions for recompile once\nfor( String fname : fnames ) {\nString fkey = DMLProgram.constructFunctionKey(fnamespace, fname);\n- if( !fgraph.isRecursiveFunction(fkey) ) {\n+ if( fgraph != null && !fgraph.isRecursiveFunction(fkey) ) {\nFunctionProgramBlock fpb = _prog.getFunctionProgramBlock(fnamespace, fname);\nif( fpb != null )\nfpb.setRecompileOnce(true);\nelse\nLOG.warn(\"Failed to enable function recompile for non-existing '\"+fkey+\"'.\");\n}\n- else {\n+ else if( fgraph != null ) {\nLOG.warn(\"Failed to enable function recompile for recursive '\"+fkey+\"'.\");\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextConversionUtil.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextConversionUtil.java", "diff": "@@ -308,12 +308,14 @@ public class MLContextConversionUtil {\npublic static FrameObject binaryBlocksToFrameObject(String variableName, JavaPairRDD<Long, FrameBlock> binaryBlocks,\nFrameMetadata frameMetadata) {\n- MatrixCharacteristics mc = (frameMetadata != null) ? frameMetadata.asMatrixCharacteristics()\n- : new MatrixCharacteristics();\n+ MatrixCharacteristics mc = (frameMetadata != null) ?\n+ frameMetadata.asMatrixCharacteristics() : new MatrixCharacteristics();\n+ ValueType[] schema = (frameMetadata != null) ?\n+ frameMetadata.getFrameSchema().getSchema().toArray(new ValueType[0]) :\n+ UtilFunctions.nCopies((int)mc.getCols(), ValueType.STRING);\nFrameObject frameObject = new FrameObject(OptimizerUtils.getUniqueTempFileName(),\n- new MatrixFormatMetaData(mc, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo),\n- frameMetadata.getFrameSchema().getSchema().toArray(new ValueType[0]));\n+ new MatrixFormatMetaData(mc, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo), schema);\nframeObject.setRDDHandle(new RDDObject(binaryBlocks, variableName));\nreturn frameObject;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/UnaryOp.java", "diff": "@@ -584,7 +584,8 @@ public class UnaryOp extends Hop implements MultiThreadedHop\n}\nif (isGPUEnabled()) {\n- OptimizerUtils.estimateSize(dim1, dim2); // Intermediate memory required to convert sparse to dense\n+ // Intermediate memory required to convert sparse to dense\n+ ret += OptimizerUtils.estimateSize(dim1, dim2);\n}\nreturn ret;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "diff": "@@ -366,7 +366,7 @@ public class TemplateCell extends TemplateBase\nif( h1.isScalar() && h2.isScalar() )\nreturn Long.compare(h1.getHopID(), h2.getHopID());\nreturn (h1.dimsKnown(true) && h2.dimsKnown(true) && h1.getNnz() != h2.getNnz()\n- && HopRewriteUtils.isSparse(h1) || HopRewriteUtils.isSparse(h1)) ?\n+ && (HopRewriteUtils.isSparse(h1) || HopRewriteUtils.isSparse(h2))) ?\nLong.compare(h1.getNnz(), h2.getNnz()) :\nLong.compare(h1.getHopID(), h2.getHopID());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixAppendGPUInstruction.java", "new_path": 
"src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixAppendGPUInstruction.java", "diff": "@@ -39,7 +39,7 @@ public class MatrixAppendGPUInstruction extends GPUInstruction {\nCPOperand output;\nCPOperand input1, input2;\n- AppendCPInstruction.AppendType type;\n+ AppendCPInstruction.AppendType atype;\nprivate MatrixAppendGPUInstruction(Operator op, CPOperand in1, CPOperand in2, CPOperand out,\nAppendCPInstruction.AppendType type, String opcode, String istr) {\n@@ -47,7 +47,7 @@ public class MatrixAppendGPUInstruction extends GPUInstruction {\nthis.output = out;\nthis.input1 = in1;\nthis.input2 = in2;\n- this.type = type;\n+ this.atype = type;\n}\npublic static MatrixAppendGPUInstruction parseInstruction ( String str )\n@@ -86,9 +86,9 @@ public class MatrixAppendGPUInstruction extends GPUInstruction {\nMatrixObject mat1 = getMatrixInputForGPUInstruction(ec, input1.getName());\nMatrixObject mat2 = getMatrixInputForGPUInstruction(ec, input2.getName());\n- if(type == AppendCPInstruction.AppendType.CBIND) {\n+ if(atype == AppendCPInstruction.AppendType.CBIND) {\nLibMatrixCUDA.cbind(ec, ec.getGPUContext(0), getExtendedOpcode(), mat1, mat2, output.getName());\n- } else if (type == AppendCPInstruction.AppendType.RBIND ) {\n+ } else if (atype == AppendCPInstruction.AppendType.RBIND ) {\nLibMatrixCUDA.rbind(ec, ec.getGPUContext(0), getExtendedOpcode(), mat1, mat2, output.getName());\n} else {\nthrow new DMLRuntimeException(\"Unsupported GPU operator:\" + opcode);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1908,1771,1770,1769,1739] Fix misc api and compiler issues
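One of the fixes above, the TemplateCell comparator change, repairs a classic operator-precedence bug: && binds tighter than ||, so the sparsity test escaped the intended conjunction (and the old code also tested h1 twice instead of h1 and h2). A distilled sketch with hypothetical predicate names:

// Old: dimsKnown && nnzDiffer && sparse1 || sparse1
// parses as: (dimsKnown && nnzDiffer && sparse1) || sparse1,
// so any sparse first input forced nnz-based ordering regardless of dims.
static boolean useNnzOrdering(boolean dimsKnown, boolean nnzDiffer,
        boolean sparse1, boolean sparse2) {
    return dimsKnown && nnzDiffer && (sparse1 || sparse2); // fixed grouping
}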
49,768
16.09.2017 11:01:04
25,200
6ea9b33b45e00fb878f3df4649561166cf11d5ab
Automate workaround for maven plugin issue
[ { "change_type": "MODIFY", "old_path": "dev/release/release-build.sh", "new_path": "dev/release/release-build.sh", "diff": "@@ -265,11 +265,14 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\necho \"RELEASE_STAGING_LOCATION=$RELEASE_STAGING_LOCATION\"\necho \"BASE_DIR=$BASE_DIR\"\n- exit 5\n+ # As fix has been added below to update version information exit to update pom file is not needed.\n+ # exit 5\n# Update dev/release/target/release/systemml/pom.xml with similar to following contents which is for 0.13.0 RC1\n# Update <version>0.13.0</version>\n# Update <tag>v0.13.0-rc1</tag>\n+ sed -i .bak \"s|<version>$DEVELOPMENT_VERSION<\\/version>|<version>$RELEASE_VERSION<\\/version>|\" $BASE_DIR/target/release/systemml/pom.xml\n+ sed -i .bak \"s|<tag>HEAD<\\/tag>|<tag>$RELEASE_TAG<\\/tag>|\" $BASE_DIR/target/release/systemml/pom.xml\ncd $RELEASE_WORK_DIR/systemml\n## Rerunning mvn with clean and package goals, as release:prepare changes ordeer for some dependencies like unpack and shade.\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1250] Automate workaround for maven plugin issue
49,738
16.09.2017 20:14:18
25,200
1634239ce19b4d42ed1ec59bfac01cd8777d153a
[MINOR] Fix incorrect setup of 'matrix mult chain' tests
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationAllTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationAllTest.java", "diff": "@@ -82,7 +82,7 @@ public class RewriteElementwiseMultChainOptimizationAllTest extends AutomatedTes\n}\nboolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n- if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\nboolean rewritesOld = OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteElementwiseMultChainOptimizationTest.java", "diff": "@@ -81,7 +81,7 @@ public class RewriteElementwiseMultChainOptimizationTest extends AutomatedTestBa\n}\nboolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n- if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\nboolean rewritesOld = OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteMatrixMultChainOptTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteMatrixMultChainOptTest.java", "diff": "@@ -78,7 +78,7 @@ public class RewriteMatrixMultChainOptTest extends AutomatedTestBase\n}\nboolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n- if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\nboolean rewritesOld = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix incorrect setup of 'matrix mult chain' tests
49,738
17.09.2017 15:05:40
25,200
119893f11c88aaedc0ee6f06f6a2cad72b842cfb
Shuffle-free spark binary reblock for aligned blocks. This patch makes the existing spark binary reblock instruction more adaptive. If the source and target block sizes are aligned, i.e., output blocks can be constructed in a 1:N manner, we now avoid the unnecessary block aggregation, which causes a shuffle of the entire matrix.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ReblockSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ReblockSPInstruction.java", "diff": "@@ -174,8 +174,13 @@ public class ReblockSPInstruction extends UnarySPInstruction {\n//BINARY BLOCK <- BINARY BLOCK (different sizes)\nJavaPairRDD<MatrixIndexes, MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable(input1.getName());\n- JavaPairRDD<MatrixIndexes, MatrixBlock> out =\n- in1.flatMapToPair(new ExtractBlockForBinaryReblock(mc, mcOut));\n+ boolean shuffleFreeReblock = mc.dimsKnown() && mcOut.dimsKnown()\n+ && (mc.getRows() < mcOut.getRowsPerBlock() || mc.getRowsPerBlock()%mcOut.getRowsPerBlock() == 0)\n+ && (mc.getCols() < mcOut.getColsPerBlock() || mc.getColsPerBlock()%mcOut.getColsPerBlock() == 0);\n+\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> out = in1\n+ .flatMapToPair(new ExtractBlockForBinaryReblock(mc, mcOut));\n+ if( !shuffleFreeReblock )\nout = RDDAggregateUtils.mergeByKey(out, false);\n//put output RDD handle into symbol table\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1920] Shuffle-free spark binary reblock for aligned blocks. This patch makes the existing spark binary reblock instruction more adaptive. If the source and target block sizes are aligned, i.e., output blocks can be constructed in a 1:N manner, we now avoid the unnecessary block aggregation, which causes a shuffle of the entire matrix.
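The alignment test added by this commit reduces to a divisibility check between source and target block sizes. A standalone sketch of the predicate; the condition itself is taken from the diff, while the method and parameter names are illustrative:

// An output block can be assembled from whole pieces of input blocks (1:N)
// when the input block size is a multiple of the output block size, or when
// the whole dimension already fits into a single output block.
static boolean shuffleFreeReblock(long rows, long cols,
        int inRowsPerBlock, int inColsPerBlock,
        int outRowsPerBlock, int outColsPerBlock) {
    return (rows < outRowsPerBlock || inRowsPerBlock % outRowsPerBlock == 0)
        && (cols < outColsPerBlock || inColsPerBlock % outColsPerBlock == 0);
}

For example, reblocking 1000x1000 blocks into 500x500 blocks splits each input block into four output blocks, so the flatMapToPair alone suffices and the mergeByKey shuffle is skipped.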
49,717
19.09.2017 14:57:16
25,200
ec5dfda57a42b172886dd5d42bfe3b034b30c7b7
[MINOR] GPU memory leak fix. Changed the list of free pointers to a set of free pointers for the GPU. Changed thread-local CUDA handles to non-thread-local; this assumes there will be one thread per GPU. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/cpp/CMakeLists.txt", "new_path": "src/main/cpp/CMakeLists.txt", "diff": "@@ -29,6 +29,7 @@ option(USE_INTEL_MKL \"Whether to use Intel MKL (Defaults to compiling with Intel\n# Build a shared libraray\nadd_library(systemml SHARED libmatrixdnn.cpp libmatrixmult.cpp systemml.cpp)\n+set_target_properties(systemml PROPERTIES MACOSX_RPATH 1)\nset(MATH_LIBRARIES \"\")\n@@ -72,5 +73,6 @@ if (USE_OPEN_BLAS)\nfind_package(OpenMP REQUIRED)\nset_target_properties(systemml PROPERTIES LINK_FLAGS \"${OpenMP_CXX_FLAGS} ${MATH_LIBRARIES}\")\nelseif(USE_INTEL_MKL)\n- set_target_properties(systemml PROPERTIES LINK_FLAGS ${MATH_LIBRARIES}\")\n+ set_target_properties(systemml PROPERTIES LINK_FLAGS \"${MATH_LIBRARIES}\")\nendif()\n+\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -41,9 +41,11 @@ import java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.Comparator;\nimport java.util.HashMap;\n-import java.util.LinkedList;\n+import java.util.HashSet;\n+import java.util.Iterator;\nimport java.util.Map;\nimport java.util.Map.Entry;\n+import java.util.Set;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -88,83 +90,50 @@ public class GPUContext {\n* active device assigned to this GPUContext instance\n*/\nprivate final int deviceNum;\n- // Invoke cudaMemGetInfo to get available memory information. Useful if GPU is shared among multiple application.\n- public double GPU_MEMORY_UTILIZATION_FACTOR = ConfigurationManager.getDMLConfig()\n- .getDoubleValue(DMLConfig.GPU_MEMORY_UTILIZATION_FACTOR);\n- /**\n- * Map of free blocks allocate on GPU. 
maps size_of_block -> pointer on GPU\n- */\n- private LRUCacheMap<Long, LinkedList<Pointer>> freeCUDASpaceMap = new LRUCacheMap<>();\n- /**\n- * To record size of allocated blocks\n- */\n- private HashMap<Pointer, Long> cudaBlockSizeMap = new HashMap<>();\n- /**\n- * list of allocated {@link GPUObject} instances allocated on {@link GPUContext#deviceNum} GPU\n- * These are matrices allocated on the GPU on which rmvar hasn't been called yet.\n- * If a {@link GPUObject} has more than one lock on it, it cannot be freed\n- * If it has zero locks on it, it can be freed, but it is preferrable to keep it around\n- * so that an extraneous host to dev transfer can be avoided\n- */\n- private ArrayList<GPUObject> allocatedGPUObjects = new ArrayList<>();\n/**\n* cudnnHandle for Deep Neural Network operations on the GPU\n*/\n- private final ThreadLocal<cudnnHandle> cudnnHandle = new ThreadLocal<>();\n+ private cudnnHandle cudnnHandle;\n/**\n* cublasHandle for BLAS operations on the GPU\n*/\n- private final ThreadLocal<cublasHandle> cublasHandle = new ThreadLocal<>();\n+ private cublasHandle cublasHandle;\n/**\n* cusparseHandle for certain sparse BLAS operations on the GPU\n*/\n- private final ThreadLocal<cusparseHandle> cusparseHandle = new ThreadLocal<>();\n+ private cusparseHandle cusparseHandle;\n/**\n* cusolverDnHandle for invoking solve() function on dense matrices on the GPU\n*/\n- private final ThreadLocal<cusolverDnHandle> cusolverDnHandle = new ThreadLocal<>();\n+ private cusolverDnHandle cusolverDnHandle;\n/**\n* cusolverSpHandle for invoking solve() function on sparse matrices on the GPU\n*/\n- private final ThreadLocal<cusolverSpHandle> cusolverSpHandle = new ThreadLocal<>();\n+ private cusolverSpHandle cusolverSpHandle;\n/**\n* to launch custom CUDA kernel, specific to the active GPU for this GPUContext\n*/\n- private final ThreadLocal<JCudaKernels> kernels = new ThreadLocal<>();\n+ private JCudaKernels kernels;\n+ // Invoke cudaMemGetInfo to get available memory information. Useful if GPU is shared among multiple application.\n+ public double GPU_MEMORY_UTILIZATION_FACTOR = ConfigurationManager.getDMLConfig()\n+ .getDoubleValue(DMLConfig.GPU_MEMORY_UTILIZATION_FACTOR);\n/**\n- * Print information of memory usage.\n- *\n- * @param opcode opcode of caller\n- * @throws DMLRuntimeException if error\n+ * Map of free blocks allocate on GPU. 
maps size_of_block -> pointer on GPU\n*/\n- public void printMemoryInfo(String opcode) throws DMLRuntimeException {\n- if(LOG.isDebugEnabled()) {\n- long totalFreeCUDASpace = 0;\n- for(Entry<Long, LinkedList<Pointer>> kv : freeCUDASpaceMap.entrySet()) {\n- totalFreeCUDASpace += kv.getKey()*kv.getValue().size();\n- }\n- long readLockedAllocatedMemory = 0;\n- long writeLockedAllocatedMemory = 0;\n- long unlockedAllocatedMemory = 0;\n- for(GPUObject gpuObj : allocatedGPUObjects) {\n- if(gpuObj.readLocks.longValue() > 0)\n- readLockedAllocatedMemory += gpuObj.getSizeOnDevice();\n- else if(gpuObj.writeLock)\n- writeLockedAllocatedMemory += gpuObj.getSizeOnDevice();\n- else\n- unlockedAllocatedMemory += gpuObj.getSizeOnDevice();\n- }\n- long free[] = { 0 };\n- long total[] = { 0 };\n- cudaMemGetInfo(free, total);\n- long gpuFreeMemory = (long) (free[0] * GPU_MEMORY_UTILIZATION_FACTOR);\n- LOG.debug(opcode + \": Total memory: \" + total[0] + \", Free memory: \" + free[0] + \" (with util factor: \" + gpuFreeMemory + \"), \"\n- + \"Lazy unfreed memory: \" + totalFreeCUDASpace + \", Locked allocated memory (read/write): \"\n- + readLockedAllocatedMemory + \"/\" + writeLockedAllocatedMemory + \", \"\n- + \" Unlocked allocated memory: \" + unlockedAllocatedMemory);\n- }\n- }\n+ private LRUCacheMap<Long, Set<Pointer>> freeCUDASpaceMap = new LRUCacheMap<>();\n+ /**\n+ * To record size of allocated blocks\n+ */\n+ private HashMap<Pointer, Long> cudaBlockSizeMap = new HashMap<>();\n+ /**\n+ * list of allocated {@link GPUObject} instances allocated on {@link GPUContext#deviceNum} GPU\n+ * These are matrices allocated on the GPU on which rmvar hasn't been called yet.\n+ * If a {@link GPUObject} has more than one lock on it, it cannot be freed\n+ * If it has zero locks on it, it can be freed, but it is preferrable to keep it around\n+ * so that an extraneous host to dev transfer can be avoided\n+ */\n+ private ArrayList<GPUObject> allocatedGPUObjects = new ArrayList<>();\nprotected GPUContext(int deviceNum) throws DMLRuntimeException {\nthis.deviceNum = deviceNum;\n@@ -194,49 +163,85 @@ public class GPUContext {\n}\n}\n+ /**\n+ * Returns which device is currently being used.\n+ *\n+ * @return the current device for the calling host thread\n+ */\n+ public static int cudaGetDevice() {\n+ int[] device = new int[1];\n+ JCuda.cudaGetDevice(device);\n+ return device[0];\n+ }\n+\n+ /**\n+ * Print information of memory usage.\n+ *\n+ * @param opcode opcode of caller\n+ * @throws DMLRuntimeException if error\n+ */\n+ public void printMemoryInfo(String opcode) throws DMLRuntimeException {\n+ if (LOG.isDebugEnabled()) {\n+ long totalFreeCUDASpace = 0;\n+ for (Entry<Long, Set<Pointer>> kv : freeCUDASpaceMap.entrySet()) {\n+ totalFreeCUDASpace += kv.getKey() * kv.getValue().size();\n+ }\n+ long readLockedAllocatedMemory = 0;\n+ long writeLockedAllocatedMemory = 0;\n+ long unlockedAllocatedMemory = 0;\n+ for (GPUObject gpuObj : allocatedGPUObjects) {\n+ if (gpuObj.readLocks.longValue() > 0)\n+ readLockedAllocatedMemory += gpuObj.getSizeOnDevice();\n+ else if (gpuObj.writeLock)\n+ writeLockedAllocatedMemory += gpuObj.getSizeOnDevice();\n+ else\n+ unlockedAllocatedMemory += gpuObj.getSizeOnDevice();\n+ }\n+ long free[] = { 0 };\n+ long total[] = { 0 };\n+ cudaMemGetInfo(free, total);\n+ long gpuFreeMemory = (long) (free[0] * GPU_MEMORY_UTILIZATION_FACTOR);\n+ LOG.debug(opcode + \": Total memory: \" + total[0] + \", Free memory: \" + free[0] + \" (with util factor: \"\n+ + gpuFreeMemory + \"), \" + \"Lazy unfreed memory: \" 
+ totalFreeCUDASpace\n+ + \", Locked allocated memory (read/write): \" + readLockedAllocatedMemory + \"/\"\n+ + writeLockedAllocatedMemory + \", \" + \" Unlocked allocated memory: \" + unlockedAllocatedMemory);\n+ }\n+ }\n+\nprivate void initializeCudaLibraryHandles() throws DMLRuntimeException {\n- if (cudnnHandle.get() == null) {\n- cudnnHandle.set(new cudnnHandle());\n- cudnnCreate(cudnnHandle.get());\n+ deleteCudaLibraryHandles();\n+\n+ if (cudnnHandle == null) {\n+ cudnnHandle = new cudnnHandle();\n+ cudnnCreate(cudnnHandle);\n}\n- if (cublasHandle.get() == null) {\n- cublasHandle.set(new cublasHandle());\n- cublasCreate(cublasHandle.get());\n+ if (cublasHandle == null) {\n+ cublasHandle = new cublasHandle();\n+ cublasCreate(cublasHandle);\n}\n// For cublas v2, cublasSetPointerMode tells Cublas whether to expect scalar arguments on device or on host\n// This applies to arguments like \"alpha\" in Dgemm, and \"y\" in Ddot.\n// cublasSetPointerMode(LibMatrixCUDA.cublasHandle, cublasPointerMode.CUBLAS_POINTER_MODE_DEVICE);\n- if (cusparseHandle.get() == null) {\n- cusparseHandle.set(new cusparseHandle());\n- cusparseCreate(cusparseHandle.get());\n+ if (cusparseHandle == null) {\n+ cusparseHandle = new cusparseHandle();\n+ cusparseCreate(cusparseHandle);\n}\n- if (cusolverDnHandle.get() == null) {\n- cusolverDnHandle.set(new cusolverDnHandle());\n- cusolverDnCreate(cusolverDnHandle.get());\n+ if (cusolverDnHandle == null) {\n+ cusolverDnHandle = new cusolverDnHandle();\n+ cusolverDnCreate(cusolverDnHandle);\n}\n- if (cusolverSpHandle.get() == null) {\n- cusolverSpHandle.set(new cusolverSpHandle());\n- cusolverSpCreate(cusolverSpHandle.get());\n+ if (cusolverSpHandle == null) {\n+ cusolverSpHandle = new cusolverSpHandle();\n+ cusolverSpCreate(cusolverSpHandle);\n}\n- if (kernels.get() == null) {\n- kernels.set(new JCudaKernels());\n- }\n+ if (kernels == null) {\n+ kernels = new JCudaKernels();\n}\n-\n- /**\n- * Returns which device is currently being used.\n- *\n- * @return the current device for the calling host thread\n- */\n- public static int cudaGetDevice() {\n- int[] device = new int[1];\n- JCuda.cudaGetDevice(device);\n- return device[0];\n}\n/**\n@@ -301,13 +306,18 @@ public class GPUContext {\nPointer A;\nif (freeCUDASpaceMap.containsKey(size)) {\nif (LOG.isTraceEnabled()) {\n- LOG.trace(\"GPU : in allocate from instruction \" + instructionName + \", found free block of size \" + (size\n+ LOG.trace(\n+ \"GPU : in allocate from instruction \" + instructionName + \", found free block of size \" + (size\n/ 1024.0) + \" Kbytes from previously allocated block on \" + this);\n}\nif (instructionName != null && GPUStatistics.DISPLAY_STATISTICS)\nt0 = System.nanoTime();\n- LinkedList<Pointer> freeList = freeCUDASpaceMap.get(size);\n- A = freeList.pop();\n+ Set<Pointer> freeList = freeCUDASpaceMap.get(size);\n+\n+ Iterator<Pointer> it = freeList.iterator(); // at this point, freeList should have at least one element\n+ A = it.next();\n+ it.remove();\n+\nif (freeList.isEmpty())\nfreeCUDASpaceMap.remove(size);\nif (instructionName != null && GPUStatistics.DISPLAY_STATISTICS)\n@@ -316,8 +326,8 @@ public class GPUContext {\n} else {\nif (LOG.isTraceEnabled()) {\nLOG.trace(\n- \"GPU : in allocate from instruction \" + instructionName + \", allocating new block of size \" + (size\n- / 1024.0) + \" Kbytes on \" + this);\n+ \"GPU : in allocate from instruction \" + instructionName + \", allocating new block of size \" + (\n+ size / 1024.0) + \" Kbytes on \" + this);\n}\nif 
(DMLScript.STATISTICS)\nt0 = System.nanoTime();\n@@ -336,8 +346,8 @@ public class GPUContext {\nif (DMLScript.STATISTICS)\nt1 = System.nanoTime();\nif (LOG.isTraceEnabled()) {\n- LOG.trace(\"GPU : in allocate from instruction \" + instructionName + \", setting block of size \" + (size / 1024.0)\n- + \" Kbytes to zero on \" + this);\n+ LOG.trace(\"GPU : in allocate from instruction \" + instructionName + \", setting block of size \" + (size\n+ / 1024.0) + \" Kbytes to zero on \" + this);\n}\ncudaMemset(A, 0, size);\nif (DMLScript.STATISTICS)\n@@ -395,12 +405,13 @@ public class GPUContext {\nreturn;\nlong t0 = 0;\nif (!cudaBlockSizeMap.containsKey(toFree))\n- throw new RuntimeException(\"ERROR : Internal state corrupted, cache block size map is not aware of a block it trying to free up\");\n+ throw new RuntimeException(\n+ \"ERROR : Internal state corrupted, cache block size map is not aware of a block it trying to free up\");\nlong size = cudaBlockSizeMap.get(toFree);\nif (eager) {\nif (LOG.isTraceEnabled()) {\n- LOG.trace(\"GPU : eagerly freeing cuda memory [ \" + toFree + \" ] for instruction \" + instructionName + \" on \"\n- + this);\n+ LOG.trace(\"GPU : eagerly freeing cuda memory [ \" + toFree + \" ] for instruction \" + instructionName\n+ + \" on \" + this);\n}\nif (DMLScript.STATISTICS)\nt0 = System.nanoTime();\n@@ -417,9 +428,9 @@ public class GPUContext {\nif (LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : lazily freeing cuda memory for instruction \" + instructionName + \" on \" + this);\n}\n- LinkedList<Pointer> freeList = freeCUDASpaceMap.get(size);\n+ Set<Pointer> freeList = freeCUDASpaceMap.get(size);\nif (freeList == null) {\n- freeList = new LinkedList<Pointer>();\n+ freeList = new HashSet<>();\nfreeCUDASpaceMap.put(size, freeList);\n}\nif (freeList.contains(toFree))\n@@ -483,14 +494,18 @@ public class GPUContext {\nGPUStatistics.cudaEvictionCount.add(1);\n// Release the set of free blocks maintained in a GPUObject.freeCUDASpaceMap\n// to free up space\n- LRUCacheMap<Long, LinkedList<Pointer>> lruCacheMap = freeCUDASpaceMap;\n+ LRUCacheMap<Long, Set<Pointer>> lruCacheMap = freeCUDASpaceMap;\nwhile (lruCacheMap.size() > 0) {\nif (neededSize <= getAvailableMemory())\nbreak;\n- Map.Entry<Long, LinkedList<Pointer>> toFreeListPair = lruCacheMap.removeAndGetLRUEntry();\n- LinkedList<Pointer> toFreeList = toFreeListPair.getValue();\n+ Map.Entry<Long, Set<Pointer>> toFreeListPair = lruCacheMap.removeAndGetLRUEntry();\n+ Set<Pointer> toFreeList = toFreeListPair.getValue();\nLong size = toFreeListPair.getKey();\n- Pointer toFree = toFreeList.pop();\n+\n+ Iterator<Pointer> it = toFreeList.iterator(); // at this point, freeList should have at least one element\n+ Pointer toFree = it.next();\n+ it.remove();\n+\nif (toFreeList.isEmpty())\nlruCacheMap.remove(size);\ncudaFreeHelper(instructionName, toFree, true);\n@@ -548,7 +563,8 @@ public class GPUContext {\nGPUObject toBeRemoved = allocatedGPUObjects.get(allocatedGPUObjects.size() - 1);\nif (toBeRemoved.isLocked()) {\nthrow new DMLRuntimeException(\n- \"There is not enough memory on device for this matrix, request (\" + neededSize + \"). Allocated GPU objects:\" + allocatedGPUObjects.toString());\n+ \"There is not enough memory on device for this matrix, request (\" + neededSize\n+ + \"). 
Allocated GPU objects:\" + allocatedGPUObjects.toString());\n}\nif (toBeRemoved.dirty) {\ntoBeRemoved.copyFromDeviceToHost();\n@@ -697,7 +713,7 @@ public class GPUContext {\n* @return cudnnHandle for current thread\n*/\npublic cudnnHandle getCudnnHandle() {\n- return cudnnHandle.get();\n+ return cudnnHandle;\n}\n/**\n@@ -706,7 +722,7 @@ public class GPUContext {\n* @return cublasHandle for current thread\n*/\npublic cublasHandle getCublasHandle() {\n- return cublasHandle.get();\n+ return cublasHandle;\n}\n/**\n@@ -715,7 +731,7 @@ public class GPUContext {\n* @return cusparseHandle for current thread\n*/\npublic cusparseHandle getCusparseHandle() {\n- return cusparseHandle.get();\n+ return cusparseHandle;\n}\n/**\n@@ -724,7 +740,7 @@ public class GPUContext {\n* @return cusolverDnHandle for current thread\n*/\npublic cusolverDnHandle getCusolverDnHandle() {\n- return cusolverDnHandle.get();\n+ return cusolverDnHandle;\n}\n/**\n@@ -733,7 +749,7 @@ public class GPUContext {\n* @return cusolverSpHandle for current thread\n*/\npublic cusolverSpHandle getCusolverSpHandle() {\n- return cusolverSpHandle.get();\n+ return cusolverSpHandle;\n}\n/**\n@@ -742,7 +758,7 @@ public class GPUContext {\n* @return {@link JCudaKernels} for current thread\n*/\npublic JCudaKernels getKernels() {\n- return kernels.get();\n+ return kernels;\n}\n/**\n@@ -755,11 +771,34 @@ public class GPUContext {\nLOG.trace(\"GPU : this context was destroyed, this = \" + this.toString());\n}\nclearMemory();\n- cudnnDestroy(cudnnHandle.get());\n- cublasDestroy(cublasHandle.get());\n- cusparseDestroy(cusparseHandle.get());\n- cusolverDnDestroy(cusolverDnHandle.get());\n- cusolverSpDestroy(cusolverSpHandle.get());\n+\n+ deleteCudaLibraryHandles();\n+ }\n+\n+ /**\n+ * Deletes CUDA library handles\n+ */\n+ private void deleteCudaLibraryHandles() {\n+ if (cudnnHandle != null)\n+ cudnnDestroy(cudnnHandle);\n+\n+ if (cublasHandle != null)\n+ cublasDestroy(cublasHandle);\n+\n+ if (cusparseHandle != null)\n+ cusparseDestroy(cusparseHandle);\n+\n+ if (cusolverDnHandle != null)\n+ cusolverDnDestroy(cusolverDnHandle);\n+\n+ if (cusolverSpHandle != null)\n+ cusolverSpDestroy(cusolverSpHandle);\n+\n+ cudnnHandle = null;\n+ cublasHandle = null;\n+ cusparseHandle = null;\n+ cusolverDnHandle = null;\n+ cusolverSpHandle = null;\n}\n/**\n@@ -817,7 +856,7 @@ public class GPUContext {\n}\n// garbage collect all temporarily allocated spaces\n- for (LinkedList<Pointer> l : freeCUDASpaceMap.values()) {\n+ for (Set<Pointer> l : freeCUDASpaceMap.values()) {\nfor (Pointer p : l) {\ncudaFreeHelper(p, true);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] GPU memory leak fix - Changed the list of free pointers to a set of free pointers for the GPU - Changed thread-local CUDA handles to non-thread-local. This assumes there will be one thread per GPU. Closes #665
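Since Java's Set has no pop operation, the commit extracts an arbitrary element through an iterator. The recurring three-line pattern from the diff, factored into a generic helper (the helper itself is illustrative, not part of the codebase):

import java.util.Iterator;
import java.util.Set;

// Removes and returns an arbitrary element; the set must be non-empty.
static <T> T popAny(Set<T> set) {
    Iterator<T> it = set.iterator();
    T elem = it.next();
    it.remove(); // remove through the iterator, not set.remove(elem) mid-iteration
    return elem;
}

Compared to the previous LinkedList, a HashSet also makes the duplicate-pointer check on the free list constant-time and rules out enqueueing the same pointer twice.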
49,717
20.09.2017 14:10:15
25,200
c14682b9cb2d02e959ed35a87288f54b7b51da3c
[MINOR] Refer to correct version of protoc-jar maven plugin in pom
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<plugin>\n<groupId>com.github.os72</groupId>\n<artifactId>protoc-jar-maven-plugin</artifactId>\n- <version>3.4.0.1-SNAPSHOT</version>\n+ <version>3.0.0-b2.1</version>\n<executions>\n<execution>\n<id>caffe-sources</id>\n<goal>run</goal>\n</goals>\n<configuration>\n- <protocVersion>3.4.0</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.0.0 -->\n+ <protocVersion>3.0.0</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.0.0 -->\n<inputDirectories>\n<include>src/main/proto/tensorflow</include>\n</inputDirectories>\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Refer to correct version of protoc-jar maven plugin in pom
49,717
20.09.2017 15:04:11
25,200
aa15197ec2e1a1e81c9031a91ec0791284978f27
[MINOR] minor tweaks to the performance test script
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_misc.py", "new_path": "scripts/perftest/python/utils_misc.py", "diff": "@@ -72,8 +72,6 @@ def split_config_args(args):\nif 'config' in args.keys():\nif args['config'] is not None:\nsystemml_args_dict['-config'] = args['config']\n- else:\n- systemml_args_dict['-config'] = ''\nif 'gpu' in args.keys():\nif args['gpu'] is not None:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] minor tweaks to the performance test script
49,738
22.09.2017 00:05:35
25,200
9a286a2130fe09ecf1f345ab902be26f7b7c0328
[HOTFIX][SYSTEMML-1925] Fix missing update of hard-coded testsuite conf. This patch fixes the hard-coded scratch_space and local_tmp_dir replacement in our test suite, which I missed during the rework of configuration parameters.
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java", "new_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java", "diff": "@@ -874,12 +874,12 @@ public abstract class AutomatedTestBase\n// Create a SystemML config file for this test case based on default template\n// from src/test/config or derive from custom configuration provided by test.\nString configTemplate = FileUtils.readFileToString(getConfigTemplateFile(), \"UTF-8\");\n-\nString localTemp = curLocalTempDir.getPath();\n- String configContents = configTemplate.replace(\"<scratch>scratch_space</scratch>\",\n- String.format(\"<scratch>%s/scratch_space</scratch>\", localTemp));\n- configContents = configContents.replace(\"<localtmpdir>/tmp/systemml</localtmpdir>\",\n- String.format(\"<localtmpdir>%s/localtmp</localtmpdir>\", localTemp));\n+ String configContents = configTemplate\n+ .replace(createXMLElement(DMLConfig.SCRATCH_SPACE, \"scratch_space\"),\n+ createXMLElement(DMLConfig.SCRATCH_SPACE, localTemp+\"/scratch_space\"))\n+ .replace(createXMLElement(DMLConfig.LOCAL_TMP_DIR, \"/tmp/systemml\"),\n+ createXMLElement(DMLConfig.LOCAL_TMP_DIR, localTemp+\"/localtmp\"));\nFileUtils.write(getCurConfigFile(), configContents, \"UTF-8\");\n@@ -892,7 +892,9 @@ public abstract class AutomatedTestBase\nTestUtils.clearDirectory(DEBUG_TEMP_DIR + baseDirectory + INPUT_DIR);\n}\n-\n+ public String createXMLElement(String tagName, String value) {\n+ return String.format(\"<%s>%s</%s>\",tagName, value, tagName);\n+ }\n/**\n* <p>\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-1925] Fix missing update of hard-coded testsuite conf. This patch fixes the hard-coded scratch_space and local_tmp_dir replacement in our test suite, which I missed during the rework of configuration parameters.
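The new createXMLElement helper keeps the template replacement in sync with the DMLConfig tag constants instead of hard-coded strings. A quick standalone check of the mechanics; the "scratch" tag name is taken from the old hard-coded string, and the temp path is illustrative:

static String createXMLElement(String tagName, String value) {
    return String.format("<%s>%s</%s>", tagName, value, tagName);
}

public static void main(String[] args) {
    String template = "<scratch>scratch_space</scratch>";
    String patched = template.replace(
        createXMLElement("scratch", "scratch_space"),
        createXMLElement("scratch", "/tmp/test/scratch_space"));
    System.out.println(patched); // <scratch>/tmp/test/scratch_space</scratch>
}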
49,737
22.09.2017 10:51:44
25,200
317f2189c585e2dd37d847f5e2e32d97f8b31a60
Functionalize PCA. Changes include wrapping the PCA computation in a function call. Improve docs and add default values in genRandData4PCA.dml. Closes
[ { "change_type": "MODIFY", "old_path": "scripts/datagen/genRandData4PCA.dml", "new_path": "scripts/datagen/genRandData4PCA.dml", "diff": "#\n#-------------------------------------------------------------\n-/*\n-Synthetic data generator for PCA.\n--> 3 hidden dimensions (V1, V2, V3)\n--> generates only \"dense\" data\n-\n----------------------------------\n- Parameters\n----------------------------------\n-$R = #rows\n-$C = #columns\n-$OUT = output file path on HDFS\n-$FMT = output format\n----------------------------------\n-hadoop jar SystemML.jar -f genRandData4PCA.dml -nvargs R=1000000 C=1000 DATA=/user/biuser/pcaData.mtx FMT=csv\n----------------------------------\n-*/\n-\n-FMT = ifdef($FMT,\"binary\"); # default output format\n-\n-# number of categorical attributes.. numC <= C\n-R = $R;\n-C = $C;\n+#\n+# Synthetic data generator for PCA\n+# 3 hidden dimensions (V1, V2, V3)\n+# generates only \"dense\" data\n+#\n+# INPUT PARAMETERS:\n+# --------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# --------------------------------------------------------------------------------------------\n+# R Int 10000 Number of rows\n+# C Int 1000 Number of categorical attributes\n+# OUT String --- Location (on HDFS) to store the generated dataset\n+# FMT String \"csv\" Matrix output format, usually \"text\", \"csv\" or \"binary\"\n+# --------------------------------------------------------------------------------------------\n+#\n+# Example:\n+# hadoop jar SystemML.jar -f genRandData4PCA.dml -nvargs R=1000000 C=1000 OUT=/user/biuser/pcaData.mtx FMT=csv\n+\n+R = ifdef ($R, 10000)\n+C = ifdef ($C, 1000)\n+FMT = ifdef ($FMT, \"csv\");\n# Modofied version of the procedure from Zou et.al., \"Sparse Principal Component Analysis\", 2006.\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/PCA.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#\n+# This script performs Principal Component Analysis (PCA) on the given input data.\n+#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# INPUT String --- Location to read the matrix A of feature vectors\n+# K Int --- Indicates dimension of the new vector space constructed from eigen vectors\n+# CENTER Int 0 Indicates whether or not to center data\n+# SCALE Int 0 Indicates whether or not to scale data\n+# OFMT String --- Output data format\n+# PROJDATA Int 0 This argument indicates if the data should be projected or not\n+# MODEL String --- Location to already existing model: eigenvectors and eigenvalues\n+# OUTPUT String / Location to write output matrices (covariance matrix, new basis vectors,\n+# and data projected onto new basis vectors)\n+# hadoop jar SystemML.jar -f PCA.dml -nvargs INPUT=INPUT_DIR/pca-1000x1000\n+# OUTPUT=OUTPUT_DIR/pca-1000x1000-model PROJDATA=1 CENTER=1 SCALE=1\n+# ---------------------------------------------------------------------------------------------\n+\n+A = read($INPUT);\n+K = ifdef($K, ncol(A));\n+ofmt = ifdef($OFMT, \"CSV\");\n+projectData = ifdef($PROJDATA,0);\n+model = ifdef($MODEL,\"\");\n+center = ifdef($CENTER,0);\n+scale = ifdef($SCALE,0);\n+output = ifdef($OUTPUT,\"/\");\n+\n+# reuse existing model to project data\n+if (model != \"\") {\n+ evec_dominant = read(model+\"/dominant.eigen.vectors\");\n+ }else{\n+ model = output;\n+}\n+\n+PCA = function(matrix[double] A, integer K, string ofmt, integer projectData, string model, integer center, integer scale, string output)\n+ return(matrix[double] eval_dominant, matrix[double] evec_dominant) {\n+\n+ evec_dominant = matrix(0,cols=1,rows=1);\n+\n+ N = nrow(A);\n+ D = ncol(A);\n+\n+ # perform z-scoring (centering and scaling)\n+ if (center == 1) {\n+ cm = colMeans(A);\n+ A = A - cm;\n+ }\n+ if (scale == 1) {\n+ cvars = (colSums (A^2));\n+ if (center == 1){\n+ cm = colMeans(A);\n+ cvars = (cvars - N*(cm^2))/(N-1);\n+ }\n+ Azscored = (A)/sqrt(cvars);\n+ A = Azscored;\n+ }\n+\n+ # co-variance matrix\n+ mu = colSums(A)/N;\n+ C = (t(A) %*% A)/(N-1) - (N/(N-1))*t(mu) %*% mu;\n+\n+\n+ # compute eigen vectors and values\n+ [evalues, evectors] = eigen(C);\n+\n+ decreasing_Idx = order(target=evalues,by=1,decreasing=TRUE,index.return=TRUE);\n+ diagmat = table(seq(1,D),decreasing_Idx);\n+ # sorts eigenvalues by decreasing order\n+ evalues = diagmat %*% evalues;\n+ # sorts eigenvectors column-wise in the order of decreasing eigenvalues\n+ evectors = evectors %*% diagmat;\n+\n+\n+ # select K dominant eigen vectors\n+ nvec = ncol(evectors);\n+\n+ eval_dominant = evalues[1:K, 1];\n+ evec_dominant = evectors[,1:K];\n+\n+}\n+\n+[eval_dominant, evec_dominant] = PCA(A, K, ofmt, projectData, model, center, scale, output)\n+\n+# the square root of eigenvalues\n+eval_stdev_dominant = sqrt(eval_dominant);\n+write(eval_stdev_dominant, model+\"/dominant.eigen.standard.deviations\", format=ofmt);\n+write(eval_dominant, model+\"/dominant.eigen.values\", format=ofmt);\n+write(evec_dominant, model+\"/dominant.eigen.vectors\", format=ofmt);\n+\n+# Construct new data set by treating computed dominant eigenvectors as the basis vectors\n+if (projectData == 1 | 
model != \"\"){\n+ newA = A %*% evec_dominant;\n+ write(newA, output+\"/projected.data\", format=ofmt);\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-493] Functionalize PCA. Changes include wrapping the PCA computation in a function call. Improve docs and add default values in genRandData4PCA.dml. Closes #653.
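The covariance in the new PCA script is computed with a shifted-data identity instead of explicitly centering A first. With the row vector of column means \mu = \mathrm{colSums}(A)/N, the script's expression

C = \frac{A^{\top} A}{N-1} - \frac{N}{N-1}\,\mu^{\top}\mu

equals the usual sample covariance \frac{(A-\mathbf{1}\mu)^{\top}(A-\mathbf{1}\mu)}{N-1}, because (A-\mathbf{1}\mu)^{\top}(A-\mathbf{1}\mu) = A^{\top}A - N\,\mu^{\top}\mu. The K eigenvectors of C with the largest eigenvalues then form the new basis onto which the data is optionally projected.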
49,738
27.09.2017 23:49:39
25,200
b5ef21fdcad73852d878ac519a0959092393af20
[MINOR] Tuning multi-threaded codegen ops (sparse-safe par thresholds)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java", "diff": "@@ -116,10 +116,6 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nif( inputs==null || inputs.size() < 1 )\nthrow new RuntimeException(\"Invalid input arguments.\");\n- if( getTotalInputNnz(inputs) < PAR_NUMCELL_THRESHOLD ) {\n- k = 1; //serial execution\n- }\n-\n//input preparation\nMatrixBlock a = inputs.get(0);\nSideInput[] b = prepInputMatrices(inputs);\n@@ -131,6 +127,12 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nboolean sparseSafe = isSparseSafe() || (b.length == 0\n&& genexec( 0, b, scalars, m, n, 0, 0 ) == 0);\n+ long inputSize = sparseSafe ?\n+ getTotalInputNnz(inputs) : getTotalInputSize(inputs);\n+ if( inputSize < PAR_NUMCELL_THRESHOLD ) {\n+ k = 1; //serial execution\n+ }\n+\ndouble ret = 0;\nif( k <= 1 ) //SINGLE-THREADED\n{\n@@ -199,10 +201,6 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nif( inputs==null || inputs.size() < 1 || out==null )\nthrow new RuntimeException(\"Invalid input arguments.\");\n- if( getTotalInputNnz(inputs) < PAR_NUMCELL_THRESHOLD ) {\n- k = 1; //serial execution\n- }\n-\n//input preparation\nMatrixBlock a = inputs.get(0);\nSideInput[] b = prepInputMatrices(inputs);\n@@ -214,6 +212,12 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nboolean sparseSafe = isSparseSafe() || (b.length == 0\n&& genexec( 0, b, scalars, m, n, 0, 0 ) == 0);\n+ long inputSize = sparseSafe ?\n+ getTotalInputNnz(inputs) : getTotalInputSize(inputs);\n+ if( inputSize < PAR_NUMCELL_THRESHOLD ) {\n+ k = 1; //serial execution\n+ }\n+\n//result allocation and preparations\nboolean sparseOut = _type == CellType.NO_AGG\n&& sparseSafe && a.isInSparseFormat();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java", "diff": "@@ -85,7 +85,9 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\nif( inputs==null || inputs.size() < 1 )\nthrow new RuntimeException(\"Invalid input arguments.\");\n- if( getTotalInputNnz(inputs) < PAR_NUMCELL_THRESHOLD ) {\n+ long inputSize = isSparseSafe() ?\n+ getTotalInputNnz(inputs) : getTotalInputSize(inputs);\n+ if( inputSize < PAR_NUMCELL_THRESHOLD ) {\nk = 1; //serial execution\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java", "diff": "@@ -156,6 +156,11 @@ public abstract class SpoofOperator implements Serializable\nreturn inputs.stream().mapToLong(in -> in.getNonZeros()).sum();\n}\n+ public static long getTotalInputSize(ArrayList<MatrixBlock> inputs) {\n+ return inputs.stream().mapToLong(\n+ in -> (long)in.getNumRows() * in.getNumColumns()).sum();\n+ }\n+\n//abstraction for safely accessing sideways matrices without the need\n//to allocate empty matrices as dense, see prepInputMatrices\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "diff": "@@ -170,7 +170,7 @@ public abstract class SpoofRowwise extends SpoofOperator\n{\n//redirect to serial 
execution\nif( k <= 1 || (_type.isColumnAgg() && !LibMatrixMult.checkParColumnAgg(inputs.get(0), k, false))\n- || getTotalInputNnz(inputs) < PAR_NUMCELL_THRESHOLD ) {\n+ || getTotalInputSize(inputs) < PAR_NUMCELL_THRESHOLD ) {\nreturn execute(inputs, scalarObjects, out);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Tuning multi-threaded codegen ops (sparse-safe par thresholds)
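The tuning above switches the size measure with sparse-safety before comparing against the parallelization threshold: sparse-safe operators only touch non-zeros, so nnz is the right proxy for work, while sparse-unsafe operators touch every cell. The decision in isolation (method and parameter names are illustrative):

// Decide the degree of parallelism for a fused operator.
static int decideParallelism(boolean sparseSafe, long totalNnz,
        long totalCells, int k, long parThreshold) {
    long inputSize = sparseSafe ? totalNnz : totalCells; // work proxy
    return (inputSize < parThreshold) ? 1 : k; // serial for small inputs
}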
49,736
28.09.2017 12:14:28
28,800
0cb2f7f68cb644c7fda6666bc84782e82069fb34
[MINOR] Added time spent in jcuda sync to fine-grained statistics. Also added a force-accelerator flag to LibMatrixCuDNN to skip the worst-case memory budget restriction.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java", "diff": "@@ -61,6 +61,7 @@ public abstract class GPUInstruction extends Instruction {\npublic final static String MISC_TIMER_ROW_TO_COLUMN_MAJOR = \"r2c\"; // time spent in converting data from row major to column major\npublic final static String MISC_TIMER_COLUMN_TO_ROW_MAJOR = \"c2r\"; // time spent in converting data from column major to row major\npublic final static String MISC_TIMER_OBJECT_CLONE = \"clone\";// time spent in cloning (deep copying) a GPUObject instance\n+ public final static String MISC_TIMER_CUDA_SYNC = \"sync\"; // time spent in device sync\npublic final static String MISC_TIMER_CUDA_FREE = \"f\"; // time spent in calling cudaFree\npublic final static String MISC_TIMER_ALLOCATE = \"a\"; // time spent to allocate memory on gpu\n@@ -198,7 +199,11 @@ public abstract class GPUInstruction extends Instruction {\nthrows DMLRuntimeException\n{\nif(DMLScript.SYNCHRONIZE_GPU) {\n+ long t0 = GPUStatistics.DISPLAY_STATISTICS ? System.nanoTime() : 0;\njcuda.runtime.JCuda.cudaDeviceSynchronize();\n+ if(GPUStatistics.DISPLAY_STATISTICS) {\n+ GPUStatistics.maintainCPMiscTimes(getExtendedOpcode(), GPUInstruction.MISC_TIMER_CUDA_SYNC, System.nanoTime() - t0);\n+ }\n}\nif(LOG.isDebugEnabled()) {\nfor(GPUContext gpuCtx : ec.getGPUContexts()) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -67,6 +67,7 @@ import jcuda.jcudnn.cudnnTensorDescriptor;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n@@ -153,7 +154,8 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nlong CHW = C*H*W; long KPQ = K*P*Q; long CRS = C*R*S;\nlong NCHW = N*CHW; long NKPQ = N*KPQ; long KCRS = K*CRS;\n- if(NCHW < maxNumDoublesOfCuDNNTensor && NKPQ < maxNumDoublesOfCuDNNTensor && KCRS < maxNumDoublesOfCuDNNTensor) {\n+ if(DMLScript.FORCE_ACCELERATOR ||\n+ (NCHW < maxNumDoublesOfCuDNNTensor && NKPQ < maxNumDoublesOfCuDNNTensor && KCRS < maxNumDoublesOfCuDNNTensor)) {\n// Filter and output are accounted as dense in the memory estimation for conv2d\ndouble overhead = isInSparseFormat(gCtx, filter) ? OptimizerUtils.estimateSizeExactSparsity(K, CRS, 1.0) : 0;\noverhead += isInSparseFormat(gCtx, image) ? 
OptimizerUtils.estimateSizeExactSparsity(N, CHW, 1.0) : 0;\n@@ -161,7 +163,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nPointer filterPointer = getDensePointerForCuDNN(gCtx, filter, instName);\nPointer dstPointer = getDensePointerForCuDNN(gCtx, outputBlock, instName);\n- if(overhead <= intermediateMemoryBudget) {\n+ if(DMLScript.FORCE_ACCELERATOR || overhead <= intermediateMemoryBudget) {\n// Perform all-input all-channel conv2d\nPointer imagePointer = getDensePointerForCuDNN(gCtx, image, instName);\ncudnnConv2d(gCtx, instName, imagePointer, filterPointer, dstPointer, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n@@ -346,11 +348,12 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nlong CHW = C*H*W; long KPQ = K*P*Q; long CRS = C*R*S;\nlong NCHW = N*CHW; long NKPQ = N*KPQ; long KCRS = K*CRS;\n- if(NCHW < maxNumDoublesOfCuDNNTensor && NKPQ < maxNumDoublesOfCuDNNTensor && KCRS < maxNumDoublesOfCuDNNTensor) {\n+ if(DMLScript.FORCE_ACCELERATOR ||\n+ (NCHW < maxNumDoublesOfCuDNNTensor && NKPQ < maxNumDoublesOfCuDNNTensor && KCRS < maxNumDoublesOfCuDNNTensor)) {\nPointer dwPointer = getDensePointerForCuDNN(gCtx, outputBlock, instName);\ndouble overhead = isInSparseFormat(gCtx, image) ? OptimizerUtils.estimateSizeExactSparsity(N, CHW, 1.0) : 0;\noverhead += isInSparseFormat(gCtx, dout) ? OptimizerUtils.estimateSizeExactSparsity(N, KPQ, 1.0) : 0;\n- if(overhead <= intermediateMemoryBudget) {\n+ if(DMLScript.FORCE_ACCELERATOR || overhead <= intermediateMemoryBudget) {\n// Perform all-input all-channel conv2dBackwardFilter\nPointer imagePointer = getDensePointerForCuDNN(gCtx, image, instName);\nPointer doutPointer = getDensePointerForCuDNN(gCtx, dout, instName);\n@@ -502,13 +505,14 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nlong CHW = C*H*W; long KPQ = K*P*Q; long CRS = C*R*S;\nlong NCHW = N*CHW; long NKPQ = N*KPQ; long KCRS = K*CRS;\n- if(NCHW < maxNumDoublesOfCuDNNTensor && NKPQ < maxNumDoublesOfCuDNNTensor && KCRS < maxNumDoublesOfCuDNNTensor) {\n+ if(DMLScript.FORCE_ACCELERATOR ||\n+ (NCHW < maxNumDoublesOfCuDNNTensor && NKPQ < maxNumDoublesOfCuDNNTensor && KCRS < maxNumDoublesOfCuDNNTensor)) {\n// Filter and output are accounted as dense in the memory estimation for conv2dBackwardData\ndouble overhead = isInSparseFormat(gCtx, filter) ? OptimizerUtils.estimateSizeExactSparsity(K, CRS, 1.0) : 0;\noverhead += isInSparseFormat(gCtx, dout) ? OptimizerUtils.estimateSizeExactSparsity(N, KPQ, 1.0) : 0;\nPointer filterPointer = getDensePointerForCuDNN(gCtx, filter, instName);\nPointer dstPointer = getDensePointerForCuDNN(gCtx, output, instName);\n- if(overhead <= intermediateMemoryBudget) {\n+ if(DMLScript.FORCE_ACCELERATOR || overhead <= intermediateMemoryBudget) {\n// Perform all-input all-channel conv2dBackwardData\nPointer doutPointer = getDensePointerForCuDNN(gCtx, dout, instName);\ncudnnConv2dBackwardData(gCtx, instName, filterPointer, doutPointer, dstPointer,\n@@ -638,11 +642,12 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nlong CHW = C*H*W; long CPQ = C*P*Q;\nlong NCHW = N*CHW; long NCPQ = N*CPQ;\n- if(NCHW < maxNumDoublesOfCuDNNTensor && NCPQ < maxNumDoublesOfCuDNNTensor) {\n+ if(DMLScript.FORCE_ACCELERATOR ||\n+ (NCHW < maxNumDoublesOfCuDNNTensor && NCPQ < maxNumDoublesOfCuDNNTensor)) {\n// Filter and output are accounted as dense in the memory estimation for conv2dBackwardData\nlong overhead = isInSparseFormat(gCtx, image) ? 
OptimizerUtils.estimateSizeExactSparsity(N, CHW, 1.0) : 0;\nPointer y = getDensePointerForCuDNN(gCtx, outputBlock, instName);\n- if(overhead <= intermediateMemoryBudget) {\n+ if(DMLScript.FORCE_ACCELERATOR || overhead <= intermediateMemoryBudget) {\nPointer x = getDensePointerForCuDNN(gCtx, image, instName);\ncudnnTensorDescriptor xDesc = allocateTensorDescriptor(gCtx, image, N, C, H, W);\ncudnnMaxpooling(gCtx, instName, x, xDesc, y, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n@@ -780,12 +785,13 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nlong CHW = C*H*W; long CPQ = C*P*Q;\nlong NCHW = N*CHW; long NCPQ = N*CPQ;\n- if(NCHW < maxNumDoublesOfCuDNNTensor && NCPQ < maxNumDoublesOfCuDNNTensor) {\n+ if(DMLScript.FORCE_ACCELERATOR ||\n+ (NCHW < maxNumDoublesOfCuDNNTensor && NCPQ < maxNumDoublesOfCuDNNTensor)) {\n// Filter and output are accounted as dense in the memory estimation for conv2dBackwardData\nlong overhead = isInSparseFormat(gCtx, image) ? OptimizerUtils.estimateSizeExactSparsity(N, CHW, 1.0) : 0;\noverhead += isInSparseFormat(gCtx, dout) ? OptimizerUtils.estimateSizeExactSparsity(N, CPQ, 1.0) : 0;\nPointer dx = getDensePointerForCuDNN(gCtx, outputBlock, instName);\n- if(overhead <= intermediateMemoryBudget) {\n+ if(DMLScript.FORCE_ACCELERATOR || overhead <= intermediateMemoryBudget) {\nPointer x = getDensePointerForCuDNN(gCtx, image, instName);\nPointer dy = getDensePointerForCuDNN(gCtx, dout, instName);\ncudnnMaxpoolingBackward(gCtx, instName, x, dy, dx, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] [SYSTEMML-446] Added time spent in jcuda sync to fine-grained statistics - Also added force accelerator flag to LibMatrixCuDNN to skip worst-case memory budget restriction.
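The timing guard in the patch above follows a standard hot-path pattern: sample System.nanoTime() only when fine-grained statistics are enabled, so the disabled case pays nothing beyond a branch. Below is a minimal, self-contained Java sketch of that pattern; DISPLAY_STATISTICS, the timer map, and deviceSynchronize() are hypothetical stand-ins for GPUStatistics and the JCuda call, which are not reproduced here.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    public class SyncTimingSketch {
        // Hypothetical stand-in for GPUStatistics.DISPLAY_STATISTICS.
        static final boolean DISPLAY_STATISTICS = true;
        // Accumulated nanoseconds per miscellaneous timer name.
        static final ConcurrentHashMap<String, LongAdder> MISC_TIMERS = new ConcurrentHashMap<>();

        static void maintainMiscTimer(String name, long nanos) {
            MISC_TIMERS.computeIfAbsent(name, k -> new LongAdder()).add(nanos);
        }

        // Placeholder for jcuda.runtime.JCuda.cudaDeviceSynchronize().
        static void deviceSynchronize() throws InterruptedException {
            Thread.sleep(1); // simulate the blocking device sync
        }

        public static void main(String[] args) throws InterruptedException {
            // Sample the clock only when statistics are enabled, as in the patch.
            long t0 = DISPLAY_STATISTICS ? System.nanoTime() : 0;
            deviceSynchronize();
            if (DISPLAY_STATISTICS)
                maintainMiscTimer("sync", System.nanoTime() - t0);
            System.out.println("sync ns: " + MISC_TIMERS.get("sync").sum());
        }
    }

LongAdder keeps the accumulation cheap under concurrent updates, which matches how such per-opcode miscellaneous timers are typically aggregated.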
49,717
28.09.2017 15:28:17
25,200
a725b2d2ebf6dcb56f4edb68376c3849c8991b27
[MINOR] Bug fixes & features added in perf test & spark-submit Python scripts
[ { "change_type": "MODIFY", "old_path": "bin/systemml-spark-submit.py", "new_path": "bin/systemml-spark-submit.py", "diff": "@@ -92,6 +92,7 @@ def spark_submit_entry(master, driver_memory, num_executors, executor_memory,\nml_options.append(stats)\nif gpu is not None:\nml_options.append('-gpu')\n+ if gpu is not 'no_option':\nml_options.append(gpu)\nif len(ml_options) < 1:\n@@ -99,18 +100,27 @@ def spark_submit_entry(master, driver_memory, num_executors, executor_memory,\n# stats, explain, target_jars\ncmd_spark = [spark_path, '--class', 'org.apache.sysml.api.DMLScript',\n- '--master', master, '--driver-memory', driver_memory,\n- '--num-executors', num_executors, '--executor-memory', executor_memory,\n- '--executor-cores', executor_cores, '--conf', default_conf,\n+ '--master', master,\n+ '--driver-memory', driver_memory,\n+ '--conf', default_conf,\n'--jars', cuda_jars, systemml_jars]\n+ if num_executors is not None:\n+ cmd_spark = cmd_spark + ['--num-executors', num_executors]\n+\n+ if executor_memory is not None:\n+ cmd_spark = cmd_spark + ['--executor-memory', executor_memory]\n+\n+ if executor_cores is not None:\n+ cmd_spark = cmd_spark + ['--executor-cores', executor_cores]\n+\ncmd_system_ml = ['-config', default_config,\n'-exec', 'hybrid_spark', '-f', script_file, ' '.join(ml_options)]\ncmd = cmd_spark + cmd_system_ml\n# Debug\n- # print(' '.join(cmd))\n+ print(' '.join(cmd))\nreturn_code = os.system(' '.join(cmd))\nreturn return_code\n@@ -120,10 +130,10 @@ if __name__ == '__main__':\ndescription='System-ML Spark Submit Script')\n# SPARK-SUBMIT Options\ncparser.add_argument('--master', default='local[*]', help='local, yarn-client, yarn-cluster', metavar='')\n- cparser.add_argument('--driver-memory', default='5G', help='Memory for driver (e.g. 512M)', metavar='')\n- cparser.add_argument('--num-executors', default='2', help='Number of executors to launch', metavar='')\n- cparser.add_argument('--executor-memory', default='2G', help='Memory per executor', metavar='')\n- cparser.add_argument('--executor-cores', default='1', help='Number of cores', metavar='')\n+ cparser.add_argument('--driver-memory', default='8G', help='Memory for driver (e.g. 
512M, 1G)', metavar='')\n+ cparser.add_argument('--num-executors', nargs=1, help='Number of executors to launch', metavar='')\n+ cparser.add_argument('--executor-memory', nargs=1, help='Memory per executor', metavar='')\n+ cparser.add_argument('--executor-cores', nargs=1, help='Number of executor cores', metavar='')\ncparser.add_argument('--conf', help='Spark configuration file', nargs='+', metavar='')\n# SYSTEM-ML Options\n@@ -138,7 +148,7 @@ if __name__ == '__main__':\nmetavar='')\ncparser.add_argument('-gpu', help='uses CUDA instructions when reasonable, '\n'set <force> option to skip conservative memory estimates '\n- 'and use GPU wherever possible', nargs='?')\n+ 'and use GPU wherever possible', nargs='?', const='no_option')\ncparser.add_argument('-f', required=True, help='specifies dml/pydml file to execute; '\n'path can be local/hdfs/gpfs', metavar='')\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/datagen.py", "new_path": "scripts/perftest/python/datagen.py", "diff": "@@ -25,7 +25,7 @@ from os.path import join\nfrom utils_misc import split_rowcol, config_writer, mat_type_check\n# This file contains configuration settings for data generation\n-DATA_FORMAT = 'csv'\n+DATA_FORMAT = 'binary'\nMATRIX_TYPE_DICT = {'dense': '0.9',\n'sparse': '0.01'}\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/predict.py", "new_path": "scripts/perftest/python/predict.py", "diff": "@@ -26,7 +26,7 @@ from utils_misc import config_writer, mat_type_check\nfrom utils_fs import relevant_folders\n# Contains configuration setting for predicting\n-DATA_FORMAT = 'csv'\n+DATA_FORMAT = 'binary'\ndef m_svm_predict(save_folder_name, datagen_dir, train_dir, predict_dir, config_dir):\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -141,7 +141,7 @@ def algorithm_workflow(algo, exec_type, config_path, dml_file_name, action_mode,\nreturn exit_flag_success\n-def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, config_dir, mode, temp_dir):\n+def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, config_dir, mode, temp_dir, file_system_type):\n\"\"\"\nThis function is the entry point for performance testing\n@@ -168,6 +168,9 @@ def perf_test_entry(family, algo, exec_type, mat_type, mat_shape, config_dir, mo\ntemp_dir: String\nLocation to store all output files created during perf test\n+\n+ file_system_type: String\n+\n\"\"\"\n# algos to run is a list of tuples with\n# [(m-svm, binomial), (m-svm, multinomial)...]\n@@ -275,6 +278,7 @@ if __name__ == '__main__':\nmat_type = ['dense', 'sparse', 'all']\nworkload = ['data-gen', 'train', 'predict']\nexecution_mode = ['hybrid_spark', 'singlenode']\n+ file_system_type = ['hdfs', 'local']\n# Default Arguments\ndefault_mat_shape = ['10k_100']\n@@ -308,7 +312,6 @@ if __name__ == '__main__':\n'spark.driver.extraJavaOptions=\\\"-Xms20g -Xmn2g\\\"'\n-\n# Argparse Module\ncparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\ndescription='SystemML Performance Test Script')\n@@ -335,8 +338,12 @@ if __name__ == '__main__':\ncparser.add_argument('--mode', default=workload,\nhelp='space separated list of types of workloads to run (available: data-gen, train, predict)',\nmetavar='', choices=workload, nargs='+')\n- # Change this to temp-dir\n- cparser.add_argument('--temp-dir', help='define the file system to work on', metavar='')\n+ cparser.add_argument('--temp-dir', help='the path on 
the file system to place the working temporary directory at',\n+ metavar='')\n+ cparser.add_argument('--file-system-type', choices=file_system_type, metavar='',\n+ help='file system for temp directory, '\n+ 'supported types are \\'hdfs\\' for hybrid_spark and \\'local\\' for standalone;'\n+ 'default for hybrid_spark is \\'hdfs\\' and for standalone is \\'local\\'')\n# Configuration Options\ncparser.add_argument('-stats', help='Monitor and report caching/recompilation statistics, '\n@@ -347,7 +354,7 @@ if __name__ == '__main__':\ncparser.add_argument('-config', help='System-ML configuration file (e.g SystemML-config.xml)', metavar='')\ncparser.add_argument('-gpu', help='uses CUDA instructions when reasonable, '\n'set <force> option to skip conservative memory estimates '\n- 'and use GPU wherever possible', nargs='?')\n+ 'and use GPU wherever possible', nargs='?', const='no_option')\n# Spark Configuration Option\ncparser.add_argument('--master', help='local, yarn-client, yarn-cluster', metavar='')\ncparser.add_argument('--driver-memory', help='Memory for driver (e.g. 512M)', metavar='')\n@@ -371,7 +378,7 @@ if __name__ == '__main__':\nperftest_args_dict, systemml_args_dict, backend_args_dict = split_config_args(all_arg_dict)\n# temp_dir hdfs / local path check\n- perftest_args_dict['temp_dir'] = get_default_dir(args.temp_dir, args.exec_type, default_config_dir)\n+ perftest_args_dict['temp_dir'] = get_default_dir(args.file_system_type, args.temp_dir, args.exec_type, default_config_dir)\n# default_mat_type validity\nif len(args.mat_type) > 2:\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/train.py", "new_path": "scripts/perftest/python/train.py", "diff": "@@ -27,7 +27,7 @@ from functools import reduce\nfrom utils_fs import relevant_folders\n# Contains configuration setting for training\n-DATA_FORMAT = 'csv'\n+DATA_FORMAT = 'binary'\ndef binomial_m_svm_train(save_folder_name, datagen_dir, train_dir, config_dir):\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_misc.py", "new_path": "scripts/perftest/python/utils_misc.py", "diff": "@@ -54,30 +54,28 @@ def split_config_args(args):\nperftest_args_dict['filename'] = args['filename']\nperftest_args_dict['mode'] = args['mode']\nperftest_args_dict['temp_dir'] = args['temp_dir']\n+ perftest_args_dict['file_system_type'] = args['file_system_type']\nsystemml_args_dict = {}\n- if 'stats' in args.keys():\nif args['stats'] is not None:\nsystemml_args_dict['-stats'] = args['stats']\nelse:\nsystemml_args_dict['-stats'] = ''\n- if 'explain' in args.keys():\nif args['explain'] is not None:\nsystemml_args_dict['-explain'] = args['explain']\nelse:\nsystemml_args_dict['-explain'] = ''\n- if 'config' in args.keys():\nif args['config'] is not None:\nsystemml_args_dict['-config'] = args['config']\n- if 'gpu' in args.keys():\nif args['gpu'] is not None:\n- systemml_args_dict['-gpu'] = args['gpu']\n- else:\n+ if args['gpu'] == 'no_option':\nsystemml_args_dict['-gpu'] = ''\n+ else:\n+ systemml_args_dict['-gpu'] = args['gpu']\nbackend_args_dict = {}\nexec_type = args['exec_type']\n@@ -373,8 +371,9 @@ def mat_type_check(current_family, matrix_types, dense_algos):\nreturn current_type\n-def get_default_dir(temp_dir, exec_mode, config_dir):\n+def get_default_dir(file_system_type, temp_dir, exec_mode, config_dir):\n\"\"\"\n+ file_system_type: String\ntemp_dir: String\nexec_mode: String\nconfig_dir: String\n@@ -390,6 +389,7 @@ def get_default_dir(temp_dir, exec_mode, config_dir):\nreturn temp_dir\nif exec_mode == 
'hybrid_spark':\n+ if file_system_type == 'hdfs':\ncmd = ['hdfs', 'getconf', '-confKey', 'fs.default.name']\nhdfs_base = subprocess_exec(' '.join(cmd), extract='hdfs_base')\n@@ -404,3 +404,8 @@ def get_default_dir(temp_dir, exec_mode, config_dir):\nelse:\nhdfs_home = join(hdfs_base, 'user', getpass.getuser(), temp_dir)\nreturn hdfs_home\n+ else:\n+ if temp_dir is None:\n+ return config_dir\n+ if temp_dir is not None:\n+ return temp_dir\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Bug fixes & features added in perf test & spark-submit Python scripts
49,717
28.09.2017 15:43:44
25,200
e4c74eda67ca4596fcdbae77603514259c1b6e10
[HOTFIX] minor bug fix in perf test suite
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -378,6 +378,12 @@ if __name__ == '__main__':\nperftest_args_dict, systemml_args_dict, backend_args_dict = split_config_args(all_arg_dict)\n# temp_dir hdfs / local path check\n+ if args.file_system_type is None:\n+ if args.exec_type == 'hybrid_spark':\n+ args.file_system_type = 'hdfs'\n+ else:\n+ args.file_system_type = 'local'\n+\nperftest_args_dict['temp_dir'] = get_default_dir(args.file_system_type, args.temp_dir, args.exec_type, default_config_dir)\n# default_mat_type validity\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] minor bug fix in perf test suite
49,717
29.09.2017 14:47:49
25,200
5dce90b3bbf120a053233f0b49eb6fa7c0ddfebf
Changed pom.xml to use the latest protoc-jar This uses the latest protoc-jar-maven-plugin to get the protoc compiler for ppc Closes
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<plugin>\n<groupId>com.github.os72</groupId>\n<artifactId>protoc-jar-maven-plugin</artifactId>\n- <version>3.0.0-b2.1</version>\n+ <version>3.4.0.1</version>\n<executions>\n<execution>\n<id>caffe-sources</id>\n<goal>run</goal>\n</goals>\n<configuration>\n- <protocVersion>2.5.0</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.0.0 -->\n+ <protocVersion>2.6.1</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.4.0 -->\n<inputDirectories>\n<include>src/main/proto/caffe</include>\n</inputDirectories>\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1875] Changed pom.xml to use the latest protoc-jar - This uses the latest protoc-jar-maven-plugin to get the protoc compiler for ppc Closes #676
49,738
01.10.2017 15:50:05
25,200
8ed251668f4d33bd5dbdf3c3acdc874f41a48f41
Configurable codegen optimizer (w/ default cost-based) This patch makes the (already pluggable) codegen optimizer configurable from outside of SystemML, which serves for testing purposes and as a fallback in case the default cost-based optimizer causes problems.
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<!-- set the codegen java compiler (auto, janino, javac) -->\n<sysml.codegen.compiler>auto</sysml.codegen.compiler>\n+ <!-- set the codegen optimizer (fuse_all, fuse_no_redundancy, fuse_cost_based_v2) -->\n+ <sysml.codegen.compiler>fuse_cost_based_v2</sysml.codegen.compiler>\n+\n<!-- if codegen.enabled, enables source code caching of fused operators -->\n<sysml.codegen.plancache>true</sysml.codegen.plancache>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.codegen.SpoofCompiler.CompilerType;\n+import org.apache.sysml.hops.codegen.SpoofCompiler.PlanSelector;\nimport org.apache.sysml.lops.Compression;\nimport org.apache.sysml.parser.ParseException;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -77,6 +78,7 @@ public class DMLConfig\npublic static final String NATIVE_BLAS = \"sysml.native.blas\";\npublic static final String CODEGEN = \"sysml.codegen.enabled\"; //boolean\npublic static final String CODEGEN_COMPILER = \"sysml.codegen.compiler\"; //see SpoofCompiler.CompilerType\n+ public static final String CODEGEN_OPTIMIZER = \"sysml.codegen.optimizer\"; //see SpoofCompiler.PlanSelector\npublic static final String CODEGEN_PLANCACHE = \"sysml.codegen.plancache\"; //boolean\npublic static final String CODEGEN_LITERALS = \"sysml.codegen.literals\"; //1..heuristic, 2..always\n@@ -125,6 +127,7 @@ public class DMLConfig\n_defaultVals.put(COMPRESSED_LINALG, Compression.CompressConfig.AUTO.name() );\n_defaultVals.put(CODEGEN, \"false\" );\n_defaultVals.put(CODEGEN_COMPILER, CompilerType.AUTO.name() );\n+ _defaultVals.put(CODEGEN_COMPILER, PlanSelector.FUSE_COST_BASED_V2.name() );\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n_defaultVals.put(NATIVE_BLAS, \"none\" );\n@@ -416,7 +419,7 @@ public class DMLConfig\nYARN_APPMASTER, YARN_APPMASTERMEM, YARN_MAPREDUCEMEM,\nCP_PARALLEL_OPS, CP_PARALLEL_IO, NATIVE_BLAS,\nCOMPRESSED_LINALG,\n- CODEGEN, CODEGEN_COMPILER, CODEGEN_PLANCACHE, CODEGEN_LITERALS,\n+ CODEGEN, CODEGEN_COMPILER, CODEGEN_OPTIMIZER, CODEGEN_PLANCACHE, CODEGEN_LITERALS,\nEXTRA_GPU_STATS, EXTRA_DNN_STATS, EXTRA_FINEGRAINED_STATS, STATS_MAX_WRAP_LEN,\nAVAILABLE_GPUS, SYNCHRONIZE_GPU, EAGER_CUDA_FREE\n};\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java", "diff": "@@ -108,12 +108,12 @@ public class SpoofCompiler\n//internal configuration flags\npublic static boolean LDEBUG = false;\npublic static CompilerType JAVA_COMPILER = CompilerType.JANINO;\n+ public static PlanSelector PLAN_SEL_POLICY = PlanSelector.FUSE_COST_BASED_V2;\npublic static IntegrationType INTEGRATION = IntegrationType.RUNTIME;\npublic static final boolean RECOMPILE_CODEGEN = true;\npublic static final boolean PRUNE_REDUNDANT_PLANS = true;\npublic static PlanCachePolicy PLAN_CACHE_POLICY = PlanCachePolicy.CSLH;\npublic static final int PLAN_CACHE_SIZE = 1024; //max 1K classes\n- public static final PlanSelector PLAN_SEL_POLICY = PlanSelector.FUSE_COST_BASED_V2;\npublic enum CompilerType {\nAUTO,\n@@ -486,6 +486,13 
@@ public class SpoofCompiler\n}\n}\n+ public static void setConfiguredPlanSelector() {\n+ DMLConfig conf = ConfigurationManager.getDMLConfig();\n+ String optimizer = conf.getTextValue(DMLConfig.CODEGEN_OPTIMIZER);\n+ PlanSelector type = PlanSelector.valueOf(optimizer.toUpperCase());\n+ PLAN_SEL_POLICY = type;\n+ }\n+\npublic static void setExecTypeSpecificJavaCompiler() {\nDMLConfig conf = ConfigurationManager.getDMLConfig();\nString compiler = conf.getTextValue(DMLConfig.CODEGEN_COMPILER);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -297,6 +297,7 @@ public class DMLTranslator\nSpoofCompiler.PLAN_CACHE_POLICY = PlanCachePolicy.get(\ndmlconf.getBooleanValue(DMLConfig.CODEGEN_PLANCACHE),\ndmlconf.getIntValue(DMLConfig.CODEGEN_LITERALS)==2);\n+ SpoofCompiler.setConfiguredPlanSelector();\nSpoofCompiler.setExecTypeSpecificJavaCompiler();\nif( SpoofCompiler.INTEGRATION==IntegrationType.HOPS )\ncodgenHopsDAG(dmlp);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1943] Configurable codegen optimizer (w/ default cost-based) This patch makes the (already pluggable) codegen optimizer configurable from outside of SystemML, which serves for testing purposes and as a fallback in case the default cost-based optimizer causes problems.
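The new setConfiguredPlanSelector in the patch above reduces to a case-insensitive enum lookup over the configured optimizer name. The sketch below shows that idiom in isolation; the CONFIG map is a hypothetical stand-in for DMLConfig.getTextValue, and the try/catch fallback is an illustrative defensive variant, not part of the actual patch (which lets an unknown value surface as an error).

    import java.util.Map;

    public class PlanSelectorConfigSketch {
        enum PlanSelector { FUSE_ALL, FUSE_NO_REDUNDANCY, FUSE_COST_BASED_V2 }

        // Hypothetical stand-in for DMLConfig.getTextValue(DMLConfig.CODEGEN_OPTIMIZER).
        static final Map<String, String> CONFIG =
            Map.of("sysml.codegen.optimizer", "fuse_cost_based_v2");

        static PlanSelector configuredSelector() {
            String optimizer = CONFIG.getOrDefault(
                "sysml.codegen.optimizer", "fuse_cost_based_v2");
            try {
                // Same idiom as the patch: case-insensitive lookup into the enum.
                return PlanSelector.valueOf(optimizer.toUpperCase());
            } catch (IllegalArgumentException e) {
                // Illustrative fallback only; the patch itself does not catch this.
                return PlanSelector.FUSE_COST_BASED_V2;
            }
        }

        public static void main(String[] args) {
            System.out.println("codegen optimizer: " + configuredSelector());
        }
    }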
49,738
01.10.2017 20:04:39
25,200
c27c488bef54887d549792c4cf6532d95c3f5c58
Fix codegen fuse_all optimizer and consolidation This patch fixes special cases of row operations that caused the fuse_all optimizer to fail on Kmeans. Furthermore, this also includes a cleanup that consolidates the fuse-all plan selection used by the fuse_all and both cost-based optimizers.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -127,7 +127,7 @@ public class DMLConfig\n_defaultVals.put(COMPRESSED_LINALG, Compression.CompressConfig.AUTO.name() );\n_defaultVals.put(CODEGEN, \"false\" );\n_defaultVals.put(CODEGEN_COMPILER, CompilerType.AUTO.name() );\n- _defaultVals.put(CODEGEN_COMPILER, PlanSelector.FUSE_COST_BASED_V2.name() );\n+ _defaultVals.put(CODEGEN_OPTIMIZER, PlanSelector.FUSE_COST_BASED_V2.name() );\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n_defaultVals.put(NATIVE_BLAS, \"none\" );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofFusedOp.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofFusedOp.java", "diff": "@@ -42,6 +42,7 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\nROW_DIMS,\nCOLUMN_DIMS_ROWS,\nCOLUMN_DIMS_COLS,\n+ RANK_DIMS_COLS,\nSCALAR,\nMULTI_SCALAR,\nROW_RANK_DIMS, // right wdivmm, row mm\n@@ -163,6 +164,12 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\ncase COLUMN_DIMS_COLS:\nret = new long[]{1, mc.getCols(), -1};\nbreak;\n+ case RANK_DIMS_COLS: {\n+ MatrixCharacteristics mc2 = memo.getAllInputStats(getInput().get(1));\n+ if( mc2.dimsKnown() )\n+ ret = new long[]{1, mc2.getCols(), -1};\n+ break;\n+ }\ncase INPUT_DIMS:\nret = new long[]{mc.getRows(), mc.getCols(), -1};\nbreak;\n@@ -219,6 +226,10 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\nsetDim1(1);\nsetDim2(getInput().get(0).getDim2());\nbreak;\n+ case RANK_DIMS_COLS:\n+ setDim1(1);\n+ setDim2(getInput().get(1).getDim2());\n+ break;\ncase INPUT_DIMS:\nsetDim1(getInput().get(0).getDim1());\nsetDim2(getInput().get(0).getDim2());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeRow.java", "diff": "@@ -159,6 +159,7 @@ public class CNodeRow extends CNodeTpl\ncase COL_AGG_T: return SpoofOutputDimsType.COLUMN_DIMS_ROWS; //column vector\ncase COL_AGG_B1: return SpoofOutputDimsType.COLUMN_RANK_DIMS;\ncase COL_AGG_B1_T: return SpoofOutputDimsType.COLUMN_RANK_DIMS_T;\n+ case COL_AGG_B1R: return SpoofOutputDimsType.RANK_DIMS_COLS;\ndefault:\nthrow new RuntimeException(\"Unsupported row type: \"+_type.toString());\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelection.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelection.java", "diff": "@@ -34,6 +34,9 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class PlanSelection\n{\n+ private static final BasicPlanComparator BASE_COMPARE = new BasicPlanComparator();\n+ private final TypedPlanComparator _typedCompare = new TypedPlanComparator();\n+\nprivate final HashMap<Long, List<MemoTableEntry>> _bestPlans =\nnew HashMap<Long, List<MemoTableEntry>>();\nprivate final HashSet<VisitMark> _visited = new HashSet<VisitMark>();\n@@ -84,6 +87,49 @@ public abstract class PlanSelection\n_visited.add(new VisitMark(hopID, type));\n}\n+ protected void rSelectPlansFuseAll(CPlanMemoTable memo, Hop current, TemplateType currentType, HashSet<Long> partition)\n+ {\n+ if( isVisited(current.getHopID(), currentType)\n+ || (partition!=null && !partition.contains(current.getHopID())) )\n+ return;\n+\n+ //step 1: prune subsumed plans of same type\n+ 
if( memo.contains(current.getHopID()) ) {\n+ HashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n+ List<MemoTableEntry> hopP = memo.get(current.getHopID());\n+ for( MemoTableEntry e1 : hopP )\n+ for( MemoTableEntry e2 : hopP )\n+ if( e1 != e2 && e1.subsumes(e2) )\n+ rmSet.add(e2);\n+ memo.remove(current, rmSet);\n+ }\n+\n+ //step 2: select plan for current path\n+ MemoTableEntry best = null;\n+ if( memo.contains(current.getHopID()) ) {\n+ if( currentType == null ) {\n+ best = memo.get(current.getHopID()).stream()\n+ .filter(p -> isValid(p, current))\n+ .min(BASE_COMPARE).orElse(null);\n+ }\n+ else {\n+ _typedCompare.setType(currentType);\n+ best = memo.get(current.getHopID()).stream()\n+ .filter(p -> p.type==currentType || p.type==TemplateType.CELL)\n+ .min(_typedCompare).orElse(null);\n+ }\n+ addBestPlan(current.getHopID(), best);\n+ }\n+\n+ //step 3: recursively process children\n+ for( int i=0; i< current.getInput().size(); i++ ) {\n+ TemplateType pref = (best!=null && best.isPlanRef(i))? best.type : null;\n+ rSelectPlansFuseAll(memo, current.getInput().get(i), pref, partition);\n+ }\n+\n+ setVisited(current.getHopID(), currentType);\n+ }\n+\n/**\n* Basic plan comparator to compare memo table entries with regard to\n* a pre-defined template preference order and the number of references.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseAll.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseAll.java", "diff": "package org.apache.sysml.hops.codegen.opt;\nimport java.util.ArrayList;\n-import java.util.Comparator;\nimport java.util.Map.Entry;\n-import java.util.HashSet;\nimport java.util.List;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntry;\n-import org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\n/**\n* This plan selection heuristic aims for maximal fusion, which\n@@ -43,52 +40,10 @@ public class PlanSelectionFuseAll extends PlanSelection\npublic void selectPlans(CPlanMemoTable memo, ArrayList<Hop> roots) {\n//pruning and collection pass\nfor( Hop hop : roots )\n- rSelectPlans(memo, hop, null);\n+ rSelectPlansFuseAll(memo, hop, null, null);\n//take all distinct best plans\nfor( Entry<Long, List<MemoTableEntry>> e : getBestPlans().entrySet() )\nmemo.setDistinct(e.getKey(), e.getValue());\n}\n-\n- private void rSelectPlans(CPlanMemoTable memo, Hop current, TemplateType currentType)\n- {\n- if( isVisited(current.getHopID(), currentType) )\n- return;\n-\n- //step 1: prune subsumed plans of same type\n- if( memo.contains(current.getHopID()) ) {\n- HashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n- List<MemoTableEntry> hopP = memo.get(current.getHopID());\n- for( MemoTableEntry e1 : hopP )\n- for( MemoTableEntry e2 : hopP )\n- if( e1 != e2 && e1.subsumes(e2) )\n- rmSet.add(e2);\n- memo.remove(current, rmSet);\n- }\n-\n- //step 2: select plan for current path\n- MemoTableEntry best = null;\n- if( memo.contains(current.getHopID()) ) {\n- if( currentType == null ) {\n- best = memo.get(current.getHopID()).stream()\n- .filter(p -> isValid(p, current))\n- .min(new BasicPlanComparator()).orElse(null);\n- }\n- else {\n- best = memo.get(current.getHopID()).stream()\n- .filter(p -> p.type==currentType || p.type==TemplateType.CELL)\n- .min(Comparator.comparing(p -> 7-((p.type==currentType)?4:0)-p.countPlanRefs()))\n- .orElse(null);\n- }\n- 
addBestPlan(current.getHopID(), best);\n- }\n-\n- //step 3: recursively process children\n- for( int i=0; i< current.getInput().size(); i++ ) {\n- TemplateType pref = (best!=null && best.isPlanRef(i))? best.type : null;\n- rSelectPlans(memo, current.getInput().get(i), pref);\n- }\n-\n- setVisited(current.getHopID(), currentType);\n- }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBased.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBased.java", "diff": "@@ -510,49 +510,6 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nvisited.add(current.getHopID());\n}\n- private void rSelectPlansFuseAll(CPlanMemoTable memo, Hop current, TemplateType currentType, HashSet<Long> partition)\n- {\n- if( isVisited(current.getHopID(), currentType)\n- || !partition.contains(current.getHopID()) )\n- return;\n-\n- //step 1: prune subsumed plans of same type\n- if( memo.contains(current.getHopID()) ) {\n- HashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n- List<MemoTableEntry> hopP = memo.get(current.getHopID());\n- for( MemoTableEntry e1 : hopP )\n- for( MemoTableEntry e2 : hopP )\n- if( e1 != e2 && e1.subsumes(e2) )\n- rmSet.add(e2);\n- memo.remove(current, rmSet);\n- }\n-\n- //step 2: select plan for current path\n- MemoTableEntry best = null;\n- if( memo.contains(current.getHopID()) ) {\n- if( currentType == null ) {\n- best = memo.get(current.getHopID()).stream()\n- .filter(p -> isValid(p, current))\n- .min(new BasicPlanComparator()).orElse(null);\n- }\n- else {\n- best = memo.get(current.getHopID()).stream()\n- .filter(p -> p.type==currentType || p.type==TemplateType.CELL)\n- .min(Comparator.comparing(p -> 7-((p.type==currentType)?4:0)-p.countPlanRefs()))\n- .orElse(null);\n- }\n- addBestPlan(current.getHopID(), best);\n- }\n-\n- //step 3: recursively process children\n- for( int i=0; i< current.getInput().size(); i++ ) {\n- TemplateType pref = (best!=null && best.isPlanRef(i))? 
best.type : null;\n- rSelectPlansFuseAll(memo, current.getInput().get(i), pref, partition);\n- }\n-\n- setVisited(current.getHopID(), currentType);\n- }\n-\nprivate static boolean[] createAssignment(int len, int pos) {\nboolean[] ret = new boolean[len];\nint tmp = pos;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/opt/PlanSelectionFuseCostBasedV2.java", "diff": "@@ -98,8 +98,6 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\nprivate static final IDSequence COST_ID = new IDSequence();\nprivate static final TemplateRow ROW_TPL = new TemplateRow();\n- private static final BasicPlanComparator BASE_COMPARE = new BasicPlanComparator();\n- private final TypedPlanComparator _typedCompare = new TypedPlanComparator();\n@Override\npublic void selectPlans(CPlanMemoTable memo, ArrayList<Hop> roots)\n@@ -729,49 +727,6 @@ public class PlanSelectionFuseCostBasedV2 extends PlanSelection\nvisited.add(current.getHopID());\n}\n- private void rSelectPlansFuseAll(CPlanMemoTable memo, Hop current, TemplateType currentType, HashSet<Long> partition)\n- {\n- if( isVisited(current.getHopID(), currentType)\n- || !partition.contains(current.getHopID()) )\n- return;\n-\n- //step 1: prune subsumed plans of same type\n- if( memo.contains(current.getHopID()) ) {\n- HashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n- List<MemoTableEntry> hopP = memo.get(current.getHopID());\n- for( MemoTableEntry e1 : hopP )\n- for( MemoTableEntry e2 : hopP )\n- if( e1 != e2 && e1.subsumes(e2) )\n- rmSet.add(e2);\n- memo.remove(current, rmSet);\n- }\n-\n- //step 2: select plan for current path\n- MemoTableEntry best = null;\n- if( memo.contains(current.getHopID()) ) {\n- if( currentType == null ) {\n- best = memo.get(current.getHopID()).stream()\n- .filter(p -> isValid(p, current))\n- .min(BASE_COMPARE).orElse(null);\n- }\n- else {\n- _typedCompare.setType(currentType);\n- best = memo.get(current.getHopID()).stream()\n- .filter(p -> p.type==currentType || p.type==TemplateType.CELL)\n- .min(_typedCompare).orElse(null);\n- }\n- addBestPlan(current.getHopID(), best);\n- }\n-\n- //step 3: recursively process children\n- for( int i=0; i< current.getInput().size(); i++ ) {\n- TemplateType pref = (best!=null && best.isPlanRef(i))? 
best.type : null;\n- rSelectPlansFuseAll(memo, current.getInput().get(i), pref, partition);\n- }\n-\n- setVisited(current.getHopID(), currentType);\n- }\n-\n/////////////////////////////////////////////////////////\n// Cost model fused operators w/ materialization points\n//////////\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -204,6 +204,8 @@ public class TemplateUtils\nreturn RowType.COL_AGG_B1_T;\nelse if( B1 != null && output.getDim1()==B1.getDim2() && output.getDim2()==X.getDim2())\nreturn RowType.COL_AGG_B1;\n+ else if( B1 != null && output.getDim1()==1 && B1.getDim2() == output.getDim2() )\n+ return RowType.COL_AGG_B1R;\nelse if( X.getDim1() == output.getDim1() && X.getDim2() != output.getDim2() )\nreturn RowType.NO_AGG_CONST;\nelse\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "diff": "@@ -55,14 +55,17 @@ public abstract class SpoofRowwise extends SpoofOperator\nCOL_AGG, //col aggregation (e.g., colSums() or t(y) %*% X)\nCOL_AGG_T, //transposed col aggregation (e.g., t(X) %*% y)\nCOL_AGG_B1, //col aggregation w/ matrix mult B1\n- COL_AGG_B1_T; //transposed col aggregation w/ matrix mult B1\n+ COL_AGG_B1_T, //transposed col aggregation w/ matrix mult B1\n+ COL_AGG_B1R; //col aggregation w/ matrix mult B1 to row vector\npublic boolean isColumnAgg() {\n- return (this == COL_AGG || this == COL_AGG_T)\n- || (this == COL_AGG_B1) || (this == COL_AGG_B1_T);\n+ return this == COL_AGG || this == COL_AGG_T\n+ || this == COL_AGG_B1 || this == COL_AGG_B1_T\n+ || this == COL_AGG_B1R;\n}\npublic boolean isRowTypeB1() {\n- return (this == NO_AGG_B1) || (this == COL_AGG_B1) || (this == COL_AGG_B1_T);\n+ return this == NO_AGG_B1 || this == COL_AGG_B1\n+ || this == COL_AGG_B1_T || this == COL_AGG_B1R;\n}\npublic boolean isRowTypeB1ColumnAgg() {\nreturn (this == COL_AGG_B1) || (this == COL_AGG_B1_T);\n@@ -268,7 +271,7 @@ public abstract class SpoofRowwise extends SpoofOperator\ncase COL_AGG_T: out.reset(n, 1, false); break;\ncase COL_AGG_B1: out.reset(n2, n, false); break;\ncase COL_AGG_B1_T: out.reset(n, n2, false); break;\n-\n+ case COL_AGG_B1R: out.reset(1, n2, false); break;\n}\nout.allocateDenseBlock();\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1943] Fix codegen fuse_all optimizer and consolidation This patch fixes special cases of row operations that caused the fuse_all optimizer to fail on Kmeans. Furthermore, this also includes a cleanup that consolidates the fuse-all plan selection used by the fuse_all and both cost-based optimizers.
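The consolidated rSelectPlansFuseAll selects the best memo-table entry per hop as a stream minimum over a comparator that prefers the current template type and then maximizes fused plan references (the 7 - 4 - refs scoring is taken verbatim from the inline comparator removed by this patch). A stripped-down sketch of just that selection step, with a toy Entry record standing in for MemoTableEntry:

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    public class PlanSelectionSketch {
        enum TemplateType { CELL, ROW, MAGG, OUTER }

        // Toy stand-in for CPlanMemoTable.MemoTableEntry.
        record Entry(TemplateType type, int planRefs) {}

        // Mirrors the typed comparator: prefer entries of the current
        // template type, then maximize the number of fused plan references.
        static Comparator<Entry> typedComparator(TemplateType current) {
            return Comparator.comparingInt(
                e -> 7 - ((e.type() == current) ? 4 : 0) - e.planRefs());
        }

        public static void main(String[] args) {
            TemplateType current = TemplateType.ROW;
            List<Entry> plans = List.of(
                new Entry(TemplateType.CELL, 1),
                new Entry(TemplateType.ROW, 2),
                new Entry(TemplateType.ROW, 0));
            // As in the patch: restrict to the current type (or CELL, which
            // is always fusable), then take the comparator minimum.
            Optional<Entry> best = plans.stream()
                .filter(p -> p.type() == current || p.type() == TemplateType.CELL)
                .min(typedComparator(current));
            System.out.println("best: " + best.orElse(null));
        }
    }

On this toy input the minimum is the ROW entry with two plan references, i.e., the most deeply fused plan of the preferred type.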
49,736
05.10.2017 19:18:46
25,200
374b0b197f9e2fc76b46e0e72f3b3d2594326734
[MINOR] GPU right indexing bugfix Since the GPU only supports sparse-dense right indexing, updated the memory budget accordingly.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/IndexingOp.java", "new_path": "src/main/java/org/apache/sysml/hops/IndexingOp.java", "diff": "@@ -237,7 +237,8 @@ public class IndexingOp extends Hop\n@Override\nprotected double computeOutputMemEstimate( long dim1, long dim2, long nnz )\n{\n- double sparsity = OptimizerUtils.getSparsity(dim1, dim2, nnz);\n+ // only dense right indexing supported on GPU\n+ double sparsity = isGPUEnabled() ? 1.0 : OptimizerUtils.getSparsity(dim1, dim2, nnz);\nreturn OptimizerUtils.estimateSizeExactSparsity(dim1, dim2, sparsity);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1793] [MINOR] GPU right indexing bugfix - Since the GPU only supports sparse-dense right indexing, updated the memory budget accordingly.
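The one-line fix above makes the output memory estimate assume a fully dense result whenever the GPU path is enabled, since GPU right indexing produces dense output. A tiny sketch of that estimate logic with made-up sizes; estimateSize here is a simplified 8-bytes-per-cell model, not the actual OptimizerUtils formula, which also accounts for sparse-format overheads.

    public class IndexingMemEstimateSketch {
        // Simplified worst-case size model: 8 bytes per double cell,
        // scaled by sparsity.
        static double estimateSize(long rows, long cols, double sparsity) {
            return 8.0 * rows * cols * sparsity;
        }

        static double outputMemEstimate(long rows, long cols, long nnz, boolean gpuEnabled) {
            // As in the patch: GPU right indexing produces a dense output,
            // so its estimate must assume sparsity 1.0 regardless of nnz.
            double sparsity = gpuEnabled ? 1.0 : (double) nnz / ((double) rows * cols);
            return estimateSize(rows, cols, sparsity);
        }

        public static void main(String[] args) {
            long rows = 10_000, cols = 1_000, nnz = 50_000;
            System.out.printf("CPU estimate: %.0f bytes%n",
                outputMemEstimate(rows, cols, nnz, false));
            System.out.printf("GPU estimate: %.0f bytes%n",
                outputMemEstimate(rows, cols, nnz, true));
        }
    }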
49,736
05.10.2017 19:28:48
25,200
355373990c478293090c90557383ab24c2c01c6b
Modify the default directory of the performance test to avoid a Maven compilation issue Closes
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -283,7 +283,7 @@ if __name__ == '__main__':\ndefault_mat_shape = ['10k_100']\n# Default temp directory, contains everything generated in perftest\n- default_config_dir = join(systemml_home, 'scripts', 'perftest', 'temp')\n+ default_config_dir = join(systemml_home, 'temp_perftest')\n# Initialize time\nstart_time = time.time()\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1940] Modify the default directory of the performance test to avoid a Maven compilation issue Closes #675.
49,717
06.10.2017 11:59:59
25,200
8ea38a1b14a17d81299063cd281631369574a067
[HOTFIX] fix for Caffe2DML switching back to using protoc v2.5.0 to compile the proto file
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<goal>run</goal>\n</goals>\n<configuration>\n- <protocVersion>2.6.1</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.4.0 -->\n+ <protocVersion>2.5.0</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.4.0 -->\n<inputDirectories>\n<include>src/main/proto/caffe</include>\n</inputDirectories>\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] fix for Caffe2DML - switching back to using protoc v2.5.0 to compile the proto file
49,698
07.10.2017 16:08:57
25,200
4e0c7f1c98f444926fa0f69b13459220aac67de0
[MINOR] Cleanup redundant LinregCG algorithm test scripts Closes
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/compress/CompressedLinregCG.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/compress/CompressedLinregCG.java", "diff": "@@ -55,6 +55,7 @@ public class CompressedLinregCG extends AutomatedTestBase\nprivate final static int intercept = 0;\nprivate final static double epsilon = 0.000000001;\nprivate final static double maxiter = 10;\n+ private final static double regular = 0.001;\n@Override\npublic void setUp() {\n@@ -103,20 +104,17 @@ public class CompressedLinregCG extends AutomatedTestBase\nTestConfiguration config = getTestConfiguration(TEST_NAME);\n/* This is for running the junit test the new way, i.e., construct the arguments directly */\n- String HOME = SCRIPT_DIR + TEST_DIR;\n- fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n- programArgs = new String[]{ \"-explain\",\"-stats\",\n- \"-args\", HOME + INPUT_DIR + \"X\",\n- HOME + INPUT_DIR + \"y\",\n- String.valueOf(intercept),\n- String.valueOf(epsilon),\n- String.valueOf(maxiter),\n- HOME + OUTPUT_DIR + \"w\"};\n- fullRScriptName = HOME + TEST_NAME + \".R\";\n+ String HOME = SCRIPT_DIR + \"functions/codegen/\";\n+ fullDMLScriptName = \"scripts/algorithms/LinearRegCG.dml\";\n+ programArgs = new String[]{ \"-explain\", \"-stats\", \"-nvargs\", \"X=\"+input(\"X\"), \"Y=\"+input(\"y\"),\n+ \"icpt=\"+String.valueOf(intercept), \"tol=\"+String.valueOf(epsilon),\n+ \"maxi=\"+String.valueOf(maxiter), \"reg=\"+String.valueOf(regular), \"B=\"+output(\"w\")};\n+\n+ fullRScriptName = HOME + \"Algorithm_LinregCG.R\";\nrCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" +\nHOME + INPUT_DIR + \" \" +\nString.valueOf(intercept) + \" \" + String.valueOf(epsilon) + \" \" +\n- String.valueOf(maxiter) + \" \" + HOME + EXPECTED_DIR;\n+ String.valueOf(maxiter) + \" \" + String.valueOf(regular) + HOME + EXPECTED_DIR;\nloadTestConfiguration(config);\n" }, { "change_type": "DELETE", "old_path": "src/test/scripts/functions/compress/LinregCG.R", "new_path": null, "diff": "-#-------------------------------------------------------------\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. 
See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-#\n-#-------------------------------------------------------------\n-\n-\n-args <- commandArgs(TRUE)\n-options(digits=22)\n-library(\"Matrix\")\n-\n-X = readMM(paste(args[1], \"X.mtx\", sep=\"\"))\n-y = readMM(paste(args[1], \"y.mtx\", sep=\"\"))\n-\n-intercept = as.integer(args[2]);\n-eps = as.double(args[3]);\n-maxiter = as.double(args[4]);\n-\n-if( intercept == 1 ){\n- ones = matrix(1, nrow(X), 1);\n- X = cbind(X, ones);\n-}\n-\n-r = -(t(X) %*% y);\n-p = -r;\n-norm_r2 = sum(r * r);\n-w = matrix(0, ncol(X), 1);\n-\n-i = 0;\n-while(i < maxiter) {\n- q = ((t(X) %*% (X %*% p)) + eps * p);\n- alpha = norm_r2 / ((t(p) %*% q)[1:1]);\n- w = w + alpha * p;\n- old_norm_r2 = norm_r2;\n- r = r + alpha * q;\n- norm_r2 = sum(r * r);\n- beta = norm_r2 / old_norm_r2;\n- p = -r + beta * p;\n- i = i + 1;\n-}\n-\n-writeMM(as(w,\"CsparseMatrix\"), paste(args[5], \"w\", sep=\"\"))\n" }, { "change_type": "DELETE", "old_path": "src/test/scripts/functions/compress/LinregCG.dml", "new_path": null, "diff": "-#-------------------------------------------------------------\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-#\n-#-------------------------------------------------------------\n-\n-\n-X = read($1);\n-y = read($2);\n-intercept = $3;\n-eps = $4;\n-maxiter = $5;\n-\n-if( intercept == 1 ){\n- ones = matrix(1, nrow(X), 1);\n- X = cbind(X, ones);\n-}\n-\n-r = -(t(X) %*% y);\n-p = -r;\n-norm_r2 = sum(r * r);\n-w = matrix(0, rows = ncol(X), cols = 1);\n-\n-i = 0;\n-while(i < maxiter) {\n- q = ((t(X) %*% (X %*% p)) + eps * p);\n- alpha = norm_r2 / castAsScalar(t(p) %*% q);\n- w = w + alpha * p;\n- old_norm_r2 = norm_r2;\n- r = r + alpha * q;\n- norm_r2 = sum(r * r);\n- beta = norm_r2 / old_norm_r2;\n- p = -r + beta * p;\n- i = i + 1;\n-}\n-\n-write(w, $6);\n-\n-\n-\n-\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup redundant LinregCG algorithm test scripts Closes #678.
49,738
09.10.2017 19:27:18
25,200
be3e0c993d2aa59f80f7c39ec1fd1f17d7f730b0
Fix robustness builtin functions w/ missing assignment This patch fixes NPE issues on compiling scripts that call builtin functions but miss the assignment of the expression output to left-hand-side variables. Furthermore, this also includes some minor refactoring and additional tests.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/StatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/StatementBlock.java", "diff": "@@ -545,24 +545,19 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\nthrows LanguageException, ParseException, IOException\n{\n_constVarsIn.putAll(constVars);\n- HashMap<String, ConstIdentifier> currConstVars = new HashMap<String,ConstIdentifier>();\n- currConstVars.putAll(constVars);\n-\n_statements = rewriteFunctionCallStatements(dmlProg, _statements);\n_dmlProg = dmlProg;\n+ HashMap<String, ConstIdentifier> currConstVars = new HashMap<String,ConstIdentifier>(constVars);\nfor (Statement current : _statements) {\n-\nif (current instanceof OutputStatement) {\nOutputStatement os = (OutputStatement)current;\n-\n// validate variable being written by output statement exists\nDataIdentifier target = (DataIdentifier)os.getIdentifier();\nif (ids.getVariable(target.getName()) == null) {\n//undefined variables are always treated unconditionally as error in order to prevent common script-level bugs\nraiseValidateError(\"Undefined Variable (\" + target.getName() + \") used in statement\", false, LanguageErrorCodes.INVALID_PARAMETERS);\n}\n-\nif ( ids.getVariable(target.getName()).getDataType() == DataType.SCALAR) {\nboolean paramsOkay = true;\nfor (String key : os.getSource().getVarParams().keySet()){\n@@ -577,20 +572,59 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\nExpression source = os.getSource();\nsource.setOutput(target);\nsource.validateExpression(ids.getVariables(), currConstVars, conditional);\n-\nsetStatementFormatType(os, conditional);\ntarget.setDimensionValueProperties(ids.getVariable(target.getName()));\n}\n-\nelse if (current instanceof AssignmentStatement){\n+ validateAssignmentStatement(current, dmlProg, ids, currConstVars, conditional);\n+ }\n+ else if (current instanceof MultiAssignmentStatement){\n+ validateMultiAssignmentStatement(current, dmlProg, ids, currConstVars, conditional);\n+ }\n+ else if(current instanceof ForStatement || current instanceof IfStatement || current instanceof WhileStatement ){\n+ raiseValidateError(\"control statement (WhileStatement, IfStatement, ForStatement) should not be in generic statement block. Likely a parsing error\", conditional);\n+ }\n+ else if (current instanceof PrintStatement) {\n+ PrintStatement pstmt = (PrintStatement) current;\n+ List<Expression> expressions = pstmt.getExpressions();\n+ for (Expression expression : expressions) {\n+ expression.validateExpression(ids.getVariables(), currConstVars, conditional);\n+ if (expression.getOutput().getDataType() != Expression.DataType.SCALAR) {\n+ if (expression.getOutput().getDataType() == Expression.DataType.MATRIX) {\n+ pstmt.raiseValidateError(\"Print statements can only print scalars. 
To print a matrix, please wrap it in a toString() function.\", conditional);\n+ } else {\n+ pstmt.raiseValidateError(\"Print statements can only print scalars.\", conditional);\n+ }\n+ }\n+ }\n+ }\n+ // no work to perform for PathStatement or ImportStatement\n+ else if (current instanceof PathStatement){}\n+ else if (current instanceof ImportStatement){}\n+ else {\n+ raiseValidateError(\"cannot process statement of type \" + current.getClass().getSimpleName(), conditional);\n+ }\n+ }\n+ _constVarsOut.putAll(currConstVars);\n+ return ids;\n+ }\n+\n+ private void validateAssignmentStatement(Statement current, DMLProgram dmlProg,\n+ VariableSet ids, HashMap<String, ConstIdentifier> currConstVars, boolean conditional)\n+ throws LanguageException, IOException, ParseException\n+ {\nAssignmentStatement as = (AssignmentStatement)current;\nDataIdentifier target = as.getTarget();\nExpression source = as.getSource();\nif (source instanceof FunctionCallIdentifier) {\n- ((FunctionCallIdentifier) source).validateExpression(dmlProg, ids.getVariables(),currConstVars, conditional);\n+ ((FunctionCallIdentifier) source).validateExpression(\n+ dmlProg, ids.getVariables(),currConstVars, conditional);\n}\n- else {\n+ else { //all builtin functions and expressions\n+ if( target == null )\n+ raiseValidateError(\"Missing variable assignment.\", false);\n+\nif( MLContextProxy.isActive() )\nMLContextProxy.setAppropriateVarsForRead(source, target._name);\n@@ -636,22 +670,19 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\n// handle case when nrow / ncol called on variable with size unknown (dims == -1)\n// --> const prop NOT possible\n- if (intid.getValue() != -1){\n+ if (intid.getValue() != -1)\ncurrConstVars.put(target.getName(), intid);\n}\n}\n- }\nif (target == null) {\n// function has no return value\n}\n// CASE: target NOT indexed identifier\nelse if (!(target instanceof IndexedIdentifier)){\ntarget.setProperties(source.getOutput());\n- if (source.getOutput() instanceof IndexedIdentifier){\n+ if (source.getOutput() instanceof IndexedIdentifier)\ntarget.setDimensions(source.getOutput().getDim1(), source.getOutput().getDim2());\n}\n-\n- }\n// CASE: target is indexed identifier\nelse\n{\n@@ -672,12 +703,6 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\nif ( ((IndexedIdentifier)target).getColUpperBound() != null )\n((IndexedIdentifier)target).getColUpperBound().validateExpression(ids.getVariables(), currConstVars, conditional);\n- // validate that LHS indexed identifier is being assigned a matrix value\n-// if (source.getOutput().getDataType() != Expression.DataType.MATRIX){\n-// LOG.error(target.printErrorLocation() + \"Indexed expression \" + target.toString() + \" can only be assigned matrix value\");\n-// throw new LanguageException(target.printErrorLocation() + \"Indexed expression \" + target.toString() + \" can only be assigned matrix value\");\n-// }\n-\n// validate that size of LHS index ranges is being assigned:\n// (a) a matrix value of same size as LHS\n// (b) singleton value (semantics: initialize enitre submatrix with this value)\n@@ -694,33 +719,27 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\n+ targetSize._row + \" rows and \" + targetSize._col + \" cols. 
Attempted to assign matrix with dimensions \"\n+ source.getOutput().getDim1() + \" rows and \" + source.getOutput().getDim2() + \" cols \", conditional);\n}\n-\n((IndexedIdentifier)target).setDimensions(targetSize._row, targetSize._col);\n}\n- if (target != null) {\n+ if (target != null)\nids.addVariable(target.getName(), target);\n}\n- }\n-\n- else if (current instanceof MultiAssignmentStatement){\n+ private void validateMultiAssignmentStatement(Statement current, DMLProgram dmlProg,\n+ VariableSet ids, HashMap<String, ConstIdentifier> currConstVars, boolean conditional)\n+ throws LanguageException, IOException\n+ {\nMultiAssignmentStatement mas = (MultiAssignmentStatement) current;\nArrayList<DataIdentifier> targetList = mas.getTargetList();\n-\n- // perform validation of source expression\nExpression source = mas.getSource();\n- /*\n- * MultiAssignmentStatments currently supports only External,\n- * User-defined, and Multi-return Builtin function expressions\n- */\n+\n+ //MultiAssignmentStatments currently supports only External,\n+ //User-defined, and Multi-return Builtin function expressions\nif (!(source instanceof DataIdentifier)\n|| (source instanceof DataIdentifier && !((DataIdentifier)source).multipleReturns()) ) {\n- //if (!(source instanceof FunctionCallIdentifier) ) {\n- //|| !(source instanceof BuiltinFunctionExpression && ((BuiltinFunctionExpression)source).isMultiReturnBuiltinFunction()) ){\nsource.raiseValidateError(\"can only use user-defined functions with multi-assignment statement\", conditional);\n}\n-\nif ( source instanceof FunctionCallIdentifier) {\nFunctionCallIdentifier fci = (FunctionCallIdentifier)source;\nfci.validateExpression(dmlProg, ids.getVariables(), currConstVars, conditional);\n@@ -732,16 +751,16 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\nelse\nthrow new LanguageException(\"Unexpected error.\");\n-\nif ( source instanceof FunctionCallIdentifier ) {\nfor (int j =0; j< targetList.size(); j++) {\n-\nDataIdentifier target = targetList.get(j);\n// set target properties (based on type info in function call statement return params)\nFunctionCallIdentifier fci = (FunctionCallIdentifier)source;\n- FunctionStatement fstmt = (FunctionStatement)_dmlProg.getFunctionStatementBlock(fci.getNamespace(), fci.getName()).getStatement(0);\n+ FunctionStatement fstmt = (FunctionStatement)_dmlProg\n+ .getFunctionStatementBlock(fci.getNamespace(), fci.getName()).getStatement(0);\nif (fstmt == null){\n- fci.raiseValidateError(\" function \" + fci.getName() + \" is undefined in namespace \" + fci.getNamespace(), conditional);\n+ fci.raiseValidateError(\" function \" + fci.getName()\n+ + \" is undefined in namespace \" + fci.getNamespace(), conditional);\n}\nif (!(target instanceof IndexedIdentifier)){\ntarget.setProperties(fstmt.getOutputParams().get(j));\n@@ -749,7 +768,8 @@ public class StatementBlock extends LiveVariableAnalysis implements ParseInfo\nelse{\nDataIdentifier targetAsSeen = ids.getVariable(target.getName());\nif (targetAsSeen == null){\n- raiseValidateError(target.printErrorLocation() + \"cannot assign value to indexed identifier \" + target.toString() + \" without first initializing \" + target.getName(), conditional);\n+ raiseValidateError(target.printErrorLocation() + \"cannot assign value to indexed identifier \"\n+ + target.toString() + \" without first initializing \" + target.getName(), conditional);\n}\ntarget.setProperties(targetAsSeen);\n}\n@@ -764,40 +784,6 @@ public class StatementBlock extends 
LiveVariableAnalysis implements ParseInfo\n}\n}\n- else if(current instanceof ForStatement || current instanceof IfStatement || current instanceof WhileStatement ){\n- raiseValidateError(\"control statement (WhileStatement, IfStatement, ForStatement) should not be in generic statement block. Likely a parsing error\", conditional);\n- }\n-\n- else if (current instanceof PrintStatement) {\n- PrintStatement pstmt = (PrintStatement) current;\n- List<Expression> expressions = pstmt.getExpressions();\n- for (Expression expression : expressions) {\n- expression.validateExpression(ids.getVariables(), currConstVars, conditional);\n- if (expression.getOutput().getDataType() != Expression.DataType.SCALAR) {\n- if (expression.getOutput().getDataType() == Expression.DataType.MATRIX) {\n- pstmt.raiseValidateError(\"Print statements can only print scalars. To print a matrix, please wrap it in a toString() function.\", conditional);\n- } else {\n- pstmt.raiseValidateError(\"Print statements can only print scalars.\", conditional);\n- }\n- }\n- }\n- }\n-\n- // no work to perform for PathStatement or ImportStatement\n- else if (current instanceof PathStatement){}\n- else if (current instanceof ImportStatement){}\n-\n-\n- else {\n- raiseValidateError(\"cannot process statement of type \" + current.getClass().getSimpleName(), conditional);\n- }\n-\n- } // end for (Statement current : _statements){\n- _constVarsOut.putAll(currConstVars);\n- return ids;\n-\n- }\n-\npublic void setStatementFormatType(OutputStatement s, boolean conditionalValidate)\nthrows LanguageException, ParseException\n{\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/InvalidBuiltinFunctionCallTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.misc;\n+\n+import org.junit.Test;\n+\n+import org.apache.sysml.api.DMLException;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class InvalidBuiltinFunctionCallTest extends AutomatedTestBase\n+{\n+ private final static String TEST_DIR = \"functions/misc/\";\n+ private final static String TEST_NAME1 = \"InvalidBuiltinFunctionCallTest1\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + InvalidBuiltinFunctionCallTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] {}));\n+ }\n+\n+ @Test\n+ public void testInvalidBuiltinFunctionCall1() {\n+ runTest( TEST_NAME1, true );\n+ }\n+\n+ private void runTest( String testName, boolean expected )\n+ {\n+ TestConfiguration config = getTestConfiguration(testName);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testName + \".dml\";\n+ programArgs = new String[]{};\n+\n+ //run tests\n+ runTest(true, expected, DMLException.class, -1);\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/InvalidBuiltinFunctionCallTest1.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=1006, cols=784, sparsity=0.001);\n+removeEmpty(target=X, margin=\"rows\");\n+print(\"nrow(X): \" + nrow(X));\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/misc/ZPackageSuite.java", "diff": "@@ -32,6 +32,7 @@ import org.junit.runners.Suite;\nFunctionInliningTest.class,\nFunctionNamespaceTest.class,\nIfTest.class,\n+ InvalidBuiltinFunctionCallTest.class,\nInvalidFunctionAssignmentTest.class,\nInvalidFunctionSignatureTest.class,\nIPAConstantFoldingScalarVariablePropagationTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1949] Fix robustness builtin functions w/ missing assignment This patch fixes NPE issues on compiling scripts that call builtin functions but miss the assignment of the expression output to left-hand-side variables. Furthermore, this also includes some minor refactoring and additional tests.
49,738
10.10.2017 13:54:10
25,200
4f865b218dd49f5fcbfbc6886c64462e0269bdd2
Fix codegen for sparse binary scalar-matrix ops This patch fixes a special case of code generation for sparse scalar-matrix binary operations such as 0-X, which previously led to compiler errors.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java", "diff": "@@ -269,10 +269,9 @@ public class CNodeBinary extends CNode\nsb.append(_inputs.get(1).codegen(sparse));\n//generate binary operation (use sparse template, if data input)\n- boolean lsparse = sparse && (_inputs.get(0) instanceof CNodeData\n- && (_inputs.get(0).getVarname().startsWith(\"a\")\n- || _inputs.get(1).getVarname().startsWith(\"a\"))\n- && !_inputs.get(0).isLiteral());\n+ boolean lsparse = sparse\n+ && ((_inputs.get(0) instanceof CNodeData && _inputs.get(0).getVarname().startsWith(\"a\"))\n+ ||(_inputs.get(1) instanceof CNodeData && _inputs.get(1).getVarname().startsWith(\"a\")));\nboolean scalarInput = _inputs.get(0).getDataType().isScalar();\nboolean scalarVector = (_inputs.get(0).getDataType().isScalar()\n&& _inputs.get(1).getDataType().isMatrix());\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1950] Fix codegen for sparse binary scalar-matrix ops This patch fixes a special case of code generation for sparse scalar-matrix binary operations such as 0-X, which previously led to compiler errors.
49,737
11.10.2017 09:20:40
25,200
8f786aa227d536558ed684060cef0e628bf3247f
added --deploy-mode param to python scripts Closes
[ { "change_type": "MODIFY", "old_path": "bin/systemml-spark-submit.py", "new_path": "bin/systemml-spark-submit.py", "diff": "@@ -41,8 +41,8 @@ def default_jars(systemml_home):\nreturn target_jars, systemml_jar\n-def spark_submit_entry(master, driver_memory, num_executors, executor_memory,\n- executor_cores, conf,\n+def spark_submit_entry(master, deploy_mode, driver_memory, num_executors,\n+ executor_memory, executor_cores, conf,\nnvargs, args, config, explain, debug, stats, gpu, f):\n\"\"\"\nThis function is responsible for the execution of arguments via\n@@ -100,7 +100,7 @@ def spark_submit_entry(master, driver_memory, num_executors, executor_memory,\n# stats, explain, target_jars\ncmd_spark = [spark_path, '--class', 'org.apache.sysml.api.DMLScript',\n- '--master', master,\n+ '--master', master, '--deploy-mode', deploy_mode,\n'--driver-memory', driver_memory,\n'--conf', default_conf,\n'--jars', cuda_jars, systemml_jars]\n@@ -129,7 +129,8 @@ if __name__ == '__main__':\ncparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\ndescription='System-ML Spark Submit Script')\n# SPARK-SUBMIT Options\n- cparser.add_argument('--master', default='local[*]', help='local, yarn-client, yarn-cluster', metavar='')\n+ cparser.add_argument('--master', default='local[*]', help='local, yarn', metavar='')\n+ cparser.add_argument('--deploy-mode', help='client, cluster', default='client', metavar='')\ncparser.add_argument('--driver-memory', default='8G', help='Memory for driver (e.g. 512M, 1G)', metavar='')\ncparser.add_argument('--num-executors', nargs=1, help='Number of executors to launch', metavar='')\ncparser.add_argument('--executor-memory', nargs=1, help='Memory per executor', metavar='')\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -355,7 +355,8 @@ if __name__ == '__main__':\n'set <force> option to skip conservative memory estimates '\n'and use GPU wherever possible', nargs='?', const='no_option')\n# Spark Configuration Option\n- cparser.add_argument('--master', help='local, yarn-client, yarn-cluster', metavar='')\n+ cparser.add_argument('--master', help='local, yarn', metavar='')\n+ cparser.add_argument('--deploy-mode', help='client, cluster', metavar='')\ncparser.add_argument('--driver-memory', help='Memory for driver (e.g. 512M)', metavar='')\ncparser.add_argument('--num-executors', help='Number of executors to launch', metavar='')\ncparser.add_argument('--executor-memory', help='Memory per executor', metavar='')\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_misc.py", "new_path": "scripts/perftest/python/utils_misc.py", "diff": "@@ -84,6 +84,9 @@ def split_config_args(args):\nif args['master'] is not None:\nbackend_args_dict['--master'] = args['master']\n+ if args['deploy_mode'] is not None:\n+ backend_args_dict['--deploy-mode'] = args['master']\n+\nif args['num_executors'] is not None:\nbackend_args_dict['--num-executors'] = args['num_executors']\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1945] added --deploy-mode param to python scripts Closes #681
49,701
11.10.2017 23:45:01
25,200
426d7fa0d91a54fd40b4ab90294007f7a2376e43
[MINOR] Foreign parent check bug in RewriteElementwiseMultChainOpt Closes The check for foreign parents in the interior of an element-wise multiply chain is incorrect. Interior element-wise multiply nodes that have a foreign parent which is not a BinaryOp (for example, a write DataOp) are missed. This leads to incorrect rewrites in unlucky DAGs.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteElementwiseMultChainOptimization.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteElementwiseMultChainOptimization.java", "diff": "@@ -310,7 +310,7 @@ public class RewriteElementwiseMultChainOptimization extends HopRewriteRule {\nfinal ArrayList<Hop> parents = child.getParent();\nif (parents.size() > 1)\nfor (final Hop parent : parents)\n- if (parent instanceof BinaryOp && !emults.contains(parent))\n+ if (!(parent instanceof BinaryOp) || !emults.contains(parent))\nreturn false;\n// child does not have foreign parents\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Foreign parent check bug in RewriteElementwiseMultChainOpt Closes #683. The check for foreign parents in the interior of an element-wise multiply chain is incorrect. Interior element-wise multiply nodes that have a foreign parent which is not a BinaryOp (for example, a write DataOp) are missed. This leads to incorrect rewrites in unlucky DAGs.
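A minimal Java sketch of the corrected predicate, lifted out of the rewrite rule for readability; the helper name is hypothetical, while Hop, BinaryOp, and getParent() are the types used in the diff above:

    // Interior chain nodes are only fusable if every parent is itself an
    // element-wise multiply inside the chain; any other parent (e.g., a
    // write DataOp) is foreign and must block the rewrite.
    private static boolean hasOnlyChainParents(Hop child, Set<Hop> emults) {
        ArrayList<Hop> parents = child.getParent();
        if (parents.size() > 1) // a single parent is the referencing chain node
            for (Hop parent : parents)
                if (!(parent instanceof BinaryOp) || !emults.contains(parent))
                    return false;
        return true;
    }

The buggy version only rejected BinaryOp parents outside the chain, so a non-BinaryOp parent such as a write DataOp slipped through and enabled the incorrect rewrite.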
49,738
11.10.2017 17:12:31
25,200
deb4baf06c3a9d204523dc868f72ea23e307f4c4
Fix robustness frame rbind w/ mismatching schemas This patch makes the frame rbind more robust by allowing graceful schema conversions. We now try, in a best-effort manner, to convert the values of the second input into the types of the first input, which determine the schema of the output.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/FrameBlock.java", "diff": "@@ -553,6 +553,20 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nreturn new ObjectRowIterator(0, _numRows);\n}\n+ /**\n+ * Get a row iterator over the frame where all fields are encoded\n+ * as boxed objects according to the value types of the provided\n+ * target schema.\n+ *\n+ * @param schema target schema of objects\n+ * @return object array iterator\n+ */\n+ public Iterator<Object[]> getObjectRowIterator(ValueType[] schema) {\n+ ObjectRowIterator iter = new ObjectRowIterator(0, _numRows);\n+ iter.setSchema(schema);\n+ return iter;\n+ }\n+\n/**\n* Get a row iterator over the frame where all selected fields are\n* encoded as boxed objects according to their value types.\n@@ -992,7 +1006,7 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nret._coldata = new Array[getNumColumns()];\nfor( int j=0; j<getNumColumns(); j++ )\nret._coldata[j] = _coldata[j].clone();\n- Iterator<Object[]> iter = that.getObjectRowIterator();\n+ Iterator<Object[]> iter = that.getObjectRowIterator(_schema);\nwhile( iter.hasNext() )\nret.appendRow(iter.next());\n}\n@@ -1221,6 +1235,8 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n}\nprivate class ObjectRowIterator extends RowIterator<Object> {\n+ private ValueType[] _tgtSchema = null;\n+\npublic ObjectRowIterator(int rl, int ru) {\nsuper(rl, ru);\n}\n@@ -1229,6 +1245,10 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\nsuper(rl, ru, cols);\n}\n+ public void setSchema(ValueType[] schema) {\n+ _tgtSchema = schema;\n+ }\n+\n@Override\nprotected Object[] createRow(int size) {\nreturn new Object[size];\n@@ -1237,10 +1257,17 @@ public class FrameBlock implements Writable, CacheBlock, Externalizable\n@Override\npublic Object[] next( ) {\nfor( int j=0; j<_cols.length; j++ )\n- _curRow[j] = get(_curPos, _cols[j]-1);\n+ _curRow[j] = getValue(_curPos, _cols[j]-1);\n_curPos++;\nreturn _curRow;\n}\n+\n+ private Object getValue(int i, int j) {\n+ Object val = get(i, j);\n+ if( _tgtSchema != null )\n+ val = UtilFunctions.objectToObject(_tgtSchema[j], val);\n+ return val;\n+ }\n}\n///////\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1956] Fix robustness frame rbind w/ mismatching schemas This patch makes the frame rbind more robust by allowing graceful schema conversions. We now try, in a best-effort manner, to convert the values of the second input into the types of the first input, which determine the schema of the output.
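A minimal sketch of the best-effort conversion that the new getValue() path delegates to UtilFunctions.objectToObject; the standalone method and the value-type coverage below are illustrative only, and assume numeric cells arrive as parsable strings or boxed numbers:

    // Convert a boxed cell value of the second input into the column type
    // of the first input; non-convertible values would surface as parse errors.
    static Object toTargetType(ValueType target, Object val) {
        if (val == null) return null;
        switch (target) {
            case DOUBLE:  return Double.valueOf(val.toString());
            case INT:     return Long.valueOf(val.toString());
            case BOOLEAN: return Boolean.valueOf(val.toString());
            default:      return val.toString(); // STRING columns
        }
    }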
49,738
15.10.2017 02:42:20
25,200
33559144cd707e324b59ed5ca417e3d5461c2f0a
[HOTFIX][SYSTEMML-1959] Fix sparse-sparse transpose w/ CSR input This patch fixes a remaining issue of sparse-sparse transpose related to the correct handling of sparse blocks in CSR or COO format.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "diff": "@@ -859,8 +859,8 @@ public class LibMatrixReorg\nif( cl > 0 ) {\nfor( int i=bi; i<bimin; i++ )\nif( !a.isEmpty(i) ) {\n- int pos = a.posFIndexGTE(i, cl);\n- ix[i-bi] = (pos>=0) ? pos : a.size(i);\n+ int j = a.posFIndexGTE(i, cl);\n+ ix[i-bi] = (j>=0) ? j : a.size(i);\n}\n}\n@@ -868,19 +868,19 @@ public class LibMatrixReorg\nint bjmin = Math.min(bj+blocksizeJ, cu);\n//core block transpose operation\n- for( int i=bi, iix=0; i<bimin; i++, iix++ ) {\n+ for( int i=bi; i<bimin; i++ ) {\nif( a.isEmpty(i) ) continue;\nint apos = a.pos(i);\nint alen = a.size(i);\nint[] aix = a.indexes(i);\ndouble[] avals = a.values(i);\n- int j = ix[iix]; //last block boundary\n- for( ; j<alen && aix[j]<bjmin; j++ ) {\n- c.allocate(aix[apos+j], ennz2,n2);\n- c.append(aix[apos+j], i, avals[apos+j]);\n+ int j = ix[i-bi] + apos; //last block boundary\n+ for( ; j<apos+alen && aix[j]<bjmin; j++ ) {\n+ c.allocate(aix[j], ennz2,n2);\n+ c.append(aix[j], i, avals[j]);\n}\n- ix[iix] = j; //keep block boundary\n+ ix[i-bi] = j - apos; //keep block boundary\n}\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-1959] Fix sparse-sparse transpose w/ CSR input This patch fixes a remaining issue of sparse-sparse transpose related to the correct handling of sparse blocks in CSR or COO format.
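The invariant behind the fix: for shared-array formats like CSR, indexes(i) and values(i) return arrays spanning all rows, so every access into row i must be shifted by pos(i); in the default MCSR layout the per-row position happens to be zero, which masked the bug. A sketch of a correctly offset row scan, where process() is a hypothetical consumer:

    static void scanRow(SparseBlock a, int i) {
        int apos = a.pos(i);      // start offset of row i in the shared arrays
        int alen = a.size(i);     // number of non-zeros in row i
        int[] aix = a.indexes(i);
        double[] avals = a.values(i);
        for (int k = apos; k < apos + alen; k++)
            process(aix[k], avals[k]); // column index and value of one non-zero
    }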
49,738
15.10.2017 17:04:31
25,200
06b4b9d5ff04f09c61d44864d36a65b31527bbb3
[MINOR] Fix consistency ALS datagen script (factor names, cleanup) This patch cleans up the ALS data generation script to use the same factor names as ALS-CG and remove unnecessary operations.
[ { "change_type": "MODIFY", "old_path": "scripts/datagen/genRandData4ALS.dml", "new_path": "scripts/datagen/genRandData4ALS.dml", "diff": "#-------------------------------------------------------------\nXfile = $X; # input matrix X of size m x n\n-Wfile = $W; # original row factor of size m x r\n-Hfile = $H; # original col factor of size r x n\n+Ufile = $U; # original row factor of size m x r\n+Vfile = $V; # original col factor of size r x n\nm = $rows; # no. of rows of X\nn = $cols; # no. of cols of X\nr = $rank; # rank of factorization\n@@ -30,15 +30,18 @@ sigma = ifdef ($sigma, 0.01); # variance of Gaussian noise\nfmt = ifdef ($fmt, \"binary\"); # output format\n# generate original factors by sampling from a normal(0,1.0) distribution\n-W = rand(rows = m, cols = r, pdf = \"normal\", seed = 123);\n-H = rand(rows = r, cols = n, pdf = \"normal\", seed = 456);\n+U = rand(rows = m, cols = r, pdf = \"normal\", seed = 123);\n+V = rand(rows = n, cols = r, pdf = \"normal\", seed = 456);\nI = floor(rand(rows = nnz, cols = 1, min = 1, max = m + 0.999999999));\nJ = floor(rand(rows = nnz, cols = 1, min = 1, max = n + 0.999999999));\nX = rand(rows = nnz, cols = 1, pdf = \"normal\") * sqrt(sigma);\nN = table(I, J, X);\n-T = (N != 0);\n-X = T * (W %*% H) + T * N;\n+X = (N != 0) * (U %*% t(V)) + N;\nwrite(X, Xfile, format = fmt);\n-write(W, Wfile, format = fmt);\n-write(H, Hfile, format = fmt);\n+if( Ufile != \" \" )\n+ write(U, Ufile, format = fmt);\n+if( Vfile != \" \" ) {\n+ V = t(V);\n+ write(V, Vfile, format = fmt);\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix consistency ALS datagen script (factor names, cleanup) This patch cleans up the ALS data generation script to use the same factor names as ALS-CG and remove unnecessary operations.
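In matrix notation the generator now reads as follows, writing M for the sampling mask and ignoring collisions among the sampled (I, J) pairs, which table() aggregates:

    X = M \circ (U V^{\top}) + N, \qquad
    M_{ij} = [N_{ij} \neq 0], \qquad
    N_{ij} \sim \mathcal{N}(0, \sigma) \;\text{with variance } \sigma

so each observed cell is the low-rank prediction u_i^T v_j perturbed by Gaussian noise, and unobserved cells stay zero.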
49,736
17.10.2017 16:52:19
25,200
259814e6c00021c643c33867906d0c5d8dc4bc5e
[MINOR] Reset the _cachedParams to avoid incorrect sizes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "@@ -639,6 +639,9 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nreturn;\n}\n+ // Reset the _cachedParams to avoid incorrect sizes\n+ _cachedParams = new ConvolutionParameters(-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, _maxNumThreads);\n+\nswitch(op)\n{\ncase MAX_POOLING:\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] [SYSTEMML-540] Reset the _cachedParams to avoid incorrect sizes
49,738
17.10.2017 20:54:01
25,200
5b8d62659b2e5727bebcaf0d2681fc4ecd4ea85f
[MINOR] Fix missing warning on truncation of matrix/frame toString
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java", "diff": "@@ -26,6 +26,7 @@ import org.apache.sysml.lops.Lop;\nimport org.apache.sysml.parser.ParameterizedBuiltinFunctionExpression;\nimport org.apache.sysml.parser.Statement;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.CacheBlock;\nimport org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n@@ -328,10 +329,12 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\nString out = null;\nif( data instanceof MatrixObject ) {\nMatrixBlock matrix = (MatrixBlock) data.acquireRead();\n+ warnOnTrunction(matrix, rows, cols);\nout = DataConverter.toString(matrix, sparse, separator, lineseparator, rows, cols, decimal);\n}\nelse if( data instanceof FrameObject ) {\nFrameBlock frame = (FrameBlock) data.acquireRead();\n+ warnOnTrunction(frame, rows, cols);\nout = DataConverter.toString(frame, sparse, separator, lineseparator, rows, cols, decimal);\n}\nelse {\n@@ -344,4 +347,15 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\nthrow new DMLRuntimeException(\"Unknown opcode : \" + opcode);\n}\n}\n+\n+ private void warnOnTrunction(CacheBlock data, int rows, int cols) {\n+ //warn on truncation because users might not be aware and use toString for verification\n+ if( (getParam(\"rows\")==null && data.getNumRows()>rows)\n+ || (getParam(\"cols\")==null && data.getNumColumns()>cols) )\n+ {\n+ LOG.warn(\"Truncating \"+data.getClass().getSimpleName()+\" of size \"\n+ + data.getNumRows()+\"x\"+data.getNumColumns()+\" to \"+rows+\"x\"+cols+\". \"\n+ + \"Use toString(X, rows=..., cols=...) if necessary.\");\n+ }\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix missing warning on truncation of matrix/frame toString
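The new check is keyed on parameter presence rather than on the resolved values: passing rows or cols explicitly suppresses the warning even when the output is still truncated. A condensed form of the condition, with names as in the diff above:

    // warn only when a dimension was left at its default bound
    boolean truncated = (getParam("rows") == null && data.getNumRows() > rows)
                     || (getParam("cols") == null && data.getNumColumns() > cols);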
49,738
19.10.2017 15:07:54
25,200
323dd72a8ed18687aa3019387c4ab7b0598bd9d5
Fix robustness codegen row ops w/ unknowns This patch fixes special cases of codegen row templates with partial unknowns, which is important for robustness during initial compilation even though the unknowns lead to dynamic recompilation at runtime.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -310,7 +310,8 @@ public class TemplateRow extends TemplateBase\nCNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n// if one input is a matrix then we need to do vector by scalar operations\n- if(hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1 )\n+ if(hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1\n+ || (!hop.dimsKnown() && cdata1.getDataType()==DataType.MATRIX ) )\n{\nif( HopRewriteUtils.isUnary(hop, SUPPORTED_VECT_UNARY) ) {\nString opname = \"VECT_\"+((UnaryOp)hop).getOp().name();\n@@ -325,7 +326,6 @@ public class TemplateRow extends TemplateBase\nelse //general scalar case\n{\ncdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n-\nString primitiveOpName = ((UnaryOp)hop).getOp().toString();\nout = new CNodeUnary(cdata1, UnaryType.valueOf(primitiveOpName));\n}\n@@ -355,7 +355,9 @@ public class TemplateRow extends TemplateBase\n// if one input is a matrix then we need to do vector by scalar operations\nif( (hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1)\n- || (hop.getInput().get(1).getDim1() > 1 && hop.getInput().get(1).getDim2() > 1))\n+ || (hop.getInput().get(1).getDim1() > 1 && hop.getInput().get(1).getDim2() > 1)\n+ || (!(hop.dimsKnown() && hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown())\n+ && (cdata1.getDataType().isMatrix() || cdata2.getDataType().isMatrix())))\n{\nif( HopRewriteUtils.isBinary(hop, SUPPORTED_VECT_BINARY) ) {\nif( TemplateUtils.isMatrix(cdata1) && (TemplateUtils.isMatrix(cdata2)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java", "diff": "@@ -184,7 +184,7 @@ public class TemplateUtils\npublic static RowType getRowType(Hop output, Hop... inputs) {\nHop X = inputs[0];\nHop B1 = (inputs.length>1) ? inputs[1] : null;\n- if( (X!=null && HopRewriteUtils.isEqualSize(output, X)) || X==null )\n+ if( (X!=null && HopRewriteUtils.isEqualSize(output, X)) || X==null || !X.dimsKnown() )\nreturn RowType.NO_AGG;\nelse if( ((B1!=null && output.getDim1()==X.getDim1() && output.getDim2()==B1.getDim2())\n|| (output instanceof IndexingOp && HopRewriteUtils.isColumnRangeIndexing((IndexingOp)output)))\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1903] Fix robustness codegen row ops w/ unknowns This patch fixes special cases of codegen row templates with partial unknowns, which is important for robustness during initial compilation even though the unknowns lead to dynamic recompilation at runtime.
49,738
22.10.2017 17:57:29
25,200
78a3808e0aaefb0c6f6959611ef119695d4d1d3e
Performance conv2d-backward-data (for sparse filter) This patch follows up on the recent modification of conv2d backward filter by similarly applying a sparse rotate for conv2d backward data. Furthermore, this also includes the removal of unnecessary allocations per input row, and thread-local nnz maintenance.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java", "diff": "@@ -186,10 +186,10 @@ public class LibMatrixDNN {\nif(isEligibleForConv2dBackwardDataDense(params))\nStatistics.numNativeSparseConv2dBwdDataCalls.increment();\n- execute(LibMatrixDNNHelper.getConv2dBackwardDataWorkers(params), params);\n+ long nnz = execute(LibMatrixDNNHelper.getConv2dBackwardDataWorkers(params), params);\n//post-processing: maintain nnz\n- outputBlock.recomputeNonZeros();\n+ outputBlock.setNonZeros(nnz);\noutputBlock.examSparsity();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNConv2dBackwardDataHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNConv2dBackwardDataHelper.java", "diff": "@@ -78,22 +78,22 @@ public class LibMatrixDNNConv2dBackwardDataHelper {\nint PQ = _params.P*_params.Q; int K = _params.K; int CRS = _params.C*_params.R*_params.S;\nMatrixBlock filter = _params.input1;\nMatrixBlock dout = _params.input2;\n- MatrixBlock dout_reshaped = new MatrixBlock(PQ, K, false);\n- dout_reshaped.allocateDenseBlock();\n+ MatrixBlock outRotate = new MatrixBlock(PQ, K, dout.sparse);\n+ MatrixBlock outMM = new MatrixBlock(PQ, CRS, false);\n+ outRotate.allocateBlock();\nLibMatrixDNNRotate180Helper.Rotate180Worker rotate180Worker =\n- LibMatrixDNNRotate180Helper.Rotate180Worker.getWorker( dout, dout_reshaped, _params, true, false);\n+ LibMatrixDNNRotate180Helper.Rotate180Worker.getWorker( dout, outRotate, _params, true, false);\nlong time1 = 0; long time2 = 0;\nfor(int n = _rl; n < _ru; n++) {\n// rotate180(dout[n,]) => dout_reshaped\nrotate180Worker.execute(n, 0);\n-\n// dout_reshaped %*% filter => temp\n- MatrixBlock temp = new MatrixBlock(PQ, CRS, false);\nlong t1 = DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS ? System.nanoTime() : 0;\n- LibMatrixDNNHelper.singleThreadedMatMult(dout_reshaped, filter, temp, true, false, _params);\n+ outMM.reset(PQ, CRS, false);\n+ LibMatrixDNNHelper.singleThreadedMatMult(outRotate, filter, outMM, !outRotate.sparse, false, _params);\nlong t2 = DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS ? System.nanoTime() : 0;\n// col2im(temp) => output[n,]\n- LibMatrixDNNHelper.doCol2imOverSingleImage(n, temp, _params);\n+ LibMatrixDNNHelper.doCol2imOverSingleImage(n, outMM, _params);\nlong t3 = DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS ? 
System.nanoTime() : 0;\nif(DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS) {\n@@ -105,8 +105,9 @@ public class LibMatrixDNNConv2dBackwardDataHelper {\nLibMatrixDNN.loopedConvBwdDataMatMultTime.addAndGet(time1);\nLibMatrixDNN.loopedConvBwdDataCol2ImTime.addAndGet(time2);\n}\n- return 0L;\n- }\n+ //multi-threaded nnz maintenance of current working set\n+ return _params.output.recomputeNonZeros(_rl, _ru-1);\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNConv2dBackwardFilterHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNConv2dBackwardFilterHelper.java", "diff": "@@ -86,13 +86,12 @@ public class LibMatrixDNNConv2dBackwardFilterHelper {\nint PQ = _params.P*_params.Q, K = _params.K, CRS = _params.C*_params.R*_params.S;\nMatrixBlock dout = _params.input2;\nMatrixBlock im2ColOutBlock = new MatrixBlock(CRS, PQ, false);\n- MatrixBlock dout_reshaped = new MatrixBlock(PQ, K, dout.sparse);\n- MatrixBlock temp = new MatrixBlock(CRS, K, false);\n- dout_reshaped.allocateBlock();\n- temp.allocateBlock();\n+ MatrixBlock outRotate = new MatrixBlock(PQ, K, dout.sparse);\n+ MatrixBlock outMM = new MatrixBlock(CRS, K, false);\n+ outRotate.allocateBlock();\nIm2colWorker im2ColWorker = Im2colWorker.getWorker( _params.input1, im2ColOutBlock, _params, true, false);\n- Rotate180Worker rotate180Worker = Rotate180Worker.getWorker( dout, dout_reshaped, _params, true, false);\n+ Rotate180Worker rotate180Worker = Rotate180Worker.getWorker( dout, outRotate, _params, true, false);\ndouble [] partRet = new double[CRS*_params.K];\nlong time1 = 0; long time2 = 0;\nfor(int n = _rl; n < _ru; n++) {\n@@ -104,12 +103,12 @@ public class LibMatrixDNNConv2dBackwardFilterHelper {\nim2ColWorker.execute(n);\nlong t2 = DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS ? System.nanoTime() : 0;\n- temp.reset(CRS, K, false);\n- LibMatrixDNNHelper.singleThreadedMatMult(im2ColOutBlock, dout_reshaped, temp, true, true, _params);\n+ outMM.reset(CRS, K, false);\n+ LibMatrixDNNHelper.singleThreadedMatMult(im2ColOutBlock, outRotate, outMM, !im2ColOutBlock.sparse, !outRotate.sparse, _params);\nlong t3 = DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS ? System.nanoTime() : 0;\n- if( !temp.isEmptyBlock() ) //accumulate row results\n- LibMatrixMult.vectAdd(temp.getDenseBlock(), partRet, 0, 0, K*CRS);\n+ if( !outMM.isEmptyBlock() ) //accumulate row results\n+ LibMatrixMult.vectAdd(outMM.getDenseBlock(), partRet, 0, 0, K*CRS);\nif(DMLScript.STATISTICS && LibMatrixDNN.DISPLAY_STATISTICS) {\ntime1 += t2 - t1;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNConv2dHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNConv2dHelper.java", "diff": "@@ -219,7 +219,7 @@ public class LibMatrixDNNConv2dHelper {\n// t(_im2ColOutBlock) %*% t(filter) => t(matMultOutBlock)\noutMM.reset(outMM.rlen, outMM.clen, false);\n- LibMatrixDNNHelper.singleThreadedMatMult(outIm2col, _params.input2, outMM, false, true, _params);\n+ LibMatrixDNNHelper.singleThreadedMatMult(outIm2col, _params.input2, outMM, false, false, _params);\n// Copy the matrix matMultOutBlock of shape [K X PQ] to params.output.denseBlock + destPos\npartialCopyTrans(outMM, _params.output, n*K*PQ, K, PQ);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1970] Performance conv2d-backward-data (for sparse filter) This patch follows up on the recent modification of conv2d backward filter by similarly applying a sparse rotate for conv2d backward data. Furthermore, this also includes the removal of unnecessary allocations per input row, and thread-local nnz maintenance.
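The allocation change follows the usual hot-loop pattern: create the working blocks once per task and reset() them inside the per-row loop so the backing arrays are reused. A schematic version, where rotate, multiply, and col2im stand in for the rotate180, matrix-multiply, and col2im helpers invoked above:

    MatrixBlock outRotate = new MatrixBlock(PQ, K, dout.sparse);
    MatrixBlock outMM = new MatrixBlock(PQ, CRS, false);
    outRotate.allocateBlock();
    for (int n = rl; n < ru; n++) {
        rotate(n, outRotate);         // rotate180(dout[n,]) into the reused block
        outMM.reset(PQ, CRS, false);  // clear contents, keep the allocation
        multiply(outRotate, filter, outMM);
        col2im(n, outMM);             // scatter into output[n,]
    }
    return output.recomputeNonZeros(rl, ru - 1); // thread-local nnz of the row range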
49,738
22.10.2017 18:57:35
25,200
c70cb1166f4ec6c79d10248727a3eb7b85f70360
[MINOR] Fix analysis of sparse-safeness for codegen cell/magg ops
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "diff": "@@ -322,10 +322,12 @@ public class TemplateCell extends TemplateBase\nprotected boolean isSparseSafe(List<Hop> roots, Hop mainInput, List<CNode> outputs, List<AggOp> aggOps, boolean onlySum) {\nboolean ret = true;\nfor( int i=0; i<outputs.size() && ret; i++ ) {\n- ret &= (HopRewriteUtils.isBinary(roots.get(i), OpOp2.MULT)\n- && roots.get(i).getInput().contains(mainInput))\n- || (HopRewriteUtils.isBinary(roots.get(i), OpOp2.DIV)\n- && roots.get(i).getInput().get(0) == mainInput)\n+ Hop root = (roots.get(i) instanceof AggUnaryOp || roots.get(i)\n+ instanceof AggBinaryOp) ? roots.get(i).getInput().get(0) : roots.get(i);\n+ ret &= (HopRewriteUtils.isBinarySparseSafe(root)\n+ && root.getInput().contains(mainInput))\n+ || (HopRewriteUtils.isBinary(root, OpOp2.DIV)\n+ && root.getInput().get(0) == mainInput)\n|| (TemplateUtils.rIsSparseSafeOnly(outputs.get(i), BinType.MULT)\n&& TemplateUtils.rContainsInput(outputs.get(i), mainInput.getHopID()));\nif( onlySum )\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix analysis of sparse-safeness for codegen cell/magg ops
49,698
22.10.2017 21:35:08
25,200
596005a80d0b39fef9b33b55145ffda043a4573d
Making SVM scripts work with MLContext Closes
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/l2-svm-predict.dml", "new_path": "scripts/algorithms/l2-svm-predict.dml", "diff": "@@ -51,6 +51,7 @@ cmdLine_Y = ifdef($Y, \" \")\ncmdLine_confusion = ifdef($confusion, \" \")\ncmdLine_accuracy = ifdef($accuracy, \" \")\ncmdLine_scores = ifdef($scores, \" \")\n+cmdLine_scoring_only = ifdef($scoring_only, FALSE)\ncmdLine_fmt = ifdef($fmt, \"text\")\nX = read($X)\n@@ -75,27 +76,30 @@ scores = b + (X %*% w[1:ncol(X),])\nif(cmdLine_scores != \" \")\nwrite(scores, cmdLine_scores, format=cmdLine_fmt)\n-if(cmdLine_Y != \" \"){\n- y = read(cmdLine_Y)\n+if(!cmdLine_scoring_only){\n+ Y = read(cmdLine_Y)\npred = (scores >= 0)\npred_labels = pred*positive_label + (1-pred)*negative_label\n- num_correct = sum(pred_labels == y)\n+ num_correct = sum(pred_labels == Y)\nacc = 100*num_correct/nrow(X)\nacc_str = \"Accuracy (%): \" + acc\nprint(acc_str)\n+\nif(cmdLine_accuracy != \" \")\nwrite(acc_str, cmdLine_accuracy)\nif(cmdLine_confusion != \" \"){\n+\npred = 2*pred - 1\n+\nif(negative_label != -1 | positive_label != +1)\n- y = 2/(positive_label - negative_label)*y - (negative_label + positive_label)/(positive_label - negative_label)\n+ Y = 2/(positive_label - negative_label)*Y - (negative_label + positive_label)/(positive_label - negative_label)\npred_is_minus = (pred == -1)\npred_is_plus = 1 - pred_is_minus\n- y_is_minus = (y == -1)\n+ y_is_minus = (Y == -1)\ny_is_plus = 1 - y_is_minus\ncheck_min_y_minus = sum(pred_is_minus*y_is_minus)\n@@ -103,21 +107,13 @@ if(cmdLine_Y != \" \"){\ncheck_max_y_minus = sum(pred_is_plus*y_is_minus)\ncheck_max_y_plus = sum(pred_is_plus*y_is_plus)\n- #s = check_min_y_minus + \",\" + check_min_y_plus\n- #s = append(s, check_max_y_minus + \",\" + check_max_y_plus)\n- #s = append(s, \"\")\n- #write(s, cmdLine_confusion)\n-\n- confusion_mat = matrix(0, rows=3, cols=3)\n- confusion_mat[1,2] = negative_label\n- confusion_mat[1,3] = positive_label\n- confusion_mat[2,1] = negative_label\n- confusion_mat[3,1] = positive_label\n- confusion_mat[2,2] = check_min_y_minus\n- confusion_mat[2,3] = check_max_y_minus\n- confusion_mat[3,2] = check_min_y_plus\n- confusion_mat[3,3] = check_max_y_plus\n+ confusion_mat = matrix(0, rows=2, cols=2)\n+ confusion_mat[1,1] = check_min_y_minus\n+ confusion_mat[1,2] = check_min_y_plus\n+ confusion_mat[2,1] = check_max_y_minus\n+ confusion_mat[2,2] = check_max_y_plus\nwrite(confusion_mat, cmdLine_confusion, format=\"csv\")\n}\n}\n+\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/l2-svm.dml", "new_path": "scripts/algorithms/l2-svm.dml", "diff": "@@ -80,8 +80,6 @@ if(check_min != -1 | check_max != +1)\npositive_label = check_max\nnegative_label = check_min\n-continue = 1\n-\nintercept = cmdLine_icpt\nif(intercept != 0 & intercept != 1)\nstop(\"Stopping due to invalid argument: Currently supported intercept options are 0 and 1\")\n@@ -118,14 +116,16 @@ s = g_old\nXw = matrix(0, rows=nrow(X), cols=1)\ndebug_str = \"# Iter, Obj\"\niter = 0\n-while(continue == 1 & iter < maxiterations) {\n+continue = TRUE\n+while(continue & iter < maxiterations) {\n# minimizing primal obj along direction s\nstep_sz = 0\nXd = X %*% s\nwd = lambda * sum(w * s)\ndd = lambda * sum(s * s)\n- continue1 = 1\n- while(continue1 == 1){\n+\n+ continue1 = TRUE\n+ while(continue1){\ntmp_Xw = Xw + step_sz*Xd\nout = 1 - Y * (tmp_Xw)\nsv = (out > 0)\n@@ -133,9 +133,8 @@ while(continue == 1 & iter < maxiterations) {\ng = wd + step_sz*dd - sum(out * Y * Xd)\nh = dd + sum(Xd * sv * Xd)\nstep_sz = step_sz - g/h\n- if 
(g*g/h < 0.0000000001){\n- continue1 = 0\n- }\n+\n+ continue1 = (gg/h >= 0.0000000001);\n}\n#update weights\n@@ -152,19 +151,13 @@ while(continue == 1 & iter < maxiterations) {\ndebug_str = append(debug_str, iter + \",\" + obj)\ntmp = sum(s * g_old)\n- if(step_sz*tmp < epsilon*obj){\n- continue = 0\n- }\n+ continue = (step_sz*tmp >= epsilon*obj & sum(s^2) != 0);\n#non-linear CG step\nbe = sum(g_new * g_new)/sum(g_old * g_old)\ns = be * s + g_new\ng_old = g_new\n- if(sum(s^2) == 0){\n- continue = 0\n- }\n-\niter = iter + 1\n}\n@@ -174,8 +167,11 @@ extra_model_params[2,1] = negative_label\nextra_model_params[3,1] = intercept\nextra_model_params[4,1] = dimensions\n+weights = w\nw = t(cbind(t(w), t(extra_model_params)))\nwrite(w, $model, format=cmdLine_fmt)\n+# write(extra_model_params, \" \", format=cmdLine_fmt)\n+# write(weights, \" \", format=cmdLine_fmt)\nlogFile = $Log\nif(logFile != \" \") {\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/m-svm-predict.dml", "new_path": "scripts/algorithms/m-svm-predict.dml", "diff": "# accuracy (%) for the predictions\n#\n# Example Usage:\n-# hadoop jar SystemML.jar -f m-svm-predict.dml -nvargs X=data Y=labels model=model scores=scores accuracy=accuracy confusion=confusion fmt=\"text\"\n+# hadoop jar SystemML.jar -f m-svm-predict.dml -nvargs X=data Y=labels scoring_only=FALSE model=model scores=scores accuracy=accuracy confusion=confusion fmt=\"text\"\n#\ncmdLine_Y = ifdef($Y, \" \")\ncmdLine_confusion = ifdef($confusion, \" \")\ncmdLine_accuracy = ifdef($accuracy, \" \")\ncmdLine_scores = ifdef($scores, \" \")\n+cmdLine_scoring_only = ifdef($scoring_only, FALSE)\ncmdLine_fmt = ifdef($fmt, \"text\")\nX = read($X);\n@@ -59,26 +60,26 @@ scores = X %*% W[1:m,] + ones %*% b;\nif(cmdLine_scores != \" \")\nwrite(scores, cmdLine_scores, format=cmdLine_fmt);\n-if(cmdLine_Y != \" \"){\n- y = read(cmdLine_Y);\n+if(!cmdLine_scoring_only){\n+ Y = read(cmdLine_Y);\n- if(min(y) < 1)\n+ if(min(Y) < 1)\nstop(\"Stopping due to invalid argument: Label vector (Y) must be recoded\")\npred = rowIndexMax(scores);\n- correct_percentage = sum((pred - y) == 0) / N * 100;\n+ correct_percentage = sum((pred - Y) == 0) / N * 100;\nacc_str = \"Accuracy (%): \" + correct_percentage\nprint(acc_str)\nif(cmdLine_accuracy != \" \")\nwrite(acc_str, cmdLine_accuracy)\n- num_classes_ground_truth = max(y)\n+ num_classes_ground_truth = max(Y)\nif(num_classes < num_classes_ground_truth)\nnum_classes = num_classes_ground_truth\nif(cmdLine_confusion != \" \"){\n- confusion_mat = table(y, pred, num_classes, num_classes)\n+ confusion_mat = table(Y, pred, num_classes, num_classes)\nwrite(confusion_mat, cmdLine_confusion, format=\"csv\")\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/m-svm.dml", "new_path": "scripts/algorithms/m-svm.dml", "diff": "# Assume input and output directories are on hdfs as INPUT_DIR and OUTPUT_DIR\n# Assume epsilon = 0.001, lambda=1.0, max_iterations = 100\n#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Location to read the matrix X of feature vectors\n+# Y String --- Location to read response matrix Y\n+# icpt Int 0 Intercept presence\n+# 0 = no intercept\n+# 1 = add intercept;\n+# tol Double 0.001 Tolerance (epsilon);\n+# reg Double 1.0 Regularization parameter\n+# maxiter Int 100 Maximum number of conjugate 
gradient iterations\n+# model String --- Location to write model\n+# fmt String \"text\" The output format of the output, such as \"text\" or \"csv\"\n+# Log String --- [OPTIONAL] Location to write the log file\n+# ---------------------------------------------------------------------------------------------\n+#\n# hadoop jar SystemML.jar -f $SVM_HOME/m-svm.dml -nvargs X=$INPUT_DIR/X Y=$INPUT_DIR/y icpt=intercept tol=.001 reg=1.0 maxiter=100 model=$OUTPUT_DIR/w Log=$OUTPUT_DIR/Log fmt=\"text\"\n#\n@@ -57,9 +74,11 @@ if(intercept != 0 & intercept != 1)\nmin_y = min(Y)\nif(min_y < 1)\nstop(\"Stopping due to invalid argument: Label vector (Y) must be recoded\")\n+\nnum_classes = max(Y)\nif(num_classes == 1)\nstop(\"Stopping due to invalid argument: Maximum label value is 1, need more than one class to learn a multi-class classifier\")\n+\nmod1 = Y %% 1\nmod1_should_be_nrow = sum(abs(mod1 == 0))\nif(mod1_should_be_nrow != nrow(Y))\n@@ -92,9 +111,11 @@ if(intercept == 1){\nw = matrix(0, rows=num_rows_in_w, cols=num_classes)\ndebug_mat = matrix(-1, rows=max_iterations, cols=num_classes)\n+\nparfor(iter_class in 1:num_classes){\nY_local = 2 * (Y == iter_class) - 1\nw_class = matrix(0, rows=num_features, cols=1)\n+\nif (intercept == 1) {\nzero_matrix = matrix(0, rows=1, cols=1);\nw_class = t(cbind(t(w_class), zero_matrix));\n@@ -105,15 +126,16 @@ parfor(iter_class in 1:num_classes){\nXw = matrix(0, rows=nrow(X), cols=1)\niter = 0\n- continue = 1\n- while(continue == 1) {\n+ continue = TRUE\n+ while(continue & iter < maxiterations) {\n# minimizing primal obj along direction s\nstep_sz = 0\nXd = X %*% s\nwd = lambda * sum(w_class * s)\ndd = lambda * sum(s * s)\n- continue1 = 1\n- while(continue1 == 1){\n+\n+ continue1 = TRUE\n+ while(continue1){\ntmp_Xw = Xw + step_sz*Xd\nout = 1 - Y_local * (tmp_Xw)\nsv = (out > 0)\n@@ -121,9 +143,9 @@ parfor(iter_class in 1:num_classes){\ng = wd + step_sz*dd - sum(out * Y_local * Xd)\nh = dd + sum(Xd * sv * Xd)\nstep_sz = step_sz - g/h\n- if (g*g/h < 0.0000000001){\n- continue1 = 0\n- }\n+\n+ continue1 = (g*g/h >= 0.0000000001)\n+\n}\n#update weights\n@@ -142,30 +164,27 @@ parfor(iter_class in 1:num_classes){\nprint(\"For class \" + iter_class + \" iteration \" + iter + \" training accuracy: \" + train_acc)\ndebug_mat[iter+1,iter_class] = obj\n- if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){\n- continue = 0\n- }\n+ continue = (step_sz*tmp >= epsilon*obj & sum(s^2) != 0);\n#non-linear CG step\nbe = sum(g_new * g_new)/sum(g_old * g_old)\ns = be * s + g_new\ng_old = g_new\n- if(sum(s^2) == 0){\n- continue = 0\n- }\n-\niter = iter + 1\n}\nw[,iter_class] = w_class\n-}\n+} # parfor loop\nextra_model_params = matrix(0, rows=2, cols=ncol(w))\nextra_model_params[1, 1] = intercept\nextra_model_params[2, 1] = dimensions\n+weights = w\nw = t(cbind(t(w), t(extra_model_params)))\nwrite(w, $model, format=cmdLine_fmt)\n+# write(extra_model_params, \" \", format=cmdLine_fmt)\n+# write(weights, \" \", format=cmdLine_fmt)\ndebug_str = \"# Class, Iter, Obj\"\nfor(iter_class in 1:ncol(debug_mat)){\n@@ -175,7 +194,8 @@ for(iter_class in 1:ncol(debug_mat)){\ndebug_str = append(debug_str, iter_class + \",\" + iter + \",\" + obj)\n}\n}\n+\nlogFile = $Log\n-if(logFile != \" \") {\n+if(logFile != \" \")\nwrite(debug_str, logFile)\n-}\n\\ No newline at end of file\n+\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1648] Making SVM scripts work with MLContext Closes #687.
49,738
24.10.2017 19:48:07
25,200
70ab072ae764a9abffaead3431ca11e8e1efec68
[HOTFIX][SYSTEMML-1648] Fix l2svm and msvm algorithm scripts This patch fixes the recently changed l2svm and msvm algorithm scripts with regard to (1) use of non-existing variables, (2) corrupted convergence checks (before update), and (3) various smaller issues (unused variables, commented code, formatting).
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/l2-svm.dml", "new_path": "scripts/algorithms/l2-svm.dml", "diff": "@@ -134,7 +134,7 @@ while(continue & iter < maxiterations) {\nh = dd + sum(Xd * sv * Xd)\nstep_sz = step_sz - g/h\n- continue1 = (gg/h >= 0.0000000001);\n+ continue1 = (g*g/h >= 0.0000000001);\n}\n#update weights\n@@ -151,14 +151,15 @@ while(continue & iter < maxiterations) {\ndebug_str = append(debug_str, iter + \",\" + obj)\ntmp = sum(s * g_old)\n- continue = (step_sz*tmp >= epsilon*obj & sum(s^2) != 0);\n#non-linear CG step\nbe = sum(g_new * g_new)/sum(g_old * g_old)\ns = be * s + g_new\ng_old = g_new\n+ continue = (step_sz*tmp >= epsilon*obj & sum(s^2) != 0);\niter = iter + 1\n+\n}\nextra_model_params = matrix(0, rows=4, cols=1)\n@@ -167,11 +168,8 @@ extra_model_params[2,1] = negative_label\nextra_model_params[3,1] = intercept\nextra_model_params[4,1] = dimensions\n-weights = w\nw = t(cbind(t(w), t(extra_model_params)))\nwrite(w, $model, format=cmdLine_fmt)\n-# write(extra_model_params, \" \", format=cmdLine_fmt)\n-# write(weights, \" \", format=cmdLine_fmt)\nlogFile = $Log\nif(logFile != \" \") {\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/m-svm.dml", "new_path": "scripts/algorithms/m-svm.dml", "diff": "# Example Usage:\n# Assume SVM_HOME is set to the home of the dml script\n# Assume input and output directories are on hdfs as INPUT_DIR and OUTPUT_DIR\n-# Assume epsilon = 0.001, lambda=1.0, max_iterations = 100\n+# Assume epsilon = 0.001, lambda=1.0, maxiterations = 100\n#\n# INPUT PARAMETERS:\n# ---------------------------------------------------------------------------------------------\n@@ -92,8 +92,8 @@ lambda = cmdLine_reg\nif(lambda < 0)\nstop(\"Stopping due to invalid argument: Regularization constant (reg) must be non-negative\")\n-max_iterations = cmdLine_maxiter\n-if(max_iterations < 1)\n+maxiterations = cmdLine_maxiter\n+if(maxiterations < 1)\nstop(\"Stopping due to invalid argument: Maximum iterations should be a positive integer\")\nnum_samples = nrow(X)\n@@ -110,7 +110,7 @@ if(intercept == 1){\n}\nw = matrix(0, rows=num_rows_in_w, cols=num_classes)\n-debug_mat = matrix(-1, rows=max_iterations, cols=num_classes)\n+debug_mat = matrix(-1, rows=maxiterations, cols=num_classes)\nparfor(iter_class in 1:num_classes){\nY_local = 2 * (Y == iter_class) - 1\n@@ -145,7 +145,6 @@ parfor(iter_class in 1:num_classes){\nstep_sz = step_sz - g/h\ncontinue1 = (g*g/h >= 0.0000000001)\n-\n}\n#update weights\n@@ -164,13 +163,12 @@ parfor(iter_class in 1:num_classes){\nprint(\"For class \" + iter_class + \" iteration \" + iter + \" training accuracy: \" + train_acc)\ndebug_mat[iter+1,iter_class] = obj\n- continue = (step_sz*tmp >= epsilon*obj & sum(s^2) != 0);\n-\n#non-linear CG step\nbe = sum(g_new * g_new)/sum(g_old * g_old)\ns = be * s + g_new\ng_old = g_new\n+ continue = (step_sz*tmp >= epsilon*obj & sum(s^2) != 0);\niter = iter + 1\n}\n@@ -180,11 +178,9 @@ parfor(iter_class in 1:num_classes){\nextra_model_params = matrix(0, rows=2, cols=ncol(w))\nextra_model_params[1, 1] = intercept\nextra_model_params[2, 1] = dimensions\n-weights = w\n+\nw = t(cbind(t(w), t(extra_model_params)))\nwrite(w, $model, format=cmdLine_fmt)\n-# write(extra_model_params, \" \", format=cmdLine_fmt)\n-# write(weights, \" \", format=cmdLine_fmt)\ndebug_str = \"# Class, Iter, Obj\"\nfor(iter_class in 1:ncol(debug_mat)){\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-1648] Fix l2svm and msvm algorithm scripts This patch fixes the recently changed l2svm and msvm algorithm scripts with regard to (1) use of non-existing variables, (2) corrupted convergence checks (before update), and (3) various smaller issues (unused variables, commented code, formatting).
49,738
24.10.2017 20:39:22
25,200
8f4ecdce23780a4b820cb79865322d05ba1b9411
[SYSTEMML-1903,1968] Fix codegen row templates w/ partial unknowns After recent codegen optimizer changes, GLM was failing during initial compilation when used through JMLC. The reason was an incorrect handling of partial unknowns that led to vector operations although the output was known to be scalar.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -362,6 +362,7 @@ public class TemplateRow extends TemplateBase\nif( (hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1)\n|| (hop.getInput().get(1).getDim1() > 1 && hop.getInput().get(1).getDim2() > 1)\n|| (!(hop.dimsKnown() && hop.getInput().get(0).dimsKnown() && hop.getInput().get(1).dimsKnown())\n+ && (hop.getDim2() != 1) //not a known vector output\n&& (cdata1.getDataType().isMatrix() || cdata2.getDataType().isMatrix())))\n{\nif( HopRewriteUtils.isBinary(hop, SUPPORTED_VECT_BINARY) ) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1903,1968] Fix codegen row templates w/ partial unknowns After recent codegen optimizer changes, GLM was failing during initial compilation when used through JMLC. The reason was an incorrect handling of partial unknowns that led to vector operations although the output was known to be scalar.
49,736
25.10.2017 15:40:21
25,200
881caa9ba508b029f72f27d468bb33805704c7cb
Include the memory requirement of each layer in the summary table of Caffe2DML This helps the user to estimate the batch size she should set for optimal performance.
[ { "change_type": "MODIFY", "old_path": "docs/beginners-guide-caffe2dml.md", "new_path": "docs/beginners-guide-caffe2dml.md", "diff": "@@ -64,22 +64,27 @@ lenet.summary()\nOutput:\n```\n-+-----+---------------+--------------+------------+---------+-----------+---------+\n-| Name| Type| Output| Weight| Bias| Top| Bottom|\n-+-----+---------------+--------------+------------+---------+-----------+---------+\n-|mnist| Data| (, 1, 28, 28)| | |mnist,mnist| |\n-|conv1| Convolution|(, 32, 28, 28)| [32 X 25]| [32 X 1]| conv1| mnist|\n-|relu1| ReLU|(, 32, 28, 28)| | | relu1| conv1|\n-|pool1| Pooling|(, 32, 14, 14)| | | pool1| relu1|\n-|conv2| Convolution|(, 64, 14, 14)| [64 X 800]| [64 X 1]| conv2| pool1|\n-|relu2| ReLU|(, 64, 14, 14)| | | relu2| conv2|\n-|pool2| Pooling| (, 64, 7, 7)| | | pool2| relu2|\n-| ip1| InnerProduct| (, 512, 1, 1)|[3136 X 512]|[1 X 512]| ip1| pool2|\n-|relu3| ReLU| (, 512, 1, 1)| | | relu3| ip1|\n-|drop1| Dropout| (, 512, 1, 1)| | | drop1| relu3|\n-| ip2| InnerProduct| (, 10, 1, 1)| [512 X 10]| [1 X 10]| ip2| drop1|\n-| loss|SoftmaxWithLoss| (, 10, 1, 1)| | | loss|ip2,mnist|\n-+-----+---------------+--------------+------------+---------+-----------+---------+\n++-----+---------------+--------------+------------+---------+-----------+---------+--------------------+\n+| Name| Type| Output| Weight| Bias| Top| Bottom|Memory* (train/test)|\n++-----+---------------+--------------+------------+---------+-----------+---------+--------------------+\n+|mnist| Data| (, 1, 28, 28)| | |mnist,mnist| | 1/0|\n+|conv1| Convolution|(, 32, 28, 28)| [32 X 25]| [32 X 1]| conv1| mnist| 25/12|\n+|relu1| ReLU|(, 32, 28, 28)| | | relu1| conv1| 25/12|\n+|pool1| Pooling|(, 32, 14, 14)| | | pool1| relu1| 6/3|\n+|conv2| Convolution|(, 64, 14, 14)| [64 X 800]| [64 X 1]| conv2| pool1| 38/7|\n+|relu2| ReLU|(, 64, 14, 14)| | | relu2| conv2| 12/6|\n+|pool2| Pooling| (, 64, 7, 7)| | | pool2| relu2| 3/2|\n+| ip1| InnerProduct| (, 512, 1, 1)|[3136 X 512]|[1 X 512]| ip1| pool2| 797/13|\n+|relu3| ReLU| (, 512, 1, 1)| | | relu3| ip1| 1/0|\n+|drop1| Dropout| (, 512, 1, 1)| | | drop1| relu3| 1/0|\n+| ip2| InnerProduct| (, 10, 1, 1)| [512 X 10]| [1 X 10]| ip2| drop1| 3/0|\n+| loss|SoftmaxWithLoss| (, 10, 1, 1)| | | loss|ip2,mnist| 0/0|\n++-----+---------------+--------------+------------+---------+-----------+---------+--------------------+\n+\n+Total number of layer outputs/errors/weights/bias/gradients: 5568768/5568768/1662752/618/106455680\n+Total memory requirements for parameters* for train/test: 910/55\n+[Advanced] Key network statistics to compute intermediate CP overhead batchSize/maxThreads/1-thread im2col*(sum, max)/1-thread reshape_col*(sum, max): 64/48/(1, 1)/(0, 0).\n+* => memory in megabytes assuming the parameters are in double precision and in dense format.\n```\nTo train the above lenet model, we use the MNIST dataset.\n" }, { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala", "new_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala", "diff": "@@ -50,6 +50,8 @@ import java.util.Random\nimport org.apache.commons.logging.Log\nimport org.apache.commons.logging.LogFactory\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer\n+import org.apache.sysml.hops.OptimizerUtils\n+import java.lang.Double\n/***************************************************************************************\nDESIGN OF CAFFE2DML:\n@@ -306,10 +308,21 @@ class Caffe2DML(val sc: SparkContext,\ndef getTrainAlgo(): String = if 
(inputs.containsKey(\"$train_algo\")) inputs.get(\"$train_algo\") else \"minibatch\"\ndef getTestAlgo(): String = if (inputs.containsKey(\"$test_algo\")) inputs.get(\"$test_algo\") else \"minibatch\"\n+ private def getMemInBytes(l:CaffeLayer, batchSize:Int, isTraining:Boolean):Long = {\n+ val numLayerOutput = l.outputShape._1.toLong * l.outputShape._2.toLong * l.outputShape._3.toLong * batchSize\n+ val numLayerError = numLayerOutput\n+ val numLayerWeights = if(l.weightShape != null) l.weightShape()(0).toLong * l.weightShape()(1).toLong else 0\n+ val numLayerBias = if(l.biasShape != null)l.biasShape()(0).toLong * l.biasShape()(1).toLong else 0\n+ val numLayerGradients = (numLayerWeights + numLayerBias) * batchSize\n+ if(isTraining) (numLayerOutput + numLayerError + numLayerWeights + numLayerBias + numLayerGradients)*Double.BYTES\n+ else (numLayerOutput + numLayerWeights + numLayerBias)*Double.BYTES\n+ }\ndef summary(sparkSession: org.apache.spark.sql.SparkSession): Unit = {\n- val header = Seq(\"Name\", \"Type\", \"Output\", \"Weight\", \"Bias\", \"Top\", \"Bottom\")\n- val entries = net.getLayers\n- .map(l => (l, net.getCaffeLayer(l)))\n+ val layers = net.getLayers .map(l => (l, net.getCaffeLayer(l)))\n+ val numDataLayers = layers.filter(l => l._2.isInstanceOf[Data]).length\n+ val batchSize = if(numDataLayers == 1) layers.filter(l => l._2.isInstanceOf[Data]).map(l => l._2.param.getDataParam.getBatchSize).get(0) else -1\n+ val header = Seq(\"Name\", \"Type\", \"Output\", \"Weight\", \"Bias\", \"Top\", \"Bottom\", \"Memory* (train/test)\")\n+ val entries = layers\n.map(l => {\nval layer = l._2\n(l._1,\n@@ -318,10 +331,35 @@ class Caffe2DML(val sc: SparkContext,\nif (layer.weightShape != null) \"[\" + layer.weightShape()(0) + \" X \" + layer.weightShape()(1) + \"]\" else \"\",\nif (layer.biasShape != null) \"[\" + layer.biasShape()(0) + \" X \" + layer.biasShape()(1) + \"]\" else \"\",\nlayer.param.getTopList.mkString(\",\"),\n- layer.param.getBottomList.mkString(\",\"))\n+ layer.param.getBottomList.mkString(\",\"),\n+ OptimizerUtils.toMB(getMemInBytes(l._2, batchSize, true)) + \"/\" + OptimizerUtils.toMB(getMemInBytes(l._2, batchSize, false))\n+ )\n})\nimport sparkSession.implicits._\nsc.parallelize(entries).toDF(header: _*).show(net.getLayers.size)\n+\n+ val numLayerOutput = layers.map(l => l._2.outputShape._1.toLong * l._2.outputShape._2.toLong * l._2.outputShape._3.toLong).sum * batchSize\n+ val numLayerError = numLayerOutput\n+ val numLayerWeights = layers.map(l => if(l._2.weightShape != null) l._2.weightShape()(0).toLong * l._2.weightShape()(1).toLong else 0).sum\n+ val numLayerBias = layers.map(l => if(l._2.biasShape != null) l._2.biasShape()(0).toLong * l._2.biasShape()(1).toLong else 0).sum\n+ val numLayerGradients = (numLayerWeights + numLayerBias) * batchSize\n+ val convLayers = layers.filter(l => l._2.isInstanceOf[Convolution]).map(l => l._2.asInstanceOf[Convolution])\n+ val crspq = convLayers.map(l => l.numChannels.toLong*l.kernel_h.toLong*l.kernel_w.toLong*l.outputShape._2.toLong*l.outputShape._3.toLong)\n+ val kpq = convLayers.map(l => l.outputShape._1.toLong*l.outputShape._2.toLong*l.outputShape._3.toLong)\n+\n+ if(getTrainAlgo().equals(\"minibatch\") && getTestAlgo().equals(\"minibatch\")) {\n+ System.out.println(\"Total number of layer outputs/errors/weights/bias/gradients: \" + numLayerOutput + \"/\" + numLayerError +\n+ \"/\" + numLayerWeights + \"/\" + numLayerBias + \"/\" + numLayerGradients)\n+ System.out.println(\"Total memory requirements for parameters* for 
train/test: \" +\n+ OptimizerUtils.toMB(layers.map(l => getMemInBytes(l._2, batchSize, true)).sum) + \"/\" +\n+ OptimizerUtils.toMB(layers.map(l => getMemInBytes(l._2, batchSize, false)).sum))\n+ System.out.println(\"[Advanced] Key network statistics to compute intermediate CP overhead \" +\n+ \"batchSize/maxThreads/1-thread im2col*(sum, max)/1-thread reshape_col*(sum, max): \" +\n+ batchSize + \"/\" + OptimizerUtils.getConstrainedNumThreads(-1) + \"/(\" +\n+ OptimizerUtils.toMB(crspq.sum*Double.BYTES) + \", \" + OptimizerUtils.toMB(crspq.max*Double.BYTES) + \")/(\" +\n+ OptimizerUtils.toMB(kpq.sum*Double.BYTES) + \", \" + OptimizerUtils.toMB(kpq.max*Double.BYTES) + \").\")\n+ }\n+ System.out.println(\"* => memory in megabytes assuming the parameters are in double precision and in dense format.\")\n}\n// ================================================================================================\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Include the memory requirement of each layer in the summary table of Caffe2DML - This helps the user to estimate the batch size she should set for optimal performance.
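The per-layer numbers in the new Memory column reduce to a plain cell count; a Java sketch of the estimate, assuming dense double-precision storage and with out already including the batch dimension:

    // Training keeps outputs, errors (same size as outputs), weights, bias,
    // and per-batch gradients; inference keeps only outputs, weights, bias.
    static long memInBytes(long out, long weights, long bias, long batchSize, boolean training) {
        long gradients = (weights + bias) * batchSize;
        long cells = training ? (2 * out + weights + bias + gradients)
                              : (out + weights + bias);
        return cells * Double.BYTES; // 8 bytes per double
    }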
49,736
25.10.2017 19:57:28
25,200
d3917effd988de0e0977a310c73c4f232214632e
Bugfix for GPU sparse right indexing with empty output
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ExecutionConfig.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/ExecutionConfig.java", "diff": "@@ -69,38 +69,14 @@ public class ExecutionConfig {\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static ExecutionConfig getConfigForSimpleVectorOperations(int numCells) throws DMLRuntimeException {\n+ if(numCells == 0)\n+ throw new DMLRuntimeException(\"Attempting to invoke a kernel with 0 threads\");\nint deviceNumber = 0;\nint blockDimX = getMaxBlockDim(deviceNumber);\nint gridDimX = (int) Math.ceil((double) numCells / blockDimX);\nreturn new ExecutionConfig(gridDimX, blockDimX);\n}\n- /**\n- * Use this for simple matrix operations and use following in the kernel\n- * <code>\n- * int ix = blockIdx.x * blockDim.x + threadIdx.x;\n- * int iy = blockIdx.y * blockDim.y + threadIdx.y;\n- * </code>\n- * <p>\n- * This tries to schedule as minimum grids as possible.\n- *\n- * @param rlen number of rows\n- * @param clen number of columns\n- * @return execution configuration\n- * @throws DMLRuntimeException if DMLRuntimeException occurs\n- */\n- public static ExecutionConfig getConfigForMatrixOperations(int rlen, int clen) throws DMLRuntimeException {\n- int deviceNumber = 0;\n- int maxBlockDim = getMaxBlockDim(deviceNumber);\n- int blockDimX = (int) Math.min(maxBlockDim, rlen);\n- int gridDimX = (int) Math.ceil((double) rlen / blockDimX);\n- int blockDimY = (int) Math.min(Math.floor(((double) maxBlockDim) / blockDimX), clen);\n- int gridDimY = (int) Math.ceil((double) clen / blockDimY);\n- if (gridDimY > 65535)\n- throw new DMLRuntimeException(\"Internal Error: gridDimY must be less than 65535 for all supported CUDA compute capabilites!\");\n- return new ExecutionConfig(gridDimX, gridDimY, blockDimX, blockDimY);\n- }\n-\n/**\n* Use this for simple vector operations and use following in the kernel\n* <code>\n@@ -116,7 +92,6 @@ public class ExecutionConfig {\nreturn getConfigForSimpleVectorOperations(rlen * clen);\n}\n-\npublic ExecutionConfig(int gridDimX, int blockDimX) {\nthis.gridDimX = gridDimX;\nthis.blockDimX = blockDimX;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -1821,17 +1821,19 @@ public class LibMatrixCUDA {\n*/\nprotected static void sliceSparseDense(GPUContext gCtx, String instName, CSRPointer inPointer, Pointer outPointer,\nint rl, int ru, int cl, int cu, int inClen) throws DMLRuntimeException {\n+ int size = getNnz(inPointer, rl, ru);\n+ // Return since nnz of the output is 0 as outPointer is expected to be zeroed out.\n+ if(size == 0) return;\n+\nint retRlen = ru - rl + 1;\nlong t0 = GPUStatistics.DISPLAY_STATISTICS ? System.nanoTime() : 0;\nint retClen = cu - cl + 1;\n- int size = -1; String kernel = null; String timer = null;\n-\n+ String kernel = null; String timer = null;\n// Note: row-wise parallelization scheme iterates over input rows in single thread\n// whereas nnz parallelization scheme iterates over number of output rows in single thread.\nif(inClen > 10 && retClen > 2*retRlen) {\n// Perform nnz parallelization for wide and short matrices\n- size = getNnz(inPointer, rl, ru);\ntimer = GPUInstruction.MISC_TIMER_RIX_SPARSE_DENSE_OP_NNZ;\nkernel = \"slice_sparse_dense_nnz\";\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-446] Bugfix for GPU sparse right indexing with empty output
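The zero-cell guard matters because a grid with zero blocks is an invalid CUDA launch configuration; with the early return for empty slices above, the launcher is never reached with numCells == 0. A condensed form of the one-thread-per-cell configuration, mirroring getConfigForSimpleVectorOperations:

    int blockDimX = getMaxBlockDim(deviceNumber);                  // threads per block
    int gridDimX = (int) Math.ceil((double) numCells / blockDimX); // blocks per grid
    // kernels index cells as: blockIdx.x * blockDim.x + threadIdx.x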
49,736
25.10.2017 20:29:55
25,200
f040674661ae818d0379abbcac624a726d3b3e3a
[MINOR] Enable single precision GPU tests
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/GPUTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/GPUTests.java", "diff": "@@ -55,7 +55,7 @@ public abstract class GPUTests extends AutomatedTestBase {\nprivate static final boolean PRINT_MAT_ERROR = false;\n// We will use this flag until lower precision is supported on CP.\n- private final static String DATA_TYPE = \"double\";\n+ private final static String FLOATING_POINT_PRECISION = \"double\";\nprotected final double SINGLE_PRECISION_THRESHOLD = 1e-3; // for relative error\n@@ -75,9 +75,9 @@ public abstract class GPUTests extends AutomatedTestBase {\n* @return a valid threshold\n*/\nprotected double getTHRESHOLD() {\n- if(DATA_TYPE.equals(\"double\")) return DOUBLE_PRECISION_THRESHOLD;\n- else if(DATA_TYPE.equals(\"float\")) return SINGLE_PRECISION_THRESHOLD;\n- else throw new RuntimeException(\"Unsupported datatype:\" + DATA_TYPE);\n+ if(FLOATING_POINT_PRECISION.equals(\"double\")) return DOUBLE_PRECISION_THRESHOLD;\n+ else if(FLOATING_POINT_PRECISION.equals(\"single\")) return SINGLE_PRECISION_THRESHOLD;\n+ else throw new RuntimeException(\"Unsupported precision:\" + FLOATING_POINT_PRECISION);\n}\n@After\n@@ -263,7 +263,7 @@ public abstract class GPUTests extends AutomatedTestBase {\nformat.format(\n\"Relative error(%f) is more than threshold (%f). Expected = %f, Actual = %f, differed at [%d, %d]\",\nrelativeError, getTHRESHOLD(), expectedDouble, actualDouble, i, j);\n- if(DATA_TYPE.equals(\"double\"))\n+ if(FLOATING_POINT_PRECISION.equals(\"double\"))\nAssert.assertTrue(format.toString(), relativeError < getTHRESHOLD());\nelse\nAssert.assertTrue(format.toString(), relativeError < getTHRESHOLD() || absoluteError < getTHRESHOLD());\n@@ -324,7 +324,7 @@ public abstract class GPUTests extends AutomatedTestBase {\nprotected List<Object> runOnGPU(SparkSession spark, String scriptStr, Map<String, Object> inputs,\nList<String> outStrs) {\nMLContext gpuMLC = new MLContext(spark);\n- gpuMLC.setConfigProperty(\"sysml.gpu.dataType\", DATA_TYPE);\n+ gpuMLC.setConfigProperty(\"sysml.floating.point.precision\", FLOATING_POINT_PRECISION);\ngpuMLC.setGPU(true);\ngpuMLC.setForceGPU(true);\ngpuMLC.setStatistics(true);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Enable single precision GPU tests
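The renamed flag feeds two places: the MLContext configuration (sysml.floating.point.precision) and the tolerance used when comparing CPU and GPU results — relative error only for double precision, relative or absolute error for single. A small sketch of that tolerance logic, assuming a representative double-precision threshold of 1e-9 (the diff does not show its actual value):

public class PrecisionTolerance {
    static final double SINGLE_PRECISION_THRESHOLD = 1e-3;
    static final double DOUBLE_PRECISION_THRESHOLD = 1e-9; // assumed, not shown in the diff

    static double threshold(String precision) {
        if (precision.equals("double")) return DOUBLE_PRECISION_THRESHOLD;
        else if (precision.equals("single")) return SINGLE_PRECISION_THRESHOLD;
        else throw new RuntimeException("Unsupported precision: " + precision);
    }

    static boolean withinTolerance(String precision, double expected, double actual) {
        double abs = Math.abs(expected - actual);
        double rel = abs / Math.max(Math.abs(expected), Double.MIN_NORMAL);
        // double: relative error only; single: relative OR absolute error suffices
        return precision.equals("double") ? rel < threshold(precision)
            : rel < threshold(precision) || abs < threshold(precision);
    }

    public static void main(String[] args) {
        System.out.println(withinTolerance("single", 1.0, 1.0005)); // true  (rel ~5e-4)
        System.out.println(withinTolerance("double", 1.0, 1.0005)); // false
    }
}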
49,736
28.10.2017 13:44:37
25,200
06d5bb073792345f7c4b7ecd0fb4454a335cc421
Avoid redundant computation of cudnnPoolingForward in max_pool_backward. If the max_pool is invoked in the forward pass, then its output can be reused by the max_pool_backward rather than calling cudnnPoolingForward again. For sentence CNN with 2 epochs, this reduces the time for max_pool_backward from 6.361 to 2.966 seconds. Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "@@ -47,14 +47,23 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nprivate static final boolean THROW_ERROR_IF_INFERRED_SHAPE_MISMATCH = true;\n// -------------------------------------------------------------------------\n+ // Specifies the type of this hop\nprivate Hop.ConvOp op;\n-\nprivate int _maxNumThreads = -1; //-1 for unlimited\nprivate ConvolutionOp() {\n//default constructor for clone\n}\n+ /**\n+ * Create a hop from the builtin expression\n+ *\n+ * @param l name of the hop\n+ * @param dt datatype (only supports matrix datatype)\n+ * @param vt valuetype (only supports matrix valuetype)\n+ * @param o type of this hop\n+ * @param inp input hops\n+ */\npublic ConvolutionOp(String l, DataType dt, ValueType vt, ConvOp o, ArrayList<Hop> inp)\n{\nsuper(l, dt, vt);\n@@ -75,8 +84,7 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nHopsException.check(_input.size() >= 1, this, \"should have at least one input but has %d inputs\", _input.size());\n}\n- public ConvOp getOp()\n- {\n+ public ConvOp getOp() {\nreturn op;\n}\n@@ -163,77 +171,129 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nreturn input instanceof ConvolutionOp && ((ConvolutionOp) input).getOp() == ConvOp.DIRECT_CONV2D;\n}\n+ /**\n+ * Compares the input parameters for max_pool/max_pool_backward operations\n+ *\n+ * @return true if the following parameters match: stride=[stride, stride], padding=[pad, pad], input_shape=[numImg, numChannels, imgSize, imgSize], pool_size=[poolSize1, poolSize2]\n+ */\n+ private static boolean isPoolingParametersEqualAndKnown(ConvolutionParameters param1, ConvolutionParameters param2) {\n+ return isEqualAndKnown(param1.stride_h, param2.stride_h) && isEqualAndKnown(param1.stride_w, param2.stride_w) &&\n+ isEqualAndKnown(param1.pad_h, param2.pad_h) && isEqualAndKnown(param1.pad_w, param2.pad_w) &&\n+ isEqualAndKnown(param1.R, param2.R) && isEqualAndKnown(param1.S, param2.S) &&\n+ isEqualAndKnown(param1.N, param2.N) && isEqualAndKnown(param1.C, param2.C) &&\n+ isEqualAndKnown(param1.H, param2.H) && isEqualAndKnown(param1.W, param2.W);\n+ }\n+\n+ private static boolean isEqualAndKnown(int val1, int val2) {\n+ return val1 >= 0 && val2 >= 0 && val1 == val2;\n+ }\n+\n+ /**\n+ * Returns the output lop of maxpool operation with same parameters as this hop.\n+ * If corresponding output lop is not found or if this is not a max_pool_backward operation, this function returns null\n+ *\n+ * @return output lop of maxpool operation with same parameters as this hop\n+ * @throws HopsException if error\n+ * @throws LopsException if error\n+ */\n+ private Lop getMaxPoolOutputLop() throws HopsException, LopsException {\n+ if(op != ConvOp.MAX_POOLING_BACKWARD)\n+ return null;\n+\n+ Hop inputImage = getInput().get(0);\n+ for(Hop tmpParent : inputImage.getParent()) {\n+ if(!(tmpParent instanceof ConvolutionOp))\n+ continue;\n+ ConvolutionOp parent = (ConvolutionOp) tmpParent;\n+ if(parent.getOp() == ConvOp.MAX_POOLING && isPoolingParametersEqualAndKnown(parent._cachedParams, _cachedParams)) {\n+ return parent.constructLops();\n+ }\n+ }\n+ return null;\n+ }\n+\npublic Lop constructConvolutionLops(ExecType et, ArrayList<Hop> inputs) throws HopsException, LopsException {\nif(inputs.size() != getNumExpectedInputs())\nthrow new HopsException(\"Incorrect number of inputs for \" + 
op.name());\n- Lop in = null; Lop in2 = null;\n- ArrayList<Hop> inputs1 = inputs;\n- int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n+ // ---------------------------------------------------------------\n+ // Deal with fused operators and contruct lhsInputLop/optionalRhsInputLop\n+ Lop lhsInputLop = null; Lop optionalRhsInputLop = null;\n+ ArrayList<Hop> inputsOfPotentiallyFusedOp = inputs;\nOperationTypes lopOp = HopsConv2Lops.get(op);\n// RELU_MAX_POOLING and RELU_MAX_POOLING_BACKWARD is extremely useful for CP backend\n// by reducing unnecessary sparse-to-dense-to-sparse conversion.\n// For other backends, this operators is not necessary as it reduces an additional relu operator.\nif(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {\n- in = inputs.get(0).getInput().get(0).constructLops();\n+ lhsInputLop = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\nelse if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING_BACKWARD && isInputReLU(inputs.get(0))) {\n- in = inputs.get(0).getInput().get(0).constructLops();\n+ lhsInputLop = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING_BACKWARD;\n}\nelse if(OptimizerUtils.ALLOW_OPERATOR_FUSION && op == ConvOp.BIAS_ADD && isInputConv2d(inputs.get(0))) {\nlopOp = OperationTypes.DIRECT_CONV2D_BIAS_ADD;\n// the first lop is image\n- in = inputs.get(0).getInput().get(0).constructLops();\n+ lhsInputLop = inputs.get(0).getInput().get(0).constructLops();\n// the second lop is bias\n- in2 = inputs.get(1).constructLops();\n+ optionalRhsInputLop = inputs.get(1).constructLops();\n// Use the inputs from conv2d rather than bias_add\n- inputs1 = inputs.get(0).getInput();\n+ inputsOfPotentiallyFusedOp = inputs.get(0).getInput();\n}\nelse {\n- in = inputs.get(0).constructLops();\n+ lhsInputLop = inputs.get(0).constructLops();\n}\n+ // ---------------------------------------------------------------\n-// // TODO: Inserting reblock requires knowing columns apriori\n-// ConvolutionTransform transform1 = new ConvolutionTransform(addReblockIfNecessary(et, lopOp, in), lopOp, getDataType(), getValueType(), et, k);\n-// setReblockedOutputDimension(et, transform1);\n- double cpIntermediateMemEstimate = computeIntermediateMemEstimate(-1, -1, -1 );\n+ // ---------------------------------------------------------------\n+ // Compute intermediate memory budget that can be passed to GPU operators\n+ // for better CuDNN operator selection at runtime\n+ double intermediateMemEstimate = computeIntermediateMemEstimate(-1, -1, -1 );\nif(et == ExecType.GPU && _dim1 > 0 && _dim2 > 0) {\n// This enables us to compile more efficient matrix-matrix CuDNN operation instead of\n// row-by-row invocation of multiple vector-matrix CuDNN operations.\n// This is possible as the operations on GPU are single-threaded\ndouble optimisticIntermediateMemEstimate = GPUContextPool.initialGPUMemBudget() - getOutputMemEstimate() - inputs.get(0).getOutputMemEstimate();\n- if(in2 != null) {\n+ if(optionalRhsInputLop != null) {\noptimisticIntermediateMemEstimate -= inputs.get(1).getOutputMemEstimate();\n}\n- cpIntermediateMemEstimate = Math.max(cpIntermediateMemEstimate, optimisticIntermediateMemEstimate);\n+ intermediateMemEstimate = Math.max(intermediateMemEstimate, optimisticIntermediateMemEstimate);\n}\n- ConvolutionTransform transform1 = new ConvolutionTransform(in, lopOp, getDataType(), getValueType(), 
et, k, cpIntermediateMemEstimate);\n- setOutputDimensions(transform1);\n+ // ---------------------------------------------------------------\n- setLineNumbers(transform1);\n- in.addOutput(transform1);\n+ // Contruct the lop\n+ ConvolutionTransform convolutionLop = new ConvolutionTransform(lhsInputLop, lopOp,\n+ getDataType(), getValueType(), et, OptimizerUtils.getConstrainedNumThreads(_maxNumThreads), intermediateMemEstimate);\n- if(in2 != null) {\n- transform1.addInput(in2);\n- in2.addOutput(transform1);\n- }\n+ // Propagate the output dimensions and the line number of ConvolutionOp to ConvolutionTransform\n+ setOutputDimensions(convolutionLop);\n+ setLineNumbers(convolutionLop);\n- // stride1, stride2, padding1, padding2\n- // input_shape1, input_shape2, input_shape3, input_shape4,\n- // filter_shape1, filter_shape2, filter_shape3, filter_shape4\n- for( int i=1; i < inputs1.size(); i++ )\n- {\n- Lop ltmp = inputs1.get(i).constructLops();\n- transform1.addInput(ltmp);\n- ltmp.addOutput(transform1);\n- }\n- transform1.setLevel(); //force order of added lops\n- return transform1;\n+ // ---------------------------------------------------------------\n+ // Add input/output for parent lops of convolutionLop\n+ lhsInputLop.addOutput(convolutionLop);\n+ if(optionalRhsInputLop != null) {\n+ convolutionLop.addInput(optionalRhsInputLop);\n+ optionalRhsInputLop.addOutput(convolutionLop);\n+ }\n+ for( int i=1; i < inputsOfPotentiallyFusedOp.size(); i++ ) {\n+ Lop ltmp = inputsOfPotentiallyFusedOp.get(i).constructLops();\n+ convolutionLop.addInput(ltmp);\n+ ltmp.addOutput(convolutionLop);\n+ }\n+ // Only valid for MAX_POOLING_BACKWARD on GPU\n+ Lop optionalMaxPoolOutput = (et == ExecType.GPU) ? getMaxPoolOutputLop() : null;\n+ if(optionalMaxPoolOutput != null) {\n+ convolutionLop.addInput(optionalMaxPoolOutput);\n+ optionalMaxPoolOutput.addOutput(convolutionLop);\n+ }\n+ convolutionLop.setLevel(); //force order of added lops\n+ // ---------------------------------------------------------------\n+ return convolutionLop;\n}\n@@ -453,12 +513,10 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nExecType REMOTE = OptimizerUtils.isSparkExecutionMode() ? 
ExecType.SPARK : ExecType.MR;\n- if( _etypeForced != null )\n- {\n+ if( _etypeForced != null ) {\n_etype = _etypeForced;\n}\n- else\n- {\n+ else {\nif ( OptimizerUtils.isMemoryBasedOptLevel() ) {\n_etype = findExecTypeByMemEstimate();\n}\n@@ -479,8 +537,9 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nreturn _etype;\n}\n- // Caching parameters speed-ups dynamic recompilation time by avoiding unnecessary computeSizeInformation\n+ // Parameters recomputed in refreshSizeInformation and passed across many calls of getDim\nprivate ConvolutionParameters _cachedParams = new ConvolutionParameters(-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, _maxNumThreads);\n+\n// stride1, stride2, padding1, padding2\n// input_shape1, input_shape2, input_shape3, input_shape4,\n// filter_shape1, filter_shape2, filter_shape3, filter_shape4\n@@ -494,16 +553,16 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nimageHeightHop = getInput().get(8);\nfilterHeightHop = getInput().get(12);\n_cachedParams.setIfUnknown(\n- getInput().get(6),\n- getInput().get(7),\n- imageHeightHop,\n- getInput().get(9),\n- getInput().get(10),\n- filterHeightHop,\n- getInput().get(13),\n- getInput().get(2),\n- getInput().get(3),\n- getInput().get(4),\n+ getInput().get(6), // N\n+ getInput().get(7), // C\n+ imageHeightHop, // H\n+ getInput().get(9), // W\n+ getInput().get(10), // K\n+ filterHeightHop, // R\n+ getInput().get(13), // S\n+ getInput().get(2), // stride_h\n+ getInput().get(3), // stride_w\n+ getInput().get(4), // pad+h\ngetInput().get(5), _maxNumThreads);\n}\nelse {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ConvolutionGPUInstruction.java", "diff": "@@ -92,8 +92,7 @@ public class ConvolutionGPUInstruction extends GPUInstruction {\nif( ( opcode.equalsIgnoreCase(\"conv2d\")\n|| opcode.equalsIgnoreCase(\"conv2d_backward_filter\")\n- || opcode.equalsIgnoreCase(\"conv2d_backward_data\")\n- || opcode.equalsIgnoreCase(\"maxpooling_backward\")) ) {\n+ || opcode.equalsIgnoreCase(\"conv2d_backward_data\")) ) {\nInstructionUtils.checkNumFields(parts, 16);\nCPOperand in1 = new CPOperand(parts[1]);\nCPOperand in2 = new CPOperand(parts[2]);\n@@ -119,6 +118,39 @@ public class ConvolutionGPUInstruction extends GPUInstruction {\nreturn new ConvolutionGPUInstruction(in1, in2, out, opcode, str, stride,\npadding, input_shape, filter_shape, Double.parseDouble(parts[16]));\n}\n+ else if( opcode.equalsIgnoreCase(\"maxpooling_backward\") ) {\n+ boolean withMaxPoolOut = false;\n+ if(parts.length == 18) {\n+ withMaxPoolOut = true;\n+ }\n+ else\n+ InstructionUtils.checkNumFields(parts, 16);\n+ CPOperand in1 = new CPOperand(parts[1]);\n+ CPOperand in2 = new CPOperand(parts[2]);\n+ CPOperand in3 = withMaxPoolOut ? new CPOperand(parts[15]) : null;\n+ CPOperand out = withMaxPoolOut ? new CPOperand(parts[16]) : new CPOperand(parts[15]);\n+ double memBudget = withMaxPoolOut ? 
Double.parseDouble(parts[17]) : Double.parseDouble(parts[16]);\n+\n+ ArrayList<CPOperand> stride = new ArrayList<>();\n+ ArrayList<CPOperand> padding = new ArrayList<>();\n+ ArrayList<CPOperand> input_shape = new ArrayList<>();\n+ ArrayList<CPOperand> filter_shape = new ArrayList<>();\n+ stride.add(new CPOperand(parts[3]));\n+ stride.add(new CPOperand(parts[4]));\n+ padding.add(new CPOperand(parts[5]));\n+ padding.add(new CPOperand(parts[6]));\n+ input_shape.add(new CPOperand(parts[7]));\n+ input_shape.add(new CPOperand(parts[8]));\n+ input_shape.add(new CPOperand(parts[9]));\n+ input_shape.add(new CPOperand(parts[10]));\n+ filter_shape.add(new CPOperand(parts[11]));\n+ filter_shape.add(new CPOperand(parts[12]));\n+ filter_shape.add(new CPOperand(parts[13]));\n+ filter_shape.add(new CPOperand(parts[14]));\n+\n+ return new ConvolutionGPUInstruction(in1, in2, in3, out, opcode, str, stride,\n+ padding, input_shape, filter_shape, memBudget);\n+ }\nelse if (opcode.equalsIgnoreCase(\"conv2d_bias_add\")) {\nInstructionUtils.checkNumFields(parts, 17);\nCPOperand in1 = new CPOperand(parts[1]);\n@@ -324,7 +356,7 @@ public class ConvolutionGPUInstruction extends GPUInstruction {\nelse if (instOpcode.equalsIgnoreCase(\"maxpooling_backward\")) {\nMatrixObject image = getMatrixInputForGPUInstruction(ec, _input1.getName());\nMatrixObject dout = getMatrixInputForGPUInstruction(ec, _input2.getName());\n-\n+ MatrixObject maxPoolOutput = _input3 != null ? getMatrixInputForGPUInstruction(ec, _input3.getName()) : null;\nif(dout.getNumRows() != N || dout.getNumColumns() != C*P*Q)\nthrow new DMLRuntimeException(\"Incorrect dimensions for dout in maxpooling_backward\");\nif(image.getNumRows() != N || image.getNumColumns() != C*H*W)\n@@ -333,7 +365,7 @@ public class ConvolutionGPUInstruction extends GPUInstruction {\nMatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, _output.getName(), N, C * H * W);\n- LibMatrixCuDNN.maxpoolingBackward(ec.getGPUContext(0), getExtendedOpcode(), image, dout, out, N, C, H, W,\n+ LibMatrixCuDNN.maxpoolingBackward(ec.getGPUContext(0), getExtendedOpcode(), image, dout, maxPoolOutput, out, N, C, H, W,\nK, R, S, pad_h, pad_w, stride_h, stride_w, P, Q, _intermediateMemoryBudget);\n}\nelse {\n@@ -346,7 +378,8 @@ public class ConvolutionGPUInstruction extends GPUInstruction {\nif ( !instOpcode.equalsIgnoreCase(\"maxpooling\") )\nec.releaseMatrixInputForGPUInstruction(_input2.getName());\n- if (instOpcode.equalsIgnoreCase(\"conv2d_bias_add\"))\n+ if (instOpcode.equalsIgnoreCase(\"conv2d_bias_add\") ||\n+ (instOpcode.equalsIgnoreCase(\"maxpooling_backward\") && _input3 != null))\nec.releaseMatrixInputForGPUInstruction(_input3.getName());\nec.releaseMatrixOutputForGPUInstruction(_output.getName());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -519,6 +519,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n* @param instName the invoking instruction's name for record {@link Statistics}.\n* @param image image as matrix object\n* @param dout delta matrix, output of previous layer\n+ * @param maxpoolOutput (optional and can be null) output of maxpool forward function\n* @param outputBlock output matrix\n* @param N batch size\n* @param C number of channels\n@@ -537,12 +538,14 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static void 
maxpoolingBackward(GPUContext gCtx, String instName, MatrixObject image, MatrixObject dout,\n- MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\n+ MatrixObject maxpoolOutput, MatrixObject outputBlock, int N, int C, int H, int W, int K, int R,\nint S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\nint Q, double intermediateMemoryBudget) throws DMLRuntimeException {\nlong CHW = C*H*W; long CPQ = C*P*Q;\nlong NCHW = N*CHW; long NCPQ = N*CPQ;\n+ final boolean isMaxPoolOutputProvided = maxpoolOutput != null;\n+\nif(NCHW < maxNumElementsOfCuDNNTensor && NCPQ < maxNumElementsOfCuDNNTensor) {\n// Filter and output are accounted as dense in the memory estimation for conv2dBackwardData\nlong overhead = isInSparseFormat(gCtx, image) ? OptimizerUtils.estimateSizeExactSparsity(N, CHW, 1.0) : 0;\n@@ -551,19 +554,26 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nif(overhead <= intermediateMemoryBudget) {\nPointer x = getDensePointerForCuDNN(gCtx, image, instName);\nPointer dy = getDensePointerForCuDNN(gCtx, dout, instName);\n- cudnnMaxpoolingBackward(gCtx, instName, x, dy, dx, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n+ Pointer y = isMaxPoolOutputProvided ? getDensePointerForCuDNN(gCtx, maxpoolOutput, instName) : null;\n+ cudnnMaxpoolingBackward(gCtx, instName, x, dy, y, dx, N, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\nelse {\nLibMatrixCuDNNInputRowFetcher imgFetcher = new LibMatrixCuDNNInputRowFetcher(gCtx, instName, image);\nLibMatrixCuDNNInputRowFetcher doutFetcher = new LibMatrixCuDNNInputRowFetcher(gCtx, instName, dout);\n+ LibMatrixCuDNNInputRowFetcher maxPoolOutFetcher = isMaxPoolOutputProvided ? new LibMatrixCuDNNInputRowFetcher(gCtx, instName, maxpoolOutput) : null;\nfor(int n = 0; n < N; n++) {\n- cudnnMaxpoolingBackward(gCtx, instName, imgFetcher.getNthRow(n), doutFetcher.getNthRow(n),\n+ Pointer x = imgFetcher.getNthRow(n);\n+ Pointer dy = doutFetcher.getNthRow(n);\n+ Pointer y = isMaxPoolOutputProvided ? 
maxPoolOutFetcher.getNthRow(n) : null;\n+ cudnnMaxpoolingBackward(gCtx, instName, x, dy, y,\ndx.withByteOffset(n*CHW*sizeOfDataType),\n1, C, H, W, K, R, S, pad_h, pad_w, stride_h, stride_w, P, Q);\n}\n// Deallocate temporary array to hold one element of input\nimgFetcher.close();\ndoutFetcher.close();\n+ if(isMaxPoolOutputProvided)\n+ maxPoolOutFetcher.close();\n}\n}\nelse {\n@@ -572,36 +582,33 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n}\nprivate static void cudnnMaxpoolingBackward(GPUContext gCtx, String instName,\n- Pointer x, Pointer dy, Pointer dx,\n+ Pointer x, Pointer dy, Pointer y, Pointer dx,\nint N, int C, int H, int W, int K, int R,\nint S, int pad_h, int pad_w, int stride_h, int stride_w, int P,\nint Q) throws DMLRuntimeException {\nif(LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : maxpoolingBackward\" + \", GPUContext=\" + gCtx);\n}\n- Pointer y = null;\n+\n+ boolean isMaxPoolOutputProvided = (y != null);\ntry(LibMatrixCuDNNPoolingDescriptors desc =\nLibMatrixCuDNNPoolingDescriptors.cudnnMaxpoolingBackwardDescriptors(gCtx, instName, N, C, H, W, K, R, S,\npad_h, pad_w, stride_h, stride_w, P, Q)) {\nlong t1=0, t2=0, t3=0;\n+ int status;\n+ if(!isMaxPoolOutputProvided) {\nif (GPUStatistics.DISPLAY_STATISTICS) t1 = System.nanoTime();\n-\n- // Calling PoolForward first, y is one of the inputs for poolBackward\n- // TODO: Remove calling poolForward after necessary changes at language level for poolBackward\nlong numBytes = N*C*P*Q*sizeOfDataType;\ny = gCtx.allocate(numBytes);\n-\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_CUDNN_INIT, System.nanoTime() - t1);\n-\nif (GPUStatistics.DISPLAY_STATISTICS) t2 = System.nanoTime();\n- int status = cudnnPoolingForward(getCudnnHandle(gCtx), desc.poolingDesc, one(), desc.xDesc, x, zero(), desc.yDesc, y);\n+ status = cudnnPoolingForward(getCudnnHandle(gCtx), desc.poolingDesc, one(), desc.xDesc, x, zero(), desc.yDesc, y);\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_MAXPOOLING_FORWARD_LIB, System.nanoTime() - t2);\n-\nif(status != jcuda.jcudnn.cudnnStatus.CUDNN_STATUS_SUCCESS) {\nthrow new DMLRuntimeException(\"Could not executed cudnnPoolingForward before cudnnPoolingBackward: \" + jcuda.jcudnn.cudnnStatus.stringFor(status));\n}\n-\n+ }\nif (GPUStatistics.DISPLAY_STATISTICS) t3 = System.nanoTime();\nstatus = cudnnPoolingBackward(getCudnnHandle(gCtx), desc.poolingDesc, one(), desc.yDesc, y, desc.dyDesc, dy, desc.xDesc, x, zero(), desc.dxDesc, dx);\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_MAXPOOLING_BACKWARD_LIB, System.nanoTime() - t3);\n@@ -615,7 +622,7 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nfinally {\nlong t4=0;\nif (GPUStatistics.DISPLAY_STATISTICS) t4 = System.nanoTime();\n- if(y != null)\n+ if(!isMaxPoolOutputProvided)\ngCtx.cudaFreeHelper(instName, y);\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_CUDNN_CLEANUP, System.nanoTime() - t4);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java", "diff": "@@ -575,6 +575,88 @@ public class NeuralNetworkOpTests extends GPUTests {\n+ }\n+ }\n+ }\n+ }\n+\n+\n+ @Test\n+ @Ignore\n+ public void testMaxPoolBackwardWithMaxpoolOut() {\n+ String scriptStr = \"tmp = max_pool(image, 
padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], pool_size=[R,S]); print(sum(tmp)); O = max_pool_backward(image, dout, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], pool_size=[R,S])\";\n+\n+ for (long N : Nlst) {\n+ for (long C : Clst) {\n+ for (long H : Hlst) {\n+ long W = H;\n+ for (long R : Rlst) {\n+ long S = R;\n+ for (long strideH : strideLst) {\n+ long strideW = strideH;\n+ for (long padH : padLst) {\n+ long padW = padH;\n+ for (double sparsity : sparsitylst) {\n+\n+ // pool is smaller than image + padding\n+ if (R > (H + padH) || S > (W + padW))\n+ continue;\n+\n+ // Make sure ops fit in GPU memory and within constraints of cudnn\n+ long imageSize = N * C * H * W * 8l;\n+ if (imageSize > MAX_OP_SIZE) // image size\n+ continue;\n+ long poolSize = R * S * 8l;\n+ if (poolSize > MAX_OP_SIZE) // filter size\n+ continue;\n+\n+ int P = (int) ConvolutionUtils.getP(H, R, strideH, padH);\n+ int Q = (int) ConvolutionUtils.getQ(W, S, strideW, padW);\n+\n+ long doutSize = N * C * P * Q * 8l;\n+ if (doutSize > MAX_OP_SIZE) // dout/output size\n+ continue;\n+\n+ double imageSizeInMB = imageSize / (1024.0 * 1024.0);\n+ double poolSizeInMB = poolSize / (1024.0 * 1024.0);\n+ double doutSizeInMB = doutSize / (1024.0 * 1024.0);\n+ System.out\n+ .format(\"max_pool_backward, image[%d,%d,%d,%d](%.1fMB), pool[%d,%d](%.1f), dout[%d,%d,%d,%d](%.1fMB), stride[%d,%d], padding[%d,%d]\",\n+ N, C, H, W, imageSizeInMB, R, S, poolSizeInMB, N, C,\n+ P, Q, doutSizeInMB, strideH, strideW, padH, padW);\n+\n+ Matrix image = generateInputMatrix(spark, (int) N,\n+ (int) (C * H * W), -127.0, 127, sparsity, seed, true);\n+ Matrix dout = generateInputMatrix(spark, (int) N, (int) (C * P * Q),\n+ -127.0, 127, sparsity, seed, true);\n+ HashMap<String, Object> inputs = new HashMap<>();\n+ inputs.put(\"N\", N);\n+ inputs.put(\"C\", C);\n+ inputs.put(\"H\", H);\n+ inputs.put(\"W\", W);\n+ inputs.put(\"R\", R);\n+ inputs.put(\"S\", S);\n+ inputs.put(\"strideH\", strideH);\n+ inputs.put(\"strideW\", strideW);\n+ inputs.put(\"padH\", padH);\n+ inputs.put(\"padW\", padW);\n+ inputs.put(\"image\", image);\n+ inputs.put(\"dout\", dout);\n+ List<Object> outCPU = runOnCPU(spark, scriptStr, inputs,\n+ Arrays.asList(\"O\"));\n+ List<Object> outGPU = runOnGPU(spark, scriptStr, inputs,\n+ Arrays.asList(\"O\"));\n+ assertHeavyHitterPresent(\"gpu_maxpooling_backward\");\n+ assertEqualObjects(outCPU.get(0), outGPU.get(0));\n+ clearGPUMemory();\n+ }\n+ }\n+ }\n+ }\n+\n+\n+\n+\n}\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Avoid redundant computation of cudnnPoolingForward in max_pool_backward - If the max_pool is invoked in the forward pass, then its output can be reused by the max_pool_backward rather than calling cudnnPoolingForward again. For sentence CNN with 2 epochs, this reduces the time for max_pool_backward from 6.361 to 2.966 seconds. Closes #691.
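The reuse is only legal when the backward call sees exactly the same pooling geometry as some forward max_pool over the same image, so the rewrite compares stride, padding, filter and input shapes and requires every dimension to be known (non-negative). A compact Java sketch of that matching rule (class names here are illustrative):

public class PoolParamMatch {
    static class PoolParams {
        int strideH, strideW, padH, padW, R, S, N, C, H, W;
        PoolParams(int strideH, int strideW, int padH, int padW,
                   int R, int S, int N, int C, int H, int W) {
            this.strideH = strideH; this.strideW = strideW;
            this.padH = padH; this.padW = padW;
            this.R = R; this.S = S; this.N = N; this.C = C; this.H = H; this.W = W;
        }
    }

    static boolean equalAndKnown(int a, int b) {
        return a >= 0 && b >= 0 && a == b; // -1 encodes "unknown at compile time"
    }

    static boolean canReuseForwardOutput(PoolParams fwd, PoolParams bwd) {
        return equalAndKnown(fwd.strideH, bwd.strideH) && equalAndKnown(fwd.strideW, bwd.strideW)
            && equalAndKnown(fwd.padH, bwd.padH) && equalAndKnown(fwd.padW, bwd.padW)
            && equalAndKnown(fwd.R, bwd.R) && equalAndKnown(fwd.S, bwd.S)
            && equalAndKnown(fwd.N, bwd.N) && equalAndKnown(fwd.C, bwd.C)
            && equalAndKnown(fwd.H, bwd.H) && equalAndKnown(fwd.W, bwd.W);
    }

    public static void main(String[] args) {
        PoolParams a = new PoolParams(2, 2, 0, 0, 3, 3, 64, 3, 32, 32);
        PoolParams b = new PoolParams(2, 2, 0, 0, 3, 3, 64, 3, 32, 32);
        PoolParams c = new PoolParams(2, 2, 0, 0, 3, 3, -1, 3, 32, 32); // N unknown
        System.out.println(canReuseForwardOutput(a, b)); // true
        System.out.println(canReuseForwardOutput(a, c)); // false: unknown dims block reuse
    }
}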
49,738
29.10.2017 16:06:55
25,200
d75a669a46381a0a5b54109e7b207613e17ab54e
[MINOR] Fix consistency task partitioning in mm, mmchain, codegen row
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofRowwise.java", "diff": "@@ -39,7 +39,6 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseRow;\nimport org.apache.sysml.runtime.matrix.data.SparseRowVector;\n-import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class SpoofRowwise extends SpoofOperator\n@@ -198,11 +197,9 @@ public abstract class SpoofRowwise extends SpoofOperator\n//core parallel execute\nExecutorService pool = Executors.newFixedThreadPool( k );\n- int nk = (a instanceof CompressedMatrixBlock) ? k :\n- UtilFunctions.roundToNext(Math.min(8*k,m/32), k);\n- int blklen = (int)(Math.ceil((double)m/nk));\n- if( a instanceof CompressedMatrixBlock )\n- blklen = BitmapEncoder.getAlignedBlocksize(blklen);\n+ ArrayList<Integer> blklens = (a instanceof CompressedMatrixBlock) ?\n+ LibMatrixMult.getAlignedBlockSizes(m, k, BitmapEncoder.BITMAP_BLOCK_SZ) :\n+ LibMatrixMult.getBalancedBlockSizesDefault(m, k, false);\ntry\n{\n@@ -210,8 +207,8 @@ public abstract class SpoofRowwise extends SpoofOperator\n//execute tasks\nArrayList<ParColAggTask> tasks = new ArrayList<>();\nint outLen = out.getNumRows() * out.getNumColumns();\n- for( int i=0; i<nk & i*blklen<m; i++ )\n- tasks.add(new ParColAggTask(a, b, scalars, n, n2, outLen, i*blklen, Math.min((i+1)*blklen, m)));\n+ for( int i=0, lb=0; i<blklens.size(); lb+=blklens.get(i), i++ )\n+ tasks.add(new ParColAggTask(a, b, scalars, n, n2, outLen, lb, lb+blklens.get(i)));\nList<Future<double[]>> taskret = pool.invokeAll(tasks);\n//aggregate partial results\nint len = _type.isColumnAgg() ? out.getNumRows()*out.getNumColumns() : 1;\n@@ -222,8 +219,8 @@ public abstract class SpoofRowwise extends SpoofOperator\nelse {\n//execute tasks\nArrayList<ParExecTask> tasks = new ArrayList<>();\n- for( int i=0; i<nk & i*blklen<m; i++ )\n- tasks.add(new ParExecTask(a, b, out, scalars, n, n2, i*blklen, Math.min((i+1)*blklen, m)));\n+ for( int i=0, lb=0; i<blklens.size(); lb+=blklens.get(i), i++ )\n+ tasks.add(new ParExecTask(a, b, out, scalars, n, n2, lb, lb+blklens.get(i)));\nList<Future<Long>> taskret = pool.invokeAll(tasks);\n//aggregate nnz, no need to aggregate results\nlong nnz = 0;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -209,8 +209,7 @@ public class LibMatrixMult\ntry {\nExecutorService pool = Executors.newFixedThreadPool( k );\nArrayList<MatrixMultTask> tasks = new ArrayList<>();\n- int nk = (pm2r||pm2c) ? 
k : UtilFunctions.roundToNext(Math.min(8*k,num/32), k);\n- ArrayList<Integer> blklens = getBalancedBlockSizes(num, nk);\n+ ArrayList<Integer> blklens = getBalancedBlockSizesDefault(num, k, (pm2r||pm2c));\nfor( int i=0, lb=0; i<blklens.size(); lb+=blklens.get(i), i++ )\ntasks.add(new MatrixMultTask(m1, m2, ret, tm2, pm2r, pm2c, lb, lb+blklens.get(i)));\n//execute tasks\n@@ -321,11 +320,10 @@ public class LibMatrixMult\n//(currently: always parallelization over number of rows)\ntry {\nExecutorService pool = Executors.newFixedThreadPool( k );\n+ ArrayList<Integer> blklens = getBalancedBlockSizesDefault(mX.rlen, k, true);\nArrayList<MatrixMultChainTask> tasks = new ArrayList<>();\n- int blklen = (int)(Math.ceil((double)mX.rlen/k));\n- blklen += (blklen%24 != 0)?24-blklen%24:0;\n- for( int i=0; i<k & i*blklen<mX.rlen; i++ )\n- tasks.add(new MatrixMultChainTask(mX, mV, mW, ct, i*blklen, Math.min((i+1)*blklen, mX.rlen)));\n+ for( int i=0, lb=0; i<blklens.size(); lb+=blklens.get(i), i++ )\n+ tasks.add(new MatrixMultChainTask(mX, mV, mW, ct, lb, lb+blklens.get(i)));\n//execute tasks\nList<Future<double[]>> taskret = pool.invokeAll(tasks);\npool.shutdown();\n@@ -1606,10 +1604,18 @@ public class LibMatrixMult\nfinal int blocksizeI = 24; // constraint: factor of 4\nfinal int blocksizeJ = 1024;\ndouble[] tmp = new double[blocksizeI];\n+ final int bn = (ru-rl) % blocksizeI;\n+\n+ //compute rest (not aligned to blocksize)\n+ for( int i=rl, aix=rl*cd; i < rl+bn; i++, aix+=cd ) {\n+ double val = dotProduct(a, b, aix, 0, cd);\n+ val *= (weights) ? w[i] : 1;\n+ val -= (weights2) ? w[i] : 0;\n+ vectMultiplyAdd(val, a, c, aix, 0, cd);\n+ }\n//blockwise mmchain computation\n- final int bn = ru - ru % blocksizeI; //rl blocksize aligned\n- for( int bi=rl; bi < bn; bi+=blocksizeI )\n+ for( int bi=rl+bn; bi < ru; bi+=blocksizeI )\n{\n//compute 1st matrix-vector for row block\nArrays.fill(tmp, 0);\n@@ -1633,14 +1639,6 @@ public class LibMatrixMult\na, c, aix, aix+cd, aix+2*cd, aix+3*cd, bj, bjmin);\n}\n}\n-\n- //compute rest (not aligned to blocksize)\n- for( int i=bn, aix=bn*cd; i < ru; i++, aix+=cd ) {\n- double val = dotProduct(a, b, aix, 0, cd);\n- val *= (weights) ? w[i] : 1;\n- val -= (weights2) ? w[i] : 0;\n- vectMultiplyAdd(val, a, c, aix, 0, cd);\n- }\n}\nprivate static void matrixMultChainSparse(MatrixBlock mX, MatrixBlock mV, MatrixBlock mW, MatrixBlock ret, ChainType ct, int rl, int ru)\n@@ -3578,7 +3576,7 @@ public class LibMatrixMult\npublic static boolean checkParColumnAgg(MatrixBlock m1, int k, boolean inclFLOPs) {\nreturn (8L * m1.clen * k <= MEM_OVERHEAD_THRESHOLD\n- && (!inclFLOPs || 4L * m1.rlen * m1.clen >= PAR_MINFLOP_THRESHOLD));\n+ && (!inclFLOPs || 4L * m1.rlen * m1.clen / (m1.sparse?2:1) >= PAR_MINFLOP_THRESHOLD));\n}\nprivate static boolean checkParMatrixMultRightInputRows( MatrixBlock m1, MatrixBlock m2, int k ) {\n@@ -3676,6 +3674,20 @@ public class LibMatrixMult\n}\n+ public static ArrayList<Integer> getBalancedBlockSizesDefault(int len, int k, boolean constK) {\n+ int nk = constK ? k : UtilFunctions.roundToNext(Math.min(8*k,len/32), k);\n+ return getBalancedBlockSizes(len, nk);\n+ }\n+\n+ public static ArrayList<Integer> getAlignedBlockSizes(int len, int k, int align) {\n+ int blklen = (int)(Math.ceil((double)len/k));\n+ blklen += ((blklen%align != 0) ? 
align-blklen%align : 0);\n+ ArrayList<Integer> ret = new ArrayList<>();\n+ for(int i=0; i<len; i+=blklen)\n+ ret.add(Math.min(blklen, len-i));\n+ return ret;\n+ }\n+\nprivate static ArrayList<Integer> getBalancedBlockSizes(int len, int k) {\nArrayList<Integer> ret = new ArrayList<>();\nint base = len / k;\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix consistency task partitioning in mm, mmchain, codegen row
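The patch routes all three operators through shared partitioning helpers: one that balances a length over k tasks by spreading the remainder one row at a time, and one that rounds block sizes up to an alignment (e.g., the compressed bitmap block size). A runnable sketch reconstructing both helpers from the diff (the balanced variant's body is only partially visible, so its details are an approximation):

import java.util.ArrayList;

public class BlockPartition {
    static ArrayList<Integer> balancedBlockSizes(int len, int k) {
        ArrayList<Integer> ret = new ArrayList<>();
        int base = len / k, rest = len % k;
        for (int i = 0; i < k && base + (i < rest ? 1 : 0) > 0; i++)
            ret.add(base + (i < rest ? 1 : 0)); // first 'rest' blocks get one extra row
        return ret;
    }

    static ArrayList<Integer> alignedBlockSizes(int len, int k, int align) {
        int blklen = (int) Math.ceil((double) len / k);
        blklen += (blklen % align != 0) ? align - blklen % align : 0; // round up to align
        ArrayList<Integer> ret = new ArrayList<>();
        for (int i = 0; i < len; i += blklen)
            ret.add(Math.min(blklen, len - i)); // last block takes the remainder
        return ret;
    }

    public static void main(String[] args) {
        System.out.println(balancedBlockSizes(10, 3));     // [4, 3, 3]
        System.out.println(alignedBlockSizes(100, 3, 16)); // [48, 48, 4]
    }
}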
49,719
03.11.2017 14:29:13
25,200
f76f2138a4847f2ca52b5bf511907f4838b240b8
[MINOR] added additional examples
[ { "change_type": "MODIFY", "old_path": "samples/jupyter-notebooks/DML Tips and Tricks (aka Fun With DML).ipynb", "new_path": "samples/jupyter-notebooks/DML Tips and Tricks (aka Fun With DML).ipynb", "diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"1. [Cross Validation](#CrossValidation)\\n\",\n+ \"1. [Replace NaN with mode](#NaN2Mode)\\n\",\n+ \"* [Use sample builtin function to create sample from matrix](#sample)\\n\",\n+ \"* [Count of Matching Values in two Matrices/Vectors](#MatchinRows)\\n\",\n+ \"* [Cross Validation](#CrossValidation)\\n\",\n\"* [Value-based join of two Matrices](#JoinMatrices)\\n\",\n\"* [Filter Matrix to include only Frequent Column Values](#FilterMatrix)\\n\",\n\"* [Construct (sparse) Matrix from (rowIndex, colIndex, values) triplets](#Construct_sparse_Matrix)\\n\",\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 15,\n\"metadata\": {\n\"collapsed\": false,\n- \"scrolled\": true\n+ \"scrolled\": false\n},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"2017-08-18 21:33:18 UTC\\n\"\n+ \"2017-09-22 07:57:57 UTC\\n\"\n]\n}\n],\n\"print (ml.buildTime())\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Replace NaN with mode<a id=\\\"NaN2Mode\\\" />\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"This functions replaces NaN in column with mode of column\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 13,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"Before: \\n\",\n+ \"1.000 NaN\\n\",\n+ \"1.000 NaN\\n\",\n+ \"1.000 2.000\\n\",\n+ \"2.000 1.000\\n\",\n+ \"1.000 2.000\\n\",\n+ \"\\n\",\n+ \"After: \\n\",\n+ \"1.000 2.000\\n\",\n+ \"1.000 2.000\\n\",\n+ \"1.000 2.000\\n\",\n+ \"2.000 1.000\\n\",\n+ \"1.000 2.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"prog=\\\"\\\"\\\"\\n\",\n+ \"# Function for NaN-aware replacement with mode\\n\",\n+ \"replaceNaNwithMode = function (matrix[double] X, integer colId) \\n\",\n+ \" return (matrix[double] X) \\n\",\n+ \"{\\n\",\n+ \" Xi = replace (target=X[,colId], pattern=0/0, replacement=max(X[,colId])+1) # replace NaN with largest value + 1\\n\",\n+ \" agg = aggregate (target=Xi, groups=Xi, fn=\\\"count\\\") # count each distinct value\\n\",\n+ \" mode = as.scalar (rowIndexMax(t(agg[1:nrow(agg)-1, ]))) # mode is max frequent value except last value\\n\",\n+ \" X[,colId] = replace (target=Xi, pattern=max(Xi), replacement=mode) # fill in mode\\n\",\n+ \"}\\n\",\n+ \"\\n\",\n+ \"X = matrix('1 NaN 1 NaN 1 2 2 1 1 2', rows = 5, cols = 2)\\n\",\n+ \"\\n\",\n+ \"Y = replaceNaNwithMode (X, 2)\\n\",\n+ \"\\n\",\n+ \"print (\\\"Before: \\\\n\\\" + toString(X))\\n\",\n+ \"print (\\\"After: \\\\n\\\" + toString(Y))\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"with jvm_stdout(True):\\n\",\n+ \" ml.execute(dml(prog))\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Use sample builtin function to create sample from matrix<a id=\\\"sample\\\" />\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Use sample() function, create permutation matrix using table(), and pull sample from X.\"\n+ ]\n+ },\n+ 
{\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 18,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"X: \\n\",\n+ \"2.000 1.000\\n\",\n+ \"8.000 3.000\\n\",\n+ \"5.000 6.000\\n\",\n+ \"7.000 9.000\\n\",\n+ \"4.000 4.000\\n\",\n+ \"\\n\",\n+ \"sv: \\n\",\n+ \"1.000\\n\",\n+ \"4.000\\n\",\n+ \"\\n\",\n+ \"samples: \\n\",\n+ \"2.000 1.000\\n\",\n+ \"7.000 9.000\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"prog=\\\"\\\"\\\"\\n\",\n+ \"X = matrix ('2 1 8 3 5 6 7 9 4 4', rows = 5, cols = 2 )\\n\",\n+ \"\\n\",\n+ \"nbrSamples = 2\\n\",\n+ \"\\n\",\n+ \"sv = order (target = sample (nrow (X), nbrSamples, FALSE)) # samples w/o replacement, and order \\n\",\n+ \"P = table (seq (1, nbrSamples), sv, nbrSamples, nrow(X)) # permutation matrix\\n\",\n+ \"samples = P %*% X; # apply P to perform selection\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"print (\\\"X: \\\\n\\\" + toString(X))\\n\",\n+ \"print (\\\"sv: \\\\n\\\" + toString(sv))\\n\",\n+ \"print (\\\"samples: \\\\n\\\" + toString(samples))\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"with jvm_stdout(True):\\n\",\n+ \" ml.execute(dml(prog))\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Count of Matching Values in two Matrices/Vectors<a id=\\\"MatchingRows\\\" />\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Given two matrices/vectors X and Y, get a count of the rows where X and Y have the same value.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": 19,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [\n+ {\n+ \"name\": \"stdout\",\n+ \"output_type\": \"stream\",\n+ \"text\": [\n+ \"t(X): 8.000 4.000 5.000 4.000 9.000 10.000\\n\",\n+ \"\\n\",\n+ \"t(Y): 4.000 9.000 5.000 1.000 9.000 7.000\\n\",\n+ \"\\n\",\n+ \"Number of Matches: 2.0\\n\",\n+ \"\\n\",\n+ \"SystemML Statistics:\\n\",\n+ \"Total execution time:\\t\\t0.001 sec.\\n\",\n+ \"Number of executed Spark inst:\\t0.\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ }\n+ ],\n+ \"source\": [\n+ \"prog=\\\"\\\"\\\"\\n\",\n+ \"X = matrix('8 4 5 4 9 10', rows = 6, cols = 1)\\n\",\n+ \"Y = matrix('4 9 5 1 9 7 ', rows = 6, cols = 1)\\n\",\n+ \"\\n\",\n+ \"matches = sum (X == Y)\\n\",\n+ \"\\n\",\n+ \"print (\\\"t(X): \\\" + toString(t(X)))\\n\",\n+ \"print (\\\"t(Y): \\\" + toString(t(Y)))\\n\",\n+ \"print (\\\"Number of Matches: \\\" + matches + \\\"\\\\n\\\")\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"with jvm_stdout(True):\\n\",\n+ \" ml.execute(dml(prog))\"\n+ ]\n+ },\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"cell_type\": \"code\",\n\"execution_count\": 4,\n\"metadata\": {\n- \"collapsed\": false\n+ \"collapsed\": true\n},\n\"outputs\": [\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] added additional examples
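The first notebook example replaces NaN with the column mode by mapping NaN to max+1, counting groups, and taking the most frequent value excluding that sentinel. The same idea in plain Java, as a hedged sketch rather than the DML semantics one-for-one (ties resolve by hash order here, not by value):

import java.util.HashMap;
import java.util.Map;

public class NaNToMode {
    static double[] replaceNaNWithMode(double[] col) {
        Map<Double, Integer> counts = new HashMap<>();
        for (double v : col)
            if (!Double.isNaN(v))
                counts.merge(v, 1, Integer::sum); // count each distinct non-NaN value
        double mode = Double.NaN; int best = -1;
        for (Map.Entry<Double, Integer> e : counts.entrySet())
            if (e.getValue() > best) { best = e.getValue(); mode = e.getKey(); }
        double[] out = col.clone();
        for (int i = 0; i < out.length; i++)
            if (Double.isNaN(out[i])) out[i] = mode; // fill in mode
        return out;
    }

    public static void main(String[] args) {
        double[] col = {Double.NaN, Double.NaN, 2, 1, 2};
        System.out.println(java.util.Arrays.toString(replaceNaNWithMode(col)));
        // [2.0, 2.0, 2.0, 1.0, 2.0] — matches the notebook's expected output
    }
}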
49,738
03.11.2017 14:23:50
25,200
14c410ce06f3a5c56d1bcb1ac509fab4a0711f5f
[MINOR] Performance function invocation of dml-bodied UDFs. This patch slightly improved the function invocation performance of dml-bodied UDFs from 452K/s to 521K/s. Furthermore, this also includes a fix of the test for LinregCG over compressed data.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -63,6 +63,10 @@ public class LocalVariableMap implements Cloneable\nreturn localMap.keySet();\n}\n+ public Set<Entry<String, Data>> entrySet() {\n+ return localMap.entrySet();\n+ }\n+\n/**\n* Retrieves the data object given its name.\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -633,7 +633,7 @@ public class ParForProgramBlock extends ForProgramBlock\n//preserve shared input/result variables of cleanup\nArrayList<String> varList = ec.getVarList();\n- HashMap<String, Boolean> varState = ec.pinVariables(varList);\n+ boolean[] varState = ec.pinVariables(varList);\ntry\n{\n@@ -1329,7 +1329,7 @@ public class ParForProgramBlock extends ForProgramBlock\n}\n}\n- private void cleanupSharedVariables( ExecutionContext ec, HashMap<String,Boolean> varState )\n+ private void cleanupSharedVariables( ExecutionContext ec, boolean[] varState )\nthrows DMLRuntimeException\n{\n//TODO needs as precondition a systematic treatment of persistent read information.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java", "diff": "package org.apache.sysml.runtime.controlprogram.context;\nimport java.util.ArrayList;\n-import java.util.HashMap;\nimport java.util.List;\nimport org.apache.commons.logging.Log;\n@@ -151,6 +150,11 @@ public class ExecutionContext {\nreturn _variables.get(name);\n}\n+ public Data getVariable(CPOperand operand) throws DMLRuntimeException {\n+ return operand.getDataType().isScalar() ?\n+ getScalarInput(operand) : getVariable(operand.getName());\n+ }\n+\npublic void setVariable(String name, Data val) {\n_variables.put(name, val);\n}\n@@ -528,30 +532,25 @@ public class ExecutionContext {\n* The function returns the OLD \"clean up\" state of matrix objects.\n*\n* @param varList variable list\n- * @return map of old cleanup state of matrix objects\n+ * @return indicator vector of old cleanup state of matrix objects\n*/\n- public HashMap<String,Boolean> pinVariables(ArrayList<String> varList)\n+ public boolean[] pinVariables(ArrayList<String> varList)\n{\n//2-pass approach since multiple vars might refer to same matrix object\n- HashMap<String, Boolean> varsState = new HashMap<>();\n+ boolean[] varsState = new boolean[varList.size()];\n//step 1) get current information\n- for( String var : varList )\n- {\n- Data dat = _variables.get(var);\n- if( dat instanceof MatrixObject ) {\n- MatrixObject mo = (MatrixObject)dat;\n- varsState.put( var, mo.isCleanupEnabled() );\n- }\n+ for( int i=0; i<varList.size(); i++ ) {\n+ Data dat = _variables.get(varList.get(i));\n+ if( dat instanceof MatrixObject )\n+ varsState[i] = ((MatrixObject)dat).isCleanupEnabled();\n}\n//step 2) pin variables\n- for( String var : varList ) {\n- Data dat = _variables.get(var);\n- if( dat instanceof MatrixObject ) {\n- MatrixObject mo = (MatrixObject)dat;\n- mo.enableCleanup(false);\n- }\n+ for( int i=0; i<varList.size(); i++ ) {\n+ Data dat = _variables.get(varList.get(i));\n+ if( dat instanceof MatrixObject )\n+ 
((MatrixObject)dat).enableCleanup(false);\n}\nreturn varsState;\n@@ -573,11 +572,11 @@ public class ExecutionContext {\n* @param varList variable list\n* @param varsState variable state\n*/\n- public void unpinVariables(ArrayList<String> varList, HashMap<String,Boolean> varsState) {\n- for( String var : varList) {\n- Data dat = _variables.get(var);\n+ public void unpinVariables(ArrayList<String> varList, boolean[] varsState) {\n+ for( int i=0; i<varList.size(); i++ ) {\n+ Data dat = _variables.get(varList.get(i));\nif( dat instanceof MatrixObject )\n- ((MatrixObject)dat).enableCleanup(varsState.get(var));\n+ ((MatrixObject)dat).enableCleanup(varsState[i]);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/FunctionCallCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/FunctionCallCPInstruction.java", "diff": "package org.apache.sysml.runtime.instructions.cp;\nimport java.util.ArrayList;\n-import java.util.Collection;\n-import java.util.HashMap;\nimport java.util.HashSet;\n-import java.util.LinkedList;\n+import java.util.Map.Entry;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.lops.Lop;\n@@ -43,6 +41,21 @@ import org.apache.sysml.runtime.instructions.InstructionUtils;\npublic class FunctionCallCPInstruction extends CPInstruction {\nprivate String _functionName;\nprivate String _namespace;\n+ private final CPOperand[] _boundInputs;\n+ private final ArrayList<String> _boundInputNames;\n+ private final ArrayList<String> _boundOutputNames;\n+ private HashSet<String> _expectRetVars = null;\n+\n+ private FunctionCallCPInstruction(String namespace, String functName, CPOperand[] boundInputs,\n+ ArrayList<String> boundInputNames, ArrayList<String> boundOutputNames, String istr) {\n+ super(null, functName, istr);\n+ _cptype = CPINSTRUCTION_TYPE.External;\n+ _functionName = functName;\n+ _namespace = namespace;\n+ _boundInputs = boundInputs;\n+ _boundInputNames = boundInputNames;\n+ _boundOutputNames = boundOutputNames;\n+ }\npublic String getFunctionName() {\nreturn _functionName;\n@@ -52,23 +65,6 @@ public class FunctionCallCPInstruction extends CPInstruction {\nreturn _namespace;\n}\n- // stores both the bound input and output parameters\n- private ArrayList<CPOperand> _boundInputParamOperands;\n- private ArrayList<String> _boundInputParamNames;\n- private ArrayList<String> _boundOutputParamNames;\n-\n- private FunctionCallCPInstruction(String namespace, String functName, ArrayList<CPOperand> boundInParamOperands,\n- ArrayList<String> boundInParamNames, ArrayList<String> boundOutParamNames, String istr) {\n- super(null, functName, istr);\n- _cptype = CPINSTRUCTION_TYPE.External;\n- _functionName = functName;\n- _namespace = namespace;\n- _boundInputParamOperands = boundInParamOperands;\n- _boundInputParamNames = boundInParamNames;\n- _boundOutputParamNames = boundOutParamNames;\n-\n- }\n-\npublic static FunctionCallCPInstruction parseInstruction(String str)\nthrows DMLRuntimeException\n{\n@@ -78,20 +74,17 @@ public class FunctionCallCPInstruction extends CPInstruction {\nString functionName = parts[2];\nint numInputs = Integer.valueOf(parts[3]);\nint numOutputs = Integer.valueOf(parts[4]);\n- ArrayList<CPOperand> boundInParamOperands = new ArrayList<>();\n- ArrayList<String> boundInParamNames = new ArrayList<>();\n- ArrayList<String> boundOutParamNames = new ArrayList<>();\n+ CPOperand[] boundInputs = new CPOperand[numInputs];\n+ ArrayList<String> boundInputNames = new ArrayList<>();\n+ 
ArrayList<String> boundOutputNames = new ArrayList<>();\nfor (int i = 0; i < numInputs; i++) {\n- CPOperand operand = new CPOperand(parts[5 + i]);\n- boundInParamOperands.add(operand);\n- boundInParamNames.add(operand.getName());\n+ boundInputs[i] = new CPOperand(parts[5 + i]);\n+ boundInputNames.add(boundInputs[i].getName());\n}\n- for (int i = 0; i < numOutputs; i++) {\n- boundOutParamNames.add(parts[5 + numInputs + i]);\n- }\n-\n- return new FunctionCallCPInstruction ( namespace,functionName,\n- boundInParamOperands, boundInParamNames, boundOutParamNames, str );\n+ for (int i = 0; i < numOutputs; i++)\n+ boundOutputNames.add(parts[5 + numInputs + i]);\n+ return new FunctionCallCPInstruction ( namespace,\n+ functionName, boundInputs, boundInputNames, boundOutputNames, str );\n}\n@Override\n@@ -120,10 +113,10 @@ public class FunctionCallCPInstruction extends CPInstruction {\n// get the function program block (stored in the Program object)\nFunctionProgramBlock fpb = ec.getProgram().getFunctionProgramBlock(_namespace, _functionName);\n- // sanity check number of function paramters\n- if( _boundInputParamNames.size() < fpb.getInputParams().size() ) {\n+ // sanity check number of function parameters\n+ if( _boundInputs.length < fpb.getInputParams().size() ) {\nthrow new DMLRuntimeException(\"Number of bound input parameters does not match the function signature \"\n- + \"(\"+_boundInputParamNames.size()+\", but \"+fpb.getInputParams().size()+\" expected)\");\n+ + \"(\"+_boundInputs.length+\", but \"+fpb.getInputParams().size()+\" expected)\");\n}\n// create bindings to formal parameters for given function call\n@@ -131,35 +124,31 @@ public class FunctionCallCPInstruction extends CPInstruction {\nLocalVariableMap functionVariables = new LocalVariableMap();\nfor( int i=0; i<fpb.getInputParams().size(); i++)\n{\n- DataIdentifier currFormalParam = fpb.getInputParams().get(i);\n- String currFormalParamName = currFormalParam.getName();\n- Data currFormalParamValue = null;\n-\n- CPOperand operand = _boundInputParamOperands.get(i);\n- String varname = operand.getName();\n//error handling non-existing variables\n- if( !operand.isLiteral() && !ec.containsVariable(varname) ) {\n- throw new DMLRuntimeException(\"Input variable '\"+varname+\"' not existing on call of \" +\n+ CPOperand input = _boundInputs[i];\n+ if( !input.isLiteral() && !ec.containsVariable(input.getName()) ) {\n+ throw new DMLRuntimeException(\"Input variable '\"+input.getName()+\"' not existing on call of \" +\nDMLProgram.constructFunctionKey(_namespace, _functionName) + \" (line \"+getLineNum()+\").\");\n}\n//get input matrix/frame/scalar\n- currFormalParamValue = (operand.getDataType()!=DataType.SCALAR) ? 
ec.getVariable(varname) :\n- ec.getScalarInput(varname, operand.getValueType(), operand.isLiteral());\n+ DataIdentifier currFormalParam = fpb.getInputParams().get(i);\n+ Data value = ec.getVariable(input);\n//graceful value type conversion for scalar inputs with wrong type\n- if( currFormalParamValue.getDataType() == DataType.SCALAR\n- && currFormalParamValue.getValueType() != currFormalParam.getValueType() )\n+ if( value.getDataType() == DataType.SCALAR\n+ && value.getValueType() != currFormalParam.getValueType() )\n{\n- currFormalParamValue = ScalarObjectFactory.createScalarObject(\n- currFormalParam.getValueType(), (ScalarObject) currFormalParamValue);\n+ value = ScalarObjectFactory.createScalarObject(\n+ currFormalParam.getValueType(), (ScalarObject)value);\n}\n- functionVariables.put(currFormalParamName, currFormalParamValue);\n+ //set input parameter\n+ functionVariables.put(currFormalParam.getName(), value);\n}\n// Pin the input variables so that they do not get deleted\n// from pb's symbol table at the end of execution of function\n- HashMap<String,Boolean> pinStatus = ec.pinVariables(_boundInputParamNames);\n+ boolean[] pinStatus = ec.pinVariables(_boundInputNames);\n// Create a symbol table under a new execution context for the function invocation,\n// and copy the function arguments into the created table.\n@@ -182,29 +171,29 @@ public class FunctionCallCPInstruction extends CPInstruction {\nString fname = DMLProgram.constructFunctionKey(_namespace, _functionName);\nthrow new DMLRuntimeException(\"error executing function \" + fname, e);\n}\n- LocalVariableMap retVars = fn_ec.getVariables();\n// cleanup all returned variables w/o binding\n- Collection<String> retVarnames = new LinkedList<>(retVars.keySet());\n- HashSet<String> probeVars = new HashSet<>();\n+ if( _expectRetVars == null ) {\n+ _expectRetVars = new HashSet<>();\nfor(DataIdentifier di : fpb.getOutputParams())\n- probeVars.add(di.getName());\n- for( String var : retVarnames ) {\n- if( !probeVars.contains(var) ) //cleanup candidate\n- {\n- Data dat = fn_ec.removeVariable(var);\n- if( dat != null && dat instanceof MatrixObject )\n- fn_ec.cleanupMatrixObject((MatrixObject)dat);\n+ _expectRetVars.add(di.getName());\n}\n+\n+ LocalVariableMap retVars = fn_ec.getVariables();\n+ for( Entry<String,Data> var : retVars.entrySet() ) {\n+ if( _expectRetVars.contains(var.getKey()) )\n+ continue;\n+ //cleanup unexpected return values to avoid leaks\n+ if( var.getValue() instanceof MatrixObject )\n+ fn_ec.cleanupMatrixObject((MatrixObject)var.getValue());\n}\n// Unpin the pinned variables\n- ec.unpinVariables(_boundInputParamNames, pinStatus);\n+ ec.unpinVariables(_boundInputNames, pinStatus);\n// add the updated binding for each return variable to the variables in original symbol table\nfor (int i=0; i< fpb.getOutputParams().size(); i++){\n-\n- String boundVarName = _boundOutputParamNames.get(i);\n+ String boundVarName = _boundOutputNames.get(i);\nData boundValue = retVars.get(fpb.getOutputParams().get(i).getName());\nif (boundValue == null)\nthrow new DMLRuntimeException(boundVarName + \" was not assigned a return value\");\n@@ -240,14 +229,12 @@ public class FunctionCallCPInstruction extends CPInstruction {\nLOG.debug(\"ExternalBuiltInFunction: \" + this.toString());\n}\n- public ArrayList<String> getBoundInputParamNames()\n- {\n- return _boundInputParamNames;\n+ public ArrayList<String> getBoundInputParamNames() {\n+ return _boundInputNames;\n}\n- public ArrayList<String> getBoundOutputParamNames()\n- {\n- return 
_boundOutputParamNames;\n+ public ArrayList<String> getBoundOutputParamNames() {\n+ return _boundOutputNames;\n}\npublic void setFunctionName(String fname)\n@@ -277,6 +264,4 @@ public class FunctionCallCPInstruction extends CPInstruction {\nreturn sb.substring( 0, sb.length()-Lop.OPERAND_DELIMITOR.length() );\n}\n-\n-\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/compress/CompressedLinregCG.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/compress/CompressedLinregCG.java", "diff": "@@ -100,23 +100,22 @@ public class CompressedLinregCG extends AutomatedTestBase\ntry\n{\n- String TEST_NAME = testname;\n- TestConfiguration config = getTestConfiguration(TEST_NAME);\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n/* This is for running the junit test the new way, i.e., construct the arguments directly */\n- String HOME = SCRIPT_DIR + \"functions/codegen/\";\n+ String HOME1 = SCRIPT_DIR + \"functions/compress/\";\n+ String HOME2 = SCRIPT_DIR + \"functions/codegen/\";\nfullDMLScriptName = \"scripts/algorithms/LinearRegCG.dml\";\nprogramArgs = new String[]{ \"-explain\", \"-stats\", \"-nvargs\", \"X=\"+input(\"X\"), \"Y=\"+input(\"y\"),\n\"icpt=\"+String.valueOf(intercept), \"tol=\"+String.valueOf(epsilon),\n\"maxi=\"+String.valueOf(maxiter), \"reg=\"+String.valueOf(regular), \"B=\"+output(\"w\")};\n- fullRScriptName = HOME + \"Algorithm_LinregCG.R\";\n+ fullRScriptName = HOME2 + \"Algorithm_LinregCG.R\";\nrCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" +\n- HOME + INPUT_DIR + \" \" +\n+ HOME1 + INPUT_DIR + \" \" +\nString.valueOf(intercept) + \" \" + String.valueOf(epsilon) + \" \" +\n- String.valueOf(maxiter) + \" \" + String.valueOf(regular) + HOME + EXPECTED_DIR;\n-\n- loadTestConfiguration(config);\n+ String.valueOf(maxiter) + \" \" + String.valueOf(regular) + \" \"+ HOME1 + EXPECTED_DIR;\n//generate actual datasets\ndouble[][] X = getRandomMatrix(rows, cols, 1, 1, sparse?sparsity2:sparsity1, 7);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance function invocation of dml-bodied UDFs. This patch slightly improved the function invocation performance of dml-bodied UDFs from 452K/s to 521K/s. Furthermore, this also includes a fix of the test for LinregCG over compressed data.
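Besides caching the expected return-variable set, the patch swaps the HashMap<String,Boolean> pin-state for a boolean[] aligned positionally with the variable list, avoiding map allocation and hashing on every function call. A sketch of that pattern; the two-pass snapshot matters because several names may alias one matrix object:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

public class PinUnpin {
    static class MObj { boolean cleanup = true; } // stand-in for a matrix object
    static final Map<String, MObj> vars = new HashMap<>();

    static boolean[] pin(ArrayList<String> varList) {
        boolean[] state = new boolean[varList.size()];
        for (int i = 0; i < varList.size(); i++) {   // pass 1: snapshot old flags
            MObj mo = vars.get(varList.get(i));
            if (mo != null) state[i] = mo.cleanup;
        }
        for (int i = 0; i < varList.size(); i++) {   // pass 2: disable cleanup
            MObj mo = vars.get(varList.get(i));
            if (mo != null) mo.cleanup = false;
        }
        return state;
    }

    static void unpin(ArrayList<String> varList, boolean[] state) {
        for (int i = 0; i < varList.size(); i++) {
            MObj mo = vars.get(varList.get(i));
            if (mo != null) mo.cleanup = state[i];   // restore old flag
        }
    }

    public static void main(String[] args) {
        vars.put("X", new MObj());
        ArrayList<String> list = new ArrayList<>(java.util.Arrays.asList("X", "X"));
        boolean[] s = pin(list);                     // two aliases of the same object
        System.out.println(vars.get("X").cleanup);   // false while pinned
        unpin(list, s);
        System.out.println(vars.get("X").cleanup);   // true again
    }
}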
49,738
03.11.2017 18:04:29
25,200
4f60ded3543b0475f8e2bf987febc33e66e2652e
[MINOR] Performance library UDF rowClassMeet (sparse inputs, grouping). This patch makes some minor performance improvements to the existing rowClassMeet UDF, by (1) using the codegen sparse side inputs for more efficient, zero-copy access, and (2) a more time- and memory-efficient hash grouping approach.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/CodegenUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/CodegenUtils.java", "diff": "@@ -46,7 +46,10 @@ import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.hops.codegen.SpoofCompiler;\nimport org.apache.sysml.hops.codegen.SpoofCompiler.CompilerType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.codegen.SpoofOperator.SideInput;\n+import org.apache.sysml.runtime.codegen.SpoofOperator.SideInputSparseCell;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.LocalFileUtils;\nimport org.apache.sysml.utils.Statistics;\nimport org.codehaus.janino.SimpleCompiler;\n@@ -152,6 +155,13 @@ public class CodegenUtils\nreturn ret;\n}\n+ public static SideInput createSideInput(MatrixBlock in) {\n+ SideInput ret = (in.isInSparseFormat() || !in.isAllocated()) ?\n+ new SideInput(null, in, in.getNumColumns()) :\n+ new SideInput(in.getDenseBlock(), null, in.getNumColumns());\n+ return (ret.mdat != null) ? new SideInputSparseCell(ret) : ret;\n+ }\n+\n////////////////////////////\n//JANINO-specific methods (used for spark environments)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java", "diff": "@@ -249,6 +249,10 @@ public abstract class SpoofOperator implements Serializable\npublic double[] values(int r) {\nreturn ddat;\n}\n+ public double getValue(int r, int c) {\n+ return SpoofOperator.getValue(this, clen, r, c);\n+ }\n+ public void reset() {}\n}\npublic static class SideInputSparseRow extends SideInput {\n@@ -315,5 +319,9 @@ public abstract class SpoofOperator implements Serializable\nreturn (currColPos < currLen && indexes[currColPos]==colIndex) ?\nvalues[currColPos] : 0;\n}\n+ @Override\n+ public void reset() {\n+ currColPos = 0;\n+ }\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/udf/lib/RowClassMeet.java", "new_path": "src/main/java/org/apache/sysml/udf/lib/RowClassMeet.java", "diff": "package org.apache.sysml.udf.lib;\nimport java.io.IOException;\n-import java.util.ArrayList;\n-import java.util.Arrays;\n-import java.util.Comparator;\n-import java.util.Iterator;\n+import java.util.HashMap;\nimport java.util.Map.Entry;\n-import java.util.TreeMap;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.controlprogram.caching.CacheException;\n-import org.apache.sysml.runtime.matrix.data.IJV;\n+import org.apache.sysml.runtime.codegen.CodegenUtils;\n+import org.apache.sysml.runtime.codegen.SpoofOperator.SideInput;\n+import org.apache.sysml.runtime.compress.utils.IntArrayList;\nimport org.apache.sysml.runtime.matrix.data.InputInfo;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.udf.FunctionParameter;\nimport org.apache.sysml.udf.Matrix;\nimport org.apache.sysml.udf.PackageFunction;\nimport org.apache.sysml.udf.Matrix.ValueType;\n+\n/**\n* Performs following operation:\n* # Computes the intersection (\"meet\") of equivalence classes for\n@@ -73,8 +72,6 @@ public class RowClassMeet extends PackageFunction {\nprivate static final long serialVersionUID = 1L;\nprivate Matrix CMat, NMat;\n- private 
MatrixBlock A, B, C, N;\n- private int nr, nc;\n@Override\npublic int getNumFunctionOutputs() {\n@@ -83,124 +80,75 @@ public class RowClassMeet extends PackageFunction {\n@Override\npublic FunctionParameter getFunctionOutput(int pos) {\n- if(pos == 0)\n- return CMat;\n- else if(pos == 1)\n- return NMat;\n- else\n+ switch( pos ) {\n+ case 0: return CMat;\n+ case 1: return NMat;\n+ default:\nthrow new RuntimeException(\"RowClassMeet produces only one output\");\n}\n-\n-\n- public class ClassLabels {\n- public double aVal;\n- public double bVal;\n- public ClassLabels(double aVal, double bVal) {\n- this.aVal = aVal;\n- this.bVal = bVal;\n- }\n- }\n-\n- public class ClassLabelComparator implements Comparator<ClassLabels> {\n- Integer tmp1, tmp2;\n- @Override\n- public int compare(ClassLabels o1, ClassLabels o2) {\n- if(o1.aVal != o2.aVal) {\n- tmp1 = (int) o1.aVal;\n- tmp2 = (int) o2.aVal;\n- }\n- else {\n- tmp1 = (int) o1.bVal;\n- tmp2 = (int) o2.bVal;\n- }\n- return tmp1.compareTo(tmp2);\n- }\n- }\n-\n- double [] getRow(MatrixBlock B, double [] bRow, int i) {\n- if(B.getNumRows() == 1)\n- i = 0;\n- Arrays.fill(bRow, 0);\n- if(B.isInSparseFormat()) {\n- Iterator<IJV> iter = B.getSparseBlockIterator(i, i+1);\n- while(iter.hasNext()) {\n- IJV ijv = iter.next();\n- bRow[ijv.getJ()] = ijv.getV();\n- }\n- }\n- else {\n- double [] denseBlk = B.getDenseBlock();\n- if(denseBlk != null)\n- System.arraycopy(denseBlk, i*B.getNumColumns(), bRow, 0, B.getNumColumns());\n- }\n- return bRow;\n}\n@Override\npublic void execute() {\n- try {\n- A = ((Matrix) getFunctionInput(0)).getMatrixObject().acquireRead();\n- B = ((Matrix) getFunctionInput(1)).getMatrixObject().acquireRead();\n- nr = Math.max(A.getNumRows(), B.getNumRows());\n- nc = Math.max(A.getNumColumns(), B.getNumColumns());\n-\n- double [] bRow = new double[B.getNumColumns()];\n- CMat = new Matrix( createOutputFilePathAndName( \"TMP\" ), nr, nc, ValueType.Double );\n- C = new MatrixBlock(nr, nc, false);\n- C.allocateDenseBlock();\n- NMat = new Matrix( createOutputFilePathAndName( \"TMP\" ), nr, nc, ValueType.Double );\n- N = new MatrixBlock(nr, nc, false);\n- N.allocateDenseBlock();\n-\n- double [] cBlk = C.getDenseBlock();\n- double [] nBlk = N.getDenseBlock();\n-\n- if(B.getNumRows() == 1)\n- getRow(B, bRow, 0);\n-\n- for(int i = 0; i < A.getNumRows(); i++) {\n- if(B.getNumRows() != 1)\n- getRow(B, bRow, i);\n-\n- // Create class labels\n- TreeMap<ClassLabels, ArrayList<Integer>> classLabelMapping = new TreeMap<>(new ClassLabelComparator());\n+ try\n+ {\n+ MatrixBlock A = ((Matrix) getFunctionInput(0)).getMatrixObject().acquireRead();\n+ MatrixBlock B = ((Matrix) getFunctionInput(1)).getMatrixObject().acquireRead();\n+ int nr = Math.max(A.getNumRows(), B.getNumRows());\n+ int nc = Math.max(A.getNumColumns(), B.getNumColumns());\n+ MatrixBlock C = new MatrixBlock(nr, nc, false).allocateBlock();\n+ MatrixBlock N = new MatrixBlock(nr, nc, false).allocateBlock();\n+ double[] dC = C.getDenseBlock();\n+ double[] dN = N.getDenseBlock();\n+ //wrap both A and B into side inputs for efficient sparse access\n+ SideInput sB = CodegenUtils.createSideInput(B);\n+ boolean mv = (B.getNumRows() == 1);\n+ int numCols = Math.min(A.getNumColumns(), B.getNumColumns());\n+\n+ HashMap<ClassLabel, IntArrayList> classLabelMapping = new HashMap<>();\n+\n+ for(int i=0, ai=0; i < A.getNumRows(); i++, ai+=A.getNumColumns()) {\n+ classLabelMapping.clear(); sB.reset();\nif( A.isInSparseFormat() ) {\n- Iterator<IJV> iter = A.getSparseBlockIterator(i, i+1);\n- 
while(iter.hasNext()) {\n- IJV ijv = iter.next();\n- int j = ijv.getJ();\n- double aVal = ijv.getV();\n- if(aVal != 0 && bRow[j] != 0) {\n- ClassLabels key = new ClassLabels(aVal, bRow[j]);\n+ if(A.getSparseBlock()==null || A.getSparseBlock().isEmpty(i))\n+ continue;\n+ int alen = A.getSparseBlock().size(i);\n+ int apos = A.getSparseBlock().pos(i);\n+ int[] aix = A.getSparseBlock().indexes(i);\n+ double[] avals = A.getSparseBlock().values(i);\n+ for(int k=apos; k<apos+alen; k++) {\n+ if( aix[k] >= numCols ) break;\n+ int bval = (int)sB.getValue(mv?0:i, aix[k]);\n+ if( bval != 0 ) {\n+ ClassLabel key = new ClassLabel((int)avals[k], bval);\nif(!classLabelMapping.containsKey(key))\n- classLabelMapping.put(key, new ArrayList<Integer>());\n- classLabelMapping.get(key).add(j);\n+ classLabelMapping.put(key, new IntArrayList());\n+ classLabelMapping.get(key).appendValue(aix[k]);\n}\n}\n}\nelse {\ndouble [] denseBlk = A.getDenseBlock();\n- if(denseBlk != null) {\n- int offset = i*A.getNumColumns();\n- for(int j = 0; j < A.getNumColumns(); j++) {\n- double aVal = denseBlk[offset + j];\n- if(aVal != 0 && bRow[j] != 0) {\n- ClassLabels key = new ClassLabels(aVal, bRow[j]);\n+ if(denseBlk == null) break;\n+ for(int j = 0; j < numCols; j++) {\n+ int aVal = (int) denseBlk[ai+j];\n+ int bVal = (int) sB.getValue(mv?0:i, j);\n+ if(aVal != 0 && bVal != 0) {\n+ ClassLabel key = new ClassLabel(aVal, bVal);\nif(!classLabelMapping.containsKey(key))\n- classLabelMapping.put(key, new ArrayList<Integer>());\n- classLabelMapping.get(key).add(j);\n- }\n+ classLabelMapping.put(key, new IntArrayList());\n+ classLabelMapping.get(key).appendValue(j);\n}\n}\n}\n-\nint labelID = 1;\n- for(Entry<ClassLabels, ArrayList<Integer>> entry : classLabelMapping.entrySet()) {\n- double nVal = entry.getValue().size();\n- for(Integer j : entry.getValue()) {\n- nBlk[i*nc + j] = nVal;\n- cBlk[i*nc + j] = labelID;\n+ for(Entry<ClassLabel, IntArrayList> entry : classLabelMapping.entrySet()) {\n+ int nVal = entry.getValue().size();\n+ int[] list = entry.getValue().extractValues();\n+ for(int k=0, off=i*nc; k<nVal; k++) {\n+ dN[off+list[k]] = nVal;\n+ dC[off+list[k]] = labelID;\n}\nlabelID++;\n}\n@@ -208,23 +156,37 @@ public class RowClassMeet extends PackageFunction {\n((Matrix) getFunctionInput(0)).getMatrixObject().release();\n((Matrix) getFunctionInput(1)).getMatrixObject().release();\n- } catch (CacheException e) {\n- throw new RuntimeException(\"Error while executing RowClassMeet\", e);\n- }\n- try {\n- C.recomputeNonZeros();\n- C.examSparsity();\n+ //prepare outputs\n+ C.recomputeNonZeros(); C.examSparsity();\n+ CMat = new Matrix( createOutputFilePathAndName( \"TMP\" ), nr, nc, ValueType.Double );\nCMat.setMatrixDoubleArray(C, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n- N.recomputeNonZeros();\n- N.examSparsity();\n+ N.recomputeNonZeros(); N.examSparsity();\n+ NMat = new Matrix( createOutputFilePathAndName( \"TMP\" ), nr, nc, ValueType.Double );\nNMat.setMatrixDoubleArray(N, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n- } catch (DMLRuntimeException e) {\n- throw new RuntimeException(\"Error while executing RowClassMeet\", e);\n- } catch (IOException e) {\n+ }\n+ catch (DMLRuntimeException | IOException e) {\nthrow new RuntimeException(\"Error while executing RowClassMeet\", e);\n}\n}\n-\n+ private static class ClassLabel {\n+ public int aVal;\n+ public int bVal;\n+ public ClassLabel(int aVal, int bVal) {\n+ this.aVal = aVal;\n+ this.bVal = bVal;\n+ }\n+ @Override\n+ public int 
hashCode() {\n+ return UtilFunctions.intHashCode(aVal, bVal);\n+ }\n+ @Override\n+ public boolean equals(Object o) {\n+ if( !(o instanceof ClassLabel) )\n+ return false;\n+ ClassLabel that = (ClassLabel) o;\n+ return aVal == that.aVal && bVal == that.bVal;\n+ }\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance library UDF rowClassMeet (sparse inputs, grouping) This patch makes some minor performance improvements to the existing rowClassMeet UDF, by (1) using the codegen sparse side inputs for more efficient, zero-copy access, and (2) a more time- and memory-efficient hash grouping approach.
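The grouping rework recorded above replaces a comparator-based TreeMap over (aVal, bVal) label pairs with a HashMap keyed on a small composite object plus a primitive IntArrayList per group. The following self-contained sketch illustrates that hash-grouping technique; the names (LabelPair, HashGroupingSketch) are simplified illustrations, not the SystemML classes:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HashGroupingSketch {
    // composite key over two int class labels, with a cheap combined hash
    static final class LabelPair {
        final int a, b;
        LabelPair(int a, int b) { this.a = a; this.b = b; }
        @Override public int hashCode() { return 31 * a + b; }
        @Override public boolean equals(Object o) {
            return o instanceof LabelPair
                && a == ((LabelPair) o).a && b == ((LabelPair) o).b;
        }
    }

    public static void main(String[] args) {
        int[] rowA = {1, 2, 1, 0, 2};
        int[] rowB = {3, 3, 3, 1, 3};
        // group column indexes by (a,b) label pair, skipping zero entries
        Map<LabelPair, List<Integer>> groups = new HashMap<>();
        for (int j = 0; j < rowA.length; j++)
            if (rowA[j] != 0 && rowB[j] != 0)
                groups.computeIfAbsent(new LabelPair(rowA[j], rowB[j]),
                    k -> new ArrayList<>()).add(j);
        // assign consecutive group IDs and group sizes, as the UDF output does
        int id = 1;
        for (Map.Entry<LabelPair, List<Integer>> e : groups.entrySet())
            System.out.println("group " + (id++) + " size="
                + e.getValue().size() + " cols=" + e.getValue());
    }
}
```

Relative to a sorted TreeMap, each hash insert avoids the O(log n) comparator calls; the actual patch additionally uses a primitive IntArrayList in place of the boxed List shown here, which is where the memory savings come from.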
49,719
03.11.2017 22:24:04
25,200
ae819a253097da796e689764e4926d46228e055e
[MINOR] Update creation of lite jar
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n-<!--\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n--->\n-<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n+<!-- * Licensed to the Apache Software Foundation (ASF) under one * or more\n+ contributor license agreements. See the NOTICE file * distributed with this\n+ work for additional information * regarding copyright ownership. The ASF\n+ licenses this file * to you under the Apache License, Version 2.0 (the *\n+ \"License\"); you may not use this file except in compliance * with the License.\n+ You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0\n+ * * Unless required by applicable law or agreed to in writing, * software\n+ distributed under the License is distributed on an * \"AS IS\" BASIS, WITHOUT\n+ WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the\n+ License for the * specific language governing permissions and limitations\n+ * under the License. -->\n+<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n+ xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n<modelVersion>4.0.0</modelVersion>\n<parent>\n<groupId>org.apache</groupId>\n<build>\n- <!-- Adds scripts to main jar, in-memory jar, sources jar, and standalone jar -->\n+ <!-- Adds scripts to main jar, in-memory jar, sources jar, and standalone\n+ jar -->\n<resources>\n<resource>\n<directory>scripts</directory>\n<executions>\n<execution>\n<id>default-jar</id>\n- <goals><goal>jar</goal></goals>\n+ <goals>\n+ <goal>jar</goal>\n+ </goals>\n<phase>package</phase>\n<configuration>\n<archive>\n<!-- Third argument prevents Java from popping up lots of windows on\nMacOS -->\n- <argLine>-Dfile.encoding=UTF-8 -Xmx2g -Xms2g -Xmn200m -Djava.awt.headless=true</argLine>\n+ <argLine>-Dfile.encoding=UTF-8 -Xmx2g -Xms2g -Xmn200m\n+ -Djava.awt.headless=true</argLine>\n<includes>\n<!-- All tests are integration tests as far as Maven is concerned. 
-->\n</configuration>\n</execution>\n<execution>\n- <!-- NOTE: We don't clean up systemml.egg-info since this makes it difficult\n- to uninstall a dev version after a maven clean -->\n+ <!-- NOTE: We don't clean up systemml.egg-info since this makes it\n+ difficult to uninstall a dev version after a maven clean -->\n<id>clean-python-files</id>\n<phase>clean</phase>\n<goals>\n<phase>package</phase>\n<configuration>\n<target name=\"copy and rename JAR\">\n- <copy file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\" tofile=\"${project.build.directory}/SystemML.jar\" />\n+ <copy\n+ file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\"\n+ tofile=\"${project.build.directory}/SystemML.jar\" />\n</target>\n</configuration>\n<goals>\n<profile>\n<!-- Can be used to ignore doclint javadoc issues -->\n<id>ignore-doclint</id>\n- <!-- <activation>\n- <jdk>[1.8,)</jdk>\n- </activation> -->\n+ <!-- <activation> <jdk>[1.8,)</jdk> </activation> -->\n<properties>\n<javadoc.opts>-Xdoclint:none</javadoc.opts>\n</properties>\n</profile>\n<profile>\n- <!-- Profile to create binary distributions.\n- Execute with `mvn clean package -P distribution` -->\n+ <!-- Profile to create binary distributions. Execute with `mvn clean package\n+ -P distribution` -->\n<id>distribution</id>\n<build>\n<plugins>\n<artifactId>maven-javadoc-plugin</artifactId>\n<version>2.10.3</version>\n<configuration>\n- <!-- Need to include the following packages, so exclude others:\n- org.apache.sysml.api\n- org.apache.sysml.runtime.instructions.spark.utils (for RDDConverterUtils, etc)\n- org.apache.sysml.runtime.matrix (for MatrixCharacteristics, etc)\n- org.apache.sysml.runtime.matrix.data (for MatrixIndexes, MatrixBlock, etc)\n- org.apache.sysml.udf\n- -->\n+ <!-- Need to include the following packages, so exclude others: org.apache.sysml.api\n+ org.apache.sysml.runtime.instructions.spark.utils (for RDDConverterUtils,\n+ etc) org.apache.sysml.runtime.matrix (for MatrixCharacteristics, etc) org.apache.sysml.runtime.matrix.data\n+ (for MatrixIndexes, MatrixBlock, etc) org.apache.sysml.udf -->\n<excludePackageNames>caffe:org.apache.sysml.conf:org.apache.sysml.debug:org.apache.sysml.hops:org.apache.sysml.lops:org.apache.sysml.parser:org.apache.sysml.runtime.controlprogram:org.apache.sysml.runtime.functionobjects:org.apache.sysml.runtime.instructions.cp:org.apache.sysml.runtime.instructions.cpfile:org.apache.sysml.runtime.instructions.mr:org.apache.sysml.runtime.instructions.spark.data:org.apache.sysml.runtime.instructions.spark.functions:org.apache.sysml.runtime.io:org.apache.sysml.runtime.matrix.data.hadoopfix:org.apache.sysml.runtime.matrix.mapred:org.apache.sysml.runtime.matrix.operators:org.apache.sysml.runtime.matrix.sort:org.apache.sysml.runtime.transform:org.apache.sysml.runtime.util:org.apache.sysml.utils:org.apache.sysml.yarn</excludePackageNames>\n<additionalparam>${javadoc.opts}</additionalparam>\n</configuration>\n<descriptors>\n<descriptor>src/assembly/lite.xml</descriptor>\n</descriptors>\n+ <archive>\n+ <manifest>\n+ <mainClass>org.apache.sysml.api.DMLScript</mainClass>\n+ </manifest>\n+ <manifestEntries>\n+ <Build-Time>${maven.build.timestamp}</Build-Time>\n+ <Group-Id>${project.groupId}</Group-Id>\n+ <Artifact-Id>${project.artifactId}-lite</Artifact-Id>\n+ <Version>${project.version}</Version>\n+ </manifestEntries>\n+ </archive>\n</configuration>\n</execution>\n</executions>\n</profile>\n<profile>\n- <!-- Profile to create standalone jar.\n- Execute with `mvn clean package 
-P standalone-jar` -->\n+ <!-- Profile to create standalone jar. Execute with `mvn clean package\n+ -P standalone-jar` -->\n<id>standalone-jar</id>\n<build>\n<plugins>\n" }, { "change_type": "MODIFY", "old_path": "src/assembly/lite.xml", "new_path": "src/assembly/lite.xml", "diff": "<dependencySets>\n<dependencySet>\n<includes>\n- <include>*:antlr4-runtime</include>\n+ <include>*:antlr4</include>\n</includes>\n<unpackOptions>\n<includes>\n</includes>\n<unpackOptions>\n<includes>\n+ <include>org/apache/commons/cli/OptionValidator.class</include>\n+ <include>org/apache/commons/cli/Util.class</include>\n<include>org/apache/commons/cli/AlreadySelectedException.class</include>\n<include>org/apache/commons/cli/CommandLine.class</include>\n<include>org/apache/commons/cli/CommandLineParser.class</include>\n<unpackOptions>\n<includes>\n<include>META-INF/services/org.apache.hadoop.fs.FileSystem</include>\n+ <include>common-version-info.properties</include>\n<include>org/apache/hadoop/log/metrics/EventCounter.class</include>\n+ <include>org/apache/hadoop/security/Groups$CachedGroups.class</include>\n+ <include>org/apache/hadoop/security/token/Token.class</include>\n+ <include>org/apache/hadoop/security/token/TokenIdentifier.class</include>\n+ <include>org/apache/hadoop/util/GenericOptionsParser.class</include>\n<include>org/apache/hadoop/util/ShutdownHookManager$2.class</include>\n<include>org/apache/hadoop/HadoopIllegalArgumentException.class</include>\n<include>org/apache/hadoop/conf/Configurable.class</include>\n<unpack>true</unpack>\n</dependencySet>\n+ <dependencySet>\n+ <includes>\n+ <include>*:hadoop-mapreduce-client-jobclient</include>\n+ </includes>\n+ <unpackOptions>\n+ <includes>\n+ <include>org/apache/hadoop/mapred/YarnClientProtocolProvider.class</include>\n+ </includes>\n+ </unpackOptions>\n+ <scope>compile</scope>\n+ <unpack>true</unpack>\n+ </dependencySet>\n+\n<dependencySet>\n<includes>\n<include>*:jcl-over-slf4j</include>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/lite/BuildLite.java", "new_path": "src/main/java/org/apache/sysml/utils/lite/BuildLite.java", "diff": "@@ -92,6 +92,14 @@ public class BuildLite {\nadditionalResources.add(\"org/apache/hadoop/util/ShutdownHookManager$2.class\");\nadditionalResources.add(\"org/apache/hadoop/log/metrics/EventCounter.class\");\n+ additionalResources.add(\"org/apache/hadoop/util/GenericOptionsParser.class\");\n+ additionalResources.add(\"org/apache/hadoop/security/token/Token.class\");\n+ additionalResources.add(\"org/apache/hadoop/security/token/TokenIdentifier.class\");\n+ additionalResources.add(\"org/apache/hadoop/security/Groups$CachedGroups.class\");\n+ additionalResources.add(\"org/apache/commons/cli/OptionValidator.class\");\n+ additionalResources.add(\"org/apache/commons/cli/Util.class\");\n+ additionalResources.add(\"common-version-info.properties\");\n+\n}\n/**\n@@ -104,7 +112,17 @@ public class BuildLite {\nhadoopCommonResources.add(\"META-INF/services/org.apache.hadoop.fs.FileSystem\");\nhadoopCommonResources.add(\"org/apache/hadoop/util/ShutdownHookManager$2.class\");\nhadoopCommonResources.add(\"org/apache/hadoop/log/metrics/EventCounter.class\");\n+ hadoopCommonResources.add(\"org/apache/hadoop/util/GenericOptionsParser.class\");\n+ hadoopCommonResources.add(\"org/apache/hadoop/security/token/Token.class\");\n+ hadoopCommonResources.add(\"org/apache/hadoop/security/token/TokenIdentifier.class\");\n+ hadoopCommonResources.add(\"org/apache/hadoop/security/Groups$CachedGroups.class\");\n+ 
hadoopCommonResources.add(\"common-version-info.properties\");\nadditionalJarToFileMappingsForDependencySets.put(\"hadoop-common\", hadoopCommonResources);\n+\n+ SortedSet<String> commonsCliResources = new TreeSet<String>();\n+ commonsCliResources.add(\"org/apache/commons/cli/OptionValidator.class\");\n+ commonsCliResources.add(\"org/apache/commons/cli/Util.class\");\n+ additionalJarToFileMappingsForDependencySets.put(\"commons-cli\", commonsCliResources);\n}\n/**\n" } ]
Java
Apache License 2.0
apache/systemds
[Minor] update creation of lite jar
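The lite-jar assembly above works by whitelisting individual class files and resources out of full dependency jars (the unpackOptions includes in lite.xml, mirrored by BuildLite's resource sets). As a rough sketch of that mechanism — a hypothetical helper, not the BuildLite implementation — selective repackaging can be done with the standard java.util.jar API:

```java
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;

public class LiteJarSketch {
    // copy only whitelisted entries from a source jar into the output jar
    static void copySelected(String srcJar, Set<String> wanted,
            JarOutputStream out) throws IOException {
        try (JarFile jar = new JarFile(srcJar)) {
            for (JarEntry e : Collections.list(jar.entries())) {
                if (!wanted.contains(e.getName())) continue;
                out.putNextEntry(new JarEntry(e.getName()));
                try (InputStream in = jar.getInputStream(e)) {
                    byte[] buf = new byte[8192];
                    for (int n; (n = in.read(buf)) > 0; )
                        out.write(buf, 0, n);
                }
                out.closeEntry();
            }
        }
    }

    public static void main(String[] args) throws IOException {
        // usage: java LiteJarSketch out.jar src.jar entry/Name.class ...
        if (args.length < 3) { System.err.println("need out, src, entries"); return; }
        try (JarOutputStream out = new JarOutputStream(new FileOutputStream(args[0]))) {
            Set<String> wanted = new HashSet<>();
            for (int i = 2; i < args.length; i++) wanted.add(args[i]);
            copySelected(args[1], wanted, out);
        }
    }
}
```

Classes missing from such a whitelist surface at runtime as NoClassDefFoundError, which is presumably why entries like GenericOptionsParser and the commons-cli helpers were added in this commit.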
49,738
04.11.2017 15:18:40
25,200
aaa94818e4104c5b711507b759b372426ca1198c
[HOTFIX] Fix JMLC tests (write/read order, tmp dir, fixed seeds)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/lite/BuildLiteExecution.java", "new_path": "src/main/java/org/apache/sysml/utils/lite/BuildLiteExecution.java", "diff": "@@ -46,7 +46,8 @@ import org.apache.sysml.runtime.util.DataConverter;\n*/\npublic class BuildLiteExecution {\n- protected static Logger log = Logger.getLogger(BuildLiteExecution.class);\n+ private static Logger log = Logger.getLogger(BuildLiteExecution.class);\n+ private static final String ROOT = \"functions/jmlc/temp/\";\npublic static void main(String[] args) throws Exception {\n@@ -94,11 +95,11 @@ public class BuildLiteExecution {\n\"write(predicted_y, \\\"./tmp\\\", format=\\\"text\\\");\\n\";\n/* @formatter:on */\n- File file = new File(\"temp/scoring-example.dml\");\n+ File file = new File(ROOT+\"scoring-example.dml\");\nFileUtils.writeStringToFile(file, scriptString);\nConnection conn = getConfiguredConnection();\n- String dml = conn.readScript(\"temp/scoring-example.dml\");\n+ String dml = conn.readScript(ROOT+\"scoring-example.dml\");\nPreparedScript script = conn.prepareScript(dml, new String[] { \"W\", \"X\" }, new String[] { \"predicted_y\" },\nfalse);\n@@ -158,17 +159,17 @@ public class BuildLiteExecution {\npublic static void jmlcWriteMatrix() throws Exception {\nConnection conn = getConfiguredConnection();\nPreparedScript script = conn.prepareScript(\n- \"x=matrix('1 2 3 4',rows=2,cols=2);write(x,'temp/x.csv',format='csv');\", new String[] {},\n+ \"x=matrix('1 2 3 4',rows=2,cols=2);write(x,'\"+ROOT+\"x.csv',format='csv');\", new String[] {},\nnew String[] {}, false);\nscript.executeScript();\n/* @formatter:off */\nString scriptString =\n\"m = matrix('1 2 3 0 0 0 7 8 9 0 0 0', rows=4, cols=3)\\n\" +\n- \"write(m, 'temp/m.txt', format='text')\\n\" +\n- \"write(m, 'temp/m.mm', format='mm')\\n\" +\n- \"write(m, 'temp/m.csv', format='csv')\\n\" +\n- \"write(m, 'temp/m.binary', format='binary')\\n\";\n+ \"write(m, '\"+ROOT+\"m.txt', format='text')\\n\" +\n+ \"write(m, '\"+ROOT+\"m.mm', format='mm')\\n\" +\n+ \"write(m, '\"+ROOT+\"m.csv', format='csv')\\n\" +\n+ \"write(m, '\"+ROOT+\"m.binary', format='binary')\\n\";\n/* @formatter:on */\nscript = conn.prepareScript(scriptString, new String[] {}, new String[] {}, false);\n@@ -179,12 +180,12 @@ public class BuildLiteExecution {\npublic static void jmlcReadMatrix() throws Exception {\nConnection conn = getConfiguredConnection();\n- PreparedScript script = conn.prepareScript(\"x=read('temp/x.csv',format='csv');y=x*2;print(toString(y));\",\n+ PreparedScript script = conn.prepareScript(\"x=read('\"+ROOT+\"x.csv',format='csv');y=x*2;print(toString(y));\",\nnew String[] {}, new String[] {}, false);\nscript.executeScript();\n/* @formatter:off */\n- String scriptString = \"m = read('temp/m.csv',format='csv')\\n\" +\n+ String scriptString = \"m = read('\"+ROOT+\"m.csv',format='csv')\\n\" +\n\"print(toString(m))\\n\" +\n\"print('min:' + min(m))\\n\" +\n\"print('max:' + max(m))\\n\" +\n@@ -201,9 +202,9 @@ public class BuildLiteExecution {\n// note: the following can be set to work using the following setting\n// in the Connection class: cconf.set(ConfigType.IGNORE_READ_WRITE_METADATA, false);\n- // \"m2=read('temp/m.txt', format='text')\\n\" +\n- // \"m3=read('temp/m.mm', format='mm')\\n\" +\n- // \"m4=read('temp/m.binary', format='binary')\\n\" +\n+ // \"m2=read('\"+ROOT+\"m.txt', format='text')\\n\" +\n+ // \"m3=read('\"+ROOT+\"m.mm', format='mm')\\n\" +\n+ // \"m4=read('\"+ROOT+\"m.binary', format='binary')\\n\" +\n// 
\"print('m2:'+toString(m2))\\n\" +\n// \"print('m3:'+toString(m3))\\n\" +\n// \"print('m4:'+toString(m4))\\n\";\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/BuildLiteJarTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/BuildLiteJarTest.java", "diff": "@@ -46,12 +46,8 @@ public class BuildLiteJarTest extends AutomatedTestBase\n}\n@Test\n- public void testJMLCWriteMatrix() throws Exception {\n+ public void testJMLCWriteReadMatrix() throws Exception {\nBuildLiteExecution.jmlcWriteMatrix();\n- }\n-\n- @Test\n- public void testJMLCReadMatrix() throws Exception {\nBuildLiteExecution.jmlcReadMatrix();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "diff": "@@ -181,12 +181,8 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nprivate ArrayList<double[][]> generateInputs( int num, int rows, int cols, double sparsity )\n{\nArrayList<double[][]> ret = new ArrayList<double[][]>();\n-\nfor( int i=0; i<num; i++ )\n- {\n- double[][] X = getRandomMatrix(rows, cols, -1, 1, sparsity, System.nanoTime());\n- ret.add(X);\n- }\n+ ret.add(getRandomMatrix(rows, cols, -1, 1, sparsity, i));\nreturn ret;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix JMLC tests (write/read order, tmp dir, fixed seeds)
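Among the fixes above is the switch from System.nanoTime() to fixed seeds for generated test inputs. A minimal, self-contained sketch (an illustrative helper, not the test code itself) of why a fixed seed makes repeated generations bit-identical:

```java
import java.util.Arrays;
import java.util.Random;

public class FixedSeedSketch {
    // deterministic generation: the same seed always yields the same matrix
    static double[][] randomMatrix(int rows, int cols, long seed) {
        Random rnd = new Random(seed);
        double[][] m = new double[rows][cols];
        for (int i = 0; i < rows; i++)
            for (int j = 0; j < cols; j++)
                m[i][j] = -1 + 2 * rnd.nextDouble(); // values in [-1, 1)
        return m;
    }

    public static void main(String[] args) {
        double[][] a = randomMatrix(2, 3, 7);
        double[][] b = randomMatrix(2, 3, 7);
        System.out.println(Arrays.deepEquals(a, b)); // true: reproducible
    }
}
```

With a nanosecond timestamp as the seed (the previous behavior), every run produced different inputs, making failures hard to reproduce; a constant seed pins the data down across runs and machines.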
49,738
04.11.2017 21:23:57
25,200
55c4c0b9737ad18356758e31957b8030a9aa8138
[MINOR] Performance shallow reshape and sparse cbind operations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "diff": "@@ -445,7 +445,11 @@ public class LibMatrixReorg\n//check for same dimensions\nif( rlen==rows && clen == cols ) {\n- out.copy(in); //incl dims, nnz\n+ //copy incl dims, nnz\n+ if( SHALLOW_COPY_REORG )\n+ out.copyShallow(in);\n+ else\n+ out.copy(in);\nreturn out;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -761,17 +761,15 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\nelse //SPARSE <- DENSE\n{\n- for( int i=0; i<that.rlen; i++ )\n- {\n- int aix = rowoffset+i;\n- for( int j=0, bix=i*that.clen; j<that.clen; j++ )\n- {\n- double val = that.denseBlock[bix+j];\n- if( val != 0 ) {\n- //create sparserow only if required\n+ double[] b = that.denseBlock;\n+ final int bm = that.rlen;\n+ final int bn = that.clen;\n+ for( int i=0, aix=rowoffset, bix=0; i<bm; i++, aix++, bix+=bn )\n+ for( int j=0; j<bn; j++ ) {\n+ final double bval = b[bix+j];\n+ if( bval != 0 ) {\nsparseBlock.allocate(aix, estimatedNNzsPerRow, clen);\n- sparseBlock.append(aix, coloffset+j, val);\n- }\n+ sparseBlock.append(aix, coloffset+j, bval);\n}\n}\n}\n@@ -1351,6 +1349,17 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\ncopyDenseToDense(that);\n}\n+ public void copyShallow(MatrixBlock that) {\n+ rlen = that.rlen;\n+ clen = that.clen;\n+ nonZeros = that.nonZeros;\n+ sparse = that.sparse;\n+ if( !sparse )\n+ denseBlock = that.denseBlock;\n+ else\n+ sparseBlock = that.sparseBlock;\n+ }\n+\nprivate void copySparseToSparse(MatrixBlock that)\n{\nthis.nonZeros=that.nonZeros;\n@@ -3546,7 +3555,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//core append operation\n//copy left and right input into output\n- if( !result.sparse ) //DENSE\n+ if( !result.sparse && nnz!=0 ) //DENSE\n{\nif( cbind ) {\nresult.copy(0, m-1, 0, clen-1, this, false);\n@@ -3563,13 +3572,12 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n}\n}\n- else //SPARSE\n+ else if(nnz != 0) //SPARSE\n{\n//adjust sparse rows if required\n- if( !this.isEmptyBlock(false) || !Arrays.stream(that).allMatch(mb -> mb.isEmptyBlock(false)) ) {\nresult.allocateSparseRowsBlock();\n//allocate sparse rows once for cbind\n- if( cbind && result.getSparseBlock() instanceof SparseBlockMCSR ) {\n+ if( cbind && nnz > rlen && result.getSparseBlock() instanceof SparseBlockMCSR ) {\nSparseBlock sblock = result.getSparseBlock();\nfor( int i=0; i<result.rlen; i++ ) {\nfinal int row = i; //workaround for lambda compile issue\n@@ -3578,7 +3586,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nsblock.allocate(i, lnnz);\n}\n}\n- }\n//core append operation\nresult.appendToSparse(this, 0, 0);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance shallow reshape and sparse cbind operations
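The copyShallow method added above turns a dimension-preserving reshape into an O(1) alias of the input's underlying arrays instead of an element-wise copy. A simplified sketch of the trade-off, using a hypothetical Block type rather than MatrixBlock:

```java
public class ShallowCopySketch {
    static final class Block {
        int rows, cols;
        double[] dense; // row-major values
        Block(int r, int c) { rows = r; cols = c; dense = new double[r * c]; }
        // O(1): share the underlying array; safe only if neither side
        // mutates the data afterwards
        void copyShallow(Block that) {
            rows = that.rows; cols = that.cols; dense = that.dense;
        }
        // O(rows*cols): materialize an independent copy
        void copyDeep(Block that) {
            rows = that.rows; cols = that.cols; dense = that.dense.clone();
        }
    }

    public static void main(String[] args) {
        Block in = new Block(2, 2);
        Block out = new Block(2, 2);
        out.copyShallow(in);
        System.out.println(out.dense == in.dense); // true: zero-copy alias
    }
}
```

Guarding the alias behind a flag (SHALLOW_COPY_REORG in the patch) suggests the optimization is only applied where the surrounding runtime treats the shared block as read-only.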
49,768
07.11.2017 16:29:58
28,800
bc781fcef778bed9cb6bff686f841c3040297213
Create Release Creation Process doc Closes
[ { "change_type": "MODIFY", "old_path": "docs/release-process.md", "new_path": "docs/release-process.md", "diff": "@@ -139,9 +139,7 @@ Verify that the snapshot is now available at\n# Release Candidate Build and Deployment\n-To be written. (Describe how the release candidate is built, including checksums. Describe how\n-the release candidate is deployed to servers for review.)\n-\n+For detailed information, please see [SystemML Release Creation Process](release-creation-process.html).\n# Release Candidate Checklist\n@@ -488,4 +486,3 @@ Commit the update to `documentation.html` to publish the website update.\nThe versioned project documentation is now deployed to the main website, and the\n[Documentation Page](http://systemml.apache.org/documentation) contains a link to the versioned documentation.\n-\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-848] Create Release Creation Process doc Closes #697.
49,738
07.11.2017 19:52:00
28,800
8a1f98e1be2d7bf8e3400d75bf13ced021c52977
Fix codegen row/cell tpl compilation w/ unknown sizes This patch fixes codegen compilation issues with unknown sizes during initial compilation. Typically, dynamic recompilation corrects such invalid generated operators, but in JMLC deployments, for example, dynamic recompilation is disabled by default.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java", "diff": "@@ -75,8 +75,8 @@ public class TemplateCell extends TemplateBase\npublic boolean open(Hop hop) {\nreturn hop.dimsKnown() && isValidOperation(hop)\n&& !(hop.getDim1()==1 && hop.getDim2()==1)\n- || (hop instanceof IndexingOp && (((IndexingOp)hop)\n- .isColLowerEqualsUpper() || hop.getDim2()==1));\n+ || (hop instanceof IndexingOp && hop.getInput().get(0).getDim2() > 0\n+ && (((IndexingOp)hop).isColLowerEqualsUpper() || hop.getDim2()==1));\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java", "diff": "@@ -95,7 +95,8 @@ public class TemplateRow extends TemplateBase\n|| (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol\n&& hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1\n&& HopRewriteUtils.isAggUnaryOp(hop, SUPPORTED_ROW_AGG))\n- || (hop instanceof IndexingOp && HopRewriteUtils.isColumnRangeIndexing((IndexingOp)hop));\n+ || (hop instanceof IndexingOp && hop.getInput().get(0).getDim2() > 0\n+ && HopRewriteUtils.isColumnRangeIndexing((IndexingOp)hop));\n}\n@Override\n@@ -449,7 +450,7 @@ public class TemplateRow extends TemplateBase\nout = new CNodeTernary(cdata1,\nTemplateUtils.createCNodeData(new LiteralOp(hop.getInput().get(0).getDim2()), true),\nTemplateUtils.createCNodeData(hop.getInput().get(4), true),\n- (!hop.dimsKnown()||hop.getDim2()>1) ? TernaryType.LOOKUP_RVECT1 : TernaryType.LOOKUP_RC1);\n+ (hop.getDim2() != 1) ? TernaryType.LOOKUP_RVECT1 : TernaryType.LOOKUP_RC1);\n}\nif( out == null ) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2008] Fix codegen row/cell tpl compilation w/ unknown sizes This patch fixes codegen compilation issues with unknown sizes during initial compilation. Typically, dynamic recompilation corrects such invalid generated operators, but for example, in JMLC deployments dynamic recompilation is disabled by default.
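The fix above adds `hop.getInput().get(0).getDim2() > 0` guards so a fusion template only opens on an indexing operation once the input's column count is actually known. A compact sketch of that guard pattern, with hypothetical method names rather than the SystemML planner API:

```java
public class DimGuardSketch {
    static final long UNKNOWN = -1; // unknown sizes encoded as non-positive

    // open a column-indexing template only if the input width is known and
    // the output is a single column or matches the full input width
    static boolean canFuseColumnIndexing(long inputCols, long outputCols) {
        return inputCols > 0
            && (outputCols == 1 || outputCols == inputCols);
    }

    public static void main(String[] args) {
        System.out.println(canFuseColumnIndexing(UNKNOWN, 1)); // false: defer
        System.out.println(canFuseColumnIndexing(100, 1));     // true: fuse
    }
}
```

Without such a guard, a template opened on unknown sizes yields an operator that is only corrected later by dynamic recompilation — which, as the message notes, is disabled by default in JMLC deployments.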
49,738
08.11.2017 13:49:50
28,800
a03065299a11487655863f4077df9c4af97829e1
[MINOR] Additional input/output verification in flaky JMLC test
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "diff": "@@ -39,7 +39,6 @@ import org.junit.Test;\npublic class MulticlassSVMScoreTest extends AutomatedTestBase\n{\n-\nprivate final static String TEST_NAME = \"m-svm-score\";\nprivate final static String TEST_DIR = \"functions/jmlc/\";\nprivate final static String MODEL_FILE = \"sentiment_model.mtx\";\n@@ -49,15 +48,17 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nprivate final static int rows = 107;\nprivate final static int cols = 46; //fixed\n- private final static int nRuns = 5;\n+ private final static int nRuns = 3;\nprivate final static double sparsity1 = 0.7;\nprivate final static double sparsity2 = 0.1;\n+ //This testcase recently caused intermittent test failures on jenkins that are not\n+ //reproducible in local environments; hence we perform additional sanity checks here.\n+ private final static boolean CHECK_IN_OUT = true;\n@Override\n- public void setUp()\n- {\n+ public void setUp() {\naddTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] { \"predicted_y\" }) );\n}\n@@ -89,9 +90,13 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\n//generate inputs\nArrayList<double[][]> Xset = generateInputs(nRuns, rows, cols, sparse?sparsity2:sparsity1);\n+ if( CHECK_IN_OUT )\n+ checkSelfEquivalence(Xset, rows, cols);\n//run DML via JMLC\nArrayList<double[][]> Yset = execDMLScriptviaJMLC( Xset, flags );\n+ if( CHECK_IN_OUT )\n+ checkSelfEquivalence(Yset, rows, 1);\n//run R and compare results to DML result\nString HOME = SCRIPT_DIR + TEST_DIR;\n@@ -162,13 +167,11 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nret.add(Y); //keep result for comparison\n}\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nex.printStackTrace();\nthrow new IOException(ex);\n}\n- finally\n- {\n+ finally {\nif( conn != null )\nconn.close();\n}\n@@ -178,13 +181,18 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nreturn ret;\n}\n- private ArrayList<double[][]> generateInputs( int num, int rows, int cols, double sparsity )\n- {\n+ private ArrayList<double[][]> generateInputs( int num, int rows, int cols, double sparsity ) {\nArrayList<double[][]> ret = new ArrayList<double[][]>();\nfor( int i=0; i<num; i++ )\n- ret.add(getRandomMatrix(rows, cols, -1, 1, sparsity, i));\n-\n+ ret.add(getRandomMatrix(rows, cols, -1, 1, sparsity, 7));\nreturn ret;\n}\n+ private void checkSelfEquivalence(ArrayList<double[][]> data, int rows, int cols) {\n+ if( data == null || data.size() < 2 )\n+ return;\n+ double[][] data0 = data.get(0);\n+ for(int i=1; i<data.size(); i++)\n+ TestUtils.compareMatrices(data0, data.get(i), rows, cols, eps);\n+ }\n}\n\\ No newline at end of file\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Additional input/output verification in flaky JMLC test
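The self-equivalence check introduced above exploits the fixed seed: if every generated input is identical, every output must be identical too, so comparing each run against the first isolates where nondeterminism enters. A self-contained sketch of the pattern (exact equality here; the actual test compares against a tolerance eps):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SelfEquivalenceSketch {
    // all runs must match run 0; a mismatch flags nondeterministic behavior
    static void checkSelfEquivalence(List<double[][]> data) {
        if (data == null || data.size() < 2) return;
        double[][] first = data.get(0);
        for (int i = 1; i < data.size(); i++)
            if (!Arrays.deepEquals(first, data.get(i)))
                throw new AssertionError("run " + i + " differs from run 0");
    }

    public static void main(String[] args) {
        List<double[][]> runs = new ArrayList<>();
        for (int i = 0; i < 3; i++)
            runs.add(new double[][] {{1, 2}, {3, 4}});
        checkSelfEquivalence(runs); // passes: identical inputs
    }
}
```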