author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
488,282 | 21.03.2017 19:30:21 | -3,600 | babbf2677c5db65d51a7372adfcdb4862fdcd14c | Adds information about using bash to documentation.
Adds
**Note:** Make sure to use bash, as other shells are not fully compatible
and may cause hard to debug problems.
to the doc/source/devref/devstack.rst to warn that bash should be used.
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "doc/source/devref/devstack.rst",
"new_path": "doc/source/devref/devstack.rst",
"diff": "@@ -47,6 +47,9 @@ Install DevStack\nThe instructions assume that you've decided to install DevStack into\nUbuntu 14.04 system.\n+**Note:** Make sure to use bash, as other shells are not fully compatible\n+and may cause hard to debug problems.\n+\n1. Clone DevStack:\n.. sourcecode:: console\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adds information about using bash to documentation.
Adds
**Note:** Make sure to use bash, as other shells are not fully compatible
and may cause hard to debug problems.
to the doc/source/devref/devstack.rst to warn that bash should be used.
Change-Id: I70f73f26ce8bc75e46cd858c4b33a6a7aff3cdce
Closes-Bug: 1674779 |
488,278 | 27.03.2017 20:09:52 | 10,800 | cd890b86e39a4dfb6cf7f908f31ecc0ed1ac41a2 | [APIv2] Convert update methods to use PATCH
all update operations should synchronize around using PATCH
instead of PUT for partial resource updates.
Partial-Implements: bp v2-api-experimental-impl | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/cluster_templates.py",
"new_path": "sahara/api/v2/cluster_templates.py",
"diff": "@@ -49,7 +49,7 @@ def cluster_templates_get(cluster_template_id):\nreturn u.to_wrapped_dict(api.get_cluster_template, cluster_template_id)\[email protected]('/cluster-templates/<cluster_template_id>')\[email protected]('/cluster-templates/<cluster_template_id>')\[email protected](\"data-processing:cluster-templates:modify\")\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\[email protected](ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA,\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/data_sources.py",
"new_path": "sahara/api/v2/data_sources.py",
"diff": "@@ -56,7 +56,7 @@ def data_source_delete(data_source_id):\nreturn u.render()\[email protected]('/data-sources/<data_source_id>')\[email protected]('/data-sources/<data_source_id>')\[email protected](\"data-processing:data-sources:modify\")\[email protected]_exists(api.get_data_source, 'data_source_id')\[email protected](v_d_s_schema.DATA_SOURCE_UPDATE_SCHEMA)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/job_binaries.py",
"new_path": "sahara/api/v2/job_binaries.py",
"diff": "@@ -68,7 +68,7 @@ def job_binary_data(job_binary_id):\nreturn data\[email protected]('/job-binaries/<job_binary_id>')\[email protected]('/job-binaries/<job_binary_id>')\[email protected](\"data-processing:job-binaries:modify\")\[email protected](v_j_b_schema.JOB_BINARY_UPDATE_SCHEMA, v_j_b.check_job_binary)\ndef job_binary_update(job_binary_id, data):\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/node_group_templates.py",
"new_path": "sahara/api/v2/node_group_templates.py",
"diff": "@@ -51,7 +51,7 @@ def node_group_templates_get(node_group_template_id):\napi.get_node_group_template, node_group_template_id)\[email protected]('/node-group-templates/<node_group_template_id>')\[email protected]('/node-group-templates/<node_group_template_id>')\[email protected](\"data-processing:node-group-templates:modify\")\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA,\n"
}
] | Python | Apache License 2.0 | openstack/sahara | [APIv2] Convert update methods to use PATCH
all update operations should synchronize around using PATCH
instead of PUT for partial resource updates.
Partial-Implements: bp v2-api-experimental-impl
Change-Id: I079965a4efad24dd08b60a555ba3897cb18bd6df |
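The diff in the row above is stored as an escaped string and is hard to read. As a readability aid only, here is a minimal, self-contained sketch of the PUT-to-PATCH idea using plain Flask; the route, handler name, and in-memory store are hypothetical and are not Sahara's actual `rest`/`acl` helpers.

```python
# Hypothetical illustration (not Sahara code) of preferring PATCH over PUT
# for partial resource updates, using plain Flask.
from flask import Flask, jsonify, request

app = Flask(__name__)

# In-memory stand-in for a stored cluster template.
templates = {"ct-1": {"name": "default", "description": "initial"}}

@app.route("/cluster-templates/<template_id>", methods=["PATCH"])
def cluster_template_update(template_id):
    # PATCH applies only the fields present in the request body; PUT would
    # conventionally replace the whole resource representation.
    if template_id not in templates:
        return jsonify({"error": "not found"}), 404
    changes = request.get_json() or {}
    templates[template_id].update(changes)
    return jsonify(templates[template_id])

if __name__ == "__main__":
    app.run()
```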
488,272 | 04.04.2017 12:15:11 | 10,800 | bff74cd49329a7b4d5c4fc0312a876109d0fa1a5 | Upgrading Spark version to 2.1.0 | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/spark/plugin.py",
"new_path": "sahara/plugins/spark/plugin.py",
"diff": "@@ -68,7 +68,7 @@ class SparkProvider(p.ProvisioningPluginBase):\nreturn result\ndef get_versions(self):\n- return ['1.6.0', '1.3.1']\n+ return ['2.1.0', '1.6.0', '1.3.1']\ndef get_configs(self, hadoop_version):\nreturn c_helper.get_plugin_configs()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/spark/test_plugin.py",
"new_path": "sahara/tests/unit/plugins/spark/test_plugin.py",
"diff": "@@ -74,6 +74,14 @@ class SparkPluginTest(base.SaharaWithDbTestCase):\nself._test_engine('1.6.0', edp.JOB_TYPE_SHELL,\nengine.SparkShellJobEngine)\n+ def test_plugin21_edp_engine(self):\n+ self._test_engine('2.1.0', edp.JOB_TYPE_SPARK,\n+ engine.SparkJobEngine)\n+\n+ def test_plugin22_shell_engine(self):\n+ self._test_engine('2.1.0', edp.JOB_TYPE_SHELL,\n+ engine.SparkShellJobEngine)\n+\ndef _test_engine(self, version, job_type, eng):\ncluster_dict = self._init_cluster_dict(version)\n@@ -140,6 +148,8 @@ class SparkProviderTest(base.SaharaTestCase):\nres['1.3.1'])\nself.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],\nres['1.6.0'])\n+ self.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],\n+ res['2.1.0'])\ndef test_edp_config_hints(self):\nprovider = pl.SparkProvider()\n@@ -159,3 +169,11 @@ class SparkProviderTest(base.SaharaTestCase):\nres = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, \"1.6.0\")\nself.assertEqual({'args': [], 'configs': []},\nres['job_config'])\n+\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, \"2.1.0\")\n+ self.assertEqual({'args': [], 'configs': []},\n+ res['job_config'])\n+\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, \"2.1.0\")\n+ self.assertEqual({'args': [], 'configs': []},\n+ res['job_config'])\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Upgrading Spark version to 2.1.0
Change-Id: I6d4a70bdf3f033018fa9cd90b35374e842704e53 |
488,272 | 03.04.2017 16:17:58 | 10,800 | 5cadd6eae262bbac3913bc1b1d98873cca7fcfbf | Added support to Storm 1.1.0
Adding support for Storm 1.1.0 and tests as well | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/add-storm-version-1_1_0-3e10b34824706a62.yaml",
"diff": "+---\n+features:\n+ - Storm 1.1.0 is supported in Storm plugin.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/storm/plugin.py",
"new_path": "sahara/plugins/storm/plugin.py",
"diff": "@@ -51,7 +51,7 @@ class StormProvider(p.ProvisioningPluginBase):\n\"cluster without any management consoles.\"))\ndef get_versions(self):\n- return ['0.9.2', '1.0.1']\n+ return ['0.9.2', '1.0.1', '1.1.0']\ndef get_configs(self, storm_version):\nreturn c_helper.get_plugin_configs()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/storm/test_plugin.py",
"new_path": "sahara/tests/unit/plugins/storm/test_plugin.py",
"diff": "@@ -71,10 +71,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\n+ cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\ncluster_data_092['node_groups'] = data\ncluster_data_101['node_groups'] = data\n+ cluster_data_110['node_groups'] = data\n- clusters = [cluster_data_092, cluster_data_101]\n+ clusters = [cluster_data_092, cluster_data_101, cluster_data_110]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -162,10 +164,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\n+ cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\ncluster_data_092['node_groups'] = data\ncluster_data_101['node_groups'] = data\n+ cluster_data_110['node_groups'] = data\n- clusters = [cluster_data_092, cluster_data_101]\n+ clusters = [cluster_data_092, cluster_data_101, cluster_data_110]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -194,10 +198,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\n+ cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\ncluster_data_092['node_groups'] = data\ncluster_data_101['node_groups'] = data\n+ cluster_data_110['node_groups'] = data\n- clusters = [cluster_data_092, cluster_data_101]\n+ clusters = [cluster_data_092, cluster_data_101, cluster_data_110]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -230,10 +236,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_092 = self._get_cluster('cluster_0.9.2', '0.9.2')\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\n+ cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\ncluster_data_092['node_groups'] = data\ncluster_data_101['node_groups'] = data\n+ cluster_data_110['node_groups'] = data\n- clusters = [cluster_data_092, cluster_data_101]\n+ clusters = [cluster_data_092, cluster_data_101, cluster_data_110]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -276,3 +284,11 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ndef test_plugin101_edp_storm_pyleus_engine(self):\nself._test_engine('1.0.1', edp.JOB_TYPE_PYLEUS,\nengine.StormJobEngine)\n+\n+ def test_plugin110_edp_storm_engine(self):\n+ self._test_engine('1.1.0', edp.JOB_TYPE_STORM,\n+ engine.StormJobEngine)\n+\n+ def test_plugin110_edp_storm_pyleus_engine(self):\n+ self._test_engine('1.1.0', edp.JOB_TYPE_PYLEUS,\n+ engine.StormJobEngine)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Added support to Storm 1.1.0
Adding support for Storm 1.1.0 and tests as well
Change-Id: I627090dd1d69f548af47d6743a1e135e0e320def |
488,272 | 04.04.2017 09:29:21 | 10,800 | 55dc2d8dc272888d8f0a5f4efd6b77e0874334d1 | Adding labels support to Storm
Adding support for labels to the Storm plugin, as well as marking Storm
version 0.9.2 as deprecated. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/deprecate-storm-version-092.yaml-b9ff2b9ebbb983fc.yaml",
"diff": "+---\n+deprecations:\n+ - Storm version 0.9.2 is deprecated.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/storm/plugin.py",
"new_path": "sahara/plugins/storm/plugin.py",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n+import copy\n+\nfrom oslo_log import log as logging\nimport six\nimport yaml\n@@ -50,8 +52,20 @@ class StormProvider(p.ProvisioningPluginBase):\n_(\"This plugin provides an ability to launch Storm \"\n\"cluster without any management consoles.\"))\n+ def get_labels(self):\n+ default = {'enabled': {'status': True}, 'stable': {'status': True}}\n+ deprecated = {'enabled': {'status': True},\n+ 'deprecated': {'status': True}}\n+ result = {'plugin_labels': copy.deepcopy(default)}\n+ result['version_labels'] = {\n+ '1.1.0': copy.deepcopy(default),\n+ '1.0.1': copy.deepcopy(default),\n+ '0.9.2': copy.deepcopy(deprecated),\n+ }\n+ return result\n+\ndef get_versions(self):\n- return ['0.9.2', '1.0.1']\n+ return ['0.9.2', '1.0.1', '1.1.0']\ndef get_configs(self, storm_version):\nreturn c_helper.get_plugin_configs()\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding labels support to Storm
Adding support to labels to the Storm plugin as well as marking storm
version 0.9.2 as deprecated.
Change-Id: If58ddce1324341da48fb1d1d3da3d456316c1d10 |
488,278 | 07.04.2017 12:31:31 | 10,800 | 4cc57f181dc03db27f0940348ec0c05dc6626b64 | [APIv2] Rename hadoop_version
hadoop_version should be changed to plugin_version.
This patch only changes json requests, the complete
change should be done when APIv2 is stable
and APIv1 deprecated. Along with this should be a
data model change as well.
Partial-Implements: bp v2-api-experimental-impl | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/cluster_templates.py",
"new_path": "sahara/api/v2/cluster_templates.py",
"diff": "@@ -36,9 +36,13 @@ def cluster_templates_list():\[email protected]('/cluster-templates')\[email protected](\"data-processing:cluster-templates:create\")\[email protected](ct_schema.CLUSTER_TEMPLATE_SCHEMA,\[email protected](ct_schema.CLUSTER_TEMPLATE_SCHEMA_V2,\nv_ct.check_cluster_template_create)\ndef cluster_templates_create(data):\n+ # renaming hadoop_version -> plugin_version\n+ # this can be removed once APIv1 is deprecated\n+ data['hadoop_version'] = data['plugin_version']\n+ del data['plugin_version']\nreturn u.render(api.create_cluster_template(data).to_wrapped_dict())\n@@ -52,9 +56,11 @@ def cluster_templates_get(cluster_template_id):\[email protected]('/cluster-templates/<cluster_template_id>')\[email protected](\"data-processing:cluster-templates:modify\")\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\[email protected](ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA,\[email protected](ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2,\nv_ct.check_cluster_template_update)\ndef cluster_templates_update(cluster_template_id, data):\n+ data['hadoop_version'] = data['plugin_version']\n+ del data['plugin_version']\nreturn u.to_wrapped_dict(\napi.update_cluster_template, cluster_template_id, data)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/clusters.py",
"new_path": "sahara/api/v2/clusters.py",
"diff": "@@ -38,16 +38,22 @@ def clusters_list():\[email protected]('/clusters')\[email protected](\"data-processing:clusters:create\")\[email protected](v_c_schema.CLUSTER_SCHEMA, v_c.check_cluster_create)\[email protected](v_c_schema.CLUSTER_SCHEMA_V2, v_c.check_cluster_create)\ndef clusters_create(data):\n+ # renaming hadoop_version -> plugin_version\n+ # this can be removed once APIv1 is deprecated\n+ data['hadoop_version'] = data['plugin_version']\n+ del data['plugin_version']\nreturn u.render(api.create_cluster(data).to_wrapped_dict())\[email protected]('/clusters/multiple')\[email protected](\"data-processing:clusters:create\")\[email protected](\n- v_c_schema.MULTIPLE_CLUSTER_SCHEMA, v_c.check_multiple_clusters_create)\n+ v_c_schema.MULTIPLE_CLUSTER_SCHEMA_V2, v_c.check_multiple_clusters_create)\ndef clusters_create_multiple(data):\n+ data['hadoop_version'] = data['plugin_version']\n+ del data['plugin_version']\nreturn u.render(api.create_multiple_clusters(data))\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/node_group_templates.py",
"new_path": "sahara/api/v2/node_group_templates.py",
"diff": "@@ -37,9 +37,13 @@ def node_group_templates_list():\[email protected]('/node-group-templates')\[email protected](\"data-processing:node-group-templates:create\")\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA,\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA_V2,\nv_ngt.check_node_group_template_create)\ndef node_group_templates_create(data):\n+ # renaming hadoop_version -> plugin_version\n+ # this can be removed once APIv1 is deprecated\n+ data['hadoop_version'] = data['plugin_version']\n+ del data['plugin_version']\nreturn u.render(api.create_node_group_template(data).to_wrapped_dict())\n@@ -54,9 +58,11 @@ def node_group_templates_get(node_group_template_id):\[email protected]('/node-group-templates/<node_group_template_id>')\[email protected](\"data-processing:node-group-templates:modify\")\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA,\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2,\nv_ngt.check_node_group_template_update)\ndef node_group_templates_update(node_group_template_id, data):\n+ data['hadoop_version'] = data['plugin_version']\n+ del data['plugin_version']\nreturn u.to_wrapped_dict(\napi.update_node_group_template, node_group_template_id, data)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/base.py",
"new_path": "sahara/service/validations/base.py",
"diff": "@@ -104,10 +104,14 @@ def check_node_group_configs(plugin_name, hadoop_version, ng_configs,\ndef check_all_configurations(data):\n- pl_confs = _get_plugin_configs(data['plugin_name'], data['hadoop_version'])\n+ plugin_version = 'hadoop_version'\n+ if data.get('plugin_version'):\n+ plugin_version = 'plugin_version'\n+\n+ pl_confs = _get_plugin_configs(data['plugin_name'], data[plugin_version])\nif data.get('cluster_configs'):\n- check_node_group_configs(data['plugin_name'], data['hadoop_version'],\n+ check_node_group_configs(data['plugin_name'], data[plugin_version],\ndata['cluster_configs'],\nplugin_configs=pl_confs)\n@@ -115,7 +119,7 @@ def check_all_configurations(data):\ncheck_duplicates_node_groups_names(data['node_groups'])\nfor ng in data['node_groups']:\ncheck_node_group_basic_fields(data['plugin_name'],\n- data['hadoop_version'],\n+ data[plugin_version],\nng, pl_confs)\n# NodeGroup related checks\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/cluster_template_schema.py",
"new_path": "sahara/service/validations/cluster_template_schema.py",
"diff": "@@ -21,11 +21,13 @@ from sahara.service.validations import shares\ndef _build_ng_schema_for_cluster_tmpl():\ncl_tmpl_ng_schema = copy.deepcopy(ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA)\n- cl_tmpl_ng_schema['properties'].update({\"count\": {\"type\": \"integer\"}})\n- cl_tmpl_ng_schema[\"required\"] = ['name', 'flavor_id',\n- 'node_processes', 'count']\n- del cl_tmpl_ng_schema['properties']['hadoop_version']\n- del cl_tmpl_ng_schema['properties']['plugin_name']\n+ cl_tmpl_ng_schema[\"properties\"].update({\"count\": {\"type\": \"integer\"}})\n+ cl_tmpl_ng_schema[\"required\"] = [\"name\", \"flavor_id\",\n+ \"node_processes\", \"count\"]\n+\n+ del cl_tmpl_ng_schema[\"properties\"][\"plugin_name\"]\n+ del cl_tmpl_ng_schema[\"properties\"][\"hadoop_version\"]\n+\nreturn cl_tmpl_ng_schema\n@@ -34,7 +36,7 @@ _cluster_tmpl_ng_schema = _build_ng_schema_for_cluster_tmpl()\ndef _build_ng_tmpl_schema_for_cluster_template():\ncl_tmpl_ng_tmpl_schema = copy.deepcopy(_cluster_tmpl_ng_schema)\n- cl_tmpl_ng_tmpl_schema['properties'].update(\n+ cl_tmpl_ng_tmpl_schema[\"properties\"].update(\n{\n\"node_group_template_id\": {\n\"type\": \"string\",\n@@ -48,6 +50,7 @@ def _build_ng_tmpl_schema_for_cluster_template():\n_cluster_tmpl_ng_tmpl_schema = _build_ng_tmpl_schema_for_cluster_template()\n+\nCLUSTER_TEMPLATE_SCHEMA = {\n\"type\": \"object\",\n\"properties\": {\n@@ -112,5 +115,18 @@ CLUSTER_TEMPLATE_SCHEMA = {\n]\n}\n+# APIv2: renaming hadoop_version -> plugin_version\n+CLUSTER_TEMPLATE_SCHEMA_V2 = copy.deepcopy(CLUSTER_TEMPLATE_SCHEMA)\n+del CLUSTER_TEMPLATE_SCHEMA_V2[\"properties\"][\"hadoop_version\"]\n+CLUSTER_TEMPLATE_SCHEMA_V2[\"required\"].remove(\"hadoop_version\")\n+CLUSTER_TEMPLATE_SCHEMA_V2[\"properties\"].update({\n+ \"plugin_version\": {\n+ \"type\": \"string\",\n+ }})\n+CLUSTER_TEMPLATE_SCHEMA_V2[\"required\"].append(\"plugin_version\")\n+\nCLUSTER_TEMPLATE_UPDATE_SCHEMA = copy.copy(CLUSTER_TEMPLATE_SCHEMA)\nCLUSTER_TEMPLATE_UPDATE_SCHEMA[\"required\"] = []\n+\n+CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2 = copy.copy(CLUSTER_TEMPLATE_SCHEMA_V2)\n+CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2[\"required\"] = []\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/cluster_templates.py",
"new_path": "sahara/service/validations/cluster_templates.py",
"diff": "@@ -22,21 +22,25 @@ from sahara.service.validations import shares\ndef check_cluster_template_create(data, **kwargs):\n+ plugin_version = 'hadoop_version'\n+ if data.get('plugin_version'):\n+ plugin_version = 'plugin_version'\n+\nb.check_cluster_template_unique_name(data['name'])\nb.check_plugin_name_exists(data['plugin_name'])\nb.check_plugin_supports_version(data['plugin_name'],\n- data['hadoop_version'])\n+ data[plugin_version])\nif data.get('default_image_id'):\nb.check_image_registered(data['default_image_id'])\nb.check_required_image_tags(data['plugin_name'],\n- data['hadoop_version'],\n+ data[plugin_version],\ndata['default_image_id'])\nb.check_all_configurations(data)\nif data.get('anti_affinity'):\n- b.check_node_processes(data['plugin_name'], data['hadoop_version'],\n+ b.check_node_processes(data['plugin_name'], data[plugin_version],\ndata['anti_affinity'])\nif data.get('neutron_management_network'):\n@@ -61,22 +65,26 @@ def check_cluster_template_usage(cluster_template_id, **kwargs):\ndef check_cluster_template_update(cluster_template_id, data, **kwargs):\n- if data.get('plugin_name') and not data.get('hadoop_version'):\n+ plugin_version = 'hadoop_version'\n+ if data.get('plugin_version'):\n+ plugin_version = 'plugin_version'\n+\n+ if data.get('plugin_name') and not data.get(plugin_version):\nraise ex.InvalidReferenceException(\n- _(\"You must specify a hadoop_version value \"\n- \"for your plugin_name\"))\n+ _(\"You must specify a %s value \"\n+ \"for your plugin_name\") % plugin_version)\nif data.get('plugin_name'):\nplugin = data['plugin_name']\n- version = data['hadoop_version']\n+ version = data[plugin_version]\nb.check_plugin_name_exists(plugin)\nb.check_plugin_supports_version(plugin, version)\nb.check_all_configurations(data)\nelse:\ncluster_template = api.get_cluster_template(cluster_template_id)\nplugin = cluster_template.plugin_name\n- if data.get('hadoop_version'):\n- version = data.get('hadoop_version')\n+ if data.get(plugin_version):\n+ version = data.get(plugin_version)\nb.check_plugin_supports_version(plugin, version)\nelse:\nversion = cluster_template.hadoop_version\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/clusters.py",
"new_path": "sahara/service/validations/clusters.py",
"diff": "@@ -41,11 +41,14 @@ def check_multiple_clusters_create(data, **kwargs):\ndef _check_cluster_create(data):\n+ plugin_version = 'hadoop_version'\n+ if data.get('plugin_version'):\n+ plugin_version = 'plugin_version'\nb.check_plugin_name_exists(data['plugin_name'])\nb.check_plugin_supports_version(data['plugin_name'],\n- data['hadoop_version'])\n+ data[plugin_version])\nb.check_plugin_labels(\n- data['plugin_name'], data['hadoop_version'])\n+ data['plugin_name'], data[plugin_version])\nif data.get('cluster_template_id'):\nct_id = data['cluster_template_id']\n@@ -53,7 +56,7 @@ def _check_cluster_create(data):\nif not data.get('node_groups'):\nb.check_node_groups_in_cluster_templates(data['name'],\ndata['plugin_name'],\n- data['hadoop_version'],\n+ data[plugin_version],\nct_id)\nif data.get('user_keypair_id'):\n@@ -63,7 +66,7 @@ def _check_cluster_create(data):\nif default_image_id:\nb.check_image_registered(default_image_id)\nb.check_required_image_tags(data['plugin_name'],\n- data['hadoop_version'],\n+ data[plugin_version],\ndefault_image_id)\nelse:\nraise ex.NotFoundException('default_image_id',\n@@ -72,7 +75,7 @@ def _check_cluster_create(data):\nb.check_all_configurations(data)\nif data.get('anti_affinity'):\n- b.check_node_processes(data['plugin_name'], data['hadoop_version'],\n+ b.check_node_processes(data['plugin_name'], data[plugin_version],\ndata['anti_affinity'])\nif data.get('node_groups'):\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/clusters_schema.py",
"new_path": "sahara/service/validations/clusters_schema.py",
"diff": "import copy\n+import sahara.exceptions as ex\nfrom sahara.service.health import verification_base\nimport sahara.service.validations.cluster_template_schema as ct_schema\nfrom sahara.service.validations import shares\n@@ -25,8 +26,14 @@ def _build_node_groups_schema():\nreturn schema['properties']['node_groups']\n-def _build_cluster_schema():\n+def _build_cluster_schema(api_version='v1'):\n+ if api_version == 'v1':\ncluster_schema = copy.deepcopy(ct_schema.CLUSTER_TEMPLATE_SCHEMA)\n+ elif api_version == 'v2':\n+ cluster_schema = copy.deepcopy(ct_schema.CLUSTER_TEMPLATE_SCHEMA_V2)\n+ else:\n+ raise ex.InvalidDataException('Invalid API version %s' % api_version)\n+\ncluster_schema['properties'].update({\n\"is_transient\": {\n\"type\": \"boolean\"\n@@ -43,6 +50,7 @@ def _build_cluster_schema():\nCLUSTER_SCHEMA = _build_cluster_schema()\n+CLUSTER_SCHEMA_V2 = _build_cluster_schema('v2')\nMULTIPLE_CLUSTER_SCHEMA = copy.deepcopy(CLUSTER_SCHEMA)\nMULTIPLE_CLUSTER_SCHEMA['properties'].update({\n@@ -51,6 +59,13 @@ MULTIPLE_CLUSTER_SCHEMA['properties'].update({\n}})\nMULTIPLE_CLUSTER_SCHEMA['required'].append('count')\n+MULTIPLE_CLUSTER_SCHEMA_V2 = copy.deepcopy(CLUSTER_SCHEMA_V2)\n+MULTIPLE_CLUSTER_SCHEMA_V2['properties'].update({\n+ \"count\": {\n+ \"type\": \"integer\"\n+ }})\n+MULTIPLE_CLUSTER_SCHEMA_V2['required'].append('count')\n+\nCLUSTER_UPDATE_SCHEMA = {\n\"type\": \"object\",\n\"properties\": {\n@@ -118,5 +133,4 @@ CLUSTER_SCALING_SCHEMA = {\n\"required\": [\"add_node_groups\"]\n}\n]\n-\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/node_group_template_schema.py",
"new_path": "sahara/service/validations/node_group_template_schema.py",
"diff": "@@ -27,7 +27,7 @@ NODE_GROUP_TEMPLATE_SCHEMA = {\n\"format\": \"valid_name_hostname\",\n},\n\"flavor_id\": {\n- 'type': 'flavor',\n+ \"type\": \"flavor\",\n},\n\"plugin_name\": {\n\"type\": \"string\",\n@@ -110,7 +110,20 @@ NODE_GROUP_TEMPLATE_SCHEMA = {\n]\n}\n+# APIv2: renaming hadoop_version -> plugin_version\n+NODE_GROUP_TEMPLATE_SCHEMA_V2 = copy.deepcopy(NODE_GROUP_TEMPLATE_SCHEMA)\n+del NODE_GROUP_TEMPLATE_SCHEMA_V2[\"properties\"][\"hadoop_version\"]\n+NODE_GROUP_TEMPLATE_SCHEMA_V2[\"required\"].remove(\"hadoop_version\")\n+NODE_GROUP_TEMPLATE_SCHEMA_V2[\"properties\"].update({\n+ \"plugin_version\": {\n+ \"type\": \"string\",\n+ }})\n+NODE_GROUP_TEMPLATE_SCHEMA_V2[\"required\"].append(\"plugin_version\")\n+\n# For an update we do not require any fields but we want the given\n# fields to be validated\nNODE_GROUP_TEMPLATE_UPDATE_SCHEMA = copy.copy(NODE_GROUP_TEMPLATE_SCHEMA)\nNODE_GROUP_TEMPLATE_UPDATE_SCHEMA[\"required\"] = []\n+\n+NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2 = copy.copy(NODE_GROUP_TEMPLATE_SCHEMA_V2)\n+NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2[\"required\"] = []\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/node_group_templates.py",
"new_path": "sahara/service/validations/node_group_templates.py",
"diff": "@@ -22,16 +22,20 @@ from sahara.service.validations import shares\ndef check_node_group_template_create(data, **kwargs):\n+ plugin_version = 'hadoop_version'\n+ if data.get('plugin_version'):\n+ plugin_version = 'plugin_version'\n+\nb.check_node_group_template_unique_name(data['name'])\nb.check_plugin_name_exists(data['plugin_name'])\nb.check_plugin_supports_version(data['plugin_name'],\n- data['hadoop_version'])\n+ data[plugin_version])\nb.check_node_group_basic_fields(data['plugin_name'],\n- data['hadoop_version'], data)\n+ data[plugin_version], data)\nif data.get('image_id'):\nb.check_image_registered(data['image_id'])\nb.check_required_image_tags(data['plugin_name'],\n- data['hadoop_version'],\n+ data[plugin_version],\ndata['image_id'])\nif data.get('shares'):\nshares.check_shares(data['shares'])\n@@ -63,21 +67,25 @@ def check_node_group_template_usage(node_group_template_id, **kwargs):\ndef check_node_group_template_update(node_group_template_id, data, **kwargs):\n- if data.get('plugin_name') and not data.get('hadoop_version'):\n+ plugin_version = 'hadoop_version'\n+ if data.get('plugin_version'):\n+ plugin_version = 'plugin_version'\n+\n+ if data.get('plugin_name') and not data.get(plugin_version):\nraise ex.InvalidReferenceException(\n- _(\"You must specify a hadoop_version value \"\n- \"for your plugin_name\"))\n+ _(\"You must specify a %s value \"\n+ \"for your plugin_name\") % plugin_version)\nif data.get('plugin_name'):\nplugin = data.get('plugin_name')\n- version = data.get('hadoop_version')\n+ version = data.get(plugin_version)\nb.check_plugin_name_exists(plugin)\nb.check_plugin_supports_version(plugin, version)\nelse:\nngt = api.get_node_group_template(node_group_template_id)\nplugin = ngt.plugin_name\n- if data.get('hadoop_version'):\n- version = data.get('hadoop_version')\n+ if data.get(plugin_version):\n+ version = data.get(plugin_version)\nb.check_plugin_supports_version(plugin, version)\nelse:\nversion = ngt.hadoop_version\n"
}
] | Python | Apache License 2.0 | openstack/sahara | [APIv2] Rename hadoop_version
hadoop_version should be changed to plugin_version.
This patch only changes json requests, the complete
change should be done when APIv2 is stable
and APIv1 deprecated. Along with this should be a
data model change as well.
Partial-Implements: bp v2-api-experimental-impl
Change-Id: Id59703ce49741d024ac9dee09d28f8f515c9806a |
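Since the escaped diff above is hard to scan, here is a small, self-contained sketch of the compatibility shim it applies at the API layer: v2 request bodies carry `plugin_version`, while the underlying data model still expects `hadoop_version`, so the key is renamed before the data is handed on. The helper name and sample payload below are made up for illustration.

```python
# Hypothetical sketch of the plugin_version -> hadoop_version shim applied
# to v2 request bodies before they reach the (unchanged) data model.
def translate_plugin_version(data):
    """Rename plugin_version to hadoop_version in a request body, in place."""
    if "plugin_version" in data:
        data["hadoop_version"] = data.pop("plugin_version")
    return data

request_body = {"plugin_name": "spark", "plugin_version": "2.1.0"}
print(translate_plugin_version(request_body))
# {'plugin_name': 'spark', 'hadoop_version': '2.1.0'}
```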
488,278 | 11.04.2017 11:08:59 | 10,800 | 83722b4c5e840513ea768937cc93bef0055c9b38 | Inefficient validation checks
Inefficient validation checks: to check that an object name is unique, Sahara
retrieves all entries from the database.
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/base.py",
"new_path": "sahara/service/validations/base.py",
"diff": "@@ -258,11 +258,11 @@ def check_volume_type_exists(volume_type):\n# Cluster creation related checks\n-def check_cluster_unique_name(name):\n- if name in [cluster.name for cluster in api.get_clusters(\n- tenant_id=context.ctx().tenant_id)]:\n+def check_cluster_unique_name(cluster_name):\n+ if cluster_name in [cluster.name for cluster in\n+ api.get_clusters(name=cluster_name)]:\nraise ex.NameAlreadyExistsException(\n- _(\"Cluster with name '%s' already exists\") % name)\n+ _(\"Cluster with name '%s' already exists\") % cluster_name)\ndef check_cluster_hostnames_lengths(cluster_name, node_groups):\n@@ -294,11 +294,13 @@ def check_network_exists(net_id):\n# Cluster templates related checks\n-def check_cluster_template_unique_name(name):\n- if name in [t.name for t in api.get_cluster_templates(\n- tenant_id=context.ctx().tenant_id)]:\n+def check_cluster_template_unique_name(cluster_tmpl_name):\n+ if cluster_tmpl_name in [cluster_tmpl.name for cluster_tmpl in\n+ api.get_cluster_templates(\n+ name=cluster_tmpl_name)]:\nraise ex.NameAlreadyExistsException(\n- _(\"Cluster template with name '%s' already exists\") % name)\n+ _(\"Cluster template with name '%s' already exists\") %\n+ cluster_tmpl_name)\ndef check_cluster_template_exists(cluster_template_id):\n@@ -320,11 +322,12 @@ def check_node_groups_in_cluster_templates(cluster_name, plugin_name,\n# NodeGroup templates related checks\n-def check_node_group_template_unique_name(name):\n- if name in [t.name for t in api.get_node_group_templates(\n- tenant_id=context.ctx().tenant_id)]:\n+def check_node_group_template_unique_name(ng_tmpl_name):\n+ if ng_tmpl_name in [ng_tmpl.name for ng_tmpl in\n+ api.get_node_group_templates(name=ng_tmpl_name)]:\nraise ex.NameAlreadyExistsException(\n- _(\"NodeGroup template with name '%s' already exists\") % name)\n+ _(\"NodeGroup template with name '%s' already exists\") %\n+ ng_tmpl_name)\ndef check_node_group_template_exists(ng_tmpl_id):\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/edp/base.py",
"new_path": "sahara/service/validations/edp/base.py",
"diff": "@@ -50,11 +50,13 @@ job_configs = {\n}\n-def check_data_source_unique_name(name):\n- if name in [ds.name for ds in conductor.data_source_get_all(\n- context.ctx())]:\n- raise ex.NameAlreadyExistsException(_(\"Data source with name '%s' \"\n- \"already exists\") % name)\n+def check_data_source_unique_name(ds_name):\n+ if ds_name in [ds.name for ds in\n+ conductor.data_source_get_all(context.ctx(),\n+ name=ds_name)]:\n+ raise ex.NameAlreadyExistsException(\n+ _(\"Data source with name '%s' \"\n+ \"already exists\") % ds_name)\ndef check_data_source_exists(data_source_id):\n@@ -63,10 +65,12 @@ def check_data_source_exists(data_source_id):\n_(\"DataSource with id '%s' doesn't exist\") % data_source_id)\n-def check_job_unique_name(name):\n- if name in [j.name for j in conductor.job_get_all(context.ctx())]:\n+def check_job_unique_name(job_name):\n+ if job_name in [job.name for job in\n+ conductor.job_get_all(context.ctx(),\n+ name=job_name)]:\nraise ex.NameAlreadyExistsException(_(\"Job with name '%s' \"\n- \"already exists\") % name)\n+ \"already exists\") % job_name)\ndef check_job_binary_internal_exists(jbi_id):\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Inefficient validation checks
Inefficient validation checks, to check unique object name Sahara
retrieves all entries from database.
Change-Id: I6494d3ac3f5d793ec4a47dce598dc42ca06a303e
Closes-Bug: 1212225 |
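The change above pushes the uniqueness check down to the database by filtering on the name instead of fetching every row and comparing in Python. A minimal sketch of that idea follows, with a fake in-memory backend standing in for the real conductor API.

```python
# Illustrative sketch (not Sahara code): filter by name at the query layer
# rather than retrieving all entries and scanning them client-side.
fake_db = [{"name": "cluster-a"}, {"name": "cluster-b"}]

def get_clusters(name=None):
    """Stand-in for a conductor call that supports server-side filtering."""
    if name is None:
        return list(fake_db)
    return [c for c in fake_db if c["name"] == name]

def check_cluster_unique_name(cluster_name):
    # Only the rows matching cluster_name are fetched; an empty result
    # means the name is free.
    if get_clusters(name=cluster_name):
        raise ValueError("Cluster with name '%s' already exists" % cluster_name)

check_cluster_unique_name("cluster-c")  # passes; "cluster-a" would raise
```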
488,261 | 24.03.2017 00:38:31 | -19,080 | 997a16ed851f695f7c86e55e8caf7fcee3b7dbba | added timeout function in health check function
The health check function had no timer and could hang in case of issues;
a configurable timer is now assigned.
Partial-Bug:1622553 | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/health/common.py",
"new_path": "sahara/service/health/common.py",
"diff": "@@ -38,7 +38,9 @@ health_opts = [\nhelp=\"Option to enable verifications for all clusters\"),\ncfg.IntOpt('verification_periodic_interval', default=600,\nhelp=\"Interval between two consecutive periodic tasks for\"\n- \"verifications, in seconds.\")\n+ \"verifications, in seconds.\"),\n+ cfg.IntOpt('verification_timeout', default=600,\n+ help=\"Time limit for health check function, in seconds.\")\n]\nhealth_opts_group = cfg.OptGroup(\n'cluster_verifications', title='Options to configure verifications')\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/health/health_check_base.py",
"new_path": "sahara/service/health/health_check_base.py",
"diff": "@@ -17,13 +17,14 @@ import abc\nimport functools\nimport threading\n+from eventlet import timeout as e_timeout\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nimport six\nfrom sahara import conductor\nfrom sahara import context\n-from sahara import exceptions\n+from sahara import exceptions as ex\nfrom sahara.i18n import _\nfrom sahara.i18n import _LE\nfrom sahara.plugins import base as plugin_base\n@@ -36,7 +37,7 @@ CONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n-class BaseHealthError(exceptions.SaharaException):\n+class BaseHealthError(ex.SaharaException):\nmessage_template = _(\"Cluster health is %(status)s. Reason: %(reason)s\")\ncode = 'HEALTH_ERROR'\nstatus = 'UNKNOWN'\n@@ -109,6 +110,9 @@ class BasicHealthCheck(object):\nsender.health_notify(self.cluster, self.health_check)\ndef execute(self):\n+ timeout = CONF.cluster_verifications.verification_timeout\n+ try:\n+ with e_timeout.Timeout(timeout, ex.TimeoutException(timeout)):\nif not self.is_available():\nreturn\nself._indicate_start()\n@@ -121,6 +125,9 @@ class BasicHealthCheck(object):\nstatus = exc.status\nelse:\nstatus = common.HEALTH_STATUS_RED\n+ except ex.TimeoutException:\n+ result = _(\"Health check timed out\")\n+ status = common.HEALTH_STATUS_YELLOW\nself._write_result(status, result)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | added timeout function in health check function
No timer for health check function, could hang in case of issues.
assigned a configurable timer.
Partial-Bug:1622553
Change-Id: Id5d045a0642b57e479fc6993436fc7c2eceb15bd |
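For readability, here is a minimal, self-contained sketch of the pattern this patch introduces: bound a potentially hanging health check with eventlet's `Timeout` and translate a timeout into a YELLOW result rather than letting the check block forever. The 3-second limit, exception class, and check body are invented for the example.

```python
# Hypothetical sketch of bounding a health check with eventlet.timeout.Timeout.
import eventlet
from eventlet import timeout as e_timeout

class HealthTimeout(Exception):
    pass

def run_check(check, limit=3):
    try:
        with e_timeout.Timeout(limit, HealthTimeout()):
            return "GREEN", check()
    except HealthTimeout:
        return "YELLOW", "Health check timed out"

def slow_check():
    eventlet.sleep(10)  # stands in for a check that hangs
    return "all services responding"

print(run_check(slow_check))  # ('YELLOW', 'Health check timed out')
```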
488,278 | 09.04.2017 12:16:01 | 10,800 | 21045d3c3922e4c161c8ce1fd37a76c7531966b2 | [APIv2] Rename oozie_job_id
oozie_job_id should be changed to engine_job_id.
This patch only changes json responses, the complete
change should be done when APIv2 is stable
and APIv1 deprecated. Along with this should be a
data model change as well.
Partial-Implements: bp v2-api-experimental-impl | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/jobs.py",
"new_path": "sahara/api/v2/jobs.py",
"diff": "@@ -32,6 +32,12 @@ rest = u.RestV2('jobs', __name__)\nv.validate_sorting_job_executions)\ndef jobs_list():\nresult = api.job_execution_list(**u.get_request_args().to_dict())\n+ # APIv2: renaming oozie_job_id -> engine_job_id\n+ # once APIv1 is deprecated this can be\n+ # removed\n+ for je in result:\n+ je['engine_job_id'] = je['oozie_job_id']\n+ del je['oozie_job_id']\nreturn u.render(res=result, name='jobs')\n@@ -46,15 +52,20 @@ def jobs_execute(data):\[email protected](\"data-processing:job-executions:get\")\[email protected]_exists(api.get_job_execution, id='job_id')\ndef jobs_get(job_id):\n- return u.to_wrapped_dict(api.get_job_execution, job_id)\n+ result = u.to_wrapped_dict_no_render(api.get_job_execution, job_id)\n+ result['engine_job_id'] = result['oozie_job_id']\n+ del result['oozie_job_id']\n+ return u.render(result)\[email protected]('/jobs/<job_id>/refresh-status')\[email protected](\"data-processing:job-executions:refresh_status\")\[email protected]_exists(api.get_job_execution, id='job_id')\ndef jobs_status(job_id):\n- return u.to_wrapped_dict(\n- api.get_job_execution_status, job_id)\n+ result = u.to_wrapped_dict_no_render(api.get_job_execution_status, job_id)\n+ result['engine_job_id'] = result['oozie_job_id']\n+ del result['oozie_job_id']\n+ return u.render(result)\[email protected]('/jobs/<job_id>/cancel')\n@@ -62,7 +73,10 @@ def jobs_status(job_id):\[email protected]_exists(api.get_job_execution, id='job_id')\[email protected](None, v_j_e.check_job_execution_cancel)\ndef jobs_cancel(job_id):\n- return u.to_wrapped_dict(api.cancel_job_execution, job_id)\n+ result = u.to_wrapped_dict_no_render(api.cancel_job_execution, job_id)\n+ result['engine_job_id'] = result['oozie_job_id']\n+ del result['oozie_job_id']\n+ return u.render(result)\[email protected]('/jobs/<job_id>')\n@@ -71,8 +85,11 @@ def jobs_cancel(job_id):\[email protected](\nv_j_e_schema.JOB_EXEC_UPDATE_SCHEMA, v_j_e.check_job_execution_update)\ndef jobs_update(job_id, data):\n- return u.to_wrapped_dict(\n+ result = u.to_wrapped_dict_no_render(\napi.update_job_execution, job_id, data)\n+ result['engine_job_id'] = result['oozie_job_id']\n+ del result['oozie_job_id']\n+ return u.render(result)\[email protected]('/jobs/<job_id>')\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/api.py",
"new_path": "sahara/utils/api.py",
"diff": "@@ -330,10 +330,14 @@ def not_found(error):\ndef to_wrapped_dict(func, id, *args, **kwargs):\n+ return render(to_wrapped_dict_no_render(func, id, *args, **kwargs))\n+\n+\n+def to_wrapped_dict_no_render(func, id, *args, **kwargs):\nobj = func(id, *args, **kwargs)\nif obj is None:\ne = ex.NotFoundException(\n{'id': id}, _('Object with %s not found'))\nreturn not_found(e)\n- return render(obj.to_wrapped_dict())\n+ return obj.to_wrapped_dict()\n"
}
] | Python | Apache License 2.0 | openstack/sahara | [APIv2] Rename oozie_job_id
oozie_job_id should be changed to engine_job_id.
This patch only changes json responses, the complete
change should be done when APIv2 is stable
and APIv1 deprecated. Along with this should be a
data model change as well.
Change-Id: I2ecfbb56d1e53d6b005ae6a52c70576238341da2
Partial-Implements: bp v2-api-experimental-impl |
488,272 | 29.05.2017 17:04:03 | 10,800 | d1a0ce06e24fae3d4ae465605034d6a82c0a1ec2 | Changing reconcile to test_only
To make the code easier to understand, we concluded that renaming the
variable reconcile to test_only makes more sense. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/devref/image-gen.rst",
"new_path": "doc/source/devref/image-gen.rst",
"diff": "@@ -258,9 +258,9 @@ Two variables are always available to scripts run under this framework:\n* ``distro``: The distro of the image, in case you want to switch on distro\nwithin your script (rather than by using the os_case validator).\n-* ``reconcile``: If this value equates to boolean true, then the script should\n+* ``test_only``: If this value equates to boolean false, then the script should\nattempt to change the image or instance if it does not already meet the\n- specification. If this equates to boolean false, the script should exit with\n+ specification. If this equates to boolean true, the script should exit with\na failure code if the image or instance does not already meet the\nspecification.\n@@ -369,16 +369,16 @@ in the OpenStack context.) We will, of course, focus on that framework here.\n\"\"\"Gets the argument set taken by the plugin's image generator\"\"\"\ndef pack_image(self, hadoop_version, remote,\n- reconcile=True, image_arguments=None):\n+ test_only=False, image_arguments=None):\n\"\"\"Packs an image for registration in Glance and use by Sahara\"\"\"\n- def validate_images(self, cluster, reconcile=True, image_arguments=None):\n+ def validate_images(self, cluster, test_only=False, image_arguments=None):\n\"\"\"Validates the image to be used by a cluster\"\"\"\nThe validate_images method is called after Heat provisioning of your cluster,\n-but before cluster configuration. If the reconcile keyword of this method is\n-set to False, the method should only test the instances without modification.\n-If it is set to True, the method should make any necessary changes (this can\n+but before cluster configuration. If the test_only keyword of this method is\n+set to True, the method should only test the instances without modification.\n+If it is set to False, the method should make any necessary changes (this can\nbe used to allow clusters to be spun up from clean, OS-only images.) This\nmethod is expected to use an ssh remote to communicate with instances, as\nper normal in Sahara.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/images.py",
"new_path": "sahara/plugins/images.py",
"diff": "@@ -54,20 +54,20 @@ def transform_exception(from_type, to_type, transform_func=None):\nreturn decorator\n-def validate_instance(instance, validators, reconcile=True, **kwargs):\n+def validate_instance(instance, validators, test_only=False, **kwargs):\n\"\"\"Runs all validators against the specified instance.\n:param instance: An instance to validate.\n:param validators: A sequence of ImageValidators.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:raises ImageValidationError: If validation fails.\n\"\"\"\nwith instance.remote() as remote:\nfor validator in validators:\n- validator.validate(remote, reconcile=reconcile, **kwargs)\n+ validator.validate(remote, test_only=test_only, **kwargs)\nclass ImageArgument(object):\n@@ -120,7 +120,7 @@ class ImageArgument(object):\narg.get('required'),\narg.get('choices'))\nfor name, arg in six.iteritems(spec)}\n- reserved_names = ['distro', 'reconcile']\n+ reserved_names = ['distro', 'test_only']\nfor name, arg in six.iteritems(arguments):\nif name in reserved_names:\nraise p_ex.ImageValidationSpecificationError(\n@@ -150,12 +150,12 @@ class ImageValidator(object):\n\"\"\"Validates the image spawned to an instance via a set of rules.\"\"\"\[email protected]\n- def validate(self, remote, reconcile=True, **kwargs):\n+ def validate(self, remote, test_only=False, **kwargs):\n\"\"\"Validates the image.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:raises ImageValidationError: If validation fails.\n@@ -168,7 +168,7 @@ class SaharaImageValidatorBase(ImageValidator):\n\"\"\"Base class for Sahara's native image validation.\"\"\"\nDISTRO_KEY = 'distro'\n- RECONCILE_KEY = 'reconcile'\n+ TEST_ONLY_KEY = 'test_only'\nORDERED_VALIDATORS_SCHEMA = {\n\"type\": \"array\",\n@@ -294,13 +294,13 @@ class SaharaImageValidatorBase(ImageValidator):\ndef __nonzero__(self):\nreturn False\n- def try_validate(self, remote, reconcile=True,\n+ def try_validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate, but returns rather than raising on failure.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. 
If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n@@ -309,7 +309,7 @@ class SaharaImageValidatorBase(ImageValidator):\n\"\"\"\ntry:\nself.validate(\n- remote, reconcile=reconcile,\n+ remote, test_only=test_only,\nimage_arguments=image_arguments, **kwargs)\nreturn True\nexcept p_ex.ImageValidationError as exc:\n@@ -368,7 +368,7 @@ class SaharaImageValidator(SaharaImageValidatorBase):\nself.arguments = arguments\n@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate the image.\n@@ -376,8 +376,8 @@ class SaharaImageValidator(SaharaImageValidatorBase):\nsteps such as distro discovery.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n@@ -403,7 +403,7 @@ class SaharaImageValidator(SaharaImageValidatorBase):\nelse:\nargument_values[name] = value\nargument_values[self.DISTRO_KEY] = remote.get_os_distrib()\n- self.validator.validate(remote, reconcile=reconcile,\n+ self.validator.validate(remote, test_only=test_only,\nimage_arguments=argument_values)\n@@ -496,17 +496,17 @@ class SaharaPackageValidator(SaharaImageValidatorBase):\nself.packages = packages\n@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate package installation on the image.\n- Even if reconcile=True, attempts to verify previous package\n+ Even if test_only=False, attempts to verify previous package\ninstallation offline before using networked tools to validate or\ninstall new packages.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. 
If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n@@ -523,7 +523,7 @@ class SaharaPackageValidator(SaharaImageValidatorBase):\ncheck(self, remote)\nexcept (ex.SubprocessException, ex.RemoteCommandException,\nRuntimeError):\n- if reconcile:\n+ if not test_only:\ninstall(self, remote)\ncheck(self, remote)\nelse:\n@@ -560,7 +560,7 @@ class SaharaPackageValidator(SaharaImageValidatorBase):\nclass SaharaScriptValidator(SaharaImageValidatorBase):\n\"\"\"A validator that runs a script on the instance.\"\"\"\n- _DEFAULT_ENV_VARS = [SaharaImageValidatorBase.RECONCILE_KEY,\n+ _DEFAULT_ENV_VARS = [SaharaImageValidatorBase.TEST_ONLY_KEY,\nSaharaImageValidatorBase.DISTRO_KEY]\nSPEC_SCHEMA = {\n@@ -652,25 +652,25 @@ class SaharaScriptValidator(SaharaImageValidatorBase):\nself.output_var = output_var\n@transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError)\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate by running a script on the image.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\nargument name.\n- Note that the key SIV_RECONCILE will be set to 1 if the script\n- should reconcile and 0 otherwise; all scripts should act on this\n+ Note that the key SIV_TEST_ONLY will be set to 1 if the script\n+ should test_only and 0 otherwise; all scripts should act on this\ninput if possible. 
The key SIV_DISTRO will also contain the\ndistro representation, per `lsb_release -is`.\n:raises ImageValidationError: If validation fails.\n\"\"\"\narguments = copy.deepcopy(image_arguments)\n- arguments[self.RECONCILE_KEY] = 1 if reconcile else 0\n+ arguments[self.TEST_ONLY_KEY] = 1 if test_only else 0\nscript = \"\\n\".join([\"%(env_vars)s\",\n\"%(script)s\"])\nenv_vars = \"\\n\".join(\"export %s=%s\" % (key, value) for (key, value)\n@@ -712,11 +712,11 @@ class SaharaAggregateValidator(SaharaImageValidatorBase):\nclass SaharaAnyValidator(SaharaAggregateValidator):\n\"\"\"A list of validators, only one of which must succeed.\"\"\"\n- def _try_all(self, remote, reconcile=True,\n+ def _try_all(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\nresults = []\nfor validator in self.validators:\n- result = validator.try_validate(remote, reconcile=reconcile,\n+ result = validator.try_validate(remote, test_only=test_only,\nimage_arguments=image_arguments,\n**kwargs)\nresults.append(result)\n@@ -724,28 +724,28 @@ class SaharaAnyValidator(SaharaAggregateValidator):\nbreak\nreturn results\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate any of the contained validators.\n- Note that if reconcile=True, this validator will first run all\n- contained validators using reconcile=False, and succeed immediately\n+ Note that if test_only=False, this validator will first run all\n+ contained validators using test_only=True, and succeed immediately\nshould any pass validation. If all fail, it will only then run them\n- using reconcile=True, and again succeed immediately should any pass.\n+ using test_only=False, and again succeed immediately should any pass.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\nargument name.\n:raises ImageValidationError: If validation fails.\n\"\"\"\n- results = self._try_all(remote, reconcile=False,\n+ results = self._try_all(remote, test_only=True,\nimage_arguments=image_arguments)\n- if reconcile and not any(results):\n- results = self._try_all(remote, reconcile=True,\n+ if not test_only and not any(results):\n+ results = self._try_all(remote, test_only=False,\nimage_arguments=image_arguments)\nif not any(results):\nraise p_ex.AllValidationsFailedError(result.exception for result\n@@ -755,12 +755,13 @@ class SaharaAnyValidator(SaharaAggregateValidator):\nclass SaharaAllValidator(SaharaAggregateValidator):\n\"\"\"A list of validators, all of which must succeed.\"\"\"\n- def validate(self, remote, reconcile=True, image_arguments=None, **kwargs):\n+ def validate(self, remote, test_only=False, image_arguments=None,\n+ **kwargs):\n\"\"\"Attempts to validate all of the contained validators.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. 
If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n@@ -768,7 +769,7 @@ class SaharaAllValidator(SaharaAggregateValidator):\n:raises ImageValidationError: If validation fails.\n\"\"\"\nfor validator in self.validators:\n- validator.validate(remote, reconcile=reconcile,\n+ validator.validate(remote, test_only=test_only,\nimage_arguments=image_arguments)\n@@ -818,7 +819,7 @@ class SaharaOSCaseValidator(SaharaImageValidatorBase):\n\"\"\"\nself.distros = distros\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate depending on distro.\n@@ -828,8 +829,8 @@ class SaharaOSCaseValidator(SaharaImageValidatorBase):\nIf no keys match, no validators are run, and validation proceeds.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n@@ -842,7 +843,7 @@ class SaharaOSCaseValidator(SaharaImageValidatorBase):\nfor distro, validator in self.distros:\nif distro in matches:\nvalidator.validate(\n- remote, reconcile=reconcile,\n+ remote, test_only=test_only,\nimage_arguments=image_arguments)\nbreak\n@@ -901,13 +902,13 @@ class SaharaArgumentCaseValidator(SaharaImageValidatorBase):\nself.argument_name = argument_name\nself.cases = cases\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate depending on argument value.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n@@ -921,7 +922,7 @@ class SaharaArgumentCaseValidator(SaharaImageValidatorBase):\nvalue = image_arguments[arg]\nif value in self.cases:\nself.cases[value].validate(\n- remote, reconcile=reconcile,\n+ remote, test_only=test_only,\nimage_arguments=image_arguments)\n@@ -972,13 +973,13 @@ class SaharaArgumentSetterValidator(SaharaImageValidatorBase):\nself.argument_name = argument_name\nself.value = value\n- def validate(self, remote, reconcile=True,\n+ def validate(self, remote, test_only=False,\nimage_arguments=None, **kwargs):\n\"\"\"Attempts to validate depending on argument value.\n:param remote: A remote socket to the instance.\n- :param reconcile: If false, all validators will only verify that a\n- desired state is present, and fail if it is not. If true, all\n+ :param test_only: If true, all validators will only verify that a\n+ desired state is present, and fail if it is not. 
If false, all\nvalidators will attempt to enforce the desired state if possible,\nand succeed if this enforcement succeeds.\n:param image_arguments: A dictionary of image argument values keyed by\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/provisioning.py",
"new_path": "sahara/plugins/provisioning.py",
"diff": "@@ -135,24 +135,24 @@ class ProvisioningPluginBase(plugins_base.PluginInterface):\n@plugins_base.optional\ndef pack_image(self, hadoop_version, remote,\n- reconcile=True, image_arguments=None):\n+ test_only=False, image_arguments=None):\n\"\"\"Packs an image for registration in Glance and use by Sahara\n:param remote: A remote (usually of type\nsahara.cli.image_pack.api.ImageRemote) that serves as a handle to\nthe image to modify. Note that this image will be modified\nin-place, not copied.\n- :param reconcile: If set to False, this method will only test to\n+ :param test_only: If set to True, this method will only test to\nensure that the image already meets the plugin's requirements.\nThis can be used to test images without modification. If set to\n- True per the default, this method will modify the image if any\n+ False per the default, this method will modify the image if any\nrequirements are not met.\n:param image_arguments: A dict of image argument name to argument\nvalue.\n:raises: sahara.plugins.exceptions.ImageValidationError: If the method\n- fails to modify the image to specification (if reconcile is True),\n+ fails to modify the image to specification (if test_only is False),\nor if the method finds that the image does not meet the\n- specification (if reconcile is False).\n+ specification (if test_only is True).\n:raises: sahara.plugins.exceptions.ImageValidationSpecificationError:\nIf the specification for image generation or validation is itself\nin error and cannot be executed without repair.\n@@ -160,22 +160,22 @@ class ProvisioningPluginBase(plugins_base.PluginInterface):\npass\n@plugins_base.optional\n- def validate_images(self, cluster, reconcile=True, image_arguments=None):\n+ def validate_images(self, cluster, test_only=False, image_arguments=None):\n\"\"\"Validates the image to be used by a cluster.\n:param cluster: The object handle to a cluster which has active\ninstances ready to generate remote handles.\n- :param reconcile: If set to False, this method will only test to\n+ :param test_only: If set to True, this method will only test to\nensure that the image already meets the plugin's requirements.\nThis can be used to test images without modification. If set to\n- True per the default, this method will modify the image if any\n+ False per the default, this method will modify the image if any\nrequirements are not met.\n:param image_arguments: A dict of image argument name to argument\nvalue.\n:raises: sahara.plugins.exceptions.ImageValidationError: If the method\n- fails to modify the image to specification (if reconcile is True),\n+ fails to modify the image to specification (if test_only is False),\nor if the method finds that the image does not meet the\n- specification (if reconcile is False).\n+ specification (if test_only is True).\n:raises: sahara.plugins.exceptions.ImageValidationSpecificationError:\nIf the specification for image generation or validation is itself\nin error and cannot be executed without repair.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/ops.py",
"new_path": "sahara/service/ops.py",
"diff": "@@ -289,7 +289,7 @@ def _provision_cluster(cluster_id):\ncluster, c_u.CLUSTER_STATUS_CONFIGURING)\ncontext.set_step_type(_(\"Plugin: configure cluster\"))\nif hasattr(plugin, 'validate_images'):\n- plugin.validate_images(cluster, reconcile=True)\n+ plugin.validate_images(cluster, test_only=False)\nshares.mount_shares(cluster)\nplugin.configure_cluster(cluster)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/test_images.py",
"new_path": "sahara/tests/unit/plugins/test_images.py",
"diff": "@@ -47,14 +47,14 @@ class TestImages(b.SaharaTestCase):\nvalidator = cls.from_spec('test_images.py', {}, resource_roots)\nself.assertIsInstance(validator, cls)\n- self.assertEqual(validator.env_vars, ['reconcile', 'distro'])\n+ self.assertEqual(validator.env_vars, ['test_only', 'distro'])\nvalidator = cls.from_spec(\n{'test_images.py': {'env_vars': ['extra-file', 'user']}},\n{}, resource_roots)\nself.assertIsInstance(validator, cls)\nself.assertEqual(validator.env_vars,\n- ['reconcile', 'distro',\n+ ['test_only', 'distro',\n'extra-file', 'user'])\ndef test_all_spec(self):\n@@ -181,7 +181,7 @@ class TestImages(b.SaharaTestCase):\npackages = [cls.Package(\"java\", \"8\")]\nvalidator = images.SaharaPackageValidator(packages)\nremote = mock.Mock()\n- validator.validate(remote, reconcile=False,\n+ validator.validate(remote, test_only=True,\nimage_arguments=image_arguments)\nremote.execute_command.assert_called_with(\n\"rpm -q java-8\", run_as_root=True)\n@@ -193,7 +193,7 @@ class TestImages(b.SaharaTestCase):\nremote.execute_command.side_effect = (\nex.RemoteCommandException(\"So bad!\"))\ntry:\n- validator.validate(remote, reconcile=False,\n+ validator.validate(remote, test_only=True,\nimage_arguments=image_arguments)\nexcept p_ex.ImageValidationError as e:\nself.assertIn(\"So bad!\", e.message)\n@@ -212,7 +212,7 @@ class TestImages(b.SaharaTestCase):\nremote.execute_command.side_effect = side_effect\ntry:\n- validator.validate(remote, reconcile=True,\n+ validator.validate(remote, test_only=False,\nimage_arguments=image_arguments)\nexcept p_ex.ImageValidationError as e:\nself.assertIn(\"So bad!\", e.message)\n@@ -229,7 +229,7 @@ class TestImages(b.SaharaTestCase):\npackages = [cls.Package(\"java\", \"8\")]\nvalidator = images.SaharaPackageValidator(packages)\nremote = mock.Mock()\n- validator.validate(remote, reconcile=False,\n+ validator.validate(remote, test_only=True,\nimage_arguments=image_arguments)\nremote.execute_command.assert_called_with(\n\"dpkg -s java-8\", run_as_root=True)\n@@ -241,7 +241,7 @@ class TestImages(b.SaharaTestCase):\nremote.execute_command.side_effect = (\nex.RemoteCommandException(\"So bad!\"))\ntry:\n- validator.validate(remote, reconcile=False,\n+ validator.validate(remote, test_only=True,\nimage_arguments=image_arguments)\nexcept p_ex.ImageValidationError as e:\nself.assertIn(\"So bad!\", e.message)\n@@ -256,7 +256,7 @@ class TestImages(b.SaharaTestCase):\nremote.execute_command.side_effect = (\nex.RemoteCommandException(\"So bad!\"))\ntry:\n- validator.validate(remote, reconcile=True,\n+ validator.validate(remote, test_only=False,\nimage_arguments=image_arguments)\nexcept p_ex.ImageValidationError as e:\nself.assertIn(\"So bad!\", e.message)\n@@ -279,7 +279,7 @@ class TestImages(b.SaharaTestCase):\nexecute_command=mock.Mock(\nreturn_value=(0, 'fedora')))\n- validator.validate(remote, reconcile=True,\n+ validator.validate(remote, test_only=False,\nimage_arguments=image_arguments)\ncall = [mock.call(map_rep + cmd, run_as_root=True)]\nremote.execute_command.assert_has_calls(call)\n@@ -293,31 +293,31 @@ class TestImages(b.SaharaTestCase):\ndef __init__(self, mock_validate):\nself.mock_validate = mock_validate\n- def validate(self, remote, reconcile=True, **kwargs):\n- self.mock_validate(remote, reconcile=reconcile, **kwargs)\n+ def validate(self, remote, test_only=False, **kwargs):\n+ self.mock_validate(remote, test_only=test_only, **kwargs)\n# One success short circuits validation\nalways_tells_the_truth = FakeValidator(mock.Mock())\nvalidator = 
cls([always_tells_the_truth, always_tells_the_truth])\n- validator.validate(None, reconcile=True)\n+ validator.validate(None, test_only=False)\nself.assertEqual(always_tells_the_truth.mock_validate.call_count, 1)\n- # All failures fails, and calls with reconcile=False on all first\n+ # All failures fails, and calls with test_only=True on all first\nalways_lies = FakeValidator(\nmock.Mock(side_effect=p_ex.ImageValidationError(\"Oh no!\")))\nvalidator = cls([always_lies, always_lies])\ntry:\n- validator.validate(None, reconcile=True)\n+ validator.validate(None, test_only=False)\nexcept p_ex.ImageValidationError:\npass\nself.assertEqual(always_lies.mock_validate.call_count, 4)\n- # But it fails after a first pass if reconcile=False.\n+ # But it fails after a first pass if test_only=True.\nalways_lies = FakeValidator(\nmock.Mock(side_effect=p_ex.ImageValidationError(\"Oh no!\")))\nvalidator = cls([always_lies, always_lies])\ntry:\n- validator.validate(None, reconcile=False)\n+ validator.validate(None, test_only=True)\nexcept p_ex.ImageValidationError:\npass\nself.assertEqual(always_lies.mock_validate.call_count, 2)\n@@ -327,7 +327,7 @@ class TestImages(b.SaharaTestCase):\nalways_lies = FakeValidator(\nmock.Mock(side_effect=p_ex.ImageValidationError(\"Oh no!\")))\nvalidator = cls([always_lies, always_tells_the_truth])\n- validator.validate(None, reconcile=True)\n+ validator.validate(None, test_only=False)\nself.assertEqual(always_lies.mock_validate.call_count, 1)\nself.assertEqual(always_tells_the_truth.mock_validate.call_count, 1)\n@@ -337,10 +337,10 @@ class TestImages(b.SaharaTestCase):\n# All pass\nalways_tells_the_truth = mock.Mock()\nvalidator = cls([always_tells_the_truth, always_tells_the_truth])\n- validator.validate(None, reconcile=True)\n+ validator.validate(None, test_only=False)\nself.assertEqual(always_tells_the_truth.validate.call_count, 2)\nalways_tells_the_truth.validate.assert_called_with(\n- None, reconcile=True, image_arguments=None)\n+ None, test_only=False, image_arguments=None)\n# Second fails\nalways_tells_the_truth = mock.Mock()\n@@ -348,15 +348,15 @@ class TestImages(b.SaharaTestCase):\nside_effect=p_ex.ImageValidationError(\"Boom!\")))\nvalidator = cls([always_tells_the_truth, always_lies])\ntry:\n- validator.validate(None, reconcile=False)\n+ validator.validate(None, test_only=True)\nexcept p_ex.ImageValidationError:\npass\nself.assertEqual(always_tells_the_truth.validate.call_count, 1)\nself.assertEqual(always_lies.validate.call_count, 1)\nalways_tells_the_truth.validate.assert_called_with(\n- None, reconcile=False, image_arguments=None)\n+ None, test_only=True, image_arguments=None)\nalways_lies.validate.assert_called_with(\n- None, reconcile=False, image_arguments=None)\n+ None, test_only=True, image_arguments=None)\n# First fails\nalways_tells_the_truth = mock.Mock()\n@@ -364,12 +364,12 @@ class TestImages(b.SaharaTestCase):\nside_effect=p_ex.ImageValidationError(\"Boom!\")))\nvalidator = cls([always_lies, always_tells_the_truth])\ntry:\n- validator.validate(None, reconcile=False, image_arguments={})\n+ validator.validate(None, test_only=True, image_arguments={})\nexcept p_ex.ImageValidationError:\npass\nself.assertEqual(always_lies.validate.call_count, 1)\nalways_lies.validate.assert_called_with(\n- None, reconcile=False, image_arguments={})\n+ None, test_only=True, image_arguments={})\nself.assertEqual(always_tells_the_truth.validate.call_count, 0)\ndef test_os_case_validator(self):\n@@ -382,12 +382,12 @@ class TestImages(b.SaharaTestCase):\ndistros = [centos, 
redhat]\nimage_arguments = {images.SaharaImageValidator.DISTRO_KEY: \"centos\"}\nvalidator = cls(distros)\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(centos.validator.validate.call_count, 1)\nself.assertEqual(redhat.validator.validate.call_count, 0)\ncentos.validator.validate.assert_called_with(\n- None, reconcile=True, image_arguments=image_arguments)\n+ None, test_only=False, image_arguments=image_arguments)\n# Families match\ncentos = Distro(\"centos\", mock.Mock())\n@@ -395,12 +395,12 @@ class TestImages(b.SaharaTestCase):\ndistros = [centos, redhat]\nimage_arguments = {images.SaharaImageValidator.DISTRO_KEY: \"fedora\"}\nvalidator = cls(distros)\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(centos.validator.validate.call_count, 0)\nself.assertEqual(redhat.validator.validate.call_count, 1)\nredhat.validator.validate.assert_called_with(\n- None, reconcile=True, image_arguments=image_arguments)\n+ None, test_only=False, image_arguments=image_arguments)\n# Non-matches do nothing\ncentos = Distro(\"centos\", mock.Mock())\n@@ -408,7 +408,7 @@ class TestImages(b.SaharaTestCase):\ndistros = [centos, redhat]\nimage_arguments = {images.SaharaImageValidator.DISTRO_KEY: \"ubuntu\"}\nvalidator = cls(distros)\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(centos.validator.validate.call_count, 0)\nself.assertEqual(redhat.validator.validate.call_count, 0)\n@@ -423,12 +423,12 @@ class TestImages(b.SaharaTestCase):\ncases = {\"value\": match,\n\"another_value\": nomatch}\nvalidator = cls(\"argument\", cases)\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(match.validate.call_count, 1)\nself.assertEqual(nomatch.validate.call_count, 0)\nmatch.validate.assert_called_with(\n- None, reconcile=True, image_arguments=image_arguments)\n+ None, test_only=False, image_arguments=image_arguments)\n# Non-matches do nothing\nimage_arguments = {\"argument\": \"value\"}\n@@ -436,7 +436,7 @@ class TestImages(b.SaharaTestCase):\ncases = {\"some_value\": nomatch,\n\"another_value\": nomatch}\nvalidator = cls(\"argument\", cases)\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(nomatch.validate.call_count, 0)\n@@ -446,14 +446,14 @@ class TestImages(b.SaharaTestCase):\n# Old variable is overwritten\nimage_arguments = {\"argument\": \"value\"}\nvalidator = cls(\"argument\", \"new_value\")\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(image_arguments[\"argument\"], \"new_value\")\n# New variable is set\nimage_arguments = {\"argument\": \"value\"}\nvalidator = cls(\"another_argument\", \"value\")\n- validator.validate(None, reconcile=True,\n+ validator.validate(None, test_only=False,\nimage_arguments=image_arguments)\nself.assertEqual(image_arguments,\n{\"argument\": \"value\", \"another_argument\": \"value\"})\n@@ -465,11 +465,11 @@ class TestImages(b.SaharaTestCase):\nremote = mock.Mock(get_os_distrib=mock.Mock(\nreturn_value=\"centos\"))\nvalidator = cls(sub_validator, {})\n- validator.validate(remote, reconcile=True, image_arguments={})\n+ validator.validate(remote, 
test_only=False, image_arguments={})\nexpected_map = {images.SaharaImageValidatorBase.DISTRO_KEY: \"centos\"}\nsub_validator.validate.assert_called_with(\n- remote, reconcile=True, image_arguments=expected_map)\n+ remote, test_only=False, image_arguments=expected_map)\nexpected_map = {images.SaharaImageValidatorBase.DISTRO_KEY: \"centos\"}\n- validator.validate(remote, reconcile=False, image_arguments={})\n+ validator.validate(remote, test_only=True, image_arguments={})\nsub_validator.validate.assert_called_with(\n- remote, reconcile=False, image_arguments=expected_map)\n+ remote, test_only=True, image_arguments=expected_map)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Changing reconcile to test_only
In order to make the code easier to understand, we came to the conclusion that renaming the variable reconcile to test_only makes more sense.
Change-Id: Ia3cca7a1615c690f9e7af6aff0a393ef0fc06e10 |
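The record above does not just rename the flag, it also inverts its default and meaning: a verification-only run changes from reconcile=False to test_only=True. A minimal, hypothetical sketch of the resulting call pattern follows; ExampleValidator and its _check/_apply helpers are illustrative stand-ins, and only the validate() signature mirrors the diff.

# Hypothetical illustration of the renamed flag's semantics; the real
# validators live in sahara/plugins/images.py and are not reproduced here.

class ExampleValidator(object):
    """Stand-in with the same validate() signature as the patched validators."""

    def validate(self, remote, test_only=False, image_arguments=None, **kwargs):
        if test_only:
            # Old reconcile=False behaviour: only verify the desired state.
            self._check(remote, image_arguments)
        else:
            # Old reconcile=True (the default): enforce the desired state.
            self._apply(remote, image_arguments)

    def _check(self, remote, image_arguments):
        pass  # read-only verification would go here

    def _apply(self, remote, image_arguments):
        pass  # enforcement (package installs, config writes, ...) would go here


# Verification-only pass, equivalent to the pre-rename reconcile=False call:
ExampleValidator().validate(remote=None, test_only=True, image_arguments={})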
488,324 | 09.06.2017 18:41:23 | -28,800 | f972dc054648b32614a16e7c1d7a54c267b5d59f | Add test to sahara/plugins/vanilla/v2_7_1/versionhandler.py
Add tests of all methods in versionhandler.py | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/v2_7_1/test_versionhandler.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+import six\n+\n+from sahara.plugins.vanilla.hadoop2 import run_scripts as run\n+from sahara.plugins.vanilla.hadoop2 import starting_scripts as s_scripts\n+from sahara.plugins.vanilla.v2_7_1.edp_engine import EdpOozieEngine\n+from sahara.plugins.vanilla.v2_7_1.edp_engine import EdpSparkEngine\n+from sahara.plugins.vanilla.v2_7_1 import versionhandler as v_h\n+from sahara.tests.unit import base\n+from sahara.tests.unit import testutils\n+\n+\n+class TestConfig(object):\n+ def __init__(self, applicable_target, name, default_value):\n+ self.applicable_target = applicable_target\n+ self.name = name\n+ self.default_value = default_value\n+\n+\n+class VersionHandlerTest(base.SaharaTestCase):\n+ plugin_path = 'sahara.plugins.vanilla.'\n+ plugin_hadoop2_path = 'sahara.plugins.vanilla.hadoop2.'\n+\n+ def setUp(self):\n+ super(VersionHandlerTest, self).setUp()\n+ self.cluster = mock.Mock()\n+ self.vh = v_h.VersionHandler()\n+\n+ def test_get_plugin_configs(self):\n+ self.vh.pctx['all_confs'] = 'haha'\n+ conf = self.vh.get_plugin_configs()\n+ self.assertEqual(conf, 'haha')\n+\n+ def test_get_node_processes(self):\n+ processes = self.vh.get_node_processes()\n+ for k, v in six.iteritems(processes):\n+ for p in v:\n+ self.assertIsInstance(p, str)\n+\n+ @mock.patch(plugin_hadoop2_path +\n+ 'validation.validate_cluster_creating')\n+ def test_validate(self, validate_create):\n+ self.vh.pctx = mock.Mock()\n+ self.vh.validate(self.cluster)\n+ validate_create.assert_called_once_with(self.vh.pctx,\n+ self.cluster)\n+\n+ @mock.patch(plugin_path +\n+ 'v2_7_1.versionhandler.VersionHandler.update_infra')\n+ def test_update_infra(self, update_infra):\n+ self.vh.update_infra(self.cluster)\n+ update_infra.assert_called_once_with(self.cluster)\n+\n+ @mock.patch(plugin_hadoop2_path + 'config.configure_cluster')\n+ def test_configure_cluster(self, configure_cluster):\n+ self.vh.pctx = mock.Mock()\n+ self.vh.configure_cluster(self.cluster)\n+ configure_cluster.assert_called_once_with(self.vh.pctx, self.cluster)\n+\n+ @mock.patch('sahara.swift.swift_helper.install_ssl_certs')\n+ @mock.patch(plugin_hadoop2_path + 'keypairs.provision_keypairs')\n+ @mock.patch('sahara.plugins.utils.get_instances')\n+ @mock.patch('sahara.utils.cluster.get_instances')\n+ def test_start_cluster(self, c_get_instances, u_get_instances,\n+ provision_keypairs, install_ssl_certs):\n+ self.vh.pctx = mock.Mock()\n+ instances = mock.Mock()\n+ s_scripts.start_namenode = mock.Mock()\n+ s_scripts.start_secondarynamenode = mock.Mock()\n+ s_scripts.start_resourcemanager = mock.Mock()\n+ s_scripts.start_historyserver = mock.Mock()\n+ s_scripts.start_oozie = mock.Mock()\n+ s_scripts.start_hiveserver = mock.Mock()\n+ s_scripts.start_spark = mock.Mock()\n+ c_get_instances.return_value = instances\n+ u_get_instances.return_value = instances\n+ run.await_datanodes = mock.Mock()\n+ run.start_dn_nm_processes = mock.Mock()\n+ 
self.vh._set_cluster_info = mock.Mock()\n+ self.vh.start_cluster(self.cluster)\n+ provision_keypairs.assert_called_once_with(self.cluster)\n+ s_scripts.start_namenode.assert_called_once_with(self.cluster)\n+ s_scripts.start_secondarynamenode.assert_called_once_with(self.cluster)\n+ s_scripts.start_resourcemanager.assert_called_once_with(self.cluster)\n+ s_scripts.start_historyserver.assert_called_once_with(self.cluster)\n+ s_scripts.start_oozie.assert_called_once_with(self.vh.pctx,\n+ self.cluster)\n+ s_scripts.start_hiveserver.assert_called_once_with(self.vh.pctx,\n+ self.cluster)\n+ s_scripts.start_spark.assert_called_once_with(self.cluster)\n+ run.start_dn_nm_processes.assert_called_once_with(instances)\n+ run.await_datanodes.assert_called_once_with(self.cluster)\n+ install_ssl_certs.assert_called_once_with(instances)\n+ self.vh._set_cluster_info.assert_called_once_with(self.cluster)\n+\n+ @mock.patch(plugin_hadoop2_path + 'scaling.decommission_nodes')\n+ def test_decommission_nodes(self, decommission_nodes):\n+ self.vh.pctx = mock.Mock()\n+ cluster = mock.Mock()\n+ instances = mock.Mock()\n+ self.vh.decommission_nodes(cluster, instances)\n+ decommission_nodes.assert_called_once_with(self.vh.pctx,\n+ cluster,\n+ instances)\n+\n+ @mock.patch(plugin_hadoop2_path +\n+ 'validation.validate_additional_ng_scaling')\n+ @mock.patch(plugin_hadoop2_path +\n+ 'validation.validate_existing_ng_scaling')\n+ def test_validate_scaling(self, vls, vla):\n+ self.vh.pctx['all_confs'] = [TestConfig('HDFS', 'dfs.replication', -1)]\n+ ng1 = testutils.make_ng_dict('ng1', '40', ['namenode'], 1)\n+ ng2 = testutils.make_ng_dict('ng2', '41', ['datanode'], 2)\n+ ng3 = testutils.make_ng_dict('ng3', '42', ['datanode'], 3)\n+ additional = [ng2['id'], ng3['id']]\n+ existing = {ng2['id']: 1}\n+ cluster = testutils.create_cluster('test-cluster', 'tenant1', 'fake',\n+ '0.1', [ng1, ng2, ng3])\n+ self.vh.validate_scaling(cluster, existing, additional)\n+ vla.assert_called_once_with(cluster, additional)\n+ vls.assert_called_once_with(self.vh.pctx, cluster, existing)\n+\n+ @mock.patch(plugin_hadoop2_path + 'scaling.scale_cluster')\n+ @mock.patch(plugin_hadoop2_path + 'keypairs.provision_keypairs')\n+ def test_scale_cluster(self, provision_keypairs, scale_cluster):\n+ self.vh.pctx = mock.Mock()\n+ instances = mock.Mock()\n+ self.vh.scale_cluster(self.cluster, instances)\n+ provision_keypairs.assert_called_once_with(self.cluster,\n+ instances)\n+ scale_cluster.assert_called_once_with(self.vh.pctx,\n+ self.cluster,\n+ instances)\n+\n+ @mock.patch(\"sahara.conductor.API.cluster_update\")\n+ @mock.patch(\"sahara.context.ctx\")\n+ @mock.patch(plugin_path + 'utils.get_namenode')\n+ @mock.patch(plugin_path + 'utils.get_resourcemanager')\n+ @mock.patch(plugin_path + 'utils.get_historyserver')\n+ @mock.patch(plugin_path + 'utils.get_oozie')\n+ @mock.patch(plugin_path + 'utils.get_spark_history_server')\n+ def test_set_cluster_info(self, get_spark_history_server, get_oozie,\n+ get_historyserver, get_resourcemanager,\n+ get_namenode, ctx, cluster_update):\n+ get_spark_history_server.return_value.management_ip = '1.2.3.0'\n+ get_oozie.return_value.get_ip_or_dns_name = mock.Mock(\n+ return_value='1.2.3.1')\n+ get_historyserver.return_value.get_ip_or_dns_name = mock.Mock(\n+ return_value='1.2.3.2')\n+ get_resourcemanager.return_value.get_ip_or_dns_name = mock.Mock(\n+ return_value='1.2.3.3')\n+ get_namenode.return_value.get_ip_or_dns_name = mock.Mock(\n+ return_value='1.2.3.4')\n+ get_namenode.return_value.hostname = mock.Mock(\n+ 
return_value='testnode')\n+ self.vh._set_cluster_info(self.cluster)\n+ info = {'YARN': {\n+ 'Web UI': 'http://1.2.3.3:8088',\n+ 'ResourceManager': 'http://1.2.3.3:8032'\n+ },\n+ 'HDFS': {\n+ 'Web UI': 'http://1.2.3.4:50070',\n+ 'NameNode': 'hdfs://testnode:9000'\n+ },\n+ 'JobFlow': {\n+ 'Oozie': 'http://1.2.3.1:11000'\n+ },\n+ 'MapReduce JobHistory Server': {\n+ 'Web UI': 'http://1.2.3.2:19888'\n+ },\n+ 'Apache Spark': {\n+ 'Spark UI': 'http://1.2.3.0:4040',\n+ 'Spark History Server UI': 'http://1.2.3.0:18080'\n+ }\n+ }\n+ cluster_update.assert_called_once_with(ctx(), self.cluster,\n+ {'info': info})\n+\n+ @mock.patch(\"sahara.service.edp.job_utils.get_plugin\")\n+ @mock.patch('sahara.plugins.utils.get_instance')\n+ @mock.patch('os.path.join')\n+ def test_get_edp_engine(self, join, get_instance, get_plugin):\n+ job_type = ''\n+ ret = self.vh.get_edp_engine(self.cluster, job_type)\n+ self.assertEqual(ret, None)\n+\n+ job_type = 'Java'\n+ ret = self.vh.get_edp_engine(self.cluster, job_type)\n+ self.assertIsInstance(ret, EdpOozieEngine)\n+\n+ job_type = 'Spark'\n+ ret = self.vh.get_edp_engine(self.cluster, job_type)\n+ self.assertIsInstance(ret, EdpSparkEngine)\n+\n+ def test_get_edp_job_types(self):\n+ job_types = ['Hive', 'Java', 'MapReduce',\n+ 'MapReduce.Streaming', 'Pig', 'Shell', 'Spark']\n+ self.assertEqual(self.vh.get_edp_job_types(), job_types)\n+\n+ def test_get_edp_config_hints(self):\n+ job_type = 'Java'\n+ ret = {'job_config': {'args': [], 'configs': []}}\n+ self.assertEqual(self.vh.get_edp_config_hints(job_type), ret)\n+\n+ @mock.patch(plugin_hadoop2_path + 'utils.delete_oozie_password')\n+ @mock.patch(plugin_hadoop2_path + 'keypairs.drop_key')\n+ def test_on_terminate_cluster(self, delete_oozie_password, drop_key):\n+ self.vh.on_terminate_cluster(self.cluster)\n+ delete_oozie_password.assert_called_once_with(self.cluster)\n+ drop_key.assert_called_once_with(self.cluster)\n+\n+ @mock.patch(plugin_hadoop2_path + 'config.get_open_ports')\n+ def test_get_open_ports(self, get_open_ports):\n+ node_group = mock.Mock()\n+ self.vh.get_open_ports(node_group)\n+ get_open_ports.assert_called_once_with(node_group)\n+\n+ @mock.patch(plugin_hadoop2_path +\n+ 'recommendations_utils.recommend_configs')\n+ def test_recommend_configs(self, recommend_configs):\n+ scaling = mock.Mock()\n+ configs = mock.Mock()\n+ self.vh.pctx['all_confs'] = configs\n+ self.vh.recommend_configs(self.cluster, scaling)\n+ recommend_configs.assert_called_once_with(self.cluster,\n+ configs,\n+ scaling)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/testutils.py",
"new_path": "sahara/tests/unit/testutils.py",
"diff": "@@ -30,7 +30,8 @@ def create_cluster(name, tenant, plugin, version, node_groups, **kwargs):\ndef make_ng_dict(name, flavor, processes, count, instances=None, **kwargs):\ninstances = instances or []\n- dct = {'name': name, 'flavor_id': flavor, 'node_processes': processes,\n+ dct = {'id': uuidutils.generate_uuid(), 'name': name,\n+ 'flavor_id': flavor, 'node_processes': processes,\n'count': count, 'instances': instances, 'node_configs': {},\n'security_groups': None, 'auto_security_group': False,\n'availability_zone': None, 'volumes_availability_zone': None,\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/v2_7_1/versionhandler.py
Add tests of all methods in versionhandler.py
Change-Id: I4b538f79fc43e4c143116df567fee4bd26f329b9 |
488,324 | 12.06.2017 15:42:40 | -28,800 | 9ee149b7d478cbca3946dab5febe8d88a0660216 | Add test to sahara/plugins/vanilla/v2_7_1/config_helper.py
Add tests of methods in config_helper.py | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/v2_7_1/test_config_helper.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+\n+from sahara.plugins import provisioning as p\n+from sahara.plugins.vanilla.hadoop2 import config_helper as h_helper\n+from sahara.plugins.vanilla.v2_7_1 import config_helper as v_helper\n+from sahara.tests.unit import base\n+\n+\n+class TestConfigHelper(base.SaharaTestCase):\n+\n+ plugin_path = 'sahara.plugins.vanilla.v2_7_1.'\n+ plugin_hadoop_path = 'sahara.plugins.vanilla.hadoop2.'\n+\n+ def setUp(self):\n+ super(TestConfigHelper, self).setUp()\n+\n+ @mock.patch(plugin_hadoop_path + 'config_helper.PLUGIN_GENERAL_CONFIGS')\n+ @mock.patch(plugin_path + 'config_helper.PLUGIN_ENV_CONFIGS')\n+ @mock.patch(plugin_path + 'config_helper.PLUGIN_XML_CONFIGS')\n+ @mock.patch(plugin_path + 'config_helper._get_spark_configs')\n+ def test_init_all_configs(self, _get_spark_configs,\n+ PLUGIN_XML_CONFIGS,\n+ PLUGIN_ENV_CONFIGS,\n+ PLUGIN_GENERAL_CONFIGS):\n+ configs = []\n+ configs.extend(PLUGIN_XML_CONFIGS)\n+ configs.extend(PLUGIN_ENV_CONFIGS)\n+ configs.extend(PLUGIN_GENERAL_CONFIGS)\n+ configs.extend(_get_spark_configs())\n+ init_configs = v_helper._init_all_configs()\n+ self.assertEqual(init_configs, configs)\n+\n+ def test_get_spark_configs(self):\n+ h_helper.SPARK_CONFS = {\n+ 'Spark': {\n+ 'OPTIONS': [{\n+ 'name': 'test',\n+ 'description': 'This is a test',\n+ 'default': 'default',\n+ 'priority': 1\n+ }]\n+ }\n+ }\n+ spark_configs = v_helper._get_spark_configs()\n+ for i in spark_configs:\n+ self.assertIsInstance(i, p.Config)\n+\n+ def test_get_plugin_configs(self):\n+ self.assertEqual(v_helper.get_plugin_configs(),\n+ v_helper.PLUGIN_CONFIGS)\n+\n+ def test_get_xml_configs(self):\n+ self.assertEqual(v_helper.get_xml_configs(),\n+ v_helper.PLUGIN_XML_CONFIGS)\n+\n+ def test_get_env_configs(self):\n+ self.assertEqual(v_helper.get_env_configs(),\n+ v_helper.ENV_CONFS)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/v2_7_1/config_helper.py
Add tests of methods in config_helper.py
Change-Id: I3fe7706475ad618b8eebcb9bad7a7994d1089162 |
488,324 | 13.06.2017 12:00:41 | -28,800 | de3c0d6dcb545c270e7e074e910041e862aee311 | Add test to sahara/plugins/vanilla/hadoop2/config_helper.py
Add tests of most methods in config_helper.py,
except init_xml_configs() | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_config_helper.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+from oslo_config import cfg\n+\n+from sahara import exceptions as ex\n+from sahara.plugins import provisioning as p\n+from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper\n+from sahara.tests.unit import base\n+\n+\n+class TestConfigHelper(base.SaharaTestCase):\n+\n+ plugin_path = 'sahara.plugins.vanilla.hadoop2.'\n+\n+ def setUp(self):\n+ super(TestConfigHelper, self).setUp()\n+ self.pctx = mock.Mock()\n+ self.applicable_target = mock.Mock()\n+ self.name = mock.Mock()\n+ self.cluster = mock.Mock()\n+ self.CONF = cfg.CONF\n+ self.CONF.import_opt(\"enable_data_locality\",\n+ \"sahara.topology.topology_helper\")\n+\n+ def test_init_env_configs(self):\n+ ENV_CONFS = {\n+ \"YARN\": {\n+ 'ResourceManager Heap Size': 1024,\n+ 'NodeManager Heap Size': 1024\n+ },\n+ \"HDFS\": {\n+ 'NameNode Heap Size': 1024,\n+ 'SecondaryNameNode Heap Size': 1024,\n+ 'DataNode Heap Size': 1024\n+ },\n+ \"MapReduce\": {\n+ 'JobHistoryServer Heap Size': 1024\n+ },\n+ \"JobFlow\": {\n+ 'Oozie Heap Size': 1024\n+ }\n+ }\n+ configs = c_helper.init_env_configs(ENV_CONFS)\n+ for config in configs:\n+ self.assertIsInstance(config, p.Config)\n+\n+ def test_init_general_configs(self):\n+ sample_configs = [c_helper.ENABLE_SWIFT, c_helper.ENABLE_MYSQL,\n+ c_helper.DATANODES_STARTUP_TIMEOUT,\n+ c_helper.DATANODES_DECOMMISSIONING_TIMEOUT,\n+ c_helper.NODEMANAGERS_DECOMMISSIONING_TIMEOUT]\n+ self.CONF.enable_data_locality = False\n+ self.assertEqual(c_helper._init_general_configs(), sample_configs)\n+\n+ sample_configs.append(c_helper.ENABLE_DATA_LOCALITY)\n+ self.CONF.enable_data_locality = True\n+ self.assertEqual(c_helper._init_general_configs(), sample_configs)\n+\n+ def test_get_config_value(self):\n+ cluster = mock.Mock()\n+ ng = mock.Mock()\n+ ng.configuration.return_value = mock.Mock()\n+ ng.configuration.return_value.get.return_value = mock.Mock()\n+ cl = 'test'\n+ ng.configuration.return_value.get.return_value.get.return_value = cl\n+ cluster.node_groups = [ng]\n+ cl_param = c_helper.get_config_value('pctx', 'service',\n+ 'name', cluster)\n+ self.assertEqual(cl, cl_param)\n+\n+ all_confs = mock.Mock()\n+ all_confs.applicable_target = 'service'\n+ all_confs.name = 'name'\n+ all_confs.default_value = 'default'\n+ pctx = {'all_confs': [all_confs]}\n+ value = c_helper.get_config_value(pctx, 'service', 'name')\n+ self.assertEqual(value, 'default')\n+\n+ pctx = {'all_confs': []}\n+ self.assertRaises(ex.NotFoundException, c_helper.get_config_value,\n+ pctx, 'service', 'name')\n+\n+ @mock.patch(plugin_path + 'config_helper.get_config_value')\n+ def test_is_swift_enabled(self, get_config_value):\n+ target = c_helper.ENABLE_SWIFT.applicable_target\n+ name = c_helper.ENABLE_SWIFT.name\n+ c_helper.is_swift_enabled(self.pctx, self.cluster)\n+ get_config_value.assert_called_once_with(self.pctx, target,\n+ name, self.cluster)\n+\n+ @mock.patch(plugin_path + 
'config_helper.get_config_value')\n+ def test_is_mysql_enabled(self, get_config_value):\n+ target = c_helper.ENABLE_MYSQL.applicable_target\n+ name = c_helper.ENABLE_MYSQL.name\n+ c_helper.is_mysql_enabled(self.pctx, self.cluster)\n+ get_config_value.assert_called_once_with(self.pctx, target,\n+ name, self.cluster)\n+\n+ @mock.patch(plugin_path + 'config_helper.get_config_value')\n+ def test_is_data_locality_enabled(self, get_config_value):\n+ self.CONF.enable_data_locality = False\n+ enabled = c_helper.is_data_locality_enabled(self.pctx, self.cluster)\n+ self.assertEqual(enabled, False)\n+\n+ self.CONF.enable_data_locality = True\n+ target = c_helper.ENABLE_DATA_LOCALITY.applicable_target\n+ name = c_helper.ENABLE_DATA_LOCALITY.name\n+ c_helper.is_data_locality_enabled(self.pctx, self.cluster)\n+ get_config_value.assert_called_once_with(self.pctx, target,\n+ name, self.cluster)\n+\n+ def test_get_spark_opt_default(self):\n+ c_helper.SPARK_CONFS = {'Spark': {\n+ 'OPTIONS': [{'name': 'test_name',\n+ 'default': 'test'}]}\n+ }\n+ opt_name = 'tt'\n+ default = c_helper._get_spark_opt_default(opt_name)\n+ self.assertEqual(default, None)\n+\n+ opt_name = 'test_name'\n+ default = c_helper._get_spark_opt_default(opt_name)\n+ self.assertEqual(default, 'test')\n+\n+ def test_generate_spark_env_configs(self):\n+ configs = 'HADOOP_CONF_DIR=/opt/hadoop/etc/hadoop\\n' \\\n+ 'YARN_CONF_DIR=/opt/hadoop/etc/hadoop'\n+ ret = c_helper.generate_spark_env_configs(self.cluster)\n+ self.assertEqual(ret, configs)\n+\n+ @mock.patch('sahara.plugins.utils.get_config_value_or_default')\n+ def test_generate_spark_executor_classpath(self,\n+ get_config_value_or_default):\n+ get_config_value_or_default.return_value = None\n+ path = 'Executor extra classpath'\n+ ret = c_helper.generate_spark_executor_classpath(self.cluster)\n+ get_config_value_or_default.assert_called_once_with('Spark',\n+ path,\n+ self.cluster)\n+ self.assertEqual(ret, '\\n')\n+\n+ get_config_value_or_default.return_value = 'test'\n+ ret = c_helper.generate_spark_executor_classpath(self.cluster)\n+ self.assertEqual(ret, 'spark.executor.extraClassPath test')\n+\n+ @mock.patch('sahara.utils.files.get_file_text')\n+ @mock.patch('sahara.plugins.utils.get_config_value_or_default')\n+ def test_generate_job_cleanup_config(self,\n+ get_config_value_or_default,\n+ get_file_text):\n+ cron = 'MINIMUM_CLEANUP_MEGABYTES={minimum_cleanup_megabytes};' + \\\n+ 'MINIMUM_CLEANUP_SECONDS={minimum_cleanup_seconds};' + \\\n+ 'MAXIMUM_CLEANUP_SECONDS={maximum_cleanup_seconds};'\n+ script = 'MINIMUM_CLEANUP_MEGABYTES=1;' + \\\n+ 'MINIMUM_CLEANUP_SECONDS=1;' + \\\n+ 'MAXIMUM_CLEANUP_SECONDS=1;'\n+ job_conf = {'valid': True,\n+ 'cron': (cron,),\n+ 'script': script}\n+ get_file_text.return_value = cron\n+ get_config_value_or_default.return_value = 1\n+ ret = c_helper.generate_job_cleanup_config(self.cluster)\n+ self.assertEqual(get_config_value_or_default.call_count, 3)\n+ self.assertEqual(get_file_text.call_count, 2)\n+ self.assertEqual(ret, job_conf)\n+\n+ job_conf = {'valid': False}\n+ get_config_value_or_default.return_value = 0\n+ ret = c_helper.generate_job_cleanup_config(self.cluster)\n+ self.assertEqual(get_config_value_or_default.call_count, 6)\n+ self.assertEqual(ret, job_conf)\n+\n+ @mock.patch('sahara.plugins.utils.get_config_value_or_default')\n+ def test_get_spark_home(self, get_config_value_or_default):\n+ get_config_value_or_default.return_value = 1\n+ self.assertEqual(c_helper.get_spark_home(self.cluster), 1)\n+ 
get_config_value_or_default.assert_called_once_with('Spark',\n+ 'Spark home',\n+ self.cluster)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/hadoop2/config_helper.py
Add tests of most methods in config_helper.py,
except init_xml_configs()
Change-Id: I24df2250778ad422347ba77a3146e28ba027ad3c |
488,324 | 13.06.2017 14:46:30 | -28,800 | 9ea4344a2f68e94994395ce6f717b8bc1958683d | Add test to sahara/plugins/vanilla/hadoop2/oozie_helper.py
Add tests of two methods in oozie_helper.py | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_oozie_helper.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+\n+from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper\n+from sahara.tests.unit import base\n+\n+\n+class TestOozieHelper(base.SaharaTestCase):\n+\n+ def setUp(self):\n+ super(TestOozieHelper, self).setUp()\n+\n+ def test_get_oozie_required_xml_configs(self):\n+ hadoop_conf_dir = '/root'\n+ configs = {\n+ 'oozie.service.ActionService.executor.ext.classes':\n+ 'org.apache.oozie.action.email.EmailActionExecutor,'\n+ 'org.apache.oozie.action.hadoop.HiveActionExecutor,'\n+ 'org.apache.oozie.action.hadoop.ShellActionExecutor,'\n+ 'org.apache.oozie.action.hadoop.SqoopActionExecutor,'\n+ 'org.apache.oozie.action.hadoop.DistcpActionExecutor',\n+\n+ 'oozie.service.SchemaService.wf.ext.schemas':\n+ 'shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,'\n+ 'email-action-0.1.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,'\n+ 'hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,'\n+ 'sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,'\n+ 'ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,'\n+ 'oozie-sla-0.1.xsd,oozie-sla-0.2.xsd',\n+\n+ 'oozie.service.JPAService.create.db.schema': 'false',\n+ 'oozie.service.HadoopAccessorService.hadoop.configurations':\n+ '*=/root'\n+ }\n+ ret = o_helper.get_oozie_required_xml_configs(hadoop_conf_dir)\n+ self.assertEqual(ret, configs)\n+\n+ @mock.patch('sahara.plugins.vanilla.hadoop2.utils.get_oozie_password')\n+ def test_get_oozie_mysql_configs(self, get_oozie_password):\n+ get_oozie_password.return_value = '123'\n+ configs = {\n+ 'oozie.service.JPAService.jdbc.driver':\n+ 'com.mysql.jdbc.Driver',\n+ 'oozie.service.JPAService.jdbc.url':\n+ 'jdbc:mysql://localhost:3306/oozie',\n+ 'oozie.service.JPAService.jdbc.username': 'oozie',\n+ 'oozie.service.JPAService.jdbc.password': '123'\n+ }\n+ cluster = mock.Mock()\n+ ret = o_helper.get_oozie_mysql_configs(cluster)\n+ get_oozie_password.assert_called_once_with(cluster)\n+ self.assertEqual(ret, configs)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/hadoop2/oozie_helper.py
Add tests of two methods in oozie_helper.py
Change-Id: I76f31854c8b57df0fd1f5d45045347580619b215 |
488,324 | 13.06.2017 18:51:31 | -28,800 | 8c8c9353e02723a404b142dc95d3ddf78d3535dc | Add test to edp_engine.py
Add test to edp_engine.py in sahara/plugins/vanilla/ and
sahara/plugins/vanilla/hadoop2/. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_edp_engine.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+\n+from sahara.plugins import exceptions as ex\n+from sahara.plugins.vanilla.hadoop2 import edp_engine\n+from sahara.service.edp import job_utils\n+from sahara.tests.unit import base as sahara_base\n+\n+\n+class EdpOozieEngineTest(sahara_base.SaharaTestCase):\n+\n+ engine_path = 'sahara.service.edp.oozie.engine.'\n+\n+ def setUp(self):\n+ super(EdpOozieEngineTest, self).setUp()\n+ self.cluster = mock.Mock()\n+ job_utils.get_plugin = mock.Mock(return_value='test_plugins')\n+ self.engine = edp_engine.EdpOozieEngine(self.cluster)\n+\n+ def test_get_hdfs_user(self):\n+ self.assertEqual(self.engine.get_hdfs_user(), 'hadoop')\n+\n+ def test_get_name_node_uri(self):\n+ cluster = {'info': {\n+ 'HDFS': {\n+ 'NameNode': 'test_url'}}}\n+ ret = self.engine.get_name_node_uri(cluster)\n+ self.assertEqual(ret, 'test_url')\n+\n+ def test_get_oozie_server_uri(self):\n+ cluster = {'info': {\n+ 'JobFlow': {\n+ 'Oozie': 'test_url'}}}\n+ ret = self.engine.get_oozie_server_uri(cluster)\n+ self.assertEqual(ret, 'test_url/oozie/')\n+\n+ @mock.patch('sahara.plugins.vanilla.utils.get_oozie')\n+ def test_get_oozie_server(self, get_oozie):\n+ get_oozie.return_value = 'bingo'\n+ ret = self.engine.get_oozie_server(self.cluster)\n+ get_oozie.assert_called_once_with(self.cluster)\n+ self.assertEqual(ret, 'bingo')\n+\n+ @mock.patch(engine_path + 'OozieJobEngine.validate_job_execution')\n+ @mock.patch('sahara.plugins.utils.get_instances_count')\n+ def test_validate_job_execution(self,\n+ get_instances_count,\n+ validate_job_execution):\n+ job = mock.Mock()\n+ data = mock.Mock()\n+ get_instances_count.return_value = 0\n+ self.assertRaises(ex.InvalidComponentCountException,\n+ self.engine.validate_job_execution,\n+ self.cluster, job, data)\n+\n+ get_instances_count.return_value = 1\n+ self.engine.validate_job_execution(self.cluster, job, data)\n+ validate_job_execution.assert_called_once_with(self.cluster,\n+ job, data)\n+\n+ @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2')\n+ def test_create_hdfs_dir(self, create_dir_hadoop2):\n+ self.engine.get_hdfs_user = mock.Mock(return_value='test_user')\n+ remote = mock.Mock()\n+ dir_name = mock.Mock()\n+ self.engine.create_hdfs_dir(remote, dir_name)\n+ create_dir_hadoop2.assert_called_once_with(remote, dir_name,\n+ 'test_user')\n+\n+ def test_get_resource_manager_uri(self):\n+ cluster = {'info': {\n+ 'YARN': {\n+ 'ResourceManager': 'test_url'}}}\n+ ret = self.engine.get_resource_manager_uri(cluster)\n+ self.assertEqual(ret, 'test_url')\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to edp_engine.py
Add test to edp_engine.py in sahara/plugins/vanilla/ and
sahara/plugins/vanilla/hadoop2/.
Change-Id: I011d2b214f283eb21ab9f77ad6096bd307dbaa41 |
488,324 | 14.06.2017 14:25:21 | -28,800 | dfd4b86c2af32fa20a9b2bc269cdf40c0d5e56e4 | Add test to sahara/plugins/vanilla/hadoop2/starting_scripts.py
Add unit test to starting_scripts.py. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_starting_scripts.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+\n+from sahara.plugins.vanilla.hadoop2 import starting_scripts as s_scripts\n+from sahara.tests.unit import base\n+\n+\n+class StartingScriptsTest(base.SaharaTestCase):\n+\n+ plugins_path = 'sahara.plugins.vanilla.'\n+\n+ def setUp(self):\n+ super(StartingScriptsTest, self).setUp()\n+ self.cluster = mock.Mock()\n+\n+ @mock.patch(plugins_path + 'utils.get_namenode')\n+ @mock.patch(plugins_path + 'hadoop2.starting_scripts._start_namenode')\n+ def test_start_namenode(self, _start_namenode, get_namenode):\n+ namenode = mock.Mock()\n+ get_namenode.return_value = namenode\n+ s_scripts.start_namenode(self.cluster)\n+ get_namenode.assert_called_once_with(self.cluster)\n+ _start_namenode.assert_called_once_with(namenode)\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch(plugins_path + 'hadoop2.run_scripts.start_hadoop_process')\n+ @mock.patch(plugins_path + 'hadoop2.run_scripts.format_namenode')\n+ def test__start_namenode(self, format_namenode,\n+ start_hadoop_process,\n+ check_cluster_exists):\n+ check_cluster_exists.return_value = None\n+ nn = mock.Mock()\n+ s_scripts._start_namenode(nn)\n+ format_namenode.assert_called_once_with(nn)\n+ start_hadoop_process.assert_called_once_with(nn, 'namenode')\n+\n+ @mock.patch(plugins_path +\n+ 'hadoop2.starting_scripts._start_secondarynamenode')\n+ @mock.patch(plugins_path + 'utils.get_secondarynamenode')\n+ def test_start_secondarynamenode(self, get_secondarynamenode,\n+ _start_secondarynamenode):\n+ get_secondarynamenode.return_value = 0\n+ s_scripts.start_secondarynamenode(self.cluster)\n+ get_secondarynamenode.assert_called_once_with(self.cluster)\n+\n+ get_secondarynamenode.return_value = 1\n+ s_scripts.start_secondarynamenode(self.cluster)\n+ _start_secondarynamenode.assert_called_once_with(1)\n+ self.assertEqual(get_secondarynamenode.call_count, 2)\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch(plugins_path + 'hadoop2.run_scripts.start_hadoop_process')\n+ def test__start_secondarynamenode(self, start_hadoop_process,\n+ check_cluster_exists):\n+ check_cluster_exists.return_value = None\n+ snn = mock.Mock()\n+ s_scripts._start_secondarynamenode(snn)\n+ start_hadoop_process.assert_called_once_with(snn,\n+ 'secondarynamenode')\n+\n+ @mock.patch(plugins_path +\n+ 'hadoop2.starting_scripts._start_resourcemanager')\n+ @mock.patch(plugins_path + 'utils.get_resourcemanager')\n+ def test_start_resourcemanager(self, get_resourcemanager,\n+ _start_resourcemanager):\n+ get_resourcemanager.return_value = 0\n+ s_scripts.start_resourcemanager(self.cluster)\n+ get_resourcemanager.assert_called_once_with(self.cluster)\n+\n+ get_resourcemanager.return_value = 1\n+ s_scripts.start_resourcemanager(self.cluster)\n+ _start_resourcemanager.assert_called_once_with(1)\n+ self.assertEqual(get_resourcemanager.call_count, 2)\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ 
@mock.patch(plugins_path + 'hadoop2.run_scripts.start_yarn_process')\n+ def test__start_resourcemanager(self, start_yarn_process,\n+ check_cluster_exists):\n+ check_cluster_exists.return_value = None\n+ snn = mock.Mock()\n+ s_scripts._start_resourcemanager(snn)\n+ start_yarn_process.assert_called_once_with(snn,\n+ 'resourcemanager')\n+\n+ @mock.patch(plugins_path + 'hadoop2.run_scripts.start_historyserver')\n+ @mock.patch(plugins_path + 'utils.get_historyserver')\n+ def test_start_historyserver(self, get_historyserver,\n+ start_historyserver):\n+ get_historyserver.return_value = 0\n+ s_scripts.start_historyserver(self.cluster)\n+ get_historyserver.assert_called_once_with(self.cluster)\n+\n+ get_historyserver.return_value = 1\n+ s_scripts.start_historyserver(self.cluster)\n+ start_historyserver.assert_called_once_with(1)\n+ self.assertEqual(get_historyserver.call_count, 2)\n+\n+ @mock.patch(plugins_path + 'hadoop2.run_scripts.start_oozie_process')\n+ @mock.patch(plugins_path + 'utils.get_oozie')\n+ def test_start_oozie(self, get_oozie, start_oozie_process):\n+ pctx = mock.Mock()\n+ get_oozie.return_value = 0\n+ s_scripts.start_oozie(pctx, self.cluster)\n+ get_oozie.assert_called_once_with(self.cluster)\n+\n+ get_oozie.return_value = 1\n+ s_scripts.start_oozie(pctx, self.cluster)\n+ start_oozie_process.assert_called_once_with(pctx, 1)\n+ self.assertEqual(get_oozie.call_count, 2)\n+\n+ @mock.patch(plugins_path +\n+ 'hadoop2.run_scripts.start_hiveserver_process')\n+ @mock.patch(plugins_path + 'utils.get_hiveserver')\n+ def test_start_hiveserver(self, get_hiveserver,\n+ start_hiveserver_process):\n+ pctx = mock.Mock()\n+ get_hiveserver.return_value = 0\n+ s_scripts.start_hiveserver(pctx, self.cluster)\n+ get_hiveserver.assert_called_once_with(self.cluster)\n+\n+ get_hiveserver.return_value = 1\n+ s_scripts.start_hiveserver(pctx, self.cluster)\n+ start_hiveserver_process.assert_called_once_with(pctx, 1)\n+ self.assertEqual(get_hiveserver.call_count, 2)\n+\n+ @mock.patch(plugins_path +\n+ 'hadoop2.run_scripts.start_spark_history_server')\n+ @mock.patch(plugins_path + 'utils.get_spark_history_server')\n+ def test_start_spark(self, get_spark_history_server,\n+ start_spark_history_server):\n+ get_spark_history_server.return_value = 0\n+ s_scripts.start_spark(self.cluster)\n+ get_spark_history_server.assert_called_once_with(self.cluster)\n+\n+ get_spark_history_server.return_value = 1\n+ s_scripts.start_spark(self.cluster)\n+ start_spark_history_server.assert_called_once_with(1)\n+ self.assertEqual(get_spark_history_server.call_count, 2)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/hadoop2/starting_scripts.py
Add unit test to starting_scripts.py.
Change-Id: I0465ef317e88d16d33626141c359625a86854192 |
488,323 | 20.06.2017 15:24:46 | -28,800 | 5c9d5a7408044b0ff9f96a42d8c90694ee41d54f | Fixes a typo in quickstart.rst
Fixes a typo in quickstart.rst quickly | [
{
"change_type": "MODIFY",
"old_path": "doc/source/devref/quickstart.rst",
"new_path": "doc/source/devref/quickstart.rst",
"diff": "@@ -550,7 +550,7 @@ The next two commands will create input and output data sources in swift.\n--url \"swift://integration.sahara/input.txt\" input\n$ openstack dataprocessing data source create --type swift --username admin --password admin \\\n- --url \"swift://integration.sahara/output.txt\" input\n+ --url \"swift://integration.sahara/output.txt\" output\nIf you want to create data sources in hdfs, use valid hdfs urls:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixes a typo in quickstart.rst
Fixes a typo in quickstart.rst quickly
Change-Id: I24ce5e24e74c3646dafc1be2210ebb18d7ec4cff |
488,324 | 19.06.2017 17:37:56 | -28,800 | 1a9aeb868b8b9da6b0a372fff57c1027725a5dcd | Add test to sahara/plugins/vanilla/hadoop2/run_scripts.py
Add unit test to run_scripts.py | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_run_scripts.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+\n+from sahara.i18n import _\n+from sahara.plugins import utils as pu\n+from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper\n+from sahara.plugins.vanilla.hadoop2 import run_scripts as rs\n+from sahara.tests.unit import base\n+from sahara.utils import edp\n+from sahara.utils import files\n+\n+\n+class RunScriptsTest(base.SaharaTestCase):\n+\n+ PLUGINS_PATH = 'sahara.plugins.vanilla.hadoop2.'\n+\n+ def setUp(self):\n+ super(RunScriptsTest, self).setUp()\n+ self.instance = mock.Mock()\n+ self.r = mock.Mock()\n+ self.remote = mock.Mock(return_value=self.r)\n+ self.remote.__enter__ = self.remote\n+ self.remote.__exit__ = mock.Mock()\n+ self.instance.remote.return_value = self.remote\n+\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._start_processes')\n+ @mock.patch('sahara.context.set_current_instance_id')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ @mock.patch('sahara.plugins.utils.instances_with_services')\n+ def test_start_dn_nm_processes(self, instances_with_services,\n+ add_provisioning_step,\n+ set_current_instance_id,\n+ _start_processes):\n+ ins = mock.Mock()\n+ ins.cluster_id = '111'\n+ ins.instance_id = '123'\n+ ins.instance_name = 'ins_1'\n+ instances = [ins]\n+ instances_with_services.return_value = instances\n+ mess = pu.start_process_event_message('DataNodes, NodeManagers')\n+ ins.node_group.node_processes = ['datanode', 'test']\n+ rs.start_dn_nm_processes(instances)\n+ instances_with_services.assert_called_once_with(\n+ instances, ['datanode', 'nodemanager'])\n+ add_provisioning_step.assert_called_once_with('111', mess, 1)\n+ set_current_instance_id.assert_called_once_with('123')\n+ _start_processes.assert_called_once_with(ins, ['datanode'])\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ def test_start_processes_datanode(self, check_cluster_exists):\n+ processes = ['datanode']\n+ rs._start_processes(self.instance, processes)\n+ self.r.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"hadoop-daemon.sh start datanode\" hadoop')\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ def test_start_processes_nodemanager(self, check_cluster_exists):\n+ processes = ['nodemanager']\n+ rs._start_processes(self.instance, processes)\n+ self.r.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"yarn-daemon.sh start nodemanager\" hadoop')\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ def test_start_processes_both(self, check_cluster_exists):\n+ processes = ['datanode', 'nodemanager']\n+ rs._start_processes(self.instance, processes)\n+ cmd_1 = 'sudo su - -c \"hadoop-daemon.sh start datanode\" hadoop'\n+ cmd_2 = 'sudo su - -c \"yarn-daemon.sh start nodemanager\" hadoop'\n+ calls = [mock.call(cmd_1), mock.call(cmd_2)]\n+ self.r.execute_command.assert_has_calls(calls, any_order=True)\n+\n+ def test_start_hadoop_process(self):\n+ process = 'test'\n+ 
rs.start_hadoop_process(self.instance, process)\n+ self.remote.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"hadoop-daemon.sh start %s\" hadoop' % process)\n+\n+ def test_start_yarn_process(self):\n+ process = 'test'\n+ rs.start_yarn_process(self.instance, process)\n+ self.remote.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"yarn-daemon.sh start %s\" hadoop' % process)\n+\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_start_historyserver(self, add_provisioning_step,\n+ check_cluster_exists):\n+ rs.start_historyserver(self.instance)\n+ self.remote.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"mr-jobhistory-daemon.sh start historyserver\" ' +\n+ 'hadoop')\n+\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._start_oozie')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._oozie_share_lib')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._start_mysql')\n+ @mock.patch(PLUGINS_PATH + 'config_helper.is_mysql_enabled')\n+ @mock.patch(PLUGINS_PATH + 'utils.get_oozie_password')\n+ @mock.patch('sahara.context.set_current_instance_id')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_start_oozie_process(self, add_provisioning_step,\n+ check_cluster_exists,\n+ set_current_instance_id, get_oozie_password,\n+ is_mysql_enabled, _start_mysql,\n+ _oozie_share_lib, _start_oozie):\n+ self.instance.instance_id = '112233'\n+ pctx = mock.Mock()\n+ is_mysql_enabled.return_value = True\n+ sql_script = files.get_file_text(\n+ 'plugins/vanilla/hadoop2/resources/create_oozie_db.sql')\n+ get_oozie_password.return_value = '123'\n+ pwd_script = sql_script.replace('password', '123')\n+ rs.start_oozie_process(pctx, self.instance)\n+ set_current_instance_id.assert_called_once_with('112233')\n+ is_mysql_enabled.assert_called_once_with(pctx,\n+ self.instance.cluster)\n+ _start_mysql.assert_called_once_with(self.r)\n+ self.r.write_file_to.assert_called_once_with('create_oozie_db.sql',\n+ pwd_script)\n+ self.r.execute_command.assert_called_once_with(\n+ 'mysql -u root < create_oozie_db.sql && '\n+ 'rm create_oozie_db.sql')\n+ _oozie_share_lib.assert_called_once_with(self.r)\n+ _start_oozie.assert_called_once_with(self.r)\n+\n+ @mock.patch(PLUGINS_PATH + 'config_helper.get_spark_home')\n+ @mock.patch('sahara.context.set_current_instance_id')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_start_spark_history_server(self, add_provisioning_step,\n+ check_cluster_exists,\n+ set_current_instance_id,\n+ get_spark_home):\n+ get_spark_home.return_value = '/spark'\n+ rs.start_spark_history_server(self.instance)\n+ get_spark_home.assert_called_once_with(self.instance.cluster)\n+ set_current_instance_id.assert_called_once_with(\n+ self.instance.instance_id)\n+ self.r.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"bash /spark/sbin/start-history-server.sh\" hadoop')\n+\n+ def test_format_namenode(self):\n+ rs.format_namenode(self.instance)\n+ self.remote.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"hdfs namenode -format\" hadoop')\n+\n+ @mock.patch('sahara.plugins.vanilla.utils.get_namenode')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_refresh_hadoop_nodes(self, add_provisioning_step,\n+ 
check_cluster_exists, get_namenode):\n+ cluster = mock.Mock()\n+ get_namenode.return_value = self.instance\n+ rs.refresh_hadoop_nodes(cluster)\n+ get_namenode.assert_called_once_with(cluster)\n+ self.remote.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"hdfs dfsadmin -refreshNodes\" hadoop')\n+\n+ @mock.patch('sahara.plugins.vanilla.utils.get_resourcemanager')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_refresh_yarn_nodes(self, add_provisioning_step,\n+ check_cluster_exists, get_resourcemanager):\n+ cluster = mock.Mock()\n+ get_resourcemanager.return_value = self.instance\n+ rs.refresh_yarn_nodes(cluster)\n+ get_resourcemanager.assert_called_once_with(cluster)\n+ self.remote.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"yarn rmadmin -refreshNodes\" hadoop')\n+\n+ def test_oozie_share_lib(self):\n+ cmd_1 = 'sudo su - -c \"mkdir /tmp/oozielib && ' \\\n+ 'tar zxf /opt/oozie/oozie-sharelib-*.tar.gz -C ' \\\n+ '/tmp/oozielib && ' \\\n+ 'hadoop fs -mkdir /user && ' \\\n+ 'hadoop fs -mkdir /user/hadoop && ' \\\n+ 'hadoop fs -put /tmp/oozielib/share /user/hadoop/ && ' \\\n+ 'rm -rf /tmp/oozielib\" hadoop'\n+ cmd_2 = 'sudo su - -c \"/opt/oozie/bin/ooziedb.sh ' \\\n+ 'create -sqlfile oozie.sql ' \\\n+ '-run Validate DB Connection\" hadoop'\n+ command = [mock.call(cmd_1),\n+ mock.call(cmd_2)]\n+ rs._oozie_share_lib(self.r)\n+ self.r.execute_command.assert_has_calls(command, any_order=True)\n+\n+ def test_start_mysql(self):\n+ rs._start_mysql(self.r)\n+ self.r.execute_command.assert_called_once_with('/opt/start-mysql.sh')\n+\n+ def test_start_oozie(self):\n+ rs._start_oozie(self.r)\n+ self.r.execute_command.assert_called_once_with(\n+ 'sudo su - -c \"/opt/oozie/bin/oozied.sh start\" hadoop')\n+\n+ @mock.patch('sahara.plugins.vanilla.utils.get_namenode')\n+ @mock.patch('sahara.plugins.vanilla.utils.get_datanodes')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ @mock.patch('sahara.utils.poll_utils.plugin_option_poll')\n+ def test_await_datanodes(self, plugin_option_poll, add_provisioning_step,\n+ check_cluster_exists, get_datanodes,\n+ get_namenode):\n+ cluster = mock.Mock()\n+ get_datanodes.return_value = ['node1']\n+ r = mock.Mock()\n+ remote = mock.Mock(return_value=r)\n+ remote.__enter__ = remote\n+ remote.__exit__ = mock.Mock()\n+ namenode = mock.Mock()\n+ namenode.remote.return_value = remote\n+ get_namenode.return_value = namenode\n+ mess = _('Waiting on 1 datanodes to start up')\n+ test_data = {'remote': r, 'count': 1}\n+ timeout = c_helper.DATANODES_STARTUP_TIMEOUT\n+ rs.await_datanodes(cluster)\n+ get_datanodes.assert_called_once_with(cluster)\n+ get_namenode.assert_called_once_with(cluster)\n+ plugin_option_poll.assert_called_once_with(cluster,\n+ rs._check_datanodes_count,\n+ timeout, mess, 1, test_data)\n+\n+ def test_check_datanodes_count(self):\n+ self.r.execute_command = mock.Mock(return_value=(0, '1'))\n+ self.assertEqual(rs._check_datanodes_count(self.r, 0), True)\n+\n+ self.assertEqual(rs._check_datanodes_count(self.r, 1), True)\n+ self.r.execute_command.assert_called_once_with(\n+ 'sudo su -lc \"hdfs dfsadmin -report\" hadoop | '\n+ 'grep \\'Live datanodes\\|Datanodes available:\\' | '\n+ 'grep -o \\'[0-9]\\+\\' | head -n 1')\n+\n+ def test_hive_create_warehouse_dir(self):\n+ rs._hive_create_warehouse_dir(self.r)\n+ self.r.execute_command.assert_called_once_with(\n+ 
\"sudo su - -c 'hadoop fs -mkdir -p \"\n+ \"/user/hive/warehouse' hadoop\")\n+\n+ def test_hive_copy_shared_conf(self):\n+ dest = '/root/test.xml'\n+ rs._hive_copy_shared_conf(self.r, dest)\n+ self.r.execute_command.assert_called_once_with(\n+ \"sudo su - -c 'hadoop fs -mkdir -p /root && \"\n+ \"hadoop fs -put /opt/hive/conf/hive-site.xml \"\n+ \"/root/test.xml' hadoop\")\n+\n+ def test_hive_create_db(self):\n+ rs._hive_create_db(self.r)\n+ self.r.execute_command.assert_called_once_with(\n+ 'mysql -u root < /tmp/create_hive_db.sql')\n+\n+ def test_hive_metastore_start(self):\n+ rs._hive_metastore_start(self.r)\n+ self.r.execute_command.assert_called_once_with(\n+ \"sudo su - -c 'nohup /opt/hive/bin/hive\"\n+ \" --service metastore > /dev/null &' hadoop\")\n+\n+ @mock.patch(PLUGINS_PATH + 'utils.get_hive_password')\n+ @mock.patch(PLUGINS_PATH + 'config_helper.is_mysql_enabled')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._hive_metastore_start')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._hive_create_db')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._start_mysql')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._hive_copy_shared_conf')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts._hive_create_warehouse_dir')\n+ @mock.patch('sahara.plugins.vanilla.utils.get_oozie')\n+ @mock.patch('sahara.context.set_current_instance_id')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_start_hiveserver_process(self, add_provisioning_step,\n+ check_cluster_exists,\n+ set_current_instance_id, get_oozie,\n+ _hive_create_warehouse_dir,\n+ _hive_copy_shared_conf, _start_mysql,\n+ _hive_create_db, _hive_metastore_start,\n+ is_mysql_enabled, get_hive_password):\n+ pctx = mock.Mock()\n+ path = edp.get_hive_shared_conf_path('hadoop')\n+ is_mysql_enabled.return_value = True\n+ cluster = self.instance.cluster\n+ ng_cluster = self.instance.node_group.cluster\n+ get_oozie.return_value = None\n+ sql_script = files.get_file_text(\n+ 'plugins/vanilla/hadoop2/resources/create_hive_db.sql')\n+ get_hive_password.return_value = '123'\n+ pwd_script = sql_script.replace('{{password}}', '123')\n+ rs.start_hiveserver_process(pctx, self.instance)\n+ set_current_instance_id.assert_called_once_with(\n+ self.instance.instance_id)\n+ _hive_create_warehouse_dir.assert_called_once_with(self.r)\n+ _hive_copy_shared_conf.assert_called_once_with(self.r, path)\n+ is_mysql_enabled.assert_called_once_with(pctx, cluster)\n+ get_oozie.assert_called_once_with(ng_cluster)\n+ _start_mysql.assert_called_once_with(self.r)\n+ get_hive_password.assert_called_once_with(cluster)\n+ self.r.write_file_to.assert_called_once_with(\n+ '/tmp/create_hive_db.sql', pwd_script)\n+ _hive_create_db.assert_called_once_with(self.r)\n+ _hive_metastore_start.assert_called_once_with(self.r)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/hadoop2/run_scripts.py
Add unit test to run_scripts.py
Change-Id: I997b10acc84dda715dea75234290d780bf777cad |
488,324 | 16.06.2017 18:40:02 | -28,800 | 31f5c2ba15e3cdcf468aa24a3e893e223f3bc31f | Add test to sahara/plugins/vanilla/hadoop2/scaling.py
Add unit test to scaling.py | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_scaling.py",
"diff": "+# Copyright (c) 2017 EasyStack Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import mock\n+\n+from sahara.i18n import _\n+from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper\n+from sahara.plugins.vanilla.hadoop2 import scaling\n+from sahara.plugins.vanilla.hadoop2 import utils as pu\n+from sahara.tests.unit import base\n+\n+\n+class ScalingTest(base.SaharaTestCase):\n+\n+ PLUGINS_PATH = 'sahara.plugins.vanilla.hadoop2.'\n+\n+ def setUp(self):\n+ super(ScalingTest, self).setUp()\n+ self.cluster = mock.Mock()\n+ self.instances = mock.Mock()\n+ self.r = mock.Mock()\n+ self.r.execute_command = mock.Mock()\n+ self.instance = mock.Mock()\n+ self.instance.remote.return_value.__enter__ = mock.Mock(\n+ return_value=self.r)\n+ self.instance.remote.return_value.__exit__ = mock.Mock()\n+\n+ @mock.patch('sahara.swift.swift_helper.install_ssl_certs')\n+ @mock.patch('sahara.plugins.vanilla.utils.get_resourcemanager')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts.start_dn_nm_processes')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts.refresh_yarn_nodes')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts.refresh_hadoop_nodes')\n+ @mock.patch(PLUGINS_PATH + 'scaling._update_include_files')\n+ @mock.patch(PLUGINS_PATH + 'config.configure_topology_data')\n+ @mock.patch(PLUGINS_PATH + 'config.configure_instances')\n+ def test_scale_cluster(self, configure_instances,\n+ configure_topology_data,\n+ _update_include_files,\n+ refresh_hadoop_nodes,\n+ refresh_yarn_nodes,\n+ start_dn_nm_processes,\n+ get_resourcemanager,\n+ install_ssl_certs):\n+ get_resourcemanager.return_value = 'node1'\n+ pctx = mock.Mock()\n+ scaling.scale_cluster(pctx, self.cluster, self.instances)\n+ configure_instances.assert_called_once_with(pctx, self.instances)\n+ _update_include_files.assert_called_once_with(self.cluster)\n+ refresh_hadoop_nodes.assert_called_once_with(self.cluster)\n+ get_resourcemanager.assert_called_once_with(self.cluster)\n+ refresh_yarn_nodes.assert_called_once_with(self.cluster)\n+ configure_topology_data.assert_called_once_with(pctx, self.cluster)\n+ start_dn_nm_processes.assert_called_once_with(self.instances)\n+ install_ssl_certs.assert_called_once_with(self.instances)\n+\n+ def test_get_instances_with_service(self):\n+ ins_1 = mock.Mock()\n+ ins_1.node_group.node_processes = ['nodename']\n+ ins_2 = mock.Mock()\n+ ins_2.node_group.node_processes = ['nodedata']\n+ instances = [ins_1, ins_2]\n+ service = 'nodename'\n+ ret = scaling._get_instances_with_service(instances, service)\n+ self.assertEqual(ret, [ins_1])\n+\n+ @mock.patch('sahara.plugins.vanilla.utils.get_nodemanagers')\n+ @mock.patch('sahara.plugins.vanilla.utils.get_datanodes')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ @mock.patch('sahara.plugins.utils.generate_fqdn_host_names')\n+ @mock.patch('sahara.plugins.utils.get_instances')\n+ def test_update_include_files(self, get_instances,\n+ generate_fqdn_host_names,\n+ 
add_provisioning_step,\n+ check_cluster_exists,\n+ get_datanodes, get_nodemanagers):\n+ DIR = scaling.HADOOP_CONF_DIR\n+ host = '1.2.3.4'\n+ ins_1 = mock.Mock()\n+ ins_1.id = 'instance_1'\n+ ins_2 = mock.Mock()\n+ ins_2.id = 'instance_2'\n+ ins_3 = mock.Mock()\n+ ins_3.id = 'instance_3'\n+ ins_4 = mock.Mock()\n+ ins_4.id = 'instance_4'\n+ dec_instances = [ins_1, ins_2]\n+ get_instances.return_value = [self.instance]\n+ get_datanodes.return_value = [ins_3]\n+ get_nodemanagers.return_value = [ins_4]\n+ generate_fqdn_host_names.return_value = host\n+ scaling._update_include_files(self.cluster, dec_instances)\n+ get_instances.assert_called_once_with(self.cluster)\n+ get_datanodes.assert_called_once_with(self.cluster)\n+ get_nodemanagers.assert_called_once_with(self.cluster)\n+ count = generate_fqdn_host_names.call_count\n+ self.assertEqual(count, 2)\n+ command_calls = [mock.call(\n+ 'sudo su - -c \"echo \\'%s\\' > %s/dn-include\" hadoop' % (\n+ host, DIR)), mock.call(\n+ 'sudo su - -c \"echo \\'%s\\' > %s/nm-include\" hadoop' % (\n+ host, DIR))]\n+ self.r.execute_command.assert_has_calls(command_calls, any_order=True)\n+\n+ @mock.patch('sahara.plugins.vanilla.utils.get_resourcemanager')\n+ @mock.patch(PLUGINS_PATH + 'config.configure_topology_data')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts.refresh_yarn_nodes')\n+ @mock.patch(PLUGINS_PATH + 'run_scripts.refresh_hadoop_nodes')\n+ @mock.patch(PLUGINS_PATH + 'scaling._update_exclude_files')\n+ @mock.patch(PLUGINS_PATH + 'scaling._clear_exclude_files')\n+ @mock.patch(PLUGINS_PATH + 'scaling._update_include_files')\n+ @mock.patch(PLUGINS_PATH + 'scaling._check_datanodes_decommission')\n+ @mock.patch(PLUGINS_PATH + 'scaling._check_nodemanagers_decommission')\n+ @mock.patch(PLUGINS_PATH + 'scaling._get_instances_with_service')\n+ def test_decommission_nodes(self, _get_instances_with_service,\n+ _check_nodemanagers_decommission,\n+ _check_datanodes_decommission,\n+ _update_include_files, _clear_exclude_files,\n+ _update_exclude_files, refresh_hadoop_nodes,\n+ refresh_yarn_nodes, configure_topology_data,\n+ get_resourcemanager):\n+ data = 'test_data'\n+ _get_instances_with_service.return_value = data\n+ get_resourcemanager.return_value = 'node1'\n+ pctx = mock.Mock()\n+ scaling.decommission_nodes(pctx, self.cluster, self.instances)\n+ get_instances_count = _get_instances_with_service.call_count\n+ self.assertEqual(get_instances_count, 2)\n+ _update_exclude_files.assert_called_once_with(self.cluster,\n+ self.instances)\n+ refresh_count = refresh_hadoop_nodes.call_count\n+ self.assertEqual(refresh_count, 2)\n+ get_resourcemanager.assert_called_once_with(self.cluster)\n+ refresh_yarn_nodes.assert_called_once_with(self.cluster)\n+ _check_nodemanagers_decommission.assert_called_once_with(\n+ self.cluster, data)\n+ _check_datanodes_decommission.assert_called_once_with(\n+ self.cluster, data)\n+ _update_include_files.assert_called_once_with(self.cluster,\n+ self.instances)\n+ _clear_exclude_files.assert_called_once_with(self.cluster)\n+ configure_topology_data.assert_called_once_with(pctx, self.cluster)\n+\n+ @mock.patch(PLUGINS_PATH + 'scaling._get_instances_with_service')\n+ @mock.patch('sahara.plugins.utils.generate_fqdn_host_names')\n+ @mock.patch('sahara.plugins.utils.get_instances')\n+ def test_update_exclude_files(self, get_instances,\n+ generate_fqdn_host_names,\n+ get_instances_with_service):\n+ node = mock.Mock()\n+ get_instances_with_service.return_value = node\n+ host = '1.2.3.4'\n+ generate_fqdn_host_names.return_value = host\n+ 
get_instances.return_value = [self.instance]\n+ scaling._update_exclude_files(self.cluster, self.instances)\n+ service_calls = [mock.call(self.instances, 'datanode'),\n+ mock.call(self.instances, 'nodemanager')]\n+ get_instances_with_service.assert_has_calls(service_calls,\n+ any_order=True)\n+ self.assertEqual(generate_fqdn_host_names.call_count, 2)\n+ get_instances.assert_called_once_with(self.cluster)\n+ DIR = scaling.HADOOP_CONF_DIR\n+ command_calls = [mock.call(\n+ 'sudo su - -c \"echo \\'%s\\' > %s/dn-exclude\" hadoop' % (\n+ host, DIR)), mock.call(\n+ 'sudo su - -c \"echo \\'%s\\' > %s/nm-exclude\" hadoop' % (\n+ host, DIR))]\n+ self.r.execute_command.assert_has_calls(command_calls, any_order=True)\n+\n+ @mock.patch('sahara.plugins.utils.get_instances')\n+ def test_clear_exclude_files(self, get_instances):\n+ get_instances.return_value = [self.instance]\n+ scaling._clear_exclude_files(self.cluster)\n+ get_instances.assert_called_once_with(self.cluster)\n+ DIR = scaling.HADOOP_CONF_DIR\n+ calls = [mock.call('sudo su - -c \"echo > %s/dn-exclude\" hadoop' %\n+ DIR),\n+ mock.call('sudo su - -c \"echo > %s/nm-exclude\" hadoop' %\n+ DIR)]\n+ self.r.execute_command.assert_has_calls(calls, any_order=True)\n+\n+ def test_is_decommissioned(self):\n+ def check_func(cluster):\n+ statuses = {'status': cluster}\n+ return statuses\n+ ins = mock.Mock()\n+ ins.fqdn.return_value = 'status'\n+ instances = [ins]\n+ cluster = 'decommissioned'\n+ ret = scaling.is_decommissioned(cluster, check_func, instances)\n+ self.assertEqual(ret, True)\n+\n+ cluster = 'active'\n+ ret = scaling.is_decommissioned(cluster, check_func, instances)\n+ self.assertEqual(ret, False)\n+\n+ @mock.patch('sahara.utils.poll_utils.plugin_option_poll')\n+ def test_check_decommission(self, plugin_option_poll):\n+ check_func = mock.Mock()\n+ option = mock.Mock()\n+ is_dec = scaling.is_decommissioned\n+ mess = _(\"Wait for decommissioning\")\n+ sample_dict = {'cluster': self.cluster,\n+ 'check_func': check_func,\n+ 'instances': self.instances}\n+ scaling._check_decommission(self.cluster, self.instances,\n+ check_func, option)\n+ plugin_option_poll.assert_called_once_with(self.cluster, is_dec,\n+ option, mess, 5,\n+ sample_dict)\n+\n+ @mock.patch(PLUGINS_PATH + 'scaling._check_decommission')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_check_nodemanagers_decommission(self, add_provisioning_step,\n+ check_cluster_exists,\n+ _check_decommission):\n+ timeout = c_helper.NODEMANAGERS_DECOMMISSIONING_TIMEOUT\n+ status = pu.get_nodemanagers_status\n+ scaling._check_nodemanagers_decommission(self.cluster, self.instances)\n+ _check_decommission.assert_called_once_with(self.cluster,\n+ self.instances,\n+ status, timeout)\n+\n+ @mock.patch(PLUGINS_PATH + 'scaling._check_decommission')\n+ @mock.patch('sahara.utils.cluster.check_cluster_exists')\n+ @mock.patch('sahara.utils.cluster_progress_ops.add_provisioning_step')\n+ def test_check_datanodes_decommission(self, add_provisioning_step,\n+ check_cluster_exists,\n+ _check_decommission):\n+ timeout = c_helper.DATANODES_DECOMMISSIONING_TIMEOUT\n+ status = pu.get_datanodes_status\n+ scaling._check_datanodes_decommission(self.cluster, self.instances)\n+ _check_decommission.assert_called_once_with(self.cluster,\n+ self.instances,\n+ status, timeout)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add test to sahara/plugins/vanilla/hadoop2/scaling.py
Add unit test to scaling.py
Change-Id: Id2c4754c6046caaaeb0ea1693ed11e3d299fd782 |
488,324 | 27.06.2017 17:18:10 | -28,800 | 227d12e60257622acae6747d289b2c9aa98c3df8 | Fix direct patches of methods in test_versionhandler.py
Direct patches of methods in unit tests may cause errors in other tests.
I use decorators to fix them. | [
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/vanilla/v2_7_1/test_versionhandler.py",
"new_path": "sahara/tests/unit/plugins/vanilla/v2_7_1/test_versionhandler.py",
"diff": "import mock\nimport six\n-from sahara.plugins.vanilla.hadoop2 import run_scripts as run\n-from sahara.plugins.vanilla.hadoop2 import starting_scripts as s_scripts\nfrom sahara.plugins.vanilla.v2_7_1.edp_engine import EdpOozieEngine\nfrom sahara.plugins.vanilla.v2_7_1.edp_engine import EdpSparkEngine\nfrom sahara.plugins.vanilla.v2_7_1 import versionhandler as v_h\n@@ -72,25 +70,19 @@ class VersionHandlerTest(base.SaharaTestCase):\nself.vh.configure_cluster(self.cluster)\nconfigure_cluster.assert_called_once_with(self.vh.pctx, self.cluster)\n+ @mock.patch(plugin_path + 'v2_7_1.versionhandler.run')\n+ @mock.patch(plugin_path + 'v2_7_1.versionhandler.s_scripts')\[email protected]('sahara.swift.swift_helper.install_ssl_certs')\[email protected](plugin_hadoop2_path + 'keypairs.provision_keypairs')\[email protected]('sahara.plugins.utils.get_instances')\[email protected]('sahara.utils.cluster.get_instances')\ndef test_start_cluster(self, c_get_instances, u_get_instances,\n- provision_keypairs, install_ssl_certs):\n+ provision_keypairs, install_ssl_certs,\n+ s_scripts, run):\nself.vh.pctx = mock.Mock()\ninstances = mock.Mock()\n- s_scripts.start_namenode = mock.Mock()\n- s_scripts.start_secondarynamenode = mock.Mock()\n- s_scripts.start_resourcemanager = mock.Mock()\n- s_scripts.start_historyserver = mock.Mock()\n- s_scripts.start_oozie = mock.Mock()\n- s_scripts.start_hiveserver = mock.Mock()\n- s_scripts.start_spark = mock.Mock()\nc_get_instances.return_value = instances\nu_get_instances.return_value = instances\n- run.await_datanodes = mock.Mock()\n- run.start_dn_nm_processes = mock.Mock()\nself.vh._set_cluster_info = mock.Mock()\nself.vh.start_cluster(self.cluster)\nprovision_keypairs.assert_called_once_with(self.cluster)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fix direct patches of methods in test_versionhandler.py
Direct patches of methods in unit test may cause errors in other tests.
I use the form of decorators to fix them.
Change-Id: I22d92921ae6074be3f4fbd542af8aa9f4c5c5543 |
488,282 | 27.06.2017 21:19:56 | -7,200 | b3d3793d15574519ab66282e08866415d3891fce | Add export of node group templates
Partially-Implements: bp portable-node-group-and-cluster-templates
This change adds a function to the REST API to enable users to export a node
group template to JSON, so that it can be later imported on another
deployment. | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v10.py",
"new_path": "sahara/api/v10.py",
"diff": "@@ -191,6 +191,30 @@ def node_group_templates_delete(node_group_template_id):\nreturn u.render()\n+def _node_group_template_export_helper(template):\n+ template.pop('id')\n+ template.pop('updated_at')\n+ template.pop('created_at')\n+ template['flavor_id'] = '{flavor_id}'\n+ template['security_groups'] = '{security_groups}'\n+ template['image_id'] = '{image_id}'\n+ template['tenant_id'] = '{tenant_id}'\n+ template['floating_ip_pool'] = '{floating_ip_pool}'\n+\n+\[email protected]('/node-group-templates/<node_group_template_id>/export')\[email protected](\"data-processing:node-group-templates:get\")\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\n+def node_group_template_export(node_group_template_id):\n+ content = u.to_wrapped_dict_no_render(\n+ api.export_node_group_template, node_group_template_id)\n+ _node_group_template_export_helper(content['node_group_template'])\n+ res = u.render(content)\n+ res.headers.add('Content-Disposition', 'attachment',\n+ filename='node_group_template.json')\n+ return res\n+\n+\n# Plugins ops\[email protected]('/plugins')\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/api/v10.py",
"new_path": "sahara/service/api/v10.py",
"diff": "@@ -213,6 +213,10 @@ def update_node_group_template(id, values):\nreturn conductor.node_group_template_update(context.ctx(), id, values)\n+def export_node_group_template(id):\n+ return conductor.node_group_template_get(context.ctx(), id)\n+\n+\n# Plugins ops\ndef get_plugins():\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add export of node group templates
Partially-Implements: bp portable-node-group-and-cluster-templates
This change adds function to REST api to enable user to export node
group template to JSON, so that it can be later imported on another
deployment.
Change-Id: I7a2ef7e5cff70e6034c1222252fbf7c5c35a7e1c |
488,299 | 05.04.2017 12:14:40 | -19,080 | 5221c00acaf6c4fe577994dbb74a9fa4bff98d65 | Fixes the "tox -e docs" warnings
The below warnings are fixed:
WARNING: document isn't included in any toctree
WARNING: Could not lex literal_block as "http"
WARNING: Could not lex literal_block as "json" | [
{
"change_type": "MODIFY",
"old_path": "doc/source/conf.py",
"new_path": "doc/source/conf.py",
"diff": "@@ -148,7 +148,7 @@ html_title = 'Sahara'\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = ['_static']\n+# html_static_path = ['_static']\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/index.rst",
"new_path": "doc/source/index.rst",
"diff": "@@ -96,6 +96,7 @@ Developer Guide\ndevref/testing\ndevref/log.guidelines\ndevref/apiv2\n+ devref/image-gen\n**Background Concepts for Sahara**\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/restapi.rst",
"new_path": "doc/source/restapi.rst",
"diff": "@@ -37,13 +37,13 @@ path.\nExample:\n-.. sourcecode:: http\n+.. sourcecode:: text\nGET /v1.1/{project_id}/clusters.json\nor\n-.. sourcecode:: http\n+.. sourcecode:: text\nGET /v1.1/{project_id}/clusters\nAccept: application/json\n@@ -62,7 +62,7 @@ This parameter must be a positive integer number.\nExample:\nGet 15 clusters after cluster with id=d62ad147-5c10-418c-a21a-3a6597044f29:\n-.. sourcecode:: http\n+.. sourcecode:: text\nGET /v1.1/{project_id}/clusters?limit=15&marker=d62ad147-5c10-418c-a21a-3a6597044f29\n@@ -73,13 +73,13 @@ for sorting objects. Sahara API supports ascending and descending sorting.\nExamples:\nSort clusters by name:\n-.. sourcecode:: http\n+.. sourcecode:: text\nGET /v1.1/{project_id}/clusters?sort_by=name\nSort clusters by date of creation in descending order:\n-.. sourcecode:: http\n+.. sourcecode:: text\nGET /v1.1/{project_id}/clusters?sort_by=-created_at\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/userdoc/advanced.configuration.guide.rst",
"new_path": "doc/source/userdoc/advanced.configuration.guide.rst",
"diff": "@@ -593,9 +593,11 @@ following configs for the cluster:\n.. sourcecode:: json\n- cluster_configs: {\n+ {\n+ \"cluster_configs\": {\n\"general\": {\n- \"URL of NTP server\": \"your_server.net\",\n+ \"URL of NTP server\": \"your_server.net\"\n+ }\n}\n}\n@@ -604,9 +606,11 @@ configs for the cluster:\n.. sourcecode:: json\n+ {\n\"cluster_configs\": {\n\"general\": {\n- \"Enable NTP service\": false,\n+ \"Enable NTP service\": false\n+ }\n}\n}\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixes the "tox -e docs" warnings
The below warning are fixed:
WARNING: document isn't included in any toctree
WARNING: Could not lex literal_block as "http"
WARNING: Could not lex literal_block as "json"
Change-Id: Ied4b16a57bf597ccb1e0cd12e433275864b2ad61 |
488,266 | 13.07.2017 13:26:47 | 0 | 952800ff0f9b9c7e573152c4c115cf5d74f9aa21 | Update Documentation link
{
"change_type": "MODIFY",
"old_path": "CONTRIBUTING.rst",
"new_path": "CONTRIBUTING.rst",
"diff": "@@ -5,7 +5,7 @@ you must follow the steps in this page:\nYou can find more Sahara-specific info in our How To Participate guide:\n- http://docs.openstack.org/developer/sahara/devref/how_to_participate.html\n+ https://docs.openstack.org/sahara/latest/devref/how_to_participate.html\nOnce those steps have been completed, changes to OpenStack\nshould be submitted for review via the Gerrit tool, following\n"
},
{
"change_type": "MODIFY",
"old_path": "README.rst",
"new_path": "README.rst",
"diff": "@@ -14,13 +14,13 @@ Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara\nLaunchpad project: https://launchpad.net/sahara\n-Sahara docs site: http://docs.openstack.org/developer/sahara\n+Sahara docs site: https://docs.openstack.org/sahara/latest/\nRoadmap: https://wiki.openstack.org/wiki/Sahara/Roadmap\n-Quickstart guide: http://docs.openstack.org/developer/sahara/devref/quickstart.html\n+Quickstart guide: https://docs.openstack.org/sahara/latest/devref/quickstart.html\n-How to participate: http://docs.openstack.org/developer/sahara/devref/how_to_participate.html\n+How to participate: https://docs.openstack.org/sahara/latest/devref/how_to_participate.html\nSource: http://git.openstack.org/cgit/openstack/sahara\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/devref/development.guidelines.rst",
"new_path": "doc/source/devref/development.guidelines.rst",
"diff": "@@ -79,7 +79,7 @@ Documentation Guidelines\nAll Sahara docs are written using Sphinx / RST and located in the main repo\nin the ``doc`` directory. You can add or edit pages here to update the\n-http://docs.openstack.org/developer/sahara site.\n+https://docs.openstack.org/sahara/latest/ site.\nThe documentation in docstrings should follow the `PEP 257`_ conventions\n(as mentioned in the `PEP 8`_ guidelines).\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/devref/testing.rst",
"new_path": "doc/source/devref/testing.rst",
"diff": "@@ -33,4 +33,4 @@ Additional tests reside in the sahara-tests repository (as above):\n* CLI tests check read-only operations using the Sahara CLI.\nFor more information about these tests, please read\n-http://docs.openstack.org/developer/sahara-tests/tempest-plugin.html\n+`Tempest Integration of Sahara <https://docs.openstack.org/sahara-tests/latest/tempest-plugin.html>`_.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/userdoc/sahara_on_ironic.rst",
"new_path": "doc/source/userdoc/sahara_on_ironic.rst",
"diff": "@@ -15,7 +15,7 @@ from the bare metal performance with self-service resource provisioning.\n<http://docs.openstack.org/developer/ironic/deploy/install-guide.html>`_\n3. Install Sahara as described in the `Sahara Installation Guide\n- <http://docs.openstack.org/developer/sahara/userdoc/installation.guide.html>`_\n+ <https://docs.openstack.org/sahara/latest/userdoc/installation.guide.html>`_\n4. Build the Sahara image and prepare it for uploading to Glance:\n"
},
{
"change_type": "MODIFY",
"old_path": "setup.cfg",
"new_path": "setup.cfg",
"diff": "@@ -16,7 +16,7 @@ classifiers =\nOperating System :: POSIX :: Linux\nauthor = OpenStack\nauthor-email = [email protected]\n-home-page = http://docs.openstack.org/developer/sahara/\n+home-page = https://docs.openstack.org/sahara/latest/\n[global]\nsetup-hooks = pbr.hooks.setup_hook\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Update Documention link
Change-Id: I39f0bc1c7b5ef8114f6391058ab4378638b50907 |
488,323 | 24.07.2017 11:34:08 | -28,800 | 13a523e008f10cb6ed88690a47c79f329b8d500d | Bad request exception for unsupported content type
Request without 'content-type: application/json' specified will
return '500 Internal Server Error'; it should return a more reasonable
'400 Bad Request'.
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "sahara/utils/api.py",
"new_path": "sahara/utils/api.py",
"diff": "@@ -218,7 +218,8 @@ def render(res=None, resp_type=None, status=None, name=None, **kwargs):\nresp_type = RT_JSON\nserializer = wsgi.JSONDictSerializer()\nelse:\n- abort_and_log(400, _(\"Content type '%s' isn't supported\") % resp_type)\n+ raise ex.InvalidDataException(\n+ _(\"Content type '%s' isn't supported\") % resp_type)\nbody = serializer.serialize(res)\nresp_type = str(resp_type)\n@@ -243,7 +244,7 @@ def request_data():\nif not content_type or content_type in RT_JSON:\ndeserializer = wsgi.JSONDeserializer()\nelse:\n- abort_and_log(400,\n+ raise ex.InvalidDataException(\n_(\"Content type '%s' isn't supported\") % content_type)\n# parsed request data to avoid unwanted re-parsings\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Bad request exception for unsupported content type
Request without 'content-type: application/json' specified will
return '500 Internal Server Error', it should return more reasonable
'400 Bad Request'.
Change-Id: I45fe78c6c29257cac7cf4f8711d95350b6f8bf58
Closes-Bug: 1705665 |
488,282 | 26.07.2017 19:04:39 | -7,200 | 59abd1e3019c470ae37d0129e16a363c6b624067 | Fix export of node group templates
Depends-on: | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v10.py",
"new_path": "sahara/api/v10.py",
"diff": "@@ -195,10 +195,11 @@ def _node_group_template_export_helper(template):\ntemplate.pop('id')\ntemplate.pop('updated_at')\ntemplate.pop('created_at')\n+ template.pop('tenant_id')\n+ template.pop('is_default')\ntemplate['flavor_id'] = '{flavor_id}'\ntemplate['security_groups'] = '{security_groups}'\ntemplate['image_id'] = '{image_id}'\n- template['tenant_id'] = '{tenant_id}'\ntemplate['floating_ip_pool'] = '{floating_ip_pool}'\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fix export of node group templates
Change-Id: Iee59b84765c50ec47370f4c72a1d59b24ac5ddb2
Depends-on: I7a2ef7e5cff70e6034c1222252fbf7c5c35a7e1c |
488,323 | 29.07.2017 11:42:13 | 14,400 | a76a9011df7cb8385315255dc98da5322e7a2bc1 | Fix error during node group template update
This is because an optional key is referenced when calling the validation
function, which results in a KeyError when the key doesn't exist, and it
does not ever actually get used.
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/base.py",
"new_path": "sahara/service/validations/base.py",
"diff": "@@ -165,7 +165,7 @@ def check_node_group_basic_fields(plugin_name, hadoop_version, ng,\n_(\"You must specify a volumes_size parameter\"))\nif ng.get('floating_ip_pool'):\n- check_floatingip_pool_exists(ng['name'], ng['floating_ip_pool'])\n+ check_floatingip_pool_exists(ng['floating_ip_pool'])\nif ng.get('security_groups'):\ncheck_security_groups_exist(ng['security_groups'])\n@@ -194,7 +194,7 @@ def check_security_groups_exist(security_groups):\nsg, _(\"Security group '%s' not found\"))\n-def check_floatingip_pool_exists(ng_name, pool_id):\n+def check_floatingip_pool_exists(pool_id):\nnetwork = None\nif CONF.use_neutron:\nnetwork = neutron.get_network(pool_id)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fix error during node group template update
this is because an optional key is referenced in calling validation
function which results in KeyError when key doesn't exist, and it
does not ever actually get used.
Change-Id: I12be69b5caf81289c2b8070fab8d2083709d6cf7
Closes-Bug: #1672516 |
488,323 | 26.07.2017 11:16:22 | -28,800 | 0921792d9377ecc5ef465bb441f870c3adc54d52 | Fix TypeError when get resource list
Fix TypeError when getting the resource list where the marker isn't in the
candidate resources during the prev/next marker picking process.
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "sahara/db/sqlalchemy/api.py",
"new_path": "sahara/db/sqlalchemy/api.py",
"diff": "@@ -82,6 +82,8 @@ def _get_prev_and_next_objects(objects, limit, marker, order=None):\nif obj.id == marker.id:\nposition = pos\nbreak\n+ else:\n+ return None, None\nif position - limit >= 0:\nprev_marker = objects[position - limit].id\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/db/test_utils.py",
"new_path": "sahara/tests/unit/db/test_utils.py",
"diff": "@@ -40,6 +40,9 @@ class TestPaginationUtils(testtools.TestCase):\nres = api._get_prev_and_next_objects(query, 5, mock.MagicMock(id=4))\nself.assertEqual((None, 9), res)\n+ res = api._get_prev_and_next_objects(query, 5, mock.MagicMock(id=100))\n+ self.assertEqual((None, None), res)\n+\ndef test_parse_sorting_args(self):\nself.assertEqual((\"name\", \"desc\"), api._parse_sorting_args(\"-name\"))\nself.assertEqual((\"name\", \"asc\"), api._parse_sorting_args(\"name\"))\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fix TypeError when get resource list
Fix TypeError when get resource list where marker doesn't in the
candidate resources during prev/next marker picking progress.
Change-Id: I29876b90f0784a36efa4d4e83f3ee23e8235f9fa
Closes-Bug: #1706490 |
488,323 | 05.08.2017 22:14:14 | -28,800 | 067a2816324b476800959c499f9a8659993ba67b | enable heat during devstack installation
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "devstack/README.rst",
"new_path": "devstack/README.rst",
"diff": "[[local|localrc]]\nenable_plugin sahara git://git.openstack.org/openstack/sahara\n+ enable_plugin heat git://git.openstack.org/openstack/heat\nOptionally, a git refspec may be provided as follows:\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/devstack.rst",
"new_path": "doc/source/contributor/devstack.rst",
"diff": "@@ -102,6 +102,9 @@ and may cause hard to debug problems.\n# Enable sahara\nenable_plugin sahara git://git.openstack.org/openstack/sahara\n+ # Enable heat\n+ enable_plugin heat git://git.openstack.org/openstack/heat\n+\nIn cases where you need to specify a git refspec (branch, tag, or commit hash)\nfor the sahara in-tree devstack plugin (or sahara repo), it should be\nappended to the git repo URL as follows:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | enable heat during devstack installation
Change-Id: I0693e135f57e979bf61c9f1ce064596a6272d64c
Closes-Bug: #1708839 |
488,282 | 28.08.2017 16:03:57 | -7,200 | 62a869ac1d0464aa186ab7bf26bed34a75fc4b1f | Add export of cluster templates
Partially-Implements: bp portable-node-group-and-cluster-templates
This change adds functions to sahara to enable export of ct to JSON. | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v10.py",
"new_path": "sahara/api/v10.py",
"diff": "@@ -143,6 +143,29 @@ def cluster_templates_delete(cluster_template_id):\nreturn u.render()\n+def _cluster_template_export_helper(template):\n+ template.pop('id')\n+ template.pop('updated_at')\n+ template.pop('created_at')\n+ template.pop('tenant_id')\n+ template.pop('is_default')\n+ template['default_image_id'] = '{default_image_id}'\n+ template['node_groups'] = '{node_groups}'\n+\n+\[email protected]('/cluster-templates/<cluster_template_id>/export')\[email protected](\"data-processing:cluster-templates:get\")\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\n+def cluster_template_export(cluster_template_id):\n+ content = u.to_wrapped_dict_no_render(\n+ api.get_cluster_template, cluster_template_id)\n+ _cluster_template_export_helper(content['cluster_template'])\n+ res = u.render(content)\n+ res.headers.add('Content-Disposition', 'attachment',\n+ filename='cluster_template.json')\n+ return res\n+\n+\n# NodeGroupTemplate ops\[email protected]('/node-group-templates')\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add export of cluster templates
Partially-Implements: bp portable-node-group-and-cluster-templates
This change adds functions to sahara to enable export of ct to JSON.
Change-Id: Ib5a2df4013840050b8034bcefd68040f508d0b31 |
488,282 | 01.09.2017 14:58:21 | -7,200 | 8735c26d86868e1b56e6a56e15508554b7d35e34 | Add docs about template portability.
Partially-Implements: bp portable-node-group-and-cluster-templates | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/overview.rst",
"new_path": "doc/source/user/overview.rst",
"diff": "@@ -36,6 +36,11 @@ specify node-scoped parameters here, they will work as defaults for node\ngroups. Also with the REST interface, during cluster creation a user can\noverride template parameters for both cluster and node groups.\n+Templates are portable - they can be exported to JSON files and imported\n+later either on the same deployment or on another one. To import an exported\n+template, replace the placeholder values with appropriate ones. This can be\n+accomplished easily through the CLI or UI, or be done manually.\n+\nProvisioning Plugins\n--------------------\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Add docs about template portability.
Partially-Implements: bp portable-node-group-and-cluster-templates
Change-Id: Iaef30a5309c79b56d430022f5bf098473f9c3a3c |
488,323 | 11.09.2017 15:49:43 | -28,800 | a37b8711efaca3ba1a46eb5a00b72159b8a2b128 | fix duplicated ntp configuration
The ntp service will be configured again on existing nodes
when scaling a cluster, which produces duplicated config
at the end of '/etc/ntp.conf' and results in a syntax error
when restarting the ntp service.
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/ntp_service.py",
"new_path": "sahara/service/ntp_service.py",
"diff": "@@ -107,12 +107,12 @@ def retrieve_ntp_server_url(cluster):\nreturn cl_configs[target][name]\n-def configure_ntp(cluster_id):\n+def configure_ntp(cluster_id, instance_ids=None):\ncluster = conductor.cluster_get(context.ctx(), cluster_id)\nif not is_ntp_enabled(cluster):\nLOG.debug(\"Don't configure NTP on cluster\")\nreturn\n- instances = c_u.get_instances(cluster)\n+ instances = c_u.get_instances(cluster, instance_ids)\nurl = retrieve_ntp_server_url(cluster)\nwith context.ThreadGroup() as tg:\nfor instance in instances:\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/ops.py",
"new_path": "sahara/service/ops.py",
"diff": "@@ -341,7 +341,7 @@ def _provision_scaled_cluster(cluster_id, node_group_id_map):\n# Setting up new nodes with the plugin\nif instance_ids:\n- ntp_service.configure_ntp(cluster_id)\n+ ntp_service.configure_ntp(cluster_id, instance_ids)\ncluster = c_u.change_cluster_status(\ncluster, c_u.CLUSTER_STATUS_CONFIGURING)\ninstances = c_u.get_instances(cluster, instance_ids)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | fix duplicated ntp configuration
the ntp service will be configured again on existing nodes
when scale a cluster, which turns out duplicated config
at the end of '/etc/ntp.conf' and results in syntax error
when restart ntp service.
Change-Id: I3d59823474b44498e1d6961f328d9b9d6f795ed6
Closes-Bug: #1716307 |
488,306 | 22.09.2017 14:01:22 | -3,600 | 88b427e98858990a35378370ba4b27691f6e6189 | Allow cluster create with no security groups
Got an unbound variable error without this fix:
UnboundLocalError: local variable 'sec_groups'
referenced before assignment
This appears to have been caused by the move to use the neutron client
in the following commit:
Closes-Bug: | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/heat/templates.py",
"new_path": "sahara/service/heat/templates.py",
"diff": "@@ -456,7 +456,6 @@ class ClusterStack(object):\ninst_name = _get_inst_name(ng)\nprivate_net = self.cluster.neutron_management_network\n- if ng.security_groups or ng.auto_security_group:\nsec_groups = self._get_security_groups(ng)\n# Check if cluster contains user key-pair and include it to template.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/heat/test_templates.py",
"new_path": "sahara/tests/unit/service/heat/test_templates.py",
"diff": "@@ -109,6 +109,17 @@ class TestClusterTemplate(BaseTestClusterTemplate):\nactual = heat_template._get_security_groups(ng2)\nself.assertEqual(expected, actual)\n+ def test_get_security_groups_empty(self):\n+ ng1, _ = self._make_node_groups()\n+ ng1['security_groups'] = None\n+ ng1['auto_security_group'] = False\n+ cluster = self._make_cluster('private_net', ng1, ng1)\n+ heat_template = self._make_heat_template(cluster, ng1, ng1)\n+\n+ ng1 = [ng for ng in cluster.node_groups if ng.name == \"master\"][0]\n+ actual = heat_template._get_security_groups(ng1)\n+ self.assertEqual([], actual)\n+\ndef _generate_auto_security_group_template(self, use_neutron):\nself.override_config('use_neutron', use_neutron)\nng1, ng2 = self._make_node_groups('floating')\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Allow cluster create with no security groups
Got an unbound variable error without this fix:
UnboundLocalError: local variable 'sec_groups'
referenced before assignment
This appears to have been caused by the move to use the neutron client
in the following commit:
b4fb2c404c371a5ba9b28a3ac769c94e69b8c141
Co-Authored-By: Jeremy Freudberg <[email protected]>
Closes-Bug: #1718940
Change-Id: I4f0b5a6b27ee2fb7821ec42faba3a69ce5a4bb78 |
488,288 | 11.10.2017 15:54:26 | 0 | 96cb88759f907809b93e3aec2007eb9e9b80079a | Fix typo in advanced-configuration-guide.rst and manager.py
Replace 'allows' with 'allow'
Replace 'have' with 'has' | [
{
"change_type": "MODIFY",
"old_path": "doc/source/admin/advanced-configuration-guide.rst",
"new_path": "doc/source/admin/advanced-configuration-guide.rst",
"diff": "@@ -85,7 +85,7 @@ DHCP and specify DNS server ip addresses (e.g. 1.1.1.1 and 2.2.2.2) in\n``DNS Name Servers`` field in the ``Subnet Details``. If the subnet already\nexists and changing it or creating new one is impossible then Sahara will\nmanually change ``/etc/resolv.conf`` file on every instance of the cluster (if\n-``nameservers`` list have been specified in ``sahara.conf``). In this case,\n+``nameservers`` list has been specified in ``sahara.conf``). In this case,\nthough, Sahara cannot guarantee that these changes will not be overwritten by\nDHCP or other services of the existing network. Sahara has a health check for\ntrack this situation (and if it occurs the health status will be red).\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/conductor/manager.py",
"new_path": "sahara/conductor/manager.py",
"diff": "@@ -104,7 +104,7 @@ class ConductorManager(db_base.Base):\n\"\"\"This class aimed to conduct things.\nThe methods in the base API for sahara-conductor are various proxy\n- operations that allows other services to get specific work done without\n+ operations that allow other services to get specific work done without\nlocally accessing the database.\nAdditionally it performs some template-to-object copying magic.\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fix typo in advanced-configuration-guide.rst and manager.py
Replace 'allows' with 'allow'
Replace 'have' with 'has'
Change-Id: I35f068b56a8c6982ca90f3a93ae1076339da9959 |
488,272 | 16.02.2017 13:22:08 | 10,800 | d214c228a097929f2409d9c1676ac2961978a52b | Image generation for Ambari Plugin
Adds image generation for Ambari | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/ambari_2_4_image_generation_validation-47eabb9fa90384c8.yaml",
"diff": "+---\n+features:\n+ - Enables the creation and validation of Ambari 2.4 images using the\n+ new image generation process where libguestfs replaces the use of DIB.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/plugin.py",
"new_path": "sahara/plugins/ambari/plugin.py",
"diff": "@@ -23,6 +23,7 @@ from sahara.plugins.ambari import deploy\nfrom sahara.plugins.ambari import edp_engine\nfrom sahara.plugins.ambari import health\nfrom sahara.plugins.ambari import validation\n+from sahara.plugins import images\nfrom sahara.plugins import kerberos\nfrom sahara.plugins import provisioning as p\nfrom sahara.plugins import utils as plugin_utils\n@@ -263,3 +264,28 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):\ndef get_health_checks(self, cluster):\nreturn health.get_health_checks(cluster)\n+\n+ validator = images.SaharaImageValidator.from_yaml(\n+ 'plugins/ambari/resources/images/image.yaml',\n+ resource_roots=['plugins/ambari/resources/images'])\n+\n+ def get_image_arguments(self, hadoop_version):\n+ if hadoop_version != '2.4':\n+ return NotImplemented\n+ return self.validator.get_argument_list()\n+\n+ def pack_image(self, hadoop_version, remote,\n+ test_only=False, image_arguments=None):\n+ self.validator.validate(remote, test_only=test_only,\n+ image_arguments=image_arguments)\n+\n+ def validate_images(self, cluster, test_only=False, image_arguments=None):\n+ image_arguments = self.get_image_arguments(cluster['hadoop_version'])\n+ if not test_only:\n+ instances = plugin_utils.get_instances(cluster)\n+ else:\n+ instances = plugin_utils.get_instances(cluster)[0]\n+ for instance in instances:\n+ with instance.remote() as r:\n+ self.validator.validate(r, test_only=test_only,\n+ image_arguments=image_arguments)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/disable_ambari",
"diff": "+#!/usr/bin/env bash\n+\n+if [ $test_only -eq 0 ]; then\n+ chkconfig ambari-server off\n+ chkconfig ambari-agent off\n+else\n+ exit 0\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/disable_firewall",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files iptables.service | grep 'enabled' | wc -l)\n+\n+if [ $check -eq 1 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ if type -p systemctl && [[ \"$(systemctl --no-pager list-unit-files firewalld)\" =~ 'enabled' ]]; then\n+ systemctl disable firewalld\n+ fi\n+\n+ if type -p service; then\n+ service ip6tables save\n+ service iptables save\n+ chkconfig ip6tables off\n+ chkconfig iptables off\n+ fi\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/disable_selinux",
"diff": "+#!/bin/bash\n+\n+check=$(cat /etc/selinux/config | grep 'SELINUX=disabled' | wc -l)\n+\n+if [ $check -eq 0 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ config=/etc/selinux/config\n+ [ -e $config ] && sed -i \"s%^\\(SELINUX=\\s*\\).*$%SELINUX=disabled%\" $config\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/setup_java_home",
"diff": "+#!/bin/bash\n+\n+JAVA_RC=\"/etc/profile.d/99-java.sh\"\n+JAVA_BIN_RC=\"/etc/profile.d/98-java-bin.sh\"\n+\n+if [ ! -f $JAVA_RC ]; then\n+ if [ $test_only -eq 0 ]; then\n+ case \"$java_distro\" in\n+ openjdk )\n+ JRE_HOME=\"/usr/lib/jvm/java-openjdk/jre\"\n+ JDK_HOME=\"/usr/lib/jvm/java-openjdk\"\n+ ;;\n+ oracle-java )\n+ JRE_HOME=\"/usr/java/oracle-jdk\"\n+ JDK_HOME=\"/usr/java/oracle-jdk\"\n+ ;;\n+ esac\n+\n+ echo \"export JAVA_HOME=$JRE_HOME\" >> $JAVA_RC\n+ chmod +x $JAVA_RC\n+\n+ echo \"export PATH=$JRE_HOME/bin:\\$PATH\" >> $JAVA_BIN_RC\n+ echo \"export PATH=$JDK_HOME/bin:\\$PATH\" >> $JAVA_BIN_RC\n+ chmod +x $JAVA_BIN_RC\n+\n+ alternatives --install /usr/bin/java java $JRE_HOME/bin/java 200000\n+ alternatives --install /usr/bin/javac javac $JDK_HOME/bin/javac 200000\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/unlimited_security_artifacts",
"diff": "+#!/bin/bash\n+\n+if [ ! -d /tmp/UnlimitedPolicy/ ]; then\n+ if [ $test_only -eq 0 ]; then\n+ mkdir /tmp/UnlimitedPolicy/\n+ wget http://tarballs.openstack.org/sahara/dist/common-artifacts/local_policy.jar -O /tmp/UnlimitedPolicy/local_policy.jar\n+ wget http://tarballs.openstack.org/sahara/dist/common-artifacts/US_export_policy.jar -O /tmp/UnlimitedPolicy/US_export_policy.jar\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/wget_repo",
"diff": "+#!/usr/bin/env bash\n+\n+if [ ! -f /etc/yum.repos.d/ambari.repo ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/$ambari_version/ambari.repo -O /etc/yum.repos.d/ambari.repo\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/common/add_jar",
"diff": "+#! /bin/bash\n+\n+hadoop=\"2.7.1\"\n+\n+HDFS_LIB_DIR=${hdfs_lib_dir:-\"/usr/share/hadoop/lib\"}\n+JAR_BUILD_DATE=\"2016-03-17\"\n+SWIFT_LIB_URI=\"http://tarballs.openstack.org/sahara/dist/hadoop-openstack/hadoop-openstack-${hadoop}.jar\"\n+HADOOP_SWIFT_JAR_NAME=hadoop-openstack.jar\n+\n+if [ ! -f $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME ]; then\n+ if [ $test_only -eq 0 ]; then\n+ if [ -z \"${swift_url:-}\" ]; then\n+ wget -O $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME $SWIFT_LIB_URI\n+ else\n+ wget -O $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME $swift_url\n+ fi\n+\n+ if [ $? -ne 0 ]; then\n+ echo -e \"Could not download Swift Hadoop FS implementation.\\nAborting\"\n+ exit 1\n+ fi\n+\n+ chmod 0644 $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME\n+ else\n+ exit 0\n+ fi\n+fi\n+\n+\n+\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/common/oracle_java",
"diff": "+#!/bin/bash\n+\n+\n+# NOTE: $(dirname $0) is read-only, use space under $TARGET_ROOT\n+JAVA_LOCATION=${JAVA_TARGET_LOCATION:-\"/usr/java\"}\n+JAVA_NAME=\"oracle-jdk\"\n+JAVA_HOME=$JAVA_LOCATION/$JAVA_NAME\n+JAVA_DOWNLOAD_URL=${JAVA_DOWNLOAD_URL:-\"http://download.oracle.com/otn-pub/java/jdk/7u51-b13/jdk-7u51-linux-x64.tar.gz\"}\n+\n+if [ ! -d $JAVA_LOCATION ]; then\n+ if [ $test_only -eq 0 ]; then\n+ echo \"Begin: installation of Java\"\n+ mkdir -p $JAVA_LOCATION\n+\n+ if [ -n \"$JAVA_DOWNLOAD_URL\" ]; then\n+ JAVA_FILE=$(basename $JAVA_DOWNLOAD_URL)\n+ wget --no-check-certificate --no-cookies -c \\\n+ --header \"Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie\" \\\n+ -O $JAVA_LOCATION/$JAVA_FILE $JAVA_DOWNLOAD_URL\n+ elif [ -n \"$JAVA_FILE\" ]; then\n+ install -D -g root -o root -m 0755 $(dirname $0)/$JAVA_FILE $JAVA_LOCATION\n+ fi\n+\n+ cd $JAVA_LOCATION\n+\n+ echo \"Decompressing Java archive\"\n+ echo -e \"\\n\" | tar -zxf $JAVA_FILE\n+ echo \"Setting up $JAVA_NAME\"\n+ chown -R root:root $JAVA_LOCATION\n+ JAVA_DIR=`ls -1 $JAVA_LOCATION | grep -v tar.gz`\n+ ln -s $JAVA_LOCATION/$JAVA_DIR $JAVA_HOME\n+\n+ setup-java-home $JAVA_HOME $JAVA_HOME\n+\n+ rm $JAVA_FILE\n+\n+ echo \"End: installation of Java\"\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/resources/images/image.yaml",
"new_path": "sahara/plugins/ambari/resources/images/image.yaml",
"diff": "arguments:\n- fish:\n- description: awesome\n- default: trout\n+ ambari_version:\n+ description: The version of Ambari to install. Defaults to 2.2.1.0.\n+ default: 2.2.1.0\n+ choices:\n+ - 2.2.0.0 # HDP 2.3\n+ - 2.2.1.0 # HDP 2.4\n+ java_distro:\n+ default: openjdk\n+ description: The distribution of Java to install. Defaults to openjdk.\n+ choices:\n+ - openjdk\n+ - oracle-java\n+ hdfs_lib_dir:\n+ default: /opt\n+ description: The path to HDFS lib. Defaults to /opt.\n+ required: False\n+ swift_url:\n+ default: https://tarballs.openstack.org/sahara/dist/hadoop-openstack/master/\n+ description: Location of the swift jar file.\n+ required: False\n+\n+validators:\n+ - argument_case:\n+ argument_name: java_distro\n+ cases:\n+ openjdk:\n+ - package: java-1.7.0-openjdk-devel\n+ oracle-java:\n+ - script: common/oracle_java\n+ - os_case:\n+ - centos:\n+ - script: centos/disable_selinux\n+ - script:\n+ centos/setup_java_home:\n+ env_vars: [java_distro]\n+ - package: wget\n+ - script:\n+ centos/wget_repo:\n+ env_vars: [ambari_version]\n+ - package: redhat-lsb\n+ - package:\n+ - mariadb\n+ - mariadb-libs\n+ - mariadb-server\n+ - mysql-connector-java\n+ - package: ntp\n+ - package:\n+ - ambari-metrics-monitor\n+ - ambari-server\n+ - ambari-metrics-collector\n+ - ambari-metrics-hadoop-sink\n+ - package: nmap-ncat\n+ - package: fuse-libs\n+ - package: snappy-devel\n+ - ubuntu:\n+ - script:\n+ ubuntu/wget_repo:\n+ env_vars: [ambari_version]\n+ - package:\n+ - ambari-metrics-assembly\n+ - netcat\n+ - package: fuse\n+ - package:\n+ - mysql-client-5.5\n+ - mysql-server-5.5\n+ - libmysql-java\n+ - package: ambari-agent\n+ - package:\n+ - unzip\n+ - zip\n+ - curl\n+ - tar\n+ - rpcbind\n+ - rng-tools\n+ - iptables-services\n+ - os_case:\n+ - centos:\n+ - script: centos/disable_ambari\n+ - script: centos/disable_firewall\n+ - script:\n+ common/add_jar:\n+ env_vars: [hdfs_lib_dir, swift_url]\n+ - script:\n+ centos/unlimited_security_artifacts:\n+ env_vars: [unlimited_security_location]\n+ - ubuntu:\n+ - script:\n+ common/add_jar:\n+ env_vars: [hdfs_lib_dir, swift_url]\n+ - os_case:\n+ - centos:\n+ - package:\n+ - krb5-server\n+ - krb5-libs\n+ - krb5-workstation\n+ - ubuntu:\n+ - package:\n+ - krb5-admin-server\n+ - libpam-krb5\n+ - krb5-user\n+ - ldap-utils\n-validators: []\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/ubuntu/wget_repo",
"diff": "+#!/usr/bin/env bash\n+\n+if [ ! -f /etc/apt/sources.list.d/ambari.list ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget http://public-repo-1.hortonworks.com/ambari/ubuntu12/2.x/updates/$ambari_version/ambari.list -O /etc/apt/sources.list.d/ambari.list\n+ apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/images.py",
"new_path": "sahara/plugins/images.py",
"diff": "@@ -684,7 +684,7 @@ class SaharaScriptValidator(SaharaImageValidatorBase):\npath = '/tmp/%s.sh' % uuidutils.generate_uuid()\nremote.write_file_to(path, script, run_as_root=True)\n_sudo(remote, 'chmod +x %s' % path)\n- code, stdout = _sudo(remote, '%s' % path)\n+ code, stdout = _sudo(remote, path)\nif self.output_var:\nimage_arguments[self.output_var] = stdout\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/gate/build-images",
"new_path": "tools/gate/build-images",
"diff": "@@ -67,6 +67,9 @@ case \"$PLUGIN\" in\n\"cloudera\")\nbuild_images \"cdh\" \"5.7.0\" \"centos7\"\n;;\n+ \"ambari\")\n+ build_images \"ambari\" \"2.4\" \"centos7\"\n+ ;;\n*)\necho \"Invalid version\"\n;;\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Image generation for Ambari Plugin
Adds image generation for Ambari
Change-Id: I0e798696de7fb2fcf72d826ad6b7356f526db836 |
488,272 | 04.10.2017 17:40:01 | 10,800 | 157ce33a2b4c207954da6f0b904315ca7cd0a365 | Image generation for CDH 5.9.0
Adds image generation and validation for CDH 5.9.0 | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/cdh_5_9_0_image_generation_validation-19d10e6468e30b4f.yaml",
"diff": "+---\n+features:\n+ - Enables the creation and validation of CDH 5.9.0 images using the\n+ new image generation process where libguestfs replaces the use of DIB.\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/images.py",
"diff": "+# Copyright (c) 2016 Red Hat, Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from sahara.plugins import images\n+from sahara.plugins import utils as plugin_utils\n+\n+\n+_validator = images.SaharaImageValidator.from_yaml(\n+ 'plugins/cdh/v5_9_0/resources/images/image.yaml',\n+ resource_roots=['plugins/cdh/v5_9_0/resources/images'])\n+\n+\n+def get_image_arguments():\n+ return _validator.get_argument_list()\n+\n+\n+def pack_image(remote, test_only=False, image_arguments=None):\n+ _validator.validate(remote, test_only=test_only,\n+ image_arguments=image_arguments)\n+\n+\n+def validate_images(cluster, test_only=False, image_arguments=None):\n+ image_arguments = get_image_arguments()\n+ if not test_only:\n+ instances = plugin_utils.get_instances(cluster)\n+ else:\n+ instances = plugin_utils.get_instances(cluster)[0]\n+ for instance in instances:\n+ with instance.remote() as r:\n+ _validator.validate(r, test_only=test_only,\n+ image_arguments=image_arguments)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/centos/disable_firewall",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files iptables.service | grep 'enabled' | wc -l)\n+\n+if [ $check -eq 1 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ if type -p systemctl && [[ \"$(systemctl --no-pager list-unit-files firewalld)\" =~ 'enabled' ]]; then\n+ systemctl disable firewalld\n+ fi\n+\n+ if type -p service; then\n+ service ip6tables save\n+ service iptables save\n+ chkconfig ip6tables off\n+ chkconfig iptables off\n+ fi\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/centos/selinux_permissive",
"diff": "+#!/bin/bash\n+\n+check=$(cat /etc/selinux/config | grep \"SELINUX=permissive\" | wc -l)\n+echo $check\n+\n+if [ $check -eq 0 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ echo \"SELINUX=permissive\" > /etc/selinux/config\n+ echo \"SELINUXTYPE=targeted\" >> /etc/selinux/config\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/centos/turn_off_services",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files oozie.service | grep 'enabled' | wc -l)\n+\n+if [ \"$check\" != \"on\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ for i in cloudera-scm-agent \\\n+ cloudera-scm-server \\\n+ cloudera-scm-server-db \\\n+ hadoop-hdfs-datanode \\\n+ hadoop-hdfs-namenode \\\n+ hadoop-hdfs-secondarynamenode \\\n+ hadoop-mapreduce-historyserver \\\n+ hadoop-yarn-nodemanager \\\n+ hadoop-yarn-resourcemanager \\\n+ hive-metastore \\\n+ hive-server2 \\\n+ hive-webhcat-server \\\n+ hue \\\n+ oozie \\\n+ postgresql \\\n+ impala-catalog \\\n+ impala-server \\\n+ impala-state-store \\\n+ solr-server \\\n+ spark-history-server\n+ do\n+ chkconfig $i off\n+ done\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/centos/wget_repo",
"diff": "+#!/bin/bash\n+\n+if [ ! -f /etc/yum.repos.d/cloudera-cdh5.repo ]; then\n+ if [ $test_only -eq 0 ]; then\n+ echo '[cloudera-cdh5]' > /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo \"name=Cloudera's Distribution for Hadoop, Version 5\" >> /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo \"baseurl=http://archive.cloudera.com/cdh5/redhat/7/x86_64/cdh/$plugin_version/\" >> /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo \"gpgkey = http://archive.cloudera.com/cdh5/redhat/7/x86_64/cdh/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-cdh5.repo\n+\n+ echo '[cloudera-manager]' > /etc/yum.repos.d/cloudera-manager.repo\n+ echo 'name=Cloudera Manager' >> /etc/yum.repos.d/cloudera-manager.repo\n+ echo \"baseurl=http://archive.cloudera.com/cm5/redhat/7/x86_64/cm/$plugin_version/\" >> /etc/yum.repos.d/cloudera-manager.repo\n+ echo \"gpgkey = http://archive.cloudera.com/cm5/redhat/7/x86_64/cm/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/cloudera-manager.repo\n+ echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-manager.repo\n+\n+ echo '[navigator-keytrustee]' > /etc/yum.repos.d/kms.repo\n+ echo \"name=Cloudera's Distribution for navigator-Keytrustee, Version 5\" >> /etc/yum.repos.d/kms.repo\n+ echo \"baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/7/x86_64/navigator-keytrustee/$plugin_version/\" >> /etc/yum.repos.d/kms.repo\n+ echo \"gpgkey = http://archive.cloudera.com/navigator-keytrustee5/redhat/7/x86_64/navigator-keytrustee/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/kms.repo\n+ echo 'gpgcheck = 1' >> /etc/yum.repos.d/kms.repo\n+\n+ echo \"[cloudera-kafka]\" > /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"name=Cloudera's Distribution for kafka, Version 2.0.2\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"baseurl=http://archive.cloudera.com/kafka/redhat/7/x86_64/kafka/2.0.2/\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"gpgkey = http://archive.cloudera.com/kafka/redhat/7/x86_64/kafka/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"gpgcheck = 1\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+\n+ yum clean all\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/common/add_jar",
"diff": "+#!/usr/bin/env bash\n+\n+hadoop=\"2.6.0\"\n+\n+HADOOP_SWIFT_JAR_NAME=\"hadoop-openstack-$hadoop-cdh$plugin_version.jar\"\n+\n+HADOOP_SWIFT_BUILD_LOCATION=\"http://tarballs.openstack.org/sahara/dist/hadoop-openstack/master\"\n+SWIFT_LIB_URI=\"$HADOOP_SWIFT_BUILD_LOCATION/hadoop-openstack-${hadoop}.jar\"\n+HADOOP_SWIFT_JAR_NAME=\"hadoop-openstack.jar\"\n+\n+if [ ! -f $hdfs_lib_dir/$HADOOP_SWIFT_JAR_NAME ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget -O $hdfs_lib_dir/$HADOOP_SWIFT_JAR_NAME $SWIFT_LIB_URI\n+\n+ if [ $? -ne 0 ]; then\n+ echo -e \"Could not download Swift Hadoop FS implementation.\\nAborting\"\n+ exit 1\n+ fi\n+\n+ chmod 0644 $hdfs_lib_dir/$HADOOP_SWIFT_JAR_NAME\n+ else\n+ exit 0\n+ fi\n+fi\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/common/install_cloudera",
"diff": "+#!/bin/bash\n+\n+HADOOP_OPENSTACK_5_9_x_URL=\"https://repository.cloudera.com/artifactory/repo/org/apache/hadoop/hadoop-openstack/2.6.0-cdh$plugin_version/hadoop-openstack-2.6.0-cdh$plugin_version.jar\"\n+\n+dest=/usr/lib/hadoop-mapreduce/hadoop-openstack.jar\n+\n+if [ ! -L \"/usr/lib/oozie/oozie-sharelib-yarn.tar.gz\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget -O $dest $HADOOP_OPENSTACK_5_9_x_URL\n+\n+ ln -s /usr/lib/oozie/oozie-sharelib-yarn /usr/lib/oozie/oozie-sharelib-yarn.tar.gz\n+ ln -s /usr/lib/oozie/oozie-sharelib-mr1 /usr/lib/oozie/oozie-sharelib-mr1.tar.gz\n+ ln -s /usr/lib/oozie/oozie-sharelib-yarn.tar.gz /usr/lib/oozie/oozie-sharelib.tar.gz\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/common/install_extjs",
"diff": "+#!/bin/bash\n+\n+EXTJS_DESTINATION_DIR=\"/var/lib/oozie\"\n+EXTJS_DOWNLOAD_URL=\"http://tarballs.openstack.org/sahara/dist/common-artifacts/ext-2.2.zip\"\n+\n+extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+\n+if [ ! -n \"$EXTJS_DESTINATION_DIR\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n+\n+ mkdir -p $EXTJS_DESTINATION_DIR\n+ if [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n+ unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ rm -f $extjs_archive\n+ else\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ fi\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/common/unlimited_security_artifacts",
"diff": "+#!/bin/bash\n+\n+if [ ! -n /tmp/UnlimitedPolicy/ ]; then\n+ if [ $test_only -eq 0 ]; then\n+ mkdir /tmp/UnlimitedPolicy/\n+ wget $unlimited_security_location/local_policy.jar -O /tmp/UnlimitedPolicy/local_policy.jar\n+ wget $unlimited_security_location/US_export_policy.jar -O /tmp/UnlimitedPolicy/US_export_policy.jar\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/image.yaml",
"diff": "+arguments:\n+ java_distro:\n+ default: cloudera-jdk\n+ description: The distribution of Java to install. Defaults to openjdk.\n+ choices:\n+ - openjdk\n+ - oracle-java\n+ - cloudera-jdk\n+ plugin_version:\n+ default: 5.9.0\n+ description: The distribution of CDH to install. Defaults to 5.9.0.\n+ hidden: True\n+ required: False\n+ choices:\n+ - 5.9.0\n+ - 5.9.1\n+ - 5.9.2\n+ - 5.9.3\n+ hdfs_lib_dir:\n+ default: /usr/lib/hadoop-mapreduce\n+ description: The path to HDFS_LIB_DIR. Default to /usr/lib/hadoop-mapreduce\n+ required: False\n+ unlimited_security_location:\n+ default: http://sahara-files.mirantis.com/kerberos-artifacts/\n+ description: Place where UnlimitedSecurity polices are located\n+ required: False\n+\n+validators:\n+ - script: common/install_extjs\n+ - os_case:\n+ - centos:\n+ - package: wget\n+ - script:\n+ centos/wget_repo:\n+ env_vars: [plugin_version]\n+ - ubuntu:\n+ - script:\n+ ubuntu/wget_repo:\n+ env_vars: [plugin_version]\n+ - argument_case:\n+ argument_name: java_distro\n+ cases:\n+ openjdk:\n+ - any:\n+ - all:\n+ - package: java-1.8.0-openjdk-devel\n+ - all:\n+ - package: java-1.7.0-openjdk-devel\n+ cloudera-jdk:\n+ - all:\n+ - package: oracle-j2sdk1.7\n+ - package: ntp\n+ - package:\n+ - cloudera-manager-agent\n+ - cloudera-manager-daemons\n+ - cloudera-manager-server\n+ - cloudera-manager-server-db-2\n+ - package:\n+ - hadoop-hdfs-namenode\n+ - hadoop-hdfs-datanode\n+ - hadoop-hdfs-secondarynamenode\n+ - hadoop-mapreduce\n+ - hadoop-mapreduce-historyserver\n+ - hadoop-yarn-nodemanager\n+ - hadoop-yarn-resourcemanager\n+ - package:\n+ - hbase\n+ - hbase-solr\n+ - package:\n+ - hive-hcatalog\n+ - hive-metastore\n+ - hive-server2\n+ - hive-webhcat-server\n+ - hue\n+ - package:\n+ - oozie\n+ - spark-core\n+ - os_case:\n+ - centos:\n+ - package: spark-history-server\n+ - package: zookeeper\n+ - package: unzip\n+ - package: flume-ng\n+ - package: hadoop-kms\n+ - package:\n+ - impala\n+ - impala-server\n+ - impala-state-store\n+ - impala-catalog\n+ - impala-shell\n+ - package: keytrustee-keyprovider\n+ - package:\n+ - sentry\n+ - solr-server\n+ - solr-doc\n+ - search\n+ - sqoop2\n+ - package:\n+ - kafka\n+ - kafka-server\n+ - script:\n+ common/install_cloudera:\n+ env_vars: [plugin_version]\n+ - os_case:\n+ - centos:\n+ - script: centos/turn_off_services\n+ - ubuntu:\n+ - script: ubuntu/turn_off_services\n+ - script:\n+ common/add_jar:\n+ env_vars: [plugin_version, hdfs_lib_dir]\n+ - script:\n+ common/unlimited_security_artifacts:\n+ env_vars: [unlimited_security_location]\n+ - os_case:\n+ - centos:\n+ - package:\n+ - krb5-server\n+ - krb5-libs\n+ - krb5-workstation\n+ - rng-tools\n+ - package: iptables-services\n+ - script: centos/selinux_permissive\n+ - script: centos/disable_firewall\n+ - package: nmap-ncat\n+ - ubuntu:\n+ - package:\n+ - krb5-admin-server\n+ - libpam-krb5\n+ - ldap-utils\n+ - krb5-user\n+ - rng-tools\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/ubuntu/turn_off_services",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files oozie.service | grep 'enabled' | wc -l)\n+\n+if [ \"$check\" != \"on\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ for i in cloudera-scm-agent \\\n+ cloudera-scm-server \\\n+ cloudera-scm-server-db \\\n+ hadoop-hdfs-datanode \\\n+ hadoop-hdfs-namenode \\\n+ hadoop-hdfs-secondarynamenode \\\n+ hadoop-mapreduce-historyserver \\\n+ hadoop-yarn-nodemanager \\\n+ hadoop-yarn-resourcemanager \\\n+ hive-metastore \\\n+ hive-server2 \\\n+ hive-webhcat-server \\\n+ hue \\\n+ oozie \\\n+ postgresql \\\n+ impala-catalog \\\n+ impala-server \\\n+ impala-state-store \\\n+ solr-server \\\n+ spark-history-server\n+ do\n+ update-rc.d -f $i remove\n+ done\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/ubuntu/wget_repo",
"diff": "+#!/bin/bash\n+\n+if [ ! -f /etc/apt/sources.list.d/cdh5.list ]; then\n+ if [ $test_only -eq 0 ]; then\n+ # Add repository with postgresql package (it's dependency of cloudera packages)\n+ # Base image doesn't contain this repo\n+ echo \"deb http://nova.clouds.archive.ubuntu.com/ubuntu/ trusty universe multiverse main\" >> /etc/apt/sources.list\n+\n+ # Cloudera repositories\n+ echo \"deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh$plugin_version contrib\" > /etc/apt/sources.list.d/cdh5.list\n+ echo \"deb-src http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh$plugin_version contrib\" >> /etc/apt/sources.list.d/cdh5.list\n+\n+ wget -qO - http://archive-primary.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh/archive.key | apt-key add -\n+\n+ echo \"deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm$plugin_version contrib\" > /etc/apt/sources.list.d/cm5.list\n+ echo \"deb-src http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm$plugin_version contrib\" >> /etc/apt/sources.list.d/cm5.list\n+\n+ wget -qO - http://archive-primary.cloudera.com/cm5/ubuntu/trusty/amd64/cm/archive.key | apt-key add -\n+\n+ wget -O /etc/apt/sources.list.d/kms.list http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/cloudera.list\n+ wget -qO - http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/archive.key | apt-key add -\n+\n+ # add Kafka repository\n+ echo 'deb http://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/ trusty-kafka2.0.2 contrib' >> /etc/apt/sources.list\n+ wget -qO - https://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/archive.key | apt-key add -\n+\n+ #change repository priority\n+ echo 'Package: zookeeper' >> /etc/apt/preferences.d/cloudera-pin\n+ echo 'Pin: origin \"archive.cloudera.com\"' >> /etc/apt/preferences.d/cloudera-pin\n+ echo 'Pin-Priority: 1001' >> /etc/apt/preferences.d/cloudera-pin\n+\n+ apt-get update\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_9_0/versionhandler.py",
"new_path": "sahara/plugins/cdh/v5_9_0/versionhandler.py",
"diff": "@@ -19,6 +19,7 @@ from sahara.plugins.cdh.v5_9_0 import cloudera_utils\nfrom sahara.plugins.cdh.v5_9_0 import config_helper\nfrom sahara.plugins.cdh.v5_9_0 import deploy\nfrom sahara.plugins.cdh.v5_9_0 import edp_engine\n+from sahara.plugins.cdh.v5_9_0 import images\nfrom sahara.plugins.cdh.v5_9_0 import plugin_utils\nfrom sahara.plugins.cdh.v5_9_0 import validation\n@@ -32,4 +33,5 @@ class VersionHandler(avm.BaseVersionHandler):\nself.plugin_utils = plugin_utils.PluginUtilsV590()\nself.deploy = deploy\nself.edp_engine = edp_engine\n+ self.images = images\nself.validation = validation.ValidatorV590()\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/gate/build-images",
"new_path": "tools/gate/build-images",
"diff": "@@ -66,6 +66,7 @@ setup_build_env\ncase \"$PLUGIN\" in\n\"cloudera\")\nbuild_images \"cdh\" \"5.7.0\" \"centos7\"\n+ build_images \"cdh\" \"5.9.0\" \"centos7\"\n;;\n*)\necho \"Invalid version\"\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Image generation for CDH 5.9.0
Adds image generation and validation for CDH 5.9.0
Change-Id: Id1d63ffb695b66df6252da96fc8540b50871eaa3 |
488,272 | 18.10.2017 16:22:06 | 10,800 | 03c6ebaecf622df15fb35d55a885ed99c77c5425 | Image generation for CDH 5.11.0
Adds image generation and validation for CDH 5.11.0 | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/cdh_5_11_0_image_generation_validation-6334ef6d04950935.yaml",
"diff": "+---\n+features:\n+ - Enables the creation and validation of CDH 5.11.0 images using the\n+ new image generation process where libguestfs replaces the use of DIB.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/images.py",
"diff": "+# Copyright (c) 2016 Red Hat, Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from sahara.plugins import images\n+from sahara.plugins import utils as plugin_utils\n+\n+\n+_validator = images.SaharaImageValidator.from_yaml(\n+ 'plugins/cdh/v5_11_0/resources/images/image.yaml',\n+ resource_roots=['plugins/cdh/v5_11_0/resources/images'])\n+\n+\n+def get_image_arguments():\n+ return _validator.get_argument_list()\n+\n+\n+def pack_image(remote, test_only=False, image_arguments=None):\n+ _validator.validate(remote, test_only=test_only,\n+ image_arguments=image_arguments)\n+\n+\n+def validate_images(cluster, test_only=False, image_arguments=None):\n+ image_arguments = get_image_arguments()\n+ if not test_only:\n+ instances = plugin_utils.get_instances(cluster)\n+ else:\n+ instances = plugin_utils.get_instances(cluster)[0]\n+ for instance in instances:\n+ with instance.remote() as r:\n+ _validator.validate(r, test_only=test_only,\n+ image_arguments=image_arguments)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/centos/disable_firewall",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files iptables.service | grep 'enabled' | wc -l)\n+\n+if [ $check -eq 1 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ if type -p systemctl && [[ \"$(systemctl --no-pager list-unit-files firewalld)\" =~ 'enabled' ]]; then\n+ systemctl disable firewalld\n+ fi\n+\n+ if type -p service; then\n+ service ip6tables save\n+ service iptables save\n+ chkconfig ip6tables off\n+ chkconfig iptables off\n+ fi\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/centos/selinux_permissive",
"diff": "+#!/bin/bash\n+\n+check=$(cat /etc/selinux/config | grep \"SELINUX=permissive\" | wc -l)\n+echo $check\n+\n+if [ $check -eq 0 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ echo \"SELINUX=permissive\" > /etc/selinux/config\n+ echo \"SELINUXTYPE=targeted\" >> /etc/selinux/config\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/centos/turn_off_services",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files oozie.service | grep 'enabled' | wc -l)\n+\n+if [ \"$check\" != \"on\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ for i in cloudera-scm-agent \\\n+ cloudera-scm-server \\\n+ cloudera-scm-server-db \\\n+ hadoop-hdfs-datanode \\\n+ hadoop-hdfs-namenode \\\n+ hadoop-hdfs-secondarynamenode \\\n+ hadoop-mapreduce-historyserver \\\n+ hadoop-yarn-nodemanager \\\n+ hadoop-yarn-resourcemanager \\\n+ hive-metastore \\\n+ hive-server2 \\\n+ hive-webhcat-server \\\n+ hue \\\n+ oozie \\\n+ postgresql \\\n+ impala-catalog \\\n+ impala-server \\\n+ impala-state-store \\\n+ solr-server \\\n+ spark-history-server\n+ do\n+ chkconfig $i off\n+ done\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/centos/wget_repo",
"diff": "+#!/bin/bash\n+\n+if [ ! -f /etc/yum.repos.d/cloudera-cdh5.repo ]; then\n+ if [ $test_only -eq 0 ]; then\n+ echo '[cloudera-cdh5]' > /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo \"name=Cloudera's Distribution for Hadoop, Version 5\" >> /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo \"baseurl=http://archive.cloudera.com/cdh5/redhat/7/x86_64/cdh/$plugin_version/\" >> /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo \"gpgkey = http://archive.cloudera.com/cdh5/redhat/7/x86_64/cdh/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/cloudera-cdh5.repo\n+ echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-cdh5.repo\n+\n+ echo '[cloudera-manager]' > /etc/yum.repos.d/cloudera-manager.repo\n+ echo 'name=Cloudera Manager' >> /etc/yum.repos.d/cloudera-manager.repo\n+ echo \"baseurl=http://archive.cloudera.com/cm5/redhat/7/x86_64/cm/$plugin_version/\" >> /etc/yum.repos.d/cloudera-manager.repo\n+ echo \"gpgkey = http://archive.cloudera.com/cm5/redhat/7/x86_64/cm/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/cloudera-manager.repo\n+ echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-manager.repo\n+\n+ echo '[navigator-keytrustee]' > /etc/yum.repos.d/kms.repo\n+ echo \"name=Cloudera's Distribution for navigator-Keytrustee, Version 5\" >> /etc/yum.repos.d/kms.repo\n+ echo \"baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/7/x86_64/navigator-keytrustee/$plugin_version/\" >> /etc/yum.repos.d/kms.repo\n+ echo \"gpgkey = http://archive.cloudera.com/navigator-keytrustee5/redhat/7/x86_64/navigator-keytrustee/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/kms.repo\n+ echo 'gpgcheck = 1' >> /etc/yum.repos.d/kms.repo\n+\n+ echo \"[cloudera-kafka]\" > /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"name=Cloudera's Distribution for kafka, Version 2.0.2\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"baseurl=http://archive.cloudera.com/kafka/redhat/7/x86_64/kafka/2.0.2/\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"gpgkey = http://archive.cloudera.com/kafka/redhat/7/x86_64/kafka/RPM-GPG-KEY-cloudera\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+ echo \"gpgcheck = 1\" >> /etc/yum.repos.d/cloudera-kafka.repo\n+\n+ yum clean all\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/common/add_jar",
"diff": "+#!/usr/bin/env bash\n+\n+hadoop=\"2.6.0\"\n+\n+HADOOP_SWIFT_JAR_NAME=\"hadoop-openstack-$hadoop-cdh$plugin_version.jar\"\n+\n+HADOOP_SWIFT_BUILD_LOCATION=\"http://tarballs.openstack.org/sahara/dist/hadoop-openstack/master\"\n+SWIFT_LIB_URI=\"$HADOOP_SWIFT_BUILD_LOCATION/hadoop-openstack-${hadoop}.jar\"\n+HADOOP_SWIFT_JAR_NAME=\"hadoop-openstack.jar\"\n+\n+if [ ! -f $hdfs_lib_dir/$HADOOP_SWIFT_JAR_NAME ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget -O $hdfs_lib_dir/$HADOOP_SWIFT_JAR_NAME $SWIFT_LIB_URI\n+\n+ if [ $? -ne 0 ]; then\n+ echo -e \"Could not download Swift Hadoop FS implementation.\\nAborting\"\n+ exit 1\n+ fi\n+\n+ chmod 0644 $hdfs_lib_dir/$HADOOP_SWIFT_JAR_NAME\n+ else\n+ exit 0\n+ fi\n+fi\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/common/install_cloudera",
"diff": "+#!/bin/bash\n+\n+HADOOP_OPENSTACK_5_9_x_URL=\"https://repository.cloudera.com/artifactory/repo/org/apache/hadoop/hadoop-openstack/2.6.0-cdh$plugin_version/hadoop-openstack-2.6.0-cdh$plugin_version.jar\"\n+\n+dest=/usr/lib/hadoop-mapreduce/hadoop-openstack.jar\n+\n+if [ ! -L \"/usr/lib/oozie/oozie-sharelib-yarn.tar.gz\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget -O $dest $HADOOP_OPENSTACK_5_9_x_URL\n+\n+ ln -s /usr/lib/oozie/oozie-sharelib-yarn /usr/lib/oozie/oozie-sharelib-yarn.tar.gz\n+ ln -s /usr/lib/oozie/oozie-sharelib-mr1 /usr/lib/oozie/oozie-sharelib-mr1.tar.gz\n+ ln -s /usr/lib/oozie/oozie-sharelib-yarn.tar.gz /usr/lib/oozie/oozie-sharelib.tar.gz\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/common/install_extjs",
"diff": "+#!/bin/bash\n+\n+EXTJS_DESTINATION_DIR=\"/var/lib/oozie\"\n+EXTJS_DOWNLOAD_URL=\"http://tarballs.openstack.org/sahara/dist/common-artifacts/ext-2.2.zip\"\n+\n+extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+\n+if [ ! -n \"$EXTJS_DESTINATION_DIR\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ wget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n+\n+ mkdir -p $EXTJS_DESTINATION_DIR\n+ if [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n+ unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ rm -f $extjs_archive\n+ else\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ fi\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/common/unlimited_security_artifacts",
"diff": "+#!/bin/bash\n+\n+if [ ! -n /tmp/UnlimitedPolicy/ ]; then\n+ if [ $test_only -eq 0 ]; then\n+ mkdir /tmp/UnlimitedPolicy/\n+ wget $unlimited_security_location/local_policy.jar -O /tmp/UnlimitedPolicy/local_policy.jar\n+ wget $unlimited_security_location/US_export_policy.jar -O /tmp/UnlimitedPolicy/US_export_policy.jar\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/image.yaml",
"diff": "+arguments:\n+ java_distro:\n+ default: cloudera-jdk\n+ description: The distribution of Java to install. Defaults to openjdk.\n+ choices:\n+ - openjdk\n+ - oracle-java\n+ - cloudera-jdk\n+ plugin_version:\n+ default: 5.11.0\n+ description: The distribution of CDH to install. Defaults to 5.11.0.\n+ hidden: True\n+ required: False\n+ choices:\n+ - 5.11.0\n+ - 5.11.1\n+ - 5.11.2\n+ hdfs_lib_dir:\n+ default: /usr/lib/hadoop-mapreduce\n+ description: The path to HDFS_LIB_DIR. Default to /usr/lib/hadoop-mapreduce\n+ required: False\n+ unlimited_security_location:\n+ default: http://sahara-files.mirantis.com/kerberos-artifacts/\n+ description: Place where UnlimitedSecurity polices are located\n+ required: False\n+\n+validators:\n+ - script: common/install_extjs\n+ - os_case:\n+ - centos:\n+ - package: wget\n+ - script:\n+ centos/wget_repo:\n+ env_vars: [plugin_version]\n+ - ubuntu:\n+ - script:\n+ ubuntu/wget_repo:\n+ env_vars: [plugin_version]\n+ - argument_case:\n+ argument_name: java_distro\n+ cases:\n+ openjdk:\n+ - any:\n+ - all:\n+ - package: java-1.8.0-openjdk-devel\n+ - all:\n+ - package: java-1.7.0-openjdk-devel\n+ cloudera-jdk:\n+ - all:\n+ - package: oracle-j2sdk1.7\n+ - package: ntp\n+ - package:\n+ - cloudera-manager-agent\n+ - cloudera-manager-daemons\n+ - cloudera-manager-server\n+ - cloudera-manager-server-db-2\n+ - package:\n+ - hadoop-hdfs-namenode\n+ - hadoop-hdfs-datanode\n+ - hadoop-hdfs-secondarynamenode\n+ - hadoop-mapreduce\n+ - hadoop-mapreduce-historyserver\n+ - hadoop-yarn-nodemanager\n+ - hadoop-yarn-resourcemanager\n+ - package:\n+ - hbase\n+ - hbase-solr\n+ - package:\n+ - hive-hcatalog\n+ - hive-metastore\n+ - hive-server2\n+ - hive-webhcat-server\n+ - hue\n+ - package:\n+ - oozie\n+ - spark-core\n+ - os_case:\n+ - centos:\n+ - package: spark-history-server\n+ - package: zookeeper\n+ - package: unzip\n+ - package: flume-ng\n+ - package: hadoop-kms\n+ - package:\n+ - impala\n+ - impala-server\n+ - impala-state-store\n+ - impala-catalog\n+ - impala-shell\n+ - package: keytrustee-keyprovider\n+ - package:\n+ - sentry\n+ - solr-server\n+ - solr-doc\n+ - search\n+ - sqoop2\n+ - package:\n+ - kafka\n+ - kafka-server\n+ - script:\n+ common/install_cloudera:\n+ env_vars: [plugin_version]\n+ - os_case:\n+ - centos:\n+ - script: centos/turn_off_services\n+ - ubuntu:\n+ - script: ubuntu/turn_off_services\n+ - script:\n+ common/add_jar:\n+ env_vars: [plugin_version, hdfs_lib_dir]\n+ - script:\n+ common/unlimited_security_artifacts:\n+ env_vars: [unlimited_security_location]\n+ - os_case:\n+ - centos:\n+ - package:\n+ - krb5-server\n+ - krb5-libs\n+ - krb5-workstation\n+ - rng-tools\n+ - package: iptables-services\n+ - script: centos/selinux_permissive\n+ - script: centos/disable_firewall\n+ - package: nmap-ncat\n+ - ubuntu:\n+ - package:\n+ - krb5-admin-server\n+ - libpam-krb5\n+ - ldap-utils\n+ - krb5-user\n+ - rng-tools\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/ubuntu/turn_off_services",
"diff": "+#!/bin/bash\n+\n+check=$(systemctl --no-pager list-unit-files oozie.service | grep 'enabled' | wc -l)\n+\n+if [ \"$check\" != \"on\" ]; then\n+ if [ $test_only -eq 0 ]; then\n+ for i in cloudera-scm-agent \\\n+ cloudera-scm-server \\\n+ cloudera-scm-server-db \\\n+ hadoop-hdfs-datanode \\\n+ hadoop-hdfs-namenode \\\n+ hadoop-hdfs-secondarynamenode \\\n+ hadoop-mapreduce-historyserver \\\n+ hadoop-yarn-nodemanager \\\n+ hadoop-yarn-resourcemanager \\\n+ hive-metastore \\\n+ hive-server2 \\\n+ hive-webhcat-server \\\n+ hue \\\n+ oozie \\\n+ postgresql \\\n+ impala-catalog \\\n+ impala-server \\\n+ impala-state-store \\\n+ solr-server \\\n+ spark-history-server\n+ do\n+ update-rc.d -f $i remove\n+ done\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/ubuntu/wget_repo",
"diff": "+#!/bin/bash\n+\n+if [ ! -f /etc/apt/sources.list.d/cdh5.list ]; then\n+ if [ $test_only -eq 0 ]; then\n+ # Add repository with postgresql package (it's dependency of cloudera packages)\n+ # Base image doesn't contain this repo\n+ echo -e 'deb http://nova.clouds.archive.ubuntu.com/ubuntu/ xenial universe multiverse main' >> /etc/apt/sources.list\n+\n+ # Cloudera repositories\n+ echo \"deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/xenial/amd64/cdh xenial-cdh$plugin_version contrib\" > /etc/apt/sources.list.d/cdh5.list\n+ echo \"deb-src http://archive.cloudera.com/cdh5/ubuntu/xenial/amd64/cdh xenial-cdh$plugin_version contrib\" >> /etc/apt/sources.list.d/cdh5.list\n+\n+ wget -qO - http://archive-primary.cloudera.com/cdh5/ubuntu/xenial/amd64/cdh/archive.key | apt-key add -\n+\n+ echo \"deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/xenial/amd64/cm xenial-cm$plugin_version contrib\" > /etc/apt/sources.list.d/cm5.list\n+ echo \"deb-src http://archive.cloudera.com/cm5/ubuntu/xenial/amd64/cm xenial-cm$plugin_version contrib\" >> /etc/apt/sources.list.d/cm5.list\n+\n+ wget -qO - http://archive-primary.cloudera.com/cm5/ubuntu/xenial/amd64/cm/archive.key | apt-key add -\n+\n+ wget -O /etc/apt/sources.list.d/kms.list http://archive.cloudera.com/navigator-keytrustee5/ubuntu/xenial/amd64/navigator-keytrustee/cloudera.list\n+ wget -qO - http://archive.cloudera.com/navigator-keytrustee5/ubuntu/xenial/amd64/navigator-keytrustee/archive.key | apt-key add -\n+\n+ # add Kafka repository\n+ echo -e 'deb http://archive.cloudera.com/kafka/ubuntu/xenial/amd64/kafka/ xenial-kafka2.2.0 contrib' >> /etc/apt/sources.list\n+ wget -qO - https://archive.cloudera.com/kafka/ubuntu/xenial/amd64/kafka/archive.key | apt-key add -\n+\n+ #change repository priority\n+ echo -e 'Package: zookeeper\\nPin: origin \"archive.cloudera.com\"\\nPin-Priority: 1001' > /etc/apt/preferences.d/cloudera-pin\n+\n+ apt-get update\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_11_0/versionhandler.py",
"new_path": "sahara/plugins/cdh/v5_11_0/versionhandler.py",
"diff": "@@ -19,6 +19,7 @@ from sahara.plugins.cdh.v5_11_0 import cloudera_utils\nfrom sahara.plugins.cdh.v5_11_0 import config_helper\nfrom sahara.plugins.cdh.v5_11_0 import deploy\nfrom sahara.plugins.cdh.v5_11_0 import edp_engine\n+from sahara.plugins.cdh.v5_11_0 import images\nfrom sahara.plugins.cdh.v5_11_0 import plugin_utils\nfrom sahara.plugins.cdh.v5_11_0 import validation\n@@ -32,4 +33,5 @@ class VersionHandler(avm.BaseVersionHandler):\nself.plugin_utils = plugin_utils.PluginUtilsV5110()\nself.deploy = deploy\nself.edp_engine = edp_engine\n+ self.images = images\nself.validation = validation.ValidatorV5110()\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/gate/build-images",
"new_path": "tools/gate/build-images",
"diff": "@@ -67,6 +67,7 @@ case \"$PLUGIN\" in\n\"cloudera\")\nbuild_images \"cdh\" \"5.7.0\" \"centos7\"\nbuild_images \"cdh\" \"5.9.0\" \"centos7\"\n+ build_images \"cdh\" \"5.11.0\" \"centos7\"\n;;\n\"ambari\")\nbuild_images \"ambari\" \"2.4\" \"centos7\"\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Image generation for CDH 5.11.0
Adds image generation and validation for CDH 5.11.0
Change-Id: I09ea1e092e1ba05e7f36191869c4da798e94b287 |
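A minimal caller-side sketch of the entry points added above in sahara/plugins/cdh/v5_11_0/images.py. It only exercises the functions visible in the diff; `cluster` and `remote` stand in for the objects sahara normally supplies, so treat it as an illustration rather than the plugin's own wiring.

from sahara.plugins.cdh.v5_11_0 import images

def check_cdh_5_11_images(cluster, remote):
    # Arguments declared in image.yaml: java_distro, plugin_version, hdfs_lib_dir, ...
    args = images.get_image_arguments()

    # Only verify what is already present on the cluster nodes (no changes made)
    images.validate_images(cluster, test_only=True)

    # Run the validators through a remote handle to actually pack an image
    images.pack_image(remote)
    return args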
488,272 | 06.12.2017 11:25:03 | 10,800 | 7140495c70ef34717342d3b3fa38a416e8ada46b | Upgrading Spark to version 2.2
Adding newest version of Spark to sahara | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/spark-2.2-d7c3a84bd52f735a.yaml",
"diff": "+---\n+features:\n+ - Adding Spark version 2.2 to Sahara.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/spark/plugin.py",
"new_path": "sahara/plugins/spark/plugin.py",
"diff": "@@ -63,7 +63,7 @@ class SparkProvider(p.ProvisioningPluginBase):\ndeprecated = {'enabled': {'status': True},\n'deprecated': {'status': True}}\nresult = {'plugin_labels': copy.deepcopy(default)}\n- stable_versions = ['2.1.0', '1.6.0']\n+ stable_versions = ['2.2', '2.1.0', '1.6.0']\nresult['version_labels'] = {\nversion: copy.deepcopy(\ndefault if version in stable_versions else deprecated\n@@ -72,7 +72,7 @@ class SparkProvider(p.ProvisioningPluginBase):\nreturn result\ndef get_versions(self):\n- return ['2.1.0', '1.6.0', '1.3.1']\n+ return ['2.2', '2.1.0', '1.6.0', '1.3.1']\ndef get_configs(self, hadoop_version):\nreturn c_helper.get_plugin_configs()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/spark/test_plugin.py",
"new_path": "sahara/tests/unit/plugins/spark/test_plugin.py",
"diff": "@@ -80,10 +80,18 @@ class SparkPluginTest(base.SaharaWithDbTestCase):\nself._test_engine('2.1.0', edp.JOB_TYPE_SPARK,\nengine.SparkJobEngine)\n- def test_plugin22_shell_engine(self):\n+ def test_plugin21_shell_engine(self):\nself._test_engine('2.1.0', edp.JOB_TYPE_SHELL,\nengine.SparkShellJobEngine)\n+ def test_plugin22_edp_engine(self):\n+ self._test_engine('2.2', edp.JOB_TYPE_SPARK,\n+ engine.SparkJobEngine)\n+\n+ def test_plugin22_shell_engine(self):\n+ self._test_engine('2.2', edp.JOB_TYPE_SHELL,\n+ engine.SparkShellJobEngine)\n+\ndef _test_engine(self, version, job_type, eng):\ncluster_dict = self._init_cluster_dict(version)\n@@ -201,6 +209,8 @@ class SparkProviderTest(base.SaharaTestCase):\nres['1.6.0'])\nself.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],\nres['2.1.0'])\n+ self.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],\n+ res['2.2'])\ndef test_edp_config_hints(self):\nprovider = pl.SparkProvider()\n@@ -225,6 +235,14 @@ class SparkProviderTest(base.SaharaTestCase):\nself.assertEqual({'args': [], 'configs': []},\nres['job_config'])\n- res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, \"2.1.0\")\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SHELL, \"2.1.0\")\n+ self.assertEqual({'args': [], 'configs': {}, 'params': {}},\n+ res['job_config'])\n+\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, \"2.2\")\nself.assertEqual({'args': [], 'configs': []},\nres['job_config'])\n+\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SHELL, \"2.2\")\n+ self.assertEqual({'args': [], 'configs': {}, 'params': {}},\n+ res['job_config'])\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Upgrading Spark to version 2.2
Adding newest version of Spark to sahara
Change-Id: Ia85e6f28d90789279fd319074ce0e230ffa98be4 |
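A standalone sketch of the label selection performed by the plugin.py hunk above once '2.2' joins the stable list. The exact contents of `default` are defined elsewhere in plugin.py, so the value used here is only an assumption for illustration.

import copy

default = {'enabled': {'status': True}}            # assumed shape, defined elsewhere in plugin.py
deprecated = {'enabled': {'status': True},
              'deprecated': {'status': True}}

stable_versions = ['2.2', '2.1.0', '1.6.0']
all_versions = ['2.2', '2.1.0', '1.6.0', '1.3.1']  # get_versions() after the change

version_labels = {
    version: copy.deepcopy(default if version in stable_versions else deprecated)
    for version in all_versions
}
# '2.2', '2.1.0' and '1.6.0' keep the plain labels; only '1.3.1' carries the deprecated flag.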
488,272 | 22.11.2017 13:34:46 | 10,800 | b001ef2a55798922099e47abb4639125811169be | Decommission of a specific node
Adding the option to decommission a specific node from a cluster.
Partially implements bp: decommission-specific-instance | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/clusters.py",
"new_path": "sahara/api/v2/clusters.py",
"diff": "@@ -60,7 +60,7 @@ def clusters_create_multiple(data):\[email protected]('/clusters/<cluster_id>')\[email protected](\"data-processing:clusters:scale\")\[email protected]_exists(api.get_cluster, 'cluster_id')\[email protected](v_c_schema.CLUSTER_SCALING_SCHEMA, v_c_s.check_cluster_scaling)\[email protected](v_c_schema.CLUSTER_SCALING_SCHEMA_V2, v_c_s.check_cluster_scaling)\ndef clusters_scale(cluster_id, data):\nreturn u.to_wrapped_dict(api.scale_cluster, cluster_id, data)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/api/v2/clusters.py",
"new_path": "sahara/service/api/v2/clusters.py",
"diff": "@@ -54,9 +54,12 @@ def scale_cluster(id, data):\n# the next map is the main object we will work with\n# to_be_enlarged : {node_group_id: desired_amount_of_instances}\nto_be_enlarged = {}\n+ node_group_instance_map = {}\nfor ng in existing_node_groups:\nng_id = g.find(cluster.node_groups, name=ng['name'])['id']\nto_be_enlarged.update({ng_id: ng['count']})\n+ if 'instances' in ng:\n+ node_group_instance_map.update({ng_id: ng['instances']})\nadditional = construct_ngs_for_scaling(cluster, additional_node_groups)\ncluster = conductor.cluster_get(ctx, cluster)\n@@ -82,7 +85,8 @@ def scale_cluster(id, data):\nif node_group.id not in to_be_enlarged:\nto_be_enlarged[node_group.id] = node_group.count\n- api.OPS.provision_scaled_cluster(id, to_be_enlarged)\n+ api.OPS.provision_scaled_cluster(id, to_be_enlarged,\n+ node_group_instance_map)\nreturn cluster\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/heat/heat_engine.py",
"new_path": "sahara/service/heat/heat_engine.py",
"diff": "@@ -84,7 +84,7 @@ class HeatEngine(e.Engine):\nfor node_group in cluster.node_groups:\nconductor.node_group_update(ctx, node_group, {\"count\": 0})\n- def scale_cluster(self, cluster, target_count):\n+ def scale_cluster(self, cluster, target_count, instances_to_delete=None):\nctx = context.ctx()\nrollback_count = self._get_ng_counts(cluster)\n@@ -94,7 +94,8 @@ class HeatEngine(e.Engine):\ninst_ids = self._launch_instances(\ncluster, target_count, SCALE_STAGES,\n- update_stack=True, disable_rollback=False)\n+ update_stack=True, disable_rollback=False,\n+ instances_to_delete=instances_to_delete)\ncluster = conductor.cluster_get(ctx, cluster)\nc_u.clean_cluster_from_empty_ng(cluster)\n@@ -209,10 +210,12 @@ class HeatEngine(e.Engine):\[email protected]_wrapper(\nTrue, step=_('Create Heat stack'), param=('cluster', 1))\ndef _create_instances(self, cluster, target_count, update_stack=False,\n- disable_rollback=True):\n+ disable_rollback=True, instances_to_delete=None):\n+\nstack = ht.ClusterStack(cluster)\n- self._update_instance_count(stack, cluster, target_count)\n+ self._update_instance_count(stack, cluster, target_count,\n+ instances_to_delete)\nstack.instantiate(update_existing=update_stack,\ndisable_rollback=disable_rollback)\nheat.wait_stack_completion(\n@@ -221,12 +224,14 @@ class HeatEngine(e.Engine):\nreturn self._populate_cluster(cluster, stack)\ndef _launch_instances(self, cluster, target_count, stages,\n- update_stack=False, disable_rollback=True):\n+ update_stack=False, disable_rollback=True,\n+ instances_to_delete=None):\n# create all instances\ncluster = c_u.change_cluster_status(cluster, stages[0])\ninst_ids = self._create_instances(\n- cluster, target_count, update_stack, disable_rollback)\n+ cluster, target_count, update_stack, disable_rollback,\n+ instances_to_delete)\n# wait for all instances are up and networks ready\ncluster = c_u.change_cluster_status(cluster, stages[1])\n@@ -246,19 +251,27 @@ class HeatEngine(e.Engine):\nreturn inst_ids\n- def _update_instance_count(self, stack, cluster, target_count):\n+ def _update_instance_count(self, stack, cluster, target_count,\n+ instances_to_delete=None):\nctx = context.ctx()\n+ instances_name_to_delete = {}\n+ if instances_to_delete:\n+ for instance in instances_to_delete:\n+ node_group_id = instance['node_group']['id']\n+ if node_group_id not in instances_name_to_delete:\n+ instances_name_to_delete[node_group_id] = []\n+ instances_name_to_delete[node_group_id].append(\n+ instance['instance_name'])\n+\nfor node_group in cluster.node_groups:\ncount = target_count[node_group.id]\n- stack.add_node_group_extra(node_group.id, count,\n- self._generate_user_data_script)\n-\n- # if number of instances decreases, we need to drop\n- # the excessive ones\n- # instances list doesn't order by creating date, so we should\n- # sort it to make sure deleted instances same as heat deleted.\n- insts = sorted(\n- node_group.instances,\n- key=lambda x: int(x['instance_name'].split('-')[-1]))\n- for i in range(count, node_group.count):\n- conductor.instance_remove(ctx, insts[i])\n+ stack.add_node_group_extra(\n+ node_group.id, count, self._generate_user_data_script,\n+ instances_name_to_delete.get(node_group.id, None))\n+\n+ for inst in node_group.instances:\n+ if (instances_to_delete and\n+ node_group.id in instances_name_to_delete):\n+ if (inst.instance_name in\n+ instances_name_to_delete[node_group.id]):\n+ conductor.instance_remove(ctx, inst)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/heat/templates.py",
"new_path": "sahara/service/heat/templates.py",
"diff": "@@ -136,6 +136,10 @@ def _get_wc_waiter_name(inst_name):\nreturn '%s-wc-waiter' % inst_name\n+def _get_index_from_inst_name(inst_name):\n+ return inst_name.split('-')[-1]\n+\n+\nclass ClusterStack(object):\ndef __init__(self, cluster):\nself.cluster = cluster\n@@ -165,15 +169,16 @@ class ClusterStack(object):\nnode_group=ng.name, info=self.base_info))\ndef add_node_group_extra(self, node_group_id, node_count,\n- gen_userdata_func):\n+ gen_userdata_func, instances_to_delete=None):\nself.node_groups_extra[node_group_id] = {\n'node_count': node_count,\n- 'gen_userdata_func': gen_userdata_func\n+ 'gen_userdata_func': gen_userdata_func,\n+ 'instances_to_delete': instances_to_delete\n}\n- def _get_main_template(self):\n+ def _get_main_template(self, instances_to_delete=None):\noutputs = {}\n- resources = self._serialize_resources(outputs)\n+ resources = self._serialize_resources(outputs, instances_to_delete)\nreturn yaml.safe_dump({\n\"heat_template_version\": heat_common.HEAT_TEMPLATE_VERSION,\n\"description\": self.base_info,\n@@ -181,8 +186,9 @@ class ClusterStack(object):\n\"outputs\": outputs\n})\n- def instantiate(self, update_existing, disable_rollback=True):\n- main_tmpl = self._get_main_template()\n+ def instantiate(self, update_existing, disable_rollback=True,\n+ instances_to_delete=None):\n+ main_tmpl = self._get_main_template(instances_to_delete)\nkwargs = {\n'stack_name': self.cluster.stack_name,\n'timeout_mins': 180,\n@@ -241,7 +247,7 @@ class ClusterStack(object):\n}\n}\n- def _serialize_resources(self, outputs):\n+ def _serialize_resources(self, outputs, instances_to_delete=None):\nresources = {}\nif self.cluster.anti_affinity:\n@@ -250,14 +256,15 @@ class ClusterStack(object):\nresources.update(self._serialize_aa_server_group(i))\nfor ng in self.cluster.node_groups:\n- resources.update(self._serialize_ng_group(ng, outputs))\n+ resources.update(self._serialize_ng_group(ng, outputs,\n+ instances_to_delete))\nfor ng in self.cluster.node_groups:\nresources.update(self._serialize_auto_security_group(ng))\nreturn resources\n- def _serialize_ng_group(self, ng, outputs):\n+ def _serialize_ng_group(self, ng, outputs, instances_to_delete=None):\nng_file_name = \"file://\" + ng.name + \".yaml\"\nself.files[ng_file_name] = self._serialize_ng_file(ng)\n@@ -279,11 +286,19 @@ class ClusterStack(object):\nproperties[AUTO_SECURITY_GROUP_PARAM_NAME] = {\n'get_resource': g.generate_auto_security_group_name(ng)}\n+ removal_policies = []\n+ if self.node_groups_extra[ng.id]['instances_to_delete']:\n+ resource_list = []\n+ for name in self.node_groups_extra[ng.id]['instances_to_delete']:\n+ resource_list.append(_get_index_from_inst_name(name))\n+ removal_policies.append({'resource_list': resource_list})\n+\nreturn {\nng.name: {\n\"type\": \"OS::Heat::ResourceGroup\",\n\"properties\": {\n\"count\": self.node_groups_extra[ng.id]['node_count'],\n+ \"removal_policies\": removal_policies,\n\"resource_def\": {\n\"type\": ng_file_name,\n\"properties\": properties\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/ops.py",
"new_path": "sahara/service/ops.py",
"diff": "@@ -54,9 +54,11 @@ class LocalOps(object):\ncontext.spawn(\"cluster-creating-%s\" % cluster_id,\n_provision_cluster, cluster_id)\n- def provision_scaled_cluster(self, cluster_id, node_group_id_map):\n+ def provision_scaled_cluster(self, cluster_id, node_group_id_map,\n+ node_group_instance_map=None):\ncontext.spawn(\"cluster-scaling-%s\" % cluster_id,\n- _provision_scaled_cluster, cluster_id, node_group_id_map)\n+ _provision_scaled_cluster, cluster_id, node_group_id_map,\n+ node_group_instance_map)\ndef terminate_cluster(self, cluster_id):\ncontext.spawn(\"cluster-terminating-%s\" % cluster_id,\n@@ -94,9 +96,11 @@ class RemoteOps(rpc_utils.RPCClient):\ndef provision_cluster(self, cluster_id):\nself.cast('provision_cluster', cluster_id=cluster_id)\n- def provision_scaled_cluster(self, cluster_id, node_group_id_map):\n+ def provision_scaled_cluster(self, cluster_id, node_group_id_map,\n+ node_group_instance_map=None):\nself.cast('provision_scaled_cluster', cluster_id=cluster_id,\n- node_group_id_map=node_group_id_map)\n+ node_group_id_map=node_group_id_map,\n+ node_group_instance_map=node_group_instance_map)\ndef terminate_cluster(self, cluster_id):\nself.cast('terminate_cluster', cluster_id=cluster_id)\n@@ -143,8 +147,10 @@ class OpsServer(rpc_utils.RPCServer):\n_provision_cluster(cluster_id)\n@request_context\n- def provision_scaled_cluster(self, cluster_id, node_group_id_map):\n- _provision_scaled_cluster(cluster_id, node_group_id_map)\n+ def provision_scaled_cluster(self, cluster_id, node_group_id_map,\n+ node_group_instance_map=None):\n+ _provision_scaled_cluster(cluster_id, node_group_id_map,\n+ node_group_instance_map)\n@request_context\ndef terminate_cluster(self, cluster_id):\n@@ -314,20 +320,30 @@ def _provision_cluster(cluster_id):\n@ops_error_handler(\n_(\"Scaling cluster failed for the following reason(s): {reason}\"))\n-def _provision_scaled_cluster(cluster_id, node_group_id_map):\n+def _provision_scaled_cluster(cluster_id, node_group_id_map,\n+ node_group_instance_map=None):\nctx, cluster, plugin = _prepare_provisioning(cluster_id)\n# Decommissioning surplus nodes with the plugin\ncluster = c_u.change_cluster_status(\ncluster, c_u.CLUSTER_STATUS_DECOMMISSIONING)\n+ try:\ninstances_to_delete = []\n-\nfor node_group in cluster.node_groups:\nnew_count = node_group_id_map[node_group.id]\nif new_count < node_group.count:\n- instances_to_delete += node_group.instances[new_count:\n- node_group.count]\n+ if (node_group_instance_map and\n+ node_group.id in node_group_instance_map):\n+ for instance_ref in node_group_instance_map[\n+ node_group.id]:\n+ instance = _get_instance_obj(node_group.instances,\n+ instance_ref)\n+ instances_to_delete.append(instance)\n+\n+ while node_group.count - new_count > len(instances_to_delete):\n+ instances_to_delete.append(_get_random_instance_from_ng(\n+ node_group.instances, instances_to_delete))\nif instances_to_delete:\ncontext.set_step_type(_(\"Plugin: decommission cluster\"))\n@@ -337,8 +353,8 @@ def _provision_scaled_cluster(cluster_id, node_group_id_map):\ncluster = c_u.change_cluster_status(\ncluster, c_u.CLUSTER_STATUS_SCALING)\ncontext.set_step_type(_(\"Engine: scale cluster\"))\n- instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)\n-\n+ instance_ids = INFRA.scale_cluster(cluster, node_group_id_map,\n+ instances_to_delete)\n# Setting up new nodes with the plugin\nif instance_ids:\nntp_service.configure_ntp(cluster_id, instance_ids)\n@@ -351,6 +367,30 @@ def _provision_scaled_cluster(cluster_id, node_group_id_map):\nc_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE)\n_refresh_health_for_cluster(cluster_id)\n+ except Exception as e:\n+ c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE,\n+ six.text_type(e))\n+\n+\n+def _get_instance_obj(instances, instance_ref):\n+ for instance in instances:\n+ if (instance.instance_id == instance_ref or\n+ instance.instance_name == instance_ref):\n+ return instance\n+\n+ raise exceptions.NotFoundException(str(instance_ref),\n+ _(\"Instance %s not found\"))\n+\n+\n+def _get_random_instance_from_ng(instances, instances_to_delete):\n+ # instances list doesn't order by creating date, so we should\n+ # sort it to make sure deleted instances same as heat deleted.\n+ insts = sorted(instances,\n+ key=lambda x: int(x['instance_name'].split('-')[-1]))\n+ for instance in reversed(insts):\n+ if instance not in instances_to_delete:\n+ return instance\n+\n@ops_error_handler(\n_(\"Terminating cluster failed for the following reason(s): {reason}\"))\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/base.py",
"new_path": "sahara/service/validations/base.py",
"diff": "@@ -17,6 +17,7 @@ import collections\nimport novaclient.exceptions as nova_ex\nfrom oslo_config import cfg\n+from oslo_utils import uuidutils\nimport six\nfrom sahara import conductor as cond\n@@ -373,6 +374,40 @@ def check_resize(cluster, r_node_groups):\ncluster.hadoop_version,\nng_tmp['node_group_template'])\n+ for scaling_ng in r_node_groups:\n+ current_count = ng_map[scaling_ng['name']].count\n+ new_count = scaling_ng['count']\n+ count_diff = current_count - new_count\n+ if 'instances' in scaling_ng:\n+ if len(scaling_ng['instances']) > count_diff:\n+ raise ex.InvalidDataException(\n+ _(\"Number of specific instances (%(instance)s) to\"\n+ \" delete can not be greater than the count difference\"\n+ \" (%(count)s during scaling\")\n+ % {'instance': str(len(scaling_ng['instances'])),\n+ 'count': str(count_diff)})\n+ else:\n+ if len(scaling_ng['instances']) > 0:\n+ is_uuid = uuidutils.is_uuid_like(\n+ scaling_ng['instances'][0])\n+ if is_uuid:\n+ for instance in scaling_ng['instances']:\n+ if not uuidutils.is_uuid_like(instance):\n+ raise ex.InvalidReferenceException(\n+ _(\"You can only reference instances by\"\n+ \" Name or UUID, not both on the same\"\n+ \" request\"))\n+ else:\n+ for instance in scaling_ng['instances']:\n+ if uuidutils.is_uuid_like(instance):\n+ raise ex.InvalidReferenceException(\n+ _(\"You can only reference instances by\"\n+ \" Name or UUID, not both on the same\"\n+ \" request\"))\n+ _check_duplicates(scaling_ng['instances'],\n+ _(\"Duplicate entry for instances to\"\n+ \" delete\"))\n+\ndef check_add_node_groups(cluster, add_node_groups):\ncluster_ng_names = [ng.name for ng in cluster.node_groups]\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/clusters_schema.py",
"new_path": "sahara/service/validations/clusters_schema.py",
"diff": "@@ -134,3 +134,15 @@ CLUSTER_SCALING_SCHEMA = {\n}\n]\n}\n+\n+CLUSTER_SCALING_SCHEMA_V2 = copy.deepcopy(CLUSTER_SCALING_SCHEMA)\n+CLUSTER_SCALING_SCHEMA_V2['properties']['resize_node_groups'][\n+ 'items']['properties'].update(\n+ {\n+ \"instances\": {\n+ \"type\": \"array\",\n+ \"items\": {\n+ \"type\": \"string\",\n+ },\n+ }\n+ })\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/api/v2/base.py",
"new_path": "sahara/tests/unit/service/api/v2/base.py",
"diff": "@@ -77,6 +77,36 @@ SCALE_DATA = {\n]\n}\n+SCALE_DATA_SPECIFIC_INSTANCE = {\n+ 'resize_node_groups': [\n+ {\n+ 'name': 'ng_1',\n+ 'count': 3,\n+ },\n+ {\n+ 'name': 'ng_2',\n+ 'count': 2,\n+ 'instances': ['ng_2_0']\n+ }\n+ ],\n+ 'add_node_groups': []\n+}\n+\n+SCALE_DATA_N_SPECIFIC_INSTANCE = {\n+ 'resize_node_groups': [\n+ {\n+ 'name': 'ng_1',\n+ 'count': 3,\n+ },\n+ {\n+ 'name': 'ng_2',\n+ 'count': 1,\n+ 'instances': ['ng_2_0', 'ng_2_2']\n+ }\n+ ],\n+ 'add_node_groups': []\n+}\n+\nclass FakePlugin(pr_base.ProvisioningPluginBase):\n_info = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/api/v2/test_clusters.py",
"new_path": "sahara/tests/unit/service/api/v2/test_clusters.py",
"diff": "@@ -37,19 +37,54 @@ class FakeOps(object):\ndef provision_cluster(self, id):\nself.calls_order.append('ops.provision_cluster')\n+ cluster = conductor.cluster_get(context.ctx(), id)\n+ target_count = {}\n+ for node_group in cluster.node_groups:\n+ target_count[node_group.id] = node_group.count\n+\n+ for node_group in cluster.node_groups:\n+ conductor.node_group_update(context.ctx(),\n+ node_group, {\"count\": 0})\n+\n+ for node_group in cluster.node_groups:\n+ for i in range(target_count[node_group.id]):\n+ inst = {\n+ \"instance_id\": node_group.name + '_' + str(i),\n+ \"instance_name\": node_group.name + '_' + str(i)\n+ }\n+ conductor.instance_add(context.ctx(), node_group, inst)\nconductor.cluster_update(\ncontext.ctx(), id, {'status': c_u.CLUSTER_STATUS_ACTIVE})\n- def provision_scaled_cluster(self, id, to_be_enlarged):\n+ def provision_scaled_cluster(self, id, to_be_enlarged,\n+ node_group_instance_map=None):\nself.calls_order.append('ops.provision_scaled_cluster')\n+ cluster = conductor.cluster_get(context.ctx(), id)\n+\n# Set scaled to see difference between active and scaled\nfor (ng, count) in six.iteritems(to_be_enlarged):\n+ instances_to_delete = []\n+ if node_group_instance_map:\n+ if ng in node_group_instance_map:\n+ instances_to_delete = self._get_instance(\n+ cluster, node_group_instance_map[ng])\n+ for instance in instances_to_delete:\n+ conductor.instance_remove(context.ctx(), instance)\n+\nconductor.node_group_update(context.ctx(), ng, {'count': count})\nconductor.cluster_update(context.ctx(), id, {'status': 'Scaled'})\ndef terminate_cluster(self, id):\nself.calls_order.append('ops.terminate_cluster')\n+ def _get_instance(self, cluster, instances_to_delete):\n+ instances = []\n+ for node_group in cluster.node_groups:\n+ for instance in node_group.instances:\n+ if instance.instance_id in instances_to_delete:\n+ instances.append(instance)\n+ return instances\n+\nclass TestClusterApi(base.SaharaWithDbTestCase):\ndef setUp(self):\n@@ -134,6 +169,7 @@ class TestClusterApi(base.SaharaWithDbTestCase):\[email protected]('sahara.service.quotas.check_scaling', return_value=None)\ndef test_scale_cluster_success(self, check_scaling, check_cluster):\ncluster = api.create_cluster(api_base.SAMPLE_CLUSTER)\n+ cluster = api.get_cluster(cluster.id)\napi.scale_cluster(cluster.id, api_base.SCALE_DATA)\nresult_cluster = api.get_cluster(cluster.id)\nself.assertEqual('Scaled', result_cluster.status)\n@@ -156,6 +192,46 @@ class TestClusterApi(base.SaharaWithDbTestCase):\n'ops.provision_scaled_cluster',\n'ops.terminate_cluster'], self.calls_order)\n+ @mock.patch('sahara.service.quotas.check_cluster', return_value=None)\n+ @mock.patch('sahara.service.quotas.check_scaling', return_value=None)\n+ def test_scale_cluster_n_specific_instances_success(self, check_scaling,\n+ check_cluster):\n+ cluster = api.create_cluster(api_base.SAMPLE_CLUSTER)\n+ cluster = api.get_cluster(cluster.id)\n+ api.scale_cluster(cluster.id, api_base.SCALE_DATA_N_SPECIFIC_INSTANCE)\n+ result_cluster = api.get_cluster(cluster.id)\n+ self.assertEqual('Scaled', result_cluster.status)\n+ expected_count = {\n+ 'ng_1': 3,\n+ 'ng_2': 1,\n+ 'ng_3': 1,\n+ }\n+ ng_count = 0\n+ for ng in result_cluster.node_groups:\n+ self.assertEqual(expected_count[ng.name], ng.count)\n+ ng_count += 1\n+ self.assertEqual(1, result_cluster.node_groups[1].count)\n+ self.assertNotIn('ng_2_0',\n+ self._get_instances_ids(\n+ result_cluster.node_groups[1]))\n+ self.assertNotIn('ng_2_2',\n+ self._get_instances_ids(\n+ result_cluster.node_groups[1]))\n+ self.assertEqual(3, ng_count)\n+ api.terminate_cluster(result_cluster.id)\n+ self.assertEqual(\n+ ['get_open_ports', 'recommend_configs', 'validate',\n+ 'ops.provision_cluster', 'get_open_ports',\n+ 'recommend_configs', 'validate_scaling',\n+ 'ops.provision_scaled_cluster',\n+ 'ops.terminate_cluster'], self.calls_order)\n+\n+ def _get_instances_ids(self, node_group):\n+ instance_ids = []\n+ for instance in node_group.instances:\n+ instance_ids.append(instance.instance_id)\n+ return instance_ids\n+\[email protected]('sahara.service.quotas.check_cluster', return_value=None)\[email protected]('sahara.service.quotas.check_scaling', return_value=None)\ndef test_scale_cluster_failed(self, check_scaling, check_cluster):\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/test_ops.py",
"new_path": "sahara/tests/unit/service/test_ops.py",
"diff": "@@ -30,7 +30,8 @@ class FakeCluster(object):\nclass FakeNodeGroup(object):\nid = 'id'\ncount = 2\n- instances = [1, 2]\n+ instances = [{'instance_name': 'id-10', 'id': 2},\n+ {'instance_name': 'id-2', 'id': 1}]\nclass FakePlugin(mock.Mock):\n@@ -52,7 +53,8 @@ class FakePlugin(mock.Mock):\ndef decommission_nodes(self, cluster, instances_to_delete):\nTestOPS.SEQUENCE.append('decommission_nodes')\n- def scale_cluster(self, cluster, node_group_id_map):\n+ def scale_cluster(self, cluster, node_group_id_map,\n+ node_group_instance_map=None):\nTestOPS.SEQUENCE.append('plugin.scale_cluster')\ndef cluster_destroy(self, ctx, cluster):\n@@ -63,7 +65,8 @@ class FakeINFRA(object):\ndef create_cluster(self, cluster):\nTestOPS.SEQUENCE.append('create_cluster')\n- def scale_cluster(self, cluster, node_group_id_map):\n+ def scale_cluster(self, cluster, node_group_id_map,\n+ node_group_instance_map=None):\nTestOPS.SEQUENCE.append('INFRA.scale_cluster')\nreturn True\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/validation/test_cluster_scaling_validation.py",
"new_path": "sahara/tests/unit/service/validation/test_cluster_scaling_validation.py",
"diff": "@@ -211,6 +211,23 @@ class TestScalingValidation(u.ValidationTestCase):\nself.assertEqual(1, req_data.call_count)\nself._assert_calls(bad_req, bad_req_i)\n+ @mock.patch(\"sahara.utils.api.request_data\")\n+ @mock.patch(\"sahara.utils.api.bad_request\")\n+ def _assert_cluster_scaling_validation_v2(self,\n+ bad_req=None,\n+ req_data=None,\n+ data=None,\n+ bad_req_i=None):\n+ m_func = mock.Mock()\n+ m_func.__name__ = \"m_func\"\n+ req_data.return_value = data\n+ v.validate(c_schema.CLUSTER_SCALING_SCHEMA_V2,\n+ self._create_object_fun)(m_func)(data=data,\n+ cluster_id='42')\n+\n+ self.assertEqual(1, req_data.call_count)\n+ self._assert_calls(bad_req, bad_req_i)\n+\[email protected](\"sahara.service.api.OPS\")\ndef test_cluster_scaling_scheme_v_resize_ng(self, ops):\nops.get_engine_type_and_version.return_value = \"direct.1.1\"\n@@ -259,6 +276,55 @@ class TestScalingValidation(u.ValidationTestCase):\nu\"allowed ('flavor_id' was unexpected)\")\n)\n+ @mock.patch(\"sahara.service.api.OPS\")\n+ def test_cluster_scaling_scheme_v_resize_ng_v2(self, ops):\n+ ops.get_engine_type_and_version.return_value = \"direct.1.1\"\n+ self._create_object_fun = mock.Mock()\n+ data = {\n+ }\n+ self._assert_cluster_scaling_validation_v2(\n+ data=data,\n+ bad_req_i=(1, 'VALIDATION_ERROR',\n+ u'{} is not valid under any of the given schemas')\n+ )\n+ data = {\n+ 'resize_node_groups': [{}]\n+ }\n+ self._assert_cluster_scaling_validation_v2(\n+ data=data,\n+ bad_req_i=(1, 'VALIDATION_ERROR',\n+ u\"resize_node_groups[0]: 'name' is a required property\")\n+ )\n+ data = {\n+ 'resize_node_groups': [\n+ {\n+ 'name': 'a'\n+ }\n+ ]\n+ }\n+ self._assert_cluster_scaling_validation_v2(\n+ data=data,\n+ bad_req_i=(1, 'VALIDATION_ERROR',\n+ u\"resize_node_groups[0]: 'count' is a required \"\n+ u\"property\")\n+ )\n+ data = {\n+ 'resize_node_groups': [\n+ {\n+ 'name': 'a',\n+ 'flavor_id': '42',\n+ 'instances': ['id1'],\n+ 'count': 2\n+ }\n+ ]\n+ }\n+ self._assert_cluster_scaling_validation_v2(\n+ data=data,\n+ bad_req_i=(1, 'VALIDATION_ERROR',\n+ u\"resize_node_groups[0]: Additional properties are not \"\n+ u\"allowed ('flavor_id' was unexpected)\")\n+ )\n+\[email protected](\"sahara.service.api.OPS\")\ndef test_cluster_scaling_validation_add_ng(self, ops):\nops.get_engine_type_and_version.return_value = \"direct.1.1\"\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Decommission of a specific node
Adding the option to decommission a specific node from a cluster.
Partially implements bp: decommission-specific-instance
Change-Id: I1a858fecc1b32f91c76aea6db14e0d5a419211d1 |
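An example body for the clusters_scale handler in sahara/api/v2/clusters.py above, naming the exact instance to drop; it mirrors SCALE_DATA_SPECIFIC_INSTANCE from the test base, and the node group and instance names are placeholders.

scale_body = {
    'resize_node_groups': [
        {
            'name': 'worker',
            'count': 2,
            # instance names or UUIDs, but not a mix of both in one request
            'instances': ['demo-cluster-worker-003'],
        },
    ],
    'add_node_groups': [],
}
# check_resize() rejects the request if len(instances) exceeds the count
# difference; any remaining surplus is chosen by _get_random_instance_from_ng,
# which picks the highest-numbered instances first.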
488,272 | 16.01.2018 16:24:40 | 10,800 | d46981b47d1e0156aa6fc9d8879de796a428c33d | Changing expected value to job_template_id
Since we are only expecting one job_template_id, it does not make sense to
use the job_templates_id name here | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/api/v2/jobs.py",
"new_path": "sahara/service/api/v2/jobs.py",
"diff": "@@ -30,7 +30,7 @@ LOG = logging.getLogger(__name__)\ndef execute_job(data):\n# Elements common to all job types\n- job_templates_id = data['job_templates_id']\n+ job_template_id = data['job_template_id']\ncluster_id = data['cluster_id']\nconfigs = data.get('job_configs', {})\ninterface = data.get('interface', {})\n@@ -49,7 +49,7 @@ def execute_job(data):\nconfigs['job_execution_info'] = job_execution_info\njob_ex_dict = {'input_id': input_id, 'output_id': output_id,\n- 'job_id': job_templates_id, 'cluster_id': cluster_id,\n+ 'job_id': job_template_id, 'cluster_id': cluster_id,\n'info': {'status': edp.JOB_STATUS_PENDING},\n'job_configs': configs, 'extra': {},\n'interface': interface}\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Changing expected value to job_template_id
Since we are only expecting one job_template_id, it does not make sense to
use the job_templates_id name here
Change-Id: I2c1425dce3db2b150f798e3c10f7ad5f798226d3 |
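A minimal sketch of the request data execute_job() reads after the rename; the UUIDs are placeholders, only the fields visible in the hunk are shown, and input_id/output_id handling is unchanged and omitted here.

data = {
    'job_template_id': 'c8c6a5ad-...',   # previously read as 'job_templates_id'
    'cluster_id': '2f9c2e0a-...',
    'job_configs': {},                   # optional, defaults to {}
    'interface': {},                     # optional, defaults to {}
}
# execute_job(data) stores the template id as job_ex_dict['job_id'] when
# building the job execution record.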
488,288 | 18.01.2018 09:08:30 | 0 | e1f46bb5c2acdf66aee52a237ca626742e76cd0f | Update url links in doc files of Sahara
replace 'http' with 'https' | [
{
"change_type": "MODIFY",
"old_path": "README.rst",
"new_path": "README.rst",
"diff": "@@ -22,7 +22,7 @@ Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html\nHow to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html\n-Source: http://git.openstack.org/cgit/openstack/sahara\n+Source: https://git.openstack.org/cgit/openstack/sahara\nBugs and feature requests: https://bugs.launchpad.net/sahara\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/how-to-participate.rst",
"new_path": "doc/source/contributor/how-to-participate.rst",
"diff": "@@ -22,7 +22,7 @@ Getting started\n* Sign `OpenStack Individual Contributor License Agreement <https://review.openstack.org/#/settings/agreements>`_\n* Make sure that your email is listed in `identities <https://review.openstack.org/#/settings/web-identities>`_\n-* Subscribe to code-reviews. Go to your settings on http://review.openstack.org\n+* Subscribe to code-reviews. Go to your settings on https://review.openstack.org\n* Go to ``watched projects``\n* Add ``openstack/sahara``, ``openstack/sahara-extra``,\n@@ -53,9 +53,9 @@ How to post your first patch for review\n* Checkout Sahara code from `its repository <https://git.openstack.org/cgit/openstack/sahara>`_\n-* Carefully read http://docs.openstack.org/infra/manual/developers.html#development-workflow\n+* Carefully read https://docs.openstack.org/infra/manual/developers.html#development-workflow\n- * Pay special attention to http://docs.openstack.org/infra/manual/developers.html#committing-a-change\n+ * Pay special attention to https://docs.openstack.org/infra/manual/developers.html#committing-a-change\n* Apply and commit your changes\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/user/edp.rst",
"new_path": "doc/source/user/edp.rst",
"diff": "@@ -30,7 +30,7 @@ The EDP features can be used from the sahara web UI which is described in the\n:doc:`dashboard-user-guide`.\nThe EDP features also can be used directly by a client through the\n-`REST api <http://developer.openstack.org/api-ref/data-processing/>`_\n+`REST api <https://developer.openstack.org/api-ref/data-processing/>`_\nEDP Concepts\n------------\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Update url links in doc files of Sahara
replace 'http' with 'https'
Change-Id: Iebee75d464a5f0c4b9b877814ec42ca9be946931 |
488,272 | 19.01.2018 16:56:51 | 10,800 | ab1f327c4db69beb8b65563ce1dcec21fffff215 | Switch sahara swift to work with keystone v3
Keystone v2 is gone and we need to update sahara swift to be able to use
keystone v3. | [
{
"change_type": "MODIFY",
"old_path": "sahara/swift/swift_helper.py",
"new_path": "sahara/swift/swift_helper.py",
"diff": "@@ -55,7 +55,7 @@ def get_swift_configs():\nconfigs = x.load_hadoop_xml_defaults('swift/resources/conf-template.xml')\nfor conf in configs:\nif conf['name'] == HADOOP_SWIFT_AUTH_URL:\n- conf['value'] = su.retrieve_auth_url() + \"tokens/\"\n+ conf['value'] = su.retrieve_auth_url() + \"auth/tokens/\"\nif conf['name'] == HADOOP_SWIFT_TENANT:\nconf['value'] = retrieve_tenant()\nif CONF.os_region_name and conf['name'] == HADOOP_SWIFT_REGION:\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/swift/utils.py",
"new_path": "sahara/swift/utils.py",
"diff": "@@ -29,14 +29,10 @@ SWIFT_URL_SUFFIX = SWIFT_URL_SUFFIX_START + 'sahara'\ndef retrieve_auth_url(endpoint_type=\"publicURL\"):\n- \"\"\"This function returns auth url v2.0 api.\n+ \"\"\"This function returns auth url v3 api.\n- Hadoop Swift library doesn't support keystone v3 api.\n\"\"\"\n- if CONF.use_domain_for_proxy_users:\n- version_suffix = 'v3/auth'\n- else:\n- version_suffix = 'v2.0'\n+ version_suffix = 'v3'\n# return auth url with trailing slash\nreturn clients_base.retrieve_auth_url(\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/edp/binary_retrievers/test_internal_swift.py",
"new_path": "sahara/tests/unit/service/edp/binary_retrievers/test_internal_swift.py",
"diff": "@@ -120,7 +120,7 @@ class TestInternalSwift(base.SaharaTestCase):\njob_binary.extra = dict(user='test', password='secret')\ni_s.get_raw_data_with_context(job_binary)\nself.assertEqual([mock.call(\n- auth_version='2.0',\n+ auth_version='3',\ncacert=None, insecure=False,\nmax_backoff=10,\npreauthtoken='testtoken',\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/edp/job_binaries/swift/test_swift_type.py",
"new_path": "sahara/tests/unit/service/edp/job_binaries/swift/test_swift_type.py",
"diff": "@@ -164,7 +164,7 @@ class TestSwiftType(base.SaharaTestCase):\njob_binary.extra = dict(user='test', password='secret')\nself.i_s.get_raw_data(job_binary, with_context=True)\nself.assertEqual([mock.call(\n- auth_version='2.0',\n+ auth_version='3',\ncacert=None, insecure=False,\nmax_backoff=10,\npreauthtoken='testtoken',\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/swift/test_utils.py",
"new_path": "sahara/tests/unit/swift/test_utils.py",
"diff": "@@ -28,7 +28,7 @@ class SwiftUtilsTest(testbase.SaharaTestCase):\[email protected]('sahara.utils.openstack.base.url_for')\ndef test_retrieve_auth_url(self, url_for_mock):\n- correct = \"https://127.0.0.1:8080/v2.0/\"\n+ correct = \"https://127.0.0.1:8080/v3/\"\ndef _assert(uri):\nurl_for_mock.return_value = uri\n@@ -37,23 +37,23 @@ class SwiftUtilsTest(testbase.SaharaTestCase):\n_assert(\"%s/\" % correct)\n_assert(\"https://127.0.0.1:8080\")\n_assert(\"https://127.0.0.1:8080/\")\n- _assert(\"https://127.0.0.1:8080/v2.0\")\n- _assert(\"https://127.0.0.1:8080/v2.0/\")\n+ _assert(\"https://127.0.0.1:8080/v3\")\n+ _assert(\"https://127.0.0.1:8080/v3/\")\[email protected]('sahara.utils.openstack.base.url_for')\ndef test_retrieve_auth_url_path_present(self, url_for_mock):\n- correct = \"https://127.0.0.1:8080/identity/v2.0/\"\n+ correct = \"https://127.0.0.1:8080/identity/v3/\"\ndef _assert(uri):\nurl_for_mock.return_value = uri\nself.assertEqual(correct, utils.retrieve_auth_url())\n_assert(\"https://127.0.0.1:8080/identity\")\n- _assert(\"https://127.0.0.1:8080/identity/v2.0/\")\n+ _assert(\"https://127.0.0.1:8080/identity/v3/\")\[email protected]('sahara.utils.openstack.base.url_for')\ndef test_retrieve_auth_url_without_port(self, url_for_mock):\n- correct = \"https://127.0.0.1/v2.0/\"\n+ correct = \"https://127.0.0.1/v3/\"\ndef _assert(uri):\nurl_for_mock.return_value = uri\n@@ -62,21 +62,5 @@ class SwiftUtilsTest(testbase.SaharaTestCase):\n_assert(\"%s/\" % correct)\n_assert(\"https://127.0.0.1\")\n_assert(\"https://127.0.0.1/\")\n- _assert(\"https://127.0.0.1/v2.0\")\n- _assert(\"https://127.0.0.1/v2.0/\")\n-\n- @mock.patch('sahara.utils.openstack.base.url_for')\n- def test_retrieve_auth_url_v3(self, url_for_mock):\n- self.override_config('use_domain_for_proxy_users', True)\n- correct = \"https://127.0.0.1/v3/auth/\"\n-\n- def _assert(uri):\n- url_for_mock.return_value = uri\n- self.assertEqual(correct, utils.retrieve_auth_url())\n-\n- _assert(\"%s/\" % correct)\n_assert(\"https://127.0.0.1/v3\")\n- _assert(\"https://127.0.0.1\")\n- _assert(\"https://127.0.0.1/\")\n- _assert(\"https://127.0.0.1/v2.0\")\n- _assert(\"https://127.0.0.1/v2.0/\")\n+ _assert(\"https://127.0.0.1/v3/\")\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/openstack/swift.py",
"new_path": "sahara/utils/openstack/swift.py",
"diff": "@@ -63,7 +63,7 @@ def client(username, password, trust_id=None):\nreturn client_from_token(k.token_from_auth(proxyauth))\nelse:\nreturn swiftclient.Connection(\n- auth_version='2.0',\n+ auth_version='3',\ncacert=CONF.swift.ca_file,\ninsecure=CONF.swift.api_insecure,\nauthurl=su.retrieve_auth_url(CONF.keystone.endpoint_type),\n@@ -80,7 +80,7 @@ def client_from_token(token=None):\nif not token:\ntoken = context.get_auth_token()\n'''return a Swift client authenticated from a token.'''\n- return swiftclient.Connection(auth_version='2.0',\n+ return swiftclient.Connection(auth_version='3',\ncacert=CONF.swift.ca_file,\ninsecure=CONF.swift.api_insecure,\npreauthurl=base.url_for(\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Switch sahara swift to work with keystone v3
Keystone v2 is gone and we need to update sahara swift to be able to use
keystone v3.
Change-Id: I65a2495b8afe2bc30a0db192e23c93cd6b71e437 |
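To show what the auth_version='3' switch above amounts to outside of Sahara's wrappers, here is a minimal python-swiftclient sketch. The Keystone URL, credentials and domain names are placeholders, not values from any real deployment.

    import swiftclient

    # Keystone v3 connection; every value below is a placeholder.
    conn = swiftclient.Connection(
        auth_version='3',
        authurl='https://keystone.example.org/v3',
        user='demo',
        key='secret',
        os_options={
            'project_name': 'demo',
            'user_domain_name': 'Default',
            'project_domain_name': 'Default',
        },
    )
    headers, containers = conn.get_account()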
488,272 | 30.01.2018 14:42:18 | 10,800 | 840ed9c92292b57158c2a485e12795b3bf75358e | Adding Storm doc
Finally writing this long-due documentation for the Storm plugin.
Closes-bug: | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/index.rst",
"new_path": "doc/source/user/index.rst",
"diff": "@@ -27,6 +27,7 @@ Plugins\nvanilla-plugin\nambari-plugin\nspark-plugin\n+ storm-plugin\ncdh-plugin\nmapr-plugin\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/user/plugins.rst",
"new_path": "doc/source/user/plugins.rst",
"diff": "@@ -9,6 +9,7 @@ management/monitoring tools.\n* :doc:`vanilla-plugin` - deploys Vanilla Apache Hadoop\n* :doc:`ambari-plugin` - deploys Hortonworks Data Platform\n* :doc:`spark-plugin` - deploys Apache Spark with Cloudera HDFS\n+* :doc:`storm-plugin` - deploys Apache Storm\n* :doc:`mapr-plugin` - deploys MapR plugin with MapR File System\n* :doc:`cdh-plugin` - deploys Cloudera Hadoop\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "doc/source/user/storm-plugin.rst",
"diff": "+Storm Plugin\n+============\n+\n+The Storm plugin for sahara provides a way to provision Apache Storm clusters\n+on OpenStack in a single click and in an easily repeatable fashion.\n+\n+Currently Storm is installed in standalone mode, with no YARN support.\n+\n+Images\n+------\n+\n+For cluster provisioning, prepared images should be used. The Storm plugin\n+has been developed and tested with the images generated by\n+sahara-image-elements:\n+\n+* https://git.openstack.org/cgit/openstack/sahara-image-elements\n+\n+The Storm plugin requires an image to be tagged in the sahara image registry\n+with two tags: 'storm' and '<Storm version>' (e.g. '1.1.0').\n+\n+Also you should specify the username of the default cloud-user used in the\n+image.\n+\n+Note that the Storm cluster is deployed using the scripts available in the\n+Storm distribution, which allow the user to start all services (nimbus,\n+supervisors and zookeepers), stop all services and so on. As such Storm is not\n+deployed as a standard Ubuntu service and if the virtual machines are rebooted,\n+Storm will not be restarted.\n+\n+Storm configuration\n+-------------------\n+\n+Storm needs few parameters to work and has sensible defaults. If needed they\n+can be changed when creating the sahara cluster template. No node group\n+options are available.\n+\n+Once the cluster is ready, connect with ssh to the master using the `ubuntu`\n+user and the appropriate ssh key. Storm is installed in `/usr/local/storm` and\n+should be completely configured and ready to start executing jobs. At the\n+bottom of the cluster information page from the OpenStack dashboard, a link to\n+the Storm web interface is provided.\n+\n+Cluster Validation\n+------------------\n+\n+When a user creates a Storm cluster using the Storm plugin, the cluster\n+topology requested by user is verified for consistency.\n+\n+Currently there are the following limitations in cluster topology for the\n+Storm plugin:\n+\n++ Cluster must contain exactly one Storm nimbus\n++ Cluster must contain at least one Storm supervisor\n++ Cluster must contain at least one Zookeeper node\n+\n+The tested configuration has nimbus, supervisor, and Zookeeper processes each\n+running on their own nodes.\n+Another possible configuration is one node with nimbus alone, and additional\n+nodes each with supervisor and Zookeeper processes together.\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding Storm doc
Finally writing this long-due documentation for the Storm plugin.
Change-Id: I94bb588a41e181ad3c27d371cfe55938fe0579f7
Closes-bug: #1696991 |
488,272 | 07.02.2018 18:22:52 | 10,800 | f2e0d471fc00f436a8ef17b4056bafff560959b7 | Small doc fixes found during doc day
As a pre-PTG preparation we did a small walkthrough of the Sahara
documentation. These are trivial fixes, mostly making the
documentation consider bare metal as well as VMs. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/intro/overview.rst",
"new_path": "doc/source/intro/overview.rst",
"diff": "@@ -12,9 +12,9 @@ OpenStack. It is worth mentioning that Amazon has provided Hadoop for\nseveral years as Amazon Elastic MapReduce (EMR) service.\nSahara aims to provide users with a simple means to provision Hadoop, Spark,\n-and Storm clusters by specifying several parameters such as the version,\n-cluster topology, hardware node details and more. After a user fills in all\n-the parameters, sahara deploys the cluster in a few minutes. Also sahara\n+and Storm clusters by specifying several parameters such as the framework\n+version, cluster topology, hardware node details and more. After a user fills\n+in all the parameters, sahara deploys the cluster in a few minutes. Also sahara\nprovides means to scale an already provisioned cluster by adding or removing\nworker nodes on demand.\n@@ -53,6 +53,8 @@ The sahara product communicates with the following OpenStack services:\nare used to work with OpenStack, limiting a user's abilities in sahara to\ntheir OpenStack privileges.\n* Compute (nova) - used to provision VMs for data processing clusters.\n+* Bare metal (ironic) - used to provision Baremetal nodes for data processing\n+ clusters.\n* Orchestration (heat) - used to provision and orchestrate the deployment of\ndata processing clusters.\n* Image (glance) - stores VM images, each image containing an operating system\n@@ -90,8 +92,6 @@ For fast cluster provisioning a generic workflow will be as following:\n* for base images without a pre-installed framework, sahara will support\npluggable deployment engines that integrate with vendor tooling.\n- * you can download prepared up-to-date images from\n- http://sahara-files.mirantis.com/images/upstream/\n* define cluster configuration, including cluster size, topology, and\nframework parameters (for example, heap size):\n@@ -99,8 +99,8 @@ For fast cluster provisioning a generic workflow will be as following:\n* to ease the configuration of such parameters, configurable templates\nare provided.\n-* provision the cluster; sahara will provision VMs, install and configure\n- the data processing framework.\n+* provision the cluster; sahara will provision nodes (VMs or baremetal),\n+ install and configure the data processing framework.\n* perform operations on the cluster; add or remove nodes.\n* terminate the cluster when it is no longer needed.\n@@ -118,7 +118,8 @@ For analytics as a service, a generic workflow will be as following:\n* all cluster provisioning and job execution will happen transparently\nto the user.\n- * cluster will be removed automatically after job completion.\n+ * if using a transient cluster, it will be removed automatically after job\n+ completion.\n* get the results of computations (for example, from swift).\n@@ -129,28 +130,28 @@ While provisioning clusters through sahara, the user operates on three types\nof entities: Node Group Templates, Cluster Templates and Clusters.\nA Node Group Template describes a group of nodes within cluster. It contains\n-a list of hadoop processes that will be launched on each instance in a group.\n+a list of processes that will be launched on each instance in a group.\nAlso a Node Group Template may provide node scoped configurations for those\nprocesses. This kind of template encapsulates hardware parameters (flavor)\n-for the node VM and configuration for data processing framework processes\n+for the node instance and configuration for data processing framework processes\nrunning on the node.\nA Cluster Template is designed to bring Node Group Templates together to\nform a Cluster. 
A Cluster Template defines what Node Groups will be included\n-and how many instances will be created in each. Some data processing framework\n+and how many instances will be created for each. Some data processing framework\nconfigurations can not be applied to a single node, but to a whole Cluster.\nA user can specify these kinds of configurations in a Cluster Template. Sahara\nenables users to specify which processes should be added to an anti-affinity\ngroup within a Cluster Template. If a process is included into an\n-anti-affinity group, it means that VMs where this process is going to be\n+anti-affinity group, it means that instances where this process is going to be\nlaunched should be scheduled to different hardware hosts.\n-The Cluster entity represents a collection of VM instances that all have the\n-same data processing framework installed. It is mainly characterized by a VM\n+The Cluster entity represents a collection of instances that all have the\n+same data processing framework installed. It is mainly characterized by an\nimage with a pre-installed framework which will be used for cluster\ndeployment. Users may choose one of the pre-configured Cluster Templates to\n-start a Cluster. To get access to VMs after a Cluster has started, the user\n-should specify a keypair.\n+start a Cluster. To get access to instances after a Cluster has started, the\n+user should specify a keypair.\nSahara provides several constraints on cluster framework topology. You can see\nall constraints in the documentation for the appropriate plugin.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/user/dashboard-user-guide.rst",
"new_path": "doc/source/user/dashboard-user-guide.rst",
"diff": "@@ -16,7 +16,7 @@ Launching a cluster via the sahara UI\nRegistering an Image\n--------------------\n-1) Navigate to the \"Project\" dashboard, then the \"Data Processing\" tab, then\n+1) Navigate to the \"Project\" dashboard, then to the \"Data Processing\" tab, then\nclick on the \"Clusters\" panel and finally the \"Image Registry\" tab.\n2) From that page, click on the \"Register Image\" button at the top right\n@@ -33,7 +33,7 @@ Registering an Image\nCreate Node Group Templates\n---------------------------\n-1) Navigate to the \"Project\" dashboard, then the \"Data Processing\" tab, then\n+1) Navigate to the \"Project\" dashboard, then to the \"Data Processing\" tab, then\nclick on the \"Clusters\" panel and then the \"Node Group Templates\" tab.\n2) From that page, click on the \"Create Template\" button at the top right\n@@ -57,7 +57,7 @@ Create Node Group Templates\nCreate a Cluster Template\n-------------------------\n-1) Navigate to the \"Project\" dashboard, then the \"Data Processing\" tab, then\n+1) Navigate to the \"Project\" dashboard, then to the \"Data Processing\" tab, then\nclick on the \"Clusters\" panel and finally the \"Cluster Templates\" tab.\n2) From that page, click on the \"Create Template\" button at the top right\n@@ -87,7 +87,7 @@ Create a Cluster Template\nLaunching a Cluster\n-------------------\n-1) Navigate to the \"Project\" dashboard, then the \"Data Processing\" tab, then\n+1) Navigate to the \"Project\" dashboard, then to the \"Data Processing\" tab, then\nclick on the \"Clusters\" panel and lastly, click on the \"Clusters\" tab.\n2) Click on the \"Launch Cluster\" button at the top right\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/user/overview.rst",
"new_path": "doc/source/user/overview.rst",
"diff": "@@ -13,13 +13,13 @@ having a different role (set of processes).\nNode group parameters include Hadoop parameters like ``io.sort.mb`` or\n``mapred.child.java.opts``, and several infrastructure parameters like the\n-flavor for VMs or storage location (ephemeral drive or cinder volume).\n+flavor for instances or storage location (ephemeral drive or cinder volume).\nA cluster is characterized by its node groups and its parameters. Like a node\ngroup, a cluster has data processing framework and infrastructure parameters.\nAn example of a cluster-wide Hadoop parameter is ``dfs.replication``. For\ninfrastructure, an example could be image which will be used to launch cluster\n-VMs.\n+instances.\nTemplates\n---------\n@@ -32,14 +32,15 @@ corresponding entities. Their aim is to remove the burden of specifying all\nof the required parameters each time a user wants to launch a cluster.\nIn the REST interface, templates have extended functionality. First you can\n-specify node-scoped parameters here, they will work as defaults for node\n+specify node-scoped parameters, they will work as defaults for node\ngroups. Also with the REST interface, during cluster creation a user can\noverride template parameters for both cluster and node groups.\nTemplates are portable - they can be exported to JSON files and imported\n-later either on the same deployment or on another one. To import an exported\n+either on the same deployment or on another one. To import an exported\ntemplate, replace the placeholder values with appropriate ones. This can be\n-accomplished easily through the CLI or UI, or be done manually.\n+accomplished easily through the CLI or UI, or manually editing the template\n+file.\nProvisioning Plugins\n--------------------\n@@ -62,7 +63,7 @@ Image Registry\nOpenStack starts VMs based on a pre-built image with an installed OS. The image\nrequirements for sahara depend on the plugin and data processing framework\nversion. Some plugins require just a basic cloud image and will install the\n-framework on the VMs from scratch. Some plugins might require images with\n+framework on the instance from scratch. Some plugins might require images with\npre-installed frameworks or Hadoop distributions.\nThe Sahara Image Registry is a feature which helps filter out images during\n@@ -72,5 +73,5 @@ with Image Registry.\nFeatures\n--------\n-Sahara has several interesting features. The full list could be found there:\n+Sahara has several interesting features. The full list could be found here:\n:doc:`features`\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Small doc fixes found during doc day
As a pre-PTG preparation we did a small walkthrough of the Sahara
documentation. These are trivial fixes, mostly making the
documentation consider bare metal as well as VMs.
Change-Id: I9dcf96700392053cc1ac239793c793f0e4a22dd8 |
488,272 | 20.12.2017 21:47:46 | 10,800 | e97e0abb2b15da0e0f7400c26ad75cb10afe319b | Adding Ambari 2.4.2.0 to image gen
We missed Ambari 2.4.2.0 in the Ambari image generation.
We are also disabling CA checking for CentOS/RHEL because the default SSL
certificate, which is generated when the Ambari server is installed, is
invalid.
Partial-bug: | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/centos/disable_certificate_check",
"diff": "+#!/bin/bash\n+\n+config=/etc/python/cert-verification.cfg\n+check=$(cat $config | grep 'verify=disable' | wc -l)\n+\n+if [ $check -eq 0 ]; then\n+ if [ $test_only -eq 0 ]; then\n+ [ -e $config ] && sed -i \"s%^\\(verify=\\s*\\).*$%verify=disable%\" $config\n+ else\n+ exit 0\n+ fi\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/resources/images/image.yaml",
"new_path": "sahara/plugins/ambari/resources/images/image.yaml",
"diff": "arguments:\nambari_version:\n- description: The version of Ambari to install. Defaults to 2.2.1.0.\n- default: 2.2.1.0\n+ description: The version of Ambari to install. Defaults to 2.4.2.0.\n+ default: 2.4.2.0\nchoices:\n+ - 2.4.2.0 # HDP 2.5 / HDP 2.4 / HDP 2.3\n- 2.2.0.0 # HDP 2.3\n- 2.2.1.0 # HDP 2.4\njava_distro:\n@@ -31,6 +32,7 @@ validators:\n- os_case:\n- centos:\n- script: centos/disable_selinux\n+ - script: centos/disable_certificate_check\n- script:\ncentos/setup_java_home:\nenv_vars: [java_distro]\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding Ambari 2.4.2.0 to image gen
We missed Ambari 2.4.2.0 in the Ambari image generation.
We are also disabling CA checking for CentOS/RHEL because the default SSL
certificate, which is generated when the Ambari server is installed, is
invalid.
Partial-bug: #1748507
Change-Id: I272dbab4458c902af404a6365a8a43d56e4ed94e |
488,272 | 10.03.2018 07:11:18 | 10,800 | 33335b449b49dd9a61436449862cd7d094e2199d | Adding support for RHEL images
Adding support for RHEL images to the image generation system.
Note: before starting to pack the image, the user must register the
image using virt-customize and also enable the necessary repos. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/image-gen.rst",
"new_path": "doc/source/contributor/image-gen.rst",
"diff": "@@ -51,6 +51,7 @@ the image packing feature. Plugins may require their own arguments at specific\nversions; use the ``--help`` feature with ``PLUGIN`` and ``VERSION`` to see\nthe appropriate argument structure.\n+\na plausible command-line invocation would be:\n::\n@@ -70,6 +71,16 @@ will first describe how to modify an image packing specification for one\nof the plugins, and second, how to enable the image packing feature for new\nor existing plugins.\n+Note: In case of a RHEL 7 images, it is necessary to register the image before\n+starting to pack it, also enable some required repos.\n+\n+::\n+\n+ virt-customize -v -a $SAHARA_RHEL_IMAGE --sm-register \\\n+ --sm-credentials ${REG_USER}:password:${REG_PASSWORD} --sm-attach \\\n+ pool:${REG_POOL_ID} --run-command 'subscription-manager repos \\\n+ --disable=* --enable=$REPO_A \\ --enable=$REPO_B \\ --enable=$REPO_C'\n+\nDev notes on the CLI itself\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/resources/images/image.yaml",
"new_path": "sahara/plugins/ambari/resources/images/image.yaml",
"diff": "@@ -30,7 +30,7 @@ validators:\noracle-java:\n- script: common/oracle_java\n- os_case:\n- - centos:\n+ - redhat:\n- script: centos/disable_selinux\n- script: centos/disable_certificate_check\n- script:\n@@ -77,7 +77,7 @@ validators:\n- rng-tools\n- iptables-services\n- os_case:\n- - centos:\n+ - redhat:\n- script: centos/disable_ambari\n- script: centos/disable_firewall\n- script:\n@@ -91,7 +91,7 @@ validators:\ncommon/add_jar:\nenv_vars: [hdfs_lib_dir, swift_url]\n- os_case:\n- - centos:\n+ - redhat:\n- package:\n- krb5-server\n- krb5-libs\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_11_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/image.yaml",
"diff": "@@ -27,7 +27,7 @@ arguments:\nvalidators:\n- script: common/install_extjs\n- os_case:\n- - centos:\n+ - redhat:\n- package: wget\n- script:\ncentos/wget_repo:\n@@ -75,7 +75,7 @@ validators:\n- oozie\n- spark-core\n- os_case:\n- - centos:\n+ - redhat:\n- package: spark-history-server\n- package: zookeeper\n- package: unzip\n@@ -101,7 +101,7 @@ validators:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n- os_case:\n- - centos:\n+ - redhat:\n- script: centos/turn_off_services\n- ubuntu:\n- script: ubuntu/turn_off_services\n@@ -112,7 +112,7 @@ validators:\ncommon/unlimited_security_artifacts:\nenv_vars: [unlimited_security_location]\n- os_case:\n- - centos:\n+ - redhat:\n- package:\n- krb5-server\n- krb5-libs\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_7_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_7_0/resources/images/image.yaml",
"diff": "@@ -30,7 +30,7 @@ arguments:\nvalidators:\n- script: common/install_extjs\n- os_case:\n- - centos:\n+ - redhat:\n- package: wget\n- script: centos/wget_repo\n- ubuntu:\n@@ -76,7 +76,7 @@ validators:\n- oozie\n- spark-core\n- os_case:\n- - centos:\n+ - redhat:\n- package: spark-history-server\n- package: zookeeper\n- package: unzip\n@@ -102,7 +102,7 @@ validators:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n- os_case:\n- - centos:\n+ - redhat:\n- script: centos/turn_off_services\n- ubuntu:\n- script: ubuntu/turn_off_services\n@@ -113,7 +113,7 @@ validators:\ncommon/unlimited_security_artifacts:\nenv_vars: [unlimited_security_location]\n- os_case:\n- - centos:\n+ - redhat:\n- package:\n- krb5-server\n- krb5-libs\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_9_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/image.yaml",
"diff": "@@ -28,7 +28,7 @@ arguments:\nvalidators:\n- script: common/install_extjs\n- os_case:\n- - centos:\n+ - redhat:\n- package: wget\n- script:\ncentos/wget_repo:\n@@ -76,7 +76,7 @@ validators:\n- oozie\n- spark-core\n- os_case:\n- - centos:\n+ - redhat:\n- package: spark-history-server\n- package: zookeeper\n- package: unzip\n@@ -102,7 +102,7 @@ validators:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n- os_case:\n- - centos:\n+ - redhat:\n- script: centos/turn_off_services\n- ubuntu:\n- script: ubuntu/turn_off_services\n@@ -113,7 +113,7 @@ validators:\ncommon/unlimited_security_artifacts:\nenv_vars: [unlimited_security_location]\n- os_case:\n- - centos:\n+ - redhat:\n- package:\n- krb5-server\n- krb5-libs\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/images.py",
"new_path": "sahara/plugins/images.py",
"diff": "@@ -186,6 +186,7 @@ class SaharaImageValidatorBase(ImageValidator):\n'centos7': 'redhat',\n'fedora': 'redhat',\n'redhat': 'redhat',\n+ 'rhel': 'redhat',\n'redhatenterpriseserver': 'redhat',\n'ubuntu': 'debian'\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/image.yaml",
"new_path": "sahara/plugins/mapr/resources/images/image.yaml",
"diff": "@@ -33,7 +33,7 @@ validators:\n- wget\n- zip\n- os_case:\n- - centos:\n+ - redhat:\n- package:\n- cups\n- cdparanoia-libs\n@@ -145,7 +145,7 @@ validators:\n- zlib1g-dev\n- script: common/configure_extjs\n- os_case:\n- - centos:\n+ - redhat:\n- copy_script: common/resources/package_utils.sh\n- copy_script: common/resources/packages.json\n- copy_script: common/resources/spec_5.1.0.json\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding support for RHEL images
Adding support for RHEL images to the image generation system.
Note: before starting to pack the image, the user must register the
image using virt-customize and also enable the necessary repos.
Change-Id: Ia8c483d34a26ba0ccfe25b5496cc03af4c1b7808 |
488,272 | 19.03.2018 16:37:36 | 10,800 | fee369b1d8f2d90e9d5b11b6f6693974954b26ee | Preload soci-mysql and soci on RHEL7 images
Story:
Task: | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/mapr/resources/images/centos/epel_dependencies",
"diff": "+#!/bin/bash\n+\n+yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm\n+yum install -y soci soci-mysql\n+yum remove -y epel-release\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/image.yaml",
"new_path": "sahara/plugins/mapr/resources/images/image.yaml",
"diff": "@@ -24,6 +24,8 @@ validators:\n- os_case:\n- ubuntu:\n- script: ubuntu/install_mapr_dependencies\n+ - redhat:\n+ - script: centos/epel_dependencies\n- package:\n- mtools\n- rpcbind\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Preload soci-mysql and soci on RHEL7 images
Change-Id: I789e93349b89bf96bddbb5e745cb859c975c4d80
Story: #1754313
Task: #8682 |
488,272 | 13.03.2018 00:01:03 | 10,800 | 68b447d7f9eef759f3e725cad8d8794fa79d69c5 | File copy times out when file is too big
File copy times out when file is too big due to a problem with the paramiko
write implementation.
The fix proposed comes in two parts:
1) Changing paramiko file write to putfo;
2) Increasing the default copy file timeout.
Story: | [
{
"change_type": "MODIFY",
"old_path": "sahara/utils/ssh_remote.py",
"new_path": "sahara/utils/ssh_remote.py",
"diff": "@@ -73,7 +73,7 @@ ssh_config_options = [\n'ssh_timeout_interactive', default=1800, min=1,\nhelp=\"Overrides timeout for interactive ssh operations, in seconds\"),\ncfg.IntOpt(\n- 'ssh_timeout_files', default=120, min=1,\n+ 'ssh_timeout_files', default=600, min=1,\nhelp=\"Overrides timeout for ssh operations with files, in seconds\"),\n]\n@@ -270,9 +270,8 @@ def _get_http_client(host, port, proxy_command=None, gateway_host=None,\ndef _write_fl(sftp, remote_file, data):\n- fl = sftp.file(remote_file, 'w')\n- fl.write(data)\n- fl.close()\n+ write_data = paramiko.py3compat.StringIO(data)\n+ sftp.putfo(write_data, remote_file)\ndef _append_fl(sftp, remote_file, data):\n"
}
] | Python | Apache License 2.0 | openstack/sahara | File copy times out when file is too big
File copy times out when file is too big due to a problem with the paramiko
write implementation.
The fix proposed comes in two parts:
1) Changing paramiko file write to putfo;
2) Increasing the default copy file timeout.
Change-Id: I9e9d2873d95923cbd8c4729b3a674dfb1b8c2ec1
Story: #1705762 |
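A minimal, self-contained paramiko sketch of the putfo() approach adopted above (host, user and key path are placeholders, and the data is assumed to fit in memory): putfo() streams a file-like object to the remote path in one call instead of looping over SFTPFile.write(), which is the slow path that, per the commit message, made large copies hit the old 120-second ssh_timeout_files default.

    import io

    import paramiko

    # Placeholder connection details; only the putfo() call mirrors the fix.
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('instance.example.org', username='ubuntu',
                   key_filename='/path/to/key.pem')
    sftp = client.open_sftp()

    data = b"large generated configuration contents\n"
    sftp.putfo(io.BytesIO(data), '/tmp/remote_file')  # stream in one call
    sftp.close()
    client.close()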
488,272 | 03.04.2018 10:56:11 | 10,800 | b3c8d754dfec16c0957ab549bfbcc9901e18b49e | Fix MapR dependency on mysql on RHEL
MapR is missing mysql-connector-java, which makes it necessary to have
a subscription enabled on RHEL 7
Story: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/image.yaml",
"new_path": "sahara/plugins/mapr/resources/images/image.yaml",
"diff": "@@ -75,10 +75,14 @@ validators:\n- libtirpc\n- libvisual\n- libxslt\n+ - mariadb\n+ - mariadb-server\n+ - mariadb-libs\n- mesa-dri-drivers\n- mesa-libGL\n- mesa-libGLU\n- mesa-private-llvm\n+ - mysql-connector-java\n- nmap-ncat\n- numactl\n- openjpeg-libs\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fix MapR dependency on mysql on RHEL
MapR is missing mysql-connector-java, which makes it necessary to have
a subscription enabled on RHEL 7
Change-Id: I1866ee0e21edd46773e67cb24654f00aadf8e39a
Story: #2001773 |
488,272 | 12.04.2018 08:43:44 | 10,800 | 2a1d92c89c99acf3f017060df8e669ba41a0f971 | Adding ntpdate and Scala to mapr image
Adding missing packages to MapR
Story:
Story: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/centos/install_scala",
"new_path": "sahara/plugins/mapr/resources/images/centos/install_scala",
"diff": "echo \"START: installing Scala\"\nsudo yum -y update\n-exit 0\n+DEF_VERSION=\"2.11.6\"\nif [ $test_only -eq 0 ]; then\n@@ -12,14 +12,14 @@ if [ $test_only -eq 0 ]; then\necho \"http://www.scala-lang.org is unreachable\" && exit 1\nfi\n- if [ -n \"${scala_version:-}\" ]; then\n+ if [ \"${scala_version}\" != \"1\" ]; then\nVERSION=$scala_version\nelse\nVERSION=\"$(curl -s --fail http://www.scala-lang.org| tr -d '\\n' | sed 's/^.*<div[^<]\\+scala-version\">[^0-9]\\+\\([0-9\\.\\?]\\+\\)<.\\+$/\\1/')\"\nif [ $? != 0 -o -z \"${VERSION}\" ]; then\n- echo \"Installing default version $scala_version\"\n- VERSION=$scala_version\n+ echo \"Installing default version $DEF_VERSION\"\n+ VERSION=${DEF_VERSION}\nfi\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/image.yaml",
"new_path": "sahara/plugins/mapr/resources/images/image.yaml",
"diff": "@@ -11,8 +11,10 @@ arguments:\nhidden: True\nrequired: False\nscala_version:\n- default: 2.11.6\n- description: The version of scala to install. Defaults to 2.11.6.\n+ default: 1\n+ description: The version of scala to install. Defaults to 1 to indicate\n+ that the value should be autodetected or fallback to well\n+ known version.\nhidden: True\nrequired: False\nhdfs_lib_dir:\n@@ -80,6 +82,8 @@ validators:\n- mesa-libGLU\n- mesa-private-llvm\n- nmap-ncat\n+ - ntp\n+ - ntpdate\n- numactl\n- openjpeg-libs\n- patch\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/ubuntu/install_scala",
"new_path": "sahara/plugins/mapr/resources/images/ubuntu/install_scala",
"diff": "#!/bin/bash\necho \"START: installing Scala\"\n-\n+DEF_VERSION=\"2.11.6\"\nif [ $test_only -eq 0 ]; then\n-\nRETURN_CODE=\"$(curl -s -o /dev/null -w \"%{http_code}\" http://www.scala-lang.org/)\"\n-\nif [ \"$RETURN_CODE\" != \"200\" ]; then\necho \"http://www.scala-lang.org is unreachable\" && exit 1\nfi\n- if [ -n \"${scala_version:-}\" ]; then\n+ if [ $(lsb_release -c -s) == \"trusty\" ]; then\n+ VERSION=${DEF_VERSION}\n+ else\n+ if [ \"${scala_version}\" != \"1\" ]; then\nVERSION=$scala_version\nelse\nVERSION=\"$(curl -s --fail http://www.scala-lang.org| tr -d '\\n' | sed 's/^.*<div[^<]\\+scala-version\">[^0-9]\\+\\([0-9\\.\\?]\\+\\)<.\\+$/\\1/')\"\nif [ $? != 0 -o -z \"${VERSION}\" ]; then\n- echo \"Installing default version $scala_version\"\n- VERSION=$scala_version\n+ echo \"Installing default version $DEF_VERSION\"\n+ VERSION=${DEF_VERSION}\n+ fi\nfi\nfi\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding ntpdate and Scala to mapr image
Adding missing packages to MapR
Change-Id: I01cba8f8518a334ccd91ae07d9a210c93ac3649b
Story: #2001833
Story: #2001834 |
488,272 | 12.12.2017 18:17:51 | 10,800 | d9c9fe4c67f93e77ce26963618f29665222f87ae | Adding Ambari missing versions
With the new validation system, the image versions in get_image_arguments()
have to contain all available versions in order to allow cluster creation
and validation with them all.
Story: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/plugin.py",
"new_path": "sahara/plugins/ambari/plugin.py",
"diff": "@@ -270,7 +270,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):\nresource_roots=['plugins/ambari/resources/images'])\ndef get_image_arguments(self, hadoop_version):\n- if hadoop_version != '2.4':\n+ if hadoop_version not in self.get_versions():\nreturn NotImplemented\nreturn self.validator.get_argument_list()\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding Ambari missing versions
With the new validation system, the image versions in get_image_arguments()
have to contain all available versions in order to allow cluster creation
and validation with them all.
Story: #2001888
Change-Id: I88c2a553512a797099a4b2fbd9e9d204475e755b |
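A hypothetical usage sketch (not from the commit's tests) of why the guard change matters: with the hardcoded '2.4' comparison every other Ambari version got NotImplemented back from get_image_arguments(), while the membership test against get_versions() covers them all. The import path is assumed from the repository layout shown in the diffs.

    from sahara.plugins.ambari import plugin as ambari_plugin

    provider = ambari_plugin.AmbariPluginProvider()
    for version in provider.get_versions():
        # With the fix, every supported version yields the validator's arguments.
        assert provider.get_image_arguments(version) is not NotImplemented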
488,272 | 10.05.2018 14:56:38 | 10,800 | d0833f37489c0f9af185263ad9a7798bd94ec79d | Fixing java version for Ambari
In images from SIE the Java version installed for Ambari is 1.8.0; we had
the wrong one here.
Story: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/resources/images/image.yaml",
"new_path": "sahara/plugins/ambari/resources/images/image.yaml",
"diff": "@@ -26,7 +26,7 @@ validators:\nargument_name: java_distro\ncases:\nopenjdk:\n- - package: java-1.7.0-openjdk-devel\n+ - package: java-1.8.0-openjdk-devel\noracle-java:\n- script: common/oracle_java\n- os_case:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixing java version for Ambari
In images from SIE the Java version installed for Ambari is 1.8.0; we had
the wrong one here.
Story: #2002003
Change-Id: I78da2fb94b0e8a362c7b4daae166849974682960 |
488,269 | 07.06.2018 14:52:00 | -28,800 | d0d1a1d00dea1d9af893d226a25a07834f987219 | Updated oozie version
Old download link expired | [
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/how-to-build-oozie.rst",
"new_path": "doc/source/contributor/how-to-build-oozie.rst",
"diff": "@@ -8,12 +8,12 @@ How to build Oozie\nDownload\n--------\n-* Download tarball from `Apache mirror <http://apache-mirror.rbc.ru/pub/apache/oozie/4.0.1>`_\n+* Download tarball from `Apache mirror <http://archive.apache.org/dist/oozie/4.3.1/>`_\n* Unpack it with\n.. sourcecode:: console\n- $ tar -xzvf oozie-4.0.1.tar.gz\n+ $ tar -xzvf oozie-4.3.1.tar.gz\nHadoop Versions\n---------------\n@@ -70,5 +70,5 @@ Also, the pig version can be passed as a maven property with the flag\n``-D pig.version=x.x.x``.\nYou can find similar instructions to build oozie.tar.gz here:\n-http://oozie.apache.org/docs/4.0.0/DG_QuickStart.html#Building_Oozie\n+http://oozie.apache.org/docs/4.3.1/DG_QuickStart.html#Building_Oozie\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Updated oozie version
Old download link expired
Change-Id: I7c98a14511350f53d271ddec62b3d76665ff28a7 |
488,329 | 04.08.2016 14:15:22 | -10,800 | f1722350a7fca51c36dc3181d64235d5e8ad14ce | Boot from volume
Adding the ability to boot a Sahara cluster from a volume.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/quickstart.rst",
"new_path": "doc/source/user/quickstart.rst",
"diff": "@@ -210,6 +210,7 @@ Create a master node group template with the command:\n| Flavor id | 2 |\n| Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |\n| Id | 0f066e14-9a73-4379-bbb4-9d9347633e31 |\n+ | Is boot from volume | False |\n| Is default | False |\n| Is protected | False |\n| Is proxy gateway | False |\n@@ -239,6 +240,42 @@ Create a worker node group template with the command:\n| Flavor id | 2 |\n| Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |\n| Id | 6546bf44-0590-4539-bfcb-99f8e2c11efc |\n+ | Is boot from volume | False |\n+ | Is default | False |\n+ | Is protected | False |\n+ | Is proxy gateway | False |\n+ | Is public | False |\n+ | Name | vanilla-default-worker |\n+ | Node processes | datanode, nodemanager |\n+ | Plugin name | vanilla |\n+ | Security groups | None |\n+ | Use autoconfig | False |\n+ | Version | <plugin_version> |\n+ | Volumes per node | 0 |\n+ +---------------------+--------------------------------------+\n+\n+\n+You can also create node group templates setting a flag --boot-from-volume.\n+This will tell the node group to boot its instances from a volume instead of\n+the image. This feature allows for easier live migrations and improved\n+performance.\n+\n+.. sourcecode:: console\n+\n+ $ openstack dataprocessing node group template create \\\n+ --name vanilla-default-worker --plugin vanilla \\\n+ --plugin-version <plugin_version> --processes datanode nodemanager \\\n+ --flavor 2 --auto-security-group --floating-ip-pool <pool-id> \\\n+ --boot-from-volume\n+ +---------------------+--------------------------------------+\n+ | Field | Value |\n+ +---------------------+--------------------------------------+\n+ | Auto security group | True |\n+ | Availability zone | None |\n+ | Flavor id | 2 |\n+ | Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |\n+ | Id | 6546bf44-0590-4539-bfcb-99f8e2c11efc |\n+ | Is boot from volume | True |\n| Is default | False |\n| Is protected | False |\n| Is proxy gateway | False |\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml",
"diff": "+---\n+features:\n+ - Adding the ability to boot a Sahara cluster from volumes instead of images.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/conductor/manager.py",
"new_path": "sahara/conductor/manager.py",
"diff": "@@ -50,6 +50,7 @@ NODE_GROUP_DEFAULTS = {\n\"volumes_availability_zone\": None,\n\"volume_mount_prefix\": \"/volumes/disk\",\n\"volume_type\": None,\n+ \"boot_from_volume\": False,\n\"floating_ip_pool\": None,\n\"security_groups\": None,\n\"auto_security_group\": False,\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/conductor/objects.py",
"new_path": "sahara/conductor/objects.py",
"diff": "@@ -110,6 +110,8 @@ class NodeGroup(object):\nwhere to spawn volumes\nvolume_mount_prefix\nvolume_type\n+ boot_from_volume - If set to True, the base image will be converted to a\n+ bootable volume.\nfloating_ip_pool - Floating IP Pool name used to assign Floating IPs to\ninstances in this Node Group\nsecurity_groups - List of security groups for instances in this Node Group\n@@ -231,6 +233,7 @@ class NodeGroupTemplate(object):\nvolumes_availability_zone\nvolume_mount_prefix\nvolume_type\n+ boot_from_volume\nfloating_ip_pool\nsecurity_groups\nauto_security_group\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py",
"diff": "+# Copyright 2016 OpenStack Foundation.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n+# implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"Add boot_from_volumes field for node_groups and related classes\n+\n+Revision ID: 034\n+Revises: 033\n+Create Date: 2018-06-06 17:36:04.749264\n+\n+\"\"\"\n+\n+# revision identifiers, used by Alembic.\n+revision = '034'\n+down_revision = '033'\n+\n+from alembic import op\n+import sqlalchemy as sa\n+\n+\n+def upgrade():\n+ op.add_column('node_group_templates',\n+ sa.Column('boot_from_volume', sa.Boolean(), nullable=False))\n+\n+ op.add_column('node_groups',\n+ sa.Column('boot_from_volume', sa.Boolean(), nullable=False))\n+\n+ op.add_column('templates_relations',\n+ sa.Column('boot_from_volume', sa.Boolean(), nullable=False))\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/db/sqlalchemy/models.py",
"new_path": "sahara/db/sqlalchemy/models.py",
"diff": "@@ -117,6 +117,7 @@ class NodeGroup(mb.SaharaBase):\nvolumes_availability_zone = sa.Column(sa.String(255))\nvolume_mount_prefix = sa.Column(sa.String(80))\nvolume_type = sa.Column(sa.String(255))\n+ boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)\ncount = sa.Column(sa.Integer, nullable=False)\nuse_autoconfig = sa.Column(sa.Boolean(), default=True)\n@@ -228,6 +229,7 @@ class NodeGroupTemplate(mb.SaharaBase):\nvolumes_availability_zone = sa.Column(sa.String(255))\nvolume_mount_prefix = sa.Column(sa.String(80))\nvolume_type = sa.Column(sa.String(255))\n+ boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)\nfloating_ip_pool = sa.Column(sa.String(36))\nsecurity_groups = sa.Column(st.JsonListType())\nauto_security_group = sa.Column(sa.Boolean())\n@@ -261,6 +263,7 @@ class TemplatesRelation(mb.SaharaBase):\nvolumes_availability_zone = sa.Column(sa.String(255))\nvolume_mount_prefix = sa.Column(sa.String(80))\nvolume_type = sa.Column(sa.String(255))\n+ boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)\ncount = sa.Column(sa.Integer, nullable=False)\nuse_autoconfig = sa.Column(sa.Boolean(), default=True)\ncluster_template_id = sa.Column(sa.String(36),\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/heat/templates.py",
"new_path": "sahara/service/heat/templates.py",
"diff": "@@ -28,6 +28,7 @@ from sahara.utils import general as g\nfrom sahara.utils.openstack import base as b\nfrom sahara.utils.openstack import heat as h\nfrom sahara.utils.openstack import neutron\n+from sahara.utils.openstack import nova\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n@@ -509,11 +510,20 @@ class ClusterStack(object):\nproperties.update({\n\"name\": inst_name,\n\"flavor\": six.text_type(ng.flavor_id),\n- \"image\": ng.get_image_id(),\n\"admin_user\": ng.image_username,\n\"user_data\": userdata\n})\n+ if ng.boot_from_volume:\n+ resources.update(self._get_bootable_volume(ng))\n+ properties[\"block_device_mapping\"] = [\n+ {\"device_name\": \"vda\",\n+ \"volume_id\": {\"get_resource\": \"bootable_volume\"},\n+ \"delete_on_termination\": \"true\"}]\n+\n+ else:\n+ properties.update({\"image\": ng.get_image_id()})\n+\nresources.update({\nINSTANCE_RESOURCE_NAME: {\n\"type\": \"OS::Nova::Server\",\n@@ -527,6 +537,20 @@ class ClusterStack(object):\nresources.update(self._serialize_wait_condition(ng))\nreturn resources\n+ def _get_bootable_volume(self, node_group):\n+ node_group_flavor = nova.get_flavor(id=node_group.flavor_id)\n+ image_size = node_group_flavor.disk\n+\n+ return {\n+ \"bootable_volume\": {\n+ \"type\": \"OS::Cinder::Volume\",\n+ \"properties\": {\n+ \"size\": image_size,\n+ \"image\": node_group.get_image_id()\n+ }\n+ }\n+ }\n+\ndef _serialize_wait_condition(self, ng):\nif not CONF.heat_enable_wait_condition:\nreturn {}\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validations/node_group_template_schema.py",
"new_path": "sahara/service/validations/node_group_template_schema.py",
"diff": "@@ -119,6 +119,11 @@ NODE_GROUP_TEMPLATE_SCHEMA_V2[\"properties\"].update({\n\"type\": \"string\",\n}})\nNODE_GROUP_TEMPLATE_SCHEMA_V2[\"required\"].append(\"plugin_version\")\n+NODE_GROUP_TEMPLATE_SCHEMA_V2[\"properties\"].update({\n+ \"boot_from_volume\": {\n+ \"type\": \"boolean\",\n+ }})\n+\n# For an update we do not require any fields but we want the given\n# fields to be validated\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/conductor/manager/test_clusters.py",
"new_path": "sahara/tests/unit/conductor/manager/test_clusters.py",
"diff": "@@ -132,6 +132,7 @@ class ClusterTest(test_base.ConductorManagerTestCase):\nng.pop(\"volumes_availability_zone\")\nng.pop(\"volume_type\")\nng.pop(\"floating_ip_pool\")\n+ ng.pop(\"boot_from_volume\")\nng.pop(\"image_username\")\nng.pop(\"open_ports\")\nng.pop(\"auto_security_group\")\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/conductor/manager/test_templates.py",
"new_path": "sahara/tests/unit/conductor/manager/test_templates.py",
"diff": "@@ -458,6 +458,7 @@ class ClusterTemplates(test_base.ConductorManagerTestCase):\nng.pop(\"volume_type\")\nng.pop(\"auto_security_group\")\nng.pop(\"is_proxy_gateway\")\n+ ng.pop(\"boot_from_volume\")\nng.pop('volume_local_to_instance')\nself.assertEqual(SAMPLE_CLT[\"node_groups\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/db/migration/test_migrations.py",
"new_path": "sahara/tests/unit/db/migration/test_migrations.py",
"diff": "@@ -626,6 +626,14 @@ class SaharaMigrationsCheckers(object):\ndef _check_033(self, engine, data):\nself.assertColumnExists(engine, 'clusters', 'anti_affinity_ratio')\n+ def _check_034(self, engine, data):\n+ self.assertColumnExists(engine, 'node_groups',\n+ 'boot_from_volume')\n+ self.assertColumnExists(engine, 'node_group_templates',\n+ 'boot_from_volume')\n+ self.assertColumnExists(engine, 'templates_relations',\n+ 'boot_from_volume')\n+\nclass TestMigrationsMySQL(SaharaMigrationsCheckers,\nbase.BaseWalkMigrationTestCase,\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/heat/test_templates.py",
"new_path": "sahara/tests/unit/service/heat/test_templates.py",
"diff": "@@ -36,12 +36,12 @@ class BaseTestClusterTemplate(base.SaharaWithDbTestCase):\nfloating_ip_pool=floating_ip_pool, image_id=None,\nvolumes_per_node=0, volumes_size=0, id=\"1\",\nimage_username='root', volume_type=None,\n- auto_security_group=True)\n+ boot_from_volume=False, auto_security_group=True)\nng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,\nfloating_ip_pool=floating_ip_pool, image_id=None,\nvolumes_per_node=2, volumes_size=10, id=\"2\",\nimage_username='root', volume_type=volume_type,\n- auto_security_group=True)\n+ boot_from_volume=False, auto_security_group=True)\nreturn ng1, ng2\ndef _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None,\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Boot from volume
Adding the ability to boot a Sahara cluster from a volume.
Story: #2001820
Task: #12558
Change-Id: Ie11c5e7a628c369868d3c56e803da4b9e7d15f85 |
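Reconstructed from the templates.py diff above, the Heat resources emitted when boot_from_volume is enabled take roughly the shape below, written out as the Python dict the serializer builds. The size would come from the flavor's disk; all ids, names and the resource key "inst" are placeholders, and the real code also adds user_data, networking and wait conditions.

    resources = {
        "bootable_volume": {
            "type": "OS::Cinder::Volume",
            "properties": {
                "size": 20,               # placeholder for the flavor's disk size
                "image": "<image-id>",
            },
        },
        "inst": {
            "type": "OS::Nova::Server",
            "properties": {
                "name": "<instance-name>",
                "flavor": "<flavor-id>",
                "admin_user": "<image-username>",
                "block_device_mapping": [{
                    "device_name": "vda",
                    "volume_id": {"get_resource": "bootable_volume"},
                    "delete_on_termination": "true",
                }],
            },
        },
    }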
488,272 | 20.06.2018 15:19:37 | 10,800 | f5bf9ff2643a18cc91c2e62cfd086e869fd00b5d | Fixing extjs check on cdh and mapr
On images generated from sahara-image-elements the cluster gets stuck
due to double execution of the unzip code.
This fix prevents the code from being executed twice and, in case it does
run again, forces unzip to overwrite.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_11_0/resources/images/common/install_extjs",
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/common/install_extjs",
"diff": "EXTJS_DESTINATION_DIR=\"/var/lib/oozie\"\nEXTJS_DOWNLOAD_URL=\"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip\"\n-extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+extjs_basepath=$(basename ${EXTJS_DOWNLOAD_URL})\n+extjs_archive=/tmp/${extjs_basepath}\n+extjs_folder=\"${extjs_basepath%.*}\"\n-if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_archive}\" ]; then\n- if [ $test_only -eq 0 ]; then\n+function setup_extjs {\nwget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n-\nmkdir -p $EXTJS_DESTINATION_DIR\n+}\n+\nif [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n- unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ if [ ! -d \"${EXTJS_DESTINATION_DIR}/${extjs_folder}\" ]; then\n+ setup_extjs\n+ unzip -o -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\nrm -f $extjs_archive\nelse\n- mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ exit 0\nfi\n+else\n+ if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_basepath}\" ]; then\n+ setup_extjs\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\nelse\nexit 0\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_11_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_11_0/resources/images/image.yaml",
"diff": "@@ -25,7 +25,6 @@ arguments:\nrequired: False\nvalidators:\n- - script: common/install_extjs\n- os_case:\n- redhat:\n- package: wget\n@@ -100,6 +99,7 @@ validators:\n- script:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n+ - script: common/install_extjs\n- os_case:\n- redhat:\n- script: centos/turn_off_services\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_13_0/resources/images/common/install_extjs",
"new_path": "sahara/plugins/cdh/v5_13_0/resources/images/common/install_extjs",
"diff": "EXTJS_DESTINATION_DIR=\"/var/lib/oozie\"\nEXTJS_DOWNLOAD_URL=\"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip\"\n-extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+extjs_basepath=$(basename ${EXTJS_DOWNLOAD_URL})\n+extjs_archive=/tmp/${extjs_basepath}\n+extjs_folder=\"${extjs_basepath%.*}\"\n-if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_archive}\" ]; then\n- if [ $test_only -eq 0 ]; then\n+function setup_extjs {\nwget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n-\nmkdir -p $EXTJS_DESTINATION_DIR\n+}\n+\nif [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n- unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ if [ ! -d \"${EXTJS_DESTINATION_DIR}/${extjs_folder}\" ]; then\n+ setup_extjs\n+ unzip -o -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\nrm -f $extjs_archive\nelse\n- mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ exit 0\nfi\n+else\n+ if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_basepath}\" ]; then\n+ setup_extjs\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\nelse\nexit 0\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_13_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_13_0/resources/images/image.yaml",
"diff": "@@ -23,7 +23,6 @@ arguments:\nrequired: False\nvalidators:\n- - script: common/install_extjs\n- os_case:\n- redhat:\n- package: wget\n@@ -97,6 +96,7 @@ validators:\n- script:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n+ - script: common/install_extjs\n- os_case:\n- redhat:\n- script: centos/turn_off_services\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_7_0/resources/images/common/install_extjs",
"new_path": "sahara/plugins/cdh/v5_7_0/resources/images/common/install_extjs",
"diff": "EXTJS_DESTINATION_DIR=\"/var/lib/oozie\"\nEXTJS_DOWNLOAD_URL=\"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip\"\n-extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+extjs_basepath=$(basename ${EXTJS_DOWNLOAD_URL})\n+extjs_archive=/tmp/${extjs_basepath}\n+extjs_folder=\"${extjs_basepath%.*}\"\n-if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_archive}\" ]; then\n- if [ $test_only -eq 0 ]; then\n+function setup_extjs {\nwget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n-\nmkdir -p $EXTJS_DESTINATION_DIR\n+}\n+\nif [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n- unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ if [ ! -d \"${EXTJS_DESTINATION_DIR}/${extjs_folder}\" ]; then\n+ setup_extjs\n+ unzip -o -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\nrm -f $extjs_archive\nelse\n- mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ exit 0\nfi\n+else\n+ if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_basepath}\" ]; then\n+ setup_extjs\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\nelse\nexit 0\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_7_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_7_0/resources/images/image.yaml",
"diff": "@@ -28,7 +28,6 @@ arguments:\nrequired: False\nvalidators:\n- - script: common/install_extjs\n- os_case:\n- redhat:\n- package: wget\n@@ -101,6 +100,7 @@ validators:\n- script:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n+ - script: common/install_extjs\n- os_case:\n- redhat:\n- script: centos/turn_off_services\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_9_0/resources/images/common/install_extjs",
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/common/install_extjs",
"diff": "EXTJS_DESTINATION_DIR=\"/var/lib/oozie\"\nEXTJS_DOWNLOAD_URL=\"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip\"\n-extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+extjs_basepath=$(basename ${EXTJS_DOWNLOAD_URL})\n+extjs_archive=/tmp/${extjs_basepath}\n+extjs_folder=\"${extjs_basepath%.*}\"\n-if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_archive}\" ]; then\n- if [ $test_only -eq 0 ]; then\n+function setup_extjs {\nwget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n-\nmkdir -p $EXTJS_DESTINATION_DIR\n+}\n+\nif [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n- unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ if [ ! -d \"${EXTJS_DESTINATION_DIR}/${extjs_folder}\" ]; then\n+ setup_extjs\n+ unzip -o -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\nrm -f $extjs_archive\nelse\n- mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ exit 0\nfi\n+else\n+ if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_basepath}\" ]; then\n+ setup_extjs\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\nelse\nexit 0\nfi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/cdh/v5_9_0/resources/images/image.yaml",
"new_path": "sahara/plugins/cdh/v5_9_0/resources/images/image.yaml",
"diff": "@@ -26,7 +26,6 @@ arguments:\nrequired: False\nvalidators:\n- - script: common/install_extjs\n- os_case:\n- redhat:\n- package: wget\n@@ -101,6 +100,7 @@ validators:\n- script:\ncommon/install_cloudera:\nenv_vars: [plugin_version]\n+ - script: common/install_extjs\n- os_case:\n- redhat:\n- script: centos/turn_off_services\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/mapr/resources/images/common/configure_extjs",
"new_path": "sahara/plugins/mapr/resources/images/common/configure_extjs",
"diff": "@@ -4,19 +4,27 @@ EXTJS_DESTINATION_DIR=\"/opt/mapr-repository\"\nEXTJS_DOWNLOAD_URL=\"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip\"\nEXTJS_NO_UNPACK=1\n-extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)\n+extjs_basepath=$(basename ${EXTJS_DOWNLOAD_URL})\n+extjs_archive=/tmp/${extjs_basepath}\n+extjs_folder=\"${extjs_basepath%.*}\"\n-if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_archive}\" ]; then\n- if [ $test_only -eq 0 ]; then\n+function setup_extjs {\nwget -O $extjs_archive $EXTJS_DOWNLOAD_URL\n-\nmkdir -p $EXTJS_DESTINATION_DIR\n+}\n+\nif [ -z \"${EXTJS_NO_UNPACK:-}\" ]; then\n- unzip -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\n+ if [ ! -d \"${EXTJS_DESTINATION_DIR}/${extjs_folder}\" ]; then\n+ setup_extjs\n+ unzip -o -d \"$EXTJS_DESTINATION_DIR\" $extjs_archive\nrm -f $extjs_archive\nelse\n- mv $extjs_archive $EXTJS_DESTINATION_DIR\n+ exit 0\nfi\n+else\n+ if [ ! -f \"${EXTJS_DESTINATION_DIR}/${extjs_basepath}\" ]; then\n+ setup_extjs\n+ mv $extjs_archive $EXTJS_DESTINATION_DIR\nelse\nexit 0\nfi\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixing extjs check on cdh and mapr
On images generated from sahara-image-elements the cluster gets stuck
due to double execution of the unzip code.
This fix prevents the code from being executed twice and, in case it does
run again, forces unzip to overwrite.
Change-Id: I73836a516b839bdb368997af3693c139c1fff390
Story: #2002551
Task: #22113 |
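A minimal Python sketch of the idempotent guard this commit introduces in the bash installers: download and unpack ext-2.2 only when the target is missing, and force-overwrite if the unpack ever runs again. The URL, paths and the wget/unzip calls mirror the CDH script; the function names and the `unpack` flag are illustrative and not part of the sahara codebase.

```python
import os
import shutil
import subprocess

EXTJS_URL = ("https://tarballs.openstack.org/sahara-extra/dist/"
             "common-artifacts/ext-2.2.zip")
DEST_DIR = "/var/lib/oozie"                                  # CDH destination; MapR uses /opt/mapr-repository
ARCHIVE = os.path.join("/tmp", os.path.basename(EXTJS_URL))  # /tmp/ext-2.2.zip
FOLDER = os.path.splitext(os.path.basename(EXTJS_URL))[0]    # ext-2.2


def setup_extjs():
    # download once and make sure the destination exists
    subprocess.check_call(["wget", "-O", ARCHIVE, EXTJS_URL])
    os.makedirs(DEST_DIR, exist_ok=True)


def install_extjs(unpack=True):
    if unpack:
        # skip everything if the unpacked folder is already present
        if os.path.isdir(os.path.join(DEST_DIR, FOLDER)):
            return
        setup_extjs()
        # -o forces overwrite, so a second run cannot stall on an interactive prompt
        subprocess.check_call(["unzip", "-o", "-d", DEST_DIR, ARCHIVE])
        os.remove(ARCHIVE)
    else:
        # EXTJS_NO_UNPACK mode: just keep the archive next to the other artifacts
        if os.path.isfile(os.path.join(DEST_DIR, os.path.basename(EXTJS_URL))):
            return
        setup_extjs()
        shutil.move(ARCHIVE, DEST_DIR)
```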
488,272 | 25.06.2018 12:00:34 | 10,800 | e1a36ee28cf9b60d740300fb952d8928f48f8069 | Updating Spark versions
We are adding the new Spark version 2.3.0 | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/spark-2.3-0277fe9feae6668a.yaml",
"diff": "+---\n+upgrade:\n+ - Adding Spark 2.3 to supported plugins list.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/spark/edp_engine.py",
"new_path": "sahara/plugins/spark/edp_engine.py",
"diff": "@@ -26,7 +26,7 @@ from sahara.service.edp.spark import engine as edp_engine\nclass EdpEngine(edp_engine.SparkJobEngine):\n- edp_base_version = \"1.3.1\"\n+ edp_base_version = \"1.6.0\"\ndef __init__(self, cluster):\nsuper(EdpEngine, self).__init__(cluster)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/spark/plugin.py",
"new_path": "sahara/plugins/spark/plugin.py",
"diff": "@@ -63,7 +63,7 @@ class SparkProvider(p.ProvisioningPluginBase):\ndeprecated = {'enabled': {'status': True},\n'deprecated': {'status': True}}\nresult = {'plugin_labels': copy.deepcopy(default)}\n- stable_versions = ['2.2']\n+ stable_versions = ['2.3', '2.2']\nresult['version_labels'] = {\nversion: copy.deepcopy(\ndefault if version in stable_versions else deprecated\n@@ -72,7 +72,7 @@ class SparkProvider(p.ProvisioningPluginBase):\nreturn result\ndef get_versions(self):\n- return ['2.2', '2.1.0', '1.6.0']\n+ return ['2.3', '2.2', '2.1.0', '1.6.0']\ndef get_configs(self, hadoop_version):\nreturn c_helper.get_plugin_configs()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/spark/test_plugin.py",
"new_path": "sahara/tests/unit/plugins/spark/test_plugin.py",
"diff": "@@ -68,6 +68,14 @@ class SparkPluginTest(base.SaharaWithDbTestCase):\nself._test_engine('2.2', edp.JOB_TYPE_SHELL,\nengine.SparkShellJobEngine)\n+ def test_plugin23_edp_engine(self):\n+ self._test_engine('2.3', edp.JOB_TYPE_SPARK,\n+ engine.SparkJobEngine)\n+\n+ def test_plugin23_shell_engine(self):\n+ self._test_engine('2.3', edp.JOB_TYPE_SHELL,\n+ engine.SparkShellJobEngine)\n+\ndef _test_engine(self, version, job_type, eng):\ncluster_dict = self._init_cluster_dict(version)\n@@ -172,6 +180,8 @@ class SparkProviderTest(base.SaharaTestCase):\nres['2.1.0'])\nself.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],\nres['2.2'])\n+ self.assertEqual([edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK],\n+ res['2.3'])\ndef test_edp_config_hints(self):\nprovider = pl.SparkProvider()\n@@ -199,3 +209,11 @@ class SparkProviderTest(base.SaharaTestCase):\nres = provider.get_edp_config_hints(edp.JOB_TYPE_SHELL, \"2.2\")\nself.assertEqual({'args': [], 'configs': {}, 'params': {}},\nres['job_config'])\n+\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SPARK, \"2.3\")\n+ self.assertEqual({'args': [], 'configs': []},\n+ res['job_config'])\n+\n+ res = provider.get_edp_config_hints(edp.JOB_TYPE_SHELL, \"2.3\")\n+ self.assertEqual({'args': [], 'configs': {}, 'params': {}},\n+ res['job_config'])\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Updating Spark versions
We are adding the new Spark version 2.3.0
Change-Id: I3a1c8decdc17c2c9b63af29ee9199cf24f11e0e2 |
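The version-label pattern touched above marks everything outside stable_versions with an extra 'deprecated' label. A small, self-contained sketch of the resulting structure (label shapes copied from the diff, plugin internals omitted):

```python
import copy

default = {'enabled': {'status': True}}
deprecated = {'enabled': {'status': True},
              'deprecated': {'status': True}}

versions = ['2.3', '2.2', '2.1.0', '1.6.0']
stable_versions = ['2.3', '2.2']

version_labels = {
    version: copy.deepcopy(default if version in stable_versions else deprecated)
    for version in versions
}

# '2.3' and '2.2' carry only 'enabled'; '2.1.0' and '1.6.0' also carry
# 'deprecated', which clients can use to warn before cluster creation.
print(version_labels['1.6.0'])
```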
488,269 | 26.05.2018 16:38:27 | -28,800 | bb60fbce1f97e9f6261245027a672a6079562c36 | Trivial: Update Zuul Status Page to correct URL
Current URL of Zuul Status Page in code is:
The correct URL must be:
Remove outdated Jenkins reference. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/jenkins.rst",
"new_path": "doc/source/contributor/jenkins.rst",
"diff": "@@ -4,9 +4,8 @@ Continuous Integration with Jenkins\nEach change made to Sahara core code is tested with unit and integration tests\nand style checks using flake8.\n-Unit tests and style checks are performed on public `OpenStack Jenkins\n-<https://jenkins.openstack.org/>`_ managed by `Zuul\n-<http://status.openstack.org/zuul/>`_.\n+Unit tests and style checks are performed on public `OpenStack Zuul\n+<http://zuul.openstack.org/>`_ instance.\nUnit tests are checked using python 2.7.\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Trivial: Update Zuul Status Page to correct URL
Current URL of Zuul Status Page in code is:
http://status.openstack.org/zuul/
The correct URL must be:
https://zuul.openstack.org/
Remove outdated Jenkins reference.
Change-Id: I7119fe6818a2a4b3144b5cd8b2d241ff8f2cdbb2 |
488,272 | 25.06.2018 08:57:07 | 10,800 | b6504eee922b2ed00d611a8e1bb24bfc7c060884 | Adding Storm 1.2.0 and 1.2.1
Adding new versions of Storm to sahara. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/storm-1.2-af75fedb413de56a.yaml",
"diff": "+---\n+upgrade:\n+ - Adding new versions of Storm, 1.2.0 and 1.2.1. Both will exist under the\n+ same tag 1.2.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/storm/config_helper.py",
"new_path": "sahara/plugins/storm/config_helper.py",
"diff": "@@ -56,10 +56,9 @@ def generate_storm_config(master_hostname, zk_hostnames, version):\n\"storm.local.dir\": \"/app/storm\"\n}\n- # Since pyleus is built using previous versions os Storm we need this\n+ # Since pyleus is built using previous versions of Storm we need this\n# option to allow the cluster to be compatible with pyleus topologies as\n# well as with topologies built using older versions of Storm\n- if version in ['1.0.1', '1.1.0']:\ncfg['client.jartransformer.class'] = (\n\"org.apache.storm.hack.StormShadeTransformer\")\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/storm/edp_engine.py",
"new_path": "sahara/plugins/storm/edp_engine.py",
"diff": "@@ -20,7 +20,7 @@ from sahara.service.edp.storm import engine as edp_engine\nclass EdpStormEngine(edp_engine.StormJobEngine):\n- edp_base_version = \"0.9.2\"\n+ edp_base_version = \"1.0.1\"\n@staticmethod\ndef edp_supported(version):\n@@ -37,7 +37,7 @@ class EdpStormEngine(edp_engine.StormJobEngine):\nclass EdpPyleusEngine(edp_engine.StormPyleusJobEngine):\n- edp_base_version = \"0.9.2\"\n+ edp_base_version = \"1.0.1\"\n@staticmethod\ndef edp_supported(version):\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/storm/plugin.py",
"new_path": "sahara/plugins/storm/plugin.py",
"diff": "@@ -57,13 +57,14 @@ class StormProvider(p.ProvisioningPluginBase):\n'deprecated': {'status': True}}\nresult = {'plugin_labels': copy.deepcopy(default)}\nresult['version_labels'] = {\n+ '1.2': copy.deepcopy(default),\n'1.1.0': copy.deepcopy(default),\n'1.0.1': copy.deepcopy(deprecated),\n}\nreturn result\ndef get_versions(self):\n- return ['1.0.1', '1.1.0']\n+ return ['1.0.1', '1.1.0', '1.2']\ndef get_configs(self, storm_version):\nreturn c_helper.get_plugin_configs()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/storm/test_config_helper.py",
"new_path": "sahara/tests/unit/plugins/storm/test_config_helper.py",
"diff": "@@ -23,6 +23,7 @@ class TestStormConfigHelper(testcase.TestCase):\ndef test_generate_storm_config(self):\nSTORM_101 = '1.0.1'\nSTORM_110 = '1.1.0'\n+ STORM_120 = '1.2'\ntested_versions = []\nmaster_hostname = \"s-master\"\nzk_hostnames = [\"s-zoo\"]\n@@ -42,5 +43,13 @@ class TestStormConfigHelper(testcase.TestCase):\nself.assertEqual(configs_110['client.jartransformer.class'],\n'org.apache.storm.hack.StormShadeTransformer')\ntested_versions.append(STORM_110)\n+ configs_120 = s_config.generate_storm_config(\n+ master_hostname, zk_hostnames, STORM_120)\n+ self.assertNotIn('nimbus.host', configs_120.keys())\n+ self.assertIn('nimbus.seeds', configs_120.keys())\n+ self.assertIn('client.jartransformer.class', configs_120.keys())\n+ self.assertEqual(configs_120['client.jartransformer.class'],\n+ 'org.apache.storm.hack.StormShadeTransformer')\n+ tested_versions.append(STORM_120)\nstorm = s_plugin.StormProvider()\nself.assertEqual(storm.get_versions(), tested_versions)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/storm/test_plugin.py",
"new_path": "sahara/tests/unit/plugins/storm/test_plugin.py",
"diff": "@@ -71,10 +71,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\ncluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\n+ cluster_data_120 = self._get_cluster('cluster_1.2.0', '1.2')\ncluster_data_101['node_groups'] = data\ncluster_data_110['node_groups'] = data\n+ cluster_data_120['node_groups'] = data\n- clusters = [cluster_data_101, cluster_data_110]\n+ clusters = [cluster_data_101, cluster_data_110, cluster_data_120]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -162,10 +164,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\ncluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\n+ cluster_data_120 = self._get_cluster('cluster_1.2.0', '1.2')\ncluster_data_101['node_groups'] = data\ncluster_data_110['node_groups'] = data\n+ cluster_data_120['node_groups'] = data\n- clusters = [cluster_data_101, cluster_data_110]\n+ clusters = [cluster_data_101, cluster_data_110, cluster_data_120]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -194,10 +198,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\ncluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\n+ cluster_data_120 = self._get_cluster('cluster_1.2.0', '1.2')\ncluster_data_101['node_groups'] = data\ncluster_data_110['node_groups'] = data\n+ cluster_data_120['node_groups'] = data\n- clusters = [cluster_data_101, cluster_data_110]\n+ clusters = [cluster_data_101, cluster_data_110, cluster_data_120]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -230,10 +236,12 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ncluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')\ncluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')\n+ cluster_data_120 = self._get_cluster('cluster_1.2.0', '1.2')\ncluster_data_101['node_groups'] = data\ncluster_data_110['node_groups'] = data\n+ cluster_data_120['node_groups'] = data\n- clusters = [cluster_data_101, cluster_data_110]\n+ clusters = [cluster_data_101, cluster_data_110, cluster_data_120]\nfor cluster_data in clusters:\ncluster = conductor.cluster_create(context.ctx(), cluster_data)\n@@ -276,3 +284,11 @@ class StormPluginTest(base.SaharaWithDbTestCase):\ndef test_plugin110_edp_storm_pyleus_engine(self):\nself._test_engine('1.1.0', edp.JOB_TYPE_PYLEUS,\nengine.StormJobEngine)\n+\n+ def test_plugin120_edp_storm_engine(self):\n+ self._test_engine('1.2', edp.JOB_TYPE_STORM,\n+ engine.StormJobEngine)\n+\n+ def test_plugin120_edp_storm_pyleus_engine(self):\n+ self._test_engine('1.2', edp.JOB_TYPE_PYLEUS,\n+ engine.StormJobEngine)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding Storm 1.2.0 and 1.2.1
Adding new versions of Storm to sahara.
Change-Id: I7f4a96f2dc8cb66468866f77e3d4091d2a8d19d1 |
488,272 | 28.06.2018 18:38:13 | 10,800 | 15317a629eb2318aab1ec06837bf7d46774dbe7d | Adding Ambari 2.6 to image pack
Adding Ambari 2.6 to image pack so users can create an image to spawn
HDP 2.6 | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/ambari26-image-pack-88c9aad59bf635b2.yaml",
"diff": "+---\n+features:\n+ - Adding the ability to create Ambari 2.6 images on sahara-image-pack\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/plugin.py",
"new_path": "sahara/plugins/ambari/plugin.py",
"diff": "@@ -276,6 +276,9 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):\ndef pack_image(self, hadoop_version, remote,\ntest_only=False, image_arguments=None):\n+ if hadoop_version == '2.3':\n+ image_arguments['ambari_version'] = '2.4.3.0'\n+\nself.validator.validate(remote, test_only=test_only,\nimage_arguments=image_arguments)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "sahara/plugins/ambari/resources/images/common/mysql_connector_java_link",
"diff": "+#!/bin/bash -x\n+\n+# This is necessary due to the information on the link below\n+# https://community.hortonworks.com/articles/170133/hive-start-failed-because-of-ambari-error-mysql-co.html\n+\n+if [ ! -L /var/lib/ambari-server/resources/mysql-connector-java.jar ]; then\n+ if [ $test_only -eq 0 ]; then\n+ ln -s /usr/share/java/mysql-connector-java.jar /var/lib/ambari-server/resources/mysql-connector-java.jar\n+ else\n+ exit 1\n+ fi\n+else\n+ exit 0\n+fi\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/resources/images/image.yaml",
"new_path": "sahara/plugins/ambari/resources/images/image.yaml",
"diff": "arguments:\nambari_version:\n- description: The version of Ambari to install. Defaults to 2.4.3.0.\n- default: 2.4.3.0\n+ description: The version of Ambari to install. Defaults to 2.6.2.0.\n+ default: 2.6.2.0\nchoices:\n+ - 2.6.2.0 # HDP 2.6 / HDP 2.5 / HDP 2.4\n- 2.4.3.0 # HDP 2.5 / HDP 2.4 / HDP 2.3\n- - 2.2.0.0 # HDP 2.3\n- - 2.2.1.0 # HDP 2.4\njava_distro:\ndefault: openjdk\ndescription: The distribution of Java to install. Defaults to openjdk.\n@@ -29,6 +28,11 @@ validators:\n- package: java-1.8.0-openjdk-devel\noracle-java:\n- script: common/oracle_java\n+ - argument_case:\n+ argument_name: ambari_version\n+ cases:\n+ 2.6.2.0:\n+ - package: libtirpc-devel\n- os_case:\n- redhat:\n- script: centos/disable_selinux\n@@ -67,6 +71,7 @@ validators:\n- mysql-client-5.5\n- mysql-server-5.5\n- libmysql-java\n+ - script: common/mysql_connector_java_link\n- package: ambari-agent\n- script: common/fix_tls_ambari_agent\n- package:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding Ambari 2.6 to image pack
Adding Ambari 2.6 to image pack so users can create an image to spawn
HDP 2.6
Change-Id: Ifb90d8ba1958403f2c00eade013e147660357717 |
488,272 | 03.08.2018 14:54:07 | 10,800 | bb7c0399363330d6099b7be890ed89a0d5db8c33 | Another small fix for cluster creation on APIv2
Small fix for an issue that was blocking creation of multiple clusters using API v2. | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/clusters.py",
"new_path": "sahara/api/v2/clusters.py",
"diff": "@@ -49,7 +49,7 @@ def clusters_create(data):\ndel data['plugin_version']\nif data.get('count', None) is not None:\nresult = api.create_multiple_clusters(data)\n- for c in result:\n+ for c in result['clusters']:\nu._replace_hadoop_version_plugin_version(c['cluster'])\nreturn u.render(result)\nelse:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Another small fix for cluster creation on APIv2
Small fix for an issue that was blocking creation of multiple clusters using API v2.
Change-Id: I6c1db62a4bee3d1b064333b7b2d8b8e2d6ad50f1 |
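The one-line fix above matters because create_multiple_clusters returns a wrapping dict rather than a bare list, so iterating the result directly yields dictionary keys. A hedged sketch of the payload shape, inferred from the diff and simplified to plain dicts:

```python
# Shape implied by the diff: a dict wrapping a 'clusters' list.
result = {
    'clusters': [
        {'cluster': {'id': 'c1', 'hadoop_version': '2.7.1'}},
        {'cluster': {'id': 'c2', 'hadoop_version': '2.7.1'}},
    ]
}

# Iterating `result` directly would only yield the key 'clusters' (a string),
# so the per-cluster rename has to walk result['clusters'] instead.
for c in result['clusters']:
    cluster = c['cluster']
    cluster['plugin_version'] = cluster.pop('hadoop_version')
```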
488,272 | 01.08.2018 15:52:55 | 10,800 | 4ac73ea6ecb001e7973a563a04b25ab914f79f21 | Fixing anti-affinity for Sahara
Sahara anti-affinity has been broken since some earlier problematic changes.
This patch should fix it.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/heat/templates.py",
"new_path": "sahara/service/heat/templates.py",
"diff": "@@ -253,8 +253,8 @@ class ClusterStack(object):\nif self.cluster.anti_affinity:\n# Creating server groups equal to the anti_affinity_ratio\n- for i in range(1, self.cluster.anti_affinity_ratio):\n- resources.update(self._serialize_aa_server_group(i))\n+ for i in range(0, self.cluster.anti_affinity_ratio):\n+ resources.update(self._serialize_aa_server_group(i + 1))\nfor ng in self.cluster.node_groups:\nresources.update(self._serialize_ng_group(ng, outputs,\n@@ -274,13 +274,16 @@ class ClusterStack(object):\nproperties = {\"instance_index\": \"%index%\"}\nif ng.cluster.anti_affinity:\n- ng_count = ng.count\n+ ng_count = self.node_groups_extra[ng.id]['node_count']\n# assuming instance_index also start from index 0\n- for i in range(0, ng_count - 1):\n+ for i in range(0, ng_count):\nserver_group_name = self._get_server_group_name()\nserver_group_resource = {\n\"get_resource\": server_group_name\n}\n+ if SERVER_GROUP_NAMES not in properties:\n+ properties[SERVER_GROUP_NAMES] = []\n+\nproperties[SERVER_GROUP_NAMES].insert(i, server_group_resource)\nif ng.auto_security_group:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixing anti-affinity for Sahara
Sahara anti-affinity has been broken since some problematic changes in
commit cd1569852614698c4843d4c97475d8f8f3069478.
This patch should fix it.
Change-Id: I374c0340cb0f85c00b9a04cd1b23e3912737994c
Co-Authored-By: Joe Topjian <[email protected]>
Story: #2002656
Task: #22466 |
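A simplified sketch of the corrected loops in the anti-affinity fix above: one Heat server group per slot (1..ratio, where the old loop stopped at ratio - 1) and one group reference per instance in the node group (count, where the old loop used count - 1), creating the property list on first use. The property key and the round-robin group selection are assumptions standing in for SERVER_GROUP_NAMES and _get_server_group_name in sahara/service/heat/templates.py.

```python
SERVER_GROUP_NAMES = "server_group_names"  # placeholder for the real constant


def build_server_groups(anti_affinity_ratio):
    # one Heat server group per slot: names 1..ratio (the old loop stopped at ratio - 1)
    return ["server_group_%d" % (i + 1) for i in range(anti_affinity_ratio)]


def attach_server_groups(node_count, anti_affinity_ratio):
    properties = {"instance_index": "%index%"}
    for i in range(node_count):  # the old loop used node_count - 1, leaving one instance out
        # assumed selection scheme: cycle through the groups by index
        group_name = "server_group_%d" % (i % anti_affinity_ratio + 1)
        if SERVER_GROUP_NAMES not in properties:
            properties[SERVER_GROUP_NAMES] = []
        properties[SERVER_GROUP_NAMES].insert(i, {"get_resource": group_name})
    return properties


print(attach_server_groups(node_count=3, anti_affinity_ratio=2))
```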
488,296 | 04.09.2018 21:42:28 | -19,080 | c59780a955872dfb96bd726d888dcc109b74f340 | Correct repo_id_map for hdp 2.5
When an HDP 2.5 cluster installation is done with an off-line
repo, the hdp-utils repo name should be HDP-UTILS-1.1.0.21.
However, this is hardcoded wrongly in the sahara repo_id_map
as HDP-UTILS-1.1.0.20. As a result, the Ambari HDP repo setup
fails.
This fix corrects the repo_id_map.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/deploy.py",
"new_path": "sahara/plugins/ambari/deploy.py",
"diff": "@@ -49,7 +49,7 @@ repo_id_map = {\n},\n\"2.5\": {\n\"HDP\": \"HDP-2.5\",\n- \"HDP-UTILS\": \"HDP-UTILS-1.1.0.20\"\n+ \"HDP-UTILS\": \"HDP-UTILS-1.1.0.21\"\n},\n\"2.6\": {\n\"HDP\": \"HDP-2.6\",\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Correct repo_id_map for hdp 2.5
When an HDP 2.5 cluster installation is done with an off-line
repo, the hdp-utils repo name should be HDP-UTILS-1.1.0.21.
However, this is hardcoded wrongly in the sahara repo_id_map
as HDP-UTILS-1.1.0.20. As a result, the Ambari HDP repo setup
fails.
This fix corrects the repo_id_map.
Change-Id: Ibf7d341cc2d2a53be521039d4f843cdbc5ee880b
Story: #2003654
Task: #26067 |
488,293 | 06.09.2018 11:35:13 | -10,800 | aa514b51e54dbcb52761edb1788d5bffa511584d | Fixed link for more information about Ambari images | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/ambari-plugin.rst",
"new_path": "doc/source/user/ambari-plugin.rst",
"diff": "@@ -26,7 +26,7 @@ Images\nThe sahara Ambari plugin is using minimal (operating system only) images.\nFor more information about Ambari images, refer to\n-`<https://git.openstack.org/cgi/openstack/sahara-image-elements>`_.\n+`<https://git.openstack.org/cgit/openstack/sahara-image-elements>`_.\nHDP plugin requires an image to be tagged in sahara Image Registry with two\ntags: 'ambari' and '<plugin version>' (e.g. '2.5').\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixed link for more information about Ambari images
Change-Id: I91a3310dfff2687975cb8c7c7693106b509943e7 |
488,296 | 25.09.2018 19:28:08 | -19,080 | 4e39a45f4584ab2232b68f250be73a515b5bd521 | adds unit test for ssh_remote.replace_remote_line
Adding a unit test for the new method added to
search for a line by string and replace the old line
with a new line.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/utils/test_ssh_remote.py",
"new_path": "sahara/tests/unit/utils/test_ssh_remote.py",
"diff": "@@ -451,10 +451,26 @@ class TestInstanceInteropHelper(base.SaharaTestCase):\[email protected]('sahara.utils.ssh_remote.InstanceInteropHelper._run_s')\[email protected]('sahara.utils.ssh_remote.InstanceInteropHelper._log_command')\n- def test_execute_on_vm_interactive(self, p_log_command, p_run_s):\n+ def test_replace_remote_line(self, p_log_command, p_run_s):\ninstance = FakeInstance('inst13', '123',\n'10.0.0.13', '10.0.0.13', 'user13', 'key13')\nremote = ssh_remote.InstanceInteropHelper(instance)\n+ description = ('In file \"file\" replacing line begining with string'\n+ '\"str\" with \"newline\"')\n+\n+ remote.replace_remote_line(\"file\", \"str\", \"newline\")\n+ p_run_s.assert_called_once_with(ssh_remote._replace_remote_line,\n+ None, description, \"file\", \"str\",\n+ \"newline\")\n+\n+ p_log_command.assert_called_with(description)\n+\n+ @mock.patch('sahara.utils.ssh_remote.InstanceInteropHelper._run_s')\n+ @mock.patch('sahara.utils.ssh_remote.InstanceInteropHelper._log_command')\n+ def test_execute_on_vm_interactive(self, p_log_command, p_run_s):\n+ instance = FakeInstance('inst14', '123',\n+ '10.0.0.14', '10.0.0.14', 'user14', 'key14')\n+ remote = ssh_remote.InstanceInteropHelper(instance)\ndescription = 'Executing interactively \"factor 42\"'\nremote.execute_on_vm_interactive(\"factor 42\", None)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | adds unit test for ssh_remote.replace_remote_line
Adding a unit test for the new method added to
search for a line by string and replace the old line
with a new line.
Change-Id: I95d71bafebd9d0a4fea499813135fac06d152ab6
Story: #2003176
Task: #26708 |
488,272 | 10.10.2018 17:38:14 | 10,800 | 01d465dd385e1a30d3e6d60d8f5cdc5831aff25b | Fixing image validation for Ambari 2.3
Changing the Ambari version for image validation.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/ambari/plugin.py",
"new_path": "sahara/plugins/ambari/plugin.py",
"diff": "@@ -284,6 +284,10 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):\ndef validate_images(self, cluster, test_only=False, image_arguments=None):\nimage_arguments = self.get_image_arguments(cluster['hadoop_version'])\n+ if cluster['hadoop_version'] == '2.3':\n+ for arguments in image_arguments:\n+ if arguments.name == 'ambari_version':\n+ arguments.default = '2.4.3.0'\nif not test_only:\ninstances = plugin_utils.get_instances(cluster)\nelse:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixing image validation for Ambari 2.3
Changing the Ambari version for image validation.
Story: #2003996
Task: #26942
Change-Id: I54a1370c482a3a2862f1c8313a984fece25efbd2 |
488,283 | 12.11.2018 10:49:25 | -28,800 | 98ac996dce042fb3c908f0fcea303a73f7276eee | Update http link to https
Modify http link to https link | [
{
"change_type": "MODIFY",
"old_path": "doc/source/admin/advanced-configuration-guide.rst",
"new_path": "doc/source/admin/advanced-configuration-guide.rst",
"diff": "@@ -514,7 +514,7 @@ installed. The sources for this plugin can be found in the\nintegration see the sahara documentation sections\n:ref:`building-guest-images-label` and :ref:`swift-integration-label`.\n-.. _Sahara extra repository: http://git.openstack.org/cgit/openstack/sahara-extra\n+.. _Sahara extra repository: https://git.openstack.org/cgit/openstack/sahara-extra\n.. _volume_instance_locality_configuration:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Update http link to https
Modify http link to https link
Change-Id: I73517f80361b12da09baac5b627d580a9c9f4295 |
488,271 | 30.11.2018 09:52:58 | -28,800 | 3c1652624e73ae26ce5a5346c1d1c523fcf3f1fc | fixed word error | [
{
"change_type": "MODIFY",
"old_path": "doc/source/admin/advanced-configuration-guide.rst",
"new_path": "doc/source/admin/advanced-configuration-guide.rst",
"diff": "@@ -24,7 +24,7 @@ destination, they will be substituted at runtime. Other keywords that\ncan be used are: ``{tenant_id}``, ``{network_id}`` and ``{router_id}``.\nAdditionally, if ``proxy_command_use_internal_ip`` is set to ``True``,\n-then the internal IP will be subsituted for ``{host}`` in the command.\n+then the internal IP will be substituted for ``{host}`` in the command.\nOtherwise (if ``False``, by default) the management IP will be used: this\ncorresponds to floating IP if present in the relevant node group, else the\ninternal IP. The option is ignored if ``proxy_command`` is not also set.\n"
}
] | Python | Apache License 2.0 | openstack/sahara | fixed word error
Change-Id: I83f8abacbc1125f688daa61178a1d107f61f1dba |
488,267 | 05.12.2018 11:11:38 | 18,000 | 5bb7f3797cddacd0f180bb1a0b5eee347013a991 | doc: Fix the snippet in "The Script Validator" section
In the snippet of code, store_nfs_version should point to an object
instead of an array. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/image-gen.rst",
"new_path": "doc/source/contributor/image-gen.rst",
"diff": "@@ -186,8 +186,8 @@ as well:\noutput: OUTPUT_VAR\n- script:\nstore_nfs_version: # Because inline is set, this is just a friendly name\n- - inline: rpm -q nfs-utils # Runs this text directly, rather than reading a file\n- - output: nfs-version # Places the stdout of this script into an argument\n+ inline: rpm -q nfs-utils # Runs this text directly, rather than reading a file\n+ output: nfs-version # Places the stdout of this script into an argument\n# for future scripts to consume; if none exists, the\n# argument is created\n"
}
] | Python | Apache License 2.0 | openstack/sahara | doc: Fix the snippet in "The Script Validator" section
In the snippet of code, store_nfs_version should point to an object
instead of an array.
Change-Id: I5093baf6fa849acba0dcacdc813ec22f01c35a84 |
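The doc fix above comes down to YAML semantics: with the extra dashes, inline and output parse as a list of single-key mappings under store_nfs_version, while the commit message says the validator expects one object. A quick check that is independent of sahara (requires PyYAML):

```python
import yaml

broken = """
store_nfs_version:
  - inline: rpm -q nfs-utils
  - output: nfs-version
"""

fixed = """
store_nfs_version:
  inline: rpm -q nfs-utils
  output: nfs-version
"""

print(yaml.safe_load(broken)["store_nfs_version"])
# [{'inline': 'rpm -q nfs-utils'}, {'output': 'nfs-version'}]  -> a list (array), wrong shape
print(yaml.safe_load(fixed)["store_nfs_version"])
# {'inline': 'rpm -q nfs-utils', 'output': 'nfs-version'}      -> a single mapping (object)
```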
488,272 | 07.11.2018 10:14:51 | 10,800 | 0b8ab036e7a4b1beedbc8bcd2d33736a9e296536 | Fixing cluster scale
The current implementation fails if we try to scale from different
node groups. | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/ops.py",
"new_path": "sahara/service/ops.py",
"diff": "@@ -327,10 +327,27 @@ def _provision_cluster(cluster_id):\n_refresh_health_for_cluster(cluster_id)\n+def _specific_inst_to_delete(node_group, node_group_instance_map=None):\n+ if node_group_instance_map:\n+ if node_group.id in node_group_instance_map:\n+ return True\n+ return False\n+\n+\n@ops_error_handler(\n_(\"Scaling cluster failed for the following reason(s): {reason}\"))\ndef _provision_scaled_cluster(cluster_id, node_group_id_map,\nnode_group_instance_map=None):\n+ \"\"\"Provision scaled cluster.\n+\n+ :param cluster_id: Id of cluster to be scaled.\n+\n+ :param node_group_id_map: Dictionary in the format\n+ node_group_id: number of instances.\n+\n+ :param node_group_instance_map: Specifies the instances to be removed in\n+ each node group.\n+ \"\"\"\nctx, cluster, plugin = _prepare_provisioning(cluster_id)\n# Decommissioning surplus nodes with the plugin\n@@ -340,19 +357,25 @@ def _provision_scaled_cluster(cluster_id, node_group_id_map,\ntry:\ninstances_to_delete = []\nfor node_group in cluster.node_groups:\n+ ng_inst_to_delete_count = 0\n+ # new_count is the new number of instance on the current node group\nnew_count = node_group_id_map[node_group.id]\nif new_count < node_group.count:\n- if (node_group_instance_map and\n- node_group.id in node_group_instance_map):\n- for instance_ref in node_group_instance_map[\n- node_group.id]:\n- instance = _get_instance_obj(node_group.instances,\n- instance_ref)\n- instances_to_delete.append(instance)\n-\n- while node_group.count - new_count > len(instances_to_delete):\n+ # Adding selected instances to delete to the list\n+ if _specific_inst_to_delete(node_group,\n+ node_group_instance_map):\n+ for instance_ref in node_group_instance_map[node_group.id]:\n+ instances_to_delete.append(_get_instance_obj(\n+ node_group.instances, instance_ref))\n+ ng_inst_to_delete_count += 1\n+\n+ # Adding random instances to the list when the number of\n+ # specific instances does not equals the difference between the\n+ # current count and the new count of instances.\n+ while node_group.count - new_count > ng_inst_to_delete_count:\ninstances_to_delete.append(_get_random_instance_from_ng(\nnode_group.instances, instances_to_delete))\n+ ng_inst_to_delete_count += 1\nif instances_to_delete:\ncontext.set_step_type(_(\"Plugin: decommission cluster\"))\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/api/v2/base.py",
"new_path": "sahara/tests/unit/service/api/v2/base.py",
"diff": "@@ -85,7 +85,7 @@ SCALE_DATA_SPECIFIC_INSTANCE = {\n},\n{\n'name': 'ng_2',\n- 'count': 2,\n+ 'count': 1,\n'instances': ['ng_2_0']\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/api/v2/test_clusters.py",
"new_path": "sahara/tests/unit/service/api/v2/test_clusters.py",
"diff": "@@ -244,6 +244,37 @@ class TestClusterApi(base.SaharaWithDbTestCase):\n'ops.provision_scaled_cluster',\n'ops.terminate_cluster'], self.calls_order)\n+ @mock.patch('sahara.service.quotas.check_cluster', return_value=None)\n+ @mock.patch('sahara.service.quotas.check_scaling', return_value=None)\n+ def test_scale_cluster_specific_and_non_specific(self, check_scaling,\n+ check_cluster):\n+ cluster = api.create_cluster(api_base.SAMPLE_CLUSTER)\n+ cluster = api.get_cluster(cluster.id)\n+ api.scale_cluster(cluster.id, api_base.SCALE_DATA_SPECIFIC_INSTANCE)\n+ result_cluster = api.get_cluster(cluster.id)\n+ self.assertEqual('Scaled', result_cluster.status)\n+ expected_count = {\n+ 'ng_1': 3,\n+ 'ng_2': 1,\n+ 'ng_3': 1,\n+ }\n+ ng_count = 0\n+ for ng in result_cluster.node_groups:\n+ self.assertEqual(expected_count[ng.name], ng.count)\n+ ng_count += 1\n+ self.assertEqual(1, result_cluster.node_groups[1].count)\n+ self.assertNotIn('ng_2_0',\n+ self._get_instances_ids(\n+ result_cluster.node_groups[1]))\n+ self.assertEqual(3, ng_count)\n+ api.terminate_cluster(result_cluster.id)\n+ self.assertEqual(\n+ ['get_open_ports', 'recommend_configs', 'validate',\n+ 'ops.provision_cluster', 'get_open_ports',\n+ 'recommend_configs', 'validate_scaling',\n+ 'ops.provision_scaled_cluster',\n+ 'ops.terminate_cluster'], self.calls_order)\n+\ndef _get_instances_ids(self, node_group):\ninstance_ids = []\nfor instance in node_group.instances:\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Fixing cluster scale
The current implementation fails if we try to scale from different
node groups.
Change-Id: Ifb9e4b55959e10c9e5cb74c86bbdba9ffed50ceb |
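A condensed sketch of the corrected scale-down selection described above: the surplus is now counted per node group (ng_inst_to_delete_count) instead of against the global delete list, so pinned instances in one group no longer hide the surplus of another. The data shapes are simplified stand-ins for the conductor objects; the real logic lives in _provision_scaled_cluster.

```python
import random


def pick_instances_to_delete(node_groups, new_counts, specific=None):
    """node_groups: {ng_id: [instance ids]}; new_counts: {ng_id: target count};
    specific: optional {ng_id: [instance ids that must be removed]}."""
    specific = specific or {}
    to_delete = []
    for ng_id, instances in node_groups.items():
        surplus = len(instances) - new_counts[ng_id]
        if surplus <= 0:
            continue
        picked = list(specific.get(ng_id, []))[:surplus]   # user-selected instances first
        remaining = [i for i in instances if i not in picked]
        while len(picked) < surplus:                       # per-group counter, not a global one
            picked.append(remaining.pop(random.randrange(len(remaining))))
        to_delete.extend(picked)
    return to_delete


# Mirrors the new unit test: ng_2 shrinks from 2 to 1 and the caller pins 'ng_2_0'.
print(pick_instances_to_delete(
    {'ng_1': ['ng_1_0', 'ng_1_1', 'ng_1_2'], 'ng_2': ['ng_2_0', 'ng_2_1']},
    {'ng_1': 3, 'ng_2': 1},
    {'ng_2': ['ng_2_0']}))   # -> ['ng_2_0']
```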
488,272 | 27.11.2018 09:18:11 | 10,800 | 9bf50ce571442fc39245b9b83f5be6df6e618f11 | APIv2 Changing return payload to project_id
As part of the APIv2 work we need to change all tenant_id references
to project_id on the return payload.
Story:
Task: | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/api-v2-return-payload-a84a609db410228a.yaml",
"diff": "+---\n+other:\n+ - As part of the APIv2 work we changed all tenant_id references\n+ to project_id on the return payload of REST calls.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/cluster_templates.py",
"new_path": "sahara/api/v2/cluster_templates.py",
"diff": "@@ -33,6 +33,7 @@ def cluster_templates_list():\nresult = api.get_cluster_templates(**u.get_request_args().to_dict())\nfor ct in result:\nu._replace_hadoop_version_plugin_version(ct)\n+ u._replace_tenant_id_project_id(ct)\nreturn u.render(res=result, name='cluster_templates')\n@@ -47,6 +48,7 @@ def cluster_templates_create(data):\ndel data['plugin_version']\nresult = api.create_cluster_template(data).to_wrapped_dict()\nu._replace_hadoop_version_plugin_version(result['cluster_template'])\n+ u._replace_tenant_id_project_id(result['cluster_template'])\nreturn u.render(result)\n@@ -57,6 +59,7 @@ def cluster_templates_get(cluster_template_id):\nresult = u.to_wrapped_dict_no_render(\napi.get_cluster_template, cluster_template_id)\nu._replace_hadoop_version_plugin_version(result['cluster_template'])\n+ u._replace_tenant_id_project_id(result['cluster_template'])\nreturn u.render(result)\n@@ -72,6 +75,7 @@ def cluster_templates_update(cluster_template_id, data):\nresult = u.to_wrapped_dict_no_render(\napi.update_cluster_template, cluster_template_id, data)\nu._replace_hadoop_version_plugin_version(result['cluster_template'])\n+ u._replace_tenant_id_project_id(result['cluster_template'])\nreturn u.render(result)\n@@ -88,7 +92,7 @@ def _cluster_template_export_helper(template):\ntemplate.pop('id')\ntemplate.pop('updated_at')\ntemplate.pop('created_at')\n- template.pop('tenant_id')\n+ template.pop('project_id')\ntemplate.pop('is_default')\ntemplate['default_image_id'] = '{default_image_id}'\ntemplate['node_groups'] = '{node_groups}'\n@@ -101,6 +105,7 @@ def cluster_template_export(cluster_template_id):\ncontent = u.to_wrapped_dict_no_render(\napi.export_cluster_template, cluster_template_id)\nu._replace_hadoop_version_plugin_version(content['cluster_template'])\n+ u._replace_tenant_id_project_id(content['cluster_template'])\n_cluster_template_export_helper(content['cluster_template'])\nres = u.render(content)\nres.headers.add('Content-Disposition', 'attachment',\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/clusters.py",
"new_path": "sahara/api/v2/clusters.py",
"diff": "@@ -35,6 +35,7 @@ def clusters_list():\nresult = api.get_clusters(**u.get_request_args().to_dict())\nfor c in result:\nu._replace_hadoop_version_plugin_version(c)\n+ u._replace_tenant_id_project_id(c)\nreturn u.render(res=result, name='clusters')\n@@ -51,10 +52,12 @@ def clusters_create(data):\nresult = api.create_multiple_clusters(data)\nfor c in result['clusters']:\nu._replace_hadoop_version_plugin_version(c['cluster'])\n+ u._replace_tenant_id_project_id(c['cluster'])\nreturn u.render(result)\nelse:\nresult = api.create_cluster(data).to_wrapped_dict()\nu._replace_hadoop_version_plugin_version(result['cluster'])\n+ u._replace_tenant_id_project_id(c['cluster'])\nreturn u.render(result)\n@@ -66,6 +69,7 @@ def clusters_scale(cluster_id, data):\nresult = u.to_wrapped_dict_no_render(\napi.scale_cluster, cluster_id, data)\nu._replace_hadoop_version_plugin_version(result['cluster'])\n+ u._replace_tenant_id_project_id(result['cluster'])\nreturn u.render(result)\n@@ -79,6 +83,7 @@ def clusters_get(cluster_id):\nresult = u.to_wrapped_dict_no_render(\napi.get_cluster, cluster_id, show_events)\nu._replace_hadoop_version_plugin_version(result['cluster'])\n+ u._replace_tenant_id_project_id(result['cluster'])\nreturn u.render(result)\n@@ -90,6 +95,7 @@ def clusters_update(cluster_id, data):\nresult = u.to_wrapped_dict_no_render(\napi.update_cluster, cluster_id, data)\nu._replace_hadoop_version_plugin_version(result['cluster'])\n+ u._replace_tenant_id_project_id(result['cluster'])\nreturn u.render(result)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/data_sources.py",
"new_path": "sahara/api/v2/data_sources.py",
"diff": "@@ -31,6 +31,8 @@ rest = u.RestV2('data-sources', __name__)\nv.validate_sorting_data_sources)\ndef data_sources_list():\nresult = api.get_data_sources(**u.get_request_args().to_dict())\n+ for ds in result:\n+ u._replace_tenant_id_project_id(ds)\nreturn u.render(res=result, name='data_sources')\n@@ -38,14 +40,18 @@ def data_sources_list():\[email protected](\"data-processing:data-sources:register\")\[email protected](v_d_s_schema.DATA_SOURCE_SCHEMA, v_d_s.check_data_source_create)\ndef data_source_register(data):\n- return u.render(api.register_data_source(data).to_wrapped_dict())\n+ result = api.register_data_source(data).to_wrapped_dict()\n+ u._replace_tenant_id_project_id(result['data_source'])\n+ return u.render(result)\[email protected]('/data-sources/<data_source_id>')\[email protected](\"data-processing:data-sources:get\")\[email protected]_exists(api.get_data_source, 'data_source_id')\ndef data_source_get(data_source_id):\n- return u.to_wrapped_dict(api.get_data_source, data_source_id)\n+ result = u.to_wrapped_dict(api.get_data_source, data_source_id)\n+ u._replace_tenant_id_project_id(result['data_source'])\n+ return result\[email protected]('/data-sources/<data_source_id>')\n@@ -61,4 +67,6 @@ def data_source_delete(data_source_id):\[email protected]_exists(api.get_data_source, 'data_source_id')\[email protected](v_d_s_schema.DATA_SOURCE_UPDATE_SCHEMA)\ndef data_source_update(data_source_id, data):\n- return u.to_wrapped_dict(api.data_source_update, data_source_id, data)\n+ result = u.to_wrapped_dict(api.data_source_update, data_source_id, data)\n+ u._replace_tenant_id_project_id(result['data_source'])\n+ return result\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/job_binaries.py",
"new_path": "sahara/api/v2/job_binaries.py",
"diff": "@@ -28,7 +28,9 @@ rest = u.RestV2('job-binaries', __name__)\[email protected](\"data-processing:job-binaries:create\")\[email protected](v_j_b_schema.JOB_BINARY_SCHEMA, v_j_b.check_job_binary)\ndef job_binary_create(data):\n- return u.render(api.create_job_binary(data).to_wrapped_dict())\n+ result = api.create_job_binary(data).to_wrapped_dict()\n+ u._replace_tenant_id_project_id(result['job_binary'])\n+ return u.render(result)\[email protected]('/job-binaries')\n@@ -38,6 +40,8 @@ def job_binary_create(data):\nv.validate_sorting_job_binaries)\ndef job_binary_list():\nresult = api.get_job_binaries(**u.get_request_args().to_dict())\n+ for jb in result:\n+ u._replace_tenant_id_project_id(jb)\nreturn u.render(res=result, name='binaries')\n@@ -45,7 +49,9 @@ def job_binary_list():\[email protected](\"data-processing:job-binaries:get\")\[email protected]_exists(api.get_job_binary, 'job_binary_id')\ndef job_binary_get(job_binary_id):\n- return u.to_wrapped_dict(api.get_job_binary, job_binary_id)\n+ result = api.get_job_binary(job_binary_id).to_wrapped_dict()\n+ u._replace_tenant_id_project_id(result['job_binary'])\n+ return u.render(result)\[email protected]('/job-binaries/<job_binary_id>')\n@@ -70,5 +76,6 @@ def job_binary_data(job_binary_id):\[email protected](\"data-processing:job-binaries:modify\")\[email protected](v_j_b_schema.JOB_BINARY_UPDATE_SCHEMA, v_j_b.check_job_binary)\ndef job_binary_update(job_binary_id, data):\n- return u.render(api.update_job_binary(job_binary_id,\n- data).to_wrapped_dict())\n+ result = api.update_job_binary(job_binary_id, data).to_wrapped_dict()\n+ u._replace_tenant_id_project_id(result['job_binary'])\n+ return u.render(result)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/job_templates.py",
"new_path": "sahara/api/v2/job_templates.py",
"diff": "@@ -31,6 +31,8 @@ rest = u.RestV2('job-templates', __name__)\nv.validate_sorting_jobs)\ndef job_templates_list():\nresult = api.get_job_templates(**u.get_request_args().to_dict())\n+ for jt in result:\n+ u._replace_tenant_id_project_id(jt)\nreturn u.render(res=result, name='job_templates')\n@@ -38,15 +40,19 @@ def job_templates_list():\[email protected](\"data-processing:job-templates:create\")\[email protected](v_j_schema.JOB_SCHEMA, v_j.check_mains_libs, v_j.check_interface)\ndef job_templates_create(data):\n- return u.render({'job_template': api.create_job_template(data).to_dict()})\n+ result = {'job_template': api.create_job_template(data).to_dict()}\n+ u._replace_tenant_id_project_id(result['job_template'])\n+ return u.render(result)\[email protected]('/job-templates/<job_templates_id>')\[email protected](\"data-processing:job-templates:get\")\[email protected]_exists(api.get_job_templates, id='job_templates_id')\ndef job_templates_get(job_templates_id):\n- return u.render({'job_template': api.get_job_template(\n- job_templates_id).to_dict()})\n+ result = {'job_template': api.get_job_template(\n+ job_templates_id).to_dict()}\n+ u._replace_tenant_id_project_id(result['job_template'])\n+ return u.render(result)\[email protected]('/job-templates/<job_templates_id>')\n@@ -54,8 +60,10 @@ def job_templates_get(job_templates_id):\[email protected]_exists(api.get_job_templates, id='job_templates_id')\[email protected](v_j_schema.JOB_UPDATE_SCHEMA)\ndef job_templates_update(job_templates_id, data):\n- return u.render({'job_template': api.update_job_template(\n- job_templates_id, data).to_dict()})\n+ result = {'job_template': api.update_job_template(\n+ job_templates_id, data).to_dict()}\n+ u._replace_tenant_id_project_id(result['job_template'])\n+ return u.render(result)\[email protected]('/job-templates/<job_templates_id>')\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/jobs.py",
"new_path": "sahara/api/v2/jobs.py",
"diff": "@@ -38,6 +38,7 @@ def jobs_list():\n# removed\nfor je in result:\nje.pop('oozie_job_id', force=True)\n+ u._replace_tenant_id_project_id(je)\nreturn u.render(res=result, name='jobs')\n@@ -49,6 +50,7 @@ def jobs_execute(data):\ndict.update(result['job'],\n{'engine_job_id': result['job']['oozie_job_id']})\ndict.pop(result['job'], 'oozie_job_id')\n+ u._replace_tenant_id_project_id(result['job'])\nreturn u.render(result)\n@@ -61,6 +63,7 @@ def jobs_get(job_id):\ndata.get('refresh_status', 'false')).lower() == 'true'\nresult = {'job': api.get_job_execution(job_id, refresh_status)}\nresult['job'].pop('oozie_job_id', force=True)\n+ u._replace_tenant_id_project_id(result['job'])\nreturn u.render(result)\n@@ -72,6 +75,7 @@ def jobs_get(job_id):\ndef jobs_update(job_id, data):\nresult = {'job': api.update_job_execution(job_id, data)}\nresult['job'].pop('oozie_job_id', force=True)\n+ u._replace_tenant_id_project_id(result['job'])\nreturn u.render(result)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/node_group_templates.py",
"new_path": "sahara/api/v2/node_group_templates.py",
"diff": "@@ -34,6 +34,7 @@ def node_group_templates_list():\nresult = api.get_node_group_templates(**u.get_request_args().to_dict())\nfor ngt in result:\nu._replace_hadoop_version_plugin_version(ngt)\n+ u._replace_tenant_id_project_id(ngt)\nreturn u.render(res=result, name=\"node_group_templates\")\n@@ -48,6 +49,7 @@ def node_group_templates_create(data):\ndel data['plugin_version']\nresult = api.create_node_group_template(data).to_wrapped_dict()\nu._replace_hadoop_version_plugin_version(result['node_group_template'])\n+ u._replace_tenant_id_project_id(result['node_group_template'])\nreturn u.render(result)\n@@ -58,6 +60,7 @@ def node_group_templates_get(node_group_template_id):\nresult = u.to_wrapped_dict_no_render(\napi.get_node_group_template, node_group_template_id)\nu._replace_hadoop_version_plugin_version(result['node_group_template'])\n+ u._replace_tenant_id_project_id(result['node_group_template'])\nreturn u.render(result)\n@@ -73,6 +76,7 @@ def node_group_templates_update(node_group_template_id, data):\nresult = u.to_wrapped_dict_no_render(\napi.update_node_group_template, node_group_template_id, data)\nu._replace_hadoop_version_plugin_version(result['node_group_template'])\n+ u._replace_tenant_id_project_id(result['node_group_template'])\nreturn u.render(result)\n@@ -89,7 +93,7 @@ def _node_group_template_export_helper(template):\ntemplate.pop('id')\ntemplate.pop('updated_at')\ntemplate.pop('created_at')\n- template.pop('tenant_id')\n+ template.pop('project_id')\ntemplate.pop('is_default')\ntemplate['flavor_id'] = '{flavor_id}'\ntemplate['security_groups'] = '{security_groups}'\n@@ -104,6 +108,7 @@ def node_group_template_export(node_group_template_id):\ncontent = u.to_wrapped_dict_no_render(\napi.export_node_group_template, node_group_template_id)\nu._replace_hadoop_version_plugin_version(content['node_group_template'])\n+ u._replace_tenant_id_project_id(content['node_group_template'])\n_node_group_template_export_helper(content['node_group_template'])\nres = u.render(content)\nres.headers.add('Content-Disposition', 'attachment',\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/api.py",
"new_path": "sahara/utils/api.py",
"diff": "@@ -354,3 +354,8 @@ def to_wrapped_dict_no_render(func, id, *args, **kwargs):\ndef _replace_hadoop_version_plugin_version(obj):\ndict.update(obj, {'plugin_version': obj['hadoop_version']})\ndict.pop(obj, 'hadoop_version')\n+\n+\n+def _replace_tenant_id_project_id(obj):\n+ dict.update(obj, {'project_id': obj['tenant_id']})\n+ dict.pop(obj, 'tenant_id')\n"
}
] | Python | Apache License 2.0 | openstack/sahara | APIv2 Changing return payload to project_id
As part of the APIv2 work we need to change all tenant_id references
to project_id on the return payload.
Story: #2004505
Task: #28227
Change-Id: I94bca161aa4f7bdd56d5528bae92fa81af188a43 |
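The rename applied throughout the record above is a one-key rewrite of each wrapped payload before rendering. A minimal sketch, simplified to a plain dict; the real helper in sahara/utils/api.py operates on the REST wrapper via dict.update and dict.pop:

```python
def replace_tenant_id_project_id(obj):
    # expose project_id on APIv2 responses while the stored objects still carry tenant_id
    obj['project_id'] = obj.pop('tenant_id')


cluster_template = {'name': 'ct-1', 'tenant_id': 'abc123', 'plugin_name': 'ambari'}
replace_tenant_id_project_id(cluster_template)
print(cluster_template)
# {'name': 'ct-1', 'plugin_name': 'ambari', 'project_id': 'abc123'}
```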
488,272 | 04.12.2018 10:57:29 | 10,800 | 81d05942e9fad21b2ccde04544bf362504448a37 | Deploying Sahara with unversioned endpoints
One of the tasks needed in order to complete APIv2 is deployment with
unversioned endpoints.
This patch makes sahara register unversioned endpoints when deployed with devstack
Depends-On: | [
{
"change_type": "MODIFY",
"old_path": "devstack/plugin.sh",
"new_path": "devstack/plugin.sh",
"diff": "@@ -35,9 +35,9 @@ function create_sahara_accounts {\nget_or_create_service \"sahara\" \"data-processing\" \"Sahara Data Processing\"\nget_or_create_endpoint \"data-processing\" \\\n\"$REGION_NAME\" \\\n- \"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\\$(tenant_id)s\" \\\n- \"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\\$(tenant_id)s\" \\\n- \"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\\$(tenant_id)s\"\n+ \"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT\" \\\n+ \"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT\" \\\n+ \"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT\"\n}\n# cleanup_sahara() - Remove residual data files, anything left over from\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Deploying Sahara with unversioned endpoints
One of the tasks needed in order to complete APIv2 is deployment with
unversioned endpoints.
This patch makes sahara register unversioned endpoints when deployed with devstack
Depends-On: I5066f73aa0ace79d8d5e1431f8d741d0d942d9d5
Change-Id: I06ac6da0c6784680330ae0431c20d3553f95e452 |
488,272 | 06.12.2018 16:42:55 | 10,800 | 7105a891c23ca066d87f68e07a7098ea692ca112 | APIv2 - Fix 500 on malformed query string on
In order to improve the information returned to clients, we are adding a check
that verifies query parameters before each call and returns a more appropriate
message to the users.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/cluster_templates.py",
"new_path": "sahara/api/v2/cluster_templates.py",
"diff": "@@ -29,6 +29,7 @@ rest = u.RestV2('cluster-templates', __name__)\[email protected]_exists(api.get_cluster_template, 'marker')\[email protected](None, v.validate_pagination_limit,\nv.validate_sorting_cluster_templates)\[email protected]_request_params(['plugin_name', 'hadoop_version', 'name'])\ndef cluster_templates_list():\nresult = api.get_cluster_templates(**u.get_request_args().to_dict())\nfor ct in result:\n@@ -40,6 +41,7 @@ def cluster_templates_list():\[email protected](\"data-processing:cluster-templates:create\")\[email protected](ct_schema.CLUSTER_TEMPLATE_SCHEMA_V2,\nv_ct.check_cluster_template_create)\[email protected]_request_params([])\ndef cluster_templates_create(data):\n# renaming hadoop_version -> plugin_version\n# this can be removed once APIv1 is deprecated\n@@ -53,6 +55,7 @@ def cluster_templates_create(data):\[email protected]('/cluster-templates/<cluster_template_id>')\[email protected](\"data-processing:cluster-templates:get\")\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\[email protected]_request_params([])\ndef cluster_templates_get(cluster_template_id):\nresult = u.to_wrapped_dict_no_render(\napi.get_cluster_template, cluster_template_id)\n@@ -65,6 +68,7 @@ def cluster_templates_get(cluster_template_id):\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\[email protected](ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2,\nv_ct.check_cluster_template_update)\[email protected]_request_params([])\ndef cluster_templates_update(cluster_template_id, data):\nif data.get('plugin_version', None):\ndata['hadoop_version'] = data['plugin_version']\n@@ -79,6 +83,7 @@ def cluster_templates_update(cluster_template_id, data):\[email protected](\"data-processing:cluster-templates:delete\")\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\[email protected](None, v_ct.check_cluster_template_usage)\[email protected]_request_params([])\ndef cluster_templates_delete(cluster_template_id):\napi.terminate_cluster_template(cluster_template_id)\nreturn u.render()\n@@ -97,6 +102,7 @@ def _cluster_template_export_helper(template):\[email protected]('/cluster-templates/<cluster_template_id>/export')\[email protected](\"data-processing:cluster-templates:get\")\[email protected]_exists(api.get_cluster_template, 'cluster_template_id')\[email protected]_request_params([])\ndef cluster_template_export(cluster_template_id):\ncontent = u.to_wrapped_dict_no_render(\napi.export_cluster_template, cluster_template_id)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/clusters.py",
"new_path": "sahara/api/v2/clusters.py",
"diff": "@@ -31,6 +31,7 @@ rest = u.RestV2('clusters', __name__)\[email protected](\"data-processing:clusters:get_all\")\[email protected]_exists(api.get_cluster, 'marker')\[email protected](None, v.validate_pagination_limit)\[email protected]_request_params(['plugin_name', 'hadoop_version', 'name'])\ndef clusters_list():\nresult = api.get_clusters(**u.get_request_args().to_dict())\nfor c in result:\n@@ -42,6 +43,7 @@ def clusters_list():\[email protected](\"data-processing:clusters:create\")\[email protected](v_c_schema.CLUSTER_SCHEMA_V2,\nv_c.check_one_or_multiple_clusters_create)\[email protected]_request_params([])\ndef clusters_create(data):\n# renaming hadoop_version -> plugin_version\n# this can be removed once APIv1 is deprecated\n@@ -62,6 +64,7 @@ def clusters_create(data):\[email protected](\"data-processing:clusters:scale\")\[email protected]_exists(api.get_cluster, 'cluster_id')\[email protected](v_c_schema.CLUSTER_SCALING_SCHEMA_V2, v_c_s.check_cluster_scaling)\[email protected]_request_params([])\ndef clusters_scale(cluster_id, data):\nresult = u.to_wrapped_dict_no_render(\napi.scale_cluster, cluster_id, data)\n@@ -72,6 +75,7 @@ def clusters_scale(cluster_id, data):\[email protected]('/clusters/<cluster_id>')\[email protected](\"data-processing:clusters:get\")\[email protected]_exists(api.get_cluster, 'cluster_id')\[email protected]_request_params([])\ndef clusters_get(cluster_id):\ndata = u.get_request_args()\nshow_events = six.text_type(\n@@ -86,6 +90,7 @@ def clusters_get(cluster_id):\[email protected](\"data-processing:clusters:modify\")\[email protected]_exists(api.get_cluster, 'cluster_id')\[email protected](v_c_schema.CLUSTER_UPDATE_SCHEMA, v_c.check_cluster_update)\[email protected]_request_params([])\ndef clusters_update(cluster_id, data):\nresult = u.to_wrapped_dict_no_render(\napi.update_cluster, cluster_id, data)\n@@ -97,6 +102,7 @@ def clusters_update(cluster_id, data):\[email protected](\"data-processing:clusters:delete\")\[email protected]_exists(api.get_cluster, 'cluster_id')\[email protected](v_c_schema.CLUSTER_DELETE_SCHEMA_V2, v_c.check_cluster_delete)\[email protected]_request_params([])\ndef clusters_delete(cluster_id):\ndata = u.request_data()\nforce = data.get('force', False)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/data_sources.py",
"new_path": "sahara/api/v2/data_sources.py",
"diff": "@@ -29,6 +29,7 @@ rest = u.RestV2('data-sources', __name__)\[email protected]_exists(api.get_data_source, 'marker')\[email protected](None, v.validate_pagination_limit,\nv.validate_sorting_data_sources)\[email protected]_request_params(['type'])\ndef data_sources_list():\nresult = api.get_data_sources(**u.get_request_args().to_dict())\nreturn u.render(res=result, name='data_sources')\n@@ -37,6 +38,7 @@ def data_sources_list():\[email protected]('/data-sources')\[email protected](\"data-processing:data-sources:register\")\[email protected](v_d_s_schema.DATA_SOURCE_SCHEMA, v_d_s.check_data_source_create)\[email protected]_request_params([])\ndef data_source_register(data):\nreturn u.render(api.register_data_source(data).to_wrapped_dict())\n@@ -44,6 +46,7 @@ def data_source_register(data):\[email protected]('/data-sources/<data_source_id>')\[email protected](\"data-processing:data-sources:get\")\[email protected]_exists(api.get_data_source, 'data_source_id')\[email protected]_request_params([])\ndef data_source_get(data_source_id):\nreturn u.to_wrapped_dict(api.get_data_source, data_source_id)\n@@ -51,6 +54,7 @@ def data_source_get(data_source_id):\[email protected]('/data-sources/<data_source_id>')\[email protected](\"data-processing:data-sources:delete\")\[email protected]_exists(api.get_data_source, 'data_source_id')\[email protected]_request_params([])\ndef data_source_delete(data_source_id):\napi.delete_data_source(data_source_id)\nreturn u.render()\n@@ -60,5 +64,6 @@ def data_source_delete(data_source_id):\[email protected](\"data-processing:data-sources:modify\")\[email protected]_exists(api.get_data_source, 'data_source_id')\[email protected](v_d_s_schema.DATA_SOURCE_UPDATE_SCHEMA)\[email protected]_request_params([])\ndef data_source_update(data_source_id, data):\nreturn u.to_wrapped_dict(api.data_source_update, data_source_id, data)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/images.py",
"new_path": "sahara/api/v2/images.py",
"diff": "@@ -25,6 +25,7 @@ rest = u.RestV2('images', __name__)\[email protected]('/images')\[email protected](\"data-processing:images:get_all\")\[email protected]_request_params(['name', 'tags', 'username'])\ndef images_list():\ntags = u.get_request_args().getlist('tags')\nname = u.get_request_args().get('name', None)\n@@ -34,6 +35,7 @@ def images_list():\[email protected]('/images/<image_id>')\[email protected](\"data-processing:images:get\")\[email protected]_exists(api.get_image, id='image_id')\[email protected]_request_params([])\ndef images_get(image_id):\nreturn u.render(api.get_registered_image(id=image_id).wrapped_dict)\n@@ -42,6 +44,7 @@ def images_get(image_id):\[email protected](\"data-processing:images:register\")\[email protected]_exists(api.get_image, id='image_id')\[email protected](v_images.image_register_schema, v_images.check_image_register)\[email protected]_request_params([])\ndef images_set(image_id, data):\nreturn u.render(api.register_image(image_id, **data).wrapped_dict)\n@@ -49,6 +52,7 @@ def images_set(image_id, data):\[email protected]('/images/<image_id>')\[email protected](\"data-processing:images:unregister\")\[email protected]_exists(api.get_image, id='image_id')\[email protected]_request_params([])\ndef images_unset(image_id):\napi.unregister_image(image_id)\nreturn u.render()\n@@ -57,6 +61,7 @@ def images_unset(image_id):\[email protected]('/images/<image_id>/tags')\[email protected](\"data-processing:images:get_tags\")\[email protected]_exists(api.get_image, id='image_id')\[email protected]_request_params([])\ndef image_tags_get(image_id):\nreturn u.render(api.get_image_tags(image_id))\n@@ -65,6 +70,7 @@ def image_tags_get(image_id):\[email protected](\"data-processing:images:set_tags\")\[email protected]_exists(api.get_image, id='image_id')\[email protected](v_images.image_tags_schema, v_images.check_tags)\[email protected]_request_params([])\ndef image_tags_update(image_id, data):\nreturn u.render(api.set_image_tags(image_id, **data).wrapped_dict)\n@@ -72,6 +78,7 @@ def image_tags_update(image_id, data):\[email protected]('/images/<image_id>/tags')\[email protected](\"data-processing:images:remove_tags\")\[email protected]_exists(api.get_image, id='image_id')\[email protected]_request_params([])\ndef image_tags_delete(image_id):\napi.remove_image_tags(image_id)\nreturn u.render()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/job_binaries.py",
"new_path": "sahara/api/v2/job_binaries.py",
"diff": "@@ -27,6 +27,7 @@ rest = u.RestV2('job-binaries', __name__)\[email protected]('/job-binaries')\[email protected](\"data-processing:job-binaries:create\")\[email protected](v_j_b_schema.JOB_BINARY_SCHEMA, v_j_b.check_job_binary)\[email protected]_request_params([])\ndef job_binary_create(data):\nreturn u.render(api.create_job_binary(data).to_wrapped_dict())\n@@ -36,6 +37,7 @@ def job_binary_create(data):\[email protected]_exists(api.get_job_binary, 'marker')\[email protected](None, v.validate_pagination_limit,\nv.validate_sorting_job_binaries)\[email protected]_request_params(['name'])\ndef job_binary_list():\nresult = api.get_job_binaries(**u.get_request_args().to_dict())\nreturn u.render(res=result, name='binaries')\n@@ -44,6 +46,7 @@ def job_binary_list():\[email protected]('/job-binaries/<job_binary_id>')\[email protected](\"data-processing:job-binaries:get\")\[email protected]_exists(api.get_job_binary, 'job_binary_id')\[email protected]_request_params([])\ndef job_binary_get(job_binary_id):\nreturn u.to_wrapped_dict(api.get_job_binary, job_binary_id)\n@@ -51,6 +54,7 @@ def job_binary_get(job_binary_id):\[email protected]('/job-binaries/<job_binary_id>')\[email protected](\"data-processing:job-binaries:delete\")\[email protected]_exists(api.get_job_binary, id='job_binary_id')\[email protected]_request_params([])\ndef job_binary_delete(job_binary_id):\napi.delete_job_binary(job_binary_id)\nreturn u.render()\n@@ -59,6 +63,7 @@ def job_binary_delete(job_binary_id):\[email protected]('/job-binaries/<job_binary_id>/data')\[email protected](\"data-processing:job-binaries:get_data\")\[email protected]_exists(api.get_job_binary, 'job_binary_id')\[email protected]_request_params([])\ndef job_binary_data(job_binary_id):\ndata = api.get_job_binary_data(job_binary_id)\nif type(data) == dict:\n@@ -69,6 +74,7 @@ def job_binary_data(job_binary_id):\[email protected]('/job-binaries/<job_binary_id>')\[email protected](\"data-processing:job-binaries:modify\")\[email protected](v_j_b_schema.JOB_BINARY_UPDATE_SCHEMA, v_j_b.check_job_binary)\[email protected]_request_params([])\ndef job_binary_update(job_binary_id, data):\nreturn u.render(api.update_job_binary(job_binary_id,\ndata).to_wrapped_dict())\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/job_templates.py",
"new_path": "sahara/api/v2/job_templates.py",
"diff": "@@ -29,6 +29,7 @@ rest = u.RestV2('job-templates', __name__)\[email protected]_exists(api.get_job_templates, 'marker')\[email protected](None, v.validate_pagination_limit,\nv.validate_sorting_jobs)\[email protected]_request_params(['type', 'name'])\ndef job_templates_list():\nresult = api.get_job_templates(**u.get_request_args().to_dict())\nreturn u.render(res=result, name='job_templates')\n@@ -37,6 +38,7 @@ def job_templates_list():\[email protected]('/job-templates')\[email protected](\"data-processing:job-templates:create\")\[email protected](v_j_schema.JOB_SCHEMA, v_j.check_mains_libs, v_j.check_interface)\[email protected]_request_params([])\ndef job_templates_create(data):\nreturn u.render({'job_template': api.create_job_template(data).to_dict()})\n@@ -44,6 +46,7 @@ def job_templates_create(data):\[email protected]('/job-templates/<job_templates_id>')\[email protected](\"data-processing:job-templates:get\")\[email protected]_exists(api.get_job_templates, id='job_templates_id')\[email protected]_request_params([])\ndef job_templates_get(job_templates_id):\nreturn u.render({'job_template': api.get_job_template(\njob_templates_id).to_dict()})\n@@ -53,6 +56,7 @@ def job_templates_get(job_templates_id):\[email protected](\"data-processing:jobs:modify\")\[email protected]_exists(api.get_job_templates, id='job_templates_id')\[email protected](v_j_schema.JOB_UPDATE_SCHEMA)\[email protected]_request_params([])\ndef job_templates_update(job_templates_id, data):\nreturn u.render({'job_template': api.update_job_template(\njob_templates_id, data).to_dict()})\n@@ -61,6 +65,7 @@ def job_templates_update(job_templates_id, data):\[email protected]('/job-templates/<job_templates_id>')\[email protected](\"data-processing:jobs:delete\")\[email protected]_exists(api.get_job_templates, id='job_templates_id')\[email protected]_request_params([])\ndef job_templates_delete(job_templates_id):\napi.delete_job_template(job_templates_id)\nreturn u.render()\n@@ -69,5 +74,6 @@ def job_templates_delete(job_templates_id):\[email protected]('/job-templates/config-hints/<job_type>')\[email protected](\"data-processing:jobs:get_config_hints\")\[email protected]_exists(api.get_job_config_hints, job_type='job_type')\[email protected]_request_params([])\ndef job_config_hints_get(job_type):\nreturn u.render(api.get_job_config_hints(job_type))\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/job_types.py",
"new_path": "sahara/api/v2/job_types.py",
"diff": "from sahara.api import acl\nfrom sahara.service.api.v2 import job_types as api\n+from sahara.service import validation as v\nimport sahara.utils.api as u\n@@ -23,6 +24,7 @@ rest = u.RestV2('job-types', __name__)\[email protected]('/job-types')\[email protected](\"data-processing:job-types:get_all\")\[email protected]_request_params(['type', 'plugin_name', 'hadoop_version'])\ndef job_types_get():\n# We want to use flat=False with to_dict() so that\n# the value of each arg is given as a list. This supports\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/jobs.py",
"new_path": "sahara/api/v2/jobs.py",
"diff": "@@ -31,6 +31,7 @@ rest = u.RestV2('jobs', __name__)\[email protected]_exists(api.get_job_execution, 'marker')\[email protected](None, v.validate_pagination_limit,\nv.validate_sorting_job_executions)\[email protected]_request_params(['status'])\ndef jobs_list():\nresult = api.job_execution_list(**u.get_request_args().to_dict())\n# APIv2: renaming oozie_job_id -> engine_job_id\n@@ -44,6 +45,7 @@ def jobs_list():\[email protected]('/jobs')\[email protected](\"data-processing:jobs:execute\")\[email protected](v_j_e_schema.JOB_EXEC_SCHEMA_V2, v_j_e.check_job_execution)\[email protected]_request_params([])\ndef jobs_execute(data):\nresult = {'job': api.execute_job(data)}\ndict.update(result['job'],\n@@ -55,6 +57,7 @@ def jobs_execute(data):\[email protected]('/jobs/<job_id>')\[email protected](\"data-processing:job-executions:get\")\[email protected]_exists(api.get_job_execution, id='job_id')\[email protected]_request_params([])\ndef jobs_get(job_id):\ndata = u.get_request_args()\nrefresh_status = six.text_type(\n@@ -69,6 +72,7 @@ def jobs_get(job_id):\[email protected]_exists(api.get_job_execution, id='job_id')\[email protected](\nv_j_e_schema.JOB_EXEC_UPDATE_SCHEMA, v_j_e.check_job_execution_update)\[email protected]_request_params([])\ndef jobs_update(job_id, data):\nresult = {'job': api.update_job_execution(job_id, data)}\nresult['job'].pop('oozie_job_id', force=True)\n@@ -79,6 +83,7 @@ def jobs_update(job_id, data):\[email protected](\"data-processing:job-executions:delete\")\[email protected]_exists(api.get_job_execution, id='job_id')\[email protected](None, v_j_e.check_job_execution_delete)\[email protected]_request_params([])\ndef jobs_delete(job_id):\napi.delete_job_execution(job_id)\nreturn u.render()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/node_group_templates.py",
"new_path": "sahara/api/v2/node_group_templates.py",
"diff": "@@ -30,6 +30,7 @@ rest = u.RestV2('node-group-templates', __name__)\[email protected]_exists(api.get_node_group_template, 'marker')\[email protected](None, v.validate_pagination_limit,\nv.validate_sorting_node_group_templates)\[email protected]_request_params(['plugin_name', 'hadoop_version', 'name'])\ndef node_group_templates_list():\nresult = api.get_node_group_templates(**u.get_request_args().to_dict())\nfor ngt in result:\n@@ -41,6 +42,7 @@ def node_group_templates_list():\[email protected](\"data-processing:node-group-templates:create\")\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA_V2,\nv_ngt.check_node_group_template_create)\[email protected]_request_params([])\ndef node_group_templates_create(data):\n# renaming hadoop_version -> plugin_version\n# this can be removed once APIv1 is deprecated\n@@ -54,6 +56,7 @@ def node_group_templates_create(data):\[email protected]('/node-group-templates/<node_group_template_id>')\[email protected](\"data-processing:node-group-templates:get\")\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\[email protected]_request_params([])\ndef node_group_templates_get(node_group_template_id):\nresult = u.to_wrapped_dict_no_render(\napi.get_node_group_template, node_group_template_id)\n@@ -66,6 +69,7 @@ def node_group_templates_get(node_group_template_id):\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\[email protected](ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2,\nv_ngt.check_node_group_template_update)\[email protected]_request_params([])\ndef node_group_templates_update(node_group_template_id, data):\nif data.get('plugin_version', None):\ndata['hadoop_version'] = data['plugin_version']\n@@ -80,6 +84,7 @@ def node_group_templates_update(node_group_template_id, data):\[email protected](\"data-processing:node-group-templates:delete\")\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\[email protected](None, v_ngt.check_node_group_template_usage)\[email protected]_request_params([])\ndef node_group_templates_delete(node_group_template_id):\napi.terminate_node_group_template(node_group_template_id)\nreturn u.render()\n@@ -100,6 +105,7 @@ def _node_group_template_export_helper(template):\[email protected]('/node-group-templates/<node_group_template_id>/export')\[email protected](\"data-processing:node-group-templates:get\")\[email protected]_exists(api.get_node_group_template, 'node_group_template_id')\[email protected]_request_params([])\ndef node_group_template_export(node_group_template_id):\ncontent = u.to_wrapped_dict_no_render(\napi.export_node_group_template, node_group_template_id)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/plugins.py",
"new_path": "sahara/api/v2/plugins.py",
"diff": "@@ -25,6 +25,7 @@ rest = u.RestV2('plugins', __name__)\[email protected]('/plugins')\[email protected](\"data-processing:plugins:get_all\")\[email protected]_request_params([])\ndef plugins_list():\nreturn u.render(plugins=[p.dict for p in api.get_plugins()])\n@@ -32,6 +33,7 @@ def plugins_list():\[email protected]('/plugins/<plugin_name>')\[email protected](\"data-processing:plugins:get\")\[email protected]_exists(api.get_plugin, plugin_name='plugin_name')\[email protected]_request_params([])\ndef plugins_get(plugin_name):\nreturn u.render(api.get_plugin(plugin_name).wrapped_dict)\n@@ -39,6 +41,7 @@ def plugins_get(plugin_name):\[email protected]('/plugins/<plugin_name>/<version>')\[email protected](\"data-processing:plugins:get_version\")\[email protected]_exists(api.get_plugin, plugin_name='plugin_name', version='version')\[email protected]_request_params([])\ndef plugins_get_version(plugin_name, version):\nreturn u.render(api.get_plugin(plugin_name, version).wrapped_dict)\n@@ -47,5 +50,6 @@ def plugins_get_version(plugin_name, version):\[email protected](\"data-processing:plugins:patch\")\[email protected]_exists(api.get_plugin, plugin_name='plugin_name')\[email protected](v_p.plugin_update_validation_jsonschema(), v_p.check_plugin_update)\[email protected]_request_params([])\ndef plugins_update(plugin_name, data):\nreturn u.render(api.update_plugin(plugin_name, data).wrapped_dict)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/validation.py",
"new_path": "sahara/service/validation.py",
"diff": "@@ -198,3 +198,22 @@ def check_exists(get_func, *id_prop, **get_args):\nreturn handler\nreturn decorator\n+\n+\n+def validate_request_params(supported_params):\n+ def decorator(func):\n+ @functools.wraps(func)\n+ def handler(*args, **kwargs):\n+ pagination_params = ['marker', 'limit', 'sort_by']\n+ func_name = func.__name__\n+ params = u.get_request_args()\n+ for param in params.keys():\n+ if (param not in supported_params and\n+ param not in pagination_params):\n+ return u.invalid_param_error(\n+ 400,\n+ 'The only valid params for %s are %s and %s' % (\n+ func_name, supported_params, pagination_params))\n+ return func(*args, **kwargs)\n+ return handler\n+ return decorator\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/api.py",
"new_path": "sahara/utils/api.py",
"diff": "@@ -288,6 +288,18 @@ def render_error_message(error_code, error_message, error_name):\nreturn resp\n+def invalid_param_error(status_code, descr, exc=None):\n+ LOG.error(\"Request aborted with status code {code} and \"\n+ \"message '{message}'\".format(code=status_code, message=descr))\n+\n+ if exc is not None:\n+ LOG.error(traceback.format_exc())\n+\n+ error_code = \"INVALID_PARAMS_ON_REQUEST\"\n+\n+ return render_error_message(status_code, descr, error_code)\n+\n+\ndef internal_error(status_code, descr, exc=None):\nLOG.error(\"Request aborted with status code {code} and \"\n\"message '{message}'\".format(code=status_code, message=descr))\n"
}
] | Python | Apache License 2.0 | openstack/sahara | APIv2 - Fix 500 on malformed query string on
In order to improve the information returned to clients, we are adding a check
to verify parameters before each call and return a more appropriate
message to the users.
Change-Id: I9923601d0903e415a3fe30bec9bdc8fc34b91ff6
Story: #2004506
Task: #28228 |
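For illustration, a minimal standalone sketch of the query-parameter whitelist pattern this record adds; the `get_params` hook here is a hypothetical stand-in for Flask's request args, not sahara's API (the real decorator is the one shown in sahara/service/validation.py above):

```python
import functools

PAGINATION_PARAMS = ('marker', 'limit', 'sort_by')


def validate_request_params(supported_params, get_params):
    """Reject query parameters that are neither supported nor pagination keys."""
    def decorator(func):
        @functools.wraps(func)
        def handler(*args, **kwargs):
            unknown = [p for p in get_params()
                       if p not in supported_params and p not in PAGINATION_PARAMS]
            if unknown:
                # sahara renders this as a 400 INVALID_PARAMS_ON_REQUEST response
                return {'error': 400,
                        'message': 'unsupported query params: %s' % unknown}
            return func(*args, **kwargs)
        return handler
    return decorator


@validate_request_params(['name', 'tags'], get_params=lambda: {'name': 'x', 'foo': 'y'})
def images_list():
    return {'images': []}


print(images_list())   # -> 400-style error dict because of the unexpected 'foo'
```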
488,272 | 31.01.2019 14:32:32 | 10,800 | 21791d1f8929af24196150b70db5864836ac8c83 | Changing hdfs fs to hdfs dfs
The command hdfs fs has been deprecated in favor of hdfs dfs.
Story:
Task: | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "releasenotes/notes/hdfs-dfs-94a9c4f64cf8994f.yaml",
"diff": "+---\n+fixes:\n+ - |\n+ The command hdfs fs has been deprecated in favor of hdfs fs. This\n+ fixes will allow the use of Hbase service.\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/service/edp/hdfs_helper.py",
"new_path": "sahara/service/edp/hdfs_helper.py",
"diff": "@@ -39,7 +39,7 @@ def create_hbase_common_lib(r):\npaths = stdout.split(':')\nfor p in paths:\nif p.endswith(\".jar\"):\n- r.execute_command('sudo su - -c \"hdfs fs -put -p %s %s\" hdfs'\n+ r.execute_command('sudo su - -c \"hdfs dfs -put -p %s %s\" hdfs'\n% (p, HBASE_COMMON_LIB_PATH))\nelse:\nraise ex.RequiredServiceMissingException('hbase')\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/service/edp/test_hdfs_helper.py",
"new_path": "sahara/tests/unit/service/edp/test_hdfs_helper.py",
"diff": "@@ -40,7 +40,7 @@ class HDFSHelperTestCase(base.SaharaTestCase):\nmock.call(('sudo su - -c \"hdfs dfs -mkdir -p '\n'/user/sahara-hbase-lib\" hdfs')),\nmock.call('hbase classpath'),\n- mock.call(('sudo su - -c \"hdfs fs -put -p may.jar '\n+ mock.call(('sudo su - -c \"hdfs dfs -put -p may.jar '\n'/user/sahara-hbase-lib\" hdfs'))]\nself.cluster.execute_command.assert_has_calls(calls)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Changing hdfs fs to hdfs dfs
The command hdfs fs has been deprecated in favor of hdfs dfs.
Story: #2004952
Task: #29368
Change-Id: If8db759ab40ca858594498bc5e6f94f416da2545 |
488,272 | 23.01.2019 14:28:39 | 10,800 | f6afe5b73341f18424d627fcd61101832131346c | Adding spark build image job
Depends-on: | [
{
"change_type": "MODIFY",
"old_path": ".zuul.yaml",
"new_path": ".zuul.yaml",
"diff": "- sahara-buildimages-ambari\n- sahara-buildimages-cloudera\n- sahara-buildimages-mapr\n+ - sahara-buildimages-spark\n- sahara-tests-scenario-multinode-spark\n- sahara-tests-scenario-py3\nparent: sahara-buildimages-base\nvars:\nsahara_plugin: mapr\n+\n+- job:\n+ name: sahara-buildimages-spark\n+ parent: sahara-buildimages-base\n+ vars:\n+ sahara_plugin: spark\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/gate/build-images",
"new_path": "tools/gate/build-images",
"diff": "@@ -75,6 +75,9 @@ case \"$PLUGIN\" in\n\"mapr\")\nbuild_images \"mapr\" \"5.2.0.mrv2\" \"centos7\"\n;;\n+ \"spark\")\n+ build_images \"spark\" \"2.3\" \"centos7\"\n+ ;;\n*)\necho \"Invalid version\"\n;;\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Adding spark build image job
Depends-on: https://review.openstack.org/#/c/632141/
Change-Id: Id8e8b57b16691125f7af4b277bca649398949a6b |
488,272 | 19.02.2019 10:20:14 | 10,800 | dc17f1903f520aefd566a3a9c66ddb406bf7cfa1 | Making Sahara Python 3 compatible
Sahara fails to connect to and operate on remote machines
because the output from subprocess on Python 3 is bytes,
which breaks follow-up actions. | [
{
"change_type": "MODIFY",
"old_path": ".zuul.yaml",
"new_path": ".zuul.yaml",
"diff": "- openstack-tox-cover:\nvoting: false\n- sahara-grenade\n+ - sahara-tests-scenario-py3\ngate:\nqueue: sahara\njobs:\n- sahara-tests-tempest\n- sahara-tests-tempest-v2\n- sahara-grenade\n+ - sahara-tests-scenario-py3\n+\nexperimental:\njobs:\n- sahara-buildimages-ambari\n- sahara-buildimages-mapr\n- sahara-buildimages-spark\n- sahara-tests-scenario-multinode-spark\n- - sahara-tests-scenario-py3\n- job:\nname: sahara-grenade\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/cli/sahara_subprocess.py",
"new_path": "sahara/cli/sahara_subprocess.py",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n+import _io\nimport pickle # nosec\nimport sys\nimport traceback\n@@ -32,6 +33,11 @@ def main():\n# TODO(elmiko) these pickle usages should be\n# reinvestigated to determine a more secure manner to\n# deploy remote commands.\n+ if isinstance(sys.stdin, _io.TextIOWrapper):\n+ func = pickle.load(sys.stdin.buffer) # nosec\n+ args = pickle.load(sys.stdin.buffer) # nosec\n+ kwargs = pickle.load(sys.stdin.buffer) # nosec\n+ else:\nfunc = pickle.load(sys.stdin) # nosec\nargs = pickle.load(sys.stdin) # nosec\nkwargs = pickle.load(sys.stdin) # nosec\n@@ -42,5 +48,8 @@ def main():\nresult['exception'] = cls_name + ': ' + str(e)\nresult['traceback'] = traceback.format_exc()\n- pickle.dump(result, sys.stdout) # nosec\n+ if isinstance(sys.stdin, _io.TextIOWrapper):\n+ pickle.dump(result, sys.stdout.buffer, protocol=2) # nosec\n+ else:\n+ pickle.dump(result, sys.stdout, protocol=2) # nosec\nsys.stdout.flush()\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/plugins/images.py",
"new_path": "sahara/plugins/images.py",
"diff": "@@ -684,7 +684,7 @@ class SaharaScriptValidator(SaharaImageValidatorBase):\nin six.iteritems(arguments)\nif key in self.env_vars)\nscript = script % {\"env_vars\": env_vars,\n- \"script\": self.script_contents}\n+ \"script\": self.script_contents.decode('utf-8')}\npath = '/tmp/%s.sh' % uuidutils.generate_uuid()\nremote.write_file_to(path, script, run_as_root=True)\n_sudo(remote, 'chmod +x %s' % path)\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/tests/unit/plugins/test_images.py",
"new_path": "sahara/tests/unit/plugins/test_images.py",
"diff": "@@ -274,7 +274,7 @@ class TestImages(b.SaharaTestCase):\nuuidutils.generate_uuid.return_value = hash_value\ncls = images.SaharaScriptValidator\nimage_arguments = {\"distro\": 'centos'}\n- cmd = \"It's dangerous to go alone. Run this.\"\n+ cmd = b\"It's dangerous to go alone. Run this.\"\nvalidator = cls(cmd, env_vars=image_arguments.keys(),\noutput_var=\"distro\")\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/procutils.py",
"new_path": "sahara/utils/procutils.py",
"diff": "@@ -31,6 +31,7 @@ def _get_sub_executable():\ndef start_subprocess():\nreturn subprocess.Popen((sys.executable, _get_sub_executable()),\nclose_fds=True,\n+ bufsize=0,\nstdin=subprocess.PIPE,\nstdout=subprocess.PIPE,\nstderr=subprocess.PIPE)\n@@ -39,12 +40,13 @@ def start_subprocess():\ndef run_in_subprocess(proc, func, args=None, kwargs=None, interactive=False):\nargs = args or ()\nkwargs = kwargs or {}\n+\ntry:\n# TODO(elmiko) these pickle usages should be reinvestigated to\n# determine a more secure manner to deploy remote commands.\n- pickle.dump(func, proc.stdin) # nosec\n- pickle.dump(args, proc.stdin) # nosec\n- pickle.dump(kwargs, proc.stdin) # nosec\n+ pickle.dump(func, proc.stdin, protocol=2) # nosec\n+ pickle.dump(args, proc.stdin, protocol=2) # nosec\n+ pickle.dump(kwargs, proc.stdin, protocol=2) # nosec\nproc.stdin.flush()\nif not interactive:\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/ssh_remote.py",
"new_path": "sahara/utils/ssh_remote.py",
"diff": "@@ -152,9 +152,9 @@ def _cleanup():\ndef _read_paramimko_stream(recv_func):\n- result = ''\n+ result = b''\nbuf = recv_func(1024)\n- while buf != '':\n+ while buf != b'':\nresult += buf\nbuf = recv_func(1024)\n@@ -182,6 +182,12 @@ def _execute_command(cmd, run_as_root=False, get_stderr=False,\nstdout = _read_paramimko_stream(chan.recv)\nstderr = _read_paramimko_stream(chan.recv_stderr)\n+ if type(stdout) == bytes:\n+ stdout = stdout.decode('utf-8')\n+\n+ if type(stderr) == bytes:\n+ stderr = stderr.decode('utf-8')\n+\nret_code = chan.recv_exit_status()\nif ret_code and raise_when_error:\n@@ -363,6 +369,9 @@ def _read_file(sftp, remote_file):\nfl = sftp.file(remote_file, 'r')\ndata = fl.read()\nfl.close()\n+ try:\n+ return data.decode('utf-8')\n+ except Exception:\nreturn data\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Making Sahara Python 3 compatible
Sahara fails to connect to and operate on remote machines
because the output from subprocess on Python 3 is bytes,
which breaks follow-up actions.
Change-Id: Id55e6c06d3b6ead18501a0e2146af37bf493881d |
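The bytes-versus-text mismatch described in this record is easy to reproduce outside sahara; a minimal illustration using plain subprocess (not sahara's paramiko/pickle plumbing, and assuming a POSIX `echo`):

```python
import subprocess

# On Python 3, pipes (and paramiko channels) hand back bytes, not str.
proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
out, _ = proc.communicate()
print(type(out))              # <class 'bytes'>

# Code written for Python 2 expects str, so a decode step is needed before
# any string processing -- the same guard the diff adds around paramiko
# output and SFTP reads.
text = out.decode('utf-8') if isinstance(out, bytes) else out
print(text.strip().upper())   # HELLO
```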
488,272 | 25.07.2019 17:19:02 | 10,800 | abc8f570559d142bab22a7557ad1658a6407b7f6 | Python 3 fixes
String to Bytes compatibility.
Story:
Task: | [
{
"change_type": "MODIFY",
"old_path": "sahara/service/edp/spark/engine.py",
"new_path": "sahara/service/edp/spark/engine.py",
"diff": "@@ -173,7 +173,7 @@ class SparkJobEngine(base_engine.JobEngine):\njob_configs.get('configs', {})):\npath = 'service/edp/resources/edp-spark-wrapper.jar'\nname = 'builtin-%s.jar' % uuidutils.generate_uuid()\n- builtin_libs = [{'raw': files.get_file_text(path),\n+ builtin_libs = [{'raw': files.try_get_file_text(path),\n'name': name}]\nuploaded_paths = []\n"
},
{
"change_type": "MODIFY",
"old_path": "sahara/utils/ssh_remote.py",
"new_path": "sahara/utils/ssh_remote.py",
"diff": "@@ -276,7 +276,11 @@ def _get_http_client(host, port, proxy_command=None, gateway_host=None,\ndef _write_fl(sftp, remote_file, data):\n+ try:\nwrite_data = paramiko.py3compat.StringIO(data)\n+ except TypeError:\n+ write_data = paramiko.py3compat.BytesIO(data)\n+\nsftp.putfo(write_data, remote_file)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Python 3 fixes
String to Bytes compatibility.
Story: #2006258
Task: #35875
Change-Id: Id0ad0f3c644af52f41217105b249df78d0b722cc |
488,292 | 24.10.2019 17:42:55 | -28,800 | 9cf30f600372272736691bf4c777afc2bdfa26d8 | fix invalid link of installation guide in Sahara UI User Guide
update unsuitable links with the proper ones | [
{
"change_type": "MODIFY",
"old_path": "doc/source/contributor/devstack.rst",
"new_path": "doc/source/contributor/devstack.rst",
"diff": "@@ -2,7 +2,7 @@ Setup DevStack\n==============\nDevStack can be installed on Fedora, Ubuntu, and CentOS. For supported\n-versions see `DevStack documentation <http://devstack.org>`_\n+versions see `DevStack documentation <https://docs.openstack.org/devstack/latest/>`_\nWe recommend that you install DevStack in a VM, rather than on your main\nsystem. That way you may avoid contamination of your system. You may find\n@@ -16,7 +16,7 @@ Start VM and set up OS\nIn order to run DevStack in a local VM, you need to start by installing\na guest with Ubuntu 14.04 server. Download an image file from\n-`Ubuntu's web site <http://www.ubuntu.com/download/server>`_ and create\n+`Ubuntu's web site <https://www.ubuntu.com/download/server>`_ and create\na new guest from it. Virtualization solution must support\nnested virtualization. Without nested virtualization VMs running inside\nthe DevStack will be extremely slow lacking hardware acceleration, i.e.\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/user/dashboard-user-guide.rst",
"new_path": "doc/source/user/dashboard-user-guide.rst",
"diff": "@@ -4,7 +4,7 @@ Sahara (Data Processing) UI User Guide\nThis guide assumes that you already have the sahara service and Horizon\ndashboard up and running. Don't forget to make sure that sahara is\nregistered in Keystone. If you require assistance with that, please see the\n-`installation guide <../install/installation-guide-html>`_.\n+`installation guide <../install/installation-guide.html>`_.\nThe sections below give a panel by panel overview of setting up clusters\nand running jobs. For a description of using the guided cluster and job tools,\n"
}
] | Python | Apache License 2.0 | openstack/sahara | fix invalid link of installation guide in Sahara UI User Guide
update unsuitable links with the proper ones
Change-Id: Ib7516bea5b6ce8bfaf816c9b0cfdf547babddea7 |
488,325 | 12.05.2021 14:57:05 | -28,800 | 5d2f114b4232aca892012d22f4342e0c3e1b5e71 | delete validation error cluster
story:
task: 42465 | [
{
"change_type": "MODIFY",
"old_path": "sahara/api/v2/clusters.py",
"new_path": "sahara/api/v2/clusters.py",
"diff": "@@ -127,9 +127,8 @@ def clusters_update(cluster_id, data):\ndef clusters_delete(cluster_id):\ndata = u.request_data()\nforce = data.get('force', False)\n- stack_name = api.get_cluster(cluster_id).get(\n- 'extra', {}).get(\n- 'heat_stack_name', None)\n+ extra = api.get_cluster(cluster_id).get('extra', {})\n+ stack_name = extra.get('heat_stack_name', None) if extra else None\napi.terminate_cluster(cluster_id, force=force)\nif force:\nreturn u.render({\"stack_name\": stack_name}, status=200)\n"
}
] | Python | Apache License 2.0 | openstack/sahara | delete validation error cluster
Change-Id: I1ec693c17a43c09383fa83556cd3275b75ca90fd
story: 2008898
task: 42465 |
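The underlying gotcha in this fix is worth spelling out: `dict.get`'s default only applies when the key is missing, not when the stored value is `None`. A minimal reproduction with made-up values (not sahara objects):

```python
cluster = {'extra': None}          # nullable field came back as None, not missing

# The original chained call fails: cluster.get('extra', {}) returns None,
# and None has no .get() method.
try:
    cluster.get('extra', {}).get('heat_stack_name')
except AttributeError as err:
    print('chained .get() blew up:', err)

# The patched pattern: fetch once, then branch on falsiness.
extra = cluster.get('extra', {})
stack_name = extra.get('heat_stack_name') if extra else None
print(stack_name)                  # None, without raising
```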
488,325 | 18.05.2021 11:14:37 | -28,800 | a0de19e45f3a24a584e92aa6273bf440419f7059 | health check error about SqlAlchemy
Story:
Task: 42467
Story:
Task: 39123 | [
{
"change_type": "MODIFY",
"old_path": "sahara/db/sqlalchemy/api.py",
"new_path": "sahara/db/sqlalchemy/api.py",
"diff": "@@ -1561,7 +1561,7 @@ def cluster_verification_add(context, cluster_id, values):\nverification.update(values)\nsession.add(verification)\n- return verification\n+ return _cluster_verification_get(context, session, verification.id)\ndef cluster_verification_update(context, verification_id, values):\n"
}
] | Python | Apache License 2.0 | openstack/sahara | health check error about SqlAlchemy
Story: 2008900
Task: 42467
Story: 2007450
Task: 39123
Change-Id: I7b671df0c946a96d7f6688b5452a884186e5c11e |
488,284 | 30.12.2021 16:21:28 | -28,800 | f0a0946c495598764fd5f1f683aea283af304056 | Make unclear code clear in user guide
cannot show the CLI command; this patch fixes that issue. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/plugins.rst",
"new_path": "doc/source/user/plugins.rst",
"diff": "@@ -30,7 +30,7 @@ additional values for plugin's labels.\nTo disable a plugin (Vanilla Apache Hadoop, for example), the admin\ncan run the following command:\n-.. code-block:: ini\n+.. code-block::\ncat update_configs.json\n{\n@@ -45,7 +45,7 @@ can run the following command:\nAdditionally, specific versions can be disabled by the following command:\n-.. code-block:: ini\n+.. code-block::\ncat update_configs.json\n{\n@@ -64,7 +64,7 @@ Finally, to see all labels of a specific plugin and to see the current status\nof the plugin (is it stable or not, deprecation status) the following command\ncan be executed from the CLI:\n-.. code-block:: ini\n+.. code-block::\nopenstack dataprocessing plugin show vanilla\n"
},
{
"change_type": "MODIFY",
"old_path": "doc/source/user/quickstart.rst",
"new_path": "doc/source/user/quickstart.rst",
"diff": "@@ -50,7 +50,7 @@ to the plugin-specific documentation.\nUpload the generated image into the OpenStack Image service:\n-.. code-block:: console\n+.. code-block::\n$ openstack image create sahara-vanilla-latest-ubuntu --disk-format qcow2 \\\n--container-format bare --file sahara-vanilla-latest-ubuntu.qcow2\n@@ -81,7 +81,7 @@ Remember the image name or save the image ID. This will be used during the\nimage registration with sahara. You can get the image ID using the\n``openstack`` command line tool as follows:\n-.. code-block:: console\n+.. code-block::\n$ openstack image list --property name=sahara-vanilla-latest-ubuntu\n+--------------------------------------+------------------------------+\n@@ -114,7 +114,7 @@ it shall be used.\nFor the steps below and the rest of this guide, substitute\n``<plugin_version>`` with the appropriate version of your plugin.\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing image tags add sahara-vanilla-latest-ubuntu \\\n--tags vanilla <plugin_version>\n@@ -145,7 +145,7 @@ You can get information about available plugins with the following command:\nAlso you can get information about available services for a particular plugin\nwith the ``plugin show`` command. For example:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing plugin show vanilla --plugin-version <plugin_version>\n+---------------------+-----------------------------------------------------------------------------------------------------------------------+\n@@ -174,7 +174,7 @@ with the ``plugin show`` command. For example:\nCreate a master node group template with the command:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing node group template create \\\n--name vanilla-default-master --plugin vanilla \\\n@@ -204,7 +204,7 @@ Create a master node group template with the command:\nCreate a worker node group template with the command:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing node group template create \\\n--name vanilla-default-worker --plugin vanilla \\\n@@ -238,7 +238,7 @@ This will tell the node group to boot its instances from a volume instead of\nthe image. This feature allows for easier live migrations and improved\nperformance.\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing node group template create \\\n--name vanilla-default-worker --plugin vanilla \\\n@@ -279,7 +279,7 @@ Sample templates can be found here:\nCreate a file named ``my_master_template_create.json`` with the following\ncontent:\n-.. sourcecode:: json\n+.. code-block:: json\n{\n\"plugin_name\": \"vanilla\",\n@@ -297,7 +297,7 @@ content:\nCreate a file named ``my_worker_template_create.json`` with the following\ncontent:\n-.. sourcecode:: json\n+.. code-block:: json\n{\n\"plugin_name\": \"vanilla\",\n@@ -324,7 +324,7 @@ Use the ``openstack`` client to upload the node group templates:\nList the available node group templates to ensure that they have been\nadded properly:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing node group template list --name vanilla-default\n+------------------------+--------------------------------------+-------------+--------------------+\n@@ -350,7 +350,7 @@ that describes the node groups of the cluster.\nCreate a cluster template with the command:\n-.. code-block:: console\n+.. 
code-block::\n$ openstack dataprocessing cluster template create \\\n--name vanilla-default-cluster \\\n@@ -377,7 +377,7 @@ Alternatively you can create cluster template from JSON file:\nCreate a file named ``my_cluster_template_create.json`` with the following\ncontent:\n-.. sourcecode:: json\n+.. code-block:: json\n{\n\"plugin_name\": \"vanilla\",\n@@ -408,7 +408,7 @@ Remember the cluster template name or save the cluster template ID for use in\nthe cluster provisioning command. The cluster ID can be found in the output of\nthe creation command or by listing the cluster templates as follows:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing cluster template list --name vanilla-default\n+-------------------------+--------------------------------------+-------------+--------------------+\n@@ -425,7 +425,7 @@ information that can be found by querying various OpenStack services.\nCreate a cluster with the command:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing cluster create --name my-cluster-1 \\\n--cluster-template vanilla-default-cluster --user-keypair my_stack \\\n@@ -456,7 +456,7 @@ Alternatively you can create a cluster template from a JSON file:\nCreate a file named ``my_cluster_create.json`` with the following content:\n-.. sourcecode:: json\n+.. code-block:: json\n{\n\"name\": \"my-cluster-1\",\n@@ -495,7 +495,7 @@ Create and start the cluster:\nVerify the cluster status by using the ``openstack`` command\nline tool as follows:\n-.. code-block:: console\n+.. code-block::\n$ openstack dataprocessing cluster show my-cluster-1 -c Status\n+--------+--------+\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Make unclear code clear in user guide
https://docs.openstack.org/sahara/latest/user/quickstart.html
cannot show the CLI command; this patch fixes that issue.
Change-Id: I3bb41f86713facba2fa3811b1e4420e68d0f355a |
488,284 | 30.12.2021 16:29:04 | -28,800 | 257c58ef814f5bd9ca985c83b4eac42a93b69586 | Make hidden code appear in admin guide
cannot show the CLI command; this patch fixes that issue. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/admin/advanced-configuration-guide.rst",
"new_path": "doc/source/admin/advanced-configuration-guide.rst",
"diff": "@@ -32,7 +32,7 @@ internal IP. The option is ignored if ``proxy_command`` is not also set.\nFor example, the following parameter in the sahara configuration file\nwould be used if instances are accessed through a relay machine:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nproxy_command='ssh relay-machine-{tenant_id} nc {host} {port}'\n@@ -40,7 +40,7 @@ would be used if instances are accessed through a relay machine:\nWhereas the following shows an example of accessing instances though\na custom network namespace:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nproxy_command='ip netns exec ns_for_{network_id} nc {host} {port}'\n@@ -73,7 +73,7 @@ because some plugins (e.g. ``HDP``) determine hostname by ip.\nSahara also should be properly configured. In ``sahara.conf`` you must specify\ntwo config properties:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\n# Use Designate for internal and external hostnames resolution:\n@@ -109,7 +109,7 @@ through a few configuration parameters and user defined topology files.\nTo enable data-locality, set the ``enable_data_locality`` parameter to\n``true`` in the sahara configuration file\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nenable_data_locality=true\n@@ -118,7 +118,7 @@ With data locality enabled, you must now specify the topology files\nfor the Compute and Object Storage services. These files are\nspecified in the sahara configuration file as follows:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\ncompute_topology_file=/etc/sahara/compute.topology\n@@ -127,7 +127,7 @@ specified in the sahara configuration file as follows:\nThe ``compute_topology_file`` should contain mappings between Compute\nnodes and racks in the following format:\n-.. sourcecode:: cfg\n+.. code-block::\ncompute1 /rack1\ncompute2 /rack2\n@@ -139,7 +139,7 @@ OpenStack (``host`` column in admin list for instances).\nThe ``swift_topology_file`` should contain mappings between Object Storage\nnodes and racks in the following format:\n-.. sourcecode:: cfg\n+.. code-block::\nnode1 /rack1\nnode2 /rack2\n@@ -243,7 +243,7 @@ to use periodic tasks distribution, the following steps are required:\n``periodic_coordinator_backend_url`` parameter. For example, if the\nZooKeeper backend is being used:\n- .. sourcecode:: cfg\n+ .. code-block::\n[DEFAULT]\nperiodic_coordinator_backend_url=kazoo://IP:PORT\n@@ -251,7 +251,7 @@ to use periodic tasks distribution, the following steps are required:\n* Tooz extras should be installed. When using Zookeeper as coordination\nbackend, ``kazoo`` library should be installed. It can be done with pip:\n- .. sourcecode:: console\n+ .. code-block::\npip install tooz[zookeeper]\n@@ -260,7 +260,7 @@ to use periodic tasks distribution, the following steps are required:\n``periodic_workers_number`` parameter (only 1 thread will be launched by\ndefault). Example:\n- .. sourcecode:: cfg\n+ .. code-block::\n[DEFAULT]\nperiodic_workers_number=2\n@@ -269,7 +269,7 @@ to use periodic tasks distribution, the following steps are required:\nheartbeat execution (1 second by default). Heartbeats are needed to make\nsure that connection to the coordination backend is active. Example:\n- .. sourcecode:: cfg\n+ .. code-block::\n[DEFAULT]\ncoordinator_heartbeat_interval=2\n@@ -279,7 +279,7 @@ to use periodic tasks distribution, the following steps are required:\nbelongs to a particular engine. A larger number of replicas leads to better\ntask distribution across the set of engines. (40 by default). Example:\n- .. sourcecode:: cfg\n+ .. 
code-block::\n[DEFAULT]\nhash_ring_replicas_count=100\n@@ -302,7 +302,7 @@ to interface with the OpenStack Key Manager service. This library provides\nconfigurable access to a key manager. To configure sahara to use barbican as\nthe key manager, edit the sahara configuration file as follows:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nuse_barbican_key_manager=true\n@@ -314,7 +314,7 @@ attempt to find barbican in the Identity service's service catalog.\nFor added control of the barbican server location, optional configuration\nvalues may be added to specify the URL for the barbican API server.\n-.. sourcecode:: cfg\n+.. code-block::\n[castellan]\nbarbican_api_endpoint=http://{barbican controller IP:PORT}/\n@@ -360,7 +360,7 @@ instance of sahara should have the ``os_region_name=<region>``\nparameter set in the configuration file. The following example demonstrates\nconfiguring sahara to use the ``RegionOne`` region:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nos_region_name=RegionOne\n@@ -376,7 +376,7 @@ command), rootwrap functionality is provided to allow users other than\n``root`` access to the needed operating system facilities. To use rootwrap\nthe following configuration parameter is required to be set:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nuse_rootwrap=true\n@@ -390,7 +390,7 @@ steps:\n``/etc/sudoers.d``. This file is setup to allow a user named ``sahara``\naccess to the rootwrap script. It contains the following:\n-.. sourcecode:: cfg\n+.. code-block::\nsahara ALL = (root) NOPASSWD: /usr/bin/sahara-rootwrap /etc/sahara/rootwrap.conf *\n@@ -409,7 +409,7 @@ change user in script from ``sahara`` to ``stack``.\n(depending on ``proxy_command`` you may need to set additional filters).\nIt should look similar to the followings:\n-.. sourcecode:: cfg\n+.. code-block::\n[Filters]\nip: IpNetnsExecFilter, ip, root\n@@ -419,7 +419,7 @@ change user in script from ``sahara`` to ``stack``.\nIf you wish to use a rootwrap command other than ``sahara-rootwrap`` you can\nset the following parameter in your sahara configuration file:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nrootwrap_command='sudo sahara-rootwrap /etc/sahara/rootwrap.conf'\n@@ -467,7 +467,7 @@ this example let's assume that the name of the proxy domain is\n``sahara_proxy`` and the roles needed by proxy users will be ``member`` and\n``SwiftUser``.\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\nuse_domain_for_proxy_users=true\n@@ -524,7 +524,7 @@ set to ``true`` and some extra configurations are needed:\n* by designating an account with privileged rights in the cinder\nconfiguration:\n- .. sourcecode:: cfg\n+ .. code-block::\nos_privileged_user_name =\nos_privileged_user_password =\n@@ -561,7 +561,7 @@ server or disable NTP service using the examples below.\nIf you want to enable configuring the NTP service, you should specify the\nfollowing configs for the cluster:\n-.. sourcecode:: json\n+.. code-block::\n{\n\"cluster_configs\": {\n@@ -574,7 +574,7 @@ following configs for the cluster:\nIf you want to disable configuring NTP service, you should specify following\nconfigs for the cluster:\n-.. sourcecode:: json\n+.. code-block::\n{\n\"cluster_configs\": {\n@@ -596,7 +596,7 @@ To get started quickly, use the example configuration block below, replacing\nthe :code:`allowed origin` field with the host(s) from which your API expects\naccess.\n-.. sourcecode:: cfg\n+.. code-block::\n[cors]\nallowed_origin=https://we.example.com:443\n@@ -627,7 +627,7 @@ means that automatic clean up is disabled). 
For example, if you want cluster to\nbe deleted after 3 hours if it didn't leave \"Starting\" state then you should\nspecify:\n-.. sourcecode:: cfg\n+.. code-block::\n[DEFAULT]\ncleanup_time_for_incomplete_clusters = 3\n@@ -640,7 +640,7 @@ may be bigger than the default values configured in ``neutron.conf``. Then the\ndefault limit should be raised up to some bigger value which is proportional to\nthe number of cluster node groups. You can change it in ``neutron.conf`` file:\n-.. sourcecode:: cfg\n+.. code-block::\n[quotas]\nquota_security_group = 1000\n@@ -648,6 +648,6 @@ the number of cluster node groups. You can change it in ``neutron.conf`` file:\nOr you can execute openstack CLI command:\n-.. sourcecode:: console\n+.. code-block::\nopenstack quota set --secgroups 1000 --secgroup-rules 10000 $PROJECT_ID\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Make hidden code appear in admin guide
https://docs.openstack.org/sahara/latest/admin/advanced-configuration-guide.html
cannot show the CLI command; this patch fixes that issue.
Change-Id: I26b44442279e9a9b067ff3fd6e78d3d079dad919 |
488,284 | 30.12.2021 19:47:28 | -28,800 | e9e50bede6bf68adb434b7ad57e8dac164acd6ea | Let the code in the json file be displayed
cannot show the code clearly; this patch fixes that issue. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/edp.rst",
"new_path": "doc/source/user/edp.rst",
"diff": "@@ -332,7 +332,7 @@ A \"Description\" field may also be added to each interface argument.\nTo create such an interface via the REST API, provide an \"interface\" argument,\nthe value of which consists of a list of JSON objects, as below:\n-.. sourcecode:: json\n+.. code-block::\n[\n{\n@@ -349,7 +349,7 @@ the value of which consists of a list of JSON objects, as below:\nCreating this interface would allow you to specify a configuration for any\nexecution of the job template by passing an \"interface\" map similar to:\n-.. sourcecode:: json\n+.. code-block::\n{\n\"Rows\": \"1000000\",\n@@ -361,7 +361,7 @@ The specified arguments would be automatically placed into the args, configs,\nand params for the job, according to the mapping type and location fields of\neach interface argument. The final ``job_configs`` map would be:\n-.. sourcecode:: json\n+.. code-block::\n{\n\"job_configs\": {\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Let the code in the json file be displayed
https://docs.openstack.org/sahara/latest/user/edp.html
cannot show the code clearly; this patch fixes that issue.
Change-Id: Ie4f8add7e464c454c41d6c4084ec64d6b7cca3ee |
488,284 | 30.12.2021 19:56:10 | -28,800 | 4a7f7b462ae95e824b0fe3b5a7280cd4307c7580 | Solve the problem that the code in the xml file is not clear
cannot show the code clearly in the XML file; this patch fixes that issue. | [
{
"change_type": "MODIFY",
"old_path": "doc/source/user/hadoop-swift.rst",
"new_path": "doc/source/user/hadoop-swift.rst",
"diff": "@@ -40,7 +40,7 @@ Hadoop installation. In cases where a user might require more in-depth\nconfiguration all the data is set in the ``core-site.xml`` file on the cluster\ninstances using this template:\n-.. sourcecode:: xml\n+.. code-block::\n<property>\n<name>${name} + ${config}</name>\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Solve the problem that the code in the xml file is not clear
https://docs.openstack.org/sahara/latest/user/hadoop-swift.html
cannot show the code clearly in the XML file; this patch fixes that issue.
Change-Id: I7467e3a3665f882587adbaa8e6c50042bc00739c |
488,284 | 13.05.2022 18:35:56 | -28,800 | 7f3c2dd0b58ef0b771cdb41016a02fad56c9b9bd | Python3.6/3.7 are deleted in testing runtime
Python 3.6 and Python 3.7 are no longer supported by the Zed cycle tests; Python 3.9 was added when they were removed. | [
{
"change_type": "MODIFY",
"old_path": "setup.cfg",
"new_path": "setup.cfg",
"diff": "@@ -3,13 +3,12 @@ name = sahara\nsummary = Sahara project\ndescription_file = README.rst\nlicense = Apache Software License\n-python_requires = >=3.6\n+python_requires = >=3.8\nclassifiers =\nProgramming Language :: Python\nProgramming Language :: Python :: 3\n- Programming Language :: Python :: 3.6\n- Programming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\n+ Programming Language :: Python :: 3.9\nEnvironment :: OpenStack\nIntended Audience :: Information Technology\nIntended Audience :: System Administrators\n"
}
] | Python | Apache License 2.0 | openstack/sahara | Python3.6/3.7 are deleted in testing runtime
Python 3.6 and Python 3.7 are no longer supported by the Zed cycle tests; Python 3.9 was added when they were removed.
Change-Id: Ie3f0da2775b59df8d01e560b4e507cd331948ae7 |
89,733 | 02.02.2017 13:24:14 | -3,600 | fc1187ef1d6c5d6bbb0c25d68a7397cd30c8a863 | fix: display html fix on reaction knockin target | [
{
"change_type": "MODIFY",
"old_path": "cameo/core/target.py",
"new_path": "cameo/core/target.py",
"diff": "@@ -265,7 +265,7 @@ class ReactionKnockinTarget(KnockinTarget):\nreturn \"<ReactionKnockin %s>\" % self.id\ndef _repr_html_(self):\n- return \"::%s\"\n+ return \"::%s\" % self.id\nclass GeneModulationTarget(FluxModulationTarget):\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: display html fix on reaction knockin target (#109) |
89,733 | 03.02.2017 12:11:55 | -3,600 | 873f44d832d844a6eeea116e0ebf77e6e663d18e | fix: give a list to remove reactions so it doesn't display warnings | [
{
"change_type": "MODIFY",
"old_path": "cameo/core/target.py",
"new_path": "cameo/core/target.py",
"diff": "@@ -230,7 +230,7 @@ class ReactionKnockinTarget(KnockinTarget):\nmodel.add_reaction(self._value)\nelse:\ntime_machine(do=partial(model.add_reaction, self._value),\n- undo=partial(model.remove_reactions, self._value, delete=False, remove_orphans=True))\n+ undo=partial(model.remove_reactions, [self._value], delete=False, remove_orphans=True))\ndef to_gnomic(self):\naccession = Target.to_gnomic(self)\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: give a list to remove reactions so it doesn't display warnings (#110) |
89,735 | 28.02.2017 15:29:40 | -3,600 | 8b29fd6c94aea2497aa7ef32f3504f6e9d21de00 | fix broken release plan
* fix broken release plan
skip automatic github deploy for now
unit tests must not edit any files or versioneer will make it
a 'dirty release' | [
{
"change_type": "MODIFY",
"old_path": ".travis.yml",
"new_path": ".travis.yml",
"diff": "@@ -46,23 +46,24 @@ env:\nglobal:\nsecure: QgrOXEgpcH6xgToVfWIX6j6CPvycKMPtNnoYAxPrZjkMzd2aCHHeokv0FZkCn3uePO0I8W8TkKBxilGZbWYoseDq+Snds18sBTG9u2NHvYHnDQb4Oki7+NoxhlnGIOj/8ADONOpc0n7PyFDPK8zmKVZvv9p78OHZO5CmV/ktOeg=\ndeploy:\n- - provider: releases\n- api_key:\n- secure: VsKdkwYvp7lf65S/pzLWbrk8PaRAzBVClB57s4jYepx+BbJdPJi5Zwz4zmu0Ifa1K7K2Jh0rITV9GZAyC+0Eq2ffXtZsBOsC5+2yKaWV5WiU7kNdUVhD9EFkUaNknT8+B2/sjPsl+GP8DTzCqstgdGo5EAAnvpV53qIAwwF9n0U=\n- file_glob: true\n- file: dist/cameo*.whl\n- skip_cleanup: true\n- on:\n- branch: master\n- tags: true\n- repo: biosustain/cameo\n- provider: pypi\nuser: Nikolaus.Sonnenschein\npassword:\nsecure: nxjszXtUzQfnLlfg0cmFjd9gRekXDog6dkkN1rMc7CIWH2gZ1gAX4sNETVChnuSmu9egzhuIkviHstRrdyGoEZ7ZkHlTXmpVAs9AY96eMSejnwHHODhYno0jB7DjGcfejodLF+lo6lWz7S7mXXwML6YLM3xxG+AOjLHlHbPTaKc=\ndistributions: sdist bdist_wheel\n+ skip_cleanup: true\non:\nbranch: master\ntags: true\nrepo: biosustain/cameo\ndocs_dir: docs/_build/html\n+# - provider: releases\n+# api_key:\n+# secure: VsKdkwYvp7lf65S/pzLWbrk8PaRAzBVClB57s4jYepx+BbJdPJi5Zwz4zmu0Ifa1K7K2Jh0rITV9GZAyC+0Eq2ffXtZsBOsC5+2yKaWV5WiU7kNdUVhD9EFkUaNknT8+B2/sjPsl+GP8DTzCqstgdGo5EAAnvpV53qIAwwF9n0U=\n+# file_glob: true\n+# file: dist/cameo*.whl\n+# skip_cleanup: true\n+# on:\n+# all_branches: true\n+# tags: true\n+# repo: biosustain/cameo\n"
},
{
"change_type": "DELETE",
"old_path": "tests/data/reaction_knockout_multi_objective.pkl",
"new_path": "tests/data/reaction_knockout_multi_objective.pkl",
"diff": "Binary files a/tests/data/reaction_knockout_multi_objective.pkl and /dev/null differ\n"
},
{
"change_type": "DELETE",
"old_path": "tests/data/reaction_knockout_single_objective.pkl",
"new_path": "tests/data/reaction_knockout_single_objective.pkl",
"diff": "Binary files a/tests/data/reaction_knockout_single_objective.pkl and /dev/null differ\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_strain_design_heuristics.py",
"new_path": "tests/test_strain_design_heuristics.py",
"diff": "@@ -19,6 +19,7 @@ import pickle\nimport unittest\nfrom collections import namedtuple\nfrom math import sqrt\n+from tempfile import mkstemp\nimport inspyred\nimport numpy\n@@ -876,7 +877,8 @@ class TestReactionKnockoutOptimization(unittest.TestCase):\n# @unittest.skipIf(os.getenv('TRAVIS', False) or 'cplex' not in solvers, 'Missing cplex (or Travis)')\ndef test_run_single_objective(self):\n- result_file = os.path.join(CURRENT_PATH, \"data\", \"reaction_knockout_single_objective.pkl\")\n+ # TODO: make optlang deterministic so this results can be permanently stored.\n+ _, result_file = mkstemp('.pkl')\nobjective = biomass_product_coupled_yield(\n\"Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2\",\n\"EX_ac_LPAREN_e_RPAREN_\",\n@@ -901,7 +903,8 @@ class TestReactionKnockoutOptimization(unittest.TestCase):\n# @unittest.skipIf(os.getenv('TRAVIS', False) or 'cplex' not in solvers, 'Missing cplex (or Travis)')\ndef test_run_multiobjective(self):\n- result_file = os.path.join(CURRENT_PATH, \"data\", \"reaction_knockout_multi_objective.pkl\")\n+ # TODO: make optlang deterministic so this results can be permanently stored.\n+ _, result_file = mkstemp('.pkl')\nobjective1 = biomass_product_coupled_yield(\n\"Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2\",\n\"EX_ac_LPAREN_e_RPAREN_\",\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix broken release plan (#120)
* fix broken release plan
- skip automatic github deploy for now
- unit tests must not edit any files or versioneer will make it
a 'dirty release' |
89,733 | 07.03.2017 13:04:00 | -3,600 | 8dbcbe5e8c8f158f00a6fdccecee24db812de348 | fix: fva flags
* fix: fva flags
Now that it runs by getting all the `min` and then all the `max`,
the lower_bound flags need to be stored and evaluated in the second iteration.
I added a dictionary to store the lb_flag.
* fix: minor performance improvement | [
{
"change_type": "MODIFY",
"old_path": "cameo/flux_analysis/analysis.py",
"new_path": "cameo/flux_analysis/analysis.py",
"diff": "@@ -27,6 +27,7 @@ import six\nfrom cobra.core import Reaction, Metabolite\nfrom numpy import trapz\nfrom six.moves import zip\n+from sympy import S\nimport cameo\nfrom cameo import config\n@@ -37,7 +38,6 @@ from cameo.parallel import SequentialView\nfrom cameo.ui import notice\nfrom cameo.util import TimeMachine, partition, _BIOMASS_RE_\nfrom cameo.visualization.plotting import plotter\n-from sympy import S\nlogger = logging.getLogger(__name__)\n@@ -226,25 +226,34 @@ def _flux_variability_analysis(model, reactions=None):\nelse:\nreactions = model._ids_to_reactions(reactions)\nfva_sol = OrderedDict()\n- [lb_flag, ub_flag] = [False, False]\n+ lb_flags = dict()\nwith TimeMachine() as tm:\nmodel.change_objective(S.Zero, time_machine=tm)\n+\n+ model.objective.direction = 'min'\nfor reaction in reactions:\n+ lb_flags[reaction.id] = False\nfva_sol[reaction.id] = dict()\n- model.solver.objective.set_linear_coefficients({reaction.forward_variable: 1., reaction.reverse_variable: -1.})\n- model.objective.direction = 'min'\n+ model.solver.objective.set_linear_coefficients({reaction.forward_variable: 1.,\n+ reaction.reverse_variable: -1.})\ntry:\nsolution = model.solve()\nfva_sol[reaction.id]['lower_bound'] = solution.f\nexcept Unbounded:\nfva_sol[reaction.id]['lower_bound'] = -numpy.inf\nexcept Infeasible:\n- lb_flag = True\n- model.solver.objective.set_linear_coefficients({reaction.forward_variable: 0., reaction.reverse_variable: 0.})\n+ lb_flags[reaction.id] = True\n+ model.solver.objective.set_linear_coefficients({reaction.forward_variable: 0.,\n+ reaction.reverse_variable: 0.})\n+\n+ assert model.objective.expression == 0, model.objective.expression\n- for reaction in reactions:\n- model.solver.objective.set_linear_coefficients({reaction.forward_variable: 1., reaction.reverse_variable: -1.})\nmodel.objective.direction = 'max'\n+ for reaction in reactions:\n+ ub_flag = False\n+ model.solver.objective.set_linear_coefficients({reaction.forward_variable: 1.,\n+ reaction.reverse_variable: -1.})\n+\ntry:\nsolution = model.solve()\nfva_sol[reaction.id]['upper_bound'] = solution.f\n@@ -253,17 +262,23 @@ def _flux_variability_analysis(model, reactions=None):\nexcept Infeasible:\nub_flag = True\n- if lb_flag is True and ub_flag is True:\n+ if lb_flags[reaction.id] is True and ub_flag is True:\nfva_sol[reaction.id]['lower_bound'] = 0\nfva_sol[reaction.id]['upper_bound'] = 0\n- [lb_flag, ub_flag] = [False, False]\n- elif lb_flag is True and ub_flag is False:\n+ [lb_flags[reaction.id], ub_flag] = [False, False]\n+ elif lb_flags[reaction.id] is True and ub_flag is False:\nfva_sol[reaction.id]['lower_bound'] = fva_sol[reaction.id]['upper_bound']\n- lb_flag = False\n- elif lb_flag is False and ub_flag is True:\n+ lb_flags[reaction.id] = False\n+ elif lb_flags[reaction.id] is False and ub_flag is True:\nfva_sol[reaction.id]['upper_bound'] = fva_sol[reaction.id]['lower_bound']\nub_flag = False\n- model.solver.objective.set_linear_coefficients({reaction.forward_variable: 0., reaction.reverse_variable: 0.})\n+\n+ model.solver.objective.set_linear_coefficients({reaction.forward_variable: 0.,\n+ reaction.reverse_variable: 0.})\n+\n+ assert model.objective.expression == 0, model.objective.expression\n+\n+ assert lb_flags[reaction.id] is False and ub_flag is False, \"Something is wrong with FVA (%s)\" % reaction.id\ndf = pandas.DataFrame.from_dict(fva_sol, orient='index')\nlb_higher_ub = df[df.lower_bound > df.upper_bound]\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: fva flags (#123)
* fix: fva flags
Now that it runs by getting all the `min` and then all the `max`,
the lower_bound flags need to be stored and evaluated in the second iteration.
I added a dictionary to store the lb_flag.
* fix: minor performance improvement |
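A toy sketch of the bookkeeping the fix above introduces, with stub solver callbacks instead of cameo's LP machinery: because all minimisations now run before all maximisations, the "lower bound was infeasible" flag has to be remembered per reaction rather than in one shared variable.

```python
class Infeasible(Exception):
    pass


def fva_bounds(reactions, minimize, maximize):
    """Two-pass FVA skeleton: all minima first, then all maxima."""
    lb_flags, result = {}, {}
    for rxn in reactions:                      # pass 1: lower bounds
        try:
            result[rxn] = {'lower_bound': minimize(rxn)}
            lb_flags[rxn] = False
        except Infeasible:
            result[rxn] = {}
            lb_flags[rxn] = True               # remembered per reaction
    for rxn in reactions:                      # pass 2: upper bounds
        try:
            ub, ub_flag = maximize(rxn), False
        except Infeasible:
            ub, ub_flag = None, True
        if lb_flags[rxn] and ub_flag:          # both directions infeasible
            result[rxn] = {'lower_bound': 0, 'upper_bound': 0}
        elif lb_flags[rxn]:                    # only the minimum was infeasible
            result[rxn] = {'lower_bound': ub, 'upper_bound': ub}
        elif ub_flag:                          # only the maximum was infeasible
            result[rxn]['upper_bound'] = result[rxn]['lower_bound']
        else:
            result[rxn]['upper_bound'] = ub
    return result


def toy_min(rxn):
    if rxn == 'R2':                            # pretend R2's minimisation fails
        raise Infeasible()
    return -1.0


def toy_max(rxn):
    return 2.0


print(fva_bounds(['R1', 'R2'], toy_min, toy_max))
# {'R1': {'lower_bound': -1.0, 'upper_bound': 2.0},
#  'R2': {'lower_bound': 2.0, 'upper_bound': 2.0}}
```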
89,733 | 07.03.2017 13:20:10 | -3,600 | 6c6a1b97d56b69f30f8128816a55bc71e6952554 | fix: cameo.api.products.search is now case insensitive | [
{
"change_type": "MODIFY",
"old_path": "cameo/api/products.py",
"new_path": "cameo/api/products.py",
"diff": "@@ -73,8 +73,11 @@ class Products(object):\nraise Exception(\"No compound matches found for query %s\" % query)\ndef _search_by_name_fuzzy(self, name):\n- matches = difflib.get_close_matches(name, self.data_frame.name.dropna(), n=5, cutoff=.8)\n- ranks = dict([(match, i) for i, match in enumerate(matches)])\n+ original_possibilities = self.data_frame.name.dropna()\n+ possibilities_mapping = {original_name.lower(): original_name for original_name in original_possibilities}\n+ matches = difflib.get_close_matches(name.lower(), list(possibilities_mapping.keys()), n=5, cutoff=.8)\n+ matches = [possibilities_mapping[match] for match in matches]\n+ ranks = {match: i for i, match in enumerate(matches)}\nselection = DataFrame(self.data_frame[self.data_frame.name.isin(matches)])\nselection['search_rank'] = selection.name.map(ranks)\nreturn selection.sort_values('search_rank')\n@@ -88,7 +91,7 @@ class Products(object):\ndef _search_by_inchi_fuzzy(self, inchi):\n# TODO: use openbabel if available\nmatches = difflib.get_close_matches(inchi, self.data_frame.InChI.dropna(), n=5, cutoff=.8)\n- ranks = dict([(match, i) for i, match in enumerate(matches)])\n+ ranks = {match: i for i, match in enumerate(matches)}\nselection = DataFrame(self.data_frame[self.data_frame.InChI.isin(matches)])\nselection['search_rank'] = selection.name.map(ranks)\nreturn selection.sort_values('search_rank')\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: cameo.api.products.search is now case insensitive (#126) |
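The trick in this commit generalises well; a small self-contained reproduction with made-up compound names (not cameo's metabolite table): match on lower-cased strings, then map the hits back to the original spelling.

```python
import difflib

names = ['L-Lysine', 'Succinate', 'Vanillin']
mapping = {name.lower(): name for name in names}


def fuzzy_search(query, n=5, cutoff=0.8):
    hits = difflib.get_close_matches(query.lower(), list(mapping), n=n, cutoff=cutoff)
    return [mapping[hit] for hit in hits]     # back to the original casing


print(fuzzy_search('l-lysine'))   # ['L-Lysine'], despite the case difference
print(fuzzy_search('Vanilin'))    # ['Vanillin'], fuzzy matching still applies
```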
89,735 | 12.03.2017 13:14:51 | -3,600 | 467fab320385967015017e810326690c4e18fe3b | feat: option to constrain max flux sum during fva
* feat: option to constrain max flux sum during fva
When doing FVA, fixing the max flux sum may yield more realistic bounds. Add
an option in FVA to accomplish this.
* fix: pin cobrapy
* implement review from | [
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -50,3 +50,4 @@ docs/_build\n.vagrant\n*~\n+/.DS_Store\n"
},
{
"change_type": "MODIFY",
"old_path": "cameo/flux_analysis/simulation.py",
"new_path": "cameo/flux_analysis/simulation.py",
"diff": "@@ -34,6 +34,7 @@ import cameo\nimport logging\nfrom functools import partial\n+from itertools import chain\nimport sympy\nfrom sympy import Add\n@@ -49,7 +50,6 @@ from cameo.visualization.palette import mapper, Palette\n__all__ = ['fba', 'pfba', 'moma', 'lmoma', 'room']\n-\nlogger = logging.getLogger(__name__)\nadd = Add._from_args\n@@ -86,14 +86,55 @@ def fba(model, objective=None, reactions=None, *args, **kwargs):\nreturn result\n+def add_pfba(model, objective=None, fraction_of_optimum=1.0, time_machine=None):\n+ \"\"\"Add pFBA objective\n+\n+ Add objective to minimize the summed flux of all reactions to the\n+ current objective.\n+\n+ Parameters\n+ ----------\n+ model : cameo.core.SolverBasedModel\n+ The model to add the objective to\n+ objective :\n+ An objective to set in combination with the pFBA objective.\n+ fraction_of_optimum : float\n+ Fraction of optimum which must be maintained. The original objective\n+ reaction is constrained to be greater than maximal_value *\n+ fraction_of_optimum.\n+ time_machine : cameo.util.TimeMachine\n+ A time machine to undo the added pFBA objective\n+ \"\"\"\n+ if objective is not None:\n+ model.objective = objective\n+ if model.solver.objective.name == '_pfba_objective':\n+ raise ValueError('model already has pfba objective')\n+ if fraction_of_optimum > 0:\n+ model.fix_objective_as_constraint(fraction=fraction_of_optimum, time_machine=time_machine)\n+ reaction_variables = ((rxn.forward_variable, rxn.reverse_variable)\n+ for rxn in model.reactions)\n+ variables = chain(*reaction_variables)\n+ pfba_objective = model.solver.interface.Objective(add(\n+ [mul((sympy.singleton.S.One, variable))\n+ for variable in variables]), direction='min', sloppy=True,\n+ name=\"_pfba_objective\")\n+ model.change_objective(pfba_objective, time_machine=time_machine)\n+\n+\ndef pfba(model, objective=None, reactions=None, fraction_of_optimum=1, *args, **kwargs):\n\"\"\"Parsimonious Enzyme Usage Flux Balance Analysis [1].\nParameters\n----------\n- model: SolverBasedModel\n+ model : cameo.core.SolverBasedModel\n+ The model to perform pFBA with\nobjective: str or reaction or optlang.Objective\nAn objective to be minimized/maximized for\n+ reactions : list\n+ list of reactions to get results for. 
Getting fluxes from solution can be time consuming so if not all are\n+ needed it may be faster to request specific reactions.\n+ fraction_of_optimum : float\n+ Fix the value of the current objective to a fraction of is maximum.\nReturns\n-------\n@@ -108,16 +149,7 @@ def pfba(model, objective=None, reactions=None, fraction_of_optimum=1, *args, **\n\"\"\"\nwith TimeMachine() as tm:\n- original_objective = model.objective\n- if objective is not None:\n- tm(do=partial(setattr, model, 'objective', objective),\n- undo=partial(setattr, model, 'objective', original_objective))\n- model.fix_objective_as_constraint(time_machine=tm, fraction=fraction_of_optimum)\n- pfba_obj = model.solver.interface.Objective(add(\n- [mul((sympy.singleton.S.One, variable)) for variable in list(model.solver.variables.values())]),\n- direction='min', sloppy=True)\n- tm(do=partial(setattr, model, 'objective', pfba_obj),\n- undo=partial(setattr, model, 'objective', original_objective))\n+ add_pfba(model, objective=objective, fraction_of_optimum=fraction_of_optimum, time_machine=tm)\ntry:\nsolution = model.solve()\nif reactions is not None:\n@@ -411,6 +443,7 @@ class FluxDistributionResult(Result):\n\"\"\"\n+\n@classmethod\ndef from_solution(cls, solution, *args, **kwargs):\nreturn cls(solution.fluxes, solution.f, *args, **kwargs)\n"
},
{
"change_type": "MODIFY",
"old_path": "cameo/flux_analysis/util.py",
"new_path": "cameo/flux_analysis/util.py",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-__all__ = ['remove_infeasible_cycles']\n-\nfrom functools import partial\nfrom cameo.exceptions import SolveError\n@@ -22,9 +20,12 @@ from cameo.util import TimeMachine\nimport sympy\nfrom sympy import Add, Mul\n+from cameo.flux_analysis.simulation import add_pfba\nimport logging\n+__all__ = ['remove_infeasible_cycles', 'fix_pfba_as_constraint']\n+\nFloatOne = sympy.Float(1)\nlogger = logging.getLogger(__name__)\n@@ -95,5 +96,38 @@ def remove_infeasible_cycles(model, fluxes, fix=()):\nlogger.warning(\"Couldn't remove cycles from reference flux distribution.\")\nraise e\nresult = solution.x_dict\n-\nreturn result\n+\n+\n+def fix_pfba_as_constraint(model, multiplier=1, fraction_of_optimum=1, time_machine=None):\n+ \"\"\"Fix the pFBA optimum as a constraint\n+\n+ Useful when setting other objectives, like the maximum flux through given reaction may be more realistic if all\n+ other fluxes are not allowed to reach their full upper bounds, but collectively constrained to max sum.\n+\n+ Parameters\n+ ----------\n+ model : cameo.core.SolverBasedModel\n+ The model to add the pfba constraint to\n+ multiplier : float\n+ The multiplier of the minimal sum of all reaction fluxes to use as the constraint.\n+ fraction_of_optimum : float\n+ The fraction of the objective value's optimum to use as constraint when getting the pFBA objective's minimum\n+ time_machine : TimeMachine, optional\n+ A TimeMachine instance can be provided, making it easy to undo this modification.\n+ \"\"\"\n+\n+ fix_constraint_name = '_fixed_pfba_constraint'\n+ if fix_constraint_name in model.solver.constraints:\n+ model.solver.remove(fix_constraint_name)\n+ with TimeMachine() as tm:\n+ add_pfba(model, time_machine=tm, fraction_of_optimum=fraction_of_optimum)\n+ pfba_objective_value = model.optimize().objective_value * multiplier\n+ constraint = model.solver.interface.Constraint(model.objective.expression,\n+ name=fix_constraint_name,\n+ ub=pfba_objective_value)\n+ if time_machine is None:\n+ model.solver._add_constraint(constraint, sloppy=True)\n+ else:\n+ time_machine(do=partial(model.solver._add_constraint, constraint, sloppy=True),\n+ undo=partial(model.solver.remove, constraint))\n"
},
{
"change_type": "MODIFY",
"old_path": "requirements.txt",
"new_path": "requirements.txt",
"diff": "@@ -4,7 +4,7 @@ blessings>=1.5.1\npandas>=0.15.2\nordered-set==1.2\ninspyred>=1.0\n-cobra>=0.4.0b6\n+cobra==0.5.11\noptlang>=0.3.0\nescher>=1.0.0\nnumexpr>=2.4\n"
},
{
"change_type": "MODIFY",
"old_path": "requirements_dev.txt",
"new_path": "requirements_dev.txt",
"diff": "@@ -12,7 +12,7 @@ blessings>=1.5.1\npandas>=0.15.2\nordered-set>=1.2\ninspyred>=1.0\n-cobra>=0.4.0b6\n+cobra==0.5.11\noptlang>=0.3.0\nescher>=1.0.0\nnumexpr>=2.4\n"
},
{
"change_type": "MODIFY",
"old_path": "requirements_rtd.txt",
"new_path": "requirements_rtd.txt",
"diff": "sphinx-rtd-theme>=0.1.6\nnumpydoc>=0.5\n-cobra==0.3.2\n+cobra==0.5.11\nsympy==0.7.5\noptlang==0.2.9\nmock>=1.0.1\n"
},
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -28,7 +28,7 @@ requirements = ['numpy>=1.9.1',\n'blessings>=1.5.1',\n'pandas>=0.15.2',\n'ordered-set>=1.2',\n- 'cobra>=0.4.1',\n+ 'cobra==0.5.11',\n'optlang>=0.4.2',\n'requests>=2.5.0',\n'numexpr>=2.4',\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_flux_analysis.py",
"new_path": "tests/test_flux_analysis.py",
"diff": "@@ -30,10 +30,10 @@ from sympy import Add\nimport cameo\nfrom cameo.config import solvers\n-from cameo.flux_analysis import remove_infeasible_cycles\n+from cameo.flux_analysis import remove_infeasible_cycles, fix_pfba_as_constraint\nfrom cameo.flux_analysis import structural\nfrom cameo.flux_analysis.analysis import flux_variability_analysis, phenotypic_phase_plane, find_blocked_reactions\n-from cameo.flux_analysis.simulation import fba, pfba, lmoma, room, moma\n+from cameo.flux_analysis.simulation import fba, pfba, lmoma, room, moma, add_pfba\nfrom cameo.flux_analysis.structural import nullspace\nfrom cameo.io import load_model\nfrom cameo.parallel import SequentialView, MultiprocessingView\n@@ -94,9 +94,24 @@ class Wrapper:\nmp_view = MultiprocessingView(2)\nfva_solution = flux_variability_analysis(self.ecoli_core, fraction_of_optimum=0.999999419892,\nremove_cycles=False, view=mp_view)\n+ pfba_fva = flux_variability_analysis(self.ecoli_core, fraction_of_optimum=1, pfba_factor=1,\n+ view=mp_view).data_frame\nmp_view.shutdown()\nassert_data_frames_equal(fva_solution, REFERENCE_FVA_SOLUTION_ECOLI_CORE)\nself.assertEqual(original_objective, self.ecoli_core.objective)\n+ self.assertAlmostEqual(sum(abs(pfba_fva.lower_bound)), 518.422, delta=0.001)\n+ self.assertAlmostEqual(sum(abs(pfba_fva.upper_bound)), 518.422, delta=0.001)\n+\n+ def test_add_remove_pfb(self):\n+ with TimeMachine() as tm:\n+ add_pfba(self.ecoli_core, time_machine=tm)\n+ self.assertEquals('_pfba_objective', self.ecoli_core.objective.name)\n+ self.assertNotEqual('_pfba_objective', self.ecoli_core.solver.constraints)\n+ with TimeMachine() as tm:\n+ fix_pfba_as_constraint(self.ecoli_core, time_machine=tm)\n+ self.assertTrue('_fixed_pfba_constraint' in self.ecoli_core.solver.constraints)\n+ self.assertTrue('_fixed_pfba_constraint' not in self.ecoli_core.solver.constraints)\n+\[email protected](TRAVIS, 'Skip multiprocessing in Travis')\ndef test_flux_variability_parallel_remove_cycles(self):\n@@ -118,14 +133,11 @@ class Wrapper:\noriginal_objective = self.ecoli_core.objective\nfva_solution = flux_variability_analysis(self.ecoli_core, fraction_of_optimum=0.999999419892,\nremove_cycles=False, view=SequentialView())\n- assert_data_frames_equal(fva_solution, REFERENCE_FVA_SOLUTION_ECOLI_CORE)\n- for key in fva_solution.data_frame.index:\n- self.assertAlmostEqual(fva_solution['lower_bound'][key],\n- REFERENCE_FVA_SOLUTION_ECOLI_CORE['lower_bound'][key], delta=0.00001)\n- self.assertAlmostEqual(fva_solution['upper_bound'][key],\n- REFERENCE_FVA_SOLUTION_ECOLI_CORE['upper_bound'][key], delta=0.00001)\n+ pfba_fva = flux_variability_analysis(self.ecoli_core, fraction_of_optimum=1, pfba_factor=1).data_frame\nassert_data_frames_equal(fva_solution, REFERENCE_FVA_SOLUTION_ECOLI_CORE)\nself.assertEqual(original_objective, self.ecoli_core.objective)\n+ self.assertAlmostEqual(sum(abs(pfba_fva.lower_bound)), 518.422, delta=0.001)\n+ self.assertAlmostEqual(sum(abs(pfba_fva.upper_bound)), 518.422, delta=0.001)\ndef test_flux_variability_sequential_remove_cycles(self):\noriginal_objective = self.ecoli_core.objective\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | feat: option to constrain max flux sum during fva (#131)
* feat: option to constrain max flux sum during fva
when doing fva, fixing max flux sum may yield more realistic bounds. Add
option in fva to accomplish this.
* fix: pin cobrapy
* implement review from @midnighter |
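Taken together, `add_pfba`, `fix_pfba_as_constraint` and the `pfba_factor` argument exercised in the tests above let flux ranges be computed while the summed flux is capped at (a multiple of) its parsimonious minimum, which tends to give tighter, more realistic bounds. A hedged usage sketch — the model identifier is illustrative, and forwarding `pfba_factor` to the pFBA constraint is assumed to behave as in the tests:

```python
from cameo import load_model
from cameo.flux_analysis import fix_pfba_as_constraint
from cameo.flux_analysis.analysis import flux_variability_analysis
from cameo.util import TimeMachine

model = load_model("e_coli_core")  # illustrative model identifier

# Convenience route: let FVA add the pFBA bound itself via pfba_factor.
pfba_fva = flux_variability_analysis(model, fraction_of_optimum=1,
                                     pfba_factor=1).data_frame

# Explicit route: add the '_fixed_pfba_constraint' and undo it afterwards.
with TimeMachine() as tm:
    fix_pfba_as_constraint(model, multiplier=1, time_machine=tm)
    constrained = flux_variability_analysis(model, fraction_of_optimum=1).data_frame
# the constraint is removed again when the block exits

print(pfba_fva[["lower_bound", "upper_bound"]].abs().sum())
```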
89,733 | 15.03.2017 14:44:23 | -3,600 | 8c14270d5181874b586d3976873bc9790daff599 | fix: evaluation was inversed | [
{
"change_type": "MODIFY",
"old_path": "cameo/strain_design/deterministic/flux_variability_based.py",
"new_path": "cameo/strain_design/deterministic/flux_variability_based.py",
"diff": "@@ -287,7 +287,7 @@ class DifferentialFVA(StrainDesignMethod):\nincluded_reactions = [reaction.id for reaction in self.reference_model.reactions if\nreaction.id not in self.exclude] + self.variables + [self.objective]\n- self.reference_flux_dist = pfba(self.reference_model)\n+ self.reference_flux_dist = pfba(self.reference_model, fraction_of_optimum=0.99)\nself.reference_flux_ranges = flux_variability_analysis(self.reference_model, reactions=included_reactions,\nview=view, remove_cycles=False,\n@@ -455,11 +455,11 @@ class DifferentialFVAResult(StrainDesignMethodResult):\nelif relevant_row.flux_reversal:\nif reference_fva['upper_bound'][rid] > 0:\ntargets.append(ReactionInversionTarget(rid,\n- value=relevant_row.upper_bound,\n+ value=float_ceil(relevant_row.upper_bound, ndecimals),\nreference_value=reference_fluxes[rid]))\nelse:\ntargets.append(ReactionInversionTarget(rid,\n- value=relevant_row.lower_bound,\n+ value=float_floor(relevant_row.lower_bound, ndecimals),\nreference_value=reference_fluxes[rid]))\nelse:\ngap_sign = relevant_row.normalized_gaps > 0\n@@ -470,9 +470,9 @@ class DifferentialFVAResult(StrainDesignMethodResult):\nclosest_bound, ref_sign = cls._closest_bound(ref_interval, row_interval)\nif gap_sign ^ ref_sign:\n- value = float_floor(relevant_row.lower_bound)\n+ value = float_ceil(relevant_row.upper_bound, ndecimals)\nelse:\n- value = float_ceil(relevant_row.upper_bound)\n+ value = float_floor(relevant_row.lower_bound, ndecimals)\ntargets.append(ReactionModulationTarget(rid,\nvalue=value,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_strain_design_deterministic.py",
"new_path": "tests/test_strain_design_deterministic.py",
"diff": "@@ -22,10 +22,13 @@ from pandas import DataFrame\nfrom pandas.util.testing import assert_frame_equal\nimport cameo\n+from cameo import fba\nfrom cameo import load_model\nfrom cameo.config import solvers\n+from cameo.exceptions import Infeasible\nfrom cameo.strain_design.deterministic.flux_variability_based import FSEOF, FSEOFResult, DifferentialFVA\nfrom cameo.strain_design.deterministic.linear_programming import OptKnock\n+from cameo.util import TimeMachine\nTRAVIS = os.getenv('TRAVIS', False)\nTESTDIR = os.path.dirname(__file__)\n@@ -73,6 +76,20 @@ class TestDifferentialFVA(unittest.TestCase):\n\"O\").sort_index(axis=1)\npandas.util.testing.assert_frame_equal(result.data_frame.iloc[0].sort_index(axis=1), ref_df)\n+ def test_apply_designs(self):\n+ result = DifferentialFVA(self.model, self.model.reactions.EX_succ_lp_e_rp_, points=5).run()\n+ works = []\n+ for strain_design in result:\n+ with TimeMachine() as tm:\n+ strain_design.apply(self.model, tm)\n+ try:\n+ solution = fba(self.model, objective=\"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2\")\n+ works.append(solution[\"EX_succ_lp_e_rp_\"] > 1e-6 and solution.objective_value > 1e-6)\n+ except Infeasible:\n+ works.append(False)\n+\n+ self.assertTrue(any(works))\n+\ndef test_with_reference_model(self):\nreference_model = self.model.copy()\nbiomass_rxn = reference_model.reactions.Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: evaluation was inversed (#136) |
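The `test_apply_designs` test added here also documents the intended workflow: every strain design produced by `DifferentialFVA` is applied inside a `TimeMachine` context, evaluated with FBA, and rolled back automatically. A sketch following that test — the model file and the reaction identifiers belong to the test fixture and would differ for other models:

```python
from cameo import fba, load_model
from cameo.exceptions import Infeasible
from cameo.strain_design.deterministic.flux_variability_based import DifferentialFVA
from cameo.util import TimeMachine

model = load_model("EcoliCore.xml")  # placeholder path for the core E. coli test model
result = DifferentialFVA(model, model.reactions.EX_succ_lp_e_rp_, points=5).run()

for design in result:
    with TimeMachine() as tm:
        design.apply(model, tm)  # modifications are undone when the block exits
        try:
            solution = fba(model, objective="Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2")
            print(solution["EX_succ_lp_e_rp_"], solution.objective_value)
        except Infeasible:
            print("design renders the model infeasible")
```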
89,737 | 27.03.2017 10:50:19 | -7,200 | 62fdf13f0bb7c9e4c4c291cdae05bc8d0bd4273b | fix: designer workflow
Fewer iterations and fewer pathways.
Fail gracefully if the compound is in products but not in the database | [
{
"change_type": "MODIFY",
"old_path": "cameo/api/designer.py",
"new_path": "cameo/api/designer.py",
"diff": "@@ -73,7 +73,7 @@ class _OptimizationRunner(object):\nclass _OptGeneRunner(_OptimizationRunner):\ndef __call__(self, strategy):\n- max_evaluations = 20000\n+ max_evaluations = 15000\nif self.debug:\nmax_evaluations = 1000\n@@ -98,7 +98,7 @@ class _OptGeneRunner(_OptimizationRunner):\nclass _DifferentialFVARunner(_OptimizationRunner):\ndef __call__(self, strategy):\n- points = 50\n+ points = 30\nsurface_only = False\nif self.debug:\npoints = 5\n@@ -170,7 +170,7 @@ class Designer(object):\nnotice(\"Starting searching for compound %s\" % product)\ntry:\nproduct = self.__translate_product_to_universal_reactions_model_metabolite(product, database)\n- except KeyError:\n+ except Exception:\nraise KeyError(\"Product %s is not in the %s database\" % (product, database.id))\npathways = self.predict_pathways(product, hosts=hosts, database=database, aerobic=aerobic)\noptimization_reports = self.optimize_strains(pathways, view, aerobic=aerobic)\n@@ -182,8 +182,8 @@ class Designer(object):\nArguments\n---------\n- pathways: list\n- A list of dictionaries to optimize ([Host, Model] -> PredictedPathways).\n+ pathways: dict\n+ A dictionary with information of pathways to optimize ([Host, Model] -> PredictedPathways).\nview: object\nA view for multi, single os distributed processing.\naerobic: bool\n@@ -299,13 +299,13 @@ class Designer(object):\n_bpcy, _pyield, target_flux, biomass = np.nan, np.nan, np.nan, np.nan\nreturn _bpcy, _pyield, target_flux, biomass\n- def predict_pathways(self, product, hosts=None, database=None, aerobic=True):\n+ def predict_pathways(self, product, hosts, database, aerobic=True):\n\"\"\"Predict production routes for a desired product and host spectrum.\nParameters\n----------\nproduct : str or Metabolite\nThe desired product.\n- hosts : list or Model or Host\n+ hosts : list\nA list of hosts (e.g. cameo.api.hosts), models, mixture thereof, or a single model or host.\ndatabase: SolverBasedModel\nA model to use as database. See also: cameo.models.universal\n@@ -317,7 +317,7 @@ class Designer(object):\ndict\n([Host, Model] -> PredictedPathways)\n\"\"\"\n- max_predictions = 8\n+ max_predictions = 4\ntimeout = 3 * 60\npathways = dict()\n@@ -357,11 +357,14 @@ class Designer(object):\nreturn product\nelif isinstance(product, str):\nsearch_result = products.search(product)\n+ search_result = search_result.loc[[i for i in search_result.index if i in database.metabolites]]\n+ if len(search_result) == 0:\n+ raise (\"No compound matches found for query %s\" % product)\nnotice(\"Found %d compounds that match query '%s'\" % (len(search_result), product))\nself.__display_product_search_result(search_result)\nnotice(\"Choosing best match (%s) ... please interrupt if this is not the desired compound.\"\n- % search_result.name[0])\n- self.__display_compound(search_result.InChI[0])\n+ % search_result.name.values[0])\n+ self.__display_compound(search_result.InChI.values[0])\nreturn database.metabolites.get_by_id(search_result.index[0])\n@staticmethod\n"
},
{
"change_type": "MODIFY",
"old_path": "cameo/cli/controllers.py",
"new_path": "cameo/cli/controllers.py",
"diff": "@@ -108,7 +108,7 @@ class BaseController(CementBaseController):\nview = SequentialView()\ndesign.debug = self.app.pargs.test\n-\n+ try:\nresults = design(product=product, hosts=_hosts,\nview=view, aerobic=not self.app.pargs.anaerobic)\n@@ -116,6 +116,9 @@ class BaseController(CementBaseController):\nresults['manipulations'] = results.manipulations.apply(str)\nOUTPUT_WRITER[output_format](results, output)\n+ except KeyError as e:\n+ print(e)\n+ exit(1)\n@expose(help=\"Search for products in our internal database\")\ndef search(self):\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: designer workflow
Fewer iterations and fewer pathways.
Fail gracefully if the compound is in products but not in the database |
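The guard added to `__translate_product_to_universal_reactions_model_metabolite` is worth spelling out: fuzzy product matches are only useful if the compound actually exists in the chosen universal database model. A sketch of that filtering step, assuming `products` is importable from `cameo.api` as the designer uses it and `database` is a universal model (the query string is illustrative):

```python
from cameo.api import products  # assumed import path, as used by cameo.api.designer


def best_match_in_database(query, database):
    """Return the database metabolite that best matches a fuzzy product query."""
    search_result = products.search(query)
    # Keep only hits that exist as metabolites in the database model.
    hits = [index for index in search_result.index if index in database.metabolites]
    if not hits:
        raise KeyError("No compound matches found for query %s" % query)
    search_result = search_result.loc[hits]
    return database.metabolites.get_by_id(search_result.index[0])
```

Raising `KeyError` here matches the follow-up fix in the next commit below, which replaced the bare `raise` of a string.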
89,737 | 27.03.2017 14:19:07 | -7,200 | c46ab214fb512541fe73c300bb4b59855c0402b4 | fix: raise KeyError | [
{
"change_type": "MODIFY",
"old_path": "cameo/api/designer.py",
"new_path": "cameo/api/designer.py",
"diff": "@@ -359,7 +359,7 @@ class Designer(object):\nsearch_result = products.search(product)\nsearch_result = search_result.loc[[i for i in search_result.index if i in database.metabolites]]\nif len(search_result) == 0:\n- raise (\"No compound matches found for query %s\" % product)\n+ raise KeyError(\"No compound matches found for query %s\" % product)\nnotice(\"Found %d compounds that match query '%s'\" % (len(search_result), product))\nself.__display_product_search_result(search_result)\nnotice(\"Choosing best match (%s) ... please interrupt if this is not the desired compound.\"\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: raise KeyError |
89,741 | 30.03.2017 08:19:23 | -7,200 | 97aedee2155d427f82145a7a5bdbbf0b2c690fee | fix: make add_pfba roll back all changes | [
{
"change_type": "MODIFY",
"old_path": "cameo/flux_analysis/simulation.py",
"new_path": "cameo/flux_analysis/simulation.py",
"diff": "@@ -106,7 +106,7 @@ def add_pfba(model, objective=None, fraction_of_optimum=1.0, time_machine=None):\nA time machine to undo the added pFBA objective\n\"\"\"\nif objective is not None:\n- model.objective = objective\n+ model.change_objective(objective, time_machine=time_machine)\nif model.solver.objective.name == '_pfba_objective':\nraise ValueError('model already has pfba objective')\nif fraction_of_optimum > 0:\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: make add_pfba roll back all changes (#145) |
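After this fix every change made by `add_pfba` — including a user-supplied `objective` — is registered with the `TimeMachine`, so leaving the context restores the model completely. A small sketch mirroring the earlier `test_add_remove_pfb` test; the model identifier and the temporary objective reaction are illustrative:

```python
from cameo import load_model
from cameo.flux_analysis.simulation import add_pfba
from cameo.util import TimeMachine

model = load_model("e_coli_core")  # illustrative model identifier

with TimeMachine() as tm:
    # Using ATPM as a temporary objective is illustrative; any reaction works.
    add_pfba(model, objective=model.reactions.ATPM, time_machine=tm)
    assert model.objective.name == '_pfba_objective'

# The pFBA objective is gone and the original objective has been restored.
assert model.objective.name != '_pfba_objective'
```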
89,737 | 30.03.2017 15:05:03 | -7,200 | caa6a02d6be99299fc398fc2b999c66f645cb22a | fix: make PathwayResult correctly pickable | [
{
"change_type": "MODIFY",
"old_path": "cameo/strain_design/pathway_prediction/pathway_predictor.py",
"new_path": "cameo/strain_design/pathway_prediction/pathway_predictor.py",
"diff": "@@ -58,6 +58,23 @@ class PathwayResult(Pathway, Result, StrainDesign):\nself.product = product\nself.targets = self._build_targets()\n+ def __getstate__(self):\n+ state = Result.__getstate__(self)\n+ state['reactions'] = self.reactions\n+ state['exchanges'] = self.exchanges\n+ state['adapters'] = self.adapters\n+ state['product'] = self.product\n+ state['targets'] = self.targets\n+ return state\n+\n+ def __setstate__(self, state):\n+ Result.__setstate__(self, state)\n+ self.reactions = state['reactions']\n+ self.exchanges = state['exchanges']\n+ self.adapters = state['adapters']\n+ self.product = state['product']\n+ self.targets = state['targets']\n+\ndef _replace_adapted_metabolites(self, reaction):\n\"\"\"\nReplace adapted metabolites by model metabolites\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: make PathwayResult correctly pickable |
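The pickling fix follows the usual `__getstate__`/`__setstate__` pattern: start from the parent class's state and add the subclass attributes explicitly, so a `pickle` round trip preserves them. A generic sketch of that pattern with made-up classes (the names are illustrative, not cameo API):

```python
import pickle


class BaseResult:
    def __getstate__(self):
        return {"meta": getattr(self, "meta", None)}

    def __setstate__(self, state):
        self.meta = state["meta"]


class PathwayLikeResult(BaseResult):
    def __init__(self, reactions, product, meta=None):
        self.meta = meta
        self.reactions = reactions
        self.product = product

    def __getstate__(self):
        state = BaseResult.__getstate__(self)  # start from the parent's state
        state["reactions"] = self.reactions    # add subclass attributes explicitly
        state["product"] = self.product
        return state

    def __setstate__(self, state):
        BaseResult.__setstate__(self, state)
        self.reactions = state["reactions"]
        self.product = state["product"]


original = PathwayLikeResult(["R1", "R2"], "succinate", meta="demo")
restored = pickle.loads(pickle.dumps(original))
assert restored.reactions == original.reactions and restored.product == original.product
```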
89,737 | 30.03.2017 15:05:56 | -7,200 | 280a46e03e117dcffa9a0badfbf185a460727ebf | fix: unit tests for pathway predictions | [
{
"change_type": "MODIFY",
"old_path": "tests/test_pathway_predictions.py",
"new_path": "tests/test_pathway_predictions.py",
"diff": "from __future__ import absolute_import, print_function\n+import pickle\nimport re\nfrom os.path import join\n@@ -44,19 +45,12 @@ def pathway_predictor_result(pathway_predictor):\nclass TestPathwayPredictor:\n-\ndef test_setting_incorrect_universal_model_raises(self, pathway_predictor):\nmodel, predictor = pathway_predictor\nwith pytest.raises(ValueError) as excinfo:\nPathwayPredictor(model, universal_model='Mickey_Mouse')\nassert re.search(r'Provided universal_model.*', str(excinfo.value))\n- # def test_predict_native_compound_returns_shorter_alternatives(self):\n- # result = self.pathway_predictor.run(product='Phosphoenolpyruvate', max_predictions=1)\n- # self.assertTrue(len(result.pathways) == 1)\n- # self.assertTrue(len(result.pathways[0].pathway) == 3)\n- # self.assertTrue(len(result.pathways[0].adapters) == 0)\n-\ndef test_predict_non_native_compound(self, pathway_predictor):\nmodel, predictor = pathway_predictor\nresult = predictor.run(product='L-Serine', max_predictions=1)\n@@ -88,18 +82,30 @@ class TestPathwayPredictor:\nassert metabolite.id in metabolite_ids\n-class PathwayPredictionsTestCase:\n+class TestPathwayResult:\ndef test_pathway(self, pathway_predictor_result):\nmodel, result = pathway_predictor_result\npathway = result[0]\n- biomass = 'Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2'\n+ biomass = 'Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2'\nassert isinstance(pathway, PathwayResult)\nassert isinstance(pathway, Pathway)\nassert isinstance(pathway.production_envelope(model, objective=biomass), PhenotypicPhasePlaneResult)\nassert pathway.needs_optimization(model, objective=biomass)\n+ def test_pickle_pathway(self, pathway_predictor_result):\n+ model, result = pathway_predictor_result\n+ dump = pickle.dumps(result[0])\n+ result_recovered = pickle.loads(dump)\n+\n+ assert set(r.id for r in result[0].reactions) == set(r.id for r in result_recovered.reactions)\n+ assert set(r.id for r in result[0].targets) == set(r.id for r in result_recovered.targets)\n+ assert set(r.id for r in result[0].adapters) == set(r.id for r in result_recovered.adapters)\n+ assert set(r.id for r in result[0].exchanges) == set(r.id for r in result_recovered.exchanges)\n+ assert result[0].product.id == result_recovered.product.id\n+\ndef test_plug_model_without_time_machine(self, pathway_predictor_result):\nmodel, result = pathway_predictor_result\n+ model = model.copy()\nresult[0].plug_model(model)\nfor reaction in result[0].reactions:\nassert reaction in model.reactions\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: unit tests for pathway predictions |
89,741 | 29.03.2017 19:02:30 | -7,200 | da4b62a7f22d1f7b5395c5139f55277a8ad98e50 | doc: set docs version from cameo.__version__ | [
{
"change_type": "MODIFY",
"old_path": "docs/conf.py",
"new_path": "docs/conf.py",
"diff": "@@ -86,21 +86,25 @@ copyright = u'2014, Novo Nordisk Foundation Center for Biosustainability'\n#\n# The short X.Y version.\n-sys.path.insert(0, '.')\n-sys.path.insert(0, '../') # to get the versioneer module\n-\n-import versioneer\n-\n-versioneer.VCS = 'git'\n-versioneer.versionfile_source = '../cameo/_version.py'\n-versioneer.versionfile_build = '../cameo/_version.py'\n-versioneer.tag_prefix = '' # tags are like 1.2.0\n-versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'\n+# sys.path.insert(0, '.')\n+# sys.path.insert(0, '../') # to get the versioneer module\n+#\n+# import versioneer\n+#\n+# versioneer.VCS = 'git'\n+# versioneer.versionfile_source = '../cameo/_version.py'\n+# versioneer.versionfile_build = '../cameo/_version.py'\n+# versioneer.tag_prefix = '' # tags are like 1.2.0\n+# versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'\n+#\n+# release = versioneer.get_version()\n+# version = release\n+# sys.path.pop(0)\n+# sys.path.pop(0)\n-release = versioneer.get_version()\n+import cameo\n+release = cameo.__version__\nversion = release\n-sys.path.pop(0)\n-sys.path.pop(0)\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | doc: set docs version from cameo.__version__ |
89,741 | 29.03.2017 20:37:43 | -7,200 | c807117fe8a7fdb7fa788327bdf64183f286b35f | doc: update README.rst and index.rst | [
{
"change_type": "MODIFY",
"old_path": "docs/index.rst",
"new_path": "docs/index.rst",
"diff": "@@ -6,21 +6,10 @@ Welcome to cameo!\n**Cameo** is a high-level python library developed to aid the strain\ndesign process in metabolic engineering projects. The library provides a\n-modular framework of simulation methods, strain design methods, access\n+modular framework of simulation methods, strain design methods, and access\nto models, that targets developers that want custom analysis workflows.\n-\n-Computationally heavy methods have been parallelized and can be run on a\n-clusters using the IPython parallelization framework (see example and\n-documentation for more details). The default fallback is python's\n-multiprocessing library.\n-\n-Furthermore, it will expose (in the near future) a high-level API to users that simply want to compute\n-promising strain designs (work in progress ...).\n-\n-::\n-\n- from cameo.api import design\n- design(product='L-Serine')\n+Furthermore, it exposes a high-level API to users that just want to\n+compute promising strain designs.\nYou got curious? Head over to `try.cameo.bio <http://try.cameo.bio>`__\nand give it a try.\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | doc: update README.rst and index.rst |
89,741 | 29.03.2017 20:39:37 | -7,200 | f5a2dc625137695c38a3e18896b0efa75bf2e78c | fix: bokeh version be pinned <=0.12.1
All the tutorial notebooks with bokeh plots are broken with bokeh >0.12.1 | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -47,7 +47,7 @@ extra_requirements = {\n'docs': ['Sphinx>=1.3.5', 'numpydoc>=0.5'],\n'swiglpk': ['swiglpk>=1.2.14'],\n'plotly': ['plotly>=1.9.6'],\n- 'bokeh': ['bokeh>=0.11.1'],\n+ 'bokeh': ['bokeh<=0.12.1'],\n'jupyter': ['jupyter>=1.0.0', 'ipywidgets>=4.1.1'],\n'test': ['pytest', 'pytest-cov'],\n'parallel': ['redis>=2.10.5', 'ipyparallel>=5.0.1'],\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | fix: bokeh version be pinned <=0.12.1
All the tutorial notebooks with bokeh plots are broken with bokeh >0.12.1 |
89,741 | 30.03.2017 13:54:25 | -7,200 | d13286479bc7bda4c11ab1607332c54aa3e90138 | doc: update apidocs | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/apidoc_output/cameo.cli.rst",
"diff": "+cameo.cli package\n+=================\n+\n+Submodules\n+----------\n+\n+cameo.cli.controllers module\n+----------------------------\n+\n+.. automodule:: cameo.cli.controllers\n+ :members:\n+ :undoc-members:\n+ :show-inheritance:\n+\n+\n+Module contents\n+---------------\n+\n+.. automodule:: cameo.cli\n+ :members:\n+ :undoc-members:\n+ :show-inheritance:\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/apidoc_output/cameo.core.rst",
"new_path": "docs/apidoc_output/cameo.core.rst",
"diff": "@@ -12,6 +12,14 @@ cameo.core.gene module\n:undoc-members:\n:show-inheritance:\n+cameo.core.manipulation module\n+------------------------------\n+\n+.. automodule:: cameo.core.manipulation\n+ :members:\n+ :undoc-members:\n+ :show-inheritance:\n+\ncameo.core.metabolite module\n----------------------------\n@@ -68,6 +76,22 @@ cameo.core.solver_based_model_dual module\n:undoc-members:\n:show-inheritance:\n+cameo.core.strain_design module\n+-------------------------------\n+\n+.. automodule:: cameo.core.strain_design\n+ :members:\n+ :undoc-members:\n+ :show-inheritance:\n+\n+cameo.core.target module\n+------------------------\n+\n+.. automodule:: cameo.core.target\n+ :members:\n+ :undoc-members:\n+ :show-inheritance:\n+\nModule contents\n---------------\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/apidoc_output/cameo.rst",
"new_path": "docs/apidoc_output/cameo.rst",
"diff": "@@ -7,6 +7,7 @@ Subpackages\n.. toctree::\ncameo.api\n+ cameo.cli\ncameo.core\ncameo.data\ncameo.flux_analysis\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/apidoc_output/cameo.strain_design.rst",
"new_path": "docs/apidoc_output/cameo.strain_design.rst",
"diff": "@@ -10,18 +10,6 @@ Subpackages\ncameo.strain_design.heuristic\ncameo.strain_design.pathway_prediction\n-Submodules\n-----------\n-\n-cameo.strain_design.strain_design module\n-----------------------------------------\n-\n-.. automodule:: cameo.strain_design.strain_design\n- :members:\n- :undoc-members:\n- :show-inheritance:\n-\n-\nModule contents\n---------------\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | doc: update apidocs |
89,741 | 30.03.2017 15:31:26 | -7,200 | 8b271c0c4dbffa8e83861f4e2ec7677feb025ca2 | chore: no longer necessary to provide swiglpk as a soft dependency | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -45,13 +45,12 @@ requirements = ['numpy>=1.9.1',\nextra_requirements = {\n'docs': ['Sphinx>=1.3.5', 'numpydoc>=0.5'],\n- 'swiglpk': ['swiglpk>=1.2.14'],\n'plotly': ['plotly>=1.9.6'],\n'bokeh': ['bokeh<=0.12.1'],\n'jupyter': ['jupyter>=1.0.0', 'ipywidgets>=4.1.1'],\n'test': ['pytest', 'pytest-cov'],\n'parallel': ['redis>=2.10.5', 'ipyparallel>=5.0.1'],\n- 'sbml': ['python-libsbml>=5.13.0', 'lxml>=3.6.0'],\n+ 'sbml': ['python-libsbml>=5.13.0', 'lxml>=3.6.0']\n}\nextra_requirements['all'] = list(set(chain(*extra_requirements.values())))\n"
}
] | Python | Apache License 2.0 | biosustain/cameo | chore: no longer necessary to provide swiglpk as a soft dependency |